Python __builtin__.min() Examples
The following are 14 code examples of __builtin__.min(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module __builtin__, or try the search function.
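All of these call sites live in modules that define their own min (for example gnumpy's array-aware min), so the genuine built-in has to be reached through the __builtin__ module (renamed builtins in Python 3). A minimal sketch of that pattern, not taken from any of the projects below:

    try:
        import __builtin__                    # Python 2 name
    except ImportError:
        import builtins as __builtin__        # Python 3 equivalent

    def min(x, axis=None):
        """Module-level min that shadows the built-in, as gnumpy does."""
        # ... an array-aware reduction would go here ...
        return __builtin__.min(x)             # fall back to the real built-in

    print(min([3, 1, 2]))                     # -> 1, via the genuine built-in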
Example #1
Source File: gnumpy.py From imageqa-public with MIT License | 5 votes |
def tile(a, reps):
    if type(reps) in _numberTypes: reps = (reps,)
    reps = tuple(reps) # for generator expressions
    if type(a) in _numberTypes:
        ret = empty(reps)
        ret._base.assign(a)
        return ret
    a = as_garray(a)
    if len(reps) > a.ndim: a = a._add_axes(len(reps))
    if len(reps) < a.ndim: reps = _extend_shape(reps, a.ndim) # now len(reps)==a.ndim
    retShape = tuple([ a.shape[i] * reps[i] for i in tuple(xrange(len(reps)))])
    if _prodT(retShape)==0: return zeros(retShape)
    if _prodT(reps)==1: return a
    for i in range(a.ndim-1): # merge replication requests on adjacent axes, for efficiency.
        if reps[i]!=1 and reps[i+1]!=1 and a.shape[i]==1:
            return a.reshape(_deleteT2(a.shape, i)).tile(reps[:i]+(_prodT(reps[i:i+2]),)+reps[i+2:]).reshape(map(operator.mul, a.shape, reps))
    def dataIDone(nextA, i): return nextA.reshape(_modifyT(a.shape, i, a.shape[i]*reps[i])).tile(_modifyT(reps, i, 1))
    if reps[0]!=1: # replicating rows is easy and efficient: just repeat the data a number of times.
        temp = empty((reps[0], a.size)) # shape doesn't matter because dataIDone changes it
        tempCm = temp._base_shaped(1)
        if reps[0]>=1: _cm_row_slice_read(tempCm, 0, 1).assign(a._base_as_row())
        nCopiesDone = 1
        while nCopiesDone < reps[0]:
            nNow = __builtin__.min(nCopiesDone, reps[0]-nCopiesDone)
            _cm_row_slice_read(tempCm, nCopiesDone, nCopiesDone + nNow).assign(_cm_row_slice_read(tempCm, 0, nNow))
            nCopiesDone += nNow
        return dataIDone(temp, 0)
    # the general case is repeating a subset (as opposed to the whole array) n times, before moving on to the next subset
    # using a transpose with the right shape, the subsets can become columns. those can be lengthened because that is replicating rows; a second transpose makes them now-lengthened subsets again
    axis = __builtin__.min( i for i in range(a.ndim) if reps[i]!=1)
    return dataIDone(a.reshape_2d(axis).T.tile((reps[axis], 1)).T, axis)
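The interesting use of __builtin__.min here is inside the row-replication loop: each pass copies as many rows as already exist, but never more than are still missing, so the buffer fills in a logarithmic number of copy operations. A plain-Python sketch of that doubling pattern, with a list standing in for the cudamat buffer:

    def replicate(row, n_copies):
        """Fill a list with n_copies of row by repeated doubling, as tile() does on the GPU."""
        out = [row]                            # one copy already placed
        done = 1
        while done < n_copies:
            now = min(done, n_copies - done)   # __builtin__.min in gnumpy: copy at most what is missing
            out.extend(out[:now])              # stands in for the _cm_row_slice_read(...).assign(...) call
            done += now
        return out

    assert len(replicate([1, 2], 5)) == 5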
Example #2
Source File: gnumpy.py From imageqa-public with MIT License | 5 votes |
def min(x, axis=None):
    """ On numpy arrays this returns a numpy array; on garrays and other array-likes this returns a garray. """
    return _reductor__base(x, axis, garray.min, numpy.min)
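This free function only dispatches to either the garray method or numpy's reduction, depending on the input type. A rough sketch of what such a helper could look like, assuming only numpy is available (the name _reductor and the garray branch shown as a plain callable are illustrative, not gnumpy's actual implementation):

    import numpy

    def _reductor(x, axis, garray_reducer, numpy_reducer):
        """Pick the reducer that matches the input type (sketch of gnumpy's dispatch idea)."""
        if isinstance(x, numpy.ndarray):
            return numpy_reducer(x, axis)
        return garray_reducer(x, axis)         # garrays and other array-likes

    print(_reductor(numpy.arange(6).reshape(2, 3), 0, None, numpy.min))   # [0 1 2]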
Example #3
Source File: gnumpy.py From imageqa-public with MIT License | 5 votes |
def min(self, axis=None): return -(-self).max(axis)
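This one-liner relies on the identity min(x) = -max(-x), which lets gnumpy reuse the max kernel it already has instead of implementing a separate min reduction. The same identity checked in plain numpy:

    import numpy

    x = numpy.array([[3.0, -1.0], [2.0, 5.0]])
    assert numpy.array_equal(-(-x).max(axis=0), x.min(axis=0))
    assert -(-x).max() == x.min()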
Example #4
Source File: gnumpy.py From imageqa-public with MIT License | 5 votes |
def all(self, axis=None): return ( True if self.size==0 else (self.as_bool()).min())
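Here all() is reduced to min() over a 0/1 array: the result is true exactly when the smallest element is 1, with an empty array defined as True. The same trick demonstrated with numpy:

    import numpy

    x = numpy.array([1.0, 2.0, 0.0])
    print(bool((x != 0).astype(float).min()))        # False: the 0.0 entry drives the minimum to 0
    print(bool((x[:2] != 0).astype(float).min()))    # True: every element is nonzero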
Example #5
Source File: gnumpy.py From DeepNeuralNet-QSAR with GNU General Public License v3.0 | 5 votes |
def tile(a, reps):
    if type(reps) in _numberTypes: reps = (reps,)
    reps = tuple(reps) # for generator expressions
    if type(a) in _numberTypes:
        ret = empty(reps)
        ret._base.assign(a)
        return ret
    a = as_garray(a)
    if len(reps) > a.ndim: a = a._add_axes(len(reps))
    if len(reps) < a.ndim: reps = _extend_shape(reps, a.ndim) # now len(reps)==a.ndim
    retShape = tuple([ a.shape[i] * reps[i] for i in tuple(xrange(len(reps)))])
    if _prodT(retShape)==0: return zeros(retShape)
    if _prodT(reps)==1: return a
    for i in range(a.ndim-1): # merge replication requests on adjacent axes, for efficiency.
        if reps[i]!=1 and reps[i+1]!=1 and a.shape[i]==1:
            return a.reshape(_deleteT2(a.shape, i)).tile(reps[:i]+(_prodT(reps[i:i+2]),)+reps[i+2:]).reshape(map(operator.mul, a.shape, reps))
    def dataIDone(nextA, i): return nextA.reshape(_modifyT(a.shape, i, a.shape[i]*reps[i])).tile(_modifyT(reps, i, 1))
    if reps[0]!=1: # replicating rows is easy and efficient: just repeat the data a number of times.
        temp = empty((reps[0], a.size)) # shape doesn't matter because dataIDone changes it
        tempCm = temp._base_shaped(1)
        if reps[0]>=1: _cm_row_slice_read(tempCm, 0, 1).assign(a._base_as_row())
        nCopiesDone = 1
        while nCopiesDone < reps[0]:
            nNow = __builtin__.min(nCopiesDone, reps[0]-nCopiesDone)
            _cm_row_slice_read(tempCm, nCopiesDone, nCopiesDone + nNow).assign(_cm_row_slice_read(tempCm, 0, nNow))
            nCopiesDone += nNow
        return dataIDone(temp, 0)
    # the general case is repeating a subset (as opposed to the whole array) n times, before moving on to the next subset
    # using a transpose with the right shape, the subsets can become columns. those can be lengthened because that is replicating rows; a second transpose makes them now-lengthened subsets again
    axis = __builtin__.min( i for i in range(a.ndim) if reps[i]!=1)
    return dataIDone(a.reshape_2d(axis).T.tile((reps[axis], 1)).T, axis)
Example #6
Source File: gnumpy.py From DeepNeuralNet-QSAR with GNU General Public License v3.0 | 5 votes |
def min(x, axis=None):
    """ On numpy arrays this returns a numpy array; on garrays and other array-likes this returns a garray. """
    return _reductor__base(x, axis, garray.min, numpy.min)
Example #7
Source File: gnumpy.py From DeepNeuralNet-QSAR with GNU General Public License v3.0 | 5 votes |
def min(self, axis=None): return -(-self).max(axis)
Example #8
Source File: gnumpy.py From DeepNeuralNet-QSAR with GNU General Public License v3.0 | 5 votes |
def all(self, axis=None): return ( True if self.size==0 else (self.as_bool()).min())
Example #9
Source File: new_min_max.py From addon with GNU General Public License v3.0 | 5 votes |
def newmin(*args, **kwargs): return new_min_max(_builtin_min, *args, **kwargs)
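The module keeps a private alias _builtin_min to the real built-in and routes every call through a shared new_min_max helper; in the future package this is how Python-3-style min/max behaviour is backported to Python 2. A generic sketch of the keep-an-alias-then-wrap pattern (the default= handling shown is an assumption about what such a wrapper might add, not the library's actual code):

    _builtin_min = min          # grab the real built-in before anything shadows it
    _MISSING = object()

    def newmin(*args, **kwargs):
        """Wrapper around the built-in min that adds a Python-3-style default= fallback."""
        default = kwargs.pop('default', _MISSING)
        try:
            return _builtin_min(*args, **kwargs)
        except ValueError:                      # empty iterable
            if default is not _MISSING:
                return default
            raise

    print(newmin([4, 2, 9]))        # 2
    print(newmin([], default=0))    # 0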
Example #10
Source File: fypp.py From fypp with BSD 2-Clause "Simplified" License | 5 votes |
def _process_arguments(self, args, keywords):
    kwdict = dict(keywords)
    argdict = {}
    nargs = min(len(args), len(self._argnames))
    for iarg in range(nargs):
        argdict[self._argnames[iarg]] = args[iarg]
    if nargs < len(args):
        if self._varpos is None:
            msg = "macro '{0}' called with too many positional arguments "\
                  "(expected: {1}, received: {2})"\
                  .format(self._name, len(self._argnames), len(args))
            raise FyppFatalError(msg, self._fname, self._spans[0])
        else:
            argdict[self._varpos] = list(args[nargs:])
    elif self._varpos is not None:
        argdict[self._varpos] = []
    for argname in self._argnames[:nargs]:
        if argname in kwdict:
            msg = "got multiple values for argument '{0}'".format(argname)
            raise FyppFatalError(msg, self._fname, self._spans[0])
    if nargs < len(self._argnames):
        for argname in self._argnames[nargs:]:
            if argname in kwdict:
                argdict[argname] = kwdict.pop(argname)
            elif argname in self._defaults:
                argdict[argname] = self._defaults[argname]
            else:
                msg = "macro '{0}' called without mandatory positional "\
                      "argument '{1}'".format(self._name, argname)
                raise FyppFatalError(msg, self._fname, self._spans[0])
    if kwdict and self._varkw is None:
        kwstr = "', '".join(kwdict.keys())
        msg = "macro '{0}' called with unknown keyword argument(s) '{1}'"\
              .format(self._name, kwstr)
        raise FyppFatalError(msg, self._fname, self._spans[0])
    if self._varkw is not None:
        argdict[self._varkw] = kwdict
    return argdict
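Here min(len(args), len(self._argnames)) caps how many positional arguments get bound to named macro parameters; anything beyond that either spills into the *varpos catch-all or triggers the "too many positional arguments" error. A stripped-down sketch of that binding step (names are illustrative, not fypp's API):

    def bind_positional(argnames, args, varpos=None):
        """Bind positional args to parameter names, spilling extras into varpos."""
        nargs = min(len(args), len(argnames))
        bound = dict(zip(argnames[:nargs], args[:nargs]))
        if len(args) > nargs:
            if varpos is None:
                raise TypeError('too many positional arguments')
            bound[varpos] = list(args[nargs:])
        return bound

    print(bind_positional(['x', 'y'], (1, 2, 3), varpos='rest'))   # {'x': 1, 'y': 2, 'rest': [3]}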
Example #11
Source File: new_min_max.py From Tautulli with GNU General Public License v3.0 | 5 votes |
def newmin(*args, **kwargs): return new_min_max(_builtin_min, *args, **kwargs)
Example #12
Source File: new_min_max.py From V1EngineeringInc-Docs with Creative Commons Attribution Share Alike 4.0 International | 5 votes |
def newmin(*args, **kwargs): return new_min_max(_builtin_min, *args, **kwargs)
Example #13
Source File: gnumpy.py From imageqa-public with MIT License | 4 votes |
def _reduction__base(self, operatorName, axis):
    if axis==None: return self.ravel()._reduction__base(operatorName, 0).item()
    if not type(axis) in _numberTypes: raise TypeError('the value %s is not appropriate for the "axis" parameter.' % str(axis))
    if axis < -self.ndim or axis>=self.ndim: raise ValueError('axis (%d) out of bounds for an array with %d axes.' % (axis, self.ndim))
    axis = int(axis) % self.ndim
    if self.size==0:
        retShape = _deleteT2(self.shape, axis)
        if operatorName=='sum': return zeros(retShape)
        elif operatorName=='max': return tile(-inf, retShape)
        else: assert False
    if operatorName=='max' and axis==0 and cudamatHas('maxAxis0'): # my own fast implementation
        ret = empty(self.shape[1:])
        _ctInt = _cudamat.ct.c_int
        nThreadsPerBlock = 32
        gridX, gridY = ((ret.size+nThreadsPerBlock-1)//nThreadsPerBlock), 1
        while gridX>65535: gridY*=2; gridX = (gridX+1)//2;
        _cudamat._cudamat.maxAxis0.restype = _ctypes.c_int
        assert 0==_cudamat._cudamat.maxAxis0(_ctInt(gridX), _ctInt(gridY), _ctInt(nThreadsPerBlock), self._base.p_mat, ret._base.p_mat, _ctInt(self.shape[0]), _ctInt(ret.size))
        return ret
    if axis==0 and operatorName=='max': # max over rows is not yet supported in cudamat
        return self.reshape_2d(1).T.max(1).reshape(self.shape[1:])
    if axis==0 and self.ndim==1 and self.size>5000 and operatorName=='sum': # optimization. apparently, cudamat is not maximally efficient.
        n = int(numpy.sqrt(self.size-1))
        return self[:n*n].reshape((n, n))._reduction__base(operatorName, 0)._reduction__base(operatorName, 0) + self[n*n:]._reduction__base(operatorName, 0)
    if operatorName=='sum':
        chunkSize = 1024*256 # sum over longer dimensions fails in cudamat
        nChunks = (self.shape[axis] + chunkSize-1) // chunkSize
        if nChunks>1:
            return reduceAdd( self[(slice(None),) * axis + (slice(chunkI*chunkSize, __builtin__.min(self.shape[axis], (chunkI+1)*chunkSize)),)]._reduction__base(operatorName, axis) for chunkI in range(nChunks))
    if operatorName=='max' and self.isnan().any2(): # cudamat bug workaround
        return garray(self.asarray().max(axis))
    operatorInCm = {'sum': _cmType.sum, 'max': _cmType.max}[operatorName]
    if axis==0: return _check_number_types(garray(operatorInCm(self._base_shaped(1), 1, _new_cm(_prodT(self.shape[1:]))), self.shape[1:], None))
    if axis==self.ndim-1:
        if self.ndim!=2: return self.reshape_2d(-1)._reduction__base(operatorName, 1).reshape(self.shape[:-1])
        if self.ndim==2:
            chunkSize = 2**16-1
            nChunks = (len(self) + chunkSize-1) // chunkSize
            if nChunks>1: # cudamat chokes on big arrays, so break it in pieces for cudamat
                chunks = tuple([ self[chunkI*chunkSize : __builtin__.min((chunkI+1)*chunkSize, len(self))] for chunkI in range(nChunks)])
                return concatenate([ chunk._reduction__base(operatorName, 1) for chunk in chunks])
            else: # small array
                return _check_number_types(garray(operatorInCm(self._base_shaped(1), 0, _new_cm((len(self), 1))), (len(self),), None))
    return self.transpose_simple(axis)._reduction__base(operatorName, 0).transpose_simple(-axis)

# ------------------------------------------------------------------------------- external misc non-numerical
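Both chunking branches use __builtin__.min to clamp the end of the last slice so it never runs past the axis length. The boundary arithmetic on its own, as a small sketch:

    def chunk_bounds(length, chunk_size):
        """Yield (start, end) pairs covering [0, length), clamping the final chunk as _reduction__base does."""
        n_chunks = (length + chunk_size - 1) // chunk_size
        for i in range(n_chunks):
            yield i * chunk_size, min(length, (i + 1) * chunk_size)

    print(list(chunk_bounds(10, 4)))   # [(0, 4), (4, 8), (8, 10)]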
Example #14
Source File: gnumpy.py From DeepNeuralNet-QSAR with GNU General Public License v3.0 | 4 votes |
def _reduction__base(self, operatorName, axis):
    if axis==None: return self.ravel()._reduction__base(operatorName, 0).item()
    if not type(axis) in _numberTypes: raise TypeError('the value %s is not appropriate for the "axis" parameter.' % str(axis))
    if axis < -self.ndim or axis>=self.ndim: raise ValueError('axis (%d) out of bounds for an array with %d axes.' % (axis, self.ndim))
    axis = int(axis) % self.ndim
    if self.size==0:
        retShape = _deleteT2(self.shape, axis)
        if operatorName=='sum': return zeros(retShape)
        elif operatorName=='max': return tile(-inf, retShape)
        else: assert False
    if operatorName=='max' and axis==0 and cudamatHas('maxAxis0'): # my own fast implementation
        ret = empty(self.shape[1:])
        _ctInt = _cudamat.ct.c_int
        nThreadsPerBlock = 32
        gridX, gridY = ((ret.size+nThreadsPerBlock-1)//nThreadsPerBlock), 1
        while gridX>65535: gridY*=2; gridX = (gridX+1)//2;
        _cudamat._cudamat.maxAxis0.restype = _ctypes.c_int
        assert 0==_cudamat._cudamat.maxAxis0(_ctInt(gridX), _ctInt(gridY), _ctInt(nThreadsPerBlock), self._base.p_mat, ret._base.p_mat, _ctInt(self.shape[0]), _ctInt(ret.size))
        return ret
    if axis==0 and operatorName=='max': # max over rows is not yet supported in cudamat
        return self.reshape_2d(1).T.max(1).reshape(self.shape[1:])
    if axis==0 and self.ndim==1 and self.size>5000 and operatorName=='sum': # optimization. apparently, cudamat is not maximally efficient.
        n = int(numpy.sqrt(self.size-1))
        return self[:n*n].reshape((n, n))._reduction__base(operatorName, 0)._reduction__base(operatorName, 0) + self[n*n:]._reduction__base(operatorName, 0)
    if operatorName=='sum':
        chunkSize = 1024*256 # sum over longer dimensions fails in cudamat
        nChunks = (self.shape[axis] + chunkSize-1) // chunkSize
        if nChunks>1:
            return reduceAdd( self[(slice(None),) * axis + (slice(chunkI*chunkSize, __builtin__.min(self.shape[axis], (chunkI+1)*chunkSize)),)]._reduction__base(operatorName, axis) for chunkI in range(nChunks))
    if operatorName=='max' and self.isnan().any2(): # cudamat bug workaround
        return garray(self.asarray().max(axis))
    operatorInCm = {'sum': _cmType.sum, 'max': _cmType.max}[operatorName]
    if axis==0: return _check_number_types(garray(operatorInCm(self._base_shaped(1), 1, _new_cm(_prodT(self.shape[1:]))), self.shape[1:], None))
    if axis==self.ndim-1:
        if self.ndim!=2: return self.reshape_2d(-1)._reduction__base(operatorName, 1).reshape(self.shape[:-1])
        if self.ndim==2:
            chunkSize = 2**16-1
            nChunks = (len(self) + chunkSize-1) // chunkSize
            if nChunks>1: # cudamat chokes on big arrays, so break it in pieces for cudamat
                chunks = tuple([ self[chunkI*chunkSize : __builtin__.min((chunkI+1)*chunkSize, len(self))] for chunkI in range(nChunks)])
                return concatenate([ chunk._reduction__base(operatorName, 1) for chunk in chunks])
            else: # small array
                return _check_number_types(garray(operatorInCm(self._base_shaped(1), 0, _new_cm((len(self), 1))), (len(self),), None))
    return self.transpose_simple(axis)._reduction__base(operatorName, 0).transpose_simple(-axis)

# ------------------------------------------------------------------------------- external misc non-numerical