Python multiprocessing.sharedctypes.RawArray() Examples
The following are 21 code examples of multiprocessing.sharedctypes.RawArray(), each taken from an open-source project; the source file, project, and license are listed above each example. You may also want to check out the other available functions and classes of the multiprocessing.sharedctypes module.
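Most of the examples below follow the same basic pattern: allocate a RawArray, wrap it in a NumPy array for zero-copy access, and hand it to a child process. A minimal sketch of that pattern, using hypothetical names and values:

import ctypes
from multiprocessing import Process
from multiprocessing.sharedctypes import RawArray

import numpy as np

def fill(buf, value):
    # Wrap the shared buffer in a NumPy view (no copy) and write into it;
    # the parent sees the change because both processes use the same memory.
    np.frombuffer(buf, dtype=np.float64)[:] = value

if __name__ == '__main__':
    buf = RawArray(ctypes.c_double, 4)   # four unsynchronised doubles
    p = Process(target=fill, args=(buf, 1.5))
    p.start()
    p.join()
    print(list(buf))                     # [1.5, 1.5, 1.5, 1.5]

Unlike multiprocessing.Array, a RawArray carries no lock, so coordinating concurrent writes is the caller's responsibility.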
Example #1
Source File: execution_context.py From video-long-term-feature-banks with Apache License 2.0 | 6 votes |
def create_execution_context(self):
    pools = {}
    shared_data_lists = {}
    for worker_id in self._worker_ids:
        shared_data_list = []
        shared_data_lists[worker_id] = shared_data_list
        # for each worker_id, we fetch a batch size of 32 and this is being
        # done by various parallel processes
        for _ in range(self.batch_size):
            shared_arr = RawArray(ctypes.c_float, self._expected_data_size)
            shared_data_list.append(shared_arr)
        pools[worker_id] = Pool(
            processes=self._num_processes,
            initializer=self._init_pool,
            initargs=(shared_data_list,)
        )
    self.pools = pools
    self.shared_data_lists = shared_data_lists
    logger.info('execution_context created...')
    logger.info('pools: {}'.format(pools))
    logger.info('shared_data_lists: {}'.format(shared_data_lists))
Example #2
Source File: shmarray.py From switchy with Mozilla Public License 2.0 | 5 votes |
def create(shape, dtype='d', alignment=32):
    '''Create an uninitialised shared array. Avoid object arrays, as these
    will almost certainly break as the objects themselves won't be stored in
    shared memory, only the pointers
    '''
    shape = numpy.atleast_1d(shape).astype('i')
    dtype = numpy.dtype(dtype)
    # we're going to use a flat ctypes array
    N = numpy.prod(shape) + alignment
    # The upper bound of size we want to allocate to be certain
    # that we can take an aligned array of the right size from it.
    N_bytes_big = N * dtype.itemsize
    # The final (= right) size of the array
    N_bytes_right = numpy.prod(shape) * dtype.itemsize
    dt = 'b'
    # We create the big array first
    a = sharedctypes.RawArray(dt, int(N_bytes_big))
    sa = shmarray(a, (N_bytes_big,), dt)
    # We pick the first index of the new array that is aligned.
    # If the address of the first element is 1 and we want 8-alignment, the
    # first aligned index of the array is going to be 7 == -1 % 8
    start_index = -sa.ctypes.data % alignment
    # Finally, we take the (aligned) subarray and reshape it.
    sa = sa[start_index:start_index + N_bytes_right].view(dtype).reshape(shape)
    return sa
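A quick usage sketch of the helper above (hypothetical shapes, not taken from switchy): the returned shmarray view is backed by a plain RawArray, so it can be inherited by forked worker processes, and thanks to the slicing trick its data pointer is a multiple of the requested alignment.

img = create((480, 640), dtype='f', alignment=64)   # shared float32 buffer
assert img.ctypes.data % 64 == 0                    # start address is 64-byte aligned
img[:] = 0.0                                        # zero explicitly (cf. the zeros() helper in Example #10)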
Example #3
Source File: __init__.py From unity-python with MIT License | 5 votes |
def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a shared array
    '''
    from multiprocessing.sharedctypes import RawArray
    return RawArray(typecode_or_type, size_or_initializer)
Example #4
Source File: __init__.py From PokemonGo-DesktopMap with MIT License | 5 votes |
def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a shared array
    '''
    from multiprocessing.sharedctypes import RawArray
    return RawArray(typecode_or_type, size_or_initializer)
Example #5
Source File: shm_multiproc.py From reaver with MIT License | 5 votes |
def make_shared(n_envs, obs_space):
    shape = (n_envs, ) + obs_space.shape
    raw = RawArray(to_ctype(obs_space.dtype), int(np.prod(shape)))
    return np.frombuffer(raw, dtype=obs_space.dtype).reshape(shape)
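The to_ctype helper used above is defined elsewhere in the reaver project and is not shown on this page. A plausible stand-in (an assumption about its behaviour, not the project's actual code) simply maps a NumPy dtype to the matching ctypes type:

import numpy as np

def to_ctype(dtype):
    # Hypothetical replacement for reaver's helper: let NumPy pick the
    # equivalent ctypes type (requires NumPy >= 1.16).
    return np.ctypeslib.as_ctypes_type(np.dtype(dtype))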
Example #6
Source File: __init__.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a shared array
    '''
    from multiprocessing.sharedctypes import RawArray
    return RawArray(typecode_or_type, size_or_initializer)
Example #7
Source File: feature.py From VidVRD-helper with MIT License | 5 votes |
def __init__(self, shape, dtype=np.float32):
    # Compute total number of elements
    size = np.prod(shape)
    # Get the typecode of each element
    if dtype == np.float32:
        typecode = 'f'
    elif dtype == np.float64:
        typecode = 'd'
    else:
        assert False, 'Unknown dtype.'
    self.data = sharedctypes.RawArray(typecode, size)
    self.shape = shape
    self.dtype = dtype
Example #8
Source File: shmarray.py From Jacinle with MIT License | 5 votes |
def zeros(shape, dtype='d'):
    """Create a shared array initialised to zeros. Avoid object arrays, as
    these will almost certainly break as the objects themselves won't be
    stored in shared memory, only the pointers."""
    sa = create(shape, dtype=dtype)
    # contrary to the documentation, sharedctypes.RawArray does NOT always
    # return an array which is initialised to zero - do it ourselves
    # http://code.google.com/p/python-multiprocessing/issues/detail?id=25
    sa[:] = numpy.zeros(1, dtype)
    return sa
Example #9
Source File: shmarray.py From Jacinle with MIT License | 5 votes |
def create(shape, dtype='d', alignment=32):
    """Create an uninitialised shared array. Avoid object arrays, as these
    will almost certainly break as the objects themselves won't be stored in
    shared memory, only the pointers."""
    shape = numpy.atleast_1d(shape).astype('i')
    dtype = numpy.dtype(dtype)
    # we're going to use a flat ctypes array
    N = int(numpy.prod(shape) + alignment)
    # The upper bound of size we want to allocate to be certain
    # that we can take an aligned array of the right size from it.
    N_bytes_big = int(N * dtype.itemsize)
    # The final (= right) size of the array
    N_bytes_right = int(numpy.prod(shape) * dtype.itemsize)
    dt = 'b'
    # We create the big array first
    a = sharedctypes.RawArray(dt, N_bytes_big)
    sa = shmarray(a, (N_bytes_big,), dt)
    # We pick the first index of the new array that is aligned.
    # If the address of the first element is 1 and we want 8-alignment, the
    # first aligned index of the array is going to be 7 == -1 % 8
    start_index = -sa.ctypes.data % alignment
    # Finally, we take the (aligned) subarray and reshape it.
    sa = sa[start_index:start_index + N_bytes_right].view(dtype).reshape(shape)
    return sa
Example #10
Source File: shmarray.py From switchy with Mozilla Public License 2.0 | 5 votes |
def zeros(shape, dtype='d'):
    """Create a shared array initialised to zeros. Avoid object arrays, as
    these will almost certainly break as the objects themselves won't be
    stored in shared memory, only the pointers
    """
    sa = create(shape, dtype=dtype)
    # contrary to the documentation, sharedctypes.RawArray does NOT always
    # return an array which is initialised to zero - do it ourselves
    # http://code.google.com/p/python-multiprocessing/issues/detail?id=25
    sa[:] = numpy.zeros(1, dtype)
    return sa
Example #11
Source File: __init__.py From jawfish with MIT License | 5 votes |
def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a shared array
    '''
    from multiprocessing.sharedctypes import RawArray
    return RawArray(typecode_or_type, size_or_initializer)
Example #12
Source File: shmarray.py From switchio with Mozilla Public License 2.0 | 5 votes |
def zeros(shape, dtype='d'):
    """Create a shared array initialised to zeros. Avoid object arrays, as
    these will almost certainly break as the objects themselves won't be
    stored in shared memory, only the pointers
    """
    sa = create(shape, dtype=dtype)
    # contrary to the documentation, sharedctypes.RawArray does NOT always
    # return an array which is initialised to zero - do it ourselves
    # http://code.google.com/p/python-multiprocessing/issues/detail?id=25
    sa[:] = numpy.zeros(1, dtype)
    return sa
Example #13
Source File: shmarray.py From switchio with Mozilla Public License 2.0 | 5 votes |
def create(shape, dtype='d', alignment=32):
    '''Create an uninitialised shared array. Avoid object arrays, as these
    will almost certainly break as the objects themselves won't be stored in
    shared memory, only the pointers
    '''
    shape = numpy.atleast_1d(shape).astype('i')
    dtype = numpy.dtype(dtype)
    # we're going to use a flat ctypes array
    N = numpy.prod(shape) + alignment
    # The upper bound of size we want to allocate to be certain
    # that we can take an aligned array of the right size from it.
    N_bytes_big = N * dtype.itemsize
    # The final (= right) size of the array
    N_bytes_right = numpy.prod(shape) * dtype.itemsize
    dt = 'b'
    # We create the big array first
    a = sharedctypes.RawArray(dt, int(N_bytes_big))
    sa = shmarray(a, (N_bytes_big,), dt)
    # We pick the first index of the new array that is aligned.
    # If the address of the first element is 1 and we want 8-alignment, the
    # first aligned index of the array is going to be 7 == -1 % 8
    start_index = -sa.ctypes.data % alignment
    # Finally, we take the (aligned) subarray and reshape it.
    sa = sa[start_index:start_index + N_bytes_right].view(dtype).reshape(shape)
    return sa
Example #14
Source File: test_sharedbuffer.py From SenseAct with BSD 3-Clause "New" or "Revised" License | 5 votes |
def testInitLockFalse(self):
    buffer = SharedBuffer(array_len=self.array_len,
                          array_type=self.array_type,
                          np_array_type=self.np_array_type,
                          array_lock=False)
    # Test array types are correct
    self.assertEqual(len(buffer._data_buffer), self.buffer_len)
    self.assertIsInstance(buffer._data_buffer[0], np.ndarray)
    self.assertIs(buffer._data_buffer[0].dtype, np.dtype(self.np_array_type))
    self.assertIsInstance(buffer._data_buffer[0].base,
                          type(Array(self.array_type, self.array_len).get_obj()))
    self.assertIsInstance(buffer._timestamp_buffer, type(RawArray("d", self.buffer_len)))
    self.assertIsInstance(buffer._index_buffer, type(RawArray("l", self.buffer_len)))
Example #15
Source File: __init__.py From oss-ftp with MIT License | 5 votes |
def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a shared array
    '''
    from multiprocessing.sharedctypes import RawArray
    return RawArray(typecode_or_type, size_or_initializer)
Example #16
Source File: __init__.py From BinderFilter with MIT License | 5 votes |
def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a shared array
    '''
    from multiprocessing.sharedctypes import RawArray
    return RawArray(typecode_or_type, size_or_initializer)
Example #17
Source File: __init__.py From ironpython2 with Apache License 2.0 | 5 votes |
def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a shared array
    '''
    from multiprocessing.sharedctypes import RawArray
    return RawArray(typecode_or_type, size_or_initializer)
Example #18
Source File: data_maker.py From BiblioPixel with MIT License | 5 votes |
def __init__(self, floating=None, shared_memory=False, numpy_dtype=None):
    if numpy_dtype:
        log.debug('Using numpy')
        if numpy_dtype in NUMPY_DEFAULTS:
            numpy_dtype = 'float32'
        if numpy_dtype not in numpy.sctypeDict:
            raise ValueError(BAD_NUMPY_TYPE_ERROR % numpy_dtype)

    if shared_memory and numpy_dtype:
        log.error('Shared memory for numpy arrays is not yet supported.')
        numpy_dtype = None

    if floating is None:
        floating = not shared_memory

    c_type = c_float if floating else c_uint8

    if shared_memory:
        self.bytes = lambda size: RawArray(c_uint8, size)
        self.color_list = lambda size: RawArray(3 * c_type, size)
        # Note https://stackoverflow.com/questions/37705974/
    elif numpy_dtype:
        self.bytes = bytearray
        self.color_list = lambda size: numpy.zeros((size, 3), numpy_dtype)
    else:
        self.bytes = bytearray
        self.color_list = lambda size: [(0, 0, 0)] * size
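The RawArray(3 * c_type, size) call above is worth noting: multiplying a ctypes type by 3 produces a new three-element array type, so the shared block holds one (R, G, B) triple per entry. A small illustration (hypothetical values, not BiblioPixel code):

from ctypes import c_uint8
from multiprocessing.sharedctypes import RawArray

import numpy as np

colors = RawArray(3 * c_uint8, 4)                 # four shared RGB triples
colors[0][:] = (255, 0, 0)                        # set the first triple to red
view = np.frombuffer(colors, dtype=np.uint8).reshape(4, 3)
print(view[0])                                    # -> [255   0   0]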
Example #19
Source File: sharedbuffer.py From SenseAct with BSD 3-Clause "New" or "Revised" License | 4 votes |
def __init__(self, array_len, array_type, np_array_type, buffer_len=DEFAULT_BUFFER_LEN, array_lock=True):
    """Inits the SharedBuffer object with size and data type.

    Args:
        buffer_len: An integer size of the buffer
        array_len: An integer size of each buffer element (usually a numpy array)
        array_type: A ctypes data type of buffer elements, e.g. 'd'
        np_array_type: A numpy data type of buffer elements, e.g. 'float64'
        array_lock: A bool specifying whether the buffer will be used with Lock
    """
    self.array_len = array_len
    self.np_array_type = np_array_type
    self._buffer_len = buffer_len
    self._array_type = array_type

    # Data is stored in a circular buffer of shared arrays
    self._data_buffer = []
    if array_lock:
        for _ in range(self._buffer_len):
            self._data_buffer.append(
                np.frombuffer(Array(self._array_type, self.array_len).get_obj(),
                              dtype=self.np_array_type))
        # We also store time stamps corresponding to each array record
        self._timestamp_buffer = Array('d', self._buffer_len)
        # We also store the index corresponding to each array record
        self._index_buffer = Array('l', self._buffer_len)
    else:
        # use RawArray without internal lock if needed
        for _ in range(self._buffer_len):
            self._data_buffer.append(
                np.frombuffer(RawArray(self._array_type, self.array_len),
                              dtype=self.np_array_type))
        self._timestamp_buffer = RawArray('d', self._buffer_len)
        self._index_buffer = RawArray('l', self._buffer_len)

    # Value of `index_buffer` is always set to `self._counter`, which is then increased
    self._counter = 0
    # `buffer_p` is a pointer which always points to the next available slot in `data_buffer`
    # where the newest data array can be stored
    self._buffer_p = Value('i', 0)
    # This variable is set to 1 when a new array is stored and
    # set to 0 when a new array is read
    self._data_updated = Value('i', 0)
    # Lock to ensure that changing `data_updated` as well as the data itself is atomic
    self._access_lock = Lock()
Example #20
Source File: aucell.py From pySCENIC with GNU General Public License v3.0 | 4 votes |
def aucell4r(df_rnk: pd.DataFrame, signatures: Sequence[Type[GeneSignature]],
             auc_threshold: float = 0.05, noweights: bool = False, normalize: bool = False,
             num_workers: int = cpu_count()) -> pd.DataFrame:
    """
    Calculate enrichment of gene signatures for single cells.

    :param df_rnk: The rank matrix (n_cells x n_genes).
    :param signatures: The gene signatures or regulons.
    :param auc_threshold: The fraction of the ranked genome to take into account for the calculation
        of the Area Under the recovery Curve.
    :param noweights: Should the weights of the genes part of a signature be used in calculation of enrichment?
    :param normalize: Normalize the AUC values to a maximum of 1.0 per regulon.
    :param num_workers: The number of cores to use.
    :return: A dataframe with the AUCs (n_cells x n_modules).
    """
    if num_workers == 1:
        # Show progress bar ...
        aucs = pd.concat([enrichment4cells(df_rnk,
                                           module.noweights() if noweights else module,
                                           auc_threshold=auc_threshold)
                          for module in tqdm(signatures)]).unstack("Regulon")
        aucs.columns = aucs.columns.droplevel(0)
    else:
        # Decompose the rankings dataframe: the index and columns are shared with the child processes via pickling.
        genes = df_rnk.columns.values
        cells = df_rnk.index.values
        # The actual rankings are shared directly. This is possible because during a fork from a parent process
        # the child process inherits the memory of the parent process. A RawArray is used instead of a
        # synchronized Array because these rankings are read-only.
        shared_ro_memory_array = RawArray(DTYPE_C, mul(*df_rnk.shape))
        array = np.frombuffer(shared_ro_memory_array, dtype=DTYPE)
        # Copy the contents of df_rank into this shared memory block using row-major ordering.
        array[:] = df_rnk.values.flatten(order='C')

        # The resulting AUCs are returned via a synchronized array.
        auc_mtx = Array('d', len(cells) * len(signatures))  # Double precision floats.

        # Convert the modules to modules with uniform weights if necessary.
        if noweights:
            signatures = list(map(lambda m: m.noweights(), signatures))

        # Do the analysis in separate child processes.
        chunk_size = ceil(float(len(signatures)) / num_workers)
        processes = [Process(target=_enrichment,
                             args=(shared_ro_memory_array, chunk, genes, cells, auc_threshold,
                                   auc_mtx, (chunk_size * len(cells)) * idx))
                     for idx, chunk in enumerate(chunked(signatures, chunk_size))]
        for p in processes:
            p.start()
        for p in processes:
            p.join()

        # Reconstitute the results array, using C (row-major) ordering.
        aucs = pd.DataFrame(data=np.ctypeslib.as_array(auc_mtx.get_obj()).reshape(len(signatures), len(cells)),
                            columns=pd.Index(data=cells, name='Cell'),
                            index=pd.Index(data=list(map(attrgetter("name"), signatures)), name='Regulon')).T
    return aucs / aucs.max(axis=0) if normalize else aucs
Example #21
Source File: average.py From picasso with MIT License | 4 votes |
def open(self, path):
    self.path = path
    try:
        self.locs, self.info = io.load_locs(path, qt_parent=self)
    except io.NoMetadataFileError:
        return
    groups = np.unique(self.locs.group)
    n_groups = len(groups)
    n_locs = len(self.locs)
    self.group_index = scipy.sparse.lil_matrix(
        (n_groups, n_locs), dtype=np.bool
    )
    progress = lib.ProgressDialog(
        "Creating group index", 0, len(groups), self
    )
    progress.set_value(0)
    for i, group in enumerate(groups):
        index = np.where(self.locs.group == group)[0]
        self.group_index[i, index] = True
        progress.set_value(i + 1)
    progress = lib.ProgressDialog(
        "Aligning by center of mass", 0, len(groups), self
    )
    progress.set_value(0)
    for i in range(n_groups):
        index = self.group_index[i, :].nonzero()[1]
        self.locs.x[index] -= np.mean(self.locs.x[index])
        self.locs.y[index] -= np.mean(self.locs.y[index])
        progress.set_value(i + 1)
    self.r = 2 * np.sqrt(np.mean(self.locs.x ** 2 + self.locs.y ** 2))
    self.update_image()
    status = lib.StatusDialog("Starting parallel pool...", self.window)
    global pool, x, y
    try:
        pool.close()
    except NameError:
        pass
    x = sharedctypes.RawArray("f", self.locs.x)
    y = sharedctypes.RawArray("f", self.locs.y)
    n_workers = max(1, int(0.75 * multiprocessing.cpu_count()))
    pool = multiprocessing.Pool(
        n_workers, init_pool, (x, y, self.group_index)
    )
    self.window.status_bar.showMessage("Ready for processing!")
    status.close()
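The init_pool initializer passed to multiprocessing.Pool above is defined elsewhere in picasso. A typical initializer of this kind (a sketch of the common pattern, not the project's actual code) simply stashes the shared arrays in module-level globals so that the pooled worker functions can build zero-copy NumPy views of them:

def init_pool(x_, y_, group_index_):
    # Runs once in each worker process; the RawArrays are shared with the
    # workers at pool start-up, the sparse group index arrives via pickling.
    global x, y, group_index
    x = x_
    y = y_
    group_index = group_index_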