Python multiprocessing.pool.terminate() Examples
The following are 11 code examples of multiprocessing.pool.terminate().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
multiprocessing.pool, or try the search function.
Example #1
Source File: _dataset_pools_container.py From buzzard with Apache License 2.0 | 5 votes |
def _close(self): for pool in self._managed_pools: pool.terminate() for pool in self._managed_pools: pool.join() self._aliases.clear() self._aliases_per_pool.clear() self._managed_pools.clear()
Example #2
Source File: queue_green_oa_scrape.py From oadoi with MIT License | 5 votes |
def scrape_with_timeout(page):
    """Scrape a single page in a one-worker pool, giving up after 600 s.

    Returns the scrape result, or None when the worker timed out (the
    pool is then terminated instead of closed).
    """
    worker_pool = NDPool(processes=1)
    pending = worker_pool.apply_async(scrape_page, (page,))
    result = None
    try:
        result = pending.get(timeout=600)
        worker_pool.close()
    except TimeoutError:
        logger.info(u'page scrape timed out: {}'.format(page))
        # The worker is presumably stuck; kill it rather than wait.
        worker_pool.terminate()
    worker_pool.join()
    return result
Example #3
Source File: fit_spectrum.py From PyXRF with BSD 3-Clause "New" or "Revised" License | 5 votes |
def fit_pixel_multiprocess_nonlinear(data, x, param, reg_mat, use_snip=False):
    """
    Multiprocess fit of experiment data, one worker task per data row.

    Parameters
    ----------
    data : array
        3D data of experiment spectrum
    x : array
        energy axis, forwarded to the per-line fit
    param : dict
        fitting parameters
    reg_mat : array
        regression matrix, forwarded to the per-line fit
    use_snip : bool, optional
        whether snip background subtraction is applied in the per-line fit

    Returns
    -------
    list
        fitting values for all the elements, one entry per row of data
    """
    n_workers = multiprocessing.cpu_count()
    logger.info('cpu count: {}'.format(n_workers))
    pool = multiprocessing.Pool(n_workers)

    # Dispatch one asynchronous job per row of the 3D dataset.
    pending = [
        pool.apply_async(
            fit_pixel_nonlinear_per_line,
            (row, data[row, :, :], x, param, reg_mat, use_snip))
        for row in range(data.shape[0])
    ]

    # Collect results in submission order.
    results = [job.get() for job in pending]

    pool.terminate()
    pool.join()

    return results
Example #4
Source File: fit_spectrum.py From PyXRF with BSD 3-Clause "New" or "Revised" License | 5 votes |
def roi_sum_multi_files(dir_path, file_prefix,
                        start_i, end_i, element_dict,
                        interpath='entry/instrument/detector/data'):
    """
    Fitting for multiple files with Multiprocessing, one task per file id.

    Parameters
    ----------
    dir_path : str
    file_prefix : str
    start_i : int
        start id of given file
    end_i : int
        end id of given file (inclusive)
    element_dict : dict
        dict of element with [low, high] bounds as values
    interpath : str
        path inside hdf5 file to fetch the data

    Returns
    -------
    list
        fitting result as list of dict, one per file id
    """
    n_workers = multiprocessing.cpu_count()
    logger.info('cpu count: {}'.format(n_workers))
    pool = multiprocessing.Pool(n_workers)

    # One asynchronous ROI-sum job per file id; end_i is inclusive.
    pending = [
        pool.apply_async(
            roi_sum_calculation,
            (dir_path, file_prefix, file_id, element_dict, interpath))
        for file_id in range(start_i, end_i + 1)
    ]

    results = [job.get() for job in pending]

    pool.terminate()
    pool.join()

    return results
Example #5
Source File: multithread_iterator.py From chainer with MIT License | 5 votes |
def finalize(self):
    """Release the worker pool and drop the prefetched-item reference."""
    # Capture the pool before clearing the attribute so it can still be
    # terminated after the instance state has been reset.
    active_pool = self._pool
    self._next = None
    self._pool = None
    if active_pool is not None:
        active_pool.terminate()
Example #6
Source File: git_common.py From luci-py with Apache License 2.0 | 5 votes |
def ScopedPool(*args, **kwargs):
    """Context Manager which returns a multiprocessing.pool instance which
    correctly deals with thrown exceptions.

    *args - Arguments to multiprocessing.pool

    Kwargs:
      kind ('threads', 'procs') - The type of underlying coprocess to use.
      **etc - Arguments to multiprocessing.pool
    """
    kind = kwargs.pop('kind', None)
    if kind == 'threads':
        pool = multiprocessing.pool.ThreadPool(*args, **kwargs)
    else:
        # Chain the caller's initializer behind our scoped initializer:
        # every worker runs _ScopedPool_initer(orig, orig_args) on start.
        user_init = kwargs.get('initializer')
        user_init_args = kwargs.get('initargs', ())
        kwargs['initializer'] = _ScopedPool_initer
        kwargs['initargs'] = user_init, user_init_args
        pool = multiprocessing.pool.Pool(*args, **kwargs)

    try:
        yield pool
        pool.close()
    except BaseException:
        # Any failure in the managed body kills the workers immediately.
        pool.terminate()
        raise
    finally:
        pool.join()
Example #7
Source File: cgp_config.py From Evolutionary-Autoencoders with MIT License | 5 votes |
def __call__(self, net_lists):
    """Evaluate every network description in net_lists.

    Work is dispatched in batches of at most self.gpu_num so that each
    worker in a batch gets its own GPU index j.  Returns an array with
    one evaluation score per network.
    """
    evaluations = np.zeros(len(net_lists))
    for start in np.arange(0, len(net_lists), self.gpu_num):
        batch = np.min((start + self.gpu_num, len(net_lists))) - start
        pool = NoDaemonProcessPool(batch)
        jobs = [(cnn_eval, net_lists[start + k], k, self.epoch_num,
                 self.dataset, self.verbose, self.imgSize,
                 self.batchsize, self.mask)
                for k in range(batch)]
        evaluations[start:start + batch] = pool.map(arg_wrapper_mp, jobs)
        pool.terminate()

    return evaluations


# network configurations
Example #8
Source File: cgp_config.py From Evolutionary-Autoencoders with MIT License | 5 votes |
def __call__(self, net_lists):
    """Compute a fitness value for each network in net_lists.

    At most self.gpu_num networks are evaluated concurrently, each worker
    receiving its own GPU index idx.  Returns one score per network.
    """
    evaluations = np.zeros(len(net_lists))
    for offset in np.arange(0, len(net_lists), self.gpu_num):
        count = np.min((offset + self.gpu_num, len(net_lists))) - offset
        pool = NoDaemonProcessPool(count)
        work = [(cnn_eval, net_lists[offset + idx], idx, self.epoch_num,
                 self.batchsize, self.dataset, self.verbose, self.imgSize)
                for idx in range(count)]
        evaluations[offset:offset + count] = pool.map(arg_wrapper_mp, work)
        pool.terminate()

    return evaluations


# network configurations
Example #9
Source File: debug_snapshot.py From ceph-lcm with Apache License 2.0 | 5 votes |
def closing_pool(pool):
    """Context manager around *pool*: close on success, terminate on
    error, and always join before returning."""
    try:
        # contextlib.closing guarantees pool.close() on normal exit.
        with contextlib.closing(pool) as managed:
            yield managed
    except Exception as exc:
        # Record the reason for the hard shutdown, then re-raise.
        syslog.syslog(syslog.LOG_WARNING,
                      "Terminate pool due to {0}".format(exc))
        pool.terminate()
        raise
    finally:
        pool.join()
Example #10
Source File: cgp_config.py From cgp-cnn-PyTorch with MIT License | 5 votes |
def __call__(self, net_lists):
    """Score every network in net_lists using a pool of non-daemonic
    workers, no more than self.gpu_num at a time (one GPU index each)."""
    evaluations = np.zeros(len(net_lists))
    total = len(net_lists)
    for base in np.arange(0, total, self.gpu_num):
        chunk = np.min((base + self.gpu_num, total)) - base
        pool = NoDaemonProcessPool(chunk)
        payload = [(cnn_eval, net_lists[base + g], g, self.epoch_num,
                    self.batchsize, self.dataset, self.verbose, self.imgSize)
                   for g in range(chunk)]
        evaluations[base:base + chunk] = pool.map(arg_wrapper_mp, payload)
        pool.terminate()

    return evaluations


# network configurations
Example #11
Source File: parallel.py From tsd with GNU Affero General Public License v3.0 | 3 votes |
def run_calls(fun, list_of_args, extra_args=(), pool_type='processes',
              nb_workers=multiprocessing.cpu_count(), timeout=60,
              verbose=True, initializer=None, initargs=None):
    """
    Run a function several times in parallel with different inputs.

    Args:
        fun: function to be called several times in parallel.
        list_of_args: list of (first positional) arguments passed to fun,
            one per call
        extra_args: tuple containing extra arguments to be passed to fun
            (same value for all calls)
        pool_type: either 'processes' or 'threads'
        nb_workers: number of calls run simultaneously
        timeout: number of seconds allowed per function call
        verbose: either True (show the amount of computed calls) or False
        initializer, initargs (optional): if initializer is not None then
            each worker process will call initializer(*initargs) when it
            starts

    Returns:
        list of outputs

    Raises:
        ValueError: if pool_type is neither 'processes' nor 'threads'.
    """
    if pool_type == 'processes':
        # multiprocessing.Pool requires a tuple for initargs; the previous
        # code forwarded the None default, which would make any worker
        # initializer crash with initializer(*None).
        pool = multiprocessing.Pool(nb_workers, initializer,
                                    initargs if initargs is not None else ())
    elif pool_type == 'threads':
        pool = multiprocessing.pool.ThreadPool(nb_workers)
    else:
        # The previous code only printed an error here and then crashed
        # with a NameError on the undefined pool; fail fast instead.
        raise ValueError('unknown pool_type "{}"'.format(pool_type))

    results = []
    outputs = []
    if verbose:
        show_progress.counter = 0
        show_progress.total = len(list_of_args)
    for x in list_of_args:
        # A tuple argument is spread as several positional args.
        args = (x + extra_args) if isinstance(x, tuple) else ((x,) + extra_args)
        results.append(pool.apply_async(fun, args=args,
                                        callback=show_progress if verbose else None))

    for r in results:
        try:
            outputs.append(r.get(timeout))
        except KeyboardInterrupt:
            pool.terminate()
            sys.exit(1)

    pool.close()
    pool.join()
    return outputs