Python multiprocessing.log_to_stderr() Examples
The following are 9 code examples of multiprocessing.log_to_stderr(), drawn from open-source projects. You can go to the original project or source file by following the link above each example, or check out the other available functions and classes of the multiprocessing module.
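Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic pattern: multiprocessing.log_to_stderr() attaches a stderr handler to the logger returned by multiprocessing.get_logger() and, when passed a level, also sets that logger's level, so messages from the multiprocessing machinery and from your own worker code appear on the console.

import logging
import multiprocessing


def init_worker():
    # Run once in each worker process: attach a stderr handler to the
    # multiprocessing logger and set its level.
    multiprocessing.log_to_stderr(logging.INFO)


def square(x):
    # Reuse the logger configured by log_to_stderr() in init_worker().
    logger = multiprocessing.get_logger()
    logger.info("squaring %s", x)
    return x * x


if __name__ == "__main__":
    # log_to_stderr() returns the logger, so it can also be used directly.
    logger = multiprocessing.log_to_stderr(logging.DEBUG)
    logger.debug("starting pool")
    with multiprocessing.Pool(processes=2, initializer=init_worker) as pool:
        print(pool.map(square, range(4)))

Note that with the "spawn" start method (the default on Windows, and on macOS since Python 3.8) logging handlers configured in the parent are not inherited by child processes, which is why several of the examples below call log_to_stderr() inside the code that actually runs in the worker process.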
Example #1
Source File: BrundleFuzzServer.py, from BrundleFuzz (MIT License), 7 votes
def main(): """Starts several processes This must be kept to the bare minimum """ multiprocessing.log_to_stderr() logger = multiprocessing.get_logger() logger.setLevel(logging.INFO) jobs = [] try: bfs = BrundleFuzzServer() jobs.append(bfs) bfs.start() for j in jobs: j.join() except KeyboardInterrupt: pass
Example #2
Source File: recognition.py, from Piwho (MIT License), 6 votes
def start_service(self):
    """
    start speaker training service.
    """
    # prevent signal from propagating to child process
    handler = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    if self.debug:
        self.sprecog.debug = True
        mp.log_to_stderr(logging.DEBUG)

    self.sprecog.speaker_name = self.speaker_name
    self.proc = mp.Process(name="watchdog", target=self.__run,
                           args=(self.event,))
    self.proc.setDaemon = False
    self.proc.start()

    # restore signal
    signal.signal(signal.SIGINT, handler)
Example #3
Source File: parallel.py, from CityEnergyAnalyst (MIT License), 5 votes
def __apply_func_with_worker_stream(args):
    """
    Call func, using ``queue`` to redirect stdout and stderr, with a tuple of args because
    multiprocessing.Pool.map only accepts one argument for the function.

    This function is called _inside_ a separate process.
    """
    # set up logging
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(logging.WARNING)

    from cea import suppres_3rd_party_debug_loggers
    suppres_3rd_party_debug_loggers()

    # unpack the arguments
    func, queue, on_complete, i_queue, n, args = args[0], args[1], args[2], args[3], args[4], args[5:]

    # set up printing to stderr and stdout to go through the queue
    sys.stdout = QueueWorkerStream('stdout', queue)
    sys.stderr = QueueWorkerStream('stderr', queue)

    # CALL
    result = func(*args)

    if on_complete:
        on_complete(i_queue.get(), n, args, result)

    return result
Example #4
Source File: parallel.py, from barrista (MIT License), 5 votes
def init_filler(dummynet, filler_cbs, in_train_mode):  # pragma: no cover
    """Initialize a filler thread."""
    # pylint: disable=global-variable-undefined, global-variable-not-assigned
    global net, cbs, train_mode, initialized, logger
    logger = _log_to_stderr(_logging.WARN)
    logger.debug("Initializing filler. Train mode: %s.", in_train_mode)
    net = dummynet
    cbs = filler_cbs
    train_mode = in_train_mode
    initialized = False
Example #5
Source File: learners.py, from M-LOOP (MIT License), 5 votes
def run(self):
    '''
    Starts running the Gaussian process learner. When the new parameters event is triggered, reads the cost
    information provided and updates the Gaussian process with the information. Then searches the Gaussian
    process for new optimal parameters to test based on the biased cost. Parameters to test next are put on
    the output parameters queue.
    '''
    # logging to the main log file from a process (as opposed to a thread) in cpython is currently buggy on
    # windows and/or python 2.7
    # current solution is to only log to the console for warning and above from a process
    self.log = mp.log_to_stderr(logging.WARNING)

    try:
        while not self.end_event.is_set():
            # self.log.debug('Learner waiting for new params event')
            self.save_archive()
            self.wait_for_new_params_event()
            # self.log.debug('Gaussian process learner reading costs')
            self.get_params_and_costs()
            self.fit_gaussian_process()
            for _ in range(self.generation_num):
                self.log.debug('Gaussian process learner generating parameter:' + str(self.params_count + 1))
                next_params = self.find_next_parameters()
                self.params_out_queue.put(next_params)
                if self.end_event.is_set():
                    raise LearnerInterrupt()
    except LearnerInterrupt:
        pass

    end_dict = {}
    if self.predict_global_minima_at_end:
        self.get_params_and_costs()
        self.fit_gaussian_process()
        self.find_global_minima()
        end_dict.update({'predicted_best_parameters': self.predicted_best_parameters,
                         'predicted_best_cost': self.predicted_best_cost,
                         'predicted_best_uncertainty': self.predicted_best_uncertainty})
    self.params_out_queue.put(end_dict)
    self._shut_down()
    self.log.debug('Ended Gaussian Process Learner')
Example #6
Source File: miner.py, from crankycoin (MIT License), 5 votes
def __init__(self, blockchain, mempool):
    mp.log_to_stderr()
    mp_logger = mp.get_logger()
    mp_logger.setLevel(logging.DEBUG)
    self.blockchain = blockchain
    self.mempool = mempool
Example #7
Source File: node.py, from crankycoin (MIT License), 5 votes
def __init__(self, peers, api_client, blockchain, mempool, validator):
    super(FullNode, self).__init__(peers, api_client)
    mp.log_to_stderr()
    mp_logger = mp.get_logger()
    mp_logger.setLevel(logging.DEBUG)
    self.app = Bottle()
    self.app.merge(public_app)
    self.app.merge(permissioned_app)
    self.blockchain = blockchain
    self.mempool = mempool
    self.validator = validator
Example #8
Source File: storage.py, from switchio (Mozilla Public License 2.0), 4 votes
def _consume_and_write(queue, path, store, sharr):
    """Insert :var:`row` received from the queue into the shared memory array
    at the current index and increment.
    Empty rows are always written to disk (keeps stores 'call-index-aligned').
    """
    proc = mp.current_process()
    slog = utils.get_logger(proc.name)
    log = mp.log_to_stderr(slog.getEffectiveLevel())
    log.debug("starting storage writer '{}'".format(proc.name))
    log.info("storage path is '{}'".format(path))
    log.debug("sharr is '{}'".format(sharr))

    # set up a new store instance for writing
    with store.writer(path, dtypes=store.dtypes) as store:
        # notify parent that file has been created
        queue.put(path)

        # handle no pandas/np case
        buff = store if sharr is None else sharr
        bufftype = type(buff)
        log.debug('buffer type is {}'.format(bufftype))

        for row in iter(queue.get, Terminate):  # consume and process
            now = time.time()

            # write frame to disk on buffer fill
            if sharr and sharr.is_full():
                log.debug('writing to {} storage...'.format(store.ext))
                try:
                    # push a data frame
                    store.put(pd.DataFrame.from_records(buff.read()))
                except ValueError:
                    log.error(traceback.format_exc())
                log.debug("storage put took '{}'".format(time.time() - now))

            try:  # push to ring buffer (or store if no pd)
                buff.put(row)
                log.debug("{} insert took '{}'".format(bufftype, time.time() - now))
            except ValueError:
                log.error(traceback.format_exc())

    log.debug("terminating frame writer '{}'".format(proc.name))
Example #9
Source File: storage.py, from switchy (Mozilla Public License 2.0), 4 votes
def _consume_and_write(queue, path, store, sharr):
    """Insert :var:`row` received from the queue into the shared memory array
    at the current index and increment.
    Empty rows are always written to disk (keeps stores 'call-index-aligned').
    """
    proc = mp.current_process()
    slog = utils.get_logger(proc.name)
    log = mp.log_to_stderr(slog.getEffectiveLevel())
    log.debug("starting storage writer '{}'".format(proc.name))
    log.info("storage path is '{}'".format(path))
    log.debug("sharr is '{}'".format(sharr))

    # set up a new store instance for writing
    with store.writer(path, dtypes=store.dtypes) as store:
        # notify parent that file has been created
        queue.put(path)

        # handle no pandas/np case
        buff = store if sharr is None else sharr
        bufftype = type(buff)
        log.debug('buffer type is {}'.format(bufftype))

        for row in iter(queue.get, Terminate):  # consume and process
            now = time.time()

            # write frame to disk on buffer fill
            if sharr and sharr.is_full():
                log.debug('writing to {} storage...'.format(store.ext))
                try:
                    # push a data frame
                    store.put(pd.DataFrame.from_records(buff.read()))
                except ValueError:
                    log.error(traceback.format_exc())
                log.debug("storage put took '{}'".format(time.time() - now))

            try:  # push to ring buffer (or store if no pd)
                buff.put(row)
                log.debug("{} insert took '{}'".format(bufftype, time.time() - now))
            except ValueError:
                log.error(traceback.format_exc())

    log.debug("terminating frame writer '{}'".format(proc.name))