Python Queue.get() Examples
The following are 25 code examples of Queue.get(), drawn from open-source projects. The source file, originating project, and license are listed above each example. You may also want to check out all available functions and classes of the Queue module.
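For reference, Queue.get() supports three calling conventions: blocking (the default), blocking with a timeout, and non-blocking; the latter two raise Empty when no item arrives in time. A minimal sketch using the Python 3 queue module (the same class is named Queue in Python 2):

import queue

q = queue.Queue()
q.put('job')

print(q.get())             # blocks until an item is available -> 'job'

try:
    q.get(block=False)     # non-blocking: raises queue.Empty immediately
except queue.Empty:
    print('queue is empty')

try:
    q.get(timeout=0.1)     # blocks up to 0.1 s, then raises queue.Empty
except queue.Empty:
    print('timed out')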
Example #1
Source File: rl_data.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 14 votes |
def make_web(queue):
    app = Flask(__name__)

    @app.route('/')
    def index():
        return render_template('index.html')

    def gen():
        while True:
            frame = queue.get()
            _, frame = cv2.imencode('.JPEG', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame.tostring() + b'\r\n')

    @app.route('/video_feed')
    def video_feed():
        return Response(gen(),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    try:
        app.run(host='0.0.0.0', port=8889)
    except:
        print('unable to open port')
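The function above serves frames pulled from the queue as an MJPEG stream. A hypothetical way to wire it up, running make_web in a daemon thread while another part of the program produces frames (the thread setup and dummy frame below are illustrative, not from the project, which feeds the queue from its RL environment loop):

import threading
import queue

import numpy as np

frame_queue = queue.Queue(maxsize=10)
# app.run() blocks, so the web server gets its own daemon thread.
threading.Thread(target=make_web, args=(frame_queue,), daemon=True).start()
# Any producer can now push BGR frames for cv2.imencode to consume.
frame_queue.put(np.zeros((480, 640, 3), dtype=np.uint8))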
Example #2
Source File: rl_data.py From SNIPER-mxnet with Apache License 2.0 | 6 votes |
def make_web(queue):
    app = Flask(__name__)

    @app.route('/')
    def index():
        return render_template('index.html')

    def gen():
        while True:
            frame = queue.get()
            _, frame = cv2.imencode('.JPEG', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame.tostring() + b'\r\n')

    @app.route('/video_feed')
    def video_feed():
        return Response(gen(),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    try:
        app.run(host='0.0.0.0', port=8889)
    except:
        print('unable to open port')
Example #3
Source File: rl_data.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def make_web(queue):
    app = Flask(__name__)

    @app.route('/')
    def index():
        return render_template('index.html')

    def gen():
        while True:
            frame = queue.get()
            _, frame = cv2.imencode('.JPEG', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame.tostring() + b'\r\n')

    @app.route('/video_feed')
    def video_feed():
        return Response(gen(),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    try:
        app.run(host='0.0.0.0', port=8889)
    except:
        print('unable to open port')
Example #4
Source File: dataloader.py From mt-dnn with MIT License | 6 votes |
def _set_SIGCHLD_handler():
    # Windows doesn't support SIGCHLD handler
    if sys.platform == 'win32':
        return
    # can't set signal in child threads
    if not isinstance(threading.current_thread(), threading._MainThread):
        return
    global _SIGCHLD_handler_set
    if _SIGCHLD_handler_set:
        return
    previous_handler = signal.getsignal(signal.SIGCHLD)
    if not callable(previous_handler):
        previous_handler = None

    def handler(signum, frame):
        # The following call uses `waitid` with WNOHANG from the C side, so
        # Python can still get and update the process status successfully.
        _error_if_any_worker_fails()
        if previous_handler is not None:
            previous_handler(signum, frame)

    signal.signal(signal.SIGCHLD, handler)
    _SIGCHLD_handler_set = True
Example #5
Source File: dataloader.py From mt-dnn with MIT License | 6 votes |
def _worker_manager_loop(in_queue, out_queue, done_event, pin_memory, device_id):
    if pin_memory:
        torch.cuda.set_device(device_id)

    while True:
        try:
            r = in_queue.get()
        except Exception:
            if done_event.is_set():
                return
            raise
        if r is None:
            break
        if isinstance(r[1], ExceptionWrapper):
            out_queue.put(r)
            continue
        idx, batch = r
        try:
            if pin_memory:
                batch = pin_memory_batch(batch)
        except Exception:
            out_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            out_queue.put((idx, batch))
Example #6
Source File: parallel.py From twitter-photos with BSD 2-Clause "Simplified" License | 6 votes |
def worker(queue, user, size, outdir, total):
    while True:
        try:
            photo = queue.get(False)
        except Queue.Empty:
            break
        media_url = photo[1]
        urllib3_download(media_url, size, outdir)
        with lock:
            global downloaded
            downloaded += 1
            d = {
                'media_url': os.path.basename(media_url),
                'user': user,
                'index': downloaded + 1 if downloaded < total else total,
                'total': total,
            }
            progress = PROGRESS_FORMATTER % d
            sys.stdout.write('\r%s' % progress)
            sys.stdout.flush()
Example #7
Source File: grr_utils.py From python-scripts with GNU General Public License v3.0 | 6 votes |
def get(self, poll_interval=5):
    while True:
        try:
            # Using Queue.get() with a timeout is really expensive - Python uses
            # busy waiting that wakes up the process every 50ms - so we switch
            # to a more efficient polling method if there is no activity for
            # <fast_poll_time> seconds.
            if time.time() - self.last_item_time < self.fast_poll_time:
                message = Queue.Queue.get(self, block=True, timeout=poll_interval)
            else:
                time.sleep(poll_interval)
                message = Queue.Queue.get(self, block=False)
            break
        except Queue.Empty:
            self.callback()

    self.last_item_time = time.time()
    return message
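A standalone sketch of the same adaptive-polling idea against the Python 3 queue module; the class name and timing constants are illustrative, not taken from the original project, and the per-poll callback hook is omitted:

import queue
import time

class AdaptivePollQueue(queue.Queue):
    fast_poll_time = 300.0  # seconds of inactivity before switching to slow polling

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.last_item_time = time.time()

    def get(self, poll_interval=5):
        while True:
            try:
                if time.time() - self.last_item_time < self.fast_poll_time:
                    # Recently active: a blocking get with timeout is acceptable.
                    message = super().get(block=True, timeout=poll_interval)
                else:
                    # Idle: sleep outright rather than busy-wait inside get().
                    time.sleep(poll_interval)
                    message = super().get(block=False)
                break
            except queue.Empty:
                pass
        self.last_item_time = time.time()
        return message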
Example #8
Source File: socketclientthread.py From code-for-blog with The Unlicense | 5 votes |
def run(self):
    while self.alive.isSet():
        try:
            # Queue.get with timeout to allow checking self.alive
            cmd = self.cmd_q.get(True, 0.1)
            self.handlers[cmd.type](cmd)
        except Queue.Empty as e:
            continue
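The 0.1-second timeout keeps the thread responsive to shutdown: each time get() times out, the loop re-checks the alive flag before blocking again. A minimal Python 3 rendering of the same pattern (class and handler names are illustrative):

import queue
import threading

class CommandThread(threading.Thread):
    def __init__(self):
        super().__init__()
        self.cmd_q = queue.Queue()
        self.alive = threading.Event()
        self.alive.set()

    def run(self):
        while self.alive.is_set():
            try:
                # Short timeout so the loop can re-check self.alive regularly.
                cmd = self.cmd_q.get(True, 0.1)
                print('handling', cmd)
            except queue.Empty:
                continue

    def join(self, timeout=None):
        self.alive.clear()   # ask run() to exit, then wait for it
        super().join(timeout)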
Example #9
Source File: rl_data.py From SNIPER-mxnet with Apache License 2.0 | 5 votes |
def act(self, action):
    if self.nthreads > 1:
        new = self.pool.map(env_step, zip(self.env, action))
    else:
        new = [env.step(act) for env, act in zip(self.env, action)]

    reward = np.asarray([i[1] for i in new], dtype=np.float32)
    done = np.asarray([i[2] for i in new], dtype=np.float32)

    channels = self.state_.shape[1]//self.input_length
    state = np.zeros_like(self.state_)
    state[:,:-channels,:,:] = self.state_[:,channels:,:,:]
    for i, (ob, env) in enumerate(zip(new, self.env)):
        if ob[2]:
            state[i,-channels:,:,:] = env.reset().transpose((2,0,1))
        else:
            state[i,-channels:,:,:] = ob[0].transpose((2,0,1))
    self.state_ = state

    if self.web_viz:
        try:
            while self.queue.qsize() > 10:
                self.queue.get(False)
        except queue.Empty:
            pass
        frame = self.visual()
        self.queue.put(frame)

    return reward, done
Example #10
Source File: dataloader.py From mt-dnn with MIT License | 5 votes |
def _shutdown_workers(self):
    try:
        if not self.shutdown:
            self.shutdown = True
            self.done_event.set()
            for q in self.index_queues:
                q.put(None)
            # if some workers are waiting to put, make place for them
            try:
                while not self.worker_result_queue.empty():
                    self.worker_result_queue.get()
            except (FileNotFoundError, ImportError):
                # Many weird errors can happen here due to Python
                # shutting down. These are more like obscure Python bugs.
                # FileNotFoundError can happen when we rebuild the fd
                # fetched from the queue but the socket is already closed
                # from the worker side.
                # ImportError can happen when the unpickler loads the
                # resource from `get`.
                pass
            # done_event should be sufficient to exit worker_manager_thread,
            # but be safe here and put another None
            self.worker_result_queue.put(None)
    finally:
        # removes pids no matter what
        if self.worker_pids_set:
            _remove_worker_pids(id(self))
            self.worker_pids_set = False
Example #11
Source File: dataloader.py From mt-dnn with MIT License | 5 votes |
def _get_batch(self):
    if self.timeout > 0:
        try:
            return self.data_queue.get(timeout=self.timeout)
        except queue.Empty:
            raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout))
    else:
        return self.data_queue.get()
Example #12
Source File: dataloader.py From mt-dnn with MIT License | 5 votes |
def _worker_loop(dataset, index_queue, data_queue, collate_fn, init_fn, worker_id):
    global _use_shared_memory
    _use_shared_memory = True

    # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
    # module's handlers are executed after Python returns from C low-level
    # handlers, likely when the same fatal signal happened again already.
    # https://docs.python.org/3/library/signal.html Sec. 18.8.1.1
    _set_worker_signal_handlers()

    torch.set_num_threads(1)
    if init_fn is not None:
        init_fn(worker_id)

    watchdog = ManagerWatchdog()

    while True:
        try:
            r = index_queue.get(timeout=MANAGER_STATUS_CHECK_INTERVAL)
        except queue.Empty:
            if watchdog.is_alive():
                continue
            else:
                break
        if r is None:
            break
        idx, batch_indices = r
        try:
            samples = collate_fn([dataset[i] for i in batch_indices])
        except Exception:
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))
            del samples
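These mt-dnn helpers appear to be adapted from PyTorch's DataLoader internals: workers pull index batches from index_queue, push collated samples to data_queue, and _get_batch enforces the user-visible timeout. In user code this machinery is reached through the public DataLoader API; a minimal sketch using PyTorch's own loader (the toy dataset is illustrative):

import torch
from torch.utils.data import DataLoader, TensorDataset

ds = TensorDataset(torch.arange(100.).unsqueeze(1), torch.arange(100))
loader = DataLoader(ds, batch_size=8, num_workers=2, pin_memory=True, timeout=30)

for batch_x, batch_y in loader:
    pass  # each batch was collated in a worker process and pinned by the manager thread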
Example #13
Source File: grr_utils.py From python-scripts with GNU General Public License v3.0 | 5 votes |
def __setstate__(self, state):
    self.__init__(max_size=state.get("max_size", 10))
Example #14
Source File: grr_utils.py From python-scripts with GNU General Public License v3.0 | 5 votes |
def Pop(self, key):
    """Remove the object from the cache completely."""
    node = self._hash.get(key)
    if node:
        self._age.Unlink(node)
        return node.data
Example #15
Source File: data_parallel_dist.py From ps_pytorch with MIT License | 5 votes |
def _reduction_thread_fn(queue, group_id, device_ids, reduction_streams, nccl_streams):

    def _process_batch():
        dev_grad_batch, dev_events, job_event = queue.get()
        dev_coalesced = []
        # Coalesce the tensors on all devices and start a local reduction
        for dev_id, grad_batch, event, stream in zip(device_ids, dev_grad_batch,
                                                     dev_events, reduction_streams):
            with torch.cuda.device(dev_id), torch.cuda.stream(stream):
                stream.wait_event(event)
                coalesced = _flatten_tensors(grad_batch)
                dev_coalesced.append(coalesced)
        # Wait for all copies to complete before starting the NCCL kernel
        for stream in reduction_streams:
            stream.synchronize()
        nccl.reduce(dev_coalesced, root=0, streams=nccl_streams)

        # From now on we're only going to work on the first device (from device_ids)
        grad_batch = dev_grad_batch[0]
        coalesced = dev_coalesced[0]
        reduce_stream = reduction_streams[0]
        with torch.cuda.stream(reduce_stream):
            reduce_stream.wait_stream(nccl_streams[0])
            coalesced /= dist.get_world_size()
            dist.all_reduce(coalesced, group=group_id)
            for grad, reduced in zip(grad_batch, _unflatten_tensors(coalesced, grad_batch)):
                grad.copy_(reduced)
        job_event.set()

    with torch.cuda.device(device_ids[0]):
        while True:
            _process_batch()  # just to have a clear scope
Example #16
Source File: parallel.py From pax with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_exception_from_process(p):
    crdict = p.shared_dict
    try:
        exc_type = eval(crdict.get('exception_type', 'UnknownPropagatedException'),
                        exceptions.__dict__)
    except NameError:
        exc_type = exceptions.UnknownPropagatedException
    traceb = crdict.get('traceback', 'No traceback reported')
    return exc_type, traceb
Example #17
Source File: parallel.py From pax with BSD 3-Clause "New" or "Revised" License | 5 votes |
def check_local_processes_while_remote_processing(running_paxes, crash_fanout,
                                                  terminate_host_on_crash=False):
    """Check on locally running paxes in running_paxes,
    returns list of remaining running pax processes.
     - Remove any paxes that have exited normally
     - If a pax has crashed, push a message to the crash fanout
       to terminate all paxes with the same id
     - Look for crash fanout messages from other processes,
       and terminate local paxes with the same id
     - terminate_host_on_crash: if True, raise exception in the host process
       if a pax crash is detected in a pax chain we're participating in.
       Do NOT use in a host process that can host multiple pax chains!
       We will not check the presence of other pax chains and terminate them too!
    """
    p_by_status = group_by_status(running_paxes)
    running_paxes = p_by_status['running']

    # If any of our own paxes crashed, send a message to the crash fanout
    # This will inform everyone connected to the server (including ourselves, on the next iteration)
    for crashed_w in p_by_status['crashed']:
        pax_id = crashed_w.pax_id
        exctype, traceb = get_exception_from_process(p_by_status['crashed'][0])
        print("Pax %s crashed!\nDumping exception traceback:\n\n%s\n\nNotifying crash fanout." % (
            pax_id, format_exception_dump(traceb)
        ))
        crash_fanout.put((pax_id, exctype, traceb))

        running_paxes, _ = terminate_paxes_with_id(running_paxes, pax_id)
        if terminate_host_on_crash:
            raise exctype("Pax %s crashed! Traceback:\n %s" % (pax_id, format_exception_dump(traceb)))

    # If any of the remote paxes crashed, we will learn about it from the crash fanout.
    try:
        pax_id, exctype, traceb = crash_fanout.get()
        print("Remote crash notification for pax %s.\n"
              "Remote exception traceback dump:\n\n%s\n.Terminating paxes with id %s." % (
                  pax_id, format_exception_dump(traceb), pax_id))

        running_paxes, n_terminated = terminate_paxes_with_id(running_paxes, pax_id)
        if n_terminated > 0 and terminate_host_on_crash:
            raise exctype("Pax %s crashed! Traceback:\n %s" % (pax_id, format_exception_dump(traceb)))
    except Empty:
        pass

    return running_paxes
Example #18
Source File: parallel.py From pax with BSD 3-Clause "New" or "Revised" License | 5 votes |
def multiprocess_configuration(n_cpus, pax_id, base_config_kwargs, processing_queue_kwargs, output_queue_kwargs):
    """Yields configuration override dicts for multiprocessing"""
    # Config overrides for child processes
    common_override = dict(pax=dict(autorun=True, show_progress_bar=False),
                           DEFAULT=dict(pax_id=pax_id))

    input_override = dict(pax=dict(plugin_group_names=['input', 'output'],
                                   encoder_plugin=None,
                                   decoder_plugin=None,
                                   output='Queues.PushToQueue'),
                          Queues=dict(**processing_queue_kwargs))

    worker_override = {'pax': dict(input='Queues.PullFromQueue',
                                   output='Queues.PushToQueue',
                                   event_numbers_file=None,
                                   events_to_process=None),
                       # PullFromQueue can't have a timeout in the workers, see #444
                       'Queues.PullFromQueue': dict(timeout_after_sec=float('inf'),
                                                    **processing_queue_kwargs),
                       'Queues.PushToQueue': dict(preserve_ids=True,
                                                  many_to_one=True,
                                                  **output_queue_kwargs)}

    output_override = dict(pax=dict(plugin_group_names=['input', 'output'],
                                    encoder_plugin=None,
                                    decoder_plugin=None,
                                    event_numbers_file=None,
                                    events_to_process=None,
                                    input='Queues.PullFromQueue'),
                           Queues=dict(ordered_pull=True,
                                       **output_queue_kwargs))

    overrides = [('input', input_override)] + \
                [('worker', worker_override)] * n_cpus + \
                [('output', output_override)]

    for worker_type, worker_overide in overrides:
        new_conf = deepcopy(base_config_kwargs)
        new_conf['config_dict'] = combine_configs(new_conf.get('config_dict'),
                                                  common_override,
                                                  worker_overide)
        yield worker_type, new_conf
Example #19
Source File: parallel.py From pax with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get(self, **kwargs):
    """Get an item from the queue.
    Kwargs are ignored (often used in standard library queue.get calls)"""
    msg = self.queue.get(acknowledge=False)
    if msg is None:
        raise Empty
    return pickle.loads(msg.body)
Example #20
Source File: datasets.py From adascan-public with GNU General Public License v3.0 | 5 votes |
def worker(sess, model_options, model_vars, Queue, CLASS_DICT):
    while True:
        # print 'Queue Size', Queue.qsize()
        try:
            fname = Queue.get()
        except:
            return
        start = time.time()
        file_name_orig = fname.split(' ')[0].split('/')[1].strip()
        file_name = file_name_orig.replace('.avi', '.npz')
        class_name = fname.split(' ')[0].split('/')[0].strip().lower()
        class_idx = CLASS_DICT[class_name]
        try:
            frames = np.load(model_options['data_dir'] + file_name)['arr_0']
        except:
            print "Couldn't Open: ", model_options['data_dir'] + file_name
            Queue.task_done()
            continue

        idx = 0
        if model_options['mode'] == 'train':
            idx = random.randint(0, frames.shape[0] - 1)
        frames = frames[idx]

        tmpImg, tmpLab, num_crops = getCrops(sess, model_options, model_vars,
                                             frames, np.array((class_idx)))

        if model_options['mode'] == 'train':
            for j in range(num_crops):
                size = model_options['example_size']
                sess.run(model_vars['enqueue_op'],
                         feed_dict={model_vars['images']: tmpImg[j*size:(j+1)*size],
                                    model_vars['labels']: tmpLab[j:(j+1)]})
        else:
            sess.run(model_vars['enqueue_op'],
                     feed_dict={model_vars['images']: tmpImg,
                                model_vars['labels']: tmpLab,
                                model_vars['names']: [[file_name_orig]]*num_crops})

        Queue.task_done()
Example #21
Source File: interfacelift-downloader.py From interfacelift-downloader with MIT License | 5 votes |
def download_worker():
    while True:
        url = queue.get()
        download_file(url, SAVE_DIR)
        queue.task_done()

# Returns the path of the specified page number
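A typical way to drive such a worker: fill the queue, start a few daemon threads, and block on join() until every URL has been processed. The queue name, downloader stub, URLs, and thread count below are illustrative, not taken from the project:

import queue
import threading

SAVE_DIR = '/tmp/wallpapers'  # illustrative
q = queue.Queue()

def download_file(url, save_dir):
    print('downloading', url, '->', save_dir)  # stand-in for the real downloader

def download_worker():
    while True:
        url = q.get()
        download_file(url, SAVE_DIR)
        q.task_done()

for _ in range(4):  # illustrative thread count
    threading.Thread(target=download_worker, daemon=True).start()

for url in ['http://example.com/a.jpg', 'http://example.com/b.jpg']:
    q.put(url)

q.join()  # returns once task_done() has been called for every queued URL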
Example #22
Source File: rl_data.py From training_results_v0.6 with Apache License 2.0 | 5 votes |
def act(self, action):
    if self.nthreads > 1:
        new = self.pool.map(env_step, zip(self.env, action))
    else:
        new = [env.step(act) for env, act in zip(self.env, action)]

    reward = np.asarray([i[1] for i in new], dtype=np.float32)
    done = np.asarray([i[2] for i in new], dtype=np.float32)

    channels = self.state_.shape[1]//self.input_length
    state = np.zeros_like(self.state_)
    state[:,:-channels,:,:] = self.state_[:,channels:,:,:]
    for i, (ob, env) in enumerate(zip(new, self.env)):
        if ob[2]:
            state[i,-channels:,:,:] = env.reset().transpose((2,0,1))
        else:
            state[i,-channels:,:,:] = ob[0].transpose((2,0,1))
    self.state_ = state

    if self.web_viz:
        try:
            while self.queue.qsize() > 10:
                self.queue.get(False)
        except queue.Empty:
            pass
        frame = self.visual()
        self.queue.put(frame)

    return reward, done
Example #23
Source File: channels.py From Computable with MIT License | 5 votes |
def get_msg(self, block=True, timeout=None):
    """ Gets a message if there is one that is ready. """
    if timeout is None:
        # Queue.get(timeout=None) has stupid uninterruptible
        # behavior, so wait for a week instead
        timeout = 604800
    return self._in_queue.get(block, timeout)
Example #24
Source File: rl_data.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def act(self, action):
    if self.nthreads > 1:
        new = self.pool.map(env_step, zip(self.env, action))
    else:
        new = [env.step(act) for env, act in zip(self.env, action)]

    reward = np.asarray([i[1] for i in new], dtype=np.float32)
    done = np.asarray([i[2] for i in new], dtype=np.float32)

    channels = self.state_.shape[1]//self.input_length
    state = np.zeros_like(self.state_)
    state[:,:-channels,:,:] = self.state_[:,channels:,:,:]
    for i, (ob, env) in enumerate(zip(new, self.env)):
        if ob[2]:
            state[i,-channels:,:,:] = env.reset().transpose((2,0,1))
        else:
            state[i,-channels:,:,:] = ob[0].transpose((2,0,1))
    self.state_ = state

    if self.web_viz:
        try:
            while self.queue.qsize() > 10:
                self.queue.get(False)
        except queue.Empty:
            pass
        frame = self.visual()
        self.queue.put(frame)

    return reward, done
Example #25
Source File: subject.py From autopilot with Mozilla Public License 2.0 | 4 votes |
def get_weight(self, which='last', include_baseline=False):
    """
    Gets start and stop weights.

    TODO: add ability to get weights by session number, dates, and ranges.

    Args:
        which (str): if 'last', gets most recent weights. Otherwise returns all weights.
        include_baseline (bool): if True, includes baseline and minimum mass.

    Returns:
        dict
    """
    # get either the last start/stop weights, optionally including baseline
    # TODO: Get by session
    weights = {}

    h5f = self.open_hdf()
    weight_table = h5f.root.history.weights
    if which == 'last':
        for column in weight_table.colnames:
            try:
                weights[column] = weight_table.read(-1, field=column)[0]
            except IndexError:
                weights[column] = None
    else:
        for column in weight_table.colnames:
            try:
                weights[column] = weight_table.read(field=column)
            except IndexError:
                weights[column] = None

    if include_baseline is True:
        try:
            baseline = float(h5f.root.info._v_attrs['baseline_mass'])
        except KeyError:
            baseline = 0.0
        minimum = baseline * 0.8
        weights['baseline_mass'] = baseline
        weights['minimum_mass'] = minimum

    self.close_hdf(h5f)

    return weights