Python queue.qsize() Examples
The following are 13 code examples of queue.qsize(), collected from open-source projects and ordered by user votes. The project and source file for each example are listed above its code. You may also want to check out all available functions and classes of the queue module.
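Before the project examples, here is a minimal, self-contained sketch of the pattern most of them share: a producer consults queue.qsize() to throttle or drop work while a consumer drains the queue. This sketch is written for illustration and is not taken from any of the projects below; the producer, consumer, and capacity names are assumptions. Note that qsize() is only an approximate count when other threads put and get concurrently.

import queue
import threading
import time

def producer(q, capacity=10):
    """Enqueue items, using qsize() as a rough back-pressure check."""
    for item in range(25):
        # qsize() is approximate: another thread may change the size
        # between this check and the put() call.
        if q.qsize() < capacity:
            q.put(item)
        # else: the item is simply dropped, as several examples below do
        # when their queue is considered full.
        time.sleep(0.01)
    q.put(None)  # poison pill to stop the consumer

def consumer(q):
    while True:
        item = q.get()
        if item is None:
            break
        print("got {}; queue size is now roughly {}".format(item, q.qsize()))
        q.task_done()

q = queue.Queue()
threading.Thread(target=producer, args=(q,)).start()
consumer(q)

Because qsize() can already be stale by the time put() runs, code that must never exceed a hard bound usually relies on queue.Queue(maxsize=...) with a blocking put() rather than on a qsize() check.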
Example #1
Source File: main_2.8-12.py From motorized_zoom_lens with GNU General Public License v3.0 | 6 votes |
def grab(cam, queue, width, height, fps):
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)
    while running:
        frame = {}
        capture.grab()
        retval, img = capture.retrieve(0)
        frame["img"] = img
        frame["1"] = config["1"]
        frame["2"] = config["2"]
        blur = get_blur(img, 0.05)
        frame["blur"] = blur
        if queue.qsize() < 10:
            queue.put(frame)
        else:
            print(queue.qsize())
Example #2
Source File: main_5-50.py From motorized_zoom_lens with GNU General Public License v3.0 | 6 votes |
def grab(cam, queue, width, height, fps):
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)
    while running:
        frame = {}
        capture.grab()
        retval, img = capture.retrieve(0)
        frame["img"] = img
        frame["1"] = config["1"]
        frame["2"] = config["2"]
        blur = get_blur(img, 0.05)
        frame["blur"] = blur
        if queue.qsize() < 10:
            queue.put(frame)
        else:
            print(queue.qsize())
Example #3
Source File: projection_subtraction.py From pyem with GNU General Public License v3.0 | 6 votes |
def producer(pool, queue, submap_ft, refmap_ft, fname, particles,
             sx, sy, s, a, apix, coefs_method, r, nr,
             fftthreads=1, crop=None, pfac=2):
    log = logging.getLogger('root')
    log.debug("Producing %s" % fname)
    zreader = mrc.ZSliceReader(particles[star.UCSF.IMAGE_ORIGINAL_PATH].iloc[0])
    for i, ptcl in particles.iterrows():
        log.debug("Produce %d@%s" % (ptcl[star.UCSF.IMAGE_ORIGINAL_INDEX],
                                     ptcl[star.UCSF.IMAGE_ORIGINAL_PATH]))
        # p1r = mrc.read_imgs(stack[i], idx[i] - 1, compat="relion")
        p1r = zreader.read(ptcl[star.UCSF.IMAGE_ORIGINAL_INDEX])
        log.debug("Apply")
        ri = pool.apply_async(
            subtract_outer,
            (p1r, ptcl, submap_ft, refmap_ft, sx, sy, s, a, apix, coefs_method, r, nr),
            {"fftthreads": fftthreads, "crop": crop, "pfac": pfac})
        log.debug("Put")
        queue.put((ptcl[star.UCSF.IMAGE_INDEX], ri), block=True)
        log.debug("Queue for %s is size %d" % (ptcl[star.UCSF.IMAGE_ORIGINAL_PATH], queue.qsize()))
    zreader.close()
    log.debug("Put poison pill")
    queue.put((-1, None), block=True)
Example #4
Source File: projection_subtraction.py From pyem with GNU General Public License v3.0 | 6 votes |
def consumer(queue, stack, apix=1.0, iothreads=None):
    log = logging.getLogger('root')
    with mrc.ZSliceWriter(stack, psz=apix) as zwriter:
        while True:
            log.debug("Get")
            i, ri = queue.get(block=True)
            log.debug("Got %d, queue for %s is size %d" % (i, stack, queue.qsize()))
            if i == -1:
                break
            new_image = ri.get()
            log.debug("Result for %d was shape (%d,%d)" %
                      (i, new_image.shape[0], new_image.shape[1]))
            zwriter.write(new_image)
            queue.task_done()
            log.debug("Wrote %d to %d@%s" % (i, zwriter.i, stack))
    if iothreads is not None:
        iothreads.release()
Example #5
Source File: fullQueue.py From Learning-Concurrency-in-Python with MIT License | 5 votes |
def myPublisher(queue):
    while not queue.full():
        queue.put(1)
        print("{} Appended 1 to queue: {}".format(threading.current_thread(), queue.qsize()))
        time.sleep(1)
Example #6
Source File: queueOperations.py From Learning-Concurrency-in-Python with MIT License | 5 votes |
def mySubscriber(queue):
    while True:
        item = queue.get()
        if item is None:
            break
        print("{} removed {} from the queue".format(threading.current_thread(), item))
        print("Queue Size is now: {}".format(queue.qsize()))
        queue.task_done()
Example #7
Source File: kafka_listener.py From koku with GNU Affero General Public License v3.0 | 5 votes |
def _log_process_queue_event(queue, event):
    """Log process queue event."""
    operation = event.get("operation", "unknown")
    provider = event.get("provider")
    name = provider.name if provider else "unknown"
    LOG.info(f"Adding operation {operation} for {name} to process queue (size: {queue.qsize()})")
Example #8
Source File: ht_proxy_if.py From hometop_HT3 with GNU General Public License v3.0 | 5 votes |
def __del__(self):
    self.__threadrun = False
    # clear queue
    while self._queue.qsize() > 0:
        self._queue.get_nowait()
Example #9
Source File: ht_proxy_if.py From hometop_HT3 with GNU General Public License v3.0 | 5 votes |
def remove_client(self, clientID):
    txThread = self._thread.pop(clientID)
    txThread.stop()
    queue = self._rxqueue.pop(clientID)
    while queue.qsize() > 0:
        queue.get_nowait()
    queue = self._txqueue.pop(clientID)
    while queue.qsize() > 0:
        queue.get_nowait()
    self._logger.info("Client-ID:{0}; removed; number of clients:{1}".format(clientID, self._clientcounter))
Example #10
Source File: client.py From sublime-elasticsearch-client with MIT License | 5 votes |
def flush(self):
    """Forces a flush from the internal queue to the server"""
    queue = self.queue
    size = queue.qsize()
    queue.join()
    self.log.debug('successfully flushed {0} items.'.format(size))
Example #11
Source File: gen.py From SRNet-Datagen with Apache License 2.0 | 5 votes |
def enqueue_data(queue, capacity):
    np.random.seed()
    gen = datagen()
    while True:
        try:
            data = gen.gen_srnet_data_with_background()
        except Exception:
            # skip this iteration if generation fails; otherwise `data` would be
            # undefined (or stale) when it is enqueued below
            continue
        if queue.qsize() < capacity:
            queue.put(data)
Example #12
Source File: gen.py From SRNet-Datagen with Apache License 2.0 | 5 votes |
def get_queue_size(self):
    return self.queue.qsize()
Example #13
Source File: gen.py From SRNet-Datagen with Apache License 2.0 | 4 votes |
def dequeue_batch(self, batch_size, data_shape):
    # busy-wait until enough samples have been enqueued for a full batch
    while self.queue.qsize() < batch_size:
        pass
    i_t_batch, i_s_batch = [], []
    t_sk_batch, t_t_batch, t_b_batch, t_f_batch = [], [], [], []
    mask_t_batch = []
    for i in range(batch_size):
        i_t, i_s, t_sk, t_t, t_b, t_f, mask_t = self.dequeue_data()
        i_t_batch.append(i_t)
        i_s_batch.append(i_s)
        t_sk_batch.append(t_sk)
        t_t_batch.append(t_t)
        t_b_batch.append(t_b)
        t_f_batch.append(t_f)
        mask_t_batch.append(mask_t)
    w_sum = 0
    for t_b in t_b_batch:
        h, w = t_b.shape[:2]
        scale_ratio = data_shape[0] / h
        w_sum += int(w * scale_ratio)
    to_h = data_shape[0]
    to_w = w_sum // batch_size
    to_w = int(round(to_w / 8)) * 8
    to_size = (to_w, to_h)  # w first for cv2
    for i in range(batch_size):
        i_t_batch[i] = cv2.resize(i_t_batch[i], to_size)
        i_s_batch[i] = cv2.resize(i_s_batch[i], to_size)
        t_sk_batch[i] = cv2.resize(t_sk_batch[i], to_size, interpolation=cv2.INTER_NEAREST)
        t_t_batch[i] = cv2.resize(t_t_batch[i], to_size)
        t_b_batch[i] = cv2.resize(t_b_batch[i], to_size)
        t_f_batch[i] = cv2.resize(t_f_batch[i], to_size)
        mask_t_batch[i] = cv2.resize(mask_t_batch[i], to_size, interpolation=cv2.INTER_NEAREST)
        # eliminate the effect of resize on t_sk
        t_sk_batch[i] = skeletonization.skeletonization(mask_t_batch[i], 127)
    i_t_batch = np.stack(i_t_batch)
    i_s_batch = np.stack(i_s_batch)
    t_sk_batch = np.expand_dims(np.stack(t_sk_batch), axis=-1)
    t_t_batch = np.stack(t_t_batch)
    t_b_batch = np.stack(t_b_batch)
    t_f_batch = np.stack(t_f_batch)
    mask_t_batch = np.expand_dims(np.stack(mask_t_batch), axis=-1)
    i_t_batch = i_t_batch.astype(np.float32) / 127.5 - 1.
    i_s_batch = i_s_batch.astype(np.float32) / 127.5 - 1.
    t_sk_batch = t_sk_batch.astype(np.float32) / 255.
    t_t_batch = t_t_batch.astype(np.float32) / 127.5 - 1.
    t_b_batch = t_b_batch.astype(np.float32) / 127.5 - 1.
    t_f_batch = t_f_batch.astype(np.float32) / 127.5 - 1.
    mask_t_batch = mask_t_batch.astype(np.float32) / 255.
    return [i_t_batch, i_s_batch, t_sk_batch, t_t_batch, t_b_batch, t_f_batch, mask_t_batch]