Python queue.get() Examples
The following are 30 code examples of queue.get(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module queue, or try the search function.
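As a quick orientation before the examples: by default queue.get() blocks until an item is available, while get(False) or get(timeout=...) raises queue.Empty instead of waiting forever. A minimal sketch of both behaviors, not taken from any of the projects below:

import queue
import threading

q = queue.Queue()

def producer():
    for i in range(3):
        q.put(i)
    q.put(None)  # sentinel: signal the consumer that we are done

threading.Thread(target=producer).start()

while True:
    try:
        # Block for up to one second, then raise queue.Empty
        item = q.get(timeout=1)
    except queue.Empty:
        break
    if item is None:  # sentinel received, stop consuming
        break
    print('got', item)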
Example #1
Source File: rl_data.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 14 votes |
def make_web(queue):
    app = Flask(__name__)

    @app.route('/')
    def index():
        return render_template('index.html')

    def gen():
        while True:
            frame = queue.get()
            _, frame = cv2.imencode('.JPEG', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame.tostring() + b'\r\n')

    @app.route('/video_feed')
    def video_feed():
        return Response(gen(), mimetype='multipart/x-mixed-replace; boundary=frame')

    try:
        app.run(host='0.0.0.0', port=8889)
    except:
        print('unable to open port')
Example #2
Source File: talon.py From TALON with MIT License | 6 votes |
def find_gene_match_on_vertex_basis(vertex_IDs, strand, vertex_2_gene):
    """ Use vertices in a transcript to try to pinpoint the gene it belongs to. """

    gene_matches = []
    for vertex in vertex_IDs:
        if vertex in vertex_2_gene:
            curr_matches = vertex_2_gene[vertex]

            # Make sure the gene is on the correct strand
            gene_matches += [x[0] for x in curr_matches if x[1] == strand]

    if len(gene_matches) == 0:
        return None

    # Now count up how often we see each gene
    gene_tally = dict((x, gene_matches.count(x)) for x in set(gene_matches))

    # TODO: deal with fusions

    # For the main assignment, pick the gene that is observed the most
    gene_ID = max(gene_tally, key=gene_tally.get)

    return gene_ID
Example #3
Source File: parallel.py From twitter-photos with BSD 2-Clause "Simplified" License | 6 votes |
def worker(queue, user, size, outdir, total):
    while True:
        try:
            photo = queue.get(False)
        except Queue.Empty:
            break
        media_url = photo[1]
        urllib3_download(media_url, size, outdir)
        with lock:
            global downloaded
            downloaded += 1
            d = {
                'media_url': os.path.basename(media_url),
                'user': user,
                'index': downloaded + 1 if downloaded < total else total,
                'total': total,
            }
            progress = PROGRESS_FORMATTER % d
            sys.stdout.write('\r%s' % progress)
            sys.stdout.flush()
Example #4
Source File: test_utils.py From bigchaindb with Apache License 2.0 | 6 votes |
def mock_queue(monkeypatch):

    class MockQueue:
        items = []

        def get(self, timeout=None):
            try:
                return self.items.pop()
            except IndexError:
                if timeout:
                    raise queue.Empty()
                raise

        def put(self, item):
            self.items.append(item)

    mockqueue = MockQueue()

    monkeypatch.setattr('queue.Queue', lambda: mockqueue)
    return mockqueue
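A hypothetical test using this fixture might look like the following (the test name and assertions are illustrative, not from the bigchaindb suite; the original module presumably registers mock_queue with @pytest.fixture, since it takes monkeypatch and returns the mock):

import queue

import pytest

def test_mock_queue_get(mock_queue):  # hypothetical test, not from the project
    mock_queue.put('item')
    assert mock_queue.get() == 'item'
    # The queue is empty now, so MockQueue raises queue.Empty when a
    # timeout is given, mirroring the real queue.Queue behavior.
    with pytest.raises(queue.Empty):
        mock_queue.get(timeout=1)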
Example #5
Source File: chatlog.py From lrrbot with Apache License 2.0 | 6 votes |
async def do_log_chat(time, event, metadata):
    """
    Add a new message to the chat log.
    """
    # Don't log blank lines or server commands like .timeout
    message = event.arguments[0]
    if not message or (message[0] in "./" and message[1:4].lower() != "me "):
        return

    source = irc.client.NickMask(event.source).nick
    html = await build_message_html(time, source, event.target, event.arguments[0],
        metadata.get('specialuser', []), metadata.get('usercolor'),
        metadata.get('emoteset', []), metadata.get('emotes'),
        metadata.get('display-name'))
    with lrrbot.main.bot.engine.begin() as conn:
        conn.execute(lrrbot.main.bot.metadata.tables["log"].insert(),
            time=time,
            source=source,
            target=event.target,
            message=event.arguments[0],
            specialuser=list(metadata.get('specialuser', [])),
            usercolor=metadata.get('usercolor'),
            emoteset=list(metadata.get('emoteset', [])),
            emotes=metadata.get('emotes'),
            displayname=metadata.get('display-name'),
            messagehtml=html,
            msgid=metadata.get('id'),
        )
Example #6
Source File: projection_subtraction.py From pyem with GNU General Public License v3.0 | 6 votes |
def consumer(queue, stack, apix=1.0, iothreads=None):
    log = logging.getLogger('root')
    with mrc.ZSliceWriter(stack, psz=apix) as zwriter:
        while True:
            log.debug("Get")
            i, ri = queue.get(block=True)
            log.debug("Got %d, queue for %s is size %d" %
                      (i, stack, queue.qsize()))
            if i == -1:
                break
            new_image = ri.get()
            log.debug("Result for %d was shape (%d,%d)" %
                      (i, new_image.shape[0], new_image.shape[1]))
            zwriter.write(new_image)
            queue.task_done()
            log.debug("Wrote %d to %d@%s" % (i, zwriter.i, stack))
    if iothreads is not None:
        iothreads.release()
Example #7
Source File: test_utils.py From Decentralized-Internet with MIT License | 6 votes |
def mock_queue(monkeypatch):

    class MockQueue:
        items = []

        def get(self, timeout=None):
            try:
                return self.items.pop()
            except IndexError:
                if timeout:
                    raise queue.Empty()
                raise

        def put(self, item):
            self.items.append(item)

    mockqueue = MockQueue()

    monkeypatch.setattr('queue.Queue', lambda: mockqueue)
    return mockqueue
Example #8
Source File: msearch_daemon.py From search-MjoLniR with MIT License | 6 votes |
def _reflect_end_run(self, record: Mapping) -> None:
    """Reflect an end run sigil into the complete topic

    This is handled directly in the consumer thread, rather than as part
    of the work queue, to ensure that the offset is not committed to kafka
    until after processing is completed and it has been successfully
    reflected.

    Parameters
    ----------
    record : dict
        Deserialized end run sigil
    """
    log.info('reflecting end sigil for run %s and partition %d' %
             (record['run_id'], record['partition']))
    # Wait for everything to at least start processing. We don't
    # actually know when the workers are finally idle.
    self.work_queue.join()
    future = self.ack_all_producer.send(
        self.topic_complete, json.dumps(record).encode('utf8'))
    future.add_errback(lambda e: log.critical(
        'Failed to send the "end run" message: %s', e))
    # Wait for ack (or failure to ack)
    future.get()
Example #9
Source File: ht_proxy_if.py From hometop_HT3 with GNU General Public License v3.0 | 6 votes |
def run(self):
    _ClientHandler.log_info("csocketsendThread(); socket.send thread start")
    self._tx = None
    while self.__threadrun == True:
        try:
            # get queue-value in blocking mode
            self._tx = self._queue.get(True)
            self._queue.task_done()
        except:
            self.__threadrun = False
            _ClientHandler.log_critical("csocketsendThread();Error on queue.get()")
            raise
        try:
            self._request.sendall(bytes(self._tx))
        except:
            self.__threadrun = False
            _ClientHandler.log_critical("csocketsendThread();Error on socket.send")
            raise
    _ClientHandler.log_info("csocketsendThread(); socket.send thread terminated")
Example #10
Source File: inference.py From NeuralNetwork-Viterbi with MIT License | 6 votes |
def decode(queue, log_probs, decoder, index2label):
    while not queue.empty():
        try:
            video = queue.get(timeout = 3)
            score, labels, segments = decoder.decode( log_probs[video] )
            # save result
            with open('results/' + video, 'w') as f:
                f.write( '### Recognized sequence: ###\n' )
                f.write( ' '.join( [index2label[s.label] for s in segments] ) + '\n' )
                f.write( '### Score: ###\n' + str(score) + '\n')
                f.write( '### Frame level recognition: ###\n')
                f.write( ' '.join( [index2label[l] for l in labels] ) + '\n' )
        except queue.Empty:
            pass


### read label2index mapping and index2label mapping ###########################
Example #11
Source File: oldtest.py From keylime with BSD 2-Clause "Simplified" License | 6 votes |
def test_concurrent_cloudnodiness_reset_request(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    #time.sleep(2)
    test_record = self.test_table.get(test_method_name)
    #perform each of the test functions and store the results
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            request_body = test_functions.get("http_request_body")
            try:
                json_request_body = json.loads(request_body)
                #reset the request body to file arguments for next iteration
                json_request_body['cloudagent_ip'] = argument["ip_file"]
                json_request_body['cloudagent_port'] = argument["port_file"]
                test_functions['http_request_body'] = json.dumps(json_request_body)
            except Exception as e:
                self.fail("Problem in test_concurrent_cloudnodiness_modify_request() replacing cloudagent_ip or cloudagent_port. Error: %s"%e)
Example #12
Source File: oldtest.py From keylime with BSD 2-Clause "Simplified" License | 6 votes |
def check_test_persistance_file_write(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    test_record = self.test_table.get(test_method_name)
    uuid_str = argument
    #perform each of the test functions and store the results
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            try:
                with open(cv_persistence_filename, "r") as persistance_file:
                    file_contents = persistance_file.read()
                json_content = json.loads(file_contents)
                if len(json_content) != 1 or json_content.get(uuid_str) is None:
                    self.fail("Unexpected persistence file contents.")
            except Exception as e:
                self.fail("Problem reading persistence file after POST. Error: %s"%e)
            try:
                with open(cv_persistence_filename + ".bak", "r") as backup_persistance_file:
                    backup_file_contents = backup_persistance_file.read()
                json_backup_content = json.loads(backup_file_contents)
                if len(json_backup_content) != 0:
                    self.fail("Unexpected backup persistence file contents.")
            except Exception as e:
                self.fail("Problem reading backup persistence file after POST. Error: %s"%e)
Example #13
Source File: oldtest.py From keylime with BSD 2-Clause "Simplified" License | 6 votes |
def check_test_persistance_file_load(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    test_record = self.test_table.get(test_method_name)
    uuid_str = argument
    #perform each of the test functions and store the results
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            target_body = test_functions.get("http_result_body_actual")
            jsondecoded = json.loads(target_body)
            # test to make sure these two keys (and values) are in the return
            if len(jsondecoded) != 1 or jsondecoded.get(uuid_str) is None:
                self.fail("Expected " + uuid_str + " to be in the list of active agent_ids")

# def do_mock_for_test_cloudverifier_tenant_provide_v(self, argument):
#     global text_callback
#     nonce = tpm_initialize.random_password(20)
#     tpm_policy = {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff" }
#     #theurl = 'http://' + cloudagent_ip + ':' + cloudagent_port + "/v1/quotes/cloudverifier" + "?nonce=" + nonce + "&mask=" + tpm_policy['mask']
#     theurl = 'http://' + cloudagent_ip + ':' + cloudagent_port + "/v1/quotes/cloudverifier"
#     with requests_mock.Mocker(real_http=True) as m:
#         m.get(requests_mock.ANY, text=text_callback)
Example #14
Source File: oldtest.py From keylime with BSD 2-Clause "Simplified" License | 6 votes |
def check_validate_test_cloudverifier_tenant_provide_v(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    test_record = self.test_table.get(test_method_name)
    #lookup test data and compare the results to canned values
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            target_body = test_functions.get("http_result_body_actual")
            jsondecoded = json.loads(target_body)

            v = jsondecoded.get("v")
            ip = jsondecoded.get("ip")
            port = jsondecoded.get("port")
            tpm_policy = jsondecoded.get("tpm_policy")

            if v is None or v != "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=":
                self.fail("Returned v from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
            if ip is None or ip != "127.0.0.1":
                self.fail("Returned ip from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
            if port is None or port != "8882":
                self.fail("Returned port from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
            if tpm_policy is None or tpm_policy != {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}:
                self.fail("Returned tpm_policy from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
Example #15
Source File: oldtest.py From keylime with BSD 2-Clause "Simplified" License | 6 votes |
def execute_test_definition(self):
    test_record = self.test_table.get(self._testMethodName)

    prerun_function_dict = test_record.get("prerun_function")
    if prerun_function_dict is not None:
        prerun_function_name = prerun_function_dict.get("name")
        prerun_function_args = prerun_function_dict.get("args")
        function_return = getattr(self, prerun_function_name)(prerun_function_args)

    self.execute_test_function_set("setup_functions")
    self.execute_test_function_set("state_change_functions")
    self.execute_test_function_set("state_validation_functions")

    postrun_function_dict = test_record.get("postrun_function")
    if postrun_function_dict is not None:
        postrun_function_name = postrun_function_dict.get("name")
        postrun_function_args = postrun_function_dict.get("args")
        function_return = getattr(self, postrun_function_name)(postrun_function_args)
Example #16
Source File: talon.py From TALON with MIT License | 6 votes |
def process_5p(chrom, positions, strand, vertex_IDs, gene_ID, gene_starts,
               edge_dict, locations, run_info):
    """ Conduct permissive match for 5' end and return assigned vertex,
        edge, and distance """

    # First get a permissively matched start vertex
    start_vertex, diff_5p, known_start = permissive_match_with_gene_priority(
        chrom, positions[0], strand, positions[1], "start", gene_ID,
        gene_starts, locations, run_info)
    if start_vertex == None:
        start_vertex = create_vertex(chrom, positions[0], locations,
                                     run_info)['location_ID']

    # Then get the start exon
    start_exon, start_novelty = match_or_create_edge(start_vertex,
                                                     vertex_IDs[0],
                                                     "exon", strand,
                                                     edge_dict)

    # If known_start == 1, the start vertex is a known startpoint of this gene.
    # start novelty refers to the novelty of the first exon (1 if yes, 0 if no)
    return start_vertex, start_exon, start_novelty, known_start, diff_5p
Example #17
Source File: talon.py From TALON with MIT License | 6 votes |
def process_3p(chrom, positions, strand, vertex_IDs, gene_ID, gene_ends,
               edge_dict, locations, run_info):
    """ Conduct permissive match for 3' end and return assigned vertex,
        edge, and distance """

    # First get a permissively matched end vertex
    end_vertex, diff_3p, known_end = permissive_match_with_gene_priority(
        chrom, positions[-1], strand, positions[-2], "end", gene_ID,
        gene_ends, locations, run_info)
    if end_vertex == None:
        end_vertex = create_vertex(chrom, positions[-1], locations,
                                   run_info)['location_ID']

    # Then get the end exon
    end_exon, end_novelty = match_or_create_edge(vertex_IDs[-1],
                                                 end_vertex,
                                                 "exon", strand,
                                                 edge_dict)

    # If known_end == 1, the end vertex is a known endpoint of this gene.
    # end novelty refers to the novelty of the final exon (1 if yes, 0 if no)
    return end_vertex, end_exon, end_novelty, known_end, diff_3p
Example #18
Source File: rl_data.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def make_web(queue):
    app = Flask(__name__)

    @app.route('/')
    def index():
        return render_template('index.html')

    def gen():
        while True:
            frame = queue.get()
            _, frame = cv2.imencode('.JPEG', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame.tostring() + b'\r\n')

    @app.route('/video_feed')
    def video_feed():
        return Response(gen(), mimetype='multipart/x-mixed-replace; boundary=frame')

    try:
        app.run(host='0.0.0.0', port=8889)
    except:
        print('unable to open port')
Example #19
Source File: mayhem_1.py From mayhem with MIT License | 6 votes |
def consume(queue):
    """Consumer client to simulate subscribing to a publisher.

    Args:
        queue (queue.Queue): Queue from which to consume messages.
    """
    while True:
        # wait for an item from the publisher
        msg = queue.get()

        # the publisher emits None to indicate that it is done
        if msg is None:
            break

        # process the msg
        logging.info(f"Consumed {msg}")
        # simulate i/o operation using sleep
        time.sleep(random.random())
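For context, the publisher that pairs with this consumer puts messages on the queue and finishes with a None sentinel; a hypothetical sketch (publish() and the message format are illustrative, not the mayhem code):

import random
import string
import time

def publish(q, n_messages=5):
    # Hypothetical publisher: emit a few random messages, then the
    # None sentinel that consume() treats as "done".
    for _ in range(n_messages):
        msg = ''.join(random.choices(string.ascii_lowercase, k=4))
        q.put(msg)
        time.sleep(random.random())
    q.put(None)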
Example #20
Source File: api.py From Avalon-Management-System with GNU General Public License v3.0 | 5 votes |
def update_nodes():
    token = request.json.get('token')
    if not ams_auth(token):
        return ams_dumps({'auth': False})

    nodes = request.json.get('nodes')
    g.database.run('raw', 'DROP TABLES IF EXISTS controller_config')
    g.database.run('create', 'controller_config', [
        {"name": "ip", "type": "VARCHAR(40)"},
        {"name": "port", "type": "SMALLINT UNSIGNED"},
        {"name": "mods", "type": "SMALLINT UNSIGNED"},
    ])
    for node in nodes:
        safe_node = {
            'ip': node['ip'],
            'port': node['port'],
            'mods': node['mods'],
        }
        g.database.run(
            'insert', 'controller_config',
            list(safe_node.keys()),
            list(safe_node.values())
        )
    g.database.commit()
    return ams_dumps({'success': True})

# TODO: change to /shortlog/<time>
Example #21
Source File: talon.py From TALON with MIT License | 5 votes |
def listener(queue, outfiles, QC_header, timeout = 72):
    """ During the run, this function listens for messages on the provided
        queue. When a message is received (consisting of a filename and a
        string), it writes the string to that file. Timeout unit is in hours"""

    # Open all of the outfiles
    open_files = {}
    for fpath in outfiles.values():
        open_files[fpath] = open(fpath, 'w')

    # Add a header to the QC file
    QC_file = open_files[outfiles.qc]
    QC_file.write(QC_header + "\n")

    # Set a timeout
    wait_until = datetime.now() + timedelta(hours=timeout)

    while True:
        msg = queue.get()
        msg_fname = msg[0]
        msg_value = msg[1]
        if datetime.now() > wait_until or msg_value == 'complete':
            ts = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
            print("[ %s ] Shutting down message queue..." % (ts))
            for f in open_files.values():
                f.close()
            break

        open_files[msg_fname].write(msg_value + "\n")
        open_files[msg_fname].flush()
Example #22
Source File: interfacelift-downloader.py From interfacelift-downloader with MIT License | 5 votes |
def download_worker():
    while True:
        url = queue.get()
        download_file(url, SAVE_DIR)
        queue.task_done()

# Returns the path of the specified page number
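The worker reads from a module-level queue, so the surrounding script presumably fills it with URLs and starts a handful of daemon threads before blocking on queue.join(); a hypothetical driver sketch (the URLs and thread count are illustrative, not from the project):

import queue as queue_module
import threading

queue = queue_module.Queue()  # module-level queue that download_worker() expects

for url in ('http://example.com/a.jpg', 'http://example.com/b.jpg'):  # placeholder URLs
    queue.put(url)

for _ in range(4):
    t = threading.Thread(target=download_worker)
    t.daemon = True  # daemon threads exit when the main thread does
    t.start()

queue.join()  # block until task_done() has been called for every url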
Example #23
Source File: test_utils.py From Decentralized-Internet with MIT License | 5 votes |
def test_process_set_title():
    from uuid import uuid4
    from multiprocessing import Queue
    from setproctitle import getproctitle
    from bigchaindb.utils import Process

    queue = Queue()
    uuid = str(uuid4())

    process = Process(target=lambda: queue.put(getproctitle()),
                      name=uuid)
    process.start()
    assert queue.get() == uuid
Example #24
Source File: mayhem_2.py From mayhem with MIT License | 5 votes |
def consume_sync(queue):
    while True:
        msg = queue.get()
        logging.info(f"Consumed {msg}")
        # Substitute for handling a message
        time.sleep(random.random())
Example #25
Source File: data_parallel_dist.py From ps_pytorch with MIT License | 5 votes |
def _reduction_thread_fn(queue, group_id, device_ids, reduction_streams, nccl_streams):

    def _process_batch():
        dev_grad_batch, dev_events, job_event = queue.get()
        dev_coalesced = []
        # Coalesce the tensors on all devices and start a local reduction
        for dev_id, grad_batch, event, stream in zip(device_ids, dev_grad_batch,
                                                     dev_events, reduction_streams):
            with torch.cuda.device(dev_id), torch.cuda.stream(stream):
                stream.wait_event(event)
                coalesced = _flatten_tensors(grad_batch)
                dev_coalesced.append(coalesced)
        # Wait for all copies to complete before starting the NCCL kernel
        for stream in reduction_streams:
            stream.synchronize()
        nccl.reduce(dev_coalesced, root=0, streams=nccl_streams)

        # From now on we're only going to work on the first device (from device_ids)
        grad_batch = dev_grad_batch[0]
        coalesced = dev_coalesced[0]
        reduce_stream = reduction_streams[0]
        with torch.cuda.stream(reduce_stream):
            reduce_stream.wait_stream(nccl_streams[0])
            coalesced /= dist.get_world_size()
            dist.all_reduce(coalesced, group=group_id)
            for grad, reduced in zip(grad_batch,
                                     _unflatten_tensors(coalesced, grad_batch)):
                grad.copy_(reduced)
        job_event.set()

    with torch.cuda.device(device_ids[0]):
        while True:
            _process_batch()  # just to have a clear scope
Example #26
Source File: mayhem_5.py From mayhem with MIT License | 5 votes |
def consume_sync(queue):
    while True:
        msg = queue.get()
        logging.info(f"Consumed {msg}")
        # Substitute for handling a message
        time.sleep(random.random())
Example #27
Source File: mayhem_1.py From mayhem with MIT License | 5 votes |
def consume_sync(queue):
    while True:
        msg = queue.get()
        logging.info(f"Consumed {msg}")
        # Substitute for handling a message
        time.sleep(random.random())
Example #28
Source File: mayhem_3.py From mayhem with MIT License | 5 votes |
def consume_sync(queue):
    while True:
        msg = queue.get()
        logging.info(f"Consumed {msg}")
        # Substitute for handling a message
        time.sleep(random.random())
Example #29
Source File: synchronize.py From rlpyt with MIT License | 5 votes |
def drain_queue(queue_obj, n_sentinel=0, guard_sentinel=False):
    """Empty a multiprocessing queue object, with options to protect against
    the delay between ``queue.put()`` and ``queue.get()``.  Returns a list of
    the queue contents.

    With ``n_sentinel=0``, simply call ``queue.get(block=False)`` until
    ``queue.Empty`` exception (which can still happen slightly *after* another
    process called ``queue.put()``).

    With ``n_sentinel>1``, call ``queue.get()`` until `n_sentinel` ``None``
    objects have been returned (marking that each ``put()`` process has
    finished).

    With ``guard_sentinel=True`` (need ``n_sentinel=0``), stops if a ``None``
    is retrieved, and puts it back into the queue, so it can do a blocking
    drain later with ``n_sentinel>1``.
    """
    contents = list()
    if n_sentinel > 0:  # Block until this many None (sentinels) received.
        sentinel_counter = 0
        while True:
            obj = queue_obj.get()
            if obj is None:
                sentinel_counter += 1
                if sentinel_counter >= n_sentinel:
                    return contents
            else:
                contents.append(obj)
    while True:  # Non-blocking, beware of delay between put() and get().
        try:
            obj = queue_obj.get(block=False)
        except queue.Empty:
            return contents
        if guard_sentinel and obj is None:
            # Restore sentinel, intend to do blocking drain later.
            queue_obj.put(None)
            return contents
        elif obj is not None:  # Ignore sentinel.
            contents.append(obj)
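A hypothetical usage sketch of the blocking mode, assuming two producer processes that each put a None sentinel when they finish (produce() is illustrative, not from rlpyt):

import multiprocessing as mp

def produce(q):
    for i in range(3):
        q.put(i)
    q.put(None)  # sentinel marking this producer as finished

if __name__ == '__main__':
    q = mp.Queue()
    workers = [mp.Process(target=produce, args=(q,)) for _ in range(2)]
    for w in workers:
        w.start()
    # Blocks until both sentinels have arrived, then returns the six items.
    items = drain_queue(q, n_sentinel=2)
    for w in workers:
        w.join()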
Example #30
Source File: test_utils.py From bigchaindb with Apache License 2.0 | 5 votes |
def test_process_set_title():
    from uuid import uuid4
    from multiprocessing import Queue
    from setproctitle import getproctitle
    from bigchaindb.utils import Process

    queue = Queue()
    uuid = str(uuid4())

    process = Process(target=lambda: queue.put(getproctitle()),
                      name=uuid)
    process.start()
    assert queue.get() == uuid