Python Queue.PriorityQueue() Examples
The following are 30 code examples of Queue.PriorityQueue(), the thread-safe priority queue class from the Python 2 standard-library Queue module (renamed queue in Python 3). Each example is an excerpt from an open-source project; the source file, project, and license are noted above each snippet. You may also want to check out the other functions and classes the Queue module provides, such as Queue.Queue and Queue.LifoQueue.
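Before the project code, a quick orientation: PriorityQueue retrieves the entry with the lowest priority value first, and put()/get() are safe to call from multiple threads, which is why most of the examples below use it to hand work between threads. A minimal self-contained sketch (illustration only, not taken from any project on this page):

try:
    import Queue as queue  # Python 2
except ImportError:
    import queue  # Python 3

pq = queue.PriorityQueue()

# Entries are commonly (priority, payload) tuples; the smallest
# priority value comes out first.
pq.put((2, 'medium'))
pq.put((1, 'urgent'))
pq.put((3, 'low'))

while not pq.empty():
    priority, payload = pq.get()
    print("%d %s" % (priority, payload))  # 1 urgent, 2 medium, 3 low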
Example #1
Source File: core.py From representation_mixing with BSD 3-Clause "New" or "Revised" License
def threaded_html_writer(interp=True, maxsize=25):
    """
    Expects to be sent a tuple of (save_path, results_dict)
    """
    messages = Queue.PriorityQueue(maxsize=maxsize)

    def run_thread():
        while True:
            p, item = messages.get()
            if item is GeneratorExit:
                return
            else:
                save_path, results_dict = item
                save_results_as_html(save_path, results_dict)

    threading.Thread(target=run_thread).start()
    try:
        n = 0
        while True:
            item = (yield)
            messages.put((n, item))
            n -= 1
    except GeneratorExit:
        messages.put((1, GeneratorExit))
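A hedged usage sketch for the coroutine above (the driving code is an assumption, not part of the source): the generator must be primed before it can accept .send(); the decreasing counter n gives later submissions lower priority values, so the worker writes the most recent results first; and the shutdown sentinel's priority of 1 sorts after every real item, so the thread exits only once the queue has drained.

writer = threaded_html_writer()
next(writer)  # prime the coroutine so it reaches the first (yield)
writer.send(('results/run1.html', {'loss': 0.42}))  # hypothetical path/payload
writer.send(('results/run2.html', {'loss': 0.40}))
writer.close()  # raises GeneratorExit inside, enqueueing the sentinel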
Example #2
Source File: main_viacom.py From plugin.video.ustvvod with GNU General Public License v2.0
def play_video2(API, video_url=common.args.url, rtmp=True):
    try:
        qbitrate = common.args.quality
    except:
        qbitrate = None
    video_url2 = 'stack://'
    threads = []
    segments = []
    closedcaption = []
    queue = PriorityQueue()
    video_data = connection.getURL(API + 'playlists/%s/videos.json' % video_url)
    video_tree = simplejson.loads(video_data)
    video_item = video_tree['playlist']['videos']
    for i in range(0, len(video_item)):
        try:
            threads.append(Thread(get_videos, queue, i, video_item[i], qbitrate, rtmp))
        except Exception, e:
            print "Exception: ", e
Example #3
Source File: toolbox.py From goreviewpartner with GNU General Public License v3.0
def __init__(self, g, rsgf_filename, profile):
    self.g = g
    self.rsgf_filename = rsgf_filename
    self.profile = profile
    self.bot = self.initialize_bot()
    self.update_queue = Queue.PriorityQueue()
    self.label_queue = Queue.Queue()
    self.best_moves_queue = Queue.Queue()
    self.move_zero = self.g.get_root()
    self.no_variation_if_same_move = True
    size = self.g.get_size()
    log("size of the tree:", size)
    self.size = size
    self.no_variation_if_same_move = grp_config.getboolean('Analysis', 'NoVariationIfSameMove')
    self.maxvariations = grp_config.getint("Analysis", "maxvariations")
    self.stop_at_first_resign = False
    self.cpu_lock = threading.Lock()
Example #4
Source File: __init__.py From AstroBox with GNU Affero General Public License v3.0
def __init__(self, getPathCallback, loadedCallback):
    self._getPathCallback = getPathCallback
    self._loadedCallback = loadedCallback
    self._active = threading.Event()
    self._active.set()
    self._currentFile = None
    self._currentProgress = None
    self._stop = False
    self._queue = Queue.PriorityQueue()
    self._worker = threading.Thread(target=self._work)
    self._worker.daemon = True
    self._worker.start()
Example #5
Source File: core.py From dagbldr with BSD 3-Clause "New" or "Revised" License
def threaded_html_writer(maxsize=25):
    """
    Expects to be sent a tuple of (save_path, results_dict)
    """
    messages = Queue.PriorityQueue(maxsize=maxsize)

    def run_thread():
        while True:
            p, item = messages.get()
            if item is GeneratorExit:
                return
            else:
                save_path, results_dict = item
                save_results_as_html(save_path, results_dict)

    threading.Thread(target=run_thread).start()
    try:
        n = 0
        while True:
            item = (yield)
            messages.put((n, item))
            n -= 1
    except GeneratorExit:
        messages.put((1, GeneratorExit))
Example #6
Source File: core.py From dagbldr with BSD 3-Clause "New" or "Revised" License
def threaded_weights_writer(maxsize=25):
    """
    Expects to be sent a tuple of (save_path, checkpoint_dict)
    """
    messages = Queue.PriorityQueue(maxsize=maxsize)

    def run_thread():
        while True:
            p, item = messages.get()
            if item is GeneratorExit:
                return
            else:
                save_path, items_dict = item
                save_weights(save_path, items_dict)

    threading.Thread(target=run_thread).start()
    try:
        n = 0
        while True:
            item = (yield)
            messages.put((n, item))
            n -= 1
    except GeneratorExit:
        messages.put((1, GeneratorExit))
Example #7
Source File: 414_Third_Maximum_Number.py From leetcode with MIT License
def thirdMax(self, nums):
    """
    :type nums: List[int]
    :rtype: int
    """
    import Queue
    pq = Queue.PriorityQueue(4)
    check = set()
    for n in nums:
        if n in check:
            continue
        pq.put(n)
        check.add(n)
        if len(check) > 3:
            check.remove(pq.get())  # evict the smallest of the four
    total = len(check)
    while total < 3 and total > 1:
        total -= 1
        pq.get()  # fewer than 3 distinct values: discard until the maximum remains
    return pq.get()
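The same bounded-queue idea restated as a self-contained sketch (hypothetical helper, not from the repository): a min-oriented PriorityQueue plus a set for deduplication keeps the k largest distinct values, evicting the current minimum whenever the set grows past k.

import Queue  # Python 2; use "import queue" on Python 3

def k_largest_distinct(nums, k):
    pq = Queue.PriorityQueue(k + 1)
    seen = set()
    for n in nums:
        if n in seen:
            continue
        pq.put(n)
        seen.add(n)
        if len(seen) > k:
            seen.remove(pq.get())  # evict the smallest kept value
    return sorted(seen, reverse=True)

print(k_largest_distinct([3, 2, 1, 2], 3))  # [3, 2, 1]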
Example #8
Source File: Dijkstra.py From hacktoberfest2018 with GNU General Public License v3.0
def dijkstra(no):
    global distancias
    fila = Queue.PriorityQueue()
    distancias[no] = 0
    par = (distancias[no], no)
    fila.put(par)
    while not fila.empty():
        _, topo = fila.get()
        if adjacentes[topo] != 0:
            for peso, filho in adjacentes[topo]:
                if distancias[filho] > distancias[topo] + peso:
                    distancias[filho] = distancias[topo] + peso
                    fila.put((distancias[filho], filho))
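The identifiers are Portuguese: fila is the queue, distancias the distance table, no the start node, topo the popped node, peso an edge weight, and filho a neighbour. For reference, a self-contained English restatement of the same lazy Dijkstra pattern, with one safeguard added that the original omits (skipping stale queue entries); the adjacency-list shape adj[u] = [(weight, v), ...] is an assumption:

import Queue

def dijkstra(adj, source):
    dist = {u: float('inf') for u in adj}
    dist[source] = 0
    pq = Queue.PriorityQueue()
    pq.put((0, source))
    while not pq.empty():
        d, u = pq.get()
        if d > dist[u]:
            continue  # stale entry; u was already relaxed via a shorter path
        for weight, v in adj[u]:
            if dist[u] + weight < dist[v]:
                dist[v] = dist[u] + weight
                pq.put((dist[v], v))
    return dist

print(dijkstra({'a': [(1, 'b'), (4, 'c')], 'b': [(2, 'c')], 'c': []}, 'a'))  # a=0, b=1, c=3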
Example #9
Source File: core.py From dagbldr with BSD 3-Clause "New" or "Revised" License
def threaded_checkpoint_writer(maxsize=25):
    """
    Expects to be sent a tuple of (save_path, checkpoint_dict)
    """
    messages = Queue.PriorityQueue(maxsize=maxsize)

    def run_thread():
        while True:
            p, item = messages.get()
            if item is GeneratorExit:
                return
            else:
                save_path, pickle_item = item
                save_checkpoint(save_path, pickle_item)

    threading.Thread(target=run_thread).start()
    try:
        n = 0
        while True:
            item = (yield)
            messages.put((n, item))
            n -= 1
    except GeneratorExit:
        messages.put((1, GeneratorExit))
Example #10
Source File: executor.py From CUP with Apache License 2.0
def __init__(self, delay_exe_thdnum, queue_exec_thdnum, name):
    """
    init
    """
    self._toal_thdnum = delay_exe_thdnum + queue_exec_thdnum
    self._delay_exe_thdnu = delay_exe_thdnum
    self._queue_exe_thdnum = queue_exec_thdnum
    self._delay_queue = queue.PriorityQueue()
    self._exec_queue = queue.PriorityQueue()
    self._thdpool = threadpool.ThreadPool(
        self._toal_thdnum, self._toal_thdnum, name='executor_pool'
    )
    self._status = 0  # 0 inited, 1 running 2 stopping
    log.info(
        'Executor service inited, delay_exec thread num:%d,'
        ' exec thread num:%d' % (delay_exe_thdnum, queue_exec_thdnum)
    )
    self._name = '' if name is None else name
Example #11
Source File: core.py From briefly with Apache License 2.0
def __init__(self, objs, task_done_callback=None):
    '''Constructor. Create and initialize members for execution.'''
    self.number_of_threads = objs.prop.run_threads
    self.dag = dag.DependencyGraph()
    self.executor_factory = NodeExecutor
    self.lock = threading.Lock()
    self.pending = Queue.PriorityQueue()
    self.task_done_callback = task_done_callback
    self.order = 1
Example #12
Source File: generator.py From point-location with MIT License
def randomTiling(polygon, n, CONCAVE=False):
    """Generates a random concave tiling of a convex region."""
    class PolygonWithArea(object):
        def __init__(self, polygon):
            self.polygon = polygon
            self.area = polygon.area()

        def __cmp__(self, that):
            return -cmp(self.area, that.area)

    # Start with initial convex region
    initial = PolygonWithArea(polygon)

    # Place in PQ to pop by area
    pq = Queue.PriorityQueue(maxsize=n + 1)
    pq.put(initial)

    # Create some concave regions
    triangles = []
    for i in range(n):
        # Split up largest polygon
        polygon = pq.get().polygon
        for polygon in polygon.split(INTERIOR=CONCAVE):
            if polygon.n == 3:
                triangles.append(polygon)
            else:
                pq.put(PolygonWithArea(polygon))

    polygons = triangles
    while pq.qsize():
        polygons.append(pq.get().polygon)
    return polygons
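One portability note on the wrapper class above: __cmp__ and the cmp built-in exist only on Python 2. PriorityQueue merely needs < between queued items, so a Python 3 equivalent (a sketch, assuming the same Polygon API) would define __lt__ instead, inverting the comparison so the largest area is popped first:

class PolygonWithArea(object):
    def __init__(self, polygon):
        self.polygon = polygon
        self.area = polygon.area()

    def __lt__(self, that):
        return self.area > that.area  # larger area sorts first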
Example #13
Source File: qubole.py From briefly with Apache License 2.0
def __init__(self):
    '''Constructor, initialize cluster queue.'''
    super(QuboleManager, self).__init__()
    self.clusters = []
    self.cluster_queue = Queue.PriorityQueue()
    self.prop = None
    self.closed = False
Example #14
Source File: scheduler.py From fiaas-deploy-daemon with Apache License 2.0
def __init__(self, time_func=time_monotonic, delay_func=time.sleep):
    super(Scheduler, self).__init__()
    self._tasks = PriorityQueue()
    self._time_func = time_func
    self._delay_func = delay_func
Example #15
Source File: Irc.py From Doger with GNU General Public License v3.0
def __init__(self, instance):
    self.can_send = threading.Event()
    self.send_queue = Queue.PriorityQueue()
    self.whois_lock = threading.Lock()
    self.whois_queue = Queue.Queue()
    self.lastsend = time.time()
    self.lastwhois = None
    self.reader_dying = threading.Event()
    self.reader_dead = threading.Event()
    self.writer_dying = threading.Event()
    self.writer_dead = threading.Event()
    self.error_lock = threading.Lock()
Example #16
Source File: doo.py From dragonfly with MIT License
def run_DOO(self, budget, nu, rho):
    """ Runs DOO optimisation. """
    leaf_Q = Qu.PriorityQueue()
    d = self.doo_obj.domain_dim
    cell = tuple([tuple([0, 1]) for _ in range(d)])
    height = 0
    cost = 0
    current, c = self.querie(cell, height, rho, nu, 0)
    cost = cost + c
    leaf_Q.put(current)
    dict_of_points = {}
    while cost <= budget:
        current = leaf_Q.get()
        dict_of_points[current.cell] = {'val': current.value,
                                        'fidel': current.fidelity,
                                        'height': current.height}
        children, curr_cost = self.split_children(current, rho, nu)
        if current.cell == children[0].cell:
            break
        cost = cost + curr_cost
        for child in children:
            leaf_Q.put(child)
    while not leaf_Q.empty():
        c = leaf_Q.get()
        dict_of_points[c.cell] = {'val': c.value,
                                  'fidel': c.fidelity,
                                  'height': c.height}
    # maxi = float(-sys.maxint - 1)
    maxi = float('-inf')
    point = 0
    maxh = 0
    val = 0
    fidel = 0
    for key in dict_of_points:
        c = dict_of_points[key]
        if c['val'] - self.C * (1.0 - c['fidel']) > maxi:  # - nu*(rho**c.height) > maxi:
            maxi = c['val'] - self.C * (1.0 - c['fidel'])  # - nu*(rho**c.height)
            val = c['val']
            fidel = c['fidel']
            point = np.array([(s[0] + s[1]) / 2 for s in key])
            maxh = c['height']
    return val, fidel, point, cost, maxh
Example #17
Source File: connection_pool.py From watchmen with Apache License 2.0
def __init__(self, connections, dead_timeout=60, timeout_cutoff=5,
             selector_class=RoundRobinSelector, randomize_hosts=True,
             **kwargs):
    """
    :arg connections: list of tuples containing the
        :class:`~elasticsearch.Connection` instance and its options
    :arg dead_timeout: number of seconds a connection should be retired
        for after a failure, increases on consecutive failures
    :arg timeout_cutoff: number of consecutive failures after which the
        timeout doesn't increase
    :arg selector_class: :class:`~elasticsearch.ConnectionSelector`
        subclass to use if more than one connection is live
    :arg randomize_hosts: shuffle the list of connections upon arrival to
        avoid dog piling effect across processes
    """
    if not connections:
        raise ImproperlyConfigured("No defined connections, you need to "
                                   "specify at least one host.")
    self.connection_opts = connections
    self.connections = [c for (c, opts) in connections]
    # remember original connection list for resurrect(force=True)
    self.orig_connections = tuple(self.connections)
    # PriorityQueue for thread safety and ease of timeout management
    self.dead = PriorityQueue(len(self.connections))
    self.dead_count = {}

    if randomize_hosts:
        # randomize the connection list to avoid all clients hitting same node
        # after startup/restart
        random.shuffle(self.connections)

    # default timeout after which to try resurrecting a connection
    self.dead_timeout = dead_timeout
    self.timeout_cutoff = timeout_cutoff

    self.selector = selector_class(dict(connections))
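The rest of the class is not shown, but the role of the dead queue is worth spelling out: pairing each retired connection with the timestamp at which it becomes eligible for retry means the connection that can be resurrected soonest is always at the front. A minimal sketch of that idea with hypothetical names (not the library's actual methods):

import time
from Queue import PriorityQueue, Empty  # Python 2 spelling

dead = PriorityQueue()

def mark_dead(connection, timeout=60):
    # smallest timestamp = first eligible for resurrection
    dead.put((time.time() + timeout, connection))

def resurrect():
    try:
        timestamp, connection = dead.get(block=False)
    except Empty:
        return None
    if timestamp > time.time():
        dead.put((timestamp, connection))  # not yet eligible; put it back
        return None
    return connection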
Example #18
Source File: connection_pool.py From python-scripts with GNU General Public License v3.0
def __init__(self, connections, dead_timeout=60, timeout_cutoff=5,
             selector_class=RoundRobinSelector, randomize_hosts=True,
             **kwargs):
    """
    :arg connections: list of tuples containing the
        :class:`~elasticsearch.Connection` instance and its options
    :arg dead_timeout: number of seconds a connection should be retired
        for after a failure, increases on consecutive failures
    :arg timeout_cutoff: number of consecutive failures after which the
        timeout doesn't increase
    :arg selector_class: :class:`~elasticsearch.ConnectionSelector`
        subclass to use if more than one connection is live
    :arg randomize_hosts: shuffle the list of connections upon arrival to
        avoid dog piling effect across processes
    """
    if not connections:
        raise ImproperlyConfigured("No defined connections, you need to "
                                   "specify at least one host.")
    self.connection_opts = connections
    self.connections = [c for (c, opts) in connections]
    # remember original connection list for resurrect(force=True)
    self.orig_connections = tuple(self.connections)
    # PriorityQueue for thread safety and ease of timeout management
    self.dead = PriorityQueue(len(self.connections))
    self.dead_count = {}

    if randomize_hosts:
        # randomize the connection list to avoid all clients hitting same node
        # after startup/restart
        random.shuffle(self.connections)

    # default timeout after which to try resurrecting a connection
    self.dead_timeout = dead_timeout
    self.timeout_cutoff = timeout_cutoff

    self.selector = selector_class(dict(connections))
Example #19
Source File: proxylib.py From arkc-client with GNU General Public License v2.0
def __init__(self, window=4, connect_timeout=6, timeout=8, ssl_version='TLSv1',
             dns_servers=['8.8.8.8', '114.114.114.114'], dns_blacklist=[],
             dns_cachesize=64 * 1024):
    self.max_window = window
    self.connect_timeout = connect_timeout
    self.timeout = timeout
    self.ssl_version = getattr(ssl, 'PROTOCOL_%s' % ssl_version)
    self.openssl_context = OpenSSL.SSL.Context(getattr(OpenSSL.SSL, '%s_METHOD' % ssl_version))
    self.dns_servers = dns_servers
    self.dns_blacklist = dns_blacklist
    self.dns_cache = LRUCache(dns_cachesize)
    self.tcp_connection_time = collections.defaultdict(float)
    self.tcp_connection_time_with_clienthello = collections.defaultdict(float)
    self.tcp_connection_cache = collections.defaultdict(Queue.PriorityQueue)
    self.tcp_connection_good_ipaddrs = {}
    self.tcp_connection_bad_ipaddrs = {}
    self.tcp_connection_unknown_ipaddrs = {}
    self.tcp_connection_cachesock = False
    self.tcp_connection_keepalive = False
    self.ssl_connection_time = collections.defaultdict(float)
    self.ssl_connection_cache = collections.defaultdict(Queue.PriorityQueue)
    self.ssl_connection_good_ipaddrs = {}
    self.ssl_connection_bad_ipaddrs = {}
    self.ssl_connection_unknown_ipaddrs = {}
    self.ssl_connection_cachesock = False
    self.ssl_connection_keepalive = False
    self.iplist_alias = {}
    self.fixed_iplist = set([])
    self.host_map = collections.OrderedDict()
    self.host_postfix_map = collections.OrderedDict()
    self.host_postfix_endswith = tuple()
    self.hostport_map = collections.OrderedDict()
    self.hostport_postfix_map = collections.OrderedDict()
    self.hostport_postfix_endswith = tuple()
    self.urlre_map = collections.OrderedDict()
Example #20
Source File: priority_queue.py From mazesolving with The Unlicense
def __init__(self):
    self.pq = Queue.PriorityQueue()
    self.removed = set()
    self.count = 0
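Those three fields suggest the classic lazy-deletion pattern (an assumption about the rest of the class, which is not shown): removing an arbitrary entry from a heap is awkward, so entries are only marked as removed and skipped when they surface. A sketch of how the pieces would fit together:

import Queue

class LazyPriorityQueue(object):
    def __init__(self):
        self.pq = Queue.PriorityQueue()
        self.removed = set()
        self.count = 0  # insertion order breaks priority ties

    def push(self, priority, item):
        self.pq.put((priority, self.count, item))
        self.count += 1

    def remove(self, item):
        self.removed.add(item)  # mark only; the heap is not touched

    def pop(self):
        while True:  # raises Queue.Empty once exhausted
            priority, _, item = self.pq.get_nowait()
            if item not in self.removed:
                return item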
Example #21
Source File: run.py From Katastrophe with MIT License
def download(name):
    shared_mem = Queue.PriorityQueue()
    user = getpass.getuser()
    if platform == "win32":
        # per-user download directory (note the separator after Users)
        directory = 'C:\\Users\\' + user + '\\Torrents\\'
    else:
        directory = '/home/' + user + '/Torrents/'
    Torrent = directory + name
    peerMngr = PeerManager(Torrent)
    bittorrentThread = Reactor(1, "Thread-1", peerMngr, shared_mem, debug=True)
    bittorrentThread.run()
Example #22
Source File: pgoapi.py From pogom with MIT License
def __init__(self, signature_lib_path):
    self.set_logger()

    self._signature_lib_path = signature_lib_path
    self._work_queue = Queue()
    self._auth_queue = PriorityQueue()
    self._workers = []

    self._api_endpoint = 'https://pgorelease.nianticlabs.com/plfe/rpc'

    self.log.info('%s v%s - %s', __title__, __version__, __copyright__)
Example #23
Source File: async_tools.py From openscap-daemon with GNU Lesser General Public License v2.1
def __init__(self, workers=0):
    self.queue = queue.PriorityQueue()
    self.sleep_time = 1

    if workers == 0:
        try:
            import multiprocessing
            workers = multiprocessing.cpu_count()
        except NotImplementedError:
            workers = 4

    self.workers = []
    for i in range(workers):
        worker = threading.Thread(
            name="AsyncManager worker (%i out of %i)" % (i, workers),
            target=AsyncManager._worker_main,
            args=(self, i)
        )
        worker.daemon = True
        self.workers.append(worker)
        worker.start()

    self.last_token = 0
    self.actions = {}
    self.actions_lock = threading.Lock()

    logging.debug("Initialized AsyncManager, %i workers", len(self.workers))
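_worker_main is not shown; presumably each worker loops on queue.get() and runs queued actions in priority order. A generic, self-contained sketch of that worker-loop pattern (hypothetical, not the project's code):

import threading
import Queue as queue  # Python 2 spelling

q = queue.PriorityQueue()

def worker_main(worker_id):
    while True:
        priority, action = q.get()  # blocks until work is available
        try:
            action()
        finally:
            q.task_done()

t = threading.Thread(target=worker_main, args=(0,))
t.daemon = True
t.start()

q.put((1, lambda: None))  # a no-op action with priority 1
q.join()  # returns once task_done() has been called for every item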
Example #24
Source File: dispatch.py From laikaboss with Apache License 2.0
def _get_module_queue(yresults, result, scanObject, metaLabel):
    '''
    Description: Takes the results from a dispatch yara scan and creates
    a priority queue from them. The function also adds dispatch flags if
    they exist in the rule.
    '''
    moduleQueue = Queue.PriorityQueue()
    dispatchFlags = []
    parentDispatchFlags = []
    for yr in yresults:
        if 'scan_modules' in yr.meta:
            # Check to see if the rule has a priority, if not use the default
            if 'priority' in yr.meta:
                priority = int(yr.meta['priority'])
                logging.debug("Rule %s set priority %i" % (yr, priority))
            else:
                priority = int(config.defaultmodulepriority)
            scanObject.addMetadata("DISPATCH", metaLabel, "%s (%i)" % (str(yr), priority))
            moduleQueue.put((priority, uniqueList(yr.meta['scan_modules'].split())))
        if 'flags' in yr.meta:
            dispatchFlags.extend(yr.meta['flags'].split())
        if 'parent_flags' in yr.meta:
            parentDispatchFlags.extend(yr.meta['parent_flags'].split())
        if 'file_type' in yr.meta:
            scanObject.fileType.append(yr.meta['file_type'])
    dispatchFlags = set(dispatchFlags)
    for df in dispatchFlags:
        scanObject.addFlag("dispatch::%s" % (df))
    if scanObject.parent in result.files:
        for pdf in parentDispatchFlags:
            result.files[scanObject.parent].addFlag("dispatch::%s" % (pdf))
    return moduleQueue
Example #25
Source File: GlobalData.py From MSpider with GNU General Public License v2.0
def set_urlnode_queue(self):
    if self.spider_policy == 1:
        self.spider_urlnode_queue = Queue.LifoQueue()
    elif self.spider_policy == 2:
        self.spider_urlnode_queue = Queue.PriorityQueue()
    else:
        self.spider_urlnode_queue = Queue.Queue()
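The three queue types give the spider different traversal orders: Queue.Queue is FIFO (breadth-first crawling), LifoQueue is LIFO (depth-first), and PriorityQueue serves URLs by explicit priority. A small self-contained demonstration of the difference:

import Queue

for cls in (Queue.Queue, Queue.LifoQueue, Queue.PriorityQueue):
    q = cls()
    for item in (2, 1, 3):
        q.put(item)
    print("%s %s" % (cls.__name__, [q.get() for _ in range(3)]))
# Queue [2, 1, 3] -- FIFO
# LifoQueue [3, 1, 2] -- LIFO
# PriorityQueue [1, 2, 3] -- smallest first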
Example #26
Source File: connection_pool.py From sublime-elasticsearch-client with MIT License
def __init__(self, connections, dead_timeout=60, timeout_cutoff=5,
             selector_class=RoundRobinSelector, randomize_hosts=True,
             **kwargs):
    """
    :arg connections: list of tuples containing the
        :class:`~elasticsearch.Connection` instance and its options
    :arg dead_timeout: number of seconds a connection should be retired
        for after a failure, increases on consecutive failures
    :arg timeout_cutoff: number of consecutive failures after which the
        timeout doesn't increase
    :arg selector_class: :class:`~elasticsearch.ConnectionSelector`
        subclass to use if more than one connection is live
    :arg randomize_hosts: shuffle the list of connections upon arrival to
        avoid dog piling effect across processes
    """
    if not connections:
        raise ImproperlyConfigured("No defined connections, you need to "
                                   "specify at least one host.")
    self.connection_opts = connections
    self.connections = [c for (c, opts) in connections]
    # remember original connection list for resurrect(force=True)
    self.orig_connections = tuple(self.connections)
    # PriorityQueue for thread safety and ease of timeout management
    self.dead = PriorityQueue(len(self.connections))
    self.dead_count = {}

    if randomize_hosts:
        # randomize the connection list to avoid all clients hitting same node
        # after startup/restart
        random.shuffle(self.connections)

    # default timeout after which to try resurrecting a connection
    self.dead_timeout = dead_timeout
    self.timeout_cutoff = timeout_cutoff

    self.selector = selector_class(dict(connections))
Example #27
Source File: connection_pool.py From splunk-elasticsearch with Apache License 2.0
def __init__(self, connections, dead_timeout=60, timeout_cutoff=5,
             selector_class=RoundRobinSelector, randomize_hosts=True,
             **kwargs):
    """
    :arg connections: list of tuples containing the
        :class:`~elasticsearch.Connection` instance and its options
    :arg dead_timeout: number of seconds a connection should be retired
        for after a failure, increases on consecutive failures
    :arg timeout_cutoff: number of consecutive failures after which the
        timeout doesn't increase
    :arg selector_class: :class:`~elasticsearch.ConnectionSelector`
        subclass to use if more than one connection is live
    :arg randomize_hosts: shuffle the list of connections upon arrival to
        avoid dog piling effect across processes
    """
    if not connections:
        raise ImproperlyConfigured("No defined connections, you need to "
                                   "specify at least one host.")
    self.connection_opts = connections
    self.connections = [c for (c, opts) in connections]
    # remember original connection list for resurrect(force=True)
    self.orig_connections = tuple(self.connections)
    # PriorityQueue for thread safety and ease of timeout management
    self.dead = PriorityQueue(len(self.connections))
    self.dead_count = {}

    if randomize_hosts:
        # randomize the connection list to avoid all clients hitting same node
        # after startup/restart
        random.shuffle(self.connections)

    # default timeout after which to try resurrecting a connection
    self.dead_timeout = dead_timeout
    self.timeout_cutoff = timeout_cutoff

    self.selector = selector_class(dict(connections))
Example #28
Source File: events.py From AstroBox with GNU Affero General Public License v3.0
def __init__(self):
    self._registeredListeners = {}
    self._logger = logging.getLogger(__name__)

    self._queue = Queue.PriorityQueue()
    self._worker = threading.Thread(target=self._work)
    self._worker.daemon = True
    self._worker.start()
Example #29
Source File: threads.py From plex_autoscan with GNU General Public License v3.0
def __init__(self):
    self._is_available = True
    self._mutex = threading.Lock()
    self._waiter_queue = queue.PriorityQueue()
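These fields point to a priority lock: acquirers beyond the first presumably enqueue themselves with a priority and block until released in priority order. A generic sketch of that pattern with hypothetical method names (the project's actual acquire/release are not shown):

import threading
import Queue as queue

class PriorityLock(object):
    def __init__(self):
        self._is_available = True
        self._mutex = threading.Lock()
        self._waiter_queue = queue.PriorityQueue()
        self._counter = 0  # tie-breaker keeps equal priorities FIFO

    def acquire(self, priority=0):
        with self._mutex:
            if self._is_available:
                self._is_available = False
                return
            event = threading.Event()
            self._counter += 1
            self._waiter_queue.put((priority, self._counter, event))
        event.wait()  # woken by release() in priority order

    def release(self):
        with self._mutex:
            if self._waiter_queue.empty():
                self._is_available = True
            else:
                _, _, event = self._waiter_queue.get()
                event.set()  # hand the lock straight to the next waiter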
Example #30
Source File: InfrastructureInfo.py From im with GNU General Public License v3.0
def deserialize(str_data):
    newinf = InfrastructureInfo()
    dic = json.loads(str_data)
    vm_list = dic['vm_list']
    vm_master_id = dic['vm_master']
    dic['vm_master'] = None
    dic['vm_list'] = []
    if dic['auth']:
        dic['auth'] = Authentication.deserialize(dic['auth'])
    if dic['radl']:
        dic['radl'] = parse_radl(dic['radl'])
    else:
        dic['radl'] = RADL()
    if 'extra_info' in dic and dic['extra_info'] and "TOSCA" in dic['extra_info']:
        try:
            dic['extra_info']['TOSCA'] = Tosca.deserialize(dic['extra_info']['TOSCA'])
        except Exception:
            del dic['extra_info']['TOSCA']
            InfrastructureInfo.logger.exception("Error deserializing TOSCA document")
    newinf.__dict__.update(dic)
    newinf.cloud_connector = None
    # Set the ConfManager object and the lock to the data loaded
    newinf.cm = None
    newinf.ctxt_tasks = PriorityQueue()
    newinf.conf_threads = []
    for vm_data in vm_list:
        vm = VirtualMachine.deserialize(vm_data)
        vm.inf = newinf
        if vm.im_id == vm_master_id:
            newinf.vm_master = vm
        newinf.vm_list.append(vm)
    newinf.adding = False
    newinf.deleting = False
    return newinf