Python threading.Semaphore() Examples
The following are 30 code examples of threading.Semaphore(), collected from open-source projects. Each example notes its original project, source file, and license. You may also want to look at the other functions and classes provided by the threading module.
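Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the two Semaphore patterns that recur throughout: a counting semaphore created with Semaphore(n) to bound how many threads enter a section at once, and a semaphore created with Semaphore(0) used purely as a signal that another thread releases when its work is done.

import threading

# Pattern 1: bound concurrency -- at most 3 workers run the guarded section at a time.
limit = threading.Semaphore(3)

def worker(i):
    with limit:  # acquire on entry, release on exit
        print("worker %d running" % i)

# Pattern 2: signalling -- Semaphore(0) starts "locked"; acquire() blocks until release().
done = threading.Semaphore(0)

def background():
    print("background work finished")
    done.release()  # wake up whoever is waiting in acquire()

for i in range(5):
    threading.Thread(target=worker, args=(i,)).start()

threading.Thread(target=background).start()
done.acquire()  # blocks until background() calls release()

Both patterns recur below: for instance, Examples #2 and #22 block on a Semaphore(0) until a callback releases it, while Examples #18 and #24 use Semaphore(n) to cap the number of concurrent threads.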
Example #1
Source File: thread.py From addon with GNU General Public License v3.0 | 6 votes |
def __init__(self, max_workers=None, thread_name_prefix=''):
    """Initializes a new ThreadPoolExecutor instance.

    Args:
        max_workers: The maximum number of threads that can be used to
            execute the given calls.
        thread_name_prefix: An optional name prefix to give our threads.
    """
    if max_workers is None:
        # Use this number because ThreadPoolExecutor is often
        # used to overlap I/O instead of CPU work.
        max_workers = (cpu_count() or 1) * 5
    if max_workers <= 0:
        raise ValueError("max_workers must be greater than 0")

    self._max_workers = max_workers
    self._work_queue = queue.Queue()
    self._idle_semaphore = threading.Semaphore(0)
    self._threads = set()
    self._shutdown = False
    self._shutdown_lock = threading.Lock()
    self._thread_name_prefix = (thread_name_prefix or
                                ("ThreadPoolExecutor-%d" % self._counter()))
Example #2
Source File: client_sync.py From INGInious with GNU Affero General Public License v3.0 | 6 votes |
def new_job(self, priority, task, inputdata, launcher_name="Unknown", debug=False):
    """ Runs a new job. It works exactly like the Client class, instead that there is no callback and directly returns
        result, in the form of a tuple (result, grade, problems, tests, custom, archive).
    """
    job_semaphore = threading.Semaphore(0)

    def manage_output(result, grade, problems, tests, custom, state, archive, stdout, stderr):
        """ Manages the output of this job """
        manage_output.job_return = (result, grade, problems, tests, custom, state, archive, stdout, stderr)
        job_semaphore.release()

    manage_output.job_return = None

    self._client.new_job(priority, task, inputdata, manage_output, launcher_name, debug)
    job_semaphore.acquire()
    job_return = manage_output.job_return
    return job_return
Example #3
Source File: RtkController.py From rtkbase with GNU Affero General Public License v3.0 | 6 votes |
def __init__(self, rtklib_path, config_path):
    self.bin_path = rtklib_path
    self.config_path = config_path

    self.child = 0

    self.status = {}
    self.obs_rover = {}
    self.obs_base = {}
    self.info = {}

    self.semaphore = Semaphore()

    self.started = False
    self.launched = False
    self.current_config = ""
Example #4
Source File: __init__.py From multitasking with Apache License 2.0 | 6 votes |
def createPool(name="main", threads=None, engine=None):
    config["POOL_NAME"] = name

    try:
        threads = int(threads)
    except Exception:
        threads = config["MAX_THREADS"]

    if threads < 2:
        threads = 0

    engine = engine if engine is not None else config["ENGINE"]

    config["MAX_THREADS"] = threads
    config["ENGINE"] = engine

    config["POOLS"][config["POOL_NAME"]] = {
        "pool": Semaphore(threads) if threads > 0 else None,
        "engine": Process if "process" in engine.lower() else Thread,
        "name": name,
        "threads": threads
    }
Example #5
Source File: Fuzzer.py From Yuki-Chan-The-Auto-Pentest with MIT License | 6 votes |
def start(self):
    # Setting up testers
    self.setupScanners()
    # Setting up threads
    self.setupThreads()
    self.index = 0
    self.dictionary.reset()
    self.runningThreadsCount = len(self.threads)
    self.running = True
    self.playEvent = threading.Event()
    self.pausedSemaphore = threading.Semaphore(0)
    self.playEvent.clear()
    self.exit = False
    for thread in self.threads:
        thread.start()
    self.play()
Example #6
Source File: wsg50_gripper.py From visual_foresight with MIT License | 6 votes |
def __init__(self):
    super(WSG50Gripper, self).__init__()
    self.max_release = 0
    self.sem_list = [Semaphore(value=0)]
    self._status_mutex = Lock()

    self._desired_gpos = GRIPPER_OPEN
    self._gripper_speed = 300

    self._force_counter = 0
    self._integrate_gripper_force, self._last_integrate = 0., None
    self._last_status_t = time.time()
    self.num_timeouts = 0

    self.gripper_pub = rospy.Publisher('/wsg_50_driver/goal_position', Cmd, queue_size=10)
    rospy.Subscriber("/wsg_50_driver/status", Status, self._gripper_callback)
    logging.getLogger('robot_logger').info("waiting for first status")
    self.sem_list[0].acquire()
    logging.getLogger('robot_logger').info('gripper initialized!')

    self._bg = Thread(target=self._background_monitor)
    self._bg.start()
Example #7
Source File: wsg50_gripper.py From visual_foresight with MIT License | 6 votes |
def _set_gripper(self, command_pos, wait=False):
    self._status_mutex.acquire()
    self._desired_gpos = command_pos
    if wait:
        if self.num_timeouts > MAX_TIMEOUT:
            rospy.signal_shutdown("MORE THAN {} GRIPPER TIMEOUTS".format(MAX_TIMEOUT))

        sem = Semaphore(value=0)  # use of semaphore ensures script will block if gripper dies during execution
        self.sem_list.append(sem)
        self._status_mutex.release()

        start = rospy.get_time()
        logging.getLogger('robot_logger').debug("gripper sem acquire, list len-{}".format(len(self.sem_list)))
        sem.acquire()
        logging.getLogger('robot_logger').debug("waited on gripper for {} seconds".format(rospy.get_time() - start))
    else:
        self._status_mutex.release()
Example #8
Source File: network_semaphore.py From ACE with Apache License 2.0 | 6 votes |
def initialize_fallback_semaphores():
    """This needs to be called once at the very beginning of starting ACE."""

    # we need some fallback functionality for when the network semaphore server is down
    # these semaphores serve that purpose
    global_engine_instance_count = saq.CONFIG['global'].getint('global_engine_instance_count')
    for key in saq.CONFIG['network_semaphore'].keys():
        if key.startswith('semaphore_'):
            semaphore_name = key[len('semaphore_'):]
            # the configuration settings for the network semaphore specify how many connections
            # are allowed to a specific resource at once, globally
            # so if we are unable to coordinate globally, the fallback is to divide the available
            # number of resources between all the engines evenly
            # that's what this next equation is for
            fallback_limit = int(floor(saq.CONFIG['network_semaphore'].getfloat(key) /
                                       float(global_engine_instance_count)))
            # we allow a minimum of one per engine
            if fallback_limit < 1:
                fallback_limit = 1

            logging.debug("fallback semaphore count for {0} is {1}".format(semaphore_name, fallback_limit))
            fallback_semaphores[semaphore_name] = LoggingSemaphore(fallback_limit)
            #fallback_semaphores[semaphore_name] = multiprocessing.Semaphore(fallback_limit)
            #fallback_semaphores[semaphore_name].semaphore_name = 'fallback {0}'.format(semaphore_name)
Example #9
Source File: test_logging.py From ironpython3 with Apache License 2.0 | 6 votes |
def setUp(self):
    """Set up a TCP server to receive log messages, and a SocketHandler
    pointing to that server's address and port."""
    BaseTest.setUp(self)
    self.server = server = self.server_class(self.address,
                                             self.handle_socket, 0.01)
    server.start()
    server.ready.wait()
    hcls = logging.handlers.SocketHandler
    if isinstance(server.server_address, tuple):
        self.sock_hdlr = hcls('localhost', server.port)
    else:
        self.sock_hdlr = hcls(server.server_address, None)
    self.log_output = ''
    self.root_logger.removeHandler(self.root_logger.handlers[0])
    self.root_logger.addHandler(self.sock_hdlr)
    self.handled = threading.Semaphore(0)
Example #10
Source File: test_logging.py From Fluid-Designer with GNU General Public License v3.0 | 6 votes |
def setUp(self):
    """Set up a TCP server to receive log messages, and a SocketHandler
    pointing to that server's address and port."""
    BaseTest.setUp(self)
    self.server = server = self.server_class(self.address,
                                             self.handle_socket, 0.01)
    server.start()
    server.ready.wait()
    hcls = logging.handlers.SocketHandler
    if isinstance(server.server_address, tuple):
        self.sock_hdlr = hcls('localhost', server.port)
    else:
        self.sock_hdlr = hcls(server.server_address, None)
    self.log_output = ''
    self.root_logger.removeHandler(self.root_logger.handlers[0])
    self.root_logger.addHandler(self.sock_hdlr)
    self.handled = threading.Semaphore(0)
Example #11
Source File: elastic_scheduler.py From zoe with Apache License 2.0 | 6 votes |
def __init__(self, state: SQLManager, policy, metrics: StatsManager):
    if policy not in ('FIFO', 'SIZE', 'DYNSIZE'):
        raise UnsupportedSchedulerPolicyError
    self.metrics = metrics
    self.trigger_semaphore = threading.Semaphore(0)
    self.policy = policy
    self.queue = []
    self.queue_running = []
    self.queue_termination = []
    self.additional_exec_state = {}
    self.loop_quit = False
    self.loop_th = threading.Thread(target=self.loop_start_th, name='scheduler')
    self.core_limit_recalc_trigger = threading.Event()
    self.core_limit_th = threading.Thread(target=self._adjust_core_limits, name='adjust_core_limits')
    self.state = state
    for execution in self.state.executions.select(status='running'):
        if execution.all_services_running:
            self.queue_running.append(execution)
        else:
            self.queue.append(execution)
            self.additional_exec_state[execution.id] = ExecutionProgress()
    self.loop_th.start()
    self.core_limit_th.start()
Example #12
Source File: cosmosdb_storage.py From botbuilder-python with MIT License | 6 votes |
def __init__(
    self, config: CosmosDbConfig, client: cosmos_client.CosmosClient = None
):
    """Create the storage object.

    :param config:
    """
    super(CosmosDbStorage, self).__init__()
    self.config = config
    self.client = client or cosmos_client.CosmosClient(
        self.config.endpoint, {"masterKey": self.config.masterkey}
    )
    # these are set by the functions that check
    # the presence of the database and container or creates them
    self.database = None
    self.container = None
    self._database_creation_options = config.database_creation_options
    self._container_creation_options = config.container_creation_options
    self.__semaphore = Semaphore()
Example #13
Source File: RtkController.py From ReachView with GNU General Public License v3.0 | 6 votes |
def __init__(self, rtklib_path):
    self.bin_path = rtklib_path + "/app/rtkrcv/gcc"
    self.config_path = rtklib_path + "/app/rtkrcv"

    self.child = 0

    self.status = {}
    self.obs_rover = {}
    self.obs_base = {}
    self.info = {}

    self.semaphore = Semaphore()

    self.started = False
    self.launched = False
    self.current_config = ""
Example #14
Source File: adaptive_thread_pool.py From browserscope with Apache License 2.0 | 6 votes |
def __init__(self, num_threads, sleep=InterruptibleSleep):
    """Constructor for ThreadGate instances.

    Args:
      num_threads: The total number of threads using this gate.
      sleep: Used for dependency injection.
    """
    self.__enabled_count = 1

    self.__lock = threading.Lock()

    self.__thread_semaphore = threading.Semaphore(self.__enabled_count)
    self.__num_threads = num_threads
    self.__backoff_time = 0
    self.__sleep = sleep
Example #15
Source File: aws_minion_manager.py From minion-manager with Apache License 2.0 | 6 votes |
def set_semaphore(self, asg_meta):
    """ Update no of instances can be terminated based on percentage. """
    asg_name = asg_meta.get_name()
    asg_semaphore = 'semaphore' + asg_name
    resp = self._ac_client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
    desired_instances = resp["AutoScalingGroups"][0]["DesiredCapacity"]
    if self.terminate_percentage > 100:
        self.terminate_percentage = 100
    elif self.terminate_percentage <= 0:
        self.terminate_percentage = 1
    # Get no of instance can parallel be rotated
    svalue = int(round(desired_instances * (self.terminate_percentage / 100.0)))
    if svalue == 0:
        svalue = 1
    logger.info("Maximum %d instance will be rotated at a time for ASG %s", svalue, asg_name)
    asg_semaphore = Semaphore(value=svalue)
    return asg_semaphore
Example #16
Source File: thread.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def __init__(self, max_workers=None, thread_name_prefix=''):
    """Initializes a new ThreadPoolExecutor instance.

    Args:
        max_workers: The maximum number of threads that can be used to
            execute the given calls.
        thread_name_prefix: An optional name prefix to give our threads.
    """
    if max_workers is None:
        # Use this number because ThreadPoolExecutor is often
        # used to overlap I/O instead of CPU work.
        max_workers = (cpu_count() or 1) * 5
    if max_workers <= 0:
        raise ValueError("max_workers must be greater than 0")

    self._max_workers = max_workers
    self._work_queue = queue.Queue()
    self._idle_semaphore = threading.Semaphore(0)
    self._threads = set()
    self._shutdown = False
    self._shutdown_lock = threading.Lock()
    self._thread_name_prefix = (thread_name_prefix or
                                ("ThreadPoolExecutor-%d" % self._counter()))
Example #17
Source File: __init__.py From script-languages with MIT License | 5 votes |
def __init__(self, debug=False, esmtp=True):
    super(SMTPServer, self).__init__()
    self.esmtp = esmtp
    self.debug = debug
    self._server = None
    self._wait_for_server = threading.Semaphore(0)
Example #18
Source File: pool.py From logscan with Apache License 2.0 | 5 votes |
def __init__(self, size):
    self.__pool = threading.Semaphore(size)
    self.__threads = []
Example #19
Source File: hsportscanner.py From hsportscanner with MIT License | 5 votes |
def check_host(hid_serv, state_changed_event):
    adr = hid_serv.onion_address
    ndx = hid_serv.next_port_ndx_to_scan + hid_serv.failed_connect_tries
    #print "ndx=",ndx
    if ndx >= len(hid_serv.ports_to_scan):
        port = hid_serv.ports_to_scan[-1]
        ndx = len(hid_serv.ports_to_scan) - 1
    else:
        port = hid_serv.ports_to_scan[ndx]
    thdprint(LOG_DEBUG, "Checking connectivity for {0}. Port used is {1}.".format(adr, port))
    hid_serv_lock = threading.Lock()
    make_connect(hid_serv, ndx, hid_serv_lock, None, None, 5)
    if hid_serv.failed_connect_tries >= MAX_FAILED_CONNS:
        thdprint(LOG_DEBUG, "Tried to connect to {0} for {1} times. Port tried is {2}. Give up.".format(adr, MAX_FAILED_CONNS + 1, port))
        hid_serv.connect_state = CONN_STATE_FINISHED
    elif hid_serv.connect_state != CONN_STATE_CONNECTED:
        hid_serv.failed_connect_tries += 1
        thdprint(LOG_DEBUG, "Tried to connect to {0} for {1} times. Port tried is {2}. Will try more.".format(adr, hid_serv.failed_connect_tries, port))
        hid_serv.connect_state = CONN_STATE_UNTRIED
    else:
        thdprint(LOG_DEBUG, "Connected to {0} from {1} attempt. Port tried is {2}.".format(adr, hid_serv.failed_connect_tries + 1, port))
    state_changed_event.set()
    #print "Conncectivity checked"
    return 0

## Scan a hidden service for open ports
#
# @param hid_serv Hidden service to scan. Type: class HiddenService.
# @param n The maximum number of threads this function will create. Type: int.
# @param fin_sem0 Semaphore we need to release at the end of the thread. Type: class threading.Semaphore.
# @param state_changed_event Triggered if connect state of the hidden service changes.
# @return 0
Example #20
Source File: hyperparam_search.py From keras-image-captioning with MIT License | 5 votes |
def main(training_label_prefix, dataset_name=None, epochs=None, time_limit=None, num_gpus=None):
    epochs = int(epochs) if epochs else None
    time_limit = parse_timedelta(time_limit) if time_limit else None
    num_gpus = int(num_gpus) if num_gpus else None
    search = HyperparamSearch(training_label_prefix=training_label_prefix,
                              dataset_name=dataset_name,
                              epochs=epochs,
                              time_limit=time_limit,
                              num_gpus=num_gpus)

    def handler(signum, frame):
        logging('Stopping hyperparam search..')
        with search.lock:
            search.stop()
            for index, running_command in search.running_commands:
                try:
                    label = search.training_label(index)
                    logging('Sending SIGINT to {}..'.format(label))
                    running_command.signal(signal.SIGINT)
                except OSError:  # The process might have exited before
                    logging('{} might have terminated before.'.format(label))
                except:
                    traceback.print_exc(file=sys.stderr)
        logging('All training processes have been sent SIGINT.')

    signal.signal(signal.SIGINT, handler)

    # We need to execute search.run() in another thread in order for Semaphore
    # inside it doesn't block the signal handler. Otherwise, the signal handler
    # will be executed after any training process finishes the whole epoch.
    executor = ThreadPoolExecutor(max_workers=1)
    executor.submit(search.run)
    # wait must be True in order for the mock works,
    # see the unit test for more details
    executor.shutdown(wait=True)
Example #21
Source File: __init__.py From shadowlands with MIT License | 5 votes |
def __init__(self, sl_config=None):
    super().__init__()
    self.config = sl_config
    self._client_name = None
    self._block_listener = None
    self.eth_price = None
    self.update_sem = threading.Semaphore(value=2)
    self.update_lock = threading.Lock()
    self.start_heartbeat_thread()
Example #22
Source File: SeleniumTest.py From INGInious with GNU Affero General Public License v3.0 | 5 votes |
def _start_frontend(config, host, port, ssh_port):
    semaphore = threading.Semaphore(0)

    def active_callback():
        semaphore.release()

    func, close_app_func = get_app(config)
    inginious_root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
    func = StaticMiddleware(func, [
        ('/static/', os.path.join(inginious_root_path, 'frontend', 'static'))
    ])

    server = web.httpserver.WSGIServer((host, port), func)

    class FrontendThread(threading.Thread):
        def __init__(self):
            super(FrontendThread, self).__init__()
            self.daemon = True

        def run(self):
            try:
                server.start()
            except:
                server.stop()

    thread = FrontendThread()
    thread.start()
    semaphore.acquire()
    return thread, server, close_app_func
Example #23
Source File: mpv.py From MellPlayer with MIT License | 5 votes |
def wait_for_property(self, name, cond=lambda val: val, level_sensitive=True):
    sema = threading.Semaphore(value=0)

    def observer(val):
        if cond(val):
            sema.release()

    self.observe_property(name, observer)
    if not level_sensitive or not cond(getattr(self, name.replace('-', '_'))):
        sema.acquire()
    self.unobserve_property(name, observer)
Example #24
Source File: upload_pool.py From cos-ftp-server-V5 with MIT License | 5 votes |
def __init__(self):
    if UploadPool._isInit:  # do not initialize again if already initialized
        return

    logger.info("init pool")
    self._thread_num = CosFtpConfig().upload_thread_num                        # number of threads
    self._semaphore = threading.Semaphore(CosFtpConfig().upload_thread_num)    # caps the number of threads
    self._reference_threads = set()                                            # reference count
    UploadPool._isInit = True
Example #25
Source File: webapi.py From WebWhatsapp-Wrapper with MIT License | 5 votes |
def acquire_semaphore(client_id, cancel_if_locked=False):
    if not client_id:
        return False

    if client_id not in semaphores:
        semaphores[client_id] = threading.Semaphore()

    timeout = 10
    if cancel_if_locked:
        timeout = 0

    val = semaphores[client_id].acquire(blocking=True, timeout=timeout)

    return val
Example #26
Source File: utils.py From grimoirelab-kingarthur with GNU General Public License v3.0 | 5 votes |
def __init__(self):
    self._readers = 0
    self._order_mutex = threading.Semaphore()
    self._readers_mutex = threading.Semaphore()
    self._access_mutex = threading.Semaphore()
Example #27
Source File: test_webhook_worker.py From onedrived-dev with MIT License | 5 votes |
def setUp(self):
    self.temp_config_dir, self.temp_repo_dir, self.drive_config, self.repo = get_sample_repo()
    self.worker = od_webhook.WebhookWorkerThread('https://localhost:12345',
                                                 callback_func=self._dummy_webhook_callback,
                                                 action_delay_sec=0)
    self.callback_called_sem = threading.Semaphore(value=0)
    self.callback_repos = []
    self.callback_count = 0
Example #28
Source File: test_threads.py From onedrived-dev with MIT License | 5 votes |
def test_lifecycle(self):
    sem = threading.Semaphore(value=0)
    t = DummyTask(sem, None, self.task_pool)
    w = od_threads.TaskWorkerThread('DummyWorker', task_pool=self.task_pool)
    w.start()
    self.assertTrue(self.task_pool.add_task(t))
    self.assertTrue(sem.acquire(timeout=5))
    w.stop()
    self.task_pool.close(1)
    w.join(timeout=5)
Example #29
Source File: od_task.py From onedrived-dev with MIT License | 5 votes |
def __init__(self):
    self.tasks_by_path = {}
    self.queued_tasks = []
    self.semaphore = threading.Semaphore(0)
    self._lock = threading.Lock()
Example #30
Source File: exit.py From pscheduler with Apache License 2.0 | 5 votes |
def __init__(self, n):
    self.n = n
    self.count = 0
    self.mutex = Semaphore(1)
    self.barrier = Semaphore(0)
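Example #30 shows only the constructor of a classic two-semaphore barrier: a mutex protecting the arrival count and a Semaphore(0) that holds threads back. The rest of the class is not reproduced here; as a hypothetical sketch (illustrative only, not pscheduler's actual code), such a barrier is typically completed with a wait() method along these lines:

def wait(self):
    # Hypothetical completion, not taken from pscheduler's exit.py.
    self.mutex.acquire()
    self.count += 1
    last_arrival = (self.count == self.n)
    self.mutex.release()
    if last_arrival:
        # The last thread to arrive opens the barrier for all n waiters.
        for _ in range(self.n):
            self.barrier.release()
    self.barrier.acquire()  # each thread blocks here until all n have arrived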