Python signal Examples
The following are 12 code examples that reference the Python signal module documentation (signal.html). You can go to the original project or source file via the reference above each example. You may also want to check out all available functions and classes of the signal module.
Example #1
Source File: worker.py (from rele, Apache License 2.0, 6 votes)
def stop(self, signal=None, frame=None):
    """Manage the shutdown process of the worker.

    This function has two purposes:
        1. Cancel all the futures created.
        2. And close all the database connections opened by Django.
           Even though we cancel the connections for every execution
           of the callback, we want to be sure that all the database
           connections are closed in this process.

    Exits with code 0 for a clean exit.

    :param signal: Needed for `signal.signal <https://docs.python.org/3/library/signal.html#signal.signal>`_ # noqa
    :param frame: Needed for `signal.signal <https://docs.python.org/3/library/signal.html#signal.signal>`_ # noqa
    """
    run_middleware_hook("pre_worker_stop", self._subscriptions)
    for future in self._futures:
        future.cancel()
    run_middleware_hook("post_worker_stop")
    sys.exit(0)
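The signal and frame parameters exist only so that stop() has the (signum, frame) signature that signal.signal expects from a handler. A minimal, self-contained sketch of that wiring (the Worker class below is a stand-in for illustration, not the rele implementation):

import signal
import sys
import time

class Worker:
    """Stand-in with the same stop() signature as the rele worker above."""
    def stop(self, signal=None, frame=None):
        print("shutting down cleanly")
        sys.exit(0)

worker = Worker()
# Handlers are called with (signum, frame), which is why stop() accepts both.
signal.signal(signal.SIGINT, worker.stop)
signal.signal(signal.SIGTERM, worker.stop)

while True:  # idle until Ctrl+C or a SIGTERM arrives
    time.sleep(1)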
Example #2
Source File: start.py (from RAFCON, Eclipse Public License 1.0, 6 votes)
def signal_handler(signal, frame=None):
    state_machine_execution_engine = core_singletons.state_machine_execution_engine
    core_singletons.shut_down_signal = signal

    # in this case the print is on purpose to see more easily if the interrupt signal reached the thread
    print(_("Signal '{}' received.\nExecution engine will be stopped and program will be shutdown!").format(
        SIGNALS_TO_NAMES_DICT.get(signal, "[unknown]")))

    # close gui properly
    gui_singletons.main_window_controller.get_controller('menu_bar_controller').on_quit_activate(None)
    post_gui_destruction()

    logging.shutdown()
    # Do not use sys.exit() in signal handler:
    # http://thushw.blogspot.de/2010/12/python-dont-use-sysexit-inside-signal.html
    # noinspection PyProtectedMember
    os._exit(0)
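SIGNALS_TO_NAMES_DICT maps signal numbers back to readable names for the log message. A common recipe for building such a mapping from the standard signal module (an illustrative sketch, not taken from RAFCON):

import signal

# Map signal numbers (e.g. 2, 15) to names (e.g. "SIGINT", "SIGTERM").
# Aliases that share a number (such as SIGCHLD/SIGCLD) collapse to one name.
SIGNALS_TO_NAMES_DICT = {
    getattr(signal, name): name
    for name in dir(signal)
    if name.startswith("SIG") and not name.startswith("SIG_")
}

print(SIGNALS_TO_NAMES_DICT.get(signal.SIGTERM))  # -> "SIGTERM"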
Example #3
Source File: taskd.py (from allura, Apache License 2.0, 6 votes)
def command(self):
    setproctitle('taskd')
    self.basic_setup()
    self.keep_running = True
    self.restart_when_done = False
    base.log.info('Starting taskd, pid %s' % os.getpid())
    signal.signal(signal.SIGHUP, self.graceful_restart)
    signal.signal(signal.SIGTERM, self.graceful_stop)
    signal.signal(signal.SIGUSR1, self.log_current_task)
    # restore default behavior of not interrupting system calls
    # see http://docs.python.org/library/signal.html#signal.siginterrupt
    # and http://linux.die.net/man/3/siginterrupt
    signal.siginterrupt(signal.SIGHUP, False)
    signal.siginterrupt(signal.SIGTERM, False)
    signal.siginterrupt(signal.SIGUSR1, False)
    self.worker()
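signal.siginterrupt(sig, False) asks the OS to restart blocking system calls (the SA_RESTART behaviour) rather than abort them with EINTR when that signal is delivered. A minimal standalone sketch of the same pattern (Unix-only; the handler and the blocking sleep are illustrative):

import signal
import time

def graceful_stop(signum, frame):
    print("received signal", signum, "- will stop after the current task")

signal.signal(signal.SIGTERM, graceful_stop)
# Restart interrupted system calls instead of having them fail with EINTR.
signal.siginterrupt(signal.SIGTERM, False)

time.sleep(60)  # a blocking call that is not aborted when SIGTERM arrives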
Example #4
Source File: dataloader.py (from EMANet, GNU General Public License v3.0, 5 votes)
def _worker_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id):
    global _use_shared_memory
    _use_shared_memory = True

    # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
    # module's handlers are executed after Python returns from C low-level
    # handlers, likely when the same fatal signal happened again already.
    # https://docs.python.org/3/library/signal.html Sec. 18.8.1.1
    _set_worker_signal_handlers()

    torch.set_num_threads(1)
    torch.manual_seed(seed)
    np.random.seed(seed)

    if init_fn is not None:
        init_fn(worker_id)

    while True:
        r = index_queue.get()
        if r is None:
            break
        idx, batch_indices = r
        try:
            samples = collate_fn([dataset[i] for i in batch_indices])
        except Exception:
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))
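The comment explains why a pure-Python handler is not enough for SIGBUS/SIGSEGV: by the time the interpreter would run it, the same fatal signal has usually hit again, so PyTorch installs C-level handlers via the private _set_worker_signal_handlers(). If you only need a traceback when such a fatal signal strikes, the standard library's faulthandler module installs comparable C-level handlers (an alternative sketch, not what the DataLoader code above does):

import faulthandler

# Installs low-level (C) handlers for SIGSEGV, SIGFPE, SIGABRT, SIGBUS and
# SIGILL and dumps every thread's Python traceback when one of them fires.
faulthandler.enable()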
Example #5
Source File: dataloader.py (from semantic-segmentation-pytorch, BSD 3-Clause "New" or "Revised" License, 5 votes)
def _worker_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id):
    global _use_shared_memory
    _use_shared_memory = True

    # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
    # module's handlers are executed after Python returns from C low-level
    # handlers, likely when the same fatal signal happened again already.
    # https://docs.python.org/3/library/signal.html Sec. 18.8.1.1
    _set_worker_signal_handlers()

    torch.set_num_threads(1)
    torch.manual_seed(seed)
    np.random.seed(seed)

    if init_fn is not None:
        init_fn(worker_id)

    while True:
        r = index_queue.get()
        if r is None:
            break
        idx, batch_indices = r
        try:
            samples = collate_fn([dataset[i] for i in batch_indices])
        except Exception:
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))
Example #6
Source File: cleanup.py (from iris, Mozilla Public License 2.0, 5 votes)
def init():
    """Register cleanup handler."""
    logger.debug("Registering cleanup handler")
    global __cleanup_done
    __cleanup_done = False
    # Will be OS-specific, see https://docs.python.org/2/library/signal.html
    atexit.register(cleanup_handler)
    signal.signal(signal.SIGTERM, cleanup_handler)
    if sys.platform == "darwin" or "linux" in sys.platform:
        # SIGHUP is not available on Windows
        signal.signal(signal.SIGHUP, cleanup_handler)
Example #7
Source File: persistent_dataloader.py (from PVN3D, MIT License, 5 votes)
def _worker_loop(
    dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id
):
    global _use_shared_memory
    _use_shared_memory = True

    # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
    # module's handlers are executed after Python returns from C low-level
    # handlers, likely when the same fatal signal happened again already.
    # https://docs.python.org/3/library/signal.html Sec. 18.8.1.1
    # C._set_worker_signal_handlers()

    torch.set_num_threads(1)
    torch.manual_seed(seed)

    if init_fn is not None:
        init_fn(worker_id)

    try:
        dataset.init()
    except AttributeError:
        pass

    while True:
        r = index_queue.get()
        if r is None:
            break
        idx, batch_indices = r
        try:
            samples = collate_fn([dataset[i] for i in batch_indices])
        except Exception:
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))
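The try/except AttributeError around dataset.init() gives the dataset an optional per-worker setup hook: datasets that define init() run it once inside each worker process, and datasets without one are silently skipped. An illustrative dataset exposing such a hook (not part of PVN3D):

class LazyFileDataset:
    """Hypothetical dataset with the optional per-worker init() hook."""
    def __init__(self, paths):
        self.paths = paths
        self.handles = None

    def init(self):
        # Runs once inside each worker process, e.g. to open file handles
        # that must not be shared across fork().
        self.handles = [open(p, "rb") for p in self.paths]

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        return self.handles[idx].read()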
Example #8
Source File: persistent_dataloader.py (from sanet_relocal_demo, GNU General Public License v3.0, 5 votes)
def _worker_loop(
    dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id
):
    global _use_shared_memory
    _use_shared_memory = True

    # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
    # module's handlers are executed after Python returns from C low-level
    # handlers, likely when the same fatal signal happened again already.
    # https://docs.python.org/3/library/signal.html Sec. 18.8.1.1
    # C._set_worker_signal_handlers()

    torch.set_num_threads(1)
    torch.manual_seed(seed)

    if init_fn is not None:
        init_fn(worker_id)

    try:
        dataset.init()
    except AttributeError:
        pass

    while True:
        r = index_queue.get()
        if r is None:
            break
        idx, batch_indices = r
        try:
            samples = collate_fn([dataset[i] for i in batch_indices])
        except Exception:
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))
Example #9
Source File: cleanup.py (from tls-canary, Mozilla Public License 2.0, 5 votes)
def init():
    """Register cleanup handler"""
    # print "Registering cleanup handler"
    global __cleanup_done
    __cleanup_done = False
    # Will be OS-specific, see https://docs.python.org/2/library/signal.html
    atexit.register(cleanup_handler)
    signal.signal(signal.SIGTERM, cleanup_handler)
    if sys.platform == "darwin" or "linux" in sys.platform:
        # SIGHUP is not available on Windows
        signal.signal(signal.SIGHUP, cleanup_handler)
Example #10
Source File: loader_fn.py (from 3D-convolutional-speaker-recognition-pytorch, Apache License 2.0, 5 votes)
def _worker_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id):
    global _use_shared_memory
    _use_shared_memory = True

    # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
    # module's handlers are executed after Python returns from C low-level
    # handlers, likely when the same fatal signal happened again already.
    # https://docs.python.org/3/library/signal.html Sec. 18.8.1.1
    _set_worker_signal_handlers()

    torch.set_num_threads(1)
    random.seed(seed)
    torch.manual_seed(seed)

    if init_fn is not None:
        init_fn(worker_id)

    while True:
        r = index_queue.get()
        if r is None:
            break
        idx, batch_indices = r
        try:
            samples = collate_fn([dataset[i] for i in batch_indices])
        except Exception:
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))
Example #11
Source File: dataloader.py (from mt-dnn, MIT License, 5 votes)
def _worker_loop(dataset, index_queue, data_queue, collate_fn, init_fn, worker_id):
    global _use_shared_memory
    _use_shared_memory = True

    # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
    # module's handlers are executed after Python returns from C low-level
    # handlers, likely when the same fatal signal happened again already.
    # https://docs.python.org/3/library/signal.html Sec. 18.8.1.1
    _set_worker_signal_handlers()

    torch.set_num_threads(1)

    if init_fn is not None:
        init_fn(worker_id)

    watchdog = ManagerWatchdog()

    while True:
        try:
            r = index_queue.get(timeout=MANAGER_STATUS_CHECK_INTERVAL)
        except queue.Empty:
            if watchdog.is_alive():
                continue
            else:
                break
        if r is None:
            break
        idx, batch_indices = r
        try:
            samples = collate_fn([dataset[i] for i in batch_indices])
        except Exception:
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))
            del samples
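This variant polls index_queue with a timeout and asks a ManagerWatchdog whether the parent (manager) process is still alive, so a worker does not block forever if the main process dies. One common way to implement such a watchdog is to remember the parent PID and compare it against os.getppid(); the sketch below is illustrative and may differ from the actual ManagerWatchdog used here:

import os

MANAGER_STATUS_CHECK_INTERVAL = 5.0  # seconds to wait on the queue per check

class ManagerWatchdog:
    """Reports whether the parent (manager) process is still alive."""
    def __init__(self):
        self.manager_pid = os.getppid()
        self.manager_dead = False

    def is_alive(self):
        if not self.manager_dead:
            # If the parent dies, the worker is re-parented and getppid() changes.
            self.manager_dead = os.getppid() != self.manager_pid
        return not self.manager_dead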
Example #12
Source File: scalene.py (from scalene, Apache License 2.0, 4 votes)
def parse_args() -> Tuple[argparse.Namespace, List[str]]:
    usage = dedent(
        """Scalene: a high-precision CPU and memory profiler.
        https://github.com/emeryberger/scalene
        % scalene yourprogram.py
        """
    )
    parser = argparse.ArgumentParser(
        prog="scalene",
        description=usage,
        formatter_class=argparse.RawTextHelpFormatter,
        allow_abbrev=False,
    )
    parser.add_argument("prog", type=str, help="program to be profiled")
    parser.add_argument(
        "--outfile",
        type=str,
        default=None,
        help="file to hold profiler output (default: stdout)",
    )
    parser.add_argument(
        "--html",
        dest="html",
        action="store_const",
        const=True,
        default=False,
        help="output as HTML (default: text)",
    )
    parser.add_argument(
        "--profile-interval",
        type=float,
        default=float("inf"),
        help="output profiles every so many seconds.",
    )
    parser.add_argument(
        "--wallclock",
        dest="wallclock",
        action="store_const",
        const=True,
        default=False,
        help="use wall clock time (default: virtual time)",
    )
    parser.add_argument(
        "--cpu-only",
        dest="cpuonly",
        action="store_const",
        const=True,
        default=False,
        help="only profile CPU time (default: profile CPU, memory, and copying)",
    )
    # the PID of the profiling process (for internal use only)
    parser.add_argument(
        "--pid", type=int, default=int(os.getpid()), help=argparse.SUPPRESS
    )
    # Parse out all Scalene arguments and jam the remaining ones into argv.
    # https://stackoverflow.com/questions/35733262/is-there-any-way-to-instruct-argparse-python-2-7-to-remove-found-arguments-fro
    args, left = parser.parse_known_args()
    return args, left
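parse_known_args() is what lets Scalene keep its own options and pass everything it does not recognize through to the profiled program. A small self-contained demonstration of that splitting behaviour, using a fresh parser with just two of the options above and a hypothetical command line:

import argparse

parser = argparse.ArgumentParser(prog="scalene", allow_abbrev=False)
parser.add_argument("prog", type=str)
parser.add_argument("--html", action="store_const", const=True, default=False)

# Unknown arguments are collected into `left` instead of raising an error,
# so the profiled program's own flags survive.
args, left = parser.parse_known_args(["--html", "yourprogram.py", "--iters", "10"])
print(args.prog, args.html)  # yourprogram.py True
print(left)                  # ['--iters', '10']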