Python humanfriendly.Timer() Examples
The following are 27 code examples of humanfriendly.Timer().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module humanfriendly, or try the search function.
Example #1
Source File: utils.py From TC-ResNet with Apache License 2.0 | 6 votes |
def __call__(self, name, log_func=None):
    """Context-manager body that logs how long the wrapped routines took.

    Example:
        timer = Timer(log)
        with timer("Some Routines"):
            routine1()
            routine2()

    :param name: Label used in the log message (a string).
    :param log_func: Optional logging callable (defaults to ``self.log.info``).
    """
    if log_func is None:
        log_func = self.log.info
    # Bug fix: time.clock() was removed in Python 3.8; time.perf_counter()
    # is the recommended monotonic clock for measuring elapsed intervals.
    start = time.perf_counter()
    yield
    end = time.perf_counter()
    duration = end - start
    readable_duration = format_timespan(duration)
    log_func(f"{name} :: {readable_duration}")
Example #2
Source File: tcp.py From python-executor with MIT License | 6 votes |
def wait_until_connected(self):
    """
    Block until the endpoint starts accepting connections.

    :raises: :exc:`TimeoutError` when the SSH server isn't fast enough to initialize.
    """
    stopwatch = Timer()
    with Spinner(timer=stopwatch) as progress:
        while True:
            if self.is_connected:
                break
            if stopwatch.elapsed_time > self.wait_timeout:
                raise TimeoutError(format(
                    "Failed to establish connection to %s within configured timeout of %s!",
                    self.endpoint, format_timespan(self.wait_timeout),
                ))
            progress.step(label="Waiting for %s to accept connections" % self.endpoint)
            progress.sleep()
    logger.debug("Waited %s for %s to accept connections.", stopwatch, self.endpoint)
Example #3
Source File: utils.py From MMNet with Apache License 2.0 | 6 votes |
def __call__(self, name, log_func=None):
    """Context-manager body that logs how long the wrapped routines took.

    Example:
        timer = Timer(log)
        with timer("Some Routines"):
            routine1()
            routine2()

    :param name: Label used in the log message (a string).
    :param log_func: Optional logging callable (defaults to ``self.log.info``).
    """
    if log_func is None:
        log_func = self.log.info
    # Bug fix: time.clock() was removed in Python 3.8; time.perf_counter()
    # is the recommended monotonic clock for measuring elapsed intervals.
    start = time.perf_counter()
    yield
    end = time.perf_counter()
    duration = end - start
    readable_duration = format_timespan(duration)
    log_func(f"{name} :: {readable_duration}")
Example #4
Source File: cli.py From python-executor with MIT License | 6 votes |
def run_command(arguments, timeout=None):
    """
    Run the specified command (with an optional timeout).

    :param arguments: The command line for the external command (a list of strings).
    :param timeout: The optional command timeout (a number or :data:`None`).
    :raises: :exc:`CommandTimedOut` if the command times out.
    """
    timer = Timer()
    logger.info("Running command: %s", quote(arguments))
    with execute(*arguments, asynchronous=True) as command:
        # Wait for the command to finish or exceed the given timeout.
        while command.is_running:
            if timeout and timer.elapsed_time > timeout:
                raise CommandTimedOut(command, timeout)
            # Sleep between 0.1 and 1 second while waiting for the external
            # command to finish. Bug fix: the original unconditionally
            # computed `timeout - timer.elapsed_time', which raised
            # TypeError (None minus float) when no timeout was given.
            if timeout:
                time_to_sleep = min(1, max(0.1, timeout - timer.elapsed_time))
            else:
                time_to_sleep = 1
            if time_to_sleep > 0:
                time.sleep(time_to_sleep)
    if command.succeeded:
        logger.info("Command completed successfully in %s.", timer)
Example #5
Source File: server.py From python-executor with MIT License | 5 votes |
def generate_key_file(self, filename):
    """
    Generate a temporary host or client key for the OpenSSH server.

    The :func:`start()` method automatically calls :func:`generate_key_file()`
    to generate :data:`host_key_file` and :attr:`client_key_file`. This method
    uses the ``ssh-keygen`` program to generate the keys.
    """
    # Nothing to do when the key file already exists.
    if os.path.isfile(filename):
        return
    stopwatch = Timer()
    self.logger.debug("Generating SSH key file (%s) ..", filename)
    execute(
        'ssh-keygen', '-f', filename, '-N', '', '-t', 'rsa',
        silent=True, logger=self.logger,
    )
    self.logger.debug("Generated key file %s in %s.", filename, stopwatch)
Example #6
Source File: __init__.py From python-rotate-backups with MIT License | 5 votes |
def rotate_concurrent(self, *locations, **kw):
    """
    Rotate the backups in the given locations concurrently.

    :param locations: One or more values accepted by :func:`coerce_location()`.
    :param kw: Any keyword arguments are passed on to :func:`rotate_backups()`.

    Rotation commands are prepared with :func:`rotate_backups()` and then
    executed in parallel (one backup per mount point at a time). Parallel
    rotation pays off when the files to be removed live on different disks,
    so multiple devices can be utilized at the same time; because mount
    points are per system this also parallelizes over remote systems.
    """
    stopwatch = Timer()
    command_pool = CommandPool(concurrency=10)
    logger.info("Scanning %s ..", pluralize(len(locations), "backup location"))
    for place in locations:
        for command in self.rotate_backups(place, prepare=True, **kw):
            command_pool.add(command)
    if command_pool.num_commands > 0:
        label = pluralize(command_pool.num_commands, "backup")
        logger.info("Preparing to rotate %s (in parallel) ..", label)
        command_pool.run()
        logger.info("Successfully rotated %s in %s.", label, stopwatch)
Example #7
Source File: utils.py From TC-ResNet with Apache License 2.0 | 5 votes |
def timeit(method):
    """Decorator that prints how long each call to ``method`` takes.

    :param method: The callable to instrument.
    :returns: A wrapper that forwards all positional/keyword arguments,
              prints the rounded elapsed time reported by
              :class:`humanfriendly.Timer` and returns the result.
    """
    import functools

    # functools.wraps preserves __name__, __doc__ etc. of the wrapped
    # callable (the original wrapper reported itself as `timed').
    @functools.wraps(method)
    def timed(*args, **kw):
        hf_timer = hf.Timer()
        result = method(*args, **kw)
        print("<Timeit> {!r} ({!r}, {!r}) {}".format(method.__name__, args, kw, hf_timer.rounded))
        return result
    return timed
Example #8
Source File: utils.py From TC-ResNet with Apache License 2.0 | 5 votes |
def timer(name):
    """Context-manager body that prints how long the ``with`` block took.

    :param name: Label used in the printed message (a string).
    """
    # time.perf_counter() is monotonic, so the measured interval cannot be
    # distorted by system clock adjustments (unlike time.time()).
    st = time.perf_counter()
    yield
    print("<Timer> {} : {}".format(name, format_timespan(time.perf_counter() - st)))
Example #9
Source File: http.py From apt-smart with MIT License | 5 votes |
def fetch_worker(url):
    """
    Fetch the given URL for :func:`fetch_concurrent()`.

    :param url: The URL to fetch (a string).
    :returns: A tuple of three values:

              1. The URL that was fetched (a string).
              2. The data that was fetched (a string or :data:`None`).
              3. The number of seconds it took to fetch the URL (a number).
    """
    # Ignore Control-C instead of raising KeyboardInterrupt because (due to a
    # quirk in multiprocessing) this can cause the parent and child processes
    # to get into a deadlock kind of state where only Control-Z will get you
    # your precious terminal back; super annoying IMHO.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    stopwatch = Timer()
    try:
        payload = fetch_url(url, retry=False)
    except Exception as error:
        logger.debug("Failed to fetch %s! (%s)", url, error)
        payload = None
    else:
        throughput = format_size(round(len(payload) / stopwatch.elapsed_time, 2))
        logger.debug("Downloaded %s at %s per second.", url, throughput)
    return url, payload, stopwatch.elapsed_time
Example #10
Source File: ubuntu.py From apt-smart with MIT License | 5 votes |
def discover_mirror_selection():
    """Discover "geographically suitable" Ubuntu mirrors."""
    stopwatch = Timer()
    logger.info("Identifying fast Ubuntu mirrors using %s ..", MIRROR_SELECTION_URL)
    # A short timeout combined with several retries copes better with
    # unstable connections to MIRROR_SELECTION_URL.
    data = fetch_url(MIRROR_SELECTION_URL, timeout=3, retry=True, max_attempts=5)
    dammit = UnicodeDammit(data)
    candidates = set()
    for line in dammit.unicode_markup.splitlines():
        # Note: the scheme check is applied to the raw line on purpose;
        # only the URL handed to CandidateMirror gets stripped.
        if line and not line.isspace() and line.startswith(('http://', 'https://')):
            candidates.add(CandidateMirror(mirror_url=line.strip()))
    logger.debug("Found %s in %s.", pluralize(len(candidates), "fast Ubuntu mirror"), stopwatch)
    return candidates
Example #11
Source File: ubuntu.py From apt-smart with MIT License | 5 votes |
def discover_mirrors():
    """
    Discover available Ubuntu mirrors.

    :returns: A set of :class:`.CandidateMirror` objects that have their
              :attr:`~.CandidateMirror.mirror_url` property set and may have
              the :attr:`~.CandidateMirror.last_updated` property set.
    :raises: If no mirrors are discovered an exception is raised.

    This only queries :data:`MIRROR_SELECTION_URL` to discover available
    Ubuntu mirrors. Here's an example run:

    >>> from apt_smart.backends.ubuntu import discover_mirrors
    >>> from pprint import pprint
    >>> pprint(discover_mirrors())
    """
    timer = Timer()
    # Fix: removed the dead `mirrors = set()' assignment that was
    # unconditionally overwritten on the next line.
    mirrors = discover_mirror_selection()
    if not mirrors:
        logger.warning("Failed to discover any Ubuntu mirrors! (using %s)" % MIRROR_SELECTION_URL)
        logger.info("Trying to use %s as fallback" % MIRRORS_URL)
        mirrors = discover_mirrors_old()
    elif len(mirrors) < 2:
        logger.warning("Too few mirrors, trying to use %s to find more" % MIRRORS_URL)
        mirrors |= discover_mirrors_old()  # add mirrors from discover_mirrors_old()
    logger.info("Discovered %s in %s.", pluralize(len(mirrors), "Ubuntu mirror"), timer)
    return mirrors
Example #12
Source File: __init__.py From apt-smart with MIT License | 5 votes |
def dumb_update(self, *args):
    """
    Update the system's package lists (by running ``apt-get update``).

    :param args: Command line arguments to ``apt-get update`` (zero or more strings).

    This method performs no error handling or retrying; if that's what
    you're looking for then you need :func:`smart_update()` instead.
    """
    stopwatch = Timer()
    logger.info("Updating package lists of %s ..", self.context)
    self.context.execute('apt-get', 'update', *args, sudo=True)
    logger.info("Finished updating package lists of %s in %s.", self.context, stopwatch)
Example #13
Source File: __init__.py From apt-smart with MIT License | 5 votes |
def clear_package_lists(self):
    """Clear the package list cache by removing the files under ``/var/lib/apt/lists``."""
    stopwatch = Timer()
    logger.info("Clearing package list cache of %s ..", self.context)
    # An ugly but necessary find | xargs pipeline: find's -delete option
    # implies -depth which negates -prune, so it can't be used here.
    pipeline = 'find /var/lib/apt/lists -type f -name lock -prune -o -type f -print0 | xargs -0 rm -f'
    self.context.execute(pipeline, sudo=True)
    logger.info("Successfully cleared package list cache of %s in %s.", self.context, stopwatch)
Example #14
Source File: utils.py From MMNet with Apache License 2.0 | 5 votes |
def timeit(method):
    """Decorator that prints how long each call to ``method`` takes.

    :param method: The callable to instrument.
    :returns: A wrapper that forwards all positional/keyword arguments,
              prints the rounded elapsed time reported by
              :class:`humanfriendly.Timer` and returns the result.
    """
    import functools

    # functools.wraps preserves __name__, __doc__ etc. of the wrapped
    # callable (the original wrapper reported itself as `timed').
    @functools.wraps(method)
    def timed(*args, **kw):
        hf_timer = hf.Timer()
        result = method(*args, **kw)
        print("<Timeit> {!r} ({!r}, {!r}) {}".format(method.__name__, args, kw, hf_timer.rounded))
        return result
    return timed
Example #15
Source File: utils.py From MMNet with Apache License 2.0 | 5 votes |
def timer(name):
    """Context-manager body that prints how long the ``with`` block took.

    :param name: Label used in the printed message (a string).
    """
    # time.perf_counter() is monotonic, so the measured interval cannot be
    # distorted by system clock adjustments (unlike time.time()).
    st = time.perf_counter()
    yield
    print("<Timer> {} : {}".format(name, format_timespan(time.perf_counter() - st)))
Example #16
Source File: tests.py From python-rsync-system-backup with MIT License | 5 votes |
def test_notifications(self):
    """Test the desktop notification functionality."""
    stopwatch = Timer()
    backup = RsyncSystemBackup(destination='/backups/system')
    # Happy path: notify-send reports success.
    with MockedProgram('notify-send', returncode=0):
        backup.notify_starting()
        backup.notify_finished(stopwatch)
        backup.notify_failed(stopwatch)
    # Sad path: notify-send fails, yet no exceptions may escape.
    with MockedProgram('notify-send', returncode=1):
        backup.notify_starting()
        backup.notify_finished(stopwatch)
        backup.notify_failed(stopwatch)
Example #17
Source File: cli.py From python-executor with MIT License | 5 votes |
def apply_fudge_factor(fudge_factor):
    """
    Apply the requested scheduling fudge factor.

    :param fudge_factor: The maximum number of seconds to sleep (a number).

    Previous implementations of the fudge factor interrupt used UNIX signals
    (specifically ``SIGUSR1``) but the use of this signal turned out to be
    sensitive to awkward race conditions and it wasn't very cross platform,
    so now the creation of a regular file is used to interrupt the fudge
    factor.
    """
    if not fudge_factor:
        return
    stopwatch = Timer()
    logger.debug("Calculating fudge factor based on user defined maximum (%s) ..",
                 format_timespan(fudge_factor))
    planned_sleep = fudge_factor * random.random()
    logger.info("Sleeping for %s because of user defined fudge factor ..",
                format_timespan(planned_sleep))
    interrupt_file = get_lock_path(INTERRUPT_FILE)
    interrupted = False
    while stopwatch.elapsed_time < planned_sleep:
        if os.path.isfile(interrupt_file):
            logger.info("Fudge factor sleep was interrupted! (%s exists)", interrupt_file)
            interrupted = True
            break
        remaining = min(1, planned_sleep - stopwatch.elapsed_time)
        if remaining > 0:
            time.sleep(remaining)
    # Matches the original while/else: only log completion when the loop
    # was not broken out of by the interrupt file.
    if not interrupted:
        logger.info("Finished sleeping because of fudge factor (took %s).", stopwatch)
Example #18
Source File: tcp.py From python-executor with MIT License | 5 votes |
def port_number(self):
    """A dynamically selected free ephemeral port number (an integer between 49152 and 65535)."""
    stopwatch = Timer()
    logger.debug("Looking for free ephemeral port number ..")
    attempt = 0
    while True:
        attempt += 1
        candidate = self.ephemeral_port_number
        set_property(self, 'port_number', candidate)
        # A port we can't connect to is assumed to be free.
        if not self.is_connected:
            logger.debug("Found free ephemeral port number %s after %s (took %s).",
                         candidate, pluralize(attempt, "attempt"), stopwatch)
            return candidate
Example #19
Source File: tests.py From python-executor with MIT License | 5 votes |
def test_cli_timeout(self):
    """Ensure that external commands can be timed out."""
    def timeout_hammer():
        stopwatch = Timer()
        exit_code, output = run_cli(
            main, '--timeout=5',
            *python_golf('import time', 'time.sleep(10)')
        )
        # The command must have been killed before its 10 second sleep ended.
        assert exit_code != 0
        assert stopwatch.elapsed_time < 10
    retry(timeout_hammer, 60)
Example #20
Source File: tests.py From python-executor with MIT License | 5 votes |
def test_cli_fudge_factor(self, fudge_factor=5):
    """Try to ensure that the fudge factor applies (a bit tricky to get right) ..."""
    def fudge_factor_hammer():
        stopwatch = Timer()
        exit_code, output = run_cli(
            main, '--fudge-factor=%i' % fudge_factor,
            *python_golf('import sys', 'sys.exit(0)')
        )
        # The command itself succeeds quickly, so any significant delay
        # must have come from the fudge factor sleep.
        assert exit_code == 0
        assert stopwatch.elapsed_time > (fudge_factor / 2.0)
    retry(fudge_factor_hammer, 60)
Example #21
Source File: tests.py From python-executor with MIT License | 5 votes |
def test_command_pool(self):
    """Make sure command pools actually run multiple commands in parallel."""
    command_count = 10
    seconds_to_sleep = 4
    pool = CommandPool(5)
    for _ in range(command_count):
        pool.add(ExternalCommand('sleep %i' % seconds_to_sleep))
    stopwatch = Timer()
    outcomes = pool.run()
    assert all(cmd.returncode == 0 for cmd in outcomes.values())
    # Serial execution would take command_count * seconds_to_sleep seconds;
    # parallel execution must finish well before that.
    assert stopwatch.elapsed_time < (command_count * seconds_to_sleep)
Example #22
Source File: tests.py From python-executor with MIT License | 5 votes |
def check_termination(self, method):
    """Helper method for process termination tests.

    :param method: Name of the termination method to invoke on the
                   command (a string, e.g. ``'terminate'``).
    """
    with ExternalCommand('sleep', '60', check=False) as cmd:
        timer = Timer()
        # We use a positive but very low timeout so that all of the code
        # involved gets a chance to run, but without slowing us down.
        getattr(cmd, method)(timeout=0.1)
        # Gotcha: Call wait() so that the process (our own subprocess) is
        # reclaimed because until we do so proc.is_running will be True!
        cmd.wait()
        # Now we can verify our assertions.
        assert not cmd.is_running, "Child still running despite graceful termination request!"
        # Fix: corrected the typo "It look too long" in the failure message.
        assert timer.elapsed_time < 10, "It took too long to terminate the child!"
Example #23
Source File: tests.py From python-executor with MIT License | 5 votes |
def test_graceful_to_forceful_fallback(self):
    """Test that graceful termination falls back to forceful termination."""
    stopwatch = Timer()
    planned_lifetime = 60
    with NonGracefulCommand('sleep', str(planned_lifetime), check=False) as cmd:
        # Request graceful termination even though we know it will fail.
        cmd.terminate(timeout=1)
        # The process must have terminated even though the graceful
        # termination request was ignored ...
        assert not cmd.is_running
        # ... and it must have been the forceful fall back that killed it,
        # not the process simply reaching the end of its expected lifetime.
        assert stopwatch.elapsed_time < planned_lifetime
Example #24
Source File: __init__.py From python-rsync-system-backup with MIT License | 5 votes |
def create_snapshot(self):
    """
    Create a snapshot of the destination directory.

    :raises: The following exceptions can be raised:

             - :exc:`.DestinationContextUnavailable`, refer to
               :attr:`destination_context` for details.
             - :exc:`.ParentDirectoryUnavailable`, refer to
               :attr:`.parent_directory` for details.
             - :exc:`~executor.ExternalCommandFailed` when the ``cp``
               command reports an error.
    """
    # Compose the `cp' command needed to create a snapshot.
    snapshot = os.path.join(self.destination.parent_directory,
                            time.strftime('%Y-%m-%d %H:%M:%S'))
    cp_command = ['cp', '--archive', '--link', self.destination.directory, snapshot]
    if self.dry_run:
        # Only report what would be executed.
        logger.info("Snapshot command: %s", quote(cp_command))
    else:
        stopwatch = Timer()
        logger.info("Creating snapshot: %s", snapshot)
        self.destination_context.execute(*cp_command, ionice=self.ionice)
        logger.info("Took %s to create snapshot.", stopwatch)
Example #25
Source File: __init__.py From python-rsync-system-backup with MIT License | 5 votes |
def execute_helper(self):
    """Helper for :func:`execute()`.

    Runs the configured backup/snapshot/rotation steps in order, emitting
    start/finish/failure notifications around them and logging a summary
    of the actions that were performed.
    """
    timer = Timer()
    actions = []
    # Fail early when the encrypted backup disk isn't attached.
    if self.crypto_device and not self.crypto_device_available:
        msg = "Encrypted filesystem %s isn't available! (the device file %s doesn't exist)"
        raise MissingBackupDiskError(msg % (self.crypto_device, self.crypttab_entry.source_device))
    if self.backup_enabled:
        self.notify_starting()
    self.unlock_device()
    try:
        self.mount_filesystem()
        # Each enabled step is recorded in `actions' for the summary below.
        if self.backup_enabled:
            self.transfer_changes()
            actions.append('create backup')
        if self.snapshot_enabled:
            self.create_snapshot()
            actions.append('create snapshot')
        if self.rotate_enabled:
            self.rotate_snapshots()
            actions.append('rotate old snapshots')
    except Exception:
        # Report the failure (with elapsed time) before propagating it.
        self.notify_failed(timer)
        raise
    else:
        if self.backup_enabled:
            self.notify_finished(timer)
    if actions:
        logger.info("Took %s to %s.", timer, concatenate(actions))
Example #26
Source File: __init__.py From apt-smart with MIT License | 4 votes |
def change_mirror(self, new_mirror=None, update=True):
    """
    Change the main mirror in use in :attr:`main_sources_list`.

    :param new_mirror: The URL of the new mirror (a string, defaults to
                       :attr:`best_mirror`).
    :param update: Whether an ``apt-get update`` should be run after changing
                   the mirror (a boolean, defaults to :data:`True`).
    """
    timer = Timer()
    # Default to the best available mirror.
    if new_mirror:
        logger.info("Changing mirror of %s to %s ..", self.context, new_mirror)
    else:
        logger.info("Changing mirror of %s to best available mirror ..", self.context)
        new_mirror = self.best_mirror
        logger.info("Selected mirror: %s", new_mirror)
    # Parse /etc/apt/sources.list to replace the old mirror with the new one.
    sources_list = self.get_sources_list()
    mirrors_to_replace = [normalize_mirror_url(self.current_mirror)]
    if self.release_is_eol:
        # When a release goes EOL the security updates mirrors stop
        # serving that release as well, so we need to remove them.
        logger.debug("Replacing %s URLs as well ..", self.security_url)
        mirrors_to_replace.append(normalize_mirror_url(self.security_url))
    else:
        logger.debug("Not replacing %s URLs.", self.security_url)
    lines = sources_list.splitlines()
    sources_list_options = self.get_sources_list_options
    for i, line in enumerate(lines):
        # The first token should be `deb' or `deb-src', the second token is
        # the mirror's URL, the third token is the `distribution' and any
        # further tokens are `components'.
        tokens = line.split()
        if (len(tokens) >= 4
                and tokens[0] in ('deb', 'deb-src')
                and normalize_mirror_url(tokens[1]) in mirrors_to_replace):
            tokens[1] = new_mirror
            if i in sources_list_options:
                # Get the [options] back.
                tokens.insert(1, '[' + sources_list_options[i] + ']')
            lines[i] = u' '.join(tokens)
    # Install the modified package resource list.
    self.install_sources_list(u'\n'.join(lines))
    # Clear (relevant) cached properties.
    del self.current_mirror
    # Make sure previous package lists are removed.
    self.clear_package_lists()
    # Make sure the package lists are up to date.
    if update:
        self.smart_update(switch_mirrors=False)
    logger.info("Finished changing mirror of %s in %s.", self.context, timer)
Example #27
Source File: concurrent.py From python-executor with MIT License | 4 votes |
def run(self):
    """
    Keep spawning commands and collecting results until all commands have run.

    :returns: The value of :attr:`results`.
    :raises: Any exceptions raised by :func:`collect()`.

    This method calls :func:`spawn()` and :func:`collect()` in a loop until
    all commands registered using :func:`add()` have run and finished. If
    :func:`collect()` raises an exception any running commands are terminated
    before the exception is propagated to the caller.

    If you're writing code where you want to own the main loop then consider
    calling :func:`spawn()` and :func:`collect()` directly instead of using
    :func:`run()`.

    When :attr:`concurrency` is set to one, specific care is taken to make
    sure that the callbacks configured by the user (if any) are called in the
    expected (intuitive) order.
    """
    # Start spawning processes to execute the commands.
    timer = Timer()
    logger.debug("Preparing to run %s with a concurrency of %i ..",
                 pluralize(self.num_commands, "command"), self.concurrency)
    try:
        with self.get_spinner(timer) as spinner:
            num_started = 0
            num_collected = 0
            while not self.is_finished:
                # When concurrency is set to one (I know, initially it
                # sounds like a silly use case, bear with me) I want the
                # start_event and finish_event callbacks of external
                # commands to fire in the right order. The following
                # conditional is intended to accomplish this goal.
                if self.concurrency > (num_started - num_collected):
                    num_started += self.spawn()
                num_collected += self.collect()
                spinner.step(label=format(
                    "Waiting for %i/%i %s",
                    self.num_commands - self.num_finished, self.num_commands,
                    "command" if self.num_commands == 1 else "commands",
                ))
                spinner.sleep()
    except Exception:
        if self.num_running > 0:
            logger.warning("Command pool raised exception, terminating running commands!")
        # Terminate commands that are still running.
        self.terminate()
        # Re-raise the exception to the caller.
        raise
    # Collect the output and return code of any commands not yet collected.
    self.collect()
    logger.debug("Finished running %s in %s.",
                 pluralize(self.num_commands, "command"), timer)
    # Report the results to the caller.
    return self.results