Python oslo_utils.timeutils.StopWatch() Examples

The following are 30 code examples of oslo_utils.timeutils.StopWatch(). Each example notes the original project and source file it was taken from. You may also want to check out all available functions and classes of the oslo_utils.timeutils module.
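
Before the project examples, here is a minimal, self-contained sketch of the StopWatch patterns that recur below: manual start()/elapsed() timing, a watch created with a duration that reports expired() and leftover(), and context-manager use. The sleep lengths and the 0.5 second duration are arbitrary illustration values, not taken from any of the projects listed.

import time

from oslo_utils import timeutils

# Manual timing: start the watch, do some work, read the elapsed time.
watch = timeutils.StopWatch()
watch.start()
time.sleep(0.1)
print("elapsed: %.3f seconds" % watch.elapsed())
watch.stop()

# A watch created with a duration can report expiry and remaining time,
# which is the pattern most of the wait() examples below rely on.
deadline = timeutils.StopWatch(duration=0.5)
deadline.start()
while not deadline.expired():
    print("time left: %.3f seconds" % deadline.leftover())
    time.sleep(0.2)

# StopWatch is also a context manager; it starts on entry and stops on exit.
with timeutils.StopWatch() as timed_block:
    time.sleep(0.1)
print("block took %.3f seconds" % timed_block.elapsed())
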
Example #1
Source File: loopingcall.py    From oslo.service with Apache License 2.0
def _start(self, idle_for, initial_delay=None, stop_on_exception=True):
        """Start the looping

        :param idle_for: Callable that takes two positional arguments, returns
                         how long to idle for. The first positional argument is
                         the last result from the function being looped and the
                         second positional argument is the time it took to
                         calculate that result.
        :param initial_delay: How long to delay before starting the looping.
                              Value is in seconds.
        :param stop_on_exception: Whether to stop if an exception occurs.
        :returns: eventlet event instance
        """
        if self._thread is not None:
            raise RuntimeError(self._RUN_ONLY_ONE_MESSAGE)
        self.done = event.Event()
        self._abort.clear()
        self._thread = greenthread.spawn(
            self._run_loop, idle_for,
            initial_delay=initial_delay, stop_on_exception=stop_on_exception)
        self._thread.link(self._on_done)
        return self.done

    # NOTE(bnemec): This is just a wrapper function we can mock so we aren't
    # affected by other users of the StopWatch class. 
Example #2
Source File: health_manager.py    From senlin with Apache License 2.0
def _wait_for_action(self, ctx, action_id, timeout):
        req = objects.ActionGetRequest(identity=action_id)
        action = {}
        with timeutils.StopWatch(timeout) as timeout_watch:
            while not timeout_watch.expired():
                action = self.rpc_client.call(ctx, 'action_get', req)
                if action['status'] in [consts.ACTION_SUCCEEDED,
                                        consts.ACTION_FAILED,
                                        consts.ACTION_CANCELLED]:
                    break
                eventlet.sleep(2)

        if not action:
            return False, "Failed to retrieve action."

        elif action['status'] == consts.ACTION_SUCCEEDED:
            return True, ""

        elif (action['status'] == consts.ACTION_FAILED or
              action['status'] == consts.ACTION_CANCELLED):
            return False, "Cluster check action failed or cancelled"

        return False, ("Timeout while waiting for node recovery action to "
                       "finish") 
Example #3
Source File: impl_zookeeper.py    From taskflow with Apache License 2.0
def wait(self, timeout=None):
        # Wait until timeout expires (or forever) for jobs to appear.
        watch = timeutils.StopWatch(duration=timeout)
        watch.start()
        with self._job_cond:
            while True:
                if not self._known_jobs:
                    if watch.expired():
                        raise excp.NotFound("Expired waiting for jobs to"
                                            " arrive; waited %s seconds"
                                            % watch.elapsed())
                    # This is done since the given timeout can not be provided
                    # to the condition variable, since we can not ensure that
                    # when we acquire the condition that there will actually
                    # be jobs (especially if we are spuriously awakened), so we
                    # must recalculate the amount of time we really have left.
                    self._job_cond.wait(watch.leftover(return_none=True))
                else:
                    curr_jobs = self._fetch_jobs()
                    fetch_func = lambda ensure_fresh: curr_jobs
                    removal_func = lambda a_job: self._remove_job(a_job.path)
                    return base.JobBoardIterator(
                        self, LOG, board_fetch_func=fetch_func,
                        board_removal_func=removal_func) 
Example #4
Source File: latch.py    From taskflow with Apache License 2.0
def wait(self, timeout=None):
        """Waits until the latch is released.

        :param timeout: wait until the timeout expires
        :type timeout: number
        :returns: true if the latch has been released before the
                  timeout expires otherwise false
        :rtype: boolean
        """
        watch = timeutils.StopWatch(duration=timeout)
        watch.start()
        with self._cond:
            while self._count > 0:
                if watch.expired():
                    return False
                else:
                    self._cond.wait(watch.leftover(return_none=True))
            return True 
Example #5
Source File: protocol.py    From taskflow with Apache License 2.0
def __init__(self, task, uuid, action,
                 arguments, timeout=REQUEST_TIMEOUT, result=NO_RESULT,
                 failures=None):
        self._action = action
        self._event = ACTION_TO_EVENT[action]
        self._arguments = arguments
        self._result = result
        self._failures = failures
        self._watch = timeutils.StopWatch(duration=timeout).start()
        self._lock = threading.Lock()
        self._machine = build_a_machine()
        self._machine.initialize()
        self.task = task
        self.uuid = uuid
        self.created_on = timeutils.now()
        self.future = futurist.Future()
        self.future.atom = task 
Example #6
Source File: types.py    From taskflow with Apache License 2.0 6 votes vote down vote up
def wait_for_workers(self, workers=1, timeout=None):
        """Waits for geq workers to notify they are ready to do work.

        NOTE(harlowja): if a timeout is provided this function will wait
        until that timeout expires; if the number of workers does not reach
        the desired amount before the timeout expires then this will return
        how many workers are still needed, otherwise it will return zero.
        """
        if workers <= 0:
            raise ValueError("Worker amount must be greater than zero")
        watch = timeutils.StopWatch(duration=timeout)
        watch.start()
        with self._cond:
            while self.total_workers < workers:
                if watch.expired():
                    return max(0, workers - self.total_workers)
                self._cond.wait(watch.leftover(return_none=True))
            return 0 
Example #7
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_splits(self, mock_now):
        mock_now.side_effect = monotonic_iter()

        watch = timeutils.StopWatch()
        watch.start()
        self.assertEqual(0, len(watch.splits))

        watch.split()
        self.assertEqual(1, len(watch.splits))
        self.assertEqual(watch.splits[0].elapsed,
                         watch.splits[0].length)

        watch.split()
        splits = watch.splits
        self.assertEqual(2, len(splits))
        self.assertNotEqual(splits[0].elapsed, splits[1].elapsed)
        self.assertEqual(splits[1].length,
                         splits[1].elapsed - splits[0].elapsed)

        watch.stop()
        self.assertEqual(2, len(watch.splits))

        watch.start()
        self.assertEqual(0, len(watch.splits)) 
Example #8
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_context_manager_splits(self, mock_now):
        mock_now.side_effect = monotonic_iter()
        with timeutils.StopWatch() as watch:
            time.sleep(0.01)
            watch.split()
        self.assertRaises(RuntimeError, watch.split)
        self.assertEqual(1, len(watch.splits)) 
Example #9
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_splits_stopped(self):
        watch = timeutils.StopWatch()
        watch.start()
        watch.split()
        watch.stop()
        self.assertRaises(RuntimeError, watch.split) 
Example #10
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_context_manager(self, mock_now):
        mock_now.side_effect = monotonic_iter()
        with timeutils.StopWatch() as watch:
            pass
        matcher = matchers.GreaterThan(0.04)
        self.assertThat(watch.elapsed(), matcher) 
Example #11
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_pause_resume(self, mock_now):
        mock_now.side_effect = monotonic_iter()
        watch = timeutils.StopWatch()
        watch.start()
        watch.stop()
        elapsed = watch.elapsed()
        self.assertAlmostEqual(elapsed, watch.elapsed())
        watch.resume()
        self.assertNotEqual(elapsed, watch.elapsed()) 
Example #12
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_no_leftover(self):
        watch = timeutils.StopWatch()
        self.assertRaises(RuntimeError, watch.leftover)
        watch = timeutils.StopWatch(1)
        self.assertRaises(RuntimeError, watch.leftover) 
Example #13
Source File: lbaasv2.py    From kuryr-kubernetes with Apache License 2.0
def _provisioning_timer(self, timeout,
                            interval=_LB_STS_POLL_FAST_INTERVAL):
        # REVISIT(ivc): consider integrating with Retry
        max_interval = 15
        with timeutils.StopWatch(duration=timeout) as timer:
            while not timer.expired():
                yield timer.leftover()
                interval = interval * 2 * random.gauss(0.8, 0.05)
                interval = min(interval, max_interval)
                interval = min(interval, timer.leftover())
                if interval:
                    time.sleep(interval) 
Example #14
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_elapsed(self, mock_now):
        mock_now.side_effect = monotonic_iter(incr=0.2)
        watch = timeutils.StopWatch()
        watch.start()
        matcher = matchers.GreaterThan(0.19)
        self.assertThat(watch.elapsed(), matcher) 
Example #15
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_no_expiry(self):
        watch = timeutils.StopWatch(0.1)
        self.assertRaises(RuntimeError, watch.expired) 
Example #16
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_has_started_stopped(self):
        watch = timeutils.StopWatch()
        self.assertFalse(watch.has_started())
        self.assertFalse(watch.has_stopped())
        watch.start()

        self.assertTrue(watch.has_started())
        self.assertFalse(watch.has_stopped())

        watch.stop()
        self.assertTrue(watch.has_stopped())
        self.assertFalse(watch.has_started()) 
Example #17
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_elapsed_maximum(self, mock_now):
        mock_now.side_effect = [0, 1] + ([11] * 4)

        watch = timeutils.StopWatch()
        watch.start()
        self.assertEqual(1, watch.elapsed())

        self.assertEqual(11, watch.elapsed())
        self.assertEqual(1, watch.elapsed(maximum=1))

        watch.stop()
        self.assertEqual(11, watch.elapsed())
        self.assertEqual(11, watch.elapsed())
        self.assertEqual(0, watch.elapsed(maximum=-1)) 
Example #18
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_not_expired(self, mock_now):
        mock_now.side_effect = monotonic_iter()
        watch = timeutils.StopWatch(0.1)
        watch.start()
        self.assertFalse(watch.expired()) 
Example #19
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_expiry(self, mock_now):
        mock_now.side_effect = monotonic_iter(incr=0.2)
        watch = timeutils.StopWatch(0.1)
        watch.start()
        self.assertTrue(watch.expired()) 
Example #20
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_bad_expiry(self):
        self.assertRaises(ValueError, timeutils.StopWatch, -1) 
Example #21
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_no_states(self):
        watch = timeutils.StopWatch()
        self.assertRaises(RuntimeError, watch.stop)
        self.assertRaises(RuntimeError, watch.resume) 
Example #22
Source File: test_timeutils.py    From oslo.utils with Apache License 2.0
def test_leftover_no_duration(self):
        watch = timeutils.StopWatch()
        watch.start()
        self.assertRaises(RuntimeError, watch.leftover)
        self.assertRaises(RuntimeError, watch.leftover, return_none=False)
        self.assertIsNone(watch.leftover(return_none=True)) 
Example #23
Source File: eventletutils.py    From oslo.utils with Apache License 2.0
def wait(self, timeout=None):
        with timeutils.StopWatch(timeout) as sw:
            while True:
                event = self._event
                with _eventlet.timeout.Timeout(sw.leftover(return_none=True),
                                               False):
                    event.wait()
                    if event is not self._event:
                        continue
                return self.is_set() 
Example #24
Source File: speed_test.py    From taskflow with Apache License 2.0
def __init__(self, name, args):
        self.watch = timeutils.StopWatch()
        self.name = name
        self.args = args 
Example #25
Source File: impl_redis.py    From taskflow with Apache License 2.0
def wait(self, timeout=None, initial_delay=0.005,
             max_delay=1.0, sleep_func=time.sleep):
        if initial_delay > max_delay:
            raise ValueError("Initial delay %s must be less than or equal"
                             " to the provided max delay %s"
                             % (initial_delay, max_delay))
        # This does a spin-loop that backs off by doubling the delay
        # up to the provided max-delay. In the future we could try having
        # a secondary client connected into redis pubsub and use that
        # instead, but for now this is simpler.
        w = timeutils.StopWatch(duration=timeout)
        w.start()
        delay = initial_delay
        while True:
            jc = self.job_count
            if jc > 0:
                curr_jobs = self._fetch_jobs()
                if curr_jobs:
                    return base.JobBoardIterator(
                        self, LOG,
                        board_fetch_func=lambda ensure_fresh: curr_jobs)
            if w.expired():
                raise exc.NotFound("Expired waiting for jobs to"
                                   " arrive; waited %s seconds"
                                   % w.elapsed())
            else:
                remaining = w.leftover(return_none=True)
                if remaining is not None:
                    delay = min(delay * 2, remaining, max_delay)
                else:
                    delay = min(delay * 2, max_delay)
                sleep_func(delay) 
Example #26
Source File: base.py    From taskflow with Apache License 2.0
def wait(self, timeout=None,
             delay=0.01, delay_multiplier=2.0, max_delay=60.0,
             sleep_func=time.sleep):
        """Wait for job to enter completion state.

        If the job has not completed in the given timeout, then return false,
        otherwise return true (a job failure exception may also be raised if
        the job information can not be read, for whatever reason). Periodic
        state checks will happen every ``delay`` seconds where ``delay`` will
        be multiplied by the given multiplier after a state is found that is
        **not** complete.

        Note that if no timeout is given this is equivalent to blocking
        until the job has completed. Also note that if a jobboard backend
        can optimize this method then its implementation may not use
        delays (and backoffs) at all. In general though no matter what
        optimizations are applied implementations must **always** respect
        the given timeout value.
        """
        if timeout is not None:
            w = timeutils.StopWatch(duration=timeout)
            w.start()
        else:
            w = None
        delay_gen = iter_utils.generate_delays(delay, max_delay,
                                               multiplier=delay_multiplier)
        while True:
            if w is not None and w.expired():
                return False
            if self.state == states.COMPLETE:
                return True
            sleepy_secs = six.next(delay_gen)
            if w is not None:
                sleepy_secs = min(w.leftover(), sleepy_secs)
            sleep_func(sleepy_secs)
        return False 
Example #27
Source File: types.py    From taskflow with Apache License 2.0
def __init__(self, uuid, proxy, topics,
                 beat_periodicity=pr.NOTIFY_PERIOD,
                 worker_expiry=pr.EXPIRES_AFTER):
        self._cond = threading.Condition()
        self._proxy = proxy
        self._topics = topics
        self._workers = {}
        self._uuid = uuid
        self._seen_workers = 0
        self._messages_processed = 0
        self._messages_published = 0
        self._worker_expiry = worker_expiry
        self._watch = timeutils.StopWatch(duration=beat_periodicity) 
Example #28
Source File: server.py    From taskflow with Apache License 2.0
def _delayed_process(self, func):
        """Runs the function using the instances executor (eventually).

        This adds a *nice* benefit of showing how long it took for the
        function to finally be executed from when the message was received
        to when it was finally run (which can be a nice thing to know
        when determining bottlenecks...).
        """
        func_name = reflection.get_callable_name(func)

        def _on_run(watch, content, message):
            LOG.trace("It took %s seconds to get around to running"
                      " function/method '%s' with"
                      " message '%s'", watch.elapsed(), func_name,
                      ku.DelayedPretty(message))
            return func(content, message)

        def _on_receive(content, message):
            LOG.debug("Submitting message '%s' for execution in the"
                      " future to '%s'", ku.DelayedPretty(message), func_name)
            watch = timeutils.StopWatch()
            watch.start()
            try:
                self._executor.submit(_on_run, watch, content, message)
            except RuntimeError:
                LOG.error("Unable to continue processing message '%s',"
                          " submission to instance executor (with later"
                          " execution by '%s') was unsuccessful",
                          ku.DelayedPretty(message), func_name,
                          exc_info=True)

        return _on_receive 
Example #29
Source File: loopingcall.py    From oslo.service with Apache License 2.0
def _run_loop(self, idle_for_func,
                  initial_delay=None, stop_on_exception=True):
        kind = self._KIND
        func_name = reflection.get_callable_name(self.f)
        func = self.f if stop_on_exception else _safe_wrapper(self.f, kind,
                                                              func_name)
        if initial_delay:
            self._sleep(initial_delay)
        try:
            watch = timeutils.StopWatch()
            while self._running:
                watch.restart()
                result = func(*self.args, **self.kw)
                watch.stop()
                if not self._running:
                    break
                idle = idle_for_func(result, self._elapsed(watch))
                LOG.trace('%(kind)s %(func_name)r sleeping '
                          'for %(idle).02f seconds',
                          {'func_name': func_name, 'idle': idle,
                           'kind': kind})
                self._sleep(idle)
        except LoopingCallDone as e:
            self.done.send(e.retvalue)
        except Exception:
            exc_info = sys.exc_info()
            try:
                LOG.error('%(kind)s %(func_name)r failed',
                          {'kind': kind, 'func_name': func_name},
                          exc_info=exc_info)
                self.done.send_exception(*exc_info)
            finally:
                del exc_info
            return
        else:
            self.done.send(True) 
Example #30
Source File: excutils.py    From oslo.utils with Apache License 2.0
def forever_retry_uncaught_exceptions(*args, **kwargs):
    """Decorates provided function with infinite retry behavior.

    The function retry delay is **always** one second unless
    keyword argument ``retry_delay`` is passed that defines a value different
    than 1.0 (less than zero values are automatically changed to be 0.0).

    If repeated exceptions with the same message occur, logging will only
    output/get triggered for those equivalent messages every 60.0
    seconds; this can be altered by the keyword argument ``same_log_delay`` to
    be a value different than 60.0 seconds (exceptions that change the
    message are always logged no matter what this delay is set to). As in
    the ``retry_delay`` case if this is less than zero, it will be
    automatically changed to be 0.0.
    """

    def decorator(infunc):
        retry_delay = max(0.0, float(kwargs.get('retry_delay', 1.0)))
        same_log_delay = max(0.0, float(kwargs.get('same_log_delay', 60.0)))

        @six.wraps(infunc)
        def wrapper(*args, **kwargs):
            last_exc_message = None
            same_failure_count = 0
            watch = timeutils.StopWatch(duration=same_log_delay)
            while True:
                try:
                    return infunc(*args, **kwargs)
                except Exception as exc:
                    this_exc_message = encodeutils.exception_to_unicode(exc)
                    if this_exc_message == last_exc_message:
                        same_failure_count += 1
                    else:
                        same_failure_count = 1
                    if this_exc_message != last_exc_message or watch.expired():
                        # The watch has expired or the exception message
                        # changed, so time to log it again...
                        logging.exception(
                            'Unexpected exception occurred %d time(s)... '
                            'retrying.' % same_failure_count)
                        if not watch.has_started():
                            watch.start()
                        else:
                            watch.restart()
                        same_failure_count = 0
                        last_exc_message = this_exc_message
                    time.sleep(retry_delay)
        return wrapper

    # This is needed to handle when the decorator has args or the decorator
    # doesn't have args, python is rather weird here...
    if kwargs or not args:
        return decorator
    else:
        if len(args) == 1:
            return decorator(args[0])
        else:
            return decorator
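
For completeness, here is a hedged usage sketch of the decorator shown in Example #30; the function name, attempt counter, and delay values are purely illustrative and not taken from oslo.utils itself.

from oslo_utils import excutils

attempts = {"count": 0}


@excutils.forever_retry_uncaught_exceptions(retry_delay=1.0, same_log_delay=30.0)
def flaky_task():
    # Fails twice, then succeeds; the decorator logs each failure and
    # retries after retry_delay seconds until the call returns normally.
    attempts["count"] += 1
    if attempts["count"] < 3:
        raise RuntimeError("transient backend error")
    return attempts["count"]


print(flaky_task())  # prints 3 after two logged retries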