Python oslo_utils.timeutils.now() Examples

The following are 8 code examples of oslo_utils.timeutils.now(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module oslo_utils.timeutils, or try the search function.
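All of these examples use timeutils.now() the same way: as a source of fractional seconds from a monotonic clock, suited to measuring elapsed time and checking deadlines rather than reading wall-clock time. A minimal sketch of that elapsed-time pattern (the work() function is a hypothetical placeholder, not part of any project below):

import time

from oslo_utils import timeutils


def work():
    # Hypothetical placeholder for a real workload.
    time.sleep(0.1)


start = timeutils.now()
work()
print("work() took %0.3f seconds" % (timeutils.now() - start))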
Example #1
Source File: utils.py    From manila-ui with Apache License 2.0
def set_project_name_to_objects(request, objects):
    global PROJECTS, TIME
    try:
        # NOTE(vponomaryov): we reuse saved values when making lots of
        # requests in a short period of time; 'memoized' is not suitable here.
        now = timeutils.now()
        if TIME is None:
            TIME = now
        if not PROJECTS or now > TIME + 20:
            projects, has_more = keystone.tenant_list(request)
            PROJECTS = {t.id: t for t in projects}
            TIME = now
    except Exception:
        msg = _('Unable to retrieve list of projects.')
        exceptions.handle(request, msg)

    for obj in objects:
        project_id = getattr(obj, "project_id", None)
        project = PROJECTS.get(project_id, None)
        obj.project_name = getattr(project, "name", None) 
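The function above is a small time-to-live cache: the Keystone project list is fetched at most once every 20 seconds and reused in between. Stripped of the Keystone specifics, the pattern looks roughly like this (a sketch; fetch_projects and the 20-second window are illustrative assumptions, not part of manila-ui):

from oslo_utils import timeutils

_CACHE = None
_CACHE_TIME = None
_TTL = 20  # seconds, matching the window used above


def get_cached(fetch_projects):
    # Refresh the cache only when it is missing or older than _TTL seconds.
    global _CACHE, _CACHE_TIME
    now = timeutils.now()
    if _CACHE is None or _CACHE_TIME is None or now > _CACHE_TIME + _TTL:
        _CACHE = fetch_projects()
        _CACHE_TIME = now
    return _CACHE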
Example #2
Source File: executor.py    From taskflow with Apache License 2.0
def _handle_expired_request(request):
    """Handle an expired request.

    When a request has expired it is removed from the ongoing requests
    dictionary and a ``RequestTimeout`` exception is set as the
    request result.
    """
    if request.transition_and_log_error(pr.FAILURE, logger=LOG):
        # Raise an exception (and then catch it) so we get a nice
        # traceback that the request will get instead of it getting
        # just an exception with no traceback...
        try:
            request_age = timeutils.now() - request.created_on
            raise exc.RequestTimeout(
                "Request '%s' has expired after waiting for %0.2f"
                " seconds for it to transition out of (%s) states"
                % (request, request_age, ", ".join(pr.WAITING_STATES)))
        except exc.RequestTimeout:
            with misc.capture_failure() as failure:
                LOG.debug(failure.exception_str)
                request.set_result(failure)
        return True
    return False
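The raise-and-catch dance above exists only to give the stored failure a real traceback: an exception object that is never raised carries no traceback, so the code raises the RequestTimeout, catches it immediately, and captures it while the traceback is live. The core trick, shown with the standard library instead of taskflow's misc.capture_failure() helper (a sketch; make_timeout_failure is a hypothetical name):

import traceback


def make_timeout_failure(request_age):
    try:
        raise TimeoutError("expired after %0.2f seconds" % request_age)
    except TimeoutError:
        # Inside the except block the exception carries a populated
        # traceback; formatting (or capturing) it here preserves it.
        return traceback.format_exc()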
Example #3
Source File: executor.py    From taskflow with Apache License 2.0
def _publish_request(self, request, worker):
    """Publish request to a given topic."""
    LOG.debug("Submitting execution of '%s' to worker '%s' (expecting"
              " response identified by reply_to=%s and"
              " correlation_id=%s) - waited %0.3f seconds to"
              " get published", request, worker, self._uuid,
              request.uuid, timeutils.now() - request.created_on)
    try:
        self._proxy.publish(request, worker.topic,
                            reply_to=self._uuid,
                            correlation_id=request.uuid)
    except Exception:
        with misc.capture_failure() as failure:
            LOG.critical("Failed to submit '%s' (transitioning it to"
                         " %s)", request, pr.FAILURE, exc_info=True)
            if request.transition_and_log_error(pr.FAILURE, logger=LOG):
                with self._ongoing_requests_lock:
                    del self._ongoing_requests[request.uuid]
                request.set_result(failure)
Example #4
Source File: executor.py    From taskflow with Apache License 2.0
def _on_wait(self):
    """This function is called cyclically between draining events."""
    # Publish any finding messages (used to locate workers).
    self._finder.maybe_publish()
    # If the finder hasn't heard from workers in a given amount
    # of time, then those workers are likely dead, so clean them out...
    self._finder.clean()
    # Process any expired requests or requests that have no current
    # worker located (publish messages for those if we now do have
    # a worker located).
    self._clean()
Example #5
Source File: types.py    From taskflow with Apache License 2.0
def process_response(self, data, message):
    """Process notify message sent from remote side."""
    LOG.debug("Started processing notify response message '%s'",
              ku.DelayedPretty(message))
    response = pr.Notify(**data)
    LOG.debug("Extracted notify response '%s'", response)
    with self._cond:
        worker, new_or_updated = self._add(response.topic,
                                           response.tasks)
        if new_or_updated:
            LOG.debug("Updated worker '%s' (%s total workers are"
                      " currently known)", worker, self.total_workers)
            self._cond.notify_all()
        worker.last_seen = timeutils.now()
        self._messages_processed += 1
Example #6
Source File: types.py    From taskflow with Apache License 2.0
def clean(self):
    """Cleans out any dead/expired/not responding workers.

    Returns how many workers were removed.
    """
    if (not self._workers or
            (self._worker_expiry is None or self._worker_expiry <= 0)):
        return 0
    dead_workers = {}
    with self._cond:
        now = timeutils.now()
        for topic, worker in six.iteritems(self._workers):
            if worker.last_seen is None:
                continue
            secs_since_last_seen = max(0, now - worker.last_seen)
            if secs_since_last_seen >= self._worker_expiry:
                dead_workers[topic] = (worker, secs_since_last_seen)
        for topic in six.iterkeys(dead_workers):
            self._workers.pop(topic)
        if dead_workers:
            self._cond.notify_all()
    if dead_workers and LOG.isEnabledFor(logging.INFO):
        for worker, secs_since_last_seen in six.itervalues(dead_workers):
            LOG.info("Removed worker '%s' as it has not responded to"
                     " notification requests in %0.3f seconds",
                     worker, secs_since_last_seen)
    return len(dead_workers)
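Examples #5 and #6 are two halves of a heartbeat-expiry scheme: process_response() stamps each worker with timeutils.now() whenever it is heard from, and clean() removes any worker whose stamp is older than the expiry. Condensed to its essentials, the check looks like this (a sketch; the last_seen dict layout and the expired_topics name are illustrative, not taskflow API):

from oslo_utils import timeutils


def expired_topics(last_seen, expiry):
    # last_seen maps topic -> the timeutils.now() stamp recorded when
    # that worker last responded; anything at least `expiry` seconds
    # stale is reported as dead.
    now = timeutils.now()
    return [topic for topic, seen in last_seen.items()
            if max(0, now - seen) >= expiry]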
Example #7
Source File: protocol.py    From taskflow with Apache License 2.0
def from_dict(data, task_uuid=None):
    """Parses **validated** data into a work unit.

    All :py:class:`~taskflow.types.failure.Failure` objects that have been
    converted to dict(s) on the remote side will now be converted back
    to :py:class:`~taskflow.types.failure.Failure` objects.
    """
    task_cls = data['task_cls']
    task_name = data['task_name']
    action = data['action']
    arguments = data.get('arguments', {})
    result = data.get('result')
    failures = data.get('failures')
    # These arguments will eventually be given to the task executor
    # so they need to be in a format it will accept (and using keyword
    # argument names that it accepts)...
    arguments = {
        'arguments': arguments,
    }
    if task_uuid is not None:
        arguments['task_uuid'] = task_uuid
    if result is not None:
        result_data_type, result_data = result
        if result_data_type == 'failure':
            arguments['result'] = ft.Failure.from_dict(result_data)
        else:
            arguments['result'] = result_data
    if failures is not None:
        arguments['failures'] = {}
        for task, fail_data in six.iteritems(failures):
            arguments['failures'][task] = ft.Failure.from_dict(fail_data)
    return _WorkUnit(task_cls, task_name, action, arguments)
Example #8
Source File: lockutils.py    From oslo.concurrency with Apache License 2.0
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None,
                 semaphores=None, delay=0.01, fair=False):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.

    .. versionchanged:: 0.3
   Added *delay* and *semaphores* optional parameters.
    """

    def wrap(f):

        @functools.wraps(f)
        def inner(*args, **kwargs):
            t1 = timeutils.now()
            t2 = None
            try:
                with lock(name, lock_file_prefix, external, lock_path,
                          do_log=False, semaphores=semaphores, delay=delay,
                          fair=fair):
                    t2 = timeutils.now()
                    LOG.debug('Lock "%(name)s" acquired by "%(function)s" :: '
                              'waited %(wait_secs)0.3fs',
                              {'name': name,
                               'function': reflection.get_callable_name(f),
                               'wait_secs': (t2 - t1)})
                    return f(*args, **kwargs)
            finally:
                t3 = timeutils.now()
                if t2 is None:
                    held_secs = "N/A"
                else:
                    held_secs = "%0.3fs" % (t3 - t2)
                LOG.debug('Lock "%(name)s" released by "%(function)s" :: held '
                          '%(held_secs)s',
                          {'name': name,
                           'function': reflection.get_callable_name(f),
                           'held_secs': held_secs})
        return inner

    return wrap
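For completeness, a minimal usage sketch of the decorator above (assuming oslo.concurrency is installed; the function name and body are placeholders):

from oslo_concurrency import lockutils


@lockutils.synchronized('mylock')
def update_shared_state():
    # Only one thread runs this body at a time; with debug logging
    # enabled, the decorator reports how long the lock was waited on
    # and how long it was held.
    pass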