Python google.appengine.runtime.apiproxy_errors.DeadlineExceededError() Examples
The following are 30
code examples of google.appengine.runtime.apiproxy_errors.DeadlineExceededError().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
google.appengine.runtime.apiproxy_errors, or try the search function.
Example #1
Source File: main.py From python-docs-samples with Apache License 2.0 | 6 votes |
def get(self):
    """Indefinitely fetch tasks and update the datastore."""
    pull_queue = taskqueue.Queue('pullq')
    while True:
        try:
            leased = pull_queue.lease_tasks_by_tag(3600, 1000, deadline=60)
        except (taskqueue.TransientError,
                apiproxy_errors.DeadlineExceededError) as exc:
            # Transient lease failure: log, back off briefly, and retry.
            logging.exception(exc)
            time.sleep(1)
            continue
        if leased:
            counter_key = leased[0].tag
            try:
                update_counter(counter_key, leased)
            except Exception as exc:
                logging.exception(exc)
                raise
            finally:
                # Always delete the leased tasks, even if the update failed.
                pull_queue.delete_tasks(leased)
        time.sleep(1)
Example #2
Source File: main.py From python-compat-runtime with Apache License 2.0 | 6 votes |
def GetBackups(self, limit=100, deadline=10):
  """Obtain a list of backups.

  Args:
    limit: maximum number of backup records to retrieve.
    deadline: maximum number of seconds to spend getting backups.

  Returns:
    List of backups, sorted in reverse order by completion time.
  """
  found = []
  backup_query = backup_handler.BackupInformation.all()
  backup_query.filter('complete_time > ', 0)
  backup_query.order('-complete_time')
  try:
    found.extend(backup_query.run(deadline=deadline, limit=limit))
  except (datastore_errors.Timeout, apiproxy_errors.DeadlineExceededError):
    # Best effort: return whatever was fetched before the deadline hit.
    logging.warning('Failed to retrieve all backups within deadline.')
  return found
Example #3
Source File: apiproxy.py From python-compat-runtime with Apache License 2.0 | 6 votes |
def _WaitImpl(self):
  """Waits on the API call associated with this RPC.

  The callback, if provided, will be executed before Wait() returns. If this
  RPC is already complete, or if the RPC was never started, this function
  will return immediately.

  Raises:
    InterruptedError if a callback throws an uncaught exception.
  """
  try:
    rpc_completed = _apphosting_runtime___python__apiproxy.Wait(self)
  except (runtime.DeadlineExceededError, apiproxy_errors.InterruptedError):
    # These two are part of the public contract; propagate unchanged.
    raise
  except:
    # Any other exception escaping a callback is translated into
    # InterruptedError so callers see a uniform failure type.
    exc_class, exc, tb = sys.exc_info()
    if (isinstance(exc, SystemError) and
        exc.args[0] == 'uncaught RPC exception'):
      # Runtime-internal sentinel error; do not wrap it.
      raise
    rpc = None
    if hasattr(exc, "_appengine_apiproxy_rpc"):
      # Attach the originating RPC, if the exception carries one.
      rpc = exc._appengine_apiproxy_rpc
    new_exc = apiproxy_errors.InterruptedError(exc, rpc)
    # Python 2 three-argument raise: re-raise with the original traceback.
    raise new_exc.__class__, new_exc, tb
  return True
Example #4
Source File: signature.py From luci-py with Apache License 2.0 | 6 votes |
def get_own_public_certificates():
  """Returns CertificateBundle with certificates of the current service."""
  # Up to 3 RPC attempts; re-raise the deadline error on the last one.
  for tries_left in (2, 1, 0):
    try:
      certs = app_identity.get_public_certificates(deadline=1.5)
      break
    except apiproxy_errors.DeadlineExceededError as exc:
      logging.warning('%s', exc)
      if not tries_left:
        raise
  return CertificateBundle({
      'app_id': app_identity.get_application_id(),
      'service_account_name': utils.get_service_account_name(),
      'certificates': [
          {
              'key_name': cert.key_name,
              'x509_certificate_pem': cert.x509_certificate_pem,
          }
          for cert in certs
      ],
      'timestamp': utils.datetime_to_timestamp(utils.utcnow()),
  })
Example #5
Source File: signature.py From luci-py with Apache License 2.0 | 6 votes |
def get_own_public_certificates():
  """Returns CertificateBundle with certificates of the current service."""
  # Up to 3 RPC attempts; re-raise the deadline error on the last one.
  for tries_left in (2, 1, 0):
    try:
      certs = app_identity.get_public_certificates(deadline=1.5)
      break
    except apiproxy_errors.DeadlineExceededError as exc:
      logging.warning('%s', exc)
      if not tries_left:
        raise
  return CertificateBundle({
      'app_id': app_identity.get_application_id(),
      'service_account_name': utils.get_service_account_name(),
      'certificates': [
          {
              'key_name': cert.key_name,
              'x509_certificate_pem': cert.x509_certificate_pem,
          }
          for cert in certs
      ],
      'timestamp': utils.datetime_to_timestamp(utils.utcnow()),
  })
Example #6
Source File: signature.py From luci-py with Apache License 2.0 | 6 votes |
def get_own_public_certificates():
  """Returns CertificateBundle with certificates of the current service."""
  # Up to 3 RPC attempts; re-raise the deadline error on the last one.
  for tries_left in (2, 1, 0):
    try:
      certs = app_identity.get_public_certificates(deadline=1.5)
      break
    except apiproxy_errors.DeadlineExceededError as exc:
      logging.warning('%s', exc)
      if not tries_left:
        raise
  return CertificateBundle({
      'app_id': app_identity.get_application_id(),
      'service_account_name': utils.get_service_account_name(),
      'certificates': [
          {
              'key_name': cert.key_name,
              'x509_certificate_pem': cert.x509_certificate_pem,
          }
          for cert in certs
      ],
      'timestamp': utils.datetime_to_timestamp(utils.utcnow()),
  })
Example #7
Source File: signature.py From luci-py with Apache License 2.0 | 6 votes |
def get_own_public_certificates():
  """Returns CertificateBundle with certificates of the current service."""
  # Up to 3 RPC attempts; re-raise the deadline error on the last one.
  for tries_left in (2, 1, 0):
    try:
      certs = app_identity.get_public_certificates(deadline=1.5)
      break
    except apiproxy_errors.DeadlineExceededError as exc:
      logging.warning('%s', exc)
      if not tries_left:
        raise
  return CertificateBundle({
      'app_id': app_identity.get_application_id(),
      'service_account_name': utils.get_service_account_name(),
      'certificates': [
          {
              'key_name': cert.key_name,
              'x509_certificate_pem': cert.x509_certificate_pem,
          }
          for cert in certs
      ],
      'timestamp': utils.datetime_to_timestamp(utils.utcnow()),
  })
Example #8
Source File: utils.py From python-compat-runtime with Apache License 2.0 | 5 votes |
def GetKindsForCurrentNamespace(deadline):
  """Obtain a list of all kind names from the datastore.

  Pulls kinds from the current namespace only. The result is alphabetized.

  Args:
    deadline: maximum number of seconds to spend getting kinds.

  Returns:
    kinds: an alphabetized list of kinds for the specified namespace(s).
    more_kinds: a boolean indicating whether there may be additional kinds
      not included in 'kinds' (e.g. because the query limit was reached).
  """
  hit_deadline = False
  names = []
  try:
    kind_query = metadata.Kind.all().order('__key__')
    for entity in kind_query.run(batch_size=1000, deadline=deadline):
      name = entity.kind_name
      if IsKindNameVisible(name):
        names.append(name)
  except (datastore_errors.Timeout, apiproxy_errors.DeadlineExceededError):
    # Keep the partial result gathered so far; flag that it is incomplete.
    hit_deadline = True
    logging.warning('Failed to retrieve all kinds within deadline.')
  return names, hit_deadline
Example #9
Source File: api.py From luci-py with Apache License 2.0 | 5 votes |
def attempt_oauth_initialization(scope):
  """Attempts to perform GetOAuthUser RPC retrying deadlines.

  The result is cached in appengine.api.oauth guts. Never raises exceptions,
  just gives up, letting subsequent oauth.* calls fail in a proper way.
  """
  # 4 attempts: ~20 sec (default RPC deadline is 5 sec).
  for _ in range(4):
    try:
      oauth.get_client_id(scope)
      return
    except apiproxy_errors.DeadlineExceededError as exc:
      logging.warning('DeadlineExceededError: %s', exc)
    except oauth.OAuthServiceFailureError as exc:
      logging.warning(
          'oauth.OAuthServiceFailureError (%s): %s',
          exc.__class__.__name__, exc)
      # oauth library "caches" the error code in os.environ and retrying
      # oauth.get_client_id doesn't do anything. Clear this cache first, see
      # oauth_api.py, _maybe_call_get_oauth_user in GAE SDK.
      os.environ.pop('OAUTH_ERROR_CODE', None)
    except oauth.Error as exc:
      # Next call to oauth.get_client_id() will trigger same error and it will
      # be handled for real.
      logging.warning('oauth.Error (%s): %s', exc.__class__.__name__, exc)
      return
Example #10
Source File: signature.py From luci-py with Apache License 2.0 | 5 votes |
def _fetch_service_certs(service_url):
  """Fetches the /auth/api/v1/server/certificates JSON from a service.

  Retries transient failures a few times; raises CertificateError (marked
  transient) if every attempt fails.
  """
  protocol = 'https://'
  if utils.is_local_dev_server():
    protocol = ('http://', 'https://')
  assert service_url.startswith(protocol), (service_url, protocol)
  url = '%s/auth/api/v1/server/certificates' % service_url
  # Retry code is adapted from components/net.py. net.py can't be used directly
  # since it depends on components.auth (and dependency cycles between
  # components are bad).
  result = None
  for attempt in range(4):
    if attempt:
      logging.info('Retrying...')
    logging.info('GET %s', url)
    try:
      result = urlfetch.fetch(
          url=url,
          method='GET',
          headers={'X-URLFetch-Service-Id': utils.get_urlfetch_service_id()},
          follow_redirects=False,
          deadline=5,
          validate_certificate=True)
    except (apiproxy_errors.DeadlineExceededError, urlfetch.Error) as exc:
      # Transient network error or URL fetch service RPC deadline.
      logging.warning('GET %s failed: %s', url, exc)
      continue
    # It MUST return 200 on success, it can't return 403, 404 or >=500.
    if result.status_code != 200:
      logging.warning(
          'GET %s failed, HTTP %d: %r',
          url, result.status_code, result.content)
      continue
    return json.loads(result.content)
  # All attempts failed, give up.
  msg = 'Failed to grab public certs from %s (HTTP code %s)' % (
      service_url, result.status_code if result else '???')
  raise CertificateError(msg, transient=True)
Example #11
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def _get_memory_usage():
  """Returns the amount of memory used as a float in MiB, or None on failure."""
  transient_errors = (
      AssertionError,
      apiproxy_errors.CancelledError,
      apiproxy_errors.DeadlineExceededError,
      apiproxy_errors.RPCFailedError,
      runtime.DeadlineExceededError,
  )
  try:
    return apiruntime.runtime.memory_usage().current()
  except transient_errors as exc:
    logging.warning('Failed to get memory usage: %s', exc)
    return None


## Handler
Example #12
Source File: api.py From luci-py with Apache License 2.0 | 5 votes |
def attempt_oauth_initialization(scope):
  """Attempts to perform GetOAuthUser RPC retrying deadlines.

  The result is cached in appengine.api.oauth guts. Never raises exceptions,
  just gives up, letting subsequent oauth.* calls fail in a proper way.
  """
  # 4 attempts: ~20 sec (default RPC deadline is 5 sec).
  for _ in range(4):
    try:
      oauth.get_client_id(scope)
      return
    except apiproxy_errors.DeadlineExceededError as exc:
      logging.warning('DeadlineExceededError: %s', exc)
    except oauth.OAuthServiceFailureError as exc:
      logging.warning(
          'oauth.OAuthServiceFailureError (%s): %s',
          exc.__class__.__name__, exc)
      # oauth library "caches" the error code in os.environ and retrying
      # oauth.get_client_id doesn't do anything. Clear this cache first, see
      # oauth_api.py, _maybe_call_get_oauth_user in GAE SDK.
      os.environ.pop('OAUTH_ERROR_CODE', None)
    except oauth.Error as exc:
      # Next call to oauth.get_client_id() will trigger same error and it will
      # be handled for real.
      logging.warning('oauth.Error (%s): %s', exc.__class__.__name__, exc)
      return
Example #13
Source File: signature.py From luci-py with Apache License 2.0 | 5 votes |
def _fetch_service_certs(service_url):
  """Fetches the /auth/api/v1/server/certificates JSON from a service.

  Retries transient failures a few times; raises CertificateError (marked
  transient) if every attempt fails.
  """
  protocol = 'https://'
  if utils.is_local_dev_server():
    protocol = ('http://', 'https://')
  assert service_url.startswith(protocol), (service_url, protocol)
  url = '%s/auth/api/v1/server/certificates' % service_url
  # Retry code is adapted from components/net.py. net.py can't be used directly
  # since it depends on components.auth (and dependency cycles between
  # components are bad).
  result = None
  for attempt in range(4):
    if attempt:
      logging.info('Retrying...')
    logging.info('GET %s', url)
    try:
      result = urlfetch.fetch(
          url=url,
          method='GET',
          headers={'X-URLFetch-Service-Id': utils.get_urlfetch_service_id()},
          follow_redirects=False,
          deadline=5,
          validate_certificate=True)
    except (apiproxy_errors.DeadlineExceededError, urlfetch.Error) as exc:
      # Transient network error or URL fetch service RPC deadline.
      logging.warning('GET %s failed: %s', url, exc)
      continue
    # It MUST return 200 on success, it can't return 403, 404 or >=500.
    if result.status_code != 200:
      logging.warning(
          'GET %s failed, HTTP %d: %r',
          url, result.status_code, result.content)
      continue
    return json.loads(result.content)
  # All attempts failed, give up.
  msg = 'Failed to grab public certs from %s (HTTP code %s)' % (
      service_url, result.status_code if result else '???')
  raise CertificateError(msg, transient=True)
Example #14
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def report_memory(app):
  """Wraps an app so handlers log when memory usage increased by at least
  0.5MB after the handler completed.
  """
  min_delta = 0.5
  original_dispatch = app.router.dispatch

  def dispatch_and_report(*args, **kwargs):
    start_usage = _get_memory_usage()
    hit_deadline = False
    try:
      return original_dispatch(*args, **kwargs)
    except runtime.DeadlineExceededError:
      # Don't try to call any function after, it'll likely fail anyway. It is
      # because _get_memory_usage() does an RPC under the hood.
      hit_deadline = True
      raise
    finally:
      if not hit_deadline:
        end_usage = _get_memory_usage()
        if start_usage and end_usage and end_usage >= start_usage + min_delta:
          logging.debug(
              'Memory usage: %.1f -> %.1f MB; delta: %.1f MB',
              start_usage, end_usage, end_usage - start_usage)

  app.router.dispatch = dispatch_and_report


## Time
Example #15
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def _get_memory_usage():
  """Returns the amount of memory used as a float in MiB, or None on failure."""
  transient_errors = (
      AssertionError,
      apiproxy_errors.CancelledError,
      apiproxy_errors.DeadlineExceededError,
      apiproxy_errors.RPCFailedError,
      runtime.DeadlineExceededError,
  )
  try:
    return apiruntime.runtime.memory_usage().current()
  except transient_errors as exc:
    logging.warning('Failed to get memory usage: %s', exc)
    return None


## Handler
Example #16
Source File: api.py From luci-py with Apache License 2.0 | 5 votes |
def attempt_oauth_initialization(scope):
  """Attempts to perform GetOAuthUser RPC retrying deadlines.

  The result is cached in appengine.api.oauth guts. Never raises exceptions,
  just gives up, letting subsequent oauth.* calls fail in a proper way.
  """
  # 4 attempts: ~20 sec (default RPC deadline is 5 sec).
  for _ in range(4):
    try:
      oauth.get_client_id(scope)
      return
    except apiproxy_errors.DeadlineExceededError as exc:
      logging.warning('DeadlineExceededError: %s', exc)
    except oauth.OAuthServiceFailureError as exc:
      logging.warning(
          'oauth.OAuthServiceFailureError (%s): %s',
          exc.__class__.__name__, exc)
      # oauth library "caches" the error code in os.environ and retrying
      # oauth.get_client_id doesn't do anything. Clear this cache first, see
      # oauth_api.py, _maybe_call_get_oauth_user in GAE SDK.
      os.environ.pop('OAUTH_ERROR_CODE', None)
    except oauth.Error as exc:
      # Next call to oauth.get_client_id() will trigger same error and it will
      # be handled for real.
      logging.warning('oauth.Error (%s): %s', exc.__class__.__name__, exc)
      return
Example #17
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def report_memory(app):
  """Wraps an app so handlers log when memory usage increased by at least
  0.5MB after the handler completed.
  """
  min_delta = 0.5
  original_dispatch = app.router.dispatch

  def dispatch_and_report(*args, **kwargs):
    start_usage = _get_memory_usage()
    hit_deadline = False
    try:
      return original_dispatch(*args, **kwargs)
    except runtime.DeadlineExceededError:
      # Don't try to call any function after, it'll likely fail anyway. It is
      # because _get_memory_usage() does an RPC under the hood.
      hit_deadline = True
      raise
    finally:
      if not hit_deadline:
        end_usage = _get_memory_usage()
        if start_usage and end_usage and end_usage >= start_usage + min_delta:
          logging.debug(
              'Memory usage: %.1f -> %.1f MB; delta: %.1f MB',
              start_usage, end_usage, end_usage - start_usage)

  app.router.dispatch = dispatch_and_report


## Time
Example #18
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def _get_memory_usage():
  """Returns the amount of memory used as a float in MiB, or None on failure."""
  transient_errors = (
      AssertionError,
      apiproxy_errors.CancelledError,
      apiproxy_errors.DeadlineExceededError,
      apiproxy_errors.RPCFailedError,
      runtime.DeadlineExceededError,
  )
  try:
    return apiruntime.runtime.memory_usage().current()
  except transient_errors as exc:
    logging.warning('Failed to get memory usage: %s', exc)
    return None


## Handler
Example #19
Source File: api.py From luci-py with Apache License 2.0 | 5 votes |
def attempt_oauth_initialization(scope):
  """Attempts to perform GetOAuthUser RPC retrying deadlines.

  The result is cached in appengine.api.oauth guts. Never raises exceptions,
  just gives up, letting subsequent oauth.* calls fail in a proper way.
  """
  # 4 attempts: ~20 sec (default RPC deadline is 5 sec).
  for _ in range(4):
    try:
      oauth.get_client_id(scope)
      return
    except apiproxy_errors.DeadlineExceededError as exc:
      logging.warning('DeadlineExceededError: %s', exc)
    except oauth.OAuthServiceFailureError as exc:
      logging.warning(
          'oauth.OAuthServiceFailureError (%s): %s',
          exc.__class__.__name__, exc)
      # oauth library "caches" the error code in os.environ and retrying
      # oauth.get_client_id doesn't do anything. Clear this cache first, see
      # oauth_api.py, _maybe_call_get_oauth_user in GAE SDK.
      os.environ.pop('OAUTH_ERROR_CODE', None)
    except oauth.Error as exc:
      # Next call to oauth.get_client_id() will trigger same error and it will
      # be handled for real.
      logging.warning('oauth.Error (%s): %s', exc.__class__.__name__, exc)
      return
Example #20
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def _get_memory_usage():
  """Returns the amount of memory used as a float in MiB, or None on failure."""
  transient_errors = (
      AssertionError,
      apiproxy_errors.CancelledError,
      apiproxy_errors.DeadlineExceededError,
      apiproxy_errors.RPCFailedError,
      runtime.DeadlineExceededError,
  )
  try:
    return apiruntime.runtime.memory_usage().current()
  except transient_errors as exc:
    logging.warning('Failed to get memory usage: %s', exc)
    return None


## Handler
Example #21
Source File: signature.py From luci-py with Apache License 2.0 | 5 votes |
def _fetch_service_certs(service_url):
  """Fetches the /auth/api/v1/server/certificates JSON from a service.

  Retries transient failures a few times; raises CertificateError (marked
  transient) if every attempt fails.
  """
  protocol = 'https://'
  if utils.is_local_dev_server():
    protocol = ('http://', 'https://')
  assert service_url.startswith(protocol), (service_url, protocol)
  url = '%s/auth/api/v1/server/certificates' % service_url
  # Retry code is adapted from components/net.py. net.py can't be used directly
  # since it depends on components.auth (and dependency cycles between
  # components are bad).
  result = None
  for attempt in range(4):
    if attempt:
      logging.info('Retrying...')
    logging.info('GET %s', url)
    try:
      result = urlfetch.fetch(
          url=url,
          method='GET',
          headers={'X-URLFetch-Service-Id': utils.get_urlfetch_service_id()},
          follow_redirects=False,
          deadline=5,
          validate_certificate=True)
    except (apiproxy_errors.DeadlineExceededError, urlfetch.Error) as exc:
      # Transient network error or URL fetch service RPC deadline.
      logging.warning('GET %s failed: %s', url, exc)
      continue
    # It MUST return 200 on success, it can't return 403, 404 or >=500.
    if result.status_code != 200:
      logging.warning(
          'GET %s failed, HTTP %d: %r',
          url, result.status_code, result.content)
      continue
    return json.loads(result.content)
  # All attempts failed, give up.
  msg = 'Failed to grab public certs from %s (HTTP code %s)' % (
      service_url, result.status_code if result else '???')
  raise CertificateError(msg, transient=True)
Example #22
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def report_memory(app):
  """Wraps an app so handlers log when memory usage increased by at least
  0.5MB after the handler completed.
  """
  min_delta = 0.5
  original_dispatch = app.router.dispatch

  def dispatch_and_report(*args, **kwargs):
    start_usage = _get_memory_usage()
    hit_deadline = False
    try:
      return original_dispatch(*args, **kwargs)
    except runtime.DeadlineExceededError:
      # Don't try to call any function after, it'll likely fail anyway. It is
      # because _get_memory_usage() does an RPC under the hood.
      hit_deadline = True
      raise
    finally:
      if not hit_deadline:
        end_usage = _get_memory_usage()
        if start_usage and end_usage and end_usage >= start_usage + min_delta:
          logging.debug(
              'Memory usage: %.1f -> %.1f MB; delta: %.1f MB',
              start_usage, end_usage, end_usage - start_usage)

  app.router.dispatch = dispatch_and_report


## Time
Example #23
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def report_memory(app):
  """Wraps an app so handlers log when memory usage increased by at least
  0.5MB after the handler completed.
  """
  min_delta = 0.5
  original_dispatch = app.router.dispatch

  def dispatch_and_report(*args, **kwargs):
    start_usage = _get_memory_usage()
    hit_deadline = False
    try:
      return original_dispatch(*args, **kwargs)
    except runtime.DeadlineExceededError:
      # Don't try to call any function after, it'll likely fail anyway. It is
      # because _get_memory_usage() does an RPC under the hood.
      hit_deadline = True
      raise
    finally:
      if not hit_deadline:
        end_usage = _get_memory_usage()
        if start_usage and end_usage and end_usage >= start_usage + min_delta:
          logging.debug(
              'Memory usage: %.1f -> %.1f MB; delta: %.1f MB',
              start_usage, end_usage, end_usage - start_usage)

  app.router.dispatch = dispatch_and_report


## Time
Example #24
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def _get_memory_usage():
  """Returns the amount of memory used as a float in MiB, or None on failure."""
  transient_errors = (
      AssertionError,
      apiproxy_errors.CancelledError,
      apiproxy_errors.DeadlineExceededError,
      apiproxy_errors.RPCFailedError,
      runtime.DeadlineExceededError,
  )
  try:
    return apiruntime.runtime.memory_usage().current()
  except transient_errors as exc:
    logging.warning('Failed to get memory usage: %s', exc)
    return None


## Handler
Example #25
Source File: api.py From luci-py with Apache License 2.0 | 5 votes |
def attempt_oauth_initialization(scope):
  """Attempts to perform GetOAuthUser RPC retrying deadlines.

  The result is cached in appengine.api.oauth guts. Never raises exceptions,
  just gives up, letting subsequent oauth.* calls fail in a proper way.
  """
  # 4 attempts: ~20 sec (default RPC deadline is 5 sec).
  for _ in range(4):
    try:
      oauth.get_client_id(scope)
      return
    except apiproxy_errors.DeadlineExceededError as exc:
      logging.warning('DeadlineExceededError: %s', exc)
    except oauth.OAuthServiceFailureError as exc:
      logging.warning(
          'oauth.OAuthServiceFailureError (%s): %s',
          exc.__class__.__name__, exc)
      # oauth library "caches" the error code in os.environ and retrying
      # oauth.get_client_id doesn't do anything. Clear this cache first, see
      # oauth_api.py, _maybe_call_get_oauth_user in GAE SDK.
      os.environ.pop('OAUTH_ERROR_CODE', None)
    except oauth.Error as exc:
      # Next call to oauth.get_client_id() will trigger same error and it will
      # be handled for real.
      logging.warning('oauth.Error (%s): %s', exc.__class__.__name__, exc)
      return
Example #26
Source File: signature.py From luci-py with Apache License 2.0 | 5 votes |
def _fetch_service_certs(service_url):
  """Fetches the /auth/api/v1/server/certificates JSON from a service.

  Retries transient failures a few times; raises CertificateError (marked
  transient) if every attempt fails.
  """
  protocol = 'https://'
  if utils.is_local_dev_server():
    protocol = ('http://', 'https://')
  assert service_url.startswith(protocol), (service_url, protocol)
  url = '%s/auth/api/v1/server/certificates' % service_url
  # Retry code is adapted from components/net.py. net.py can't be used directly
  # since it depends on components.auth (and dependency cycles between
  # components are bad).
  result = None
  for attempt in range(4):
    if attempt:
      logging.info('Retrying...')
    logging.info('GET %s', url)
    try:
      result = urlfetch.fetch(
          url=url,
          method='GET',
          headers={'X-URLFetch-Service-Id': utils.get_urlfetch_service_id()},
          follow_redirects=False,
          deadline=5,
          validate_certificate=True)
    except (apiproxy_errors.DeadlineExceededError, urlfetch.Error) as exc:
      # Transient network error or URL fetch service RPC deadline.
      logging.warning('GET %s failed: %s', url, exc)
      continue
    # It MUST return 200 on success, it can't return 403, 404 or >=500.
    if result.status_code != 200:
      logging.warning(
          'GET %s failed, HTTP %d: %r',
          url, result.status_code, result.content)
      continue
    return json.loads(result.content)
  # All attempts failed, give up.
  msg = 'Failed to grab public certs from %s (HTTP code %s)' % (
      service_url, result.status_code if result else '???')
  raise CertificateError(msg, transient=True)
Example #27
Source File: bulkloader.py From browserscope with Apache License 2.0 | 4 votes |
def PerformWork(self, thread_pool):
    """Perform the work of this work item and report the results.

    Args:
      thread_pool: An AdaptiveThreadPool instance.

    Returns:
      A tuple (status, instruction) of the work status and an instruction
      for the ThreadGate.
    """
    # Pessimistic defaults; overwritten by the outcomes below.
    status = adaptive_thread_pool.WorkItem.FAILURE
    instruction = adaptive_thread_pool.ThreadGate.DECREASE
    try:
      self.MarkAsTransferring()

      try:
        transfer_time = self._TransferItem(thread_pool)
        if transfer_time is None:
          # No timing info means the transfer did not complete; retry and
          # hold the thread count steady.
          status = adaptive_thread_pool.WorkItem.RETRY
          instruction = adaptive_thread_pool.ThreadGate.HOLD
        else:
          logger.debug('[%s] %s Transferred %d entities in %0.1f seconds',
                       threading.currentThread().getName(), self, self.count,
                       transfer_time)
          sys.stdout.write('.')
          sys.stdout.flush()
          status = adaptive_thread_pool.WorkItem.SUCCESS
          # Fast transfers allow more concurrency; slower ones hold it.
          if transfer_time <= MAXIMUM_INCREASE_DURATION:
            instruction = adaptive_thread_pool.ThreadGate.INCREASE
          elif transfer_time <= MAXIMUM_HOLD_DURATION:
            instruction = adaptive_thread_pool.ThreadGate.HOLD

      # Python 2 except syntax; these datastore errors are transient.
      except (db.InternalError, db.NotSavedError, db.Timeout,
              db.TransactionFailedError,
              apiproxy_errors.OverQuotaError,
              apiproxy_errors.DeadlineExceededError,
              apiproxy_errors.ApplicationError), e:
        status = adaptive_thread_pool.WorkItem.RETRY
        logger.exception('Retrying on non-fatal datastore error: %s', e)
      except urllib2.HTTPError, e:
        http_status = e.code
        # 5xx responses are treated as transient; anything else is fatal.
        if http_status >= 500 and http_status < 600:
          status = adaptive_thread_pool.WorkItem.RETRY
          logger.exception('Retrying on non-fatal HTTP error: %d %s',
                           http_status, e.msg)
        else:
          self.SetError()
          status = adaptive_thread_pool.WorkItem.FAILURE
Example #28
Source File: utils.py From python-compat-runtime with Apache License 2.0 | 4 votes |
def GetKindsForAllNamespaces(deadline):
  """Obtain a list of all kind names from the datastore.

  Pulls kinds from all namespaces. The result is deduped and alphabetized.

  Args:
    deadline: maximum number of seconds to spend getting kinds.

  Returns:
    kinds: an alphabetized list of kinds for the specified namespace(s).
    more_kinds: a boolean indicating whether there may be additional kinds
      not included in 'kinds' (e.g. because the query deadline was reached).
  """
  start = time.time()
  # Shared accumulator mutated by the nested helper below.
  kind_name_set = set()

  def ReadFromKindIters(kind_iter_list):
    """Read kinds from a list of iterators.

    Reads a kind from each iterator in kind_iter_list, adds it to
    kind_name_set, and removes any completed iterators.

    Args:
      kind_iter_list: a list of iterators of kinds.
    """
    completed = []
    for kind_iter in kind_iter_list:
      try:
        # Python 2 iterator protocol (.next()).
        kind_name = kind_iter.next().kind_name
        if IsKindNameVisible(kind_name):
          kind_name_set.add(kind_name)
      except StopIteration:
        completed.append(kind_iter)
    for kind_iter in completed:
      kind_iter_list.remove(kind_iter)

  more_kinds = False
  try:
    namespace_iter = metadata.Namespace.all().run(batch_size=1000,
                                                  deadline=deadline)
    kind_iter_list = []
    for ns in namespace_iter:
      # Budget the remaining wall-clock deadline across the per-namespace
      # kind queries started below.
      remaining = deadline - (time.time() - start)
      if remaining <= 0:
        raise datastore_errors.Timeout
      kind_iter_list.append(metadata.Kind.all(namespace=ns.namespace_name)
                            .run(batch_size=1000, deadline=remaining))
      # Cap the number of concurrently outstanding kind queries.
      while len(kind_iter_list) == MAX_RPCS:
        ReadFromKindIters(kind_iter_list)
    # Drain whatever queries are still outstanding.
    while kind_iter_list:
      ReadFromKindIters(kind_iter_list)
  except (datastore_errors.Timeout, apiproxy_errors.DeadlineExceededError):
    # Keep the partial result; flag that it may be incomplete.
    more_kinds = True
    logging.warning('Failed to retrieve all kinds within deadline.')
  return sorted(kind_name_set), more_kinds
Example #29
Source File: bulkloader.py From python-compat-runtime with Apache License 2.0 | 4 votes |
def PerformWork(self, thread_pool):
    """Perform the work of this work item and report the results.

    Args:
      thread_pool: An AdaptiveThreadPool instance.

    Returns:
      A tuple (status, instruction) of the work status and an instruction
      for the ThreadGate.
    """
    # Pessimistic defaults; overwritten by the outcomes below.
    status = adaptive_thread_pool.WorkItem.FAILURE
    instruction = adaptive_thread_pool.ThreadGate.DECREASE
    try:
      self.MarkAsTransferring()

      try:
        transfer_time = self._TransferItem(thread_pool)
        if transfer_time is None:
          # No timing info means the transfer did not complete; retry and
          # hold the thread count steady.
          status = adaptive_thread_pool.WorkItem.RETRY
          instruction = adaptive_thread_pool.ThreadGate.HOLD
        else:
          logger.debug('[%s] %s Transferred %d entities in %0.1f seconds',
                       threading.currentThread().getName(), self, self.count,
                       transfer_time)
          sys.stdout.write('.')
          sys.stdout.flush()
          status = adaptive_thread_pool.WorkItem.SUCCESS
          # Fast transfers allow more concurrency; slower ones hold it.
          if transfer_time <= MAXIMUM_INCREASE_DURATION:
            instruction = adaptive_thread_pool.ThreadGate.INCREASE
          elif transfer_time <= MAXIMUM_HOLD_DURATION:
            instruction = adaptive_thread_pool.ThreadGate.HOLD

      # Python 2 except syntax; these datastore errors are transient.
      except (db.InternalError, db.NotSavedError, db.Timeout,
              db.TransactionFailedError,
              apiproxy_errors.OverQuotaError,
              apiproxy_errors.DeadlineExceededError,
              apiproxy_errors.ApplicationError), e:
        status = adaptive_thread_pool.WorkItem.RETRY
        logger.exception('Retrying on non-fatal datastore error: %s', e)
      except urllib2.HTTPError, e:
        http_status = e.code
        # 5xx responses are treated as transient; anything else is fatal.
        if http_status >= 500 and http_status < 600:
          status = adaptive_thread_pool.WorkItem.RETRY
          logger.exception('Retrying on non-fatal HTTP error: %d %s',
                           http_status, e.msg)
        else:
          self.SetError()
          status = adaptive_thread_pool.WorkItem.FAILURE
Example #30
Source File: service_account.py From luci-py with Apache License 2.0 | 4 votes |
def _call_async(url, payload, method, headers):
  """Makes URL fetch call aggressively retrying on errors a bunch of times.

  On success returns deserialized JSON response body.
  On failure raises AccessTokenError.
  """
  # NOTE(review): this is a generator (uses `yield` on _urlfetch and
  # `raise ndb.Return(...)`), i.e. ndb tasklet style — confirm it is
  # decorated/wrapped accordingly at the definition site.
  attempt = 0
  while attempt < 4:
    if attempt:
      logging.info('Retrying...')
    attempt += 1
    logging.info('%s %s', method, url)
    try:
      response = yield _urlfetch(
          url=url,
          payload=payload,
          method=method,
          headers=headers,
          follow_redirects=False,
          deadline=5,  # all RPCs we do should be fast
          validate_certificate=True)
    except (apiproxy_errors.DeadlineExceededError, urlfetch.Error) as e:
      # Transient network error or URL fetch service RPC deadline.
      logging.warning('%s %s failed: %s', method, url, e)
      continue
    # Transient error on the other side.
    if response.status_code >= 500:
      logging.warning(
          '%s %s failed with HTTP %d: %r',
          method, url, response.status_code, response.content)
      continue
    # Non-transient error.
    if 300 <= response.status_code < 500:
      logging.warning(
          '%s %s failed with HTTP %d: %r',
          method, url, response.status_code, response.content)
      raise AccessTokenError(
          'Failed to call %s: HTTP %d' % (url, response.status_code))
    # Success.
    try:
      body = json.loads(response.content)
    except ValueError:
      logging.error('Non-JSON response from %s: %r', url, response.content)
      raise AccessTokenError('Non-JSON response from %s' % url)
    raise ndb.Return(body)
  # All our attempts failed with transient errors. Perhaps some later retry
  # can help, so set transient to True.
  raise AccessTokenError(
      'Failed to call %s after multiple attempts' % url, transient=True)