Python google.appengine.runtime.DeadlineExceededError() Examples
The following are 30 code examples of google.appengine.runtime.DeadlineExceededError().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module google.appengine.runtime, or try the search function.
Example #1
Source File: util.py From browserscope with Apache License 2.0 | 6 votes |
def GvizTableData(request):
    """Returns a string formatted for consumption by a Google Viz table.

    Args:
        request: A Django HttpRequest; must carry a 'category' GET parameter.

    Returns:
        An HttpResponse whose body is gviz table data for the category, or an
        HttpResponseBadRequest when the category is missing or unknown.
    """
    # NOTE: removed dead commented-out DeadlineExceededError test scaffolding
    # and the unused `test_set = None` initializer.
    category = request.GET.get('category')
    if not category:
        return http.HttpResponseBadRequest('Must pass category=something')
    test_set = all_test_sets.GetTestSet(category)
    if not test_set:
        return http.HttpResponseBadRequest(
            'No test set was found for category=%s' % category)
    formatted_gviz_table_data = GetStats(request, test_set, 'gviz_table_data')
    return http.HttpResponse(formatted_gviz_table_data)
Example #2
Source File: datachecks.py From personfinder with Apache License 2.0 | 6 votes |
def post(self, request, *args, **kwargs):
    """Checks every Note in the repo, resuming from a cursor if given.

    Reschedules itself from the last fully-processed cursor when the request
    deadline or a datastore timeout interrupts the scan.
    """
    del request, args, kwargs  # unused
    query = model.Note.all(filter_expired=False).filter(
        'repo =', self.env.repo)
    cursor = self.params.cursor
    if cursor:
        query.with_cursor(cursor)
    try:
        for note in query:
            # Capture the cursor *before* checking, so a failure mid-check
            # retries this note rather than skipping it.
            next_cursor = query.cursor()
            self._check_note(note)
            cursor = next_cursor
    except (runtime.DeadlineExceededError, datastore_errors.Timeout):
        # Both simply mean we ran out of time; the two previously-duplicated
        # handlers are merged into one.
        self.schedule_task(self.env.repo, cursor=cursor)
    return django.http.HttpResponse('')
Example #3
Source File: datachecks.py From personfinder with Apache License 2.0 | 6 votes |
def post(self, request, *args, **kwargs):
    """Checks every Person in the repo, resuming from a cursor if given.

    Reschedules itself from the last fully-processed cursor when the request
    deadline or a datastore timeout interrupts the scan.
    """
    del request, args, kwargs  # unused
    query = model.Person.all(filter_expired=False).filter(
        'repo =', self.env.repo)
    cursor = self.params.cursor
    if cursor:
        query.with_cursor(cursor)
    try:
        for person in query:
            # Capture the cursor *before* checking, so a failure mid-check
            # retries this person rather than skipping them.
            next_cursor = query.cursor()
            self._check_person(person)
            cursor = next_cursor
    except (runtime.DeadlineExceededError, datastore_errors.Timeout):
        # Both simply mean we ran out of time; the two previously-duplicated
        # handlers are merged into one.
        self.schedule_task(self.env.repo, cursor=cursor)
    return django.http.HttpResponse('')
Example #4
Source File: tasks.py From personfinder with Apache License 2.0 | 6 votes |
def get(self):
    """Counts unreviewed notes for the repo and notifies if appropriate.

    When invoked without a repo, fans out one task per repository instead.
    On deadline/timeout the task re-adds itself to retry.
    """
    if self.repo:
        try:
            count_of_unreviewed_notes = (
                model.Note.get_unreviewed_notes_count(self.repo))
            self._maybe_notify(count_of_unreviewed_notes)
        except (runtime.DeadlineExceededError,
                datastore_errors.Timeout) as e:
            # Either error just means we ran out of time; the retry logic was
            # previously duplicated across two handlers. Log which one fired.
            logging.info('%s occurred; retrying in a new task',
                         e.__class__.__name__)
            self.add_task_for_repo(
                self.repo, self.task_name(), self.ACTION)
    else:
        for repo in model.Repo.list():
            self.add_task_for_repo(
                repo, self.task_name(), self.ACTION)
Example #5
Source File: tasks.py From personfinder with Apache License 2.0 | 6 votes |
def get(self):
    """Runs one slice of a counting scan over the repo's entities.

    Without a repo, launches one counting task per repository. On deadline
    the scan resumes via a follow-up task using the persisted counter state.
    """
    if self.repo:
        # Do some counting.
        try:
            # Counter persists progress, so a rescheduled task picks up where
            # the previous one stopped.
            counter = model.Counter.get_unfinished_or_create(
                self.repo, self.SCAN_NAME)
            entities_remaining = True
            while entities_remaining:
                # Batch the db updates: run up to 100 count steps before
                # writing the counter back.
                for _ in xrange(100):
                    entities_remaining = run_count(
                        self.make_query, self.update_counter, counter)
                    if not entities_remaining:
                        break
                # And put the updates at once.
                counter.put()
        except runtime.DeadlineExceededError:
            # Continue counting in another task. Up to 100 count steps since
            # the last put() are redone by the next task.
            self.add_task_for_repo(self.repo, self.SCAN_NAME, self.ACTION)
    else:
        # Launch counting tasks for all repositories.
        for repo in model.Repo.list():
            self.add_task_for_repo(repo, self.SCAN_NAME, self.ACTION)
Example #6
Source File: apiproxy.py From python-compat-runtime with Apache License 2.0 | 6 votes |
def _WaitImpl(self):
    """Waits on the API call associated with this RPC.

    The callback, if provided, will be executed before Wait() returns. If
    this RPC is already complete, or if the RPC was never started, this
    function will return immediately.

    Raises:
      InterruptedError if a callback throws an uncaught exception.
    """
    try:
        # NOTE: rpc_completed is assigned but not otherwise used; only the
        # exception behavior of Wait() matters here.
        rpc_completed = _apphosting_runtime___python__apiproxy.Wait(self)
    except (runtime.DeadlineExceededError, apiproxy_errors.InterruptedError):
        # These two propagate unchanged; everything else is rewrapped below.
        raise
    except:
        exc_class, exc, tb = sys.exc_info()
        if (isinstance(exc, SystemError) and
            exc.args[0] == 'uncaught RPC exception'):
            # The runtime's own sentinel error must not be rewrapped.
            raise
        rpc = None
        if hasattr(exc, "_appengine_apiproxy_rpc"):
            rpc = exc._appengine_apiproxy_rpc
        # Python 2 three-argument raise: rethrow as InterruptedError while
        # preserving the original traceback.
        new_exc = apiproxy_errors.InterruptedError(exc, rpc)
        raise new_exc.__class__, new_exc, tb
    return True
Example #7
Source File: deletion.py From personfinder with Apache License 2.0 | 6 votes |
def post(self, request, *args, **kwargs):
    """Deletes stray entities whose associated Person record no longer exists
    and which are older than the stray-cleanup TTL.

    Reschedules itself from the last processed cursor on deadline/timeout.
    """
    del request, args, kwargs  # unused
    q = self.get_query()
    cursor = self.params.get('cursor', '')
    if cursor:
        q.with_cursor(cursor)
    try:
        now = utils.get_utcnow()
        for item in q:
            # Capture the cursor before processing so a failure retries this
            # item rather than skipping it.
            next_cursor = q.cursor()
            associated_person = model.Person.get(
                self.env.repo, self.get_person_record_id(item))
            if not associated_person:
                if now - self.get_base_timestamp(item) > _STRAY_CLEANUP_TTL:
                    db.delete(item)
            cursor = next_cursor
    except (runtime.DeadlineExceededError, datastore_errors.Timeout):
        # Both mean we ran out of time; the two previously-duplicated
        # handlers are merged into one.
        self.schedule_task(self.env.repo, cursor=cursor)
    return django.http.HttpResponse('')
Example #8
Source File: test_deletion.py From personfinder with Apache License 2.0 | 6 votes |
def test_task_rescheduling(self):
    """Tests that task is rescheduled for continuation."""
    tq_mock = mox.Mox()
    tq_mock.StubOutWithMock(taskqueue, 'add')
    # Expectation: after the deadline error, the handler re-enqueues itself
    # with the (empty) cursor it had reached.
    taskqueue.add(name=mox.IsA(unicode),
                  method='POST',
                  url='/haiti/tasks/process_expirations',
                  queue_name='expiry',
                  params={'cursor': ''})
    tq_mock.ReplayAll()
    # DeadlineExceededErrors can be raised at any time. A convenient way for
    # us to raise it during this test execution is with utils.get_utcnow.
    with mock.patch('utils.get_utcnow') as get_utcnow_mock:
        get_utcnow_mock.side_effect = runtime.DeadlineExceededError()
        self.run_task('/haiti/tasks/process_expirations', method='POST')
    # Verify the taskqueue.add expectation actually fired.
    tq_mock.VerifyAll()
    tq_mock.UnsetStubs()
Example #9
Source File: fields.py From graphene-gae with BSD 3-Clause "New" or "Revised" License | 6 votes |
def generate_edges_page(ndb_iter, page_size, keys_only, edge_type):
    """Builds up to page_size edges from an NDB iterator.

    Stops early on iterator exhaustion, on a deadline, or after more than
    two datastore timeouts. With keys_only, each yielded ndb.Key is wrapped
    in an empty model instance so the edge still carries a node.
    """
    page = []
    timeout_count = 0
    while len(page) < page_size:
        try:
            item = ndb_iter.next()
        except StopIteration:
            break
        except Timeout:
            timeout_count += 1
            if timeout_count > 2:
                break
            continue
        except DeadlineExceededError:
            break
        if keys_only:
            # item is actually an ndb.Key; create an empty entity to hold it.
            model_class = edge_type._meta.fields['node']._type._meta.model
            item = model_class(key=item)
        cursor_token = ndb_iter.cursor_after().urlsafe()
        page.append(edge_type(node=item, cursor=cursor_token))
    return page
Example #10
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def _get_memory_usage():
    """Returns the amount of memory used as a float in MiB, or None on error."""
    try:
        return apiruntime.runtime.memory_usage().current()
    except (AssertionError,
            apiproxy_errors.CancelledError,
            apiproxy_errors.DeadlineExceededError,
            apiproxy_errors.RPCFailedError,
            runtime.DeadlineExceededError) as e:
        # The memory-usage call does an RPC under the hood and can itself
        # fail or hit the deadline; treat any such failure as "unknown".
        logging.warning('Failed to get memory usage: %s', e)
        return None


## Handler
Example #11
Source File: main.py From solutions-vision-search with Apache License 2.0 | 5 votes |
def detect_labels(bucket_id, object_id):
    """Detects labels from uploaded image using Vision API."""
    try:
        # Where the uploaded image lives on GCS.
        gcs_image_uri = 'gs://{}/{}'.format(bucket_id, object_id)
        # A single annotate request asking for up to 10 labels.
        request_dict = [{
            'image': {
                'source': {
                    'gcsImageUri': gcs_image_uri
                }
            },
            'features': [{
                'type': 'LABEL_DETECTION',
                'maxResults': 10,
            }]
        }]
        vision_svc = get_vision_svc()
        annotate_call = vision_svc.images().annotate(body={
            'requests': request_dict
        })
        response = annotate_call.execute()
        first_result = response['responses'][0]
        if 'labelAnnotations' in first_result:
            return first_result['labelAnnotations']
        return []
    except DeadlineExceededError:
        # Implicitly returns None when the Vision call runs out of time.
        logging.exception('Exceeded deadline in detect_labels()')
Example #12
Source File: main.py From solutions-vision-search with Apache License 2.0 | 5 votes |
def detect_automl_labels(bucket_id, object_id):
    """Detects labels from image using AutoML Vision.

    Args:
        bucket_id: GCS bucket name holding the image.
        object_id: GCS object name of the image.

    Returns:
        The AutoML prediction payload list, or None if the deadline was
        exceeded.
    """
    try:
        # Read image file contents from GCS. Close the handle even if read()
        # or the base64 encode raises, so the file object is not leaked.
        filename = '/{}/{}'.format(bucket_id, object_id)
        gcs_file = cloudstorage.open(filename)
        try:
            encoded_contents = base64.b64encode(gcs_file.read())
        finally:
            gcs_file.close()
        # Build request payload dict for label detection
        request_dict = {
            'payload': {
                'image': {
                    'imageBytes': encoded_contents
                }
            },
            'params': {
                'score_threshold': "0.5"
            }
        }
        # Get predictions from the AutoML Vision model
        automl_svc = get_automl_svc()
        parent = 'projects/{}/locations/us-central1/models/{}'.format(
            app_identity.get_application_id(),
            current_app.config['AUTOML_MODEL_ID'])
        request = automl_svc.projects().locations().models().predict(
            name=parent, body=request_dict)
        response = request.execute()
        return response['payload']
    except DeadlineExceededError:
        logging.exception('Exceeded deadline in detect_automl_labels()')
Example #13
Source File: tasks.py From personfinder with Apache License 2.0 | 5 votes |
def get(self):
    """Deletes old Person records while the repo is in test mode.

    Without a repo, fans out one task per repo that is in test mode. On
    deadline/timeout, reschedules itself with the current cursor and the
    *original* utcnow so the query filter stays identical across tasks.
    """
    if self.repo:
        # To reuse the cursor from the previous task, we need to apply
        # exactly the same filter. So we use utcnow previously used
        # instead of the current time.
        utcnow = self.params.utcnow or utils.get_utcnow()
        max_entry_date = (
            utcnow - datetime.timedelta(
                seconds=CleanUpInTestMode.DELETION_AGE_SECONDS))
        query = model.Person.all_in_repo(self.repo)
        query.filter('entry_date <=', max_entry_date)
        if self.params.cursor:
            query.with_cursor(self.params.cursor)
        cursor = self.params.cursor
        # Uses query.get() instead of "for person in query".
        # If we use for-loop, query.cursor() points to an unexpected
        # position.
        person = query.get()
        # When the repository is no longer in test mode, aborts the
        # deletion.
        try:
            while person and self.in_test_mode(self.repo):
                if self.__listener:
                    self.__listener.before_deletion(person.key())
                person.delete_related_entities(delete_self=True)
                cursor = query.cursor()
                person = query.get()
        except runtime.DeadlineExceededError:
            self.schedule_next_task(cursor, utcnow)
        except datastore_errors.Timeout:
            # This exception is sometimes raised, maybe when the query
            # object live too long?
            self.schedule_next_task(cursor, utcnow)
    else:
        for repo in model.Repo.list():
            if self.in_test_mode(repo):
                self.add_task_for_repo(repo, self.task_name(), self.ACTION)
Example #14
Source File: requestors.py From stocktwits with MIT License | 5 votes |
def post_json(url, params=None, deadline=30):
    """Tries to post a couple times in a loop before giving up if a timeout.

    Args:
        url: Full URL to POST to.
        params: Optional dict of query-string parameters (e.g. access token).
        deadline: Per-attempt urlfetch deadline in seconds.

    Returns:
        The decoded JSON response body.
    """
    # BUG FIX: previously `params` itself was rebound to the encoded query
    # string, so `params.iteritems()` in the except handler raised
    # AttributeError on a str, masking the timeout log. Keep the dict and
    # build the query string into a separate variable.
    params = params or {}
    # URL query string parameters (Access Token)
    query_string = '?' + urllib.urlencode(params) if params else ''
    resp = None
    for i in range(4):
        try:
            resp = urlfetch.fetch(url + query_string, method='POST',
                                  deadline=deadline)
        except DeadlineExceededError:
            # Log the parameters minus the standard/base ones (which hold
            # credentials).
            trimmed_params = {k: v for k, v in params.iteritems()
                              if k not in ST_BASE_PARAMS.keys()}
            log.error('POST Timeout to {} w/ {}'.format(
                url[len(ST_BASE_URL):], trimmed_params))
        if resp is not None:
            break
    # TODO wrap in appropriate try/except
    return json.loads(resp.content)
Example #15
Source File: test_datachecks.py From personfinder with Apache License 2.0 | 5 votes |
def deadline_exceeded_side_effect(*args, **kwargs): del args, kwargs # Unused. raise runtime.DeadlineExceededError()
Example #16
Source File: deletion.py From personfinder with Apache License 2.0 | 5 votes |
def post(self, request, *args, **kwargs):
    """Sweeps Person records: refreshes expiry flags, wipes or deletes
    records past the expired TTL, and processes newly-expired records.

    Reschedules itself from the last processed cursor on deadline/timeout.
    """
    del request, args, kwargs  # unused
    q = model.Person.all(filter_expired=False).filter(
        'repo =', self.env.repo)
    cursor = self.params.get('cursor', '')
    if cursor:
        q.with_cursor(cursor)
    try:
        now = utils.get_utcnow()
        for person in q:
            # Capture the cursor before processing so a failure retries this
            # record rather than skipping it.
            next_cursor = q.cursor()
            was_expired = person.is_expired
            person.put_expiry_flags()
            if (now - person.get_effective_expiry_date() > _EXPIRED_TTL):
                # Only original records should get to this point, since
                # other records should have been deleted altogether as soon
                # as they expired. Just in case the deletion task has failed
                # for three days though, check that it's an original record
                # to ensure we don't change the contents of a non-original
                # record.
                if person.is_original():
                    person.wipe_contents()
                else:
                    person.delete_related_entities(delete_self=True)
            elif person.is_expired and not was_expired:
                # Since we're not sending notices, handler isn't really
                # needed.
                # TODO(nworden): check with Product about whether we want to
                # send notices for expirations. The current language
                # indicates it was designed for cases where someone manually
                # requested deletion of the record.
                delete.delete_person(None, person, send_notices=False)
            cursor = next_cursor
    except (runtime.DeadlineExceededError, datastore_errors.Timeout):
        # Both mean we ran out of time; the two previously-duplicated
        # handlers are merged into one.
        self.schedule_task(self.env.repo, cursor=cursor)
    return django.http.HttpResponse('')
Example #17
Source File: app_logging.py From python-compat-runtime with Apache License 2.0 | 5 votes |
def emit(self, record):
    """Emit a record.

    This implementation is based on the implementation of
    StreamHandler.emit().

    Args:
      record: A Python logging.LogRecord object.
    """
    try:
        if features.IsEnabled("LogServiceWriteRecord"):
            # Structured write: level, timestamp, formatted message, source
            # location.
            logservice.write_record(self._AppLogsLevel(record.levelno),
                                    record.created,
                                    self.format(record),
                                    self._AppLogsLocation(record))
        else:
            message = self._AppLogsMessage(record)
            # logservice.write expects a byte string (Python 2).
            if isinstance(message, unicode):
                message = message.encode("UTF-8")
            logservice.write(message)
    except (KeyboardInterrupt, SystemExit, runtime.DeadlineExceededError):
        # Never swallow interpreter-exit or deadline signals inside a
        # logging handler.
        raise
    except:
        # Any other logging failure is routed to the standard handler-error
        # hook instead of crashing the caller.
        self.handleError(record)
Example #18
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def report_memory(app):
    """Wraps an app so handlers log when memory usage increased by at least
    0.5MB after the handler completed.
    """
    min_delta = 0.5
    original_dispatch = app.router.dispatch
    def dispatch_and_report(*args, **kwargs):
        usage_before = _get_memory_usage()
        hit_deadline = False
        try:
            return original_dispatch(*args, **kwargs)
        except runtime.DeadlineExceededError:
            # _get_memory_usage() performs an RPC under the hood, which
            # would likely fail after the deadline, so skip the measurement.
            hit_deadline = True
            raise
        finally:
            if not hit_deadline:
                usage_after = _get_memory_usage()
                grew_enough = (usage_before and usage_after and
                               usage_after >= usage_before + min_delta)
                if grew_enough:
                    logging.debug(
                        'Memory usage: %.1f -> %.1f MB; delta: %.1f MB',
                        usage_before, usage_after,
                        usage_after - usage_before)
    app.router.dispatch = dispatch_and_report


## Time
Example #19
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def report_memory(app):
    """Wraps an app so handlers log when memory usage increased by at least
    0.5MB after the handler completed.
    """
    min_delta = 0.5
    original_dispatch = app.router.dispatch
    def dispatch_and_report(*args, **kwargs):
        usage_before = _get_memory_usage()
        hit_deadline = False
        try:
            return original_dispatch(*args, **kwargs)
        except runtime.DeadlineExceededError:
            # _get_memory_usage() performs an RPC under the hood, which
            # would likely fail after the deadline, so skip the measurement.
            hit_deadline = True
            raise
        finally:
            if not hit_deadline:
                usage_after = _get_memory_usage()
                grew_enough = (usage_before and usage_after and
                               usage_after >= usage_before + min_delta)
                if grew_enough:
                    logging.debug(
                        'Memory usage: %.1f -> %.1f MB; delta: %.1f MB',
                        usage_before, usage_after,
                        usage_after - usage_before)
    app.router.dispatch = dispatch_and_report


## Time
Example #20
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def _get_memory_usage():
    """Returns the amount of memory used as a float in MiB, or None on error."""
    try:
        return apiruntime.runtime.memory_usage().current()
    except (AssertionError,
            apiproxy_errors.CancelledError,
            apiproxy_errors.DeadlineExceededError,
            apiproxy_errors.RPCFailedError,
            runtime.DeadlineExceededError) as e:
        # The memory-usage call does an RPC under the hood and can itself
        # fail or hit the deadline; treat any such failure as "unknown".
        logging.warning('Failed to get memory usage: %s', e)
        return None


## Handler
Example #21
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def report_memory(app):
    """Wraps an app so handlers log when memory usage increased by at least
    0.5MB after the handler completed.
    """
    min_delta = 0.5
    original_dispatch = app.router.dispatch
    def dispatch_and_report(*args, **kwargs):
        usage_before = _get_memory_usage()
        hit_deadline = False
        try:
            return original_dispatch(*args, **kwargs)
        except runtime.DeadlineExceededError:
            # _get_memory_usage() performs an RPC under the hood, which
            # would likely fail after the deadline, so skip the measurement.
            hit_deadline = True
            raise
        finally:
            if not hit_deadline:
                usage_after = _get_memory_usage()
                grew_enough = (usage_before and usage_after and
                               usage_after >= usage_before + min_delta)
                if grew_enough:
                    logging.debug(
                        'Memory usage: %.1f -> %.1f MB; delta: %.1f MB',
                        usage_before, usage_after,
                        usage_after - usage_before)
    app.router.dispatch = dispatch_and_report


## Time
Example #22
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def _get_memory_usage():
    """Returns the amount of memory used as a float in MiB, or None on error."""
    try:
        return apiruntime.runtime.memory_usage().current()
    except (AssertionError,
            apiproxy_errors.CancelledError,
            apiproxy_errors.DeadlineExceededError,
            apiproxy_errors.RPCFailedError,
            runtime.DeadlineExceededError) as e:
        # The memory-usage call does an RPC under the hood and can itself
        # fail or hit the deadline; treat any such failure as "unknown".
        logging.warning('Failed to get memory usage: %s', e)
        return None


## Handler
Example #23
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def report_memory(app):
    """Wraps an app so handlers log when memory usage increased by at least
    0.5MB after the handler completed.
    """
    min_delta = 0.5
    original_dispatch = app.router.dispatch
    def dispatch_and_report(*args, **kwargs):
        usage_before = _get_memory_usage()
        hit_deadline = False
        try:
            return original_dispatch(*args, **kwargs)
        except runtime.DeadlineExceededError:
            # _get_memory_usage() performs an RPC under the hood, which
            # would likely fail after the deadline, so skip the measurement.
            hit_deadline = True
            raise
        finally:
            if not hit_deadline:
                usage_after = _get_memory_usage()
                grew_enough = (usage_before and usage_after and
                               usage_after >= usage_before + min_delta)
                if grew_enough:
                    logging.debug(
                        'Memory usage: %.1f -> %.1f MB; delta: %.1f MB',
                        usage_before, usage_after,
                        usage_after - usage_before)
    app.router.dispatch = dispatch_and_report


## Time
Example #24
Source File: bot_management.py From luci-py with Apache License 2.0 | 5 votes |
def cron_delete_old_bot():
    """Deletes stale BotRoot entity groups."""
    start = utils.utcnow()
    # Run for 4.5 minutes and schedule the cron job every 5 minutes. Running
    # for 9.5 minutes (out of 10 allowed for a cron job) results in 'Exceeded
    # soft private memory limit of 512 MB with 512 MB' even if this loop
    # should be fairly light on memory usage.
    time_to_stop = start + datetime.timedelta(seconds=int(4.5*60))
    total = 0
    deleted = []
    try:
        q = BotRoot.query(default_options=ndb.QueryOptions(keys_only=True))
        for bot_root_key in q:
            # Check if it has any BotEvent left. If not, it means that the
            # entity is older than _OLD_BOT_EVENTS_CUT_OFF, so the whole
            # thing can be deleted now.
            # In particular, ignore the fact that BotInfo may still exist,
            # since if there's no BotEvent left, it's probably a broken
            # entity or a forgotten dead bot.
            if BotEvent.query(ancestor=bot_root_key).count(limit=1):
                continue
            deleted.append(bot_root_key.string_id())
            # Delete the whole group. An ancestor query will retrieve the
            # entity itself too, so no need to explicitly delete it.
            keys = ndb.Query(ancestor=bot_root_key).fetch(keys_only=True)
            ndb.delete_multi(keys)
            total += len(keys)
            if utils.utcnow() >= time_to_stop:
                break
        return total
    except runtime.DeadlineExceededError:
        # Ran out of request time; the next cron run picks up where we left
        # off (the finally clause still logs what was done).
        pass
    finally:
        logging.info(
            'Deleted %d entities from the following bots:\n%s',
            total, ', '.join(sorted(deleted)))
Example #25
Source File: main.py From python-docs-samples with Apache License 2.0 | 5 votes |
def get(self):
    """Sample handler that deliberately outlives the request deadline."""
    from google.appengine.runtime import DeadlineExceededError
    try:
        # Sleeping 70s is long enough to outlive the request deadline, so
        # DeadlineExceededError is expected mid-sleep — presumably against
        # the 60s frontend limit; confirm for this runtime.
        time.sleep(70)
        self.response.write('Completed.')
    except DeadlineExceededError:
        # Discard any partial output and return a clean 500.
        self.response.clear()
        self.response.set_status(500)
        self.response.out.write(
            'The request did not complete in time.')


# [END gae_python_request_timer]


# [START gae_python_environment]
Example #26
Source File: utils.py From luci-py with Apache License 2.0 | 5 votes |
def report_memory(app):
    """Wraps an app so handlers log when memory usage increased by at least
    0.5MB after the handler completed.
    """
    min_delta = 0.5
    original_dispatch = app.router.dispatch
    def dispatch_and_report(*args, **kwargs):
        usage_before = _get_memory_usage()
        hit_deadline = False
        try:
            return original_dispatch(*args, **kwargs)
        except runtime.DeadlineExceededError:
            # _get_memory_usage() performs an RPC under the hood, which
            # would likely fail after the deadline, so skip the measurement.
            hit_deadline = True
            raise
        finally:
            if not hit_deadline:
                usage_after = _get_memory_usage()
                grew_enough = (usage_before and usage_after and
                               usage_after >= usage_before + min_delta)
                if grew_enough:
                    logging.debug(
                        'Memory usage: %.1f -> %.1f MB; delta: %.1f MB',
                        usage_before, usage_after,
                        usage_after - usage_before)
    app.router.dispatch = dispatch_and_report


## Time
Example #27
Source File: main_test.py From python-docs-samples with Apache License 2.0 | 5 votes |
def test_timer(testbed):
    """/timer should respond 500 when the handler hits the deadline."""
    app = webtest.TestApp(main.app)
    # Simulate the runtime interrupting the handler mid-sleep by making
    # time.sleep itself raise the deadline error.
    with mock.patch('main.time.sleep') as sleep_mock:
        sleep_mock.side_effect = DeadlineExceededError()
        app.get('/timer', status=500)
    assert sleep_mock.called
Example #28
Source File: test_tasks.py From personfinder with Apache License 2.0 | 4 votes |
def test_clean_up_in_test_mode_multi_tasks(self):
    """Test the clean up in test mode when it is broken into multiple
    tasks."""
    class Listener(object):
        def before_deletion(self, person):
            # This will be implemented later using mock.
            assert False

    tasks.CleanUpInTestMode.DELETION_AGE_SECONDS = 2 * 3600  # 2 hours
    utcnow = datetime.datetime(2010, 1, 1, 7, 0, 0)
    set_utcnow_for_test(utcnow)
    self.mox = mox.Mox()
    cleanup = \
        test_handler.initialize_handler(tasks.CleanUpInTestMode,
                                        tasks.CleanUpInTestMode.ACTION)
    listener = Listener()
    cleanup.set_listener(listener)

    # Simulates add_task_for_repo() because it doesn't work in unit tests.
    def add_task_for_repo(repo, task_name, action, **kwargs):
        cleanup = test_handler.initialize_handler(
            tasks.CleanUpInTestMode, action, repo=repo, params=kwargs)
        cleanup.set_listener(listener)
        cleanup.get()

    # Rescheduling must reuse the original utcnow (stringified timestamp)
    # so the follow-up task applies the identical query filter.
    self.mox.StubOutWithMock(cleanup, 'add_task_for_repo')
    (cleanup.add_task_for_repo(
        'haiti',
        mox.IsA(str),
        mox.IsA(str),
        utcnow=str(calendar.timegm(utcnow.utctimetuple())),
        cursor=mox.IsA(str),
        queue_name=mox.IsA(str)).
     WithSideEffects(add_task_for_repo).MultipleTimes())

    def raise_deadline_exceeded_error(_):
        raise runtime.DeadlineExceededError()

    # Expectation order: p1 deleted, then p2 triggers a deadline (forcing a
    # new task), then p2 is retried and deleted by the follow-up task.
    self.mox.StubOutWithMock(listener, 'before_deletion')
    listener.before_deletion(self.key_p1)
    listener.before_deletion(self.key_p2).WithSideEffects(
        raise_deadline_exceeded_error)
    listener.before_deletion(self.key_p2)
    self.mox.ReplayAll()

    config.set(test_mode=True, repo='haiti')
    # This should run multiple tasks and finally deletes all records.
    cleanup.get()
    assert db.get(self.key_p1) is None
    assert db.get(self.key_p2) is None
    self.mox.UnsetStubs()
    self.mox.VerifyAll()
Example #29
Source File: wsgi.py From python-compat-runtime with Apache License 2.0 | 4 votes |
def Handle(self):
    """Handles the request represented by the WsgiRequest object.

    Loads the handler from the handler name provided. Calls the handler
    with the environ. Any exceptions in loading the user handler and
    executing it are caught and logged.

    Returns:
      A dict containing:
        error: App Engine error code. 0 for OK, 1 for error.
        response_code: HTTP response code.
        headers: A list of tuples (key, value) of HTTP headers.
        body: A str of the body of the response
    """
    try:
        handler = _config_handle.add_wsgi_middleware(self._LoadHandler())
    except runtime.DeadlineExceededError:
        exc_info = sys.exc_info()
        try:
            logging.error('', exc_info=exc_info)
        except runtime.DeadlineExceededError:
            # Even logging the first deadline error can itself hit the
            # deadline; fall back to plain messages.
            logging.exception('Deadline exception occurred while logging a '
                              'deadline exception.')
            logging.error('Original exception:', exc_info=exc_info)
        return {'error': _DEADLINE_DURING_LOADING}
    except:
        logging.exception('')
        return {'error': 1}
    result = None
    try:
        result = handler(dict(self._environ), self._StartResponse)
        for chunk in result:
            if not isinstance(chunk, str):
                raise InvalidResponseError('handler must return an iterable of str')
            self._body.append(chunk)
        # Any body written via start_response's write() comes first.
        body = ''.join(self._written_body + self._body)
        return {'response_code': self._status,
                'headers': self._response_headers,
                'body': body}
    except:
        logging.exception('')
        return {'error': 1}
    finally:
        # PEP 333: close the response iterable if it supports it, even on
        # error.
        if hasattr(result, 'close'):
            result.close()
Example #30
Source File: bot_management.py From luci-py with Apache License 2.0 | 4 votes |
def cron_delete_old_bot_events():
    """Deletes very old BotEvent entities."""
    start = utils.utcnow()
    # Run for 4.5 minutes and schedule the cron job every 5 minutes. Running
    # for 9.5 minutes (out of 10 allowed for a cron job) results in 'Exceeded
    # soft private memory limit of 512 MB with 512 MB' even if this loop
    # should be fairly light on memory usage.
    time_to_stop = start + datetime.timedelta(seconds=int(4.5*60))
    end_ts = start - _OLD_BOT_EVENTS_CUT_OFF
    more = True
    cursor = None
    count = 0
    first_ts = None
    try:
        # Order is by key, so it is naturally ordered by bot, which means the
        # operations will mainly operate on one root entity at a time.
        q = BotEvent.query(
            default_options=ndb.QueryOptions(keys_only=True)).filter(
                BotEvent.ts <= end_ts)
        while more:
            keys, cursor, more = q.fetch_page(10, start_cursor=cursor)
            if not keys:
                break
            if not first_ts:
                # Fetch the very first entity to get an idea of the range
                # being processed.
                while keys:
                    # It's possible that the query returns ndb.Key for
                    # entities that do not exist anymore due to an
                    # inconsistent index. Handle this explicitly.
                    e = keys[0].get()
                    if not e:
                        keys = keys[1:]
                        continue
                    first_ts = e.ts
                    break
            ndb.delete_multi(keys)
            count += len(keys)
            if utils.utcnow() >= time_to_stop:
                break
        return count
    except runtime.DeadlineExceededError:
        # Out of time; the next cron run continues (finally still logs).
        pass
    finally:
        def _format_ts(t):
            # t: datetime.datetime or None.
            return t.strftime(u'%Y-%m-%d %H:%M') if t else 'N/A'
        def _format_delta(e, s):
            # e - s: datetime.timedelta; drop sub-second precision.
            return str(e-s).rsplit('.', 1)[0] if e and s else 'N/A'
        logging.info(
            'Deleted %d BotEvent entities; from %s\n'
            'Cut off was %s; trailing by %s',
            count, _format_ts(first_ts), _format_ts(end_ts),
            _format_delta(end_ts, first_ts))