Python google.appengine.runtime.apiproxy_errors.OverQuotaError() Examples
The following are 5 code examples of google.appengine.runtime.apiproxy_errors.OverQuotaError().
You can go to the original project or source file by following the attribution above each example.
You may also want to check out all available functions and classes of the module google.appengine.runtime.apiproxy_errors.
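Before the project examples, here is a minimal illustrative sketch of the basic pattern: wrap an App Engine API call that can exhaust quota (the mail API is a common case) and degrade gracefully instead of failing the request. The function name, sender address, and log message are assumptions and are not taken from any project below.

import logging

from google.appengine.api import mail
from google.appengine.runtime import apiproxy_errors

def notify(recipient, subject, body):
  # Illustrative sketch only: send a notification, but treat quota
  # exhaustion as a soft failure rather than an unhandled exception.
  try:
    mail.send_mail(sender='noreply@example.com',  # placeholder sender
                   to=recipient, subject=subject, body=body)
    return True
  except apiproxy_errors.OverQuotaError:
    # The call needed more quota than is currently available; report
    # failure instead of propagating the error to the request handler.
    logging.warning('Over quota sending mail to %s', recipient)
    return False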
Example #1
Source File: base_model.py, from loaner (Apache License 2.0)
def add_docs_to_index(cls, documents):
  """Adds a list of documents to a particular index.

  Args:
    documents: a list of search.Documents to add to the class' index.
  """
  index = cls.get_index()
  for doc in documents:
    try:
      index.put(doc)
    except search.PutError as err:
      result = err.results[0]
      if result.code == search.OperationResult.TRANSIENT_ERROR:
        index.put(doc)
    except (search.Error, apiproxy_errors.OverQuotaError):
      logging.error(_PUT_DOC_ERR_MSG, doc, index)
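The method above retries a transient PutError result once and logs, rather than raises, on other search errors or quota exhaustion. A minimal, self-contained sketch of the same pattern for a single document follows; the function name and log message are assumptions, not part of the loaner project.

import logging

from google.appengine.api import search
from google.appengine.runtime import apiproxy_errors

def put_with_one_retry(index, doc):
  # Sketch of the pattern above, reduced to one document.
  try:
    index.put(doc)
  except search.PutError as err:
    if err.results[0].code == search.OperationResult.TRANSIENT_ERROR:
      index.put(doc)  # one best-effort retry, as in the example
  except (search.Error, apiproxy_errors.OverQuotaError):
    # Quota exhaustion is logged and swallowed rather than raised.
    logging.error('Failed to index document %r in index %r', doc, index)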
Example #2
Source File: search.py, from python-compat-runtime (Apache License 2.0)
def get_result(self):
  try:
    return super(_PutOperationFuture, self).get_result()
  except apiproxy_errors.OverQuotaError, e:
    message = e.message + '; index = ' + self._index.name
    if self._index.namespace:
      message = message + ' in namespace ' + self._index.namespace
    raise apiproxy_errors.OverQuotaError(message)
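This wrapper re-raises the quota error with the index name and namespace appended, so logs identify which index ran out of quota. A hedged sketch of that enrichment step as a standalone helper (the helper name is an assumption; only the exception class comes from the runtime module):

from google.appengine.runtime import apiproxy_errors

def with_index_context(error, index_name, namespace=None):
  # Append the index name (and namespace, if any) to the original
  # OverQuotaError message, mirroring _PutOperationFuture.get_result above.
  message = error.message + '; index = ' + index_name
  if namespace:
    message = message + ' in namespace ' + namespace
  return apiproxy_errors.OverQuotaError(message)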
Example #3
Source File: bulkloader.py, from browserscope (Apache License 2.0)
def PerformWork(self, thread_pool):
  """Perform the work of this work item and report the results.

  Args:
    thread_pool: An AdaptiveThreadPool instance.

  Returns:
    A tuple (status, instruction) of the work status and an instruction
    for the ThreadGate.
  """
  status = adaptive_thread_pool.WorkItem.FAILURE
  instruction = adaptive_thread_pool.ThreadGate.DECREASE
  try:
    self.MarkAsTransferring()

    try:
      transfer_time = self._TransferItem(thread_pool)
      if transfer_time is None:
        status = adaptive_thread_pool.WorkItem.RETRY
        instruction = adaptive_thread_pool.ThreadGate.HOLD
      else:
        logger.debug('[%s] %s Transferred %d entities in %0.1f seconds',
                     threading.currentThread().getName(), self, self.count,
                     transfer_time)
        sys.stdout.write('.')
        sys.stdout.flush()
        status = adaptive_thread_pool.WorkItem.SUCCESS
        if transfer_time <= MAXIMUM_INCREASE_DURATION:
          instruction = adaptive_thread_pool.ThreadGate.INCREASE
        elif transfer_time <= MAXIMUM_HOLD_DURATION:
          instruction = adaptive_thread_pool.ThreadGate.HOLD
    except (db.InternalError, db.NotSavedError, db.Timeout,
            db.TransactionFailedError,
            apiproxy_errors.OverQuotaError,
            apiproxy_errors.DeadlineExceededError,
            apiproxy_errors.ApplicationError), e:
      status = adaptive_thread_pool.WorkItem.RETRY
      logger.exception('Retrying on non-fatal datastore error: %s', e)
    except urllib2.HTTPError, e:
      http_status = e.code
      if http_status >= 500 and http_status < 600:
        status = adaptive_thread_pool.WorkItem.RETRY
        logger.exception('Retrying on non-fatal HTTP error: %d %s',
                         http_status, e.msg)
      else:
        self.SetError()
        status = adaptive_thread_pool.WorkItem.FAILURE
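The bulkloader treats OverQuotaError, alongside several datastore and RPC errors, as non-fatal and retries the work item; only 5xx HTTP errors get the same treatment. A minimal sketch that isolates that classification (the tuple mirrors the except clause above; the predicate name is an assumption):

from google.appengine.ext import db
from google.appengine.runtime import apiproxy_errors

# Mirrors the except clause in PerformWork above; urllib2.HTTPError is
# handled separately because only 5xx responses are retried.
RETRYABLE_ERRORS = (db.InternalError, db.NotSavedError, db.Timeout,
                    db.TransactionFailedError,
                    apiproxy_errors.OverQuotaError,
                    apiproxy_errors.DeadlineExceededError,
                    apiproxy_errors.ApplicationError)

def is_retryable(exc):
  # True if the bulkloader's PerformWork would mark the item for RETRY.
  return isinstance(exc, RETRYABLE_ERRORS)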
Example #4
Source File: apiproxy.py, from python-compat-runtime (Apache License 2.0)
def _MakeCallDone(self):
  self._state = RPC.FINISHING
  self.cpu_usage_mcycles = self._result_dict['cpu_usage_mcycles']
  if self._result_dict['error'] == APPLICATION_ERROR:
    appl_err = self._result_dict['application_error']
    if appl_err == MEMCACHE_UNAVAILABLE and self.package == 'memcache':
      self._exception = apiproxy_errors.CapabilityDisabledError(
          'The memcache service is temporarily unavailable. %s'
          % self._result_dict['error_detail'])
    else:
      self._exception = apiproxy_errors.ApplicationError(
          appl_err, self._result_dict['error_detail'])
  elif self._result_dict['error'] == CAPABILITY_DISABLED:
    if self._result_dict['error_detail']:
      self._exception = apiproxy_errors.CapabilityDisabledError(
          self._result_dict['error_detail'])
    else:
      self._exception = apiproxy_errors.CapabilityDisabledError(
          "The API call %s.%s() is temporarily unavailable." % (
              self.package, self.call))
  elif self._result_dict['error'] == FEATURE_DISABLED:
    self._exception = apiproxy_errors.FeatureNotEnabledError(
        self._result_dict['error_detail'])
  elif self._result_dict['error'] == OVER_QUOTA:
    if self._result_dict['error_detail']:
      self._exception = apiproxy_errors.OverQuotaError(
          ('The API call %s.%s() required more quota than is available. %s'
           % (self.package, self.call, self._result_dict['error_detail'])))
    else:
      exception_entry = _ExceptionsMap[self._result_dict['error']]
      self._exception = exception_entry[0](
          exception_entry[1] % (self.package, self.call))
  elif self._result_dict['error'] in _ExceptionsMap:
    exception_entry = _ExceptionsMap[self._result_dict['error']]
    self._exception = exception_entry[0](
        exception_entry[1] % (self.package, self.call))
  else:
    try:
      self.response.ParseFromString(self._result_dict['result_string'])
    except Exception, e:
      self._exception = e
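On the other side of that mapping, application code making an App Engine API call can catch the resulting OverQuotaError directly. A hedged illustration, not part of apiproxy.py; the function name and log message are assumptions.

import logging

from google.appengine.ext import db
from google.appengine.runtime import apiproxy_errors

def safe_put(entity):
  # Write an entity, but swallow quota exhaustion instead of failing.
  try:
    return db.put(entity)
  except apiproxy_errors.OverQuotaError as e:
    # The message is the one built in _MakeCallDone above, e.g.
    # 'The API call datastore_v3.Put() required more quota than is available. ...'
    logging.error('Datastore write rejected: %s', e)
    return None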
Example #5
Source File: bulkloader.py, from python-compat-runtime (Apache License 2.0)
def PerformWork(self, thread_pool):
  """Perform the work of this work item and report the results.

  Args:
    thread_pool: An AdaptiveThreadPool instance.

  Returns:
    A tuple (status, instruction) of the work status and an instruction
    for the ThreadGate.
  """
  status = adaptive_thread_pool.WorkItem.FAILURE
  instruction = adaptive_thread_pool.ThreadGate.DECREASE
  try:
    self.MarkAsTransferring()

    try:
      transfer_time = self._TransferItem(thread_pool)
      if transfer_time is None:
        status = adaptive_thread_pool.WorkItem.RETRY
        instruction = adaptive_thread_pool.ThreadGate.HOLD
      else:
        logger.debug('[%s] %s Transferred %d entities in %0.1f seconds',
                     threading.currentThread().getName(), self, self.count,
                     transfer_time)
        sys.stdout.write('.')
        sys.stdout.flush()
        status = adaptive_thread_pool.WorkItem.SUCCESS
        if transfer_time <= MAXIMUM_INCREASE_DURATION:
          instruction = adaptive_thread_pool.ThreadGate.INCREASE
        elif transfer_time <= MAXIMUM_HOLD_DURATION:
          instruction = adaptive_thread_pool.ThreadGate.HOLD
    except (db.InternalError, db.NotSavedError, db.Timeout,
            db.TransactionFailedError,
            apiproxy_errors.OverQuotaError,
            apiproxy_errors.DeadlineExceededError,
            apiproxy_errors.ApplicationError), e:
      status = adaptive_thread_pool.WorkItem.RETRY
      logger.exception('Retrying on non-fatal datastore error: %s', e)
    except urllib2.HTTPError, e:
      http_status = e.code
      if http_status >= 500 and http_status < 600:
        status = adaptive_thread_pool.WorkItem.RETRY
        logger.exception('Retrying on non-fatal HTTP error: %d %s',
                         http_status, e.msg)
      else:
        self.SetError()
        status = adaptive_thread_pool.WorkItem.FAILURE