Python kazoo.exceptions.KazooException() Examples
The following are 17 code examples of kazoo.exceptions.KazooException(). Each example links to the original project and source file in the attribution line above it. You may also want to browse the other functions and classes available in the kazoo.exceptions module.
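Before diving into the examples, here is a minimal, self-contained sketch of the pattern most of them follow: KazooException is the common base class for kazoo's client-side errors, so it works as a catch-all after more specific subclasses (such as NoNodeError or SessionExpiredError) have been handled. The connection string and znode path below are placeholders, not taken from any of the projects listed.

from kazoo.client import KazooClient
from kazoo.exceptions import KazooException, NoNodeError

# Placeholder host string and znode path, for illustration only.
client = KazooClient(hosts="127.0.0.1:2181")
client.start(timeout=10.0)
try:
    # Handle the specific failure we care about first...
    data, stat = client.get("/example/znode")
except NoNodeError:
    print("znode does not exist")
except KazooException as exc:
    # ...then fall back to the catch-all base class for anything else
    # (session expiry, connection loss, internal errors, ...).
    print("zookeeper operation failed: %r" % exc)
finally:
    client.stop()
    client.close()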
Example #1
Source File: impl_zookeeper.py From taskflow with Apache License 2.0 | 6 votes |
def _wrap(self, job_uuid, job_path,
          fail_msg_tpl="Failure: %s", ensure_known=True):
    if job_path:
        fail_msg_tpl += " (%s)" % (job_path)
    if ensure_known:
        if not job_path:
            raise ValueError("Unable to check if %r is a known path"
                             % (job_path))
        if job_path not in self._known_jobs:
            fail_msg_tpl += ", unknown job"
            raise excp.NotFound(fail_msg_tpl % (job_uuid))
    try:
        yield
    except self._client.handler.timeout_exception:
        fail_msg_tpl += ", operation timed out"
        excp.raise_with_cause(excp.JobFailure, fail_msg_tpl % (job_uuid))
    except k_exceptions.SessionExpiredError:
        fail_msg_tpl += ", session expired"
        excp.raise_with_cause(excp.JobFailure, fail_msg_tpl % (job_uuid))
    except k_exceptions.NoNodeError:
        fail_msg_tpl += ", unknown job"
        excp.raise_with_cause(excp.NotFound, fail_msg_tpl % (job_uuid))
    except k_exceptions.KazooException:
        fail_msg_tpl += ", internal error"
        excp.raise_with_cause(excp.JobFailure, fail_msg_tpl % (job_uuid))
Example #2
Source File: impl_zookeeper.py From taskflow with Apache License 2.0 | 6 votes |
def _exc_wrapper(self):
    """Exception context-manager which wraps kazoo exceptions.

    This is used to capture and wrap any kazoo specific exceptions and
    then group them into corresponding taskflow exceptions (not doing
    that would expose the underlying kazoo exception model).
    """
    try:
        yield
    except self._client.handler.timeout_exception:
        exc.raise_with_cause(exc.StorageFailure,
                             "Storage backend timeout")
    except k_exc.SessionExpiredError:
        exc.raise_with_cause(exc.StorageFailure,
                             "Storage backend session has expired")
    except k_exc.NoNodeError:
        exc.raise_with_cause(exc.NotFound,
                             "Storage backend node not found")
    except k_exc.NodeExistsError:
        exc.raise_with_cause(exc.Duplicate,
                             "Storage backend duplicate node")
    except (k_exc.KazooException, k_exc.ZookeeperError):
        exc.raise_with_cause(exc.StorageFailure,
                             "Storage backend internal error")
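Assuming _exc_wrapper is decorated with contextlib.contextmanager in the full source (the yield-based body suggests it), a caller would consume it roughly like this; the znode path and value are hypothetical:

# Hypothetical call site: any kazoo-specific failure raised inside the
# block surfaces as a taskflow StorageFailure/NotFound/Duplicate instead.
with self._exc_wrapper():
    self._client.set("/taskflow/some/node", b"payload")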
Example #3
Source File: test_zookeeper_watcher.py From scrapy-cluster with MIT License | 5 votes |
def test_ping(self):
    self.zoo_watcher.zoo_client.server_version = MagicMock()
    self.assertTrue(self.zoo_watcher.ping())

    self.zoo_watcher.zoo_client.server_version = MagicMock(side_effect=KazooException)
    self.assertFalse(self.zoo_watcher.ping())
Example #4
Source File: impl_zookeeper.py From taskflow with Apache License 2.0 | 5 votes |
def register_entity(self, entity):
    entity_type = entity.kind
    if entity_type == c_base.Conductor.ENTITY_KIND:
        entity_path = k_paths.join(self.entity_path, entity_type)
        try:
            self._client.ensure_path(entity_path)
            self._client.create(k_paths.join(entity_path, entity.name),
                                value=misc.binary_encode(
                                    jsonutils.dumps(entity.to_dict())),
                                ephemeral=True)
        except k_exceptions.NodeExistsError:
            pass
        except self._client.handler.timeout_exception:
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, operation"
                " timed out" % (entity.name, entity_path))
        except k_exceptions.SessionExpiredError:
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, session"
                " expired" % (entity.name, entity_path))
        except k_exceptions.KazooException:
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, internal"
                " error" % (entity.name, entity_path))
    else:
        raise excp.NotImplementedError(
            "Not implemented for other entity type '%s'" % entity_type)
Example #5
Source File: impl_zookeeper.py From taskflow with Apache License 2.0 | 5 votes |
def state(self):
    owner = self.board.find_owner(self)
    job_data = {}
    try:
        raw_data, _data_stat = self._client.get(self.path)
        job_data = misc.decode_json(raw_data)
    except k_exceptions.NoNodeError:
        pass
    except k_exceptions.SessionExpiredError:
        excp.raise_with_cause(
            excp.JobFailure,
            "Can not fetch the state of %s,"
            " session expired" % (self.uuid))
    except self._client.handler.timeout_exception:
        excp.raise_with_cause(
            excp.JobFailure,
            "Can not fetch the state of %s,"
            " operation timed out" % (self.uuid))
    except k_exceptions.KazooException:
        excp.raise_with_cause(
            excp.JobFailure,
            "Can not fetch the state of %s,"
            " internal error" % (self.uuid))
    if not job_data:
        # No data, this job has been completed (the owner that we might
        # have fetched will not be able to be fetched again, since the
        # job node is a parent node of the owner/lock node).
        return states.COMPLETE
    if not owner:
        # No owner, but data, still work to be done.
        return states.UNCLAIMED
    return states.CLAIMED
Example #6
Source File: impl_zookeeper.py From taskflow with Apache License 2.0 | 5 votes |
def _get_node_attr(self, path, attr_name, trans_func=None):
    try:
        _data, node_stat = self._client.get(path)
        attr = getattr(node_stat, attr_name)
        if trans_func is not None:
            return trans_func(attr)
        else:
            return attr
    except k_exceptions.NoNodeError:
        excp.raise_with_cause(
            excp.NotFound,
            "Can not fetch the %r attribute of job %s (%s),"
            " path %s not found" % (attr_name, self.uuid, self.path, path))
    except self._client.handler.timeout_exception:
        excp.raise_with_cause(
            excp.JobFailure,
            "Can not fetch the %r attribute of job %s (%s),"
            " operation timed out" % (attr_name, self.uuid, self.path))
    except k_exceptions.SessionExpiredError:
        excp.raise_with_cause(
            excp.JobFailure,
            "Can not fetch the %r attribute of job %s (%s),"
            " session expired" % (attr_name, self.uuid, self.path))
    except (AttributeError, k_exceptions.KazooException):
        excp.raise_with_cause(
            excp.JobFailure,
            "Can not fetch the %r attribute of job %s (%s),"
            " internal error" % (attr_name, self.uuid, self.path))
Example #7
Source File: impl_zookeeper.py From taskflow with Apache License 2.0 | 5 votes |
def close(self):
    self._validated = False
    if not self._owned:
        return
    try:
        k_utils.finalize_client(self._client)
    except (k_exc.KazooException, k_exc.ZookeeperError):
        exc.raise_with_cause(exc.StorageFailure,
                             "Unable to finalize client")
Example #8
Source File: kazoo_utils.py From taskflow with Apache License 2.0 | 5 votes |
def checked_commit(txn):
    """Commits a kazoo transaction and validates the result.

    NOTE(harlowja): Until https://github.com/python-zk/kazoo/pull/224 is
    fixed or a similar pull request is merged we have to workaround the
    transaction failing silently.
    """
    if not txn.operations:
        return []
    results = txn.commit()
    failures = []
    for op, result in compat_zip(txn.operations, results):
        if isinstance(result, k_exc.KazooException):
            failures.append((op, result))
    if len(results) < len(txn.operations):
        raise KazooTransactionException(
            "Transaction returned %s results, this is less than"
            " the number of expected transaction operations %s"
            % (len(results), len(txn.operations)), failures)
    if len(results) > len(txn.operations):
        raise KazooTransactionException(
            "Transaction returned %s results, this is greater than"
            " the number of expected transaction operations %s"
            % (len(results), len(txn.operations)), failures)
    if failures:
        raise KazooTransactionException(
            "Transaction with %s operations failed: %s"
            % (len(txn.operations), prettify_failures(failures, limit=1)),
            failures)
    return results
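For context, a caller might build a kazoo transaction with KazooClient.transaction() and hand the request object to checked_commit() instead of calling commit() directly. This is a hypothetical sketch, not taken from the taskflow source; the client and znode paths are placeholders.

# `client` is an already-started KazooClient; paths are placeholders.
txn = client.transaction()
txn.create("/example/app/lock", b"")
txn.set_data("/example/app/state", b"running")
try:
    results = checked_commit(txn)
except KazooTransactionException as exc:
    # checked_commit() attaches the failing (operation, result) pairs
    # when it raises, as shown in the function above.
    print("transaction failed: %s" % exc)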
Example #9
Source File: zk_state.py From incubator-retired-cotton with Apache License 2.0 | 5 votes |
def remove_cluster_state(self, cluster_name):
    path = self._get_cluster_state_path(cluster_name)

    try:
        self._client.retry(self._client.delete, path, recursive=True)
    except KazooException as e:
        raise self.Error("Failed to remove MySQLCluster: %s" % e)

# --- Helper methods. ---
Example #10
Source File: zk_state.py From incubator-retired-cotton with Apache License 2.0 | 5 votes |
def load_cluster_state(self, cluster_name):
    path = self._get_cluster_state_path(cluster_name)

    try:
        content = self._client.get(path)[0]
        state = cPickle.loads(content)
        if not isinstance(state, MySQLCluster):
            raise self.Error("Invalid state object. Expect MySQLCluster, got %s" % type(state))
        return state
    except NoNodeError:
        log.info('No cluster state found on path %s' % path)
        return None
    except (KazooException, PickleError, ValueError) as e:
        raise self.Error('Failed to recover MySQLCluster: %s' % e)
Example #11
Source File: zk_state.py From incubator-retired-cotton with Apache License 2.0 | 5 votes |
def load_scheduler_state(self):
    path = self._get_scheduler_state_path()

    try:
        content = self._client.get(path)[0]
        state = cPickle.loads(content)
        if not isinstance(state, Scheduler):
            raise self.Error("Invalid state object. Expect Scheduler, got %s" % type(state))
        return state
    except NoNodeError:
        log.info('No scheduler state found on path %s' % path)
        return None
    except (KazooException, PickleError, ValueError) as e:
        raise self.Error('Failed to recover Scheduler: %s' % e)
Example #12
Source File: zk_state.py From incubator-retired-cotton with Apache License 2.0 | 5 votes |
def dump_scheduler_state(self, state):
    if not isinstance(state, Scheduler):
        raise TypeError("'state' should be an instance of Scheduler")

    path = self._get_scheduler_state_path()
    self._client.retry(self._client.ensure_path, posixpath.dirname(path))

    content = cPickle.dumps(state)
    try:
        self._client.retry(self._create_or_set, path, content)
    except KazooException as e:
        raise self.Error('Failed to persist Scheduler: %s' % e)
Example #13
Source File: cluster.py From kafka-tools with Apache License 2.0 | 5 votes |
def set_topic_retention(topic, zk):
    try:
        zdata, zstat = zk.get("/config/topics/{0}".format(topic.name))
        tdata = json_loads(zdata)
        topic.retention = int(tdata['config']['retention.ms'])
    except (KeyError, ValueError, KazooException):
        # If we can't get the config override for any reason, just stick
        # with whatever the default is
        pass
Example #14
Source File: zkutil.py From pykit with MIT License | 5 votes |
def close_zk(zk):
    if not isinstance(zk, KazooClient):
        raise TypeError('expect KazooClient or KazooClientExt, but got {t}'.format(t=type(zk)))

    try:
        zk.stop()
    except KazooException as e:
        logger.exception(repr(e) + ' while stop zk client')

    try:
        zk.close()
    except Exception as e:
        logger.exception(repr(e) + ' while close zk client')
Example #15
Source File: zookeeper_watcher.py From scrapy-cluster with MIT License | 5 votes |
def ping(self):
    '''
    Simple command to test if the zookeeper session is able to connect
    at this very moment
    '''
    try:
        # dummy ping to ensure we are still connected
        self.zoo_client.server_version()
        return True
    except KazooException:
        return False
Example #16
Source File: impl_zookeeper.py From taskflow with Apache License 2.0 | 4 votes |
def _process_child(self, path, request, quiet=True):
    """Receives the result of a child data fetch request."""
    job = None
    try:
        raw_data, node_stat = request.get()
        job_data = misc.decode_json(raw_data)
        job_created_on = misc.millis_to_datetime(node_stat.ctime)
        try:
            job_priority = job_data['priority']
            job_priority = base.JobPriority.convert(job_priority)
        except KeyError:
            job_priority = base.JobPriority.NORMAL
        job_uuid = job_data['uuid']
        job_name = job_data['name']
    except (ValueError, TypeError, KeyError):
        with excutils.save_and_reraise_exception(reraise=not quiet):
            LOG.warning("Incorrectly formatted job data found at path: %s",
                        path, exc_info=True)
    except self._client.handler.timeout_exception:
        with excutils.save_and_reraise_exception(reraise=not quiet):
            LOG.warning("Operation timed out fetching job data"
                        " from path: %s",
                        path, exc_info=True)
    except k_exceptions.SessionExpiredError:
        with excutils.save_and_reraise_exception(reraise=not quiet):
            LOG.warning("Session expired fetching job data from path: %s",
                        path, exc_info=True)
    except k_exceptions.NoNodeError:
        LOG.debug("No job node found at path: %s, it must have"
                  " disappeared or was removed", path)
    except k_exceptions.KazooException:
        with excutils.save_and_reraise_exception(reraise=not quiet):
            LOG.warning("Internal error fetching job data from path: %s",
                        path, exc_info=True)
    else:
        with self._job_cond:
            # Now we can officially check if someone already placed this
            # jobs information into the known job set (if it's already
            # existing then just leave it alone).
            if path not in self._known_jobs:
                job = ZookeeperJob(self, job_name,
                                   self._client, path,
                                   backend=self._persistence,
                                   uuid=job_uuid,
                                   book_data=job_data.get("book"),
                                   details=job_data.get("details", {}),
                                   created_on=job_created_on,
                                   priority=job_priority)
                self._known_jobs[path] = job
                self._job_cond.notify_all()
    if job is not None:
        self._try_emit(base.POSTED, details={'job': job})
Example #17
Source File: impl_zookeeper.py From taskflow with Apache License 2.0 | 4 votes |
def connect(self, timeout=10.0):

    def try_clean():
        # Attempt to do the needed cleanup if post-connection setup does
        # not succeed (maybe the connection is lost right after it is
        # obtained).
        try:
            self.close()
        except k_exceptions.KazooException:
            LOG.exception("Failed cleaning-up after post-connection"
                          " initialization failed")

    try:
        if timeout is not None:
            timeout = float(timeout)
        self._client.start(timeout=timeout)
        self._closing = False
    except (self._client.handler.timeout_exception,
            k_exceptions.KazooException):
        excp.raise_with_cause(excp.JobFailure,
                              "Failed to connect to zookeeper")
    try:
        if self._conf.get('check_compatible', True):
            kazoo_utils.check_compatible(self._client, self.MIN_ZK_VERSION)
        if self._worker is None and self._emit_notifications:
            self._worker = futurist.ThreadPoolExecutor(max_workers=1)
        self._client.ensure_path(self.path)
        self._client.ensure_path(self.trash_path)
        if self._job_watcher is None:
            self._job_watcher = watchers.ChildrenWatch(
                self._client,
                self.path,
                func=self._on_job_posting,
                allow_session_lost=True)
        self._connected = True
    except excp.IncompatibleVersion:
        with excutils.save_and_reraise_exception():
            try_clean()
    except (self._client.handler.timeout_exception,
            k_exceptions.KazooException):
        exc_type, exc, exc_tb = sys.exc_info()
        try:
            try_clean()
            excp.raise_with_cause(excp.JobFailure,
                                  "Failed to do post-connection"
                                  " initialization", cause=exc)
        finally:
            del(exc_type, exc, exc_tb)