Python kazoo.client.KazooClient() Examples

The following are 30 code examples of kazoo.client.KazooClient(), drawn from open-source projects; the originating project, source file, and license are noted above each example. You may also want to check out all available functions and classes of the kazoo.client module.
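Before the project-specific examples, here is a minimal, self-contained sketch of the typical KazooClient lifecycle (connect, write, read, disconnect). The host string and paths are placeholders, not values taken from any example below.

from kazoo.client import KazooClient

zk = KazooClient(hosts='127.0.0.1:2181')   # placeholder host string
zk.start()                                 # blocks until the session is established (or raises on timeout)

zk.ensure_path('/my/app')                  # create the path if it does not already exist
zk.create('/my/app/node', b'hello', ephemeral=True)  # znode values are bytes

data, stat = zk.get('/my/app/node')        # returns (value, ZnodeStat)
print(data.decode('utf-8'), stat.version)

zk.stop()                                  # always stop and close the client when done
zk.close()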
Example #1
Source File: aurora.py    From aurproxy with Apache License 2.0
def getMockServerSet(smox):
  listeners = []
  def add_listener(listener):
    listeners.append(listener)

  zk = smox.CreateMock(KazooClient)
  zk.connected = True
  zk.handler = SequentialGeventHandler()
  zk.retry = KazooRetry()

  mock_stat = smox.CreateMock(ZnodeStat)
  mock_stat.mzxid = 1

  zk.exists(TEST_PATH).AndReturn(True)
  zk.add_listener(mox.IgnoreArg()).WithSideEffects(add_listener)
  zk.get(TEST_PATH, mox.IgnoreArg()).AndReturn((1, mock_stat))
  zk.add_listener(mox.IgnoreArg()).WithSideEffects(add_listener)

  return zk 
Example #2
Source File: zookeeper_basic.py    From Zopkio with Apache License 2.0
def test_zookeeper_process_tracking():
  """
  Tests whether the process registers its node correctly with ZooKeeper and whether ZooKeeper deletes it when the process terminates
  """
  # Wait for ZooKeeper to start so that the Kazoo client can connect correctly
  time.sleep(5)
  # connecting to ensure /my/zookeeper_test

  kazoo_connection_url = str(runtime.get_active_config('zookeeper_host') + ':2181')
  zkclient = KazooClient(hosts=kazoo_connection_url)

  zkclient.start()

  zkclient.ensure_path("/my/zookeeper_test")
  # spawn a Python child process which creates an ephemeral node;
  # once the process ends the node will be deleted.
  p = Process(target=zookeeper_ephemeral_node, args=("process1",))
  p.start()
  zkclient.stop() 
Example #3
Source File: test_iam_migration.py    From dcos with Apache License 2.0
def create_dcos_oauth_users(zk: KazooClient) -> Generator:

    def _create_dcos_oauth_user(uid: str) -> None:
        log.info('Creating user `%s`', uid)
        zk.create('/dcos/users/{uid}'.format(uid=uid), makepath=True)

    def _delete_dcos_oauth_user(uid: str) -> None:
        try:
            zk.delete('/dcos/users/{uid}'.format(uid=uid))
        except kazoo.exceptions.NoNodeError:
            pass

    _create_dcos_oauth_user('user1@example.com')
    _create_dcos_oauth_user('user2@example.com')

    yield

    _delete_dcos_oauth_user('user1@example.com')
    _delete_dcos_oauth_user('user2@example.com') 
Example #4
Source File: zookeeper_cluster_tests.py    From Zopkio with Apache License 2.0
def test_zookeeper_process_tracking():
  """
  Tests whether the process registers its node correctly with ZooKeeper and whether ZooKeeper deletes it when the process terminates
  """
  # Wait for ZooKeeper to start so that the Kazoo client can connect correctly
  time.sleep(5)
  # connecting to ensure /my/zookeeper_test

  kazoo_connection_url = str(runtime.get_active_config('zookeeper_host') + ':2181')
  zkclient = KazooClient(hosts=kazoo_connection_url)

  zkclient.start()

  zkclient.ensure_path("/my/zookeeper_test")
  # spawn a Python child process which creates an ephemeral node;
  # once the process ends the node will be deleted.
  p = Process(target=zookeeper_ephemeral_node, args=("process1",))
  p.start()
  zkclient.stop() 
Example #5
Source File: partitioner.py    From yelp_kafka with Apache License 2.0
def start(self):
        """Create a new group and wait until the partitions have been
        acquired. This function should never be called twice.

        :raises: PartitionerError upon partitioner failures

        .. note: This is a blocking operation.
        """
        self.kazoo_retry = KazooRetry(**KAZOO_RETRY_DEFAULTS)
        self.kazoo_client = KazooClient(
            self.config.zookeeper,
            connection_retry=self.kazoo_retry,
        )
        self.kafka_client = KafkaClient(self.config.broker_list)

        self.log.debug("Starting a new group for topics %s", self.topics)
        self.released_flag = True
        self._refresh() 
Example #6
Source File: zookeeper_ztestsuite_example.py    From Zopkio with Apache License 2.0
def test(self):
    """
    Tests whether the process registers its node correctly with ZooKeeper and whether ZooKeeper deletes it when the process terminates
    """
    # Wait for ZooKeeper to start so that the Kazoo client can connect correctly
    time.sleep(5)
    # connecting to ensure /my/zookeeper_test

    kazoo_connection_url = str(runtime.get_active_config('zookeeper_host') + ':2181')
    zkclient = KazooClient(hosts=kazoo_connection_url)

    zkclient.start()

    zkclient.ensure_path("/my/zookeeper_test")
    # spawn a Python child process which creates an ephemeral node;
    # once the process ends the node will be deleted.
    p = Process(target=zookeeper_ephemeral_node, args=("process1",))
    p.start()
    zkclient.stop() 
Example #7
Source File: create-calico-docker-network.py    From dcos with Apache License 2.0
def zk_cluster_lock(zk: KazooClient, name: str, timeout: int = 30) -> Generator:
    lock = zk.Lock("{}/{}".format(ZK_PREFIX, name), socket.gethostname())
    try:
        print("Acquiring cluster lock '{}'".format(name))
        lock.acquire(blocking=True, timeout=timeout)
    except (ConnectionLoss, SessionExpiredError) as e:
        print("Failed to acquire cluster lock: {}".format(e.__class__.__name__))
        raise e
    except LockTimeout as e:
        print("Failed to acquire cluster lock in {} seconds".format(timeout))
        raise e
    else:
        print("ZooKeeper lock acquired.")
    try:
        yield
    finally:
        print("Releasing ZooKeeper lock")
        lock.release()
        print("ZooKeeper lock released.") 
Example #8
Source File: test_zklock.py    From pykit with MIT License
def _loop_acquire(self, n, ident):

        zk = KazooClient(hosts='127.0.0.1:21811')
        zk.start()
        scheme, name, passw = zk_test_auth
        zk.add_auth(scheme, name + ':' + passw)

        for ii in range(n):
            l = zkutil.ZKLock('foo_name', zkclient=zk)
            with l:

                self.total += 1
                self.counter += 1

                self.assertTrue(self.counter == 1)

                time.sleep(0.01)
                self.counter -= 1

                dd("id={ident:0>2} n={ii:0>2} got and released lock: {holder}".format(
                    ident=ident,
                    ii=ii,
                    holder=l.lock_holder))

        zk.stop() 
Example #9
Source File: test_zkutil.py    From pykit with MIT License
def setUp(self):

        utdocker.create_network()
        utdocker.start_container(
            zk_name,
            zk_tag,
            env={
                "ZOO_MY_ID": 1,
                "ZOO_SERVERS": "server.1=0.0.0.0:2888:3888",
            },
            port_bindings={2181: 21811}
        )

        self.zk = KazooClient('127.0.0.1:21811')
        self.zk.start()

        dd('start zk-test in docker') 
Example #10
Source File: test_acid.py    From pykit with MIT License
def setUp(self):

        utdocker.create_network()
        utdocker.start_container(
            zk_name,
            zk_tag,
            env={
                "ZOO_MY_ID": 1,
                "ZOO_SERVERS": "server.1=0.0.0.0:2888:3888",
            },
            port_bindings={2181: 21811}
        )

        self.zk = KazooClient('127.0.0.1:21811')
        self.zk.start()

        dd('start zk-test in docker')

        self.path = 'a'
        self.zk.create(self.path, '1') 
Example #11
Source File: io.py    From ochothon with Apache License 2.0
def initial(self, data):

        if self.terminate:

            #
            # - we're done, commit suicide
            # - the zk connection is guaranteed to be down at this point
            #
            self.exitcode()

        cnx_string = ','.join(self.brokers)
        data.zk = KazooClient(hosts=cnx_string, timeout=30.0, read_only=1, randomize_hosts=1)
        data.zk.add_listener(self.feedback)
        data.zk.start()

        return 'wait_for_cnx', data, 0 
Example #12
Source File: zk.py    From SolrClient with Apache License 2.0
def __init__(self, solr, log):
        '''
        Helper class for working with Solr Zookeeper config and collections. 

        Still very experimental, I wouldn't even use it myself if I didn't like to live on the edge. 
        '''
        if not kz_imported:
            raise ImportError("To use the ZK Class you need to have Kazoo Client installed")
        self.solr = solr
        self.logger = log
        try:
            self.system_data = self.solr.transport.send_request(endpoint='admin/info/system', params={'wt':'json'})
            self.zk_hosts = self.system_data[0]['zkHost']
        except Exception as e:
            self.logger.error("Couldn't get System info From Solr or bad format.")
            self.logger.exception(e)
            raise
        try: 
            self.kz = KazooClient(hosts=self.zk_hosts)
            self.kz.start()
            if self.kz.state != 'CONNECTED':
                self.logger.error("Couldn't establish connection to Zookeeper")
        except Exception as e:
            self.logger.error("Couldn't Establish Connection To Zookeeper")
            raise(e) 
Example #13
Source File: lock.py    From ARK with MIT License
def __init__(self, name):
        """
        Initialization method.

        :param str name: name of the distributed lock
        :return: None
        :rtype: None
        :raises kazoo.interfaces.IHandler.timeout_exception: connection timeout exception
        """
        self._lock_name = name
        self._lock_node_path = config.GuardianConfig.get_persistent_path("lock")
        self._lock_node = self._lock_node_path + '/' + self._lock_name
        self._lock_handle = None

        hosts = config.GuardianConfig.get(config.STATE_SERVICE_HOSTS_NAME)
        self._zkc = KazooClient(hosts=hosts)
        self._zkc.start() 
Example #14
Source File: test_cached_reader.py    From pykit with MIT License
def setUp(self):
        utdocker.create_network()
        utdocker.start_container(
            zk_test_name,
            zk_test_tag,
            env={
                "ZOO_MY_ID": 1,
                "ZOO_SERVERS": "server.1=0.0.0.0:2888:3888",
            },
            port_bindings={
                2181: 21811,
            }
        )

        self.zk = KazooClient(hosts='127.0.0.1:21811')
        self.zk.start()
        self.val = {'a': 1, 'b': 2}
        self.zk.create('foo', utfjson.dump(self.val)) 
Example #15
Source File: check_orphans.py    From paasta with Apache License 2.0
def get_zk_data(blacklisted_services: Set[str]) -> SmartstackData:
    logger.info(f"using {DEFAULT_ZK_DISCOVERY_PATH} for zookeeper")
    zk_hosts = get_zk_hosts(DEFAULT_ZK_DISCOVERY_PATH)

    logger.debug(f"connecting to zk hosts {zk_hosts}")
    zk = KazooClient(hosts=zk_hosts)
    zk.start()

    logger.debug(f"pulling smartstack data from zookeeper")
    zk_data = {}
    services = zk.get_children(PREFIX)
    for service in services:
        if service in blacklisted_services:
            continue
        service_instances = zk.get_children(os.path.join(PREFIX, service))
        instances_data = {}
        for instance in service_instances:
            try:
                instance_node = zk.get(os.path.join(PREFIX, service, instance))
            except NoNodeError:
                continue
            instances_data[instance] = json.loads(instance_node[0])
            zk_data[service] = instances_data

    return zk_data 
Example #16
Source File: mesos_tools.py    From paasta with Apache License 2.0
def get_number_of_mesos_masters(host, path):
    """Returns an array, containing mesos masters
    :param zk_config: dict containing information about zookeeper config.
    Masters register themselves in zookeeper by creating ``info_`` entries.
    We count these entries to get the number of masters.
    """
    zk = KazooClient(hosts=host, read_only=True)
    zk.start()
    try:
        root_entries = zk.get_children(path)
        result = [
            info
            for info in root_entries
            if info.startswith("json.info_") or info.startswith("info_")
        ]
        return len(result)
    finally:
        zk.stop()
        zk.close() 
Example #17
Source File: load_boost.py    From paasta with Apache License 2.0
def get_boost_values(zk_boost_path: str, zk: KazooClient) -> BoostValues:
    # Default values, non-boost.
    end_time: float = 0
    boost_factor: float = 1.0
    expected_load: float = 0

    try:
        end_time = float(zk.get(zk_boost_path + "/end_time")[0].decode("utf-8"))
        boost_factor = float(zk.get(zk_boost_path + "/factor")[0].decode("utf-8"))
        expected_load = float(
            zk.get(zk_boost_path + "/expected_load")[0].decode("utf-8")
        )

    except NoNodeError:
        # If we can't read boost values from zookeeper
        return BoostValues(end_time=0, boost_factor=1.0, expected_load=0)

    return BoostValues(
        end_time=end_time, boost_factor=boost_factor, expected_load=expected_load
    ) 
Example #18
Source File: autoscaling_service_lib.py    From paasta with Apache License 2.0
def create_autoscaling_lock(service: str, instance: str) -> Iterator[None]:
    """Acquire a lock in zookeeper for autoscaling. This is
    to avoid autoscaling a service multiple times, and to avoid
    having multiple paasta services all attempting to autoscale and
    fetching mesos data."""
    zk = KazooClient(
        hosts=load_system_paasta_config().get_zk_hosts(),
        timeout=ZK_LOCK_CONNECT_TIMEOUT_S,
    )
    zk.start()
    lock = zk.Lock(f"/autoscaling/{service}/{instance}/autoscaling.lock")
    try:
        lock.acquire(timeout=1)  # timeout=0 throws some other strange exception
        yield
    except LockTimeout:
        raise LockHeldException(
            f"Failed to acquire lock for autoscaling! {service}.{instance}"
        )
    else:
        lock.release()
    finally:
        zk.stop()
        zk.close() 
Example #19
Source File: zookeeper_test_faulttolerance.py    From Zopkio with Apache License 2.0
def test_zookeeper_fault_tolerance():
  """
  Kill zookeeper1 and check that the other zookeeper instances remain in quorum
  """
  zookeper_deployer = runtime.get_deployer("zookeeper")
  kazoo_connection_url = str(runtime.get_active_config('zookeeper_host') + ':2181')
  zkclient = KazooClient(hosts=kazoo_connection_url)

  zkclient.start()

  zkclient.ensure_path("/my/zookeeper_errorinjection")
  # kill the Zookeeper1 instance
  print "killing zoookeeper instance1"
  zookeper_deployer.kill("zookeeper1")
  time.sleep(20)
  zkclient.stop() 
Example #20
Source File: bounce_lib.py    From paasta with Apache License 2.0
def bounce_lock_zookeeper(
    name: str, system_paasta_config: Optional[SystemPaastaConfig] = None
) -> Iterator:
    """Acquire a bounce lock in zookeeper for the name given. The name should
    generally be the service namespace being bounced.
    This is a contextmanager. Please use it via 'with bounce_lock(name):'.
    :param name: The lock name to acquire"""
    if system_paasta_config is None:
        system_paasta_config = load_system_paasta_config()
    zk = KazooClient(
        hosts=system_paasta_config.get_zk_hosts(), timeout=ZK_LOCK_CONNECT_TIMEOUT_S,
    )
    zk.start()
    lock = zk.Lock(f"{ZK_LOCK_PATH}/{name}")
    try:
        lock.acquire(timeout=1)  # timeout=0 throws some other strange exception
        yield
    except LockTimeout:
        raise LockHeldException("Service %s is already being bounced!" % name)
    else:
        lock.release()
    finally:
        zk.stop()
        zk.close() 
Example #21
Source File: deploy_queue.py    From paasta with Apache License 2.0
def list_deploy_queue(request) -> Mapping[str, Any]:
    zk_client = KazooClient(hosts=settings.system_paasta_config.get_zk_hosts())
    zk_client.start()

    queue = ZKDelayDeadlineQueue(client=zk_client)
    available_service_instances = queue.get_available_service_instances(
        fetch_service_instances=True
    )
    unavailable_service_instances = queue.get_unavailable_service_instances(
        fetch_service_instances=True
    )

    available_service_instance_dicts = [
        service_instance._asdict()
        for _, service_instance in available_service_instances
    ]
    unavailable_service_instance_dicts = [
        service_instance._asdict()
        for _, __, service_instance in unavailable_service_instances
    ]

    return {
        "available_service_instances": available_service_instance_dicts,
        "unavailable_service_instances": unavailable_service_instance_dicts,
    } 
Example #22
Source File: mysos_task_runner.py    From incubator-retired-cotton with Apache License 2.0
def from_task(self, task, sandbox):
    data = json.loads(task.data)
    cluster_name, host, port, zk_url = data['cluster'], data['host'], data['port'], data['zk_url']
    _, servers, path = parse(zk_url)
    kazoo = KazooClient(servers)
    kazoo.start()
    self_instance = ServiceInstance(Endpoint(host, port))

    try:
      task_control = self._task_control_provider.from_task(task, sandbox)
      installer = self._installer_provider.from_task(task, sandbox)
      backup_store = self._backup_store_provider.from_task(task, sandbox)
    except (TaskControl.Error, PackageInstaller.Error) as e:
      kazoo.stop()  # Kazoo needs to be cleaned up. See kazoo/issues/217.
      raise TaskError(e.message)

    state_manager = StateManager(sandbox, backup_store)

    return MysosTaskRunner(
        self_instance,
        kazoo,
        get_cluster_path(path, cluster_name),
        installer,
        task_control,
        state_manager) 
Example #23
Source File: execution.py    From zoe with Apache License 2.0
def set_cleaning_up(self):
        """The services of the execution are being terminated."""
        self._status = self.CLEANING_UP_STATUS
        self.sql_manager.executions.update(self.id, status=self._status)
        #  See comment in method above
        if zoe_lib.config.get_conf().traefik_zk_ips is not None:
            zk_cli = KazooClient(hosts=zoe_lib.config.get_conf().traefik_zk_ips)
            zk_cli.start()
            for service in self.services:
                for port in service.ports:
                    if port.enable_proxy:
                        traefik_name = 'zoe_exec_{}_{}'.format(self.id, port.id)
                        zk_cli.delete('/traefik/backends/{}'.format(traefik_name), recursive=True)
                        zk_cli.delete('/traefik/frontends/{}'.format(traefik_name), recursive=True)
            zk_cli.create('/traefik/alias')
            zk_cli.delete('/traefik/alias')
            zk_cli.stop() 
Example #24
Source File: kazoo.py    From iris with BSD 2-Clause "Simplified" License
def __init__(self, zk_hosts, hostname, port, join_cluster):
        self.me = '%s:%s' % (hostname, port)
        self.is_master = None
        self.slaves = cycle([])
        self.slave_count = 0
        self.started_shutdown = False

        if join_cluster:
            read_only = False
        else:
            read_only = True

        self.zk = KazooClient(hosts=zk_hosts, handler=SequentialGeventHandler(), read_only=read_only)
        event = self.zk.start_async()
        event.wait(timeout=5)

        self.lock = self.zk.Lock(path='/iris/sender_master', identifier=self.me)

        # Used to keep track of slaves / senders present in cluster
        self.party = Party(client=self.zk, path='/iris/sender_nodes', identifier=self.me)

        if join_cluster:
            self.zk.add_listener(self.event_listener)
            self.party.join() 
Example #25
Source File: test_cluster_zookeeper.py    From kafka-tools with Apache License 2.0
def setUp(self):
        self.patcher_start = patch.object(KazooClient, 'start', autospec=True)
        self.patcher_children = patch.object(KazooClient, 'get_children', autospec=True)
        self.patcher_get = patch.object(KazooClient, 'get', autospec=True)
        self.patcher_stop = patch.object(KazooClient, 'stop', autospec=True)
        self.mock_start = self.patcher_start.start()
        self.mock_children = self.patcher_children.start()
        self.mock_get = self.patcher_get.start()
        self.mock_stop = self.patcher_stop.start() 
Example #26
Source File: cluster.py    From incubator-retired-cotton with Apache License 2.0
def resolve_master(
      cluster_url, master_callback=lambda: True, termination_callback=lambda: True, zk_client=None):
  """
    Resolve the MySQL cluster master's endpoint from the given URL for this cluster.
    :param cluster_url: The ZooKeeper URL for this cluster.
    :param master_callback: A callback method with one argument: the ServiceInstance for the elected
                            master.
    :param termination_callback: A callback method with no argument. Invoked when the cluster
                                 terminates.
    :param zk_client: Use a custom ZK client instead of Kazoo if specified.
  """
  try:
    _, zk_servers, cluster_path = zookeeper.parse(cluster_url)
  except Exception as e:
    raise ValueError("Invalid cluster_url: %s" % e.message)

  if not zk_client:
    zk_client = KazooClient(zk_servers)
    zk_client.start()

  listener = ClusterListener(
      zk_client,
      cluster_path,
      None,
      master_callback=master_callback,
      termination_callback=termination_callback)
  listener.start() 
Example #27
Source File: execution.py    From zoe with Apache License 2.0
def set_running(self):
        """The execution is running and producing useful work."""
        self._status = self.RUNNING_STATUS
        self.time_start = datetime.datetime.utcnow()
        self.sql_manager.executions.update(self.id, status=self._status, time_start=self.time_start)
        #  This hackish code is here to support dynamic reverse proxying of web interfaces running in Zoe executions (e.g. jupyter)
        #  The idea is to use Træfik to do the reverse proxying, configured to use zookeeper to store dynamic configuration
        #  Zoe updates ZooKeeper whenever an execution runs or is terminated and Træfik creates or deletes the route automatically
        if zoe_lib.config.get_conf().traefik_zk_ips is not None:
            zk_cli = KazooClient(hosts=zoe_lib.config.get_conf().traefik_zk_ips)
            zk_cli.start()
            for service in self.services:
                for port in service.ports:
                    if port.enable_proxy:
                        format_args = {
                            "ip_port": port.external_ip + ":" + str(port.external_port),
                            "proxy_path": '{}/{}'.format(zoe_lib.config.get_conf().traefik_base_url, port.proxy_key())
                        }
                        endpoint = port.url_template.format(**format_args).encode('utf-8')
                        traefik_name = 'zoe_exec_{}_{}'.format(self.id, port.id)
                        zk_cli.create('/traefik/backends/{}/servers/server/url'.format(traefik_name), endpoint, makepath=True)
                        zk_cli.create('/traefik/frontends/{}/routes/path/rule'.format(traefik_name), 'PathPrefix:{}/{}'.format(zoe_lib.config.get_conf().traefik_base_url, port.proxy_key()).encode('utf-8'), makepath=True)
                        zk_cli.create('/traefik/frontends/{}/backend'.format(traefik_name), traefik_name.encode('utf-8'), makepath=True)
            zk_cli.create('/traefik/alias')
            zk_cli.delete('/traefik/alias')
            zk_cli.stop() 
Example #28
Source File: cluster.py    From kafka-tools with Apache License 2.0
def create_from_zookeeper(cls, zkconnect, default_retention=1, fetch_topics=True):
        log.info("Connecting to zookeeper {0}".format(zkconnect))
        try:
            zk = KazooClient(zkconnect)
            zk.start()
        except Exception as e:
            raise ZookeeperException("Cannot connect to Zookeeper: {0}".format(e))

        # Get broker list
        cluster = cls(retention=default_retention)
        add_brokers_from_zk(cluster, zk)

        # Get current partition state
        if fetch_topics:
            log.info("Getting partition list from Zookeeper")
            for topic in zk.get_children("/brokers/topics"):
                zdata, zstat = zk.get("/brokers/topics/{0}".format(topic))
                add_topic_with_replicas(cluster, topic, json_loads(zdata))
                set_topic_retention(cluster.topics[topic], zk)

            if cluster.num_topics() == 0:
                raise ZookeeperException("The cluster specified does not have any topics")

        log.info("Closing connection to zookeeper")
        zk.stop()
        zk.close()

        return cluster 
Example #29
Source File: registry.py    From dubbo-python with Apache License 2.0
def __init__(self, zk_hosts, application_config=None):
        Registry.__init__(self)
        if application_config:
            self._app_config = application_config
        self.__zk = KazooClient(hosts=zk_hosts)
        self.__zk.add_listener(self.__state_listener)
        self.__zk.start() 
Example #30
Source File: test_iam_migration.py    From dcos with Apache License 2.0
def zk() -> KazooClient:
    conn_retry_policy = KazooRetry(max_tries=-1, delay=0.1, max_delay=0.1)
    cmd_retry_policy = KazooRetry(
        max_tries=3, delay=0.3, backoff=1, max_delay=1, ignore_expire=False)
    zk = KazooClient(
        hosts='zk-1.zk:2181,zk-2.zk:2181,zk-3.zk:2181',
        connection_retry=conn_retry_policy,
        command_retry=cmd_retry_policy,
    )
    zk.start()
    yield zk
    zk.stop()