Java Code Examples for org.apache.flink.runtime.util.ZooKeeperUtils#startCuratorFramework()
The following examples show how to use org.apache.flink.runtime.util.ZooKeeperUtils#startCuratorFramework().
The source project and file for each snippet are noted above it.
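Before the project examples, here is a minimal, self-contained sketch of the call itself. It is illustrative only: the class name, the main method, and the "localhost:2181" address are assumptions, while the configuration keys and the close() responsibility mirror what the test examples below do.

import org.apache.curator.framework.CuratorFramework;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.HighAvailabilityOptions;
import org.apache.flink.runtime.util.ZooKeeperUtils;

public class StartCuratorFrameworkSketch {

	public static void main(String[] args) {
		Configuration configuration = new Configuration();
		// "localhost:2181" is a placeholder; point this at an actual ZooKeeper quorum
		configuration.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, "localhost:2181");
		configuration.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");

		// startCuratorFramework builds and starts a CuratorFramework client from the Flink configuration
		CuratorFramework client = ZooKeeperUtils.startCuratorFramework(configuration);
		try {
			// use the client, e.g. to back Flink's ZooKeeper-based HA services
		} finally {
			// the caller owns the client's lifecycle, as the examples below show
			client.close();
		}
	}
}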
Example 1
Source File: HighAvailabilityServicesUtils.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
public static HighAvailabilityServices createAvailableOrEmbeddedServices(
		Configuration config,
		Executor executor) throws Exception {
	HighAvailabilityMode highAvailabilityMode = LeaderRetrievalUtils.getRecoveryMode(config);

	switch (highAvailabilityMode) {
		case NONE:
			return new EmbeddedHaServices(executor);

		case ZOOKEEPER:
			BlobStoreService blobStoreService = BlobUtils.createBlobStoreFromConfig(config);

			return new ZooKeeperHaServices(
				ZooKeeperUtils.startCuratorFramework(config),
				executor,
				config,
				blobStoreService);

		case FACTORY_CLASS:
			return createCustomHAServices(config, executor);

		default:
			throw new Exception("High availability mode " + highAvailabilityMode + " is not supported.");
	}
}
Example 2
Source File: ZooKeeperCompletedCheckpointStoreTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/**
 * Tests that checkpoints are discarded when the completed checkpoint store is shut
 * down with a globally terminal state.
 */
@Test
public void testDiscardingCheckpointsAtShutDown() throws Exception {
	final SharedStateRegistry sharedStateRegistry = new SharedStateRegistry();
	final Configuration configuration = new Configuration();
	configuration.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, zooKeeperResource.getConnectString());

	final CuratorFramework client = ZooKeeperUtils.startCuratorFramework(configuration);
	final ZooKeeperCompletedCheckpointStore checkpointStore = createZooKeeperCheckpointStore(client);

	try {
		final CompletedCheckpointStoreTest.TestCompletedCheckpoint checkpoint1 = CompletedCheckpointStoreTest.createCheckpoint(0, sharedStateRegistry);

		checkpointStore.addCheckpoint(checkpoint1);
		assertThat(checkpointStore.getAllCheckpoints(), Matchers.contains(checkpoint1));

		checkpointStore.shutdown(JobStatus.FINISHED);

		// verify that the checkpoint is discarded
		CompletedCheckpointStoreTest.verifyCheckpointDiscarded(checkpoint1);
	} finally {
		client.close();
	}
}
Example 3
Source File: HighAvailabilityServicesUtils.java From flink with Apache License 2.0 | 6 votes |
public static ClientHighAvailabilityServices createClientHAService(Configuration configuration) throws Exception {
	HighAvailabilityMode highAvailabilityMode = HighAvailabilityMode.fromConfig(configuration);

	switch (highAvailabilityMode) {
		case NONE:
			final String webMonitorAddress = getWebMonitorAddress(configuration, AddressResolution.TRY_ADDRESS_RESOLUTION);
			return new StandaloneClientHAServices(webMonitorAddress);

		case ZOOKEEPER:
			final CuratorFramework client = ZooKeeperUtils.startCuratorFramework(configuration);
			return new ZooKeeperClientHAServices(client, configuration);

		case FACTORY_CLASS:
			return createCustomClientHAServices(configuration);

		default:
			throw new Exception("Recovery mode " + highAvailabilityMode + " is not supported.");
	}
}
Example 4
Source File: ZooKeeperHaServicesTest.java From flink with Apache License 2.0 | 5 votes |
private void runCleanupTest(
		Configuration configuration,
		TestingBlobStoreService blobStoreService,
		ThrowingConsumer<ZooKeeperHaServices, Exception> zooKeeperHaServicesConsumer) throws Exception {
	try (ZooKeeperHaServices zooKeeperHaServices = new ZooKeeperHaServices(
			ZooKeeperUtils.startCuratorFramework(configuration),
			Executors.directExecutor(),
			configuration,
			blobStoreService)) {

		// create some Zk services to trigger the generation of paths
		final LeaderRetrievalService resourceManagerLeaderRetriever = zooKeeperHaServices.getResourceManagerLeaderRetriever();
		final LeaderElectionService resourceManagerLeaderElectionService = zooKeeperHaServices.getResourceManagerLeaderElectionService();
		final RunningJobsRegistry runningJobsRegistry = zooKeeperHaServices.getRunningJobsRegistry();

		final TestingListener listener = new TestingListener();
		resourceManagerLeaderRetriever.start(listener);
		resourceManagerLeaderElectionService.start(new TestingContender("foobar", resourceManagerLeaderElectionService));
		final JobID jobId = new JobID();
		runningJobsRegistry.setJobRunning(jobId);

		listener.waitForNewLeader(2000L);

		resourceManagerLeaderRetriever.stop();
		resourceManagerLeaderElectionService.stop();
		runningJobsRegistry.clearJob(jobId);

		zooKeeperHaServicesConsumer.accept(zooKeeperHaServices);
	}
}
Example 5
Source File: ZooKeeperSubmittedJobGraphStoreTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Tests that we fail with an exception if the job cannot be removed from the
 * ZooKeeperSubmittedJobGraphStore.
 *
 * <p>Tests that a closed ZooKeeperSubmittedJobGraphStore no longer holds any locks.
 */
@Test
public void testJobGraphRemovalFailureAndLockRelease() throws Exception {
	try (final CuratorFramework client = ZooKeeperUtils.startCuratorFramework(configuration)) {
		final TestingRetrievableStateStorageHelper<SubmittedJobGraph> stateStorage = new TestingRetrievableStateStorageHelper<>();
		final ZooKeeperSubmittedJobGraphStore submittedJobGraphStore = createSubmittedJobGraphStore(client, stateStorage);
		submittedJobGraphStore.start(null);
		final ZooKeeperSubmittedJobGraphStore otherSubmittedJobGraphStore = createSubmittedJobGraphStore(client, stateStorage);
		otherSubmittedJobGraphStore.start(null);

		final SubmittedJobGraph jobGraph = new SubmittedJobGraph(new JobGraph());
		submittedJobGraphStore.putJobGraph(jobGraph);

		final SubmittedJobGraph recoveredJobGraph = otherSubmittedJobGraphStore.recoverJobGraph(jobGraph.getJobId());

		assertThat(recoveredJobGraph, is(notNullValue()));

		try {
			otherSubmittedJobGraphStore.removeJobGraph(recoveredJobGraph.getJobId());
			fail("It should not be possible to remove the JobGraph since the first store still has a lock on it.");
		} catch (Exception ignored) {
			// expected
		}

		submittedJobGraphStore.stop();

		// now we should be able to delete the job graph
		otherSubmittedJobGraphStore.removeJobGraph(recoveredJobGraph.getJobId());

		assertThat(otherSubmittedJobGraphStore.recoverJobGraph(recoveredJobGraph.getJobId()), is(nullValue()));

		otherSubmittedJobGraphStore.stop();
	}
}
Example 6
Source File: ZooKeeperCompletedCheckpointStoreTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Tests that subsumed checkpoints are discarded.
 */
@Test
public void testDiscardingSubsumedCheckpoints() throws Exception {
	final SharedStateRegistry sharedStateRegistry = new SharedStateRegistry();
	final Configuration configuration = new Configuration();
	configuration.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, zooKeeperResource.getConnectString());

	final CuratorFramework client = ZooKeeperUtils.startCuratorFramework(configuration);
	final ZooKeeperCompletedCheckpointStore checkpointStore = createZooKeeperCheckpointStore(client);

	try {
		final CompletedCheckpointStoreTest.TestCompletedCheckpoint checkpoint1 = CompletedCheckpointStoreTest.createCheckpoint(0, sharedStateRegistry);
		checkpointStore.addCheckpoint(checkpoint1);
		assertThat(checkpointStore.getAllCheckpoints(), Matchers.contains(checkpoint1));

		final CompletedCheckpointStoreTest.TestCompletedCheckpoint checkpoint2 = CompletedCheckpointStoreTest.createCheckpoint(1, sharedStateRegistry);
		checkpointStore.addCheckpoint(checkpoint2);

		final List<CompletedCheckpoint> allCheckpoints = checkpointStore.getAllCheckpoints();
		assertThat(allCheckpoints, Matchers.contains(checkpoint2));
		assertThat(allCheckpoints, Matchers.not(Matchers.contains(checkpoint1)));

		// verify that the subsumed checkpoint is discarded
		CompletedCheckpointStoreTest.verifyCheckpointDiscarded(checkpoint1);
	} finally {
		client.close();
	}
}
Example 7
Source File: ZooKeeperHaServicesTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
private void runCleanupTest(
		Configuration configuration,
		TestingBlobStoreService blobStoreService,
		ThrowingConsumer<ZooKeeperHaServices, Exception> zooKeeperHaServicesConsumer) throws Exception {
	try (ZooKeeperHaServices zooKeeperHaServices = new ZooKeeperHaServices(
			ZooKeeperUtils.startCuratorFramework(configuration),
			Executors.directExecutor(),
			configuration,
			blobStoreService)) {

		// create some Zk services to trigger the generation of paths
		final LeaderRetrievalService resourceManagerLeaderRetriever = zooKeeperHaServices.getResourceManagerLeaderRetriever();
		final LeaderElectionService resourceManagerLeaderElectionService = zooKeeperHaServices.getResourceManagerLeaderElectionService();
		final RunningJobsRegistry runningJobsRegistry = zooKeeperHaServices.getRunningJobsRegistry();

		final TestingListener listener = new TestingListener();
		resourceManagerLeaderRetriever.start(listener);
		resourceManagerLeaderElectionService.start(new TestingContender("foobar", resourceManagerLeaderElectionService));
		final JobID jobId = new JobID();
		runningJobsRegistry.setJobRunning(jobId);

		listener.waitForNewLeader(2000L);

		resourceManagerLeaderRetriever.stop();
		resourceManagerLeaderElectionService.stop();
		runningJobsRegistry.clearJob(jobId);

		zooKeeperHaServicesConsumer.accept(zooKeeperHaServices);
	}
}
Example 8
Source File: ZooKeeperRegistryTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Tests the functions of ZooKeeperRegistry: setJobRunning(), setJobFinished() and isJobRunning().
 */
@Test
public void testZooKeeperRegistry() throws Exception {
	Configuration configuration = new Configuration();
	configuration.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, testingServer.getConnectString());
	configuration.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");

	final HighAvailabilityServices zkHaService = new ZooKeeperHaServices(
		ZooKeeperUtils.startCuratorFramework(configuration),
		Executors.directExecutor(),
		configuration,
		new VoidBlobStore());

	final RunningJobsRegistry zkRegistry = zkHaService.getRunningJobsRegistry();

	try {
		JobID jobID = JobID.generate();
		assertEquals(JobSchedulingStatus.PENDING, zkRegistry.getJobSchedulingStatus(jobID));

		zkRegistry.setJobRunning(jobID);
		assertEquals(JobSchedulingStatus.RUNNING, zkRegistry.getJobSchedulingStatus(jobID));

		zkRegistry.setJobFinished(jobID);
		assertEquals(JobSchedulingStatus.DONE, zkRegistry.getJobSchedulingStatus(jobID));

		zkRegistry.clearJob(jobID);
		assertEquals(JobSchedulingStatus.PENDING, zkRegistry.getJobSchedulingStatus(jobID));
	} finally {
		zkHaService.close();
	}
}
Example 9
Source File: ZooKeeperSubmittedJobGraphStoreTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Tests that we fail with an exception if the job cannot be removed from the
 * ZooKeeperSubmittedJobGraphStore.
 *
 * <p>Tests that a closed ZooKeeperSubmittedJobGraphStore no longer holds any locks.
 */
@Test
public void testJobGraphRemovalFailureAndLockRelease() throws Exception {
	try (final CuratorFramework client = ZooKeeperUtils.startCuratorFramework(configuration)) {
		final TestingRetrievableStateStorageHelper<SubmittedJobGraph> stateStorage = new TestingRetrievableStateStorageHelper<>();
		final ZooKeeperSubmittedJobGraphStore submittedJobGraphStore = createSubmittedJobGraphStore(client, stateStorage);
		submittedJobGraphStore.start(null);
		final ZooKeeperSubmittedJobGraphStore otherSubmittedJobGraphStore = createSubmittedJobGraphStore(client, stateStorage);
		otherSubmittedJobGraphStore.start(null);

		final SubmittedJobGraph jobGraph = new SubmittedJobGraph(new JobGraph());
		submittedJobGraphStore.putJobGraph(jobGraph);

		final SubmittedJobGraph recoveredJobGraph = otherSubmittedJobGraphStore.recoverJobGraph(jobGraph.getJobId());

		assertThat(recoveredJobGraph, is(notNullValue()));

		try {
			otherSubmittedJobGraphStore.removeJobGraph(recoveredJobGraph.getJobId());
			fail("It should not be possible to remove the JobGraph since the first store still has a lock on it.");
		} catch (Exception ignored) {
			// expected
		}

		submittedJobGraphStore.stop();

		// now we should be able to delete the job graph
		otherSubmittedJobGraphStore.removeJobGraph(recoveredJobGraph.getJobId());

		assertThat(otherSubmittedJobGraphStore.recoverJobGraph(recoveredJobGraph.getJobId()), is(nullValue()));

		otherSubmittedJobGraphStore.stop();
	}
}
Example 10
Source File: ZooKeeperRegistryTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Tests the functions of ZooKeeperRegistry: setJobRunning(), setJobFinished() and isJobRunning().
 */
@Test
public void testZooKeeperRegistry() throws Exception {
	Configuration configuration = new Configuration();
	configuration.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, testingServer.getConnectString());
	configuration.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");

	final HighAvailabilityServices zkHaService = new ZooKeeperHaServices(
		ZooKeeperUtils.startCuratorFramework(configuration),
		Executors.directExecutor(),
		configuration,
		new VoidBlobStore());

	final RunningJobsRegistry zkRegistry = zkHaService.getRunningJobsRegistry();

	try {
		JobID jobID = JobID.generate();
		assertEquals(JobSchedulingStatus.PENDING, zkRegistry.getJobSchedulingStatus(jobID));

		zkRegistry.setJobRunning(jobID);
		assertEquals(JobSchedulingStatus.RUNNING, zkRegistry.getJobSchedulingStatus(jobID));

		zkRegistry.setJobFinished(jobID);
		assertEquals(JobSchedulingStatus.DONE, zkRegistry.getJobSchedulingStatus(jobID));

		zkRegistry.clearJob(jobID);
		assertEquals(JobSchedulingStatus.PENDING, zkRegistry.getJobSchedulingStatus(jobID));
	} finally {
		zkHaService.close();
	}
}
Example 11
Source File: LeaderElectionTest.java From flink with Apache License 2.0 | 5 votes |
@Override
public void setup() throws Exception {
	try {
		testingServer = new TestingServer();
	} catch (Exception e) {
		throw new RuntimeException("Could not start ZooKeeper testing cluster.", e);
	}

	configuration = new Configuration();

	configuration.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, testingServer.getConnectString());
	configuration.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");

	client = ZooKeeperUtils.startCuratorFramework(configuration);
}
Example 12
Source File: ZooKeeperUtilityFactory.java From flink with Apache License 2.0 | 5 votes |
public ZooKeeperUtilityFactory(Configuration configuration, String path) throws Exception {
	Preconditions.checkNotNull(path, "path");

	root = ZooKeeperUtils.startCuratorFramework(configuration);

	root.newNamespaceAwareEnsurePath(path).ensure(root.getZookeeperClient());
	facade = root.usingNamespace(ZooKeeperUtils.generateZookeeperPath(root.getNamespace(), path));
}
Example 13
Source File: ZooKeeperLeaderElectionTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Before
public void before() {
	try {
		testingServer = new TestingServer();
	} catch (Exception e) {
		throw new RuntimeException("Could not start ZooKeeper testing cluster.", e);
	}

	configuration = new Configuration();

	configuration.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, testingServer.getConnectString());
	configuration.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");

	client = ZooKeeperUtils.startCuratorFramework(configuration);
}
Example 14
Source File: LeaderElectionTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public void setup() throws Exception {
	try {
		testingServer = new TestingServer();
	} catch (Exception e) {
		throw new RuntimeException("Could not start ZooKeeper testing cluster.", e);
	}

	configuration = new Configuration();

	configuration.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, testingServer.getConnectString());
	configuration.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");

	client = ZooKeeperUtils.startCuratorFramework(configuration);
}
Example 15
Source File: ZooKeeperCompletedCheckpointStoreTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Tests that subsumed checkpoints are discarded.
 */
@Test
public void testDiscardingSubsumedCheckpoints() throws Exception {
	final SharedStateRegistry sharedStateRegistry = new SharedStateRegistry();
	final Configuration configuration = new Configuration();
	configuration.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, zooKeeperResource.getConnectString());

	final CuratorFramework client = ZooKeeperUtils.startCuratorFramework(configuration);
	final ZooKeeperCompletedCheckpointStore checkpointStore = createZooKeeperCheckpointStore(client);

	try {
		final CompletedCheckpointStoreTest.TestCompletedCheckpoint checkpoint1 = CompletedCheckpointStoreTest.createCheckpoint(0, sharedStateRegistry);
		checkpointStore.addCheckpoint(checkpoint1);
		assertThat(checkpointStore.getAllCheckpoints(), Matchers.contains(checkpoint1));

		final CompletedCheckpointStoreTest.TestCompletedCheckpoint checkpoint2 = CompletedCheckpointStoreTest.createCheckpoint(1, sharedStateRegistry);
		checkpointStore.addCheckpoint(checkpoint2);

		final List<CompletedCheckpoint> allCheckpoints = checkpointStore.getAllCheckpoints();
		assertThat(allCheckpoints, Matchers.contains(checkpoint2));
		assertThat(allCheckpoints, Matchers.not(Matchers.contains(checkpoint1)));

		// verify that the subsumed checkpoint is discarded
		CompletedCheckpointStoreTest.verifyCheckpointDiscarded(checkpoint1);
	} finally {
		client.close();
	}
}
Example 16
Source File: ZooKeeperJobGraphStoreTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Tests that we fail with an exception if the job cannot be removed from the
 * ZooKeeperJobGraphStore.
 *
 * <p>Tests that a closed ZooKeeperJobGraphStore no longer holds any locks.
 */
@Test
public void testJobGraphRemovalFailureAndLockRelease() throws Exception {
	try (final CuratorFramework client = ZooKeeperUtils.startCuratorFramework(configuration)) {
		final TestingRetrievableStateStorageHelper<JobGraph> stateStorage = new TestingRetrievableStateStorageHelper<>();
		final ZooKeeperJobGraphStore submittedJobGraphStore = createSubmittedJobGraphStore(client, stateStorage);
		submittedJobGraphStore.start(null);
		final ZooKeeperJobGraphStore otherSubmittedJobGraphStore = createSubmittedJobGraphStore(client, stateStorage);
		otherSubmittedJobGraphStore.start(null);

		final JobGraph jobGraph = new JobGraph();
		submittedJobGraphStore.putJobGraph(jobGraph);

		final JobGraph recoveredJobGraph = otherSubmittedJobGraphStore.recoverJobGraph(jobGraph.getJobID());

		assertThat(recoveredJobGraph, is(notNullValue()));

		try {
			otherSubmittedJobGraphStore.removeJobGraph(recoveredJobGraph.getJobID());
			fail("It should not be possible to remove the JobGraph since the first store still has a lock on it.");
		} catch (Exception ignored) {
			// expected
		}

		submittedJobGraphStore.stop();

		// now we should be able to delete the job graph
		otherSubmittedJobGraphStore.removeJobGraph(recoveredJobGraph.getJobID());

		assertThat(otherSubmittedJobGraphStore.recoverJobGraph(recoveredJobGraph.getJobID()), is(nullValue()));

		otherSubmittedJobGraphStore.stop();
	}
}
Example 17
Source File: HighAvailabilityServicesUtils.java From flink with Apache License 2.0 | 4 votes |
public static HighAvailabilityServices createHighAvailabilityServices(
		Configuration configuration,
		Executor executor,
		AddressResolution addressResolution) throws Exception {
	HighAvailabilityMode highAvailabilityMode = HighAvailabilityMode.fromConfig(configuration);

	switch (highAvailabilityMode) {
		case NONE:
			final Tuple2<String, Integer> hostnamePort = getJobManagerAddress(configuration);

			final String resourceManagerRpcUrl = AkkaRpcServiceUtils.getRpcUrl(
				hostnamePort.f0,
				hostnamePort.f1,
				AkkaRpcServiceUtils.createWildcardName(ResourceManager.RESOURCE_MANAGER_NAME),
				addressResolution,
				configuration);
			final String dispatcherRpcUrl = AkkaRpcServiceUtils.getRpcUrl(
				hostnamePort.f0,
				hostnamePort.f1,
				AkkaRpcServiceUtils.createWildcardName(Dispatcher.DISPATCHER_NAME),
				addressResolution,
				configuration);
			final String webMonitorAddress = getWebMonitorAddress(
				configuration,
				addressResolution);

			return new StandaloneHaServices(
				resourceManagerRpcUrl,
				dispatcherRpcUrl,
				webMonitorAddress);

		case ZOOKEEPER:
			BlobStoreService blobStoreService = BlobUtils.createBlobStoreFromConfig(configuration);

			return new ZooKeeperHaServices(
				ZooKeeperUtils.startCuratorFramework(configuration),
				executor,
				configuration,
				blobStoreService);

		case FACTORY_CLASS:
			return createCustomHAServices(configuration, executor);

		default:
			throw new Exception("Recovery mode " + highAvailabilityMode + " is not supported.");
	}
}
Example 18
Source File: ZooKeeperHADispatcherTest.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/**
 * Tests that the {@link Dispatcher} releases a locked {@link SubmittedJobGraph} if it
 * lost the leadership.
 */
@Test
public void testSubmittedJobGraphRelease() throws Exception {
	final CuratorFramework client = ZooKeeperUtils.startCuratorFramework(configuration);
	final CuratorFramework otherClient = ZooKeeperUtils.startCuratorFramework(configuration);

	try (final TestingHighAvailabilityServices testingHighAvailabilityServices = new TestingHighAvailabilityServices()) {
		testingHighAvailabilityServices.setSubmittedJobGraphStore(ZooKeeperUtils.createSubmittedJobGraphs(client, configuration));

		final ZooKeeperSubmittedJobGraphStore otherSubmittedJobGraphStore = ZooKeeperUtils.createSubmittedJobGraphs(
			otherClient, configuration);

		otherSubmittedJobGraphStore.start(NoOpSubmittedJobGraphListener.INSTANCE);

		final TestingLeaderElectionService leaderElectionService = new TestingLeaderElectionService();
		testingHighAvailabilityServices.setDispatcherLeaderElectionService(leaderElectionService);

		final TestingDispatcher dispatcher = createDispatcher(
			testingHighAvailabilityServices,
			new TestingJobManagerRunnerFactory(new CompletableFuture<>(), new CompletableFuture<>(), CompletableFuture.completedFuture(null)));

		dispatcher.start();

		try {
			final DispatcherId expectedLeaderId = DispatcherId.generate();
			leaderElectionService.isLeader(expectedLeaderId.toUUID()).get();

			final DispatcherGateway dispatcherGateway = dispatcher.getSelfGateway(DispatcherGateway.class);

			final JobGraph nonEmptyJobGraph = DispatcherHATest.createNonEmptyJobGraph();
			final CompletableFuture<Acknowledge> submissionFuture = dispatcherGateway.submitJob(nonEmptyJobGraph, TIMEOUT);
			submissionFuture.get();

			Collection<JobID> jobIds = otherSubmittedJobGraphStore.getJobIds();

			final JobID jobId = nonEmptyJobGraph.getJobID();
			assertThat(jobIds, Matchers.contains(jobId));

			leaderElectionService.notLeader();

			// wait for the job to properly terminate
			final CompletableFuture<Void> jobTerminationFuture = dispatcher.getJobTerminationFuture(jobId, TIMEOUT);
			jobTerminationFuture.get();

			// recover the job
			final SubmittedJobGraph submittedJobGraph = otherSubmittedJobGraphStore.recoverJobGraph(jobId);

			assertThat(submittedJobGraph, is(notNullValue()));

			// check that the other submitted job graph store can remove the job graph after the original leader
			// has lost its leadership
			otherSubmittedJobGraphStore.removeJobGraph(jobId);

			jobIds = otherSubmittedJobGraphStore.getJobIds();

			assertThat(jobIds, Matchers.not(Matchers.contains(jobId)));
		} finally {
			RpcUtils.terminateRpcEndpoint(dispatcher, TIMEOUT);
			client.close();
			otherClient.close();
		}
	}
}
Example 19
Source File: ZooKeeperTestEnvironment.java From flink with Apache License 2.0 | 4 votes |
/**
 * Creates a new client for the started ZooKeeper server/cluster.
 */
public CuratorFramework createClient() {
	Configuration config = new Configuration();
	config.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, getConnectString());
	return ZooKeeperUtils.startCuratorFramework(config);
}
Example 20
Source File: ZooKeeperDefaultDispatcherRunnerTest.java From flink with Apache License 2.0 | 4 votes |
/**
 * See FLINK-11665.
 */
@Test
public void testResourceCleanupUnderLeadershipChange() throws Exception {
	final TestingRpcService rpcService = testingRpcServiceResource.getTestingRpcService();
	final TestingLeaderElectionService dispatcherLeaderElectionService = new TestingLeaderElectionService();

	final CuratorFramework client = ZooKeeperUtils.startCuratorFramework(configuration);
	try (final TestingHighAvailabilityServices highAvailabilityServices = new TestingHighAvailabilityServicesBuilder()
			.setRunningJobsRegistry(new ZooKeeperRunningJobsRegistry(client, configuration))
			.setDispatcherLeaderElectionService(dispatcherLeaderElectionService)
			.setJobMasterLeaderRetrieverFunction(jobId -> ZooKeeperUtils.createLeaderRetrievalService(client, configuration))
			.build()) {

		final PartialDispatcherServices partialDispatcherServices = new PartialDispatcherServices(
			configuration,
			highAvailabilityServices,
			CompletableFuture::new,
			blobServer,
			new TestingHeartbeatServices(),
			UnregisteredMetricGroups::createUnregisteredJobManagerMetricGroup,
			new MemoryArchivedExecutionGraphStore(),
			fatalErrorHandler,
			VoidHistoryServerArchivist.INSTANCE,
			null);

		final JobGraph jobGraph = createJobGraphWithBlobs();

		final DefaultDispatcherRunnerFactory defaultDispatcherRunnerFactory = DefaultDispatcherRunnerFactory.createSessionRunner(SessionDispatcherFactory.INSTANCE);

		try (final DispatcherRunner dispatcherRunner = createDispatcherRunner(
				rpcService,
				dispatcherLeaderElectionService,
				() -> createZooKeeperJobGraphStore(client),
				partialDispatcherServices,
				defaultDispatcherRunnerFactory)) {

			// initial run
			DispatcherGateway dispatcherGateway = grantLeadership(dispatcherLeaderElectionService);

			LOG.info("Initial job submission {}.", jobGraph.getJobID());
			dispatcherGateway.submitJob(jobGraph, TESTING_TIMEOUT).get();

			dispatcherLeaderElectionService.notLeader();

			// recovering submitted jobs
			LOG.info("Re-grant leadership first time.");
			dispatcherGateway = grantLeadership(dispatcherLeaderElectionService);

			LOG.info("Cancel recovered job {}.", jobGraph.getJobID());
			// cancellation of the job should remove everything
			final CompletableFuture<JobResult> jobResultFuture = dispatcherGateway.requestJobResult(jobGraph.getJobID(), TESTING_TIMEOUT);
			dispatcherGateway.cancelJob(jobGraph.getJobID(), TESTING_TIMEOUT).get();

			// a successful cancellation should eventually remove all job information
			final JobResult jobResult = jobResultFuture.get();

			assertThat(jobResult.getApplicationStatus(), is(ApplicationStatus.CANCELED));

			dispatcherLeaderElectionService.notLeader();

			// check that the job has been removed from ZooKeeper
			final ZooKeeperJobGraphStore submittedJobGraphStore = createZooKeeperJobGraphStore(client);

			CommonTestUtils.waitUntilCondition(() -> submittedJobGraphStore.getJobIds().isEmpty(), Deadline.fromNow(VERIFICATION_TIMEOUT), 20L);
		}
	}

	// check resource clean up
	assertThat(clusterHaStorageDir.listFiles(), is(emptyArray()));
}