Java Code Examples for org.apache.curator.test.TestingServer#close()
The following examples show how to use
org.apache.curator.test.TestingServer#close().
Each example notes the project and source file it was taken from.
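Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the usual TestingServer lifecycle: start an in-process ZooKeeper server, connect a Curator client to it, and close both when finished. Because TestingServer implements Closeable, try-with-resources calls close() automatically; the class name and the /example path are illustrative only.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.test.TestingServer;

public class TestingServerCloseSketch {

    public static void main(String[] args) throws Exception {
        // TestingServer implements Closeable, so try-with-resources
        // guarantees close() runs even if the body throws.
        try (TestingServer server = new TestingServer()) {
            CuratorFramework client = CuratorFrameworkFactory.newClient(
                    server.getConnectString(), new ExponentialBackoffRetry(1000, 3));
            client.start();
            client.blockUntilConnected();
            client.create().forPath("/example", "data".getBytes());
            client.close();   // close the client before the server
        }                     // server.close() is invoked implicitly here
    }
}

Closing the Curator client before the server, as most of the examples below do, avoids connection-retry noise during teardown.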
Example 1
Source File: SampleApplicationTests.java From spring-cloud-zookeeper with Apache License 2.0
@Test
public void contextLoads() throws Exception {
    int zkPort = SocketUtils.findAvailableTcpPort();
    TestingServer server = new TestingServer(zkPort);
    int port = SocketUtils.findAvailableTcpPort(zkPort + 1);
    ConfigurableApplicationContext context = new SpringApplicationBuilder(
            SampleZookeeperApplication.class).run("--server.port=" + port,
                    "--management.endpoints.web.exposure.include=*",
                    "--spring.cloud.zookeeper.connect-string=localhost:" + zkPort);
    ResponseEntity<String> response = new TestRestTemplate()
            .getForEntity("http://localhost:" + port + "/hi", String.class);
    assertThat(response.getStatusCode()).isEqualTo(HttpStatus.OK);
    context.close();
    server.close();
}
Example 2
Source File: ZookeeperEmbedded.java From eagle with Apache License 2.0
/**
 * Try to start zookeeper, if failed, retry with <code>port+1</code>.
 *
 * @return finally bound port
 */
public int start() throws Exception {
    FileUtils.deleteQuietly(logDir);
    int i = 0;
    boolean success = false;
    Exception lastException = null;
    while (!success && i < MAX_RETRIES) {
        try {
            server = new TestingServer(this.port, this.logDir);
            ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
            zookeeper = CuratorFrameworkFactory.newClient(server.getConnectString(), retryPolicy);
            zookeeper.start();
            success = true;
        } catch (BindException exception) {
            lastException = exception;
            i++;
            LOG.warn("Port {} was taken, trying {}", this.port, this.port + i);
            this.port = this.port + i;
            try {
                server.close();
                zookeeper.close();
            } catch (Throwable throwable) {
                // ignored
            }
        }
    }
    if (!success) {
        LOG.error("Failed to start zookeeper after trying {} times", MAX_RETRIES);
        throw lastException;
    }
    return this.port;
}
Example 3
Source File: ZookeeperClusterTest.java From pinpoint with Apache License 2.0
private static void closeZookeeperServer(TestingServer mockZookeeperServer) throws Exception {
    try {
        if (mockZookeeperServer != null) {
            mockZookeeperServer.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 4
Source File: ClusterTest.java From pinpoint with Apache License 2.0
private static void closeZookeeperServer(TestingServer mockZookeeperServer) throws Exception {
    try {
        if (mockZookeeperServer != null) {
            mockZookeeperServer.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 5
Source File: CuratorFrameworkFactoryBeanTests.java From spring-cloud-cluster with Apache License 2.0
@Test
public void test() throws Exception {
    TestingServer testingServer = new TestingServer();
    CuratorFrameworkFactoryBean fb = new CuratorFrameworkFactoryBean(testingServer.getConnectString());
    CuratorFramework client = fb.getObject();
    fb.start();
    assertTrue(client.getState().equals(CuratorFrameworkState.STARTED));
    fb.stop();
    assertTrue(client.getState().equals(CuratorFrameworkState.STOPPED));
    testingServer.close();
}
Example 6
Source File: SnowflakeUIDTest.java From datawave with Apache License 2.0
@Test
public void testZkCache() throws Exception {
    long startingTimestamp = 12345678;
    int myMachineId = 41610;
    int startingSequence = SnowflakeUID.MAX_SEQUENCE_ID - 1;

    TestingServer zkTestServer = new TestingServer(2888);
    try {
        ZkSnowflakeCache.init(zkTestServer.getConnectString(), 5, 1000);
        ZkSnowflakeCache.store(BigInteger.valueOf(myMachineId), startingTimestamp); // stash the timestamp

        long expectedTimestamp;
        long storedTimestamp;
        int expectedSequence;

        // timestamp should be incremented by 1 because this startingTimestamp was stored above
        SnowflakeUIDBuilder builder = SnowflakeUID.builder(startingTimestamp, 10, 10, 10, startingSequence);
        storedTimestamp = startingTimestamp + 1;
        assertEquals(storedTimestamp, ZkSnowflakeCache.getLastCachedTid((BigInteger.valueOf(myMachineId))));

        SnowflakeUID uid = builder.newId();
        // using the started sequence id
        expectedSequence = startingSequence;
        // expected timestamp should be the one previously stored
        expectedTimestamp = storedTimestamp;
        // stored timestamp should not have changed
        // storedTimestamp = storedTimestamp;

        assertEquals(myMachineId, uid.getMachineId());
        assertEquals(expectedTimestamp, uid.getTimestamp());
        assertEquals(expectedSequence, uid.getSequenceId());
        assertEquals(storedTimestamp, ZkSnowflakeCache.getLastCachedTid((BigInteger.valueOf(uid.getMachineId()))));

        uid = builder.newId();
        // new sequence id
        expectedSequence++;
        // expected timestamp should be the one previously stored
        expectedTimestamp = storedTimestamp;
        // stored timestamp however should have incremented because maxed out the sequence id
        storedTimestamp++;

        assertEquals(expectedTimestamp, uid.getTimestamp());
        assertEquals(expectedSequence, uid.getSequenceId());
        assertEquals(storedTimestamp, ZkSnowflakeCache.getLastCachedTid((BigInteger.valueOf(uid.getMachineId()))));

        uid = builder.newId();
        // sequence id should have rolled
        expectedSequence = 0;
        // expected timestamp should be the one previously stored
        expectedTimestamp = storedTimestamp;
        // stored timestamp should not have changed
        // storedTimestamp = storedTimestamp;

        assertEquals(expectedTimestamp, uid.getTimestamp());
        assertEquals(expectedSequence, uid.getSequenceId());
        assertEquals(storedTimestamp, ZkSnowflakeCache.getLastCachedTid((BigInteger.valueOf(uid.getMachineId()))));
    } finally {
        ZkSnowflakeCache.stop();
        zkTestServer.close();
    }
}
Example 7
Source File: SnowflakeUIDTest.java From datawave with Apache License 2.0
@Test
public void testZkCacheInitProps() throws Exception {
    int myMachineId = 41610;
    int startingSequence = 0;

    TestingServer zkTestServer = new TestingServer(2888);
    try {
        Configuration conf = new Configuration(this.conf);
        conf.set("snowflake.zookeepers", zkTestServer.getConnectString());
        conf.set("snowflake.zookeeper.enabled", "true");
        conf.set(UIDConstants.CONFIG_MACHINE_ID_KEY, Integer.toString(myMachineId));
        UIDBuilder<UID> builder = UID.builder(conf);

        long expectedTimestamp;
        long storedTimestamp;
        int expectedSequence;

        SnowflakeUID uid = (SnowflakeUID) (builder.newId());
        // sequence id should be what we initialized with
        expectedSequence = startingSequence;
        // expected timestamp should be what ever we got
        expectedTimestamp = uid.getTimestamp();
        // stored timestamp should be this timestamp
        storedTimestamp = expectedTimestamp;

        assertEquals(myMachineId, uid.getMachineId());
        assertTrue("Not initialized", ZkSnowflakeCache.isInitialized());
        assertEquals(expectedTimestamp, uid.getTimestamp());
        assertEquals(expectedSequence, uid.getSequenceId());
        assertEquals(storedTimestamp, ZkSnowflakeCache.getLastCachedTid((BigInteger.valueOf(uid.getMachineId()))));

        uid = (SnowflakeUID) (builder.newId());
        // sequence id should be incremented
        expectedSequence++;
        // expected timestamp should be the one previously stored
        expectedTimestamp = storedTimestamp;
        // stored timestamp should be unchanged
        // storedTimestamp = storedTimestamp;

        assertEquals(expectedTimestamp, uid.getTimestamp());
        assertEquals(expectedSequence, uid.getSequenceId());
        assertEquals(storedTimestamp, ZkSnowflakeCache.getLastCachedTid((BigInteger.valueOf(uid.getMachineId()))));

        uid = (SnowflakeUID) (builder.newId());
        // sequence id should be incremented
        expectedSequence++;
        // expected timestamp should be the one previously stored
        expectedTimestamp = storedTimestamp;
        // stored timestamp should not have changed
        // storedTimestamp = storedTimestamp;

        assertEquals(expectedTimestamp, uid.getTimestamp());
        assertEquals(expectedSequence, uid.getSequenceId());
        assertEquals(storedTimestamp, ZkSnowflakeCache.getLastCachedTid((BigInteger.valueOf(uid.getMachineId()))));
    } finally {
        ZkSnowflakeCache.stop();
        zkTestServer.close();
    }
}
Example 8
Source File: ZkStoreBucketServiceTest.java From pravega with Apache License 2.0
@Test(timeout = 60000)
public void testOwnershipOfExistingBucket() throws Exception {
    RequestTracker requestTracker = new RequestTracker(true);
    TestingServer zkServer2 = new TestingServerStarter().start();
    zkServer2.start();
    CuratorFramework zkClient2 = CuratorFrameworkFactory.newClient(zkServer2.getConnectString(),
            10000, 1000, (r, e, s) -> false);
    zkClient2.start();

    @Cleanup("shutdownNow")
    ScheduledExecutorService executor2 = Executors.newScheduledThreadPool(10);
    String hostId = UUID.randomUUID().toString();

    BucketStore bucketStore2 = StreamStoreFactory.createZKBucketStore(
            ImmutableMap.of(BucketStore.ServiceType.RetentionService, 1), zkClient2, executor2);
    StreamMetadataStore streamMetadataStore2 = StreamStoreFactory.createZKStore(zkClient2, executor2);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createInMemoryStore(executor2);
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    StreamMetadataTasks streamMetadataTasks2 = new StreamMetadataTasks(streamMetadataStore2, bucketStore2,
            taskMetadataStore, segmentHelper, executor2, hostId, GrpcAuthHelper.getDisabledAuthHelper(),
            requestTracker);

    String scope = "scope1";
    String streamName = "stream1";
    bucketStore2.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope, streamName, executor2).join();

    String scope2 = "scope2";
    String streamName2 = "stream2";
    bucketStore2.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope2, streamName2, executor2).join();

    BucketServiceFactory bucketStoreFactory = new BucketServiceFactory(hostId, bucketStore2, 5);
    BucketManager service2 = bucketStoreFactory.createRetentionService(Duration.ofMillis(5000),
            stream -> CompletableFuture.completedFuture(null), executor2);

    service2.startAsync();
    service2.awaitRunning();

    Thread.sleep(10000);
    assertTrue(service2.getBucketServices().values().stream().allMatch(x -> x.getKnownStreams().size() == 2));

    service2.stopAsync();
    service2.awaitTerminated();

    zkClient2.close();
    zkServer2.close();
    streamMetadataTasks2.close();
    ExecutorServiceHelpers.shutdown(executor2);
}