Java Code Examples for org.apache.distributedlog.DistributedLogConfiguration#addConfiguration()
The following examples show how to use org.apache.distributedlog.DistributedLogConfiguration#addConfiguration(). Each example is taken from the source file named above it.
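Every example below follows the same copy-then-override pattern: addConfiguration() pulls the settings of a shared base configuration into a freshly created DistributedLogConfiguration, and per-use overrides are then applied to that local copy. The following is a minimal sketch of the pattern, assuming a hypothetical baseConf; the specific settings overridden here are illustrative only, not taken from any particular example.

import org.apache.distributedlog.DistributedLogConfiguration;

public class LocalConfSketch {

    public static void main(String[] args) {
        // A shared base configuration (hypothetical values, for illustration only).
        DistributedLogConfiguration baseConf = new DistributedLogConfiguration();
        baseConf.setEnsembleSize(3);
        baseConf.setWriteQuorumSize(3);

        // Pull the base settings into a fresh local configuration ...
        DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
        confLocal.addConfiguration(baseConf);

        // ... then override only what this particular writer or test needs.
        confLocal.setOutputBufferSize(0);
        confLocal.setAckQuorumSize(2);

        System.out.println("ensemble size = " + confLocal.getEnsembleSize());
        System.out.println("output buffer = " + confLocal.getOutputBufferSize());
    }
}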
Example 1
Source File: DLFileSystem.java From distributedlog with Apache License 2.0 | 6 votes |
@Override
public FSDataOutputStream create(Path path,
                                 FsPermission fsPermission,
                                 boolean overwrite,
                                 int bufferSize,
                                 short replication,
                                 long blockSize,
                                 Progressable progressable) throws IOException {
    // for overwrite, delete the existing file first.
    if (overwrite) {
        delete(path, false);
    }

    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(dlConf);
    confLocal.setEnsembleSize(replication);
    confLocal.setWriteQuorumSize(replication);
    confLocal.setAckQuorumSize(replication);
    confLocal.setMaxLogSegmentBytes(blockSize);
    return append(path, bufferSize, Optional.of(confLocal));
}
Example 2
Source File: TestFederatedZKLogMetadataStore.java From distributedlog with Apache License 2.0 | 6 votes |
@Test(timeout = 60000)
public void testZooKeeperSessionExpired() throws Exception {
    Set<String> allLogs = createLogs(2 * maxLogsPerSubnamespace, "test-zookeeper-session-expired-");
    TestNamespaceListenerWithExpectedSize listener =
            new TestNamespaceListenerWithExpectedSize(2 * maxLogsPerSubnamespace + 1);
    metadataStore.registerNamespaceListener(listener);
    ZooKeeperClientUtils.expireSession(zkc,
            BKNamespaceDriver.getZKServersFromDLUri(uri), zkSessionTimeoutMs);
    String testLogName = "test-log-name";
    allLogs.add(testLogName);

    DistributedLogConfiguration anotherConf = new DistributedLogConfiguration();
    anotherConf.addConfiguration(baseConf);
    ZooKeeperClient anotherZkc = TestZooKeeperClientBuilder.newBuilder()
            .uri(uri)
            .sessionTimeoutMs(zkSessionTimeoutMs)
            .build();
    FederatedZKLogMetadataStore anotherMetadataStore =
            new FederatedZKLogMetadataStore(anotherConf, uri, anotherZkc, scheduler);
    Utils.ioResult(anotherMetadataStore.createLog(testLogName));
    listener.waitForDone();
    Set<String> receivedLogs = listener.getResult();
    assertEquals(2 * maxLogsPerSubnamespace + 1, receivedLogs.size());
    assertEquals(allLogs, receivedLogs);
}
Example 3
Source File: TestLedgerAllocator.java From distributedlog with Apache License 2.0 | 6 votes |
@Test(timeout = 60000)
public void testAllocatorWithoutEnoughBookies() throws Exception {
    String allocationPath = "/allocator-without-enough-bookies";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setEnsembleSize(numBookies * 2);
    confLocal.setWriteQuorumSize(numBookies * 2);

    SimpleLedgerAllocator allocator1 = createAllocator(allocationPath, confLocal);
    allocator1.allocate();
    ZKTransaction txn1 = newTxn();

    try {
        Utils.ioResult(allocator1.tryObtain(txn1, NULL_LISTENER));
        fail("Should fail allocating ledger if there aren't enough bookies");
    } catch (AllocationException ioe) {
        // expected
        assertEquals(Phase.ERROR, ioe.getPhase());
    }

    byte[] data = zkc.get().getData(allocationPath, false, null);
    assertEquals(0, data.length);
}
Example 4
Source File: TestDistributedLogTool.java From distributedlog with Apache License 2.0 | 6 votes |
@Test(timeout = 60000)
public void testToolTruncateStream() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setLogSegmentCacheEnabled(false);

    DistributedLogManager dlm = DLMTestUtil.createNewDLM("testToolTruncateStream", confLocal, defaultUri);
    DLMTestUtil.generateCompletedLogSegments(dlm, confLocal, 3, 1000);

    DLSN dlsn = new DLSN(2, 1, 0);
    TruncateStreamCommand cmd = new TruncateStreamCommand();
    cmd.setDlsn(dlsn);
    cmd.setUri(defaultUri);
    cmd.setStreamName("testToolTruncateStream");
    cmd.setForce(true);

    assertEquals(0, cmd.runCmd());

    LogReader reader = dlm.getInputStream(0);
    LogRecordWithDLSN record = reader.readNext(false);
    assertEquals(dlsn, record.getDlsn());

    reader.close();
    dlm.close();
}
Example 5
Source File: TestFederatedZKLogMetadataStore.java From distributedlog with Apache License 2.0 | 6 votes |
@Before
public void setup() throws Exception {
    zkc = TestZooKeeperClientBuilder.newBuilder()
            .uri(createDLMURI("/"))
            .sessionTimeoutMs(zkSessionTimeoutMs)
            .build();
    scheduler = OrderedScheduler.newBuilder()
            .name("test-zk-logmetadata-store")
            .corePoolSize(2)
            .build();
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    conf.addConfiguration(baseConf);
    this.uri = createDLMURI("/" + runtime.getMethodName());
    FederatedZKLogMetadataStore.createFederatedNamespace(uri, zkc);
    metadataStore = new FederatedZKLogMetadataStore(conf, uri, zkc, scheduler);
}
Example 6
Source File: TestZKLogSegmentMetadataStore.java From distributedlog with Apache License 2.0 | 6 votes |
@Before
public void setup() throws Exception {
    zkc = TestZooKeeperClientBuilder.newBuilder()
            .uri(createDLMURI("/"))
            .sessionTimeoutMs(zkSessionTimeoutMs)
            .build();
    scheduler = OrderedScheduler.newBuilder()
            .name("test-zk-logsegment-metadata-store")
            .corePoolSize(1)
            .build();
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    conf.addConfiguration(baseConf);
    this.uri = createDLMURI("/" + runtime.getMethodName());
    lsmStore = new ZKLogSegmentMetadataStore(conf, zkc, scheduler);
    zkc.get().create(
            "/" + runtime.getMethodName(),
            new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE,
            CreateMode.PERSISTENT);
    this.rootZkPath = "/" + runtime.getMethodName();
}
Example 7
Source File: TestFederatedZKLogMetadataStore.java From distributedlog with Apache License 2.0 | 5 votes |
@Test(timeout = 60000)
public void testCreateLog() throws Exception {
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    conf.addConfiguration(baseConf);
    ZooKeeperClient anotherZkc = TestZooKeeperClientBuilder.newBuilder()
            .uri(uri)
            .sessionTimeoutMs(zkSessionTimeoutMs)
            .build();
    FederatedZKLogMetadataStore anotherMetadataStore =
            new FederatedZKLogMetadataStore(conf, uri, anotherZkc, scheduler);
    for (int i = 0; i < 2 * maxLogsPerSubnamespace; i++) {
        LogMetadataStore createStore, checkStore;
        if (i % 2 == 0) {
            createStore = metadataStore;
            checkStore = anotherMetadataStore;
        } else {
            createStore = anotherMetadataStore;
            checkStore = metadataStore;
        }
        String logName = "test-create-log-" + i;
        URI logUri = Utils.ioResult(createStore.createLog(logName));
        Optional<URI> logLocation = Utils.ioResult(checkStore.getLogLocation(logName));
        assertTrue("Log " + logName + " doesn't exist", logLocation.isPresent());
        assertEquals("Different log location " + logLocation.get() + " is found",
                logUri, logLocation.get());
    }
    assertEquals(2, metadataStore.getSubnamespaces().size());
    assertEquals(2, anotherMetadataStore.getSubnamespaces().size());
}
Example 8
Source File: TestZKLogMetadataStore.java From distributedlog with Apache License 2.0 | 5 votes |
@Before
public void setup() throws Exception {
    zkc = TestZooKeeperClientBuilder.newBuilder()
            .uri(createDLMURI("/"))
            .sessionTimeoutMs(zkSessionTimeoutMs)
            .build();
    scheduler = OrderedScheduler.newBuilder()
            .name("test-zk-logmetadata-store")
            .corePoolSize(1)
            .build();
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    conf.addConfiguration(baseConf);
    this.uri = createDLMURI("/" + runtime.getMethodName());
    metadataStore = new ZKLogMetadataStore(conf, uri, zkc, scheduler);
}
Example 9
Source File: TestBKLogSegmentEntryReader.java From distributedlog with Apache License 2.0 | 5 votes |
@Test(timeout = 60000)
public void testCloseReaderToCancelPendingReads() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setNumPrefetchEntriesPerLogSegment(10);
    confLocal.setMaxPrefetchEntriesPerLogSegment(10);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
    DLMTestUtil.generateCompletedLogSegments(dlm, confLocal, 1, 20);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(segments.size() + " log segments found, expected to be only one",
            1, segments.size());
    BKLogSegmentEntryReader reader = createEntryReader(segments.get(0), 0, confLocal);
    List<CompletableFuture<List<Entry.Reader>>> futures = Lists.newArrayList();
    for (int i = 0; i < 5; i++) {
        futures.add(reader.readNext(1));
    }
    assertFalse("Reader should not be closed yet", reader.isClosed());
    Utils.close(reader);
    for (CompletableFuture<List<Entry.Reader>> future : futures) {
        try {
            Utils.ioResult(future);
            fail("The read request should be cancelled");
        } catch (ReadCancelledException rce) {
            // expected
        }
    }
    assertFalse(reader.hasCaughtUpOnInprogress());
    assertTrue("Reader should be closed yet", reader.isClosed());
}
Example 10
Source File: TestDistributedLogService.java From distributedlog with Apache License 2.0 | 5 votes |
@Before
@Override
public void setup() throws Exception {
    super.setup();
    dlConf = new DistributedLogConfiguration();
    dlConf.addConfiguration(conf);
    dlConf.setLockTimeout(0)
            .setOutputBufferSize(0)
            .setPeriodicFlushFrequencyMilliSeconds(10)
            .setSchedulerShutdownTimeoutMs(100);
    serverConf = newLocalServerConf();
    uri = createDLMURI("/" + testName.getMethodName());
    ensureURICreated(uri);
    service = createService(serverConf, dlConf, latch);
}
Example 11
Source File: TestZKNamespaceWatcher.java From distributedlog with Apache License 2.0 | 5 votes |
@Test(timeout = 60000)
public void testSessionExpired() throws Exception {
    URI uri = createDLMURI("/" + runtime.getMethodName());
    zkc.get().create(uri.getPath(), new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    conf.addConfiguration(baseConf);
    ZKNamespaceWatcher watcher = new ZKNamespaceWatcher(conf, uri, zkc, scheduler);
    final CountDownLatch[] latches = new CountDownLatch[10];
    for (int i = 0; i < 10; i++) {
        latches[i] = new CountDownLatch(1);
    }
    final AtomicInteger numUpdates = new AtomicInteger(0);
    final AtomicReference<Set<String>> receivedLogs = new AtomicReference<Set<String>>(null);
    watcher.registerListener(new NamespaceListener() {
        @Override
        public void onStreamsChanged(Iterator<String> streams) {
            Set<String> streamSet = Sets.newHashSet(streams);
            int updates = numUpdates.incrementAndGet();
            receivedLogs.set(streamSet);
            latches[updates - 1].countDown();
        }
    });
    latches[0].await();
    createLogInNamespace(uri, "test1");
    latches[1].await();
    createLogInNamespace(uri, "test2");
    latches[2].await();
    assertEquals(2, receivedLogs.get().size());
    ZooKeeperClientUtils.expireSession(zkc,
            BKNamespaceDriver.getZKServersFromDLUri(uri), zkSessionTimeoutMs);
    latches[3].await();
    assertEquals(2, receivedLogs.get().size());
    createLogInNamespace(uri, "test3");
    latches[4].await();
    assertEquals(3, receivedLogs.get().size());
}
Example 12
Source File: TestBKLogSegmentEntryReader.java From distributedlog with Apache License 2.0 | 4 votes |
@Test(timeout = 60000)
public void testMaxPrefetchEntriesSmallBatch() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setNumPrefetchEntriesPerLogSegment(2);
    confLocal.setMaxPrefetchEntriesPerLogSegment(10);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
    generateCompletedLogSegments(dlm, confLocal, 1, 20);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(segments.size() + " log segments found, expected to be only one",
            1, segments.size());
    BKLogSegmentEntryReader reader = createEntryReader(segments.get(0), 0, confLocal);
    reader.start();

    // wait for the read ahead entries to become available
    while (reader.readAheadEntries.size() < 10) {
        TimeUnit.MILLISECONDS.sleep(10);
    }

    long txId = 1L;
    long entryId = 0L;

    assertEquals(10, reader.readAheadEntries.size());
    assertEquals(10, reader.getNextEntryId());
    assertFalse(reader.hasCaughtUpOnInprogress());

    // read first entry
    Entry.Reader entryReader = Utils.ioResult(reader.readNext(1)).get(0);
    LogRecordWithDLSN record = entryReader.nextRecord();
    while (null != record) {
        if (!record.isControl()) {
            DLMTestUtil.verifyLogRecord(record);
            assertEquals(txId, record.getTransactionId());
            ++txId;
        }
        DLSN dlsn = record.getDlsn();
        assertEquals(1L, dlsn.getLogSegmentSequenceNo());
        assertEquals(entryId, dlsn.getEntryId());
        record = entryReader.nextRecord();
    }
    ++entryId;
    assertEquals(2L, txId);

    // wait for the read ahead entries to become 10 again
    while (reader.readAheadEntries.size() < 10) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    assertEquals(10, reader.readAheadEntries.size());
    assertEquals(11, reader.getNextEntryId());
    assertFalse(reader.hasCaughtUpOnInprogress());

    Utils.close(reader);
}
Example 13
Source File: TestZKNamespaceWatcher.java From distributedlog with Apache License 2.0 | 4 votes |
@Test(timeout = 60000)
public void testNamespaceListener() throws Exception {
    URI uri = createDLMURI("/" + runtime.getMethodName());
    zkc.get().create(uri.getPath(), new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    conf.addConfiguration(baseConf);
    ZKNamespaceWatcher watcher = new ZKNamespaceWatcher(conf, uri, zkc, scheduler);
    final CountDownLatch[] latches = new CountDownLatch[10];
    for (int i = 0; i < 10; i++) {
        latches[i] = new CountDownLatch(1);
    }
    final AtomicInteger numUpdates = new AtomicInteger(0);
    final AtomicReference<Set<String>> receivedLogs = new AtomicReference<Set<String>>(null);
    watcher.registerListener(new NamespaceListener() {
        @Override
        public void onStreamsChanged(Iterator<String> streams) {
            Set<String> streamSet = Sets.newHashSet(streams);
            int updates = numUpdates.incrementAndGet();
            receivedLogs.set(streamSet);
            latches[updates - 1].countDown();
        }
    });
    // first update
    final Set<String> expectedLogs = Sets.newHashSet();
    latches[0].await();
    validateReceivedLogs(expectedLogs, receivedLogs.get());
    // create test1
    expectedLogs.add("test1");
    createLogInNamespace(uri, "test1");
    latches[1].await();
    validateReceivedLogs(expectedLogs, receivedLogs.get());
    // create invalid log
    createLogInNamespace(uri, ".test1");
    latches[2].await();
    validateReceivedLogs(expectedLogs, receivedLogs.get());
    // create test2
    expectedLogs.add("test2");
    createLogInNamespace(uri, "test2");
    latches[3].await();
    validateReceivedLogs(expectedLogs, receivedLogs.get());
    // delete test1
    expectedLogs.remove("test1");
    deleteLogInNamespace(uri, "test1");
    latches[4].await();
    validateReceivedLogs(expectedLogs, receivedLogs.get());
}
Example 14
Source File: TestBKLogSegmentEntryReader.java From distributedlog with Apache License 2.0 | 4 votes |
@Test(timeout = 60000)
public void testMaxPrefetchEntriesSmallSegment() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setNumPrefetchEntriesPerLogSegment(10);
    confLocal.setMaxPrefetchEntriesPerLogSegment(20);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
    generateCompletedLogSegments(dlm, confLocal, 1, 5);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(segments.size() + " log segments found, expected to be only one",
            1, segments.size());
    BKLogSegmentEntryReader reader = createEntryReader(segments.get(0), 0, confLocal);
    reader.start();

    // wait for the read ahead entries to become available
    while (reader.readAheadEntries.size() < (reader.getLastAddConfirmed() + 1)) {
        TimeUnit.MILLISECONDS.sleep(10);
    }

    long txId = 1L;
    long entryId = 0L;

    assertEquals((reader.getLastAddConfirmed() + 1), reader.readAheadEntries.size());
    assertEquals((reader.getLastAddConfirmed() + 1), reader.getNextEntryId());

    // read first entry
    Entry.Reader entryReader = Utils.ioResult(reader.readNext(1)).get(0);
    LogRecordWithDLSN record = entryReader.nextRecord();
    while (null != record) {
        if (!record.isControl()) {
            DLMTestUtil.verifyLogRecord(record);
            assertEquals(txId, record.getTransactionId());
            ++txId;
        }
        DLSN dlsn = record.getDlsn();
        assertEquals(1L, dlsn.getLogSegmentSequenceNo());
        assertEquals(entryId, dlsn.getEntryId());
        record = entryReader.nextRecord();
    }
    ++entryId;
    assertEquals(2L, txId);
    assertEquals(reader.getLastAddConfirmed(), reader.readAheadEntries.size());
    assertEquals((reader.getLastAddConfirmed() + 1), reader.getNextEntryId());
    assertFalse(reader.hasCaughtUpOnInprogress());

    Utils.close(reader);
}
Example 15
Source File: TestBKLogSegmentEntryReader.java From distributedlog with Apache License 2.0 | 4 votes |
@Test(timeout = 60000)
public void testMaxPrefetchEntriesLargeBatch() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setNumPrefetchEntriesPerLogSegment(10);
    confLocal.setMaxPrefetchEntriesPerLogSegment(5);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
    generateCompletedLogSegments(dlm, confLocal, 1, 20);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(segments.size() + " log segments found, expected to be only one",
            1, segments.size());
    BKLogSegmentEntryReader reader = createEntryReader(segments.get(0), 0, confLocal);
    reader.start();

    // wait for the read ahead entries to become available
    while (reader.readAheadEntries.size() < 5) {
        TimeUnit.MILLISECONDS.sleep(10);
    }

    long txId = 1L;
    long entryId = 0L;

    assertEquals(5, reader.readAheadEntries.size());
    assertEquals(5, reader.getNextEntryId());

    // read first entry
    Entry.Reader entryReader = Utils.ioResult(reader.readNext(1)).get(0);
    LogRecordWithDLSN record = entryReader.nextRecord();
    while (null != record) {
        if (!record.isControl()) {
            DLMTestUtil.verifyLogRecord(record);
            assertEquals(txId, record.getTransactionId());
            ++txId;
        }
        DLSN dlsn = record.getDlsn();
        assertEquals(1L, dlsn.getLogSegmentSequenceNo());
        assertEquals(entryId, dlsn.getEntryId());
        record = entryReader.nextRecord();
    }
    ++entryId;
    assertEquals(2L, txId);

    // wait for the read ahead entries to become 10 again
    while (reader.readAheadEntries.size() < 5) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    assertEquals(5, reader.readAheadEntries.size());
    assertEquals(6, reader.getNextEntryId());
    assertFalse(reader.hasCaughtUpOnInprogress());

    Utils.close(reader);
}
Example 16
Source File: TestBKLogSegmentEntryReader.java From distributedlog with Apache License 2.0 | 4 votes |
@Test(timeout = 60000)
public void testReadEntriesFromCompleteLogSegment() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setNumPrefetchEntriesPerLogSegment(10);
    confLocal.setMaxPrefetchEntriesPerLogSegment(10);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
    generateCompletedLogSegments(dlm, confLocal, 1, 20);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(segments.size() + " log segments found, expected to be only one",
            1, segments.size());
    BKLogSegmentEntryReader reader = createEntryReader(segments.get(0), 0, confLocal);
    reader.start();

    boolean done = false;
    long txId = 1L;
    long entryId = 0L;
    while (!done) {
        Entry.Reader entryReader;
        try {
            entryReader = Utils.ioResult(reader.readNext(1)).get(0);
        } catch (EndOfLogSegmentException eol) {
            done = true;
            continue;
        }
        LogRecordWithDLSN record = entryReader.nextRecord();
        while (null != record) {
            if (!record.isControl()) {
                DLMTestUtil.verifyLogRecord(record);
                assertEquals(txId, record.getTransactionId());
                ++txId;
            }
            DLSN dlsn = record.getDlsn();
            assertEquals(1L, dlsn.getLogSegmentSequenceNo());
            assertEquals(entryId, dlsn.getEntryId());
            record = entryReader.nextRecord();
        }
        ++entryId;
    }
    assertEquals(21, txId);
    assertFalse(reader.hasCaughtUpOnInprogress());
    Utils.close(reader);
}
Example 17
Source File: TestDistributedLogService.java From distributedlog with Apache License 2.0 | 4 votes |
@Test(timeout = 60000)
public void testAcquireStreamsWhenExceedMaxAcquiredPartitions() throws Exception {
    String streamName = testName.getMethodName() + "_0000";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(dlConf);
    confLocal.setMaxCachedPartitionsPerProxy(-1);
    confLocal.setMaxAcquiredPartitionsPerProxy(1);
    ServerConfiguration serverConfLocal = new ServerConfiguration();
    serverConfLocal.addConfiguration(serverConf);
    serverConfLocal.setStreamPartitionConverterClass(DelimiterStreamPartitionConverter.class);
    DistributedLogServiceImpl serviceLocal = createService(serverConfLocal, confLocal);
    Stream stream = serviceLocal.getLogWriter(streamName);

    // stream is cached
    assertNotNull(stream);
    assertEquals(1, serviceLocal.getStreamManager().numCached());

    // create write ops
    WriteOp op0 = createWriteOp(service, streamName, 0L);
    stream.submit(op0);
    WriteResponse wr0 = Await.result(op0.result());
    assertEquals("Op 0 should succeed",
            StatusCode.SUCCESS, wr0.getHeader().getCode());
    assertEquals(1, serviceLocal.getStreamManager().numAcquired());

    // should be able to cache partitions from same stream
    String anotherStreamName = testName.getMethodName() + "_0001";
    Stream anotherStream = serviceLocal.getLogWriter(anotherStreamName);
    assertNotNull(anotherStream);
    assertEquals(2, serviceLocal.getStreamManager().numCached());

    // create write ops
    WriteOp op1 = createWriteOp(service, anotherStreamName, 0L);
    anotherStream.submit(op1);
    WriteResponse wr1 = Await.result(op1.result());
    assertEquals("Op 1 should fail",
            StatusCode.STREAM_UNAVAILABLE, wr1.getHeader().getCode());
    assertEquals(1, serviceLocal.getStreamManager().numAcquired());
}
Example 18
Source File: TestDistributedLogService.java From distributedlog with Apache License 2.0 | 4 votes |
@Test(timeout = 60000)
public void testAcquireStreamsWhenExceedMaxCachedPartitions() throws Exception {
    String streamName = testName.getMethodName() + "_0000";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(dlConf);
    confLocal.setMaxCachedPartitionsPerProxy(1);
    ServerConfiguration serverConfLocal = new ServerConfiguration();
    serverConfLocal.addConfiguration(serverConf);
    serverConfLocal.setStreamPartitionConverterClass(DelimiterStreamPartitionConverter.class);
    DistributedLogServiceImpl serviceLocal = createService(serverConfLocal, confLocal);
    Stream stream = serviceLocal.getLogWriter(streamName);

    // stream is cached
    assertNotNull(stream);
    assertEquals(1, serviceLocal.getStreamManager().numCached());

    // create write ops
    WriteOp op0 = createWriteOp(service, streamName, 0L);
    stream.submit(op0);
    WriteResponse wr0 = Await.result(op0.result());
    assertEquals("Op 0 should succeed",
            StatusCode.SUCCESS, wr0.getHeader().getCode());
    assertEquals(1, serviceLocal.getStreamManager().numAcquired());

    // should fail to acquire another partition
    try {
        serviceLocal.getLogWriter(testName.getMethodName() + "_0001");
        fail("Should fail to acquire new streams");
    } catch (StreamUnavailableException sue) {
        // expected
    }
    assertEquals(1, serviceLocal.getStreamManager().numCached());
    assertEquals(1, serviceLocal.getStreamManager().numAcquired());

    // should be able to acquire partitions from other streams
    String anotherStreamName = testName.getMethodName() + "-another_0001";
    Stream anotherStream = serviceLocal.getLogWriter(anotherStreamName);
    assertNotNull(anotherStream);
    assertEquals(2, serviceLocal.getStreamManager().numCached());

    // create write ops
    WriteOp op1 = createWriteOp(service, anotherStreamName, 0L);
    anotherStream.submit(op1);
    WriteResponse wr1 = Await.result(op1.result());
    assertEquals("Op 1 should succeed",
            StatusCode.SUCCESS, wr1.getHeader().getCode());
    assertEquals(2, serviceLocal.getStreamManager().numAcquired());
}
Example 19
Source File: TestDistributedLogService.java From distributedlog with Apache License 2.0 | 4 votes |
private DistributedLogConfiguration newLocalConf() {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(dlConf);
    return confLocal;
}