Java Code Examples for org.apache.distributedlog.api.DistributedLogManager#startAsyncLogSegmentNonPartitioned()
The following examples show how to use org.apache.distributedlog.api.DistributedLogManager#startAsyncLogSegmentNonPartitioned(). They are taken from open source projects; the source file, originating project, and license are noted above each example.
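Before the project examples, here is a minimal end-to-end sketch of the typical call pattern: build a Namespace, open a DistributedLogManager, start the non-partitioned async writer, write one record, and close everything. This sketch is not taken from any project below; the namespace URI, stream name, and class name are illustrative assumptions, and it presumes a running DistributedLog/BookKeeper deployment behind that URI.

import java.net.URI;
import java.nio.charset.StandardCharsets;

import org.apache.distributedlog.DLSN;
import org.apache.distributedlog.DistributedLogConfiguration;
import org.apache.distributedlog.LogRecord;
import org.apache.distributedlog.api.AsyncLogWriter;
import org.apache.distributedlog.api.DistributedLogManager;
import org.apache.distributedlog.api.namespace.Namespace;
import org.apache.distributedlog.api.namespace.NamespaceBuilder;
import org.apache.distributedlog.util.Utils;

public class StartAsyncLogSegmentSketch {

    public static void main(String[] args) throws Exception {
        // Placeholder namespace URI and stream name -- replace with your own deployment.
        URI dlUri = URI.create("distributedlog://127.0.0.1:2181/messaging/my-namespace");
        String streamName = "example-stream";

        DistributedLogConfiguration conf = new DistributedLogConfiguration();
        Namespace namespace = NamespaceBuilder.newBuilder()
                .conf(conf)
                .uri(dlUri)
                .build();

        DistributedLogManager dlm = namespace.openLog(streamName);
        // Start the single (non-partitioned) async writer for this log. As the tests below show,
        // this call also recovers incomplete log segments left by a previous writer.
        AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
        try {
            // write() is asynchronous; the returned future completes with the record's DLSN.
            LogRecord record = new LogRecord(1L, "hello".getBytes(StandardCharsets.UTF_8));
            DLSN dlsn = writer.write(record).get();
            System.out.println("Wrote record at " + dlsn);
        } finally {
            Utils.close(writer);
            dlm.close();
            namespace.close();
        }
    }
}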
Example 1
Source File: TestAsyncBulkWrite.java From distributedlog with Apache License 2.0
/**
 * Test Case: A large write batch will span multiple packets.
 * @throws Exception
 */
@Test(timeout = 60000)
public void testAsyncBulkWriteSpanningPackets() throws Exception {
    String name = "distrlog-testAsyncBulkWriteSpanningPackets";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(1024);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());

    // First entry.
    int numTransmissions = 4;
    int recSize = 10 * 1024;
    int batchSize = (numTransmissions * MAX_LOGRECORDSET_SIZE + 1) / recSize;
    long ledgerIndex = 1;
    long entryIndex = 0;
    long slotIndex = 0;
    long txIndex = 1;
    DLSN dlsn = checkAllSucceeded(writer, batchSize, recSize, ledgerIndex, entryIndex, slotIndex, txIndex);
    assertEquals(4, dlsn.getEntryId());
    assertEquals(1, dlsn.getLogSegmentSequenceNo());

    writer.closeAndComplete();
    dlm.close();
}
Example 2
Source File: TestBKLogReadHandler.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testGetFirstDLSNAfterPartialTruncation() throws Exception {
    String dlName = runtime.getMethodName();
    prepareLogSegmentsNonPartitioned(dlName, 3, 10);
    DistributedLogManager dlm = createNewDLM(conf, dlName);
    BKLogReadHandler readHandler = ((BKDistributedLogManager) dlm).createReadHandler();
    AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();

    // Only truncates at ledger boundary.
    CompletableFuture<Boolean> futureSuccess = writer.truncate(new DLSN(2, 5, 0));
    Boolean success = Utils.ioResult(futureSuccess);
    assertTrue(success);
    CompletableFuture<LogRecordWithDLSN> futureRecord = readHandler.asyncGetFirstLogRecord();
    LogRecordWithDLSN record = Utils.ioResult(futureRecord);
    assertEquals(new DLSN(2, 0, 0), record.getDlsn());
}
Example 3
Source File: TestAsyncReaderLock.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testReaderLockSharedDlmDoesNotConflict() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogManager dlm0 = createNewDLM(conf, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm0.startAsyncLogSegmentNonPartitioned());
    writer.write(DLMTestUtil.getLogRecordInstance(1L));
    writer.write(DLMTestUtil.getLogRecordInstance(2L));
    writer.closeAndComplete();

    DistributedLogManager dlm1 = createNewDLM(conf, name);
    CompletableFuture<AsyncLogReader> futureReader1 = dlm1.getAsyncLogReaderWithLock(DLSN.InitialDLSN);
    CompletableFuture<AsyncLogReader> futureReader2 = dlm1.getAsyncLogReaderWithLock(DLSN.InitialDLSN);

    // Both use the same client id, so there's no lock conflict. Not necessarily ideal, but how the
    // system currently works.
    Utils.ioResult(futureReader1);
    Utils.ioResult(futureReader2);

    dlm0.close();
    dlm1.close();
}
Example 4
Source File: NonBlockingReadsTestUtil.java From distributedlog with Apache License 2.0
static void writeRecordsForNonBlockingReads(DistributedLogConfiguration conf,
                                            DistributedLogManager dlm,
                                            boolean recover,
                                            long segmentSize) throws Exception {
    long txId = 1;
    for (long i = 0; i < 3; i++) {
        BKAsyncLogWriter writer = (BKAsyncLogWriter) dlm.startAsyncLogSegmentNonPartitioned();
        for (long j = 1; j < segmentSize; j++) {
            Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(txId++)));
        }
        if (recover) {
            Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(txId++)));
            TimeUnit.MILLISECONDS.sleep(300);
            writer.abort();
            LOG.debug("Recovering Segments");
            BKLogWriteHandler blplm = ((BKDistributedLogManager) (dlm)).createWriteHandler(true);
            Utils.ioResult(blplm.recoverIncompleteLogSegments());
            Utils.ioResult(blplm.asyncClose());
            LOG.debug("Recovered Segments");
        } else {
            Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(txId++)));
            writer.closeAndComplete();
        }
        TimeUnit.MILLISECONDS.sleep(300);
    }
}
Example 5
Source File: TestAsyncReaderWriter.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testAsyncWritePendingWritesAbortedWhenLedgerRollTriggerFails() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(1024);
    confLocal.setMaxLogSegmentBytes(1024);
    confLocal.setLogSegmentRollingIntervalMinutes(0);

    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());

    // Write one record larger than max seg size. Ledger doesn't roll until next write.
    int txid = 1;
    LogRecord record = DLMTestUtil.getLogRecordInstance(txid++, 2048);
    CompletableFuture<DLSN> result = writer.write(record);
    DLSN dlsn = Utils.ioResult(result, 10, TimeUnit.SECONDS);
    assertEquals(1, dlsn.getLogSegmentSequenceNo());

    record = DLMTestUtil.getLogRecordInstance(txid++, MAX_LOGRECORD_SIZE + 1);
    result = writer.write(record);
    validateFutureFailed(result, LogRecordTooLongException.class);

    record = DLMTestUtil.getLogRecordInstance(txid++, MAX_LOGRECORD_SIZE + 1);
    result = writer.write(record);
    validateFutureFailed(result, WriteException.class);

    record = DLMTestUtil.getLogRecordInstance(txid++, MAX_LOGRECORD_SIZE + 1);
    result = writer.write(record);
    validateFutureFailed(result, WriteException.class);

    writer.closeAndComplete();
    dlm.close();
}
Example 6
Source File: TestBKDistributedLogManager.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testGetLogRecordCountAsync() throws Exception {
    DistributedLogManager dlm = createNewDLM(conf, testNames.getMethodName());
    BKAsyncLogWriter writer = (BKAsyncLogWriter) dlm.startAsyncLogSegmentNonPartitioned();
    DLMTestUtil.generateCompletedLogSegments(dlm, conf, 2, 10);

    CompletableFuture<Long> futureCount = dlm.getLogRecordCountAsync(DLSN.InitialDLSN);
    Long count = Utils.ioResult(futureCount, 2, TimeUnit.SECONDS);
    assertEquals(20, count.longValue());
    writer.close();
    dlm.close();
}
Example 7
Source File: DistributedLogTool.java From distributedlog with Apache License 2.0
private int truncateStream(final Namespace namespace, String streamName, DLSN dlsn) throws Exception {
    DistributedLogManager dlm = namespace.openLog(streamName);
    try {
        long totalRecords = dlm.getLogRecordCount();
        long recordsAfterTruncate = FutureUtils.result(dlm.getLogRecordCountAsync(dlsn));
        long recordsToTruncate = totalRecords - recordsAfterTruncate;
        if (!getForce() && !IOUtils.confirmPrompt("Do you want to truncate " + streamName
                + " at dlsn " + dlsn + " (" + recordsToTruncate + " records)?")) {
            return 0;
        } else {
            AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
            try {
                if (!FutureUtils.result(writer.truncate(dlsn))) {
                    System.out.println("Failed to truncate.");
                }
                return 0;
            } finally {
                Utils.close(writer);
            }
        }
    } catch (Exception ex) {
        System.err.println("Failed to truncate " + ex);
        return 1;
    } finally {
        dlm.close();
    }
}
Example 8
Source File: TestAsyncReaderWriter.java From distributedlog with Apache License 2.0
public void writeRecordsWithOutstandingWriteLimit(int stream, int global, boolean shouldFail)
        throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(testConf);
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setPerWriterOutstandingWriteLimit(stream);
    confLocal.setOutstandingWriteLimitDarkmode(false);
    DistributedLogManager dlm;
    if (global > -1) {
        dlm = createNewDLM(confLocal, runtime.getMethodName(),
                new SimplePermitLimiter(false, global, new NullStatsLogger(), true,
                        new FixedValueFeature("", 0)));
    } else {
        dlm = createNewDLM(confLocal, runtime.getMethodName());
    }
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
    ArrayList<CompletableFuture<DLSN>> results = new ArrayList<CompletableFuture<DLSN>>(1000);
    for (int i = 0; i < 1000; i++) {
        results.add(writer.write(DLMTestUtil.getLogRecordInstance(1L)));
    }
    for (CompletableFuture<DLSN> result : results) {
        try {
            Utils.ioResult(result);
            if (shouldFail) {
                fail("should fail due to no outstanding writes permitted");
            }
        } catch (OverCapacityException ex) {
            assertTrue(shouldFail);
        }
    }
    writer.closeAndComplete();
    dlm.close();
}
Example 9
Source File: TestLogSegmentsZK.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testCompleteLogSegmentConflicts() throws Exception {
    URI uri = createURI();
    String streamName = testName.getMethodName();
    DistributedLogConfiguration conf = new DistributedLogConfiguration()
            .setLockTimeout(99999)
            .setOutputBufferSize(0)
            .setImmediateFlushEnabled(true)
            .setEnableLedgerAllocatorPool(true)
            .setLedgerAllocatorPoolName("test");
    Namespace namespace = NamespaceBuilder.newBuilder().conf(conf).uri(uri).build();

    namespace.createLog(streamName);
    DistributedLogManager dlm1 = namespace.openLog(streamName);
    DistributedLogManager dlm2 = namespace.openLog(streamName);

    // dlm1 is writing
    BKSyncLogWriter out1 = (BKSyncLogWriter) dlm1.startLogSegmentNonPartitioned();
    out1.write(DLMTestUtil.getLogRecordInstance(1));
    // before out1 complete, out2 is in on recovery
    // it completed the log segments which bump the version of /ledgers znode
    BKAsyncLogWriter out2 = (BKAsyncLogWriter) dlm2.startAsyncLogSegmentNonPartitioned();

    try {
        out1.closeAndComplete();
        fail("Should fail closeAndComplete since other people already completed it.");
    } catch (IOException ioe) {
    }
}
Example 10
Source File: TestAsyncReaderWriter.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testReleaseLockAfterFailedToRecover() throws Exception {
    String name = "release-lock-after-failed-to-recover";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(testConf);
    confLocal.setLockTimeout(0);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setOutputBufferSize(0);

    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());

    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(1L)));
    writer.abort();

    for (int i = 0; i < 2; i++) {
        FailpointUtils.setFailpoint(
                FailpointUtils.FailPointName.FP_RecoverIncompleteLogSegments,
                FailpointUtils.FailPointActions.FailPointAction_Throw);

        try {
            dlm.startAsyncLogSegmentNonPartitioned();
            fail("Should fail during recovering incomplete log segments");
        } catch (IOException ioe) {
            // expected;
        } finally {
            FailpointUtils.removeFailpoint(FailpointUtils.FailPointName.FP_RecoverIncompleteLogSegments);
        }
    }

    writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());

    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(1, segments.size());
    assertFalse(segments.get(0).isInProgress());

    writer.close();
    dlm.close();
}
Example 11
Source File: TestAsyncReaderLock.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testReaderLockManyLocks() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogManager dlm = createNewDLM(conf, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
    writer.write(DLMTestUtil.getLogRecordInstance(1L));
    writer.write(DLMTestUtil.getLogRecordInstance(2L));
    writer.closeAndComplete();

    int count = 5;
    final CountDownLatch acquiredLatch = new CountDownLatch(count);
    final ArrayList<CompletableFuture<AsyncLogReader>> readers =
            new ArrayList<CompletableFuture<AsyncLogReader>>(count);
    for (int i = 0; i < count; i++) {
        readers.add(null);
    }
    final DistributedLogManager[] dlms = new DistributedLogManager[count];
    for (int i = 0; i < count; i++) {
        dlms[i] = createNewDLM(conf, name);
        readers.set(i, dlms[i].getAsyncLogReaderWithLock(DLSN.InitialDLSN));
        readers.get(i).whenComplete(new FutureEventListener<AsyncLogReader>() {
            @Override
            public void onSuccess(AsyncLogReader reader) {
                acquiredLatch.countDown();
                reader.asyncClose();
            }
            @Override
            public void onFailure(Throwable cause) {
                fail("acquire shouldnt have failed");
            }
        });
    }

    acquiredLatch.await();
    for (int i = 0; i < count; i++) {
        dlms[i].close();
    }
    dlm.close();
}
Example 12
Source File: TestAsyncReaderWriter.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testAsyncWriteWithMinDelayBetweenFlushes() throws Exception {
    String name = "distrlog-asyncwrite-mindelay";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setMinDelayBetweenImmediateFlushMs(100);
    DistributedLogManager dlm = createNewDLM(confLocal, name);

    final Thread currentThread = Thread.currentThread();
    final int count = 5000;
    final CountDownLatch syncLatch = new CountDownLatch(count);
    int txid = 1;
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
    Stopwatch executionTime = Stopwatch.createStarted();
    for (long i = 0; i < count; i++) {
        Thread.sleep(1);
        final LogRecord record = DLMTestUtil.getLogRecordInstance(txid++);
        CompletableFuture<DLSN> dlsnFuture = writer.write(record);
        dlsnFuture.whenComplete(new FutureEventListener<DLSN>() {
            @Override
            public void onSuccess(DLSN value) {
                syncLatch.countDown();
                LOG.debug("SyncLatch: {} ; DLSN: {} ", syncLatch.getCount(), value);
            }
            @Override
            public void onFailure(Throwable cause) {
                currentThread.interrupt();
            }
        });
    }

    boolean success = false;
    if (!(Thread.interrupted())) {
        try {
            success = syncLatch.await(10, TimeUnit.SECONDS);
        } catch (InterruptedException exc) {
            Thread.currentThread().interrupt();
        }
    }

    // Abort, not graceful close, since the latter will
    // flush as well, and may add an entry.
    writer.abort();
    executionTime.stop();

    assertTrue(!(Thread.interrupted()));
    assertTrue(success);

    LogRecordWithDLSN last = dlm.getLastLogRecord();
    LOG.info("Last Entry {}; elapsed time {}",
            last.getDlsn().getEntryId(), executionTime.elapsed(TimeUnit.MILLISECONDS));

    // Regardless of how many records we wrote; the number of BK entries should always be bounded by the min delay.
    // Since there are two flush processes--data flush and control flush, and since control flush may also end up
    // flushing data if data is available, the upper bound is 2*(time/min_delay + 1)
    assertTrue(last.getDlsn().getEntryId()
            <= ((executionTime.elapsed(TimeUnit.MILLISECONDS)
                    / confLocal.getMinDelayBetweenImmediateFlushMs() + 1)) * 2);

    DLMTestUtil.verifyLogRecord(last);

    dlm.close();
}
Example 13
Source File: TestRollLogSegments.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testRollingLogSegments() throws Exception {
    logger.info("start testRollingLogSegments");
    String name = "distrlog-rolling-logsegments-hightraffic";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setOutputBufferSize(0);
    confLocal.setLogSegmentRollingIntervalMinutes(0);
    confLocal.setMaxLogSegmentBytes(1);
    confLocal.setLogSegmentRollingConcurrency(Integer.MAX_VALUE);

    int numLogSegments = 10;

    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) dlm.startAsyncLogSegmentNonPartitioned();

    final CountDownLatch latch = new CountDownLatch(numLogSegments);
    long startTime = System.currentTimeMillis();
    // send requests in parallel to have outstanding requests
    for (int i = 1; i <= numLogSegments; i++) {
        final int entryId = i;
        CompletableFuture<DLSN> writeFuture = writer.write(DLMTestUtil.getLogRecordInstance(entryId))
                .whenComplete(new FutureEventListener<DLSN>() {
                    @Override
                    public void onSuccess(DLSN value) {
                        logger.info("Completed entry {} : {}.", entryId, value);
                        latch.countDown();
                    }
                    @Override
                    public void onFailure(Throwable cause) {
                        logger.error("Failed to write entries : {}", cause);
                    }
                });
        if (i == 1) {
            // wait for first log segment created
            Utils.ioResult(writeFuture);
        }
    }
    latch.await();

    logger.info("Took {} ms to completed all requests.", System.currentTimeMillis() - startTime);

    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    logger.info("LogSegments : {}", segments);

    assertTrue(segments.size() >= 2);
    ensureOnlyOneInprogressLogSegments(segments);

    int numSegmentsAfterAsyncWrites = segments.size();

    // writer should work after rolling log segments
    // there would be (numLogSegments/2) segments based on current rolling policy
    for (int i = 1; i <= numLogSegments; i++) {
        DLSN newDLSN = Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(numLogSegments + i)));
        logger.info("Completed entry {} : {}", numLogSegments + i, newDLSN);
    }

    segments = dlm.getLogSegments();
    logger.info("LogSegments : {}", segments);

    assertEquals(numSegmentsAfterAsyncWrites + numLogSegments / 2, segments.size());
    ensureOnlyOneInprogressLogSegments(segments);

    writer.close();
    dlm.close();
}
Example 14
Source File: TestLogSegmentCreation.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testCreateLogSegmentAfterLoseLock() throws Exception {
    URI uri = createDLMURI("/LogSegmentCreation");
    String name = "distrlog-createlogsegment-afterloselock";
    DistributedLogConfiguration conf = new DistributedLogConfiguration()
            .setLockTimeout(99999)
            .setOutputBufferSize(0)
            .setImmediateFlushEnabled(true)
            .setEnableLedgerAllocatorPool(true)
            .setLedgerAllocatorPoolName("test");
    Namespace namespace = NamespaceBuilder.newBuilder()
            .conf(conf).uri(uri).build();
    DistributedLogManager dlm = namespace.openLog(name);
    final int numSegments = 3;
    for (int i = 0; i < numSegments; i++) {
        BKSyncLogWriter out = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
        out.write(DLMTestUtil.getLogRecordInstance(i));
        out.closeAndComplete();
    }

    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    LOG.info("Segments : {}", segments);
    assertEquals(3, segments.size());

    final DistributedLogManager dlm1 = namespace.openLog(name);
    final DistributedLogManager dlm2 = namespace.openLog(name);

    BKAsyncLogWriter writer1 = (BKAsyncLogWriter) dlm1.startAsyncLogSegmentNonPartitioned();
    LOG.info("Created writer 1.");
    BKSyncLogWriter writer2 = (BKSyncLogWriter) dlm2.startLogSegmentNonPartitioned();
    LOG.info("Created writer 2.");

    writer2.write(DLMTestUtil.getLogRecordInstance(numSegments));
    writer2.closeAndComplete();

    try {
        Utils.ioResult(writer1.write(DLMTestUtil.getLogRecordInstance(numSegments + 1)));
        fail("Should fail on writing new log records.");
    } catch (Throwable t) {
        LOG.error("Failed to write entry : ", t);
    }

    segments = dlm.getLogSegments();

    boolean hasInprogress = false;
    boolean hasDuplicatedSegment = false;
    long nextSeqNo = segments.get(0).getLogSegmentSequenceNumber();
    for (int i = 1; i < segments.size(); i++) {
        LogSegmentMetadata segment = segments.get(i);
        assertTrue(segment.getLogSegmentSequenceNumber() >= nextSeqNo);
        if (segment.getLogSegmentSequenceNumber() == nextSeqNo) {
            hasDuplicatedSegment = true;
        }
        nextSeqNo = segment.getLogSegmentSequenceNumber();
        if (segment.isInProgress()) {
            hasInprogress = true;
        }
    }
    assertEquals(4, segments.size());
    assertFalse(hasInprogress);
    assertFalse(hasDuplicatedSegment);

    LOG.info("Segments : duplicated = {}, inprogress = {}, {}",
            new Object[] { hasDuplicatedSegment, hasInprogress, segments });

    dlm1.close();
    dlm2.close();
    dlm.close();

    namespace.close();
}
Example 15
Source File: TestNonBlockingReadsMultiReader.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testMultiReaders() throws Exception {
    String name = "distrlog-multireaders";
    final RateLimiter limiter = RateLimiter.create(1000);

    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(true);

    DistributedLogManager dlmwrite = createNewDLM(confLocal, name);

    final AsyncLogWriter writer = dlmwrite.startAsyncLogSegmentNonPartitioned();
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(0)));
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(1)));
    final AtomicInteger writeCount = new AtomicInteger(2);

    DistributedLogManager dlmread = createNewDLM(conf, name);

    BKSyncLogReader reader0 = (BKSyncLogReader) dlmread.getInputStream(0);

    try {
        ReaderThread[] readerThreads = new ReaderThread[1];
        readerThreads[0] = new ReaderThread("reader0-non-blocking", reader0, false);
        // readerThreads[1] = new ReaderThread("reader1-non-blocking", reader0, false);

        final AtomicBoolean running = new AtomicBoolean(true);
        Thread writerThread = new Thread("WriteThread") {
            @Override
            public void run() {
                try {
                    long txid = 2;
                    DLSN dlsn = DLSN.InvalidDLSN;
                    while (running.get()) {
                        limiter.acquire();
                        long curTxId = txid++;
                        dlsn = Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(curTxId)));
                        writeCount.incrementAndGet();
                        if (curTxId % 1000 == 0) {
                            LOG.info("writer write {}", curTxId);
                        }
                    }
                    LOG.info("Completed writing record at {}", dlsn);
                    Utils.close(writer);
                } catch (DLInterruptedException die) {
                    Thread.currentThread().interrupt();
                } catch (Exception e) {
                }
            }
        };

        for (ReaderThread rt : readerThreads) {
            rt.start();
        }

        writerThread.start();

        TimeUnit.SECONDS.sleep(5);

        LOG.info("Stopping writer");

        running.set(false);
        writerThread.join();

        LOG.info("Writer stopped after writing {} records, waiting for reader to complete",
                writeCount.get());
        while (writeCount.get() > (readerThreads[0].getReadCount())) {
            LOG.info("Write Count = {}, Read Count = {}",
                    new Object[] { writeCount.get(), readerThreads[0].getReadCount() });
            TimeUnit.MILLISECONDS.sleep(100);
        }
        assertEquals(writeCount.get(), (readerThreads[0].getReadCount()));

        for (ReaderThread readerThread : readerThreads) {
            readerThread.stopReading();
        }
    } finally {
        dlmwrite.close();
        reader0.close();
        dlmread.close();
    }
}
Example 16
Source File: TestTruncate.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testPartiallyTruncateTruncatedSegments() throws Exception {
    String name = "distrlog-partially-truncate-truncated-segments";
    URI uri = createDLMURI("/" + name);

    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setExplicitTruncationByApplication(true);

    // populate
    Map<Long, DLSN> dlsnMap = new HashMap<Long, DLSN>();
    populateData(dlsnMap, confLocal, name, 4, 10, false);

    DistributedLogManager dlm = createNewDLM(confLocal, name);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    LOG.info("Segments before modifying segment status : {}", segments);

    ZooKeeperClient zkc = TestZooKeeperClientBuilder.newBuilder(conf)
            .uri(uri)
            .build();
    for (int i = 0; i < 4; i++) {
        LogSegmentMetadata segment = segments.get(i);
        setTruncationStatus(zkc, segment, TruncationStatus.TRUNCATED);
    }

    List<LogSegmentMetadata> newSegments = dlm.getLogSegments();
    LOG.info("Segments after changing truncation status : {}", newSegments);

    dlm.close();

    DistributedLogManager newDLM = createNewDLM(confLocal, name);
    AsyncLogWriter newWriter = newDLM.startAsyncLogSegmentNonPartitioned();
    Utils.ioResult(newWriter.truncate(dlsnMap.get(15L)));

    List<LogSegmentMetadata> newSegments2 = newDLM.getLogSegments();
    assertArrayEquals(newSegments.toArray(new LogSegmentMetadata[4]),
            newSegments2.toArray(new LogSegmentMetadata[4]));

    Utils.close(newWriter);
    newDLM.close();
    zkc.close();
}
Example 17
Source File: TestAsyncReaderWriter.java From distributedlog with Apache License 2.0
/**
 * Flaky test fixed: readers need to be added to the pendingReaders.
 * @throws Exception
 */
@Test(timeout = 300000)
public void testSimpleAsyncReadWriteSimulateErrors() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setReadAheadWaitTime(10);
    confLocal.setReadAheadBatchSize(10);
    confLocal.setOutputBufferSize(1024);
    DistributedLogManager dlm = createNewDLM(confLocal, name);

    int numLogSegments = 5;
    int numRecordsPerLogSegment = 10;

    final CountDownLatch doneLatch = new CountDownLatch(1);
    final CountDownLatch syncLatch = new CountDownLatch(numLogSegments * numRecordsPerLogSegment);

    TestReader reader = new TestReader(
            "test-reader",
            dlm,
            DLSN.InitialDLSN,
            true,
            0,
            new CountDownLatch(1),
            syncLatch,
            doneLatch);

    reader.start();

    final CountDownLatch writeLatch = new CountDownLatch(numLogSegments * numRecordsPerLogSegment);
    final AtomicBoolean writeErrors = new AtomicBoolean(false);

    int txid = 1;
    for (long i = 0; i < numLogSegments; i++) {
        final long currentLogSegmentSeqNo = i + 1;
        BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
        for (long j = 0; j < numRecordsPerLogSegment; j++) {
            final long currentEntryId = j;
            final LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
            CompletableFuture<DLSN> dlsnFuture = writer.write(record);
            dlsnFuture.whenComplete(new WriteFutureEventListener(
                    record, currentLogSegmentSeqNo, currentEntryId, writeLatch, writeErrors, true));
        }
        writer.closeAndComplete();
    }

    writeLatch.await();
    assertFalse("All writes should succeed", writeErrors.get());

    doneLatch.await();
    assertFalse("Should not encounter errors during reading", reader.areErrorsFound());
    syncLatch.await();

    assertTrue("Should position reader at least once", reader.getNumReaderPositions().get() > 1);
    reader.stop();
    dlm.close();
}
Example 18
Source File: TestAsyncReaderWriter.java From distributedlog with Apache License 2.0
/**
 * Test Case: starting reading when the streams don't exist.
 *
 * @throws Exception
 */
@Test(timeout = 60000)
public void testSimpleAsyncReadWriteStartEmpty() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setReadAheadWaitTime(10);
    confLocal.setReadAheadBatchSize(10);
    confLocal.setOutputBufferSize(1024);

    int numLogSegments = 3;
    int numRecordsPerLogSegment = 10;

    DistributedLogManager dlm = createNewDLM(confLocal, name);

    final CountDownLatch readerReadyLatch = new CountDownLatch(1);
    final CountDownLatch readerDoneLatch = new CountDownLatch(1);
    final CountDownLatch readerSyncLatch = new CountDownLatch(numLogSegments * numRecordsPerLogSegment);

    final TestReader reader = new TestReader(
            "test-reader",
            dlm,
            DLSN.InitialDLSN,
            false,
            0,
            readerReadyLatch,
            readerSyncLatch,
            readerDoneLatch);

    reader.start();

    // Increase the probability of reader failure and retry
    Thread.sleep(500);

    final AtomicBoolean writeErrors = new AtomicBoolean(false);
    final CountDownLatch writeLatch = new CountDownLatch(30);

    int txid = 1;
    for (long i = 0; i < 3; i++) {
        final long currentLogSegmentSeqNo = i + 1;
        BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
        for (long j = 0; j < 10; j++) {
            final long currentEntryId = j;
            final LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
            CompletableFuture<DLSN> dlsnFuture = writer.write(record);
            dlsnFuture.whenComplete(new WriteFutureEventListener(
                    record, currentLogSegmentSeqNo, currentEntryId, writeLatch, writeErrors, true));
        }
        writer.closeAndComplete();
    }

    writeLatch.await();
    assertFalse("All writes should succeed", writeErrors.get());

    readerDoneLatch.await();
    assertFalse("Should not encounter errors during reading", reader.areErrorsFound());
    readerSyncLatch.await();

    assertTrue("Should position reader at least once", reader.getNumReaderPositions().get() > 1);
    reader.stop();
    dlm.close();
}
Example 19
Source File: TestAsyncReaderWriter.java From distributedlog with Apache License 2.0
/**
 * Test Case: Simple Async Writes. Writes 30 records. They should be written correctly.
 * @throws Exception
 */
@Test(timeout = 60000)
public void testSimpleAsyncWrite() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(1024);

    int numLogSegments = 3;
    int numRecordsPerLogSegment = 10;

    DistributedLogManager dlm = createNewDLM(confLocal, name);

    final CountDownLatch syncLatch = new CountDownLatch(numLogSegments * numRecordsPerLogSegment);
    final AtomicBoolean errorsFound = new AtomicBoolean(false);
    final AtomicReference<DLSN> maxDLSN = new AtomicReference<DLSN>(DLSN.InvalidDLSN);
    int txid = 1;
    for (long i = 0; i < numLogSegments; i++) {
        final long currentLogSegmentSeqNo = i + 1;
        BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
        for (long j = 0; j < numRecordsPerLogSegment; j++) {
            final long currentEntryId = j;
            final LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
            CompletableFuture<DLSN> dlsnFuture = writer.write(record);
            dlsnFuture.whenComplete(new FutureEventListener<DLSN>() {
                @Override
                public void onSuccess(DLSN value) {
                    if (value.getLogSegmentSequenceNo() != currentLogSegmentSeqNo) {
                        LOG.debug("LogSegmentSequenceNumber: {}, Expected {}",
                                value.getLogSegmentSequenceNo(), currentLogSegmentSeqNo);
                        errorsFound.set(true);
                    }
                    if (value.getEntryId() != currentEntryId) {
                        LOG.debug("EntryId: {}, Expected {}", value.getEntryId(), currentEntryId);
                        errorsFound.set(true);
                    }
                    if (value.compareTo(maxDLSN.get()) > 0) {
                        maxDLSN.set(value);
                    }
                    syncLatch.countDown();
                    LOG.debug("SyncLatch: {}", syncLatch.getCount());
                }
                @Override
                public void onFailure(Throwable cause) {
                    LOG.error("Encountered exception on writing record {} in log segment {}",
                            currentEntryId, currentLogSegmentSeqNo);
                    errorsFound.set(true);
                }
            });
        }
        writer.closeAndComplete();
    }

    syncLatch.await();
    assertFalse("Should not encounter any errors for async writes", errorsFound.get());

    LogRecordWithDLSN last = dlm.getLastLogRecord();
    assertEquals("Last DLSN" + last.getDlsn() + " isn't the maximum DLSN " + maxDLSN.get(),
            last.getDlsn(), maxDLSN.get());
    assertEquals(last.getDlsn(), dlm.getLastDLSN());
    assertEquals(last.getDlsn(), Utils.ioResult(dlm.getLastDLSNAsync()));
    DLMTestUtil.verifyLargeLogRecord(last);

    dlm.close();
}
Example 20
Source File: TestAsyncReaderWriter.java From distributedlog with Apache License 2.0
void testSimpleAsyncReadWriteInternal(String name, boolean immediateFlush, int logSegmentVersion)
        throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setReadAheadWaitTime(10);
    confLocal.setReadAheadBatchSize(10);
    confLocal.setOutputBufferSize(1024);
    confLocal.setDLLedgerMetadataLayoutVersion(logSegmentVersion);
    confLocal.setImmediateFlushEnabled(immediateFlush);
    DistributedLogManager dlm = createNewDLM(confLocal, name);

    int numLogSegments = 3;
    int numRecordsPerLogSegment = 10;

    final CountDownLatch readLatch = new CountDownLatch(numLogSegments * numRecordsPerLogSegment);
    final CountDownLatch readDoneLatch = new CountDownLatch(1);
    final AtomicBoolean readErrors = new AtomicBoolean(false);
    final CountDownLatch writeLatch = new CountDownLatch(numLogSegments * numRecordsPerLogSegment);
    final AtomicBoolean writeErrors = new AtomicBoolean(false);
    final AsyncLogReader reader = dlm.getAsyncLogReader(DLSN.InvalidDLSN);
    assertEquals(name, reader.getStreamName());

    int txid = 1;
    for (long i = 0; i < 3; i++) {
        final long currentLogSegmentSeqNo = i + 1;
        BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
        for (long j = 0; j < 10; j++) {
            final long currentEntryId = j;
            final LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
            CompletableFuture<DLSN> dlsnFuture = writer.write(record);
            dlsnFuture.whenComplete(new WriteFutureEventListener(
                    record, currentLogSegmentSeqNo, currentEntryId, writeLatch, writeErrors, true));
            if (i == 0 && j == 0) {
                boolean monotonic = LogSegmentMetadata.supportsSequenceId(logSegmentVersion);
                TestAsyncReaderWriter.readNext(
                        reader,
                        DLSN.InvalidDLSN,
                        monotonic ? 0L : Long.MIN_VALUE,
                        monotonic,
                        readLatch,
                        readDoneLatch,
                        readErrors);
            }
        }
        writer.closeAndComplete();
    }

    writeLatch.await();
    assertFalse("All writes should succeed", writeErrors.get());

    readDoneLatch.await();
    assertFalse("All reads should succeed", readErrors.get());

    readLatch.await();

    Utils.close(reader);
    dlm.close();
}