Java Code Examples for org.apache.distributedlog.api.DistributedLogManager#close()
The following examples show how to use org.apache.distributedlog.api.DistributedLogManager#close().
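As a quick orientation before the examples, the sketch below shows the usual open/use/close pattern around DistributedLogManager#close(). It is a minimal illustration, not one of the project examples: the namespace URI (distributedlog://127.0.0.1:2181/messaging/my-namespace), the stream name "my-stream", and the class name are placeholders invented for this page. The close() call in the finally block is the method documented here; closing the Namespace afterwards releases the shared resources, as the examples below also do. Note that getLastLogRecord() throws if the stream is missing or empty, which Example 10 handles explicitly.

import java.net.URI;

import org.apache.distributedlog.DistributedLogConfiguration;
import org.apache.distributedlog.api.DistributedLogManager;
import org.apache.distributedlog.api.namespace.Namespace;
import org.apache.distributedlog.api.namespace.NamespaceBuilder;

public class OpenAndCloseSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder namespace URI and stream name; replace with your own deployment values.
        URI uri = URI.create("distributedlog://127.0.0.1:2181/messaging/my-namespace");
        DistributedLogConfiguration conf = new DistributedLogConfiguration();
        Namespace namespace = NamespaceBuilder.newBuilder()
                .conf(conf)
                .uri(uri)
                .build();
        DistributedLogManager dlm = namespace.openLog("my-stream");
        try {
            // Use the manager, e.g. inspect the last written record.
            System.out.println("Last DLSN: " + dlm.getLastLogRecord().getDlsn());
        } finally {
            // Release the per-stream handle first, then the namespace itself.
            dlm.close();
            namespace.close();
        }
    }
}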
Example 1
Source File: TestAsyncReaderWriter.java (from the distributedlog project, Apache License 2.0)

@Test(timeout = 60000)
public void testOutstandingWriteLimitBlockAllLimitWithDarkmode() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(testConf);
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setPerWriterOutstandingWriteLimit(0);
    confLocal.setOutstandingWriteLimitDarkmode(true);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
    ArrayList<CompletableFuture<DLSN>> results = new ArrayList<CompletableFuture<DLSN>>(1000);
    for (int i = 0; i < 1000; i++) {
        results.add(writer.write(DLMTestUtil.getLogRecordInstance(1L)));
    }
    for (CompletableFuture<DLSN> result : results) {
        Utils.ioResult(result);
    }
    writer.closeAndComplete();
    dlm.close();
}
Example 2
Source File: DistributedLogTool.java (from the distributedlog project, Apache License 2.0)

@Override
protected int runCmd() throws Exception {
    DistributedLogManager dlm = getNamespace().openLog(getStreamName());
    try {
        if (listEppStats) {
            bkc = new SimpleBookKeeperClient(getConf(), getUri());
        }
        printMetadata(dlm);
    } finally {
        dlm.close();
        if (null != bkc) {
            bkc.close();
        }
    }
    return 0;
}
Example 3
Source File: DistributedLogTool.java (from the distributedlog project, Apache License 2.0)

@Override
protected int runCmd() throws Exception {
    DistributedLogManager dlm = getNamespace().openLog(getStreamName());
    try {
        long count = 0;
        if (null == endDLSN) {
            count = countToLastRecord(dlm);
        } else {
            count = countFromStartToEnd(dlm);
        }
        System.out.println("total is " + count + " records.");
        return 0;
    } finally {
        dlm.close();
    }
}
Example 4
Source File: DLAuditor.java (from the distributedlog project, Apache License 2.0)

private List<Long> collectLedgersFromStream(Namespace namespace,
                                            String stream,
                                            Set<Long> ledgers)
        throws IOException {
    DistributedLogManager dlm = namespace.openLog(stream);
    try {
        List<LogSegmentMetadata> segments = dlm.getLogSegments();
        List<Long> sLedgers = new ArrayList<Long>();
        for (LogSegmentMetadata segment : segments) {
            synchronized (ledgers) {
                ledgers.add(segment.getLogSegmentId());
            }
            sLedgers.add(segment.getLogSegmentId());
        }
        return sLedgers;
    } finally {
        dlm.close();
    }
}
Example 5
Source File: DistributedLogTool.java (from the distributedlog project, Apache License 2.0)

private void deleteSubscriber(Namespace namespace, List<String> streams,
                              int tid, int numStreamsPerThreads) throws Exception {
    int startIdx = tid * numStreamsPerThreads;
    int endIdx = Math.min(streams.size(), (tid + 1) * numStreamsPerThreads);
    for (int i = startIdx; i < endIdx; i++) {
        final String s = streams.get(i);
        DistributedLogManager dlm = namespace.openLog(s);
        final CountDownLatch countDownLatch = new CountDownLatch(1);
        dlm.getSubscriptionsStore().deleteSubscriber(subscriberId)
                .whenComplete(new FutureEventListener<Boolean>() {
                    @Override
                    public void onFailure(Throwable cause) {
                        System.out.println("Failed to delete subscriber for stream " + s);
                        cause.printStackTrace();
                        countDownLatch.countDown();
                    }

                    @Override
                    public void onSuccess(Boolean value) {
                        countDownLatch.countDown();
                    }
                });
        countDownLatch.await();
        dlm.close();
    }
}
Example 6
Source File: DistributedLogTool.java (from the distributedlog project, Apache License 2.0)

private int truncateStream(final Namespace namespace, String streamName, DLSN dlsn) throws Exception {
    DistributedLogManager dlm = namespace.openLog(streamName);
    try {
        long totalRecords = dlm.getLogRecordCount();
        long recordsAfterTruncate = FutureUtils.result(dlm.getLogRecordCountAsync(dlsn));
        long recordsToTruncate = totalRecords - recordsAfterTruncate;
        if (!getForce() && !IOUtils.confirmPrompt("Do you want to truncate "
                + streamName + " at dlsn " + dlsn + " (" + recordsToTruncate + " records)?")) {
            return 0;
        } else {
            AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
            try {
                if (!FutureUtils.result(writer.truncate(dlsn))) {
                    System.out.println("Failed to truncate.");
                }
                return 0;
            } finally {
                Utils.close(writer);
            }
        }
    } catch (Exception ex) {
        System.err.println("Failed to truncate " + ex);
        return 1;
    } finally {
        dlm.close();
    }
}
Example 7
Source File: TestBKDistributedLogManager.java (from the distributedlog project, Apache License 2.0)

@Test(timeout = 60000)
public void testNumberOfTransactions() throws Exception {
    String name = "distrlog-txncount";
    DistributedLogManager dlm = createNewDLM(conf, name);
    BKSyncLogWriter out = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
    for (long i = 1; i <= 100; i++) {
        LogRecord op = DLMTestUtil.getLogRecordInstance(i);
        out.write(op);
    }
    out.closeAndComplete();

    long numTrans = DLMTestUtil.getNumberofLogRecords(createNewDLM(conf, name), 1);
    assertEquals(100, numTrans);
    dlm.close();
}
Example 8
Source File: TestBKDistributedLogManager.java (from the distributedlog project, Apache License 2.0)

@Test(timeout = 60000)
public void testGetLogRecordCountAsync() throws Exception {
    DistributedLogManager dlm = createNewDLM(conf, testNames.getMethodName());
    BKAsyncLogWriter writer = (BKAsyncLogWriter) dlm.startAsyncLogSegmentNonPartitioned();
    DLMTestUtil.generateCompletedLogSegments(dlm, conf, 2, 10);

    CompletableFuture<Long> futureCount = dlm.getLogRecordCountAsync(DLSN.InitialDLSN);
    Long count = Utils.ioResult(futureCount, 2, TimeUnit.SECONDS);
    assertEquals(20, count.longValue());
    writer.close();
    dlm.close();
}
Example 9
Source File: TestAsyncBulkWrite.java (from the distributedlog project, Apache License 2.0)

/**
 * Test Case: A large write batch will span records into multiple entries and ledgers.
 * @throws Exception
 */
@Test(timeout = 60000)
public void testSimpleAsyncBulkWriteSpanningEntryAndLedger() throws Exception {
    String name = "distrlog-testSimpleAsyncBulkWriteSpanningEntryAndLedger";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(1024);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());

    int batchSize = 100;
    int recSize = 1024;

    // First entry.
    long ledgerIndex = 1;
    long entryIndex = 0;
    long slotIndex = 0;
    long txIndex = 1;
    checkAllSucceeded(writer, batchSize, recSize, ledgerIndex, entryIndex, slotIndex, txIndex);

    // New entry.
    entryIndex++;
    slotIndex = 0;
    txIndex += batchSize;
    checkAllSucceeded(writer, batchSize, recSize, ledgerIndex, entryIndex, slotIndex, txIndex);

    // Roll ledger.
    ledgerIndex++;
    entryIndex = 0;
    slotIndex = 0;
    txIndex += batchSize;
    writer.closeAndComplete();
    writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
    checkAllSucceeded(writer, batchSize, recSize, ledgerIndex, entryIndex, slotIndex, txIndex);

    writer.closeAndComplete();
    dlm.close();
}
Example 10
Source File: TailReader.java (from the distributedlog project, Apache License 2.0)

public static void main(String[] args) throws Exception {
    if (2 != args.length) {
        System.out.println(HELP);
        return;
    }

    String dlUriStr = args[0];
    final String streamName = args[1];

    URI uri = URI.create(dlUriStr);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    Namespace namespace = NamespaceBuilder.newBuilder()
            .conf(conf)
            .uri(uri)
            .build();

    // open the dlm
    System.out.println("Opening log stream " + streamName);
    DistributedLogManager dlm = namespace.openLog(streamName);

    // get the last record
    LogRecordWithDLSN lastRecord;
    DLSN dlsn;
    try {
        lastRecord = dlm.getLastLogRecord();
        dlsn = lastRecord.getDlsn();
        readLoop(dlm, dlsn);
    } catch (LogNotFoundException lnfe) {
        System.err.println("Log stream " + streamName + " is not found. Please create it first.");
        return;
    } catch (LogEmptyException lee) {
        System.err.println("Log stream " + streamName + " is empty.");
        dlsn = DLSN.InitialDLSN;
        readLoop(dlm, dlsn);
    } finally {
        dlm.close();
        namespace.close();
    }
}
Example 11
Source File: TestAsyncReaderWriter.java (from the distributedlog project, Apache License 2.0)

@Test(timeout = 60000)
public void testAsyncWritePendingWritesAbortedWhenLedgerRollTriggerFails() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(1024);
    confLocal.setMaxLogSegmentBytes(1024);
    confLocal.setLogSegmentRollingIntervalMinutes(0);

    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());

    // Write one record larger than max seg size. Ledger doesn't roll until next write.
    int txid = 1;
    LogRecord record = DLMTestUtil.getLogRecordInstance(txid++, 2048);
    CompletableFuture<DLSN> result = writer.write(record);
    DLSN dlsn = Utils.ioResult(result, 10, TimeUnit.SECONDS);
    assertEquals(1, dlsn.getLogSegmentSequenceNo());

    record = DLMTestUtil.getLogRecordInstance(txid++, MAX_LOGRECORD_SIZE + 1);
    result = writer.write(record);
    validateFutureFailed(result, LogRecordTooLongException.class);

    record = DLMTestUtil.getLogRecordInstance(txid++, MAX_LOGRECORD_SIZE + 1);
    result = writer.write(record);
    validateFutureFailed(result, WriteException.class);

    record = DLMTestUtil.getLogRecordInstance(txid++, MAX_LOGRECORD_SIZE + 1);
    result = writer.write(record);
    validateFutureFailed(result, WriteException.class);

    writer.closeAndComplete();
    dlm.close();
}
Example 12
Source File: ReaderWithOffsets.java (from the distributedlog project, Apache License 2.0)

public static void main(String[] args) throws Exception {
    if (4 != args.length) {
        System.out.println(HELP);
        return;
    }

    String dlUriStr = args[0];
    final String streamName = args[1];
    final String readerId = args[2];
    final String offsetStoreFile = args[3];

    URI uri = URI.create(dlUriStr);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    Namespace namespace = NamespaceBuilder.newBuilder()
            .conf(conf)
            .uri(uri)
            .build();

    // open the dlm
    System.out.println("Opening log stream " + streamName);
    DistributedLogManager dlm = namespace.openLog(streamName);

    // open the offset store
    Options options = new Options();
    options.createIfMissing(true);
    final DB offsetDB = factory.open(new File(offsetStoreFile), options);
    final AtomicReference<DLSN> lastDLSN = new AtomicReference<DLSN>(null);

    // offset updater
    final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
    executorService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            if (null != lastDLSN.get()) {
                offsetDB.put(readerId.getBytes(UTF_8), lastDLSN.get().serializeBytes());
                System.out.println("Updated reader " + readerId + " offset to " + lastDLSN.get());
            }
        }
    }, 10, 10, TimeUnit.SECONDS);

    try {
        byte[] offset = offsetDB.get(readerId.getBytes(UTF_8));
        DLSN dlsn;
        if (null == offset) {
            dlsn = DLSN.InitialDLSN;
        } else {
            dlsn = DLSN.deserializeBytes(offset);
        }
        readLoop(dlm, dlsn, lastDLSN);
    } finally {
        offsetDB.close();
        dlm.close();
        namespace.close();
    }
}
Example 13
Source File: TestAsyncReaderWriter.java (from the distributedlog project, Apache License 2.0)

void testSimpleAsyncReadWriteInternal(String name, boolean immediateFlush,
                                      int logSegmentVersion) throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setReadAheadWaitTime(10);
    confLocal.setReadAheadBatchSize(10);
    confLocal.setOutputBufferSize(1024);
    confLocal.setDLLedgerMetadataLayoutVersion(logSegmentVersion);
    confLocal.setImmediateFlushEnabled(immediateFlush);
    DistributedLogManager dlm = createNewDLM(confLocal, name);

    int numLogSegments = 3;
    int numRecordsPerLogSegment = 10;

    final CountDownLatch readLatch = new CountDownLatch(numLogSegments * numRecordsPerLogSegment);
    final CountDownLatch readDoneLatch = new CountDownLatch(1);
    final AtomicBoolean readErrors = new AtomicBoolean(false);
    final CountDownLatch writeLatch = new CountDownLatch(numLogSegments * numRecordsPerLogSegment);
    final AtomicBoolean writeErrors = new AtomicBoolean(false);
    final AsyncLogReader reader = dlm.getAsyncLogReader(DLSN.InvalidDLSN);
    assertEquals(name, reader.getStreamName());

    int txid = 1;
    for (long i = 0; i < 3; i++) {
        final long currentLogSegmentSeqNo = i + 1;
        BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
        for (long j = 0; j < 10; j++) {
            final long currentEntryId = j;
            final LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
            CompletableFuture<DLSN> dlsnFuture = writer.write(record);
            dlsnFuture.whenComplete(new WriteFutureEventListener(
                    record, currentLogSegmentSeqNo, currentEntryId, writeLatch, writeErrors, true));
            if (i == 0 && j == 0) {
                boolean monotonic = LogSegmentMetadata.supportsSequenceId(logSegmentVersion);
                TestAsyncReaderWriter.readNext(
                        reader,
                        DLSN.InvalidDLSN,
                        monotonic ? 0L : Long.MIN_VALUE,
                        monotonic,
                        readLatch,
                        readDoneLatch,
                        readErrors);
            }
        }
        writer.closeAndComplete();
    }

    writeLatch.await();
    assertFalse("All writes should succeed", writeErrors.get());

    readDoneLatch.await();
    assertFalse("All reads should succeed", readErrors.get());
    readLatch.await();

    Utils.close(reader);
    dlm.close();
}
Example 14
Source File: TestAsyncReaderWriter.java (from the distributedlog project, Apache License 2.0)

@Test(timeout = 60000)
public void testAsyncWriteWithMinDelayBetweenFlushes() throws Exception {
    String name = "distrlog-asyncwrite-mindelay";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setMinDelayBetweenImmediateFlushMs(100);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    final Thread currentThread = Thread.currentThread();
    final int count = 5000;
    final CountDownLatch syncLatch = new CountDownLatch(count);
    int txid = 1;
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
    Stopwatch executionTime = Stopwatch.createStarted();

    for (long i = 0; i < count; i++) {
        Thread.sleep(1);
        final LogRecord record = DLMTestUtil.getLogRecordInstance(txid++);
        CompletableFuture<DLSN> dlsnFuture = writer.write(record);
        dlsnFuture.whenComplete(new FutureEventListener<DLSN>() {
            @Override
            public void onSuccess(DLSN value) {
                syncLatch.countDown();
                LOG.debug("SyncLatch: {} ; DLSN: {} ", syncLatch.getCount(), value);
            }

            @Override
            public void onFailure(Throwable cause) {
                currentThread.interrupt();
            }
        });
    }

    boolean success = false;
    if (!(Thread.interrupted())) {
        try {
            success = syncLatch.await(10, TimeUnit.SECONDS);
        } catch (InterruptedException exc) {
            Thread.currentThread().interrupt();
        }
    }

    // Abort, not graceful close, since the latter will
    // flush as well, and may add an entry.
    writer.abort();
    executionTime.stop();

    assertTrue(!(Thread.interrupted()));
    assertTrue(success);

    LogRecordWithDLSN last = dlm.getLastLogRecord();
    LOG.info("Last Entry {}; elapsed time {}",
            last.getDlsn().getEntryId(), executionTime.elapsed(TimeUnit.MILLISECONDS));

    // Regardless of how many records we wrote; the number of BK entries should always be bounded by the min delay.
    // Since there are two flush processes--data flush and control flush, and since control flush may also end up
    // flushing data if data is available, the upper bound is 2*(time/min_delay + 1)
    assertTrue(last.getDlsn().getEntryId()
            <= ((executionTime.elapsed(TimeUnit.MILLISECONDS) / confLocal.getMinDelayBetweenImmediateFlushMs() + 1)) * 2);

    DLMTestUtil.verifyLogRecord(last);
    dlm.close();
}
Example 15
Source File: TestNonBlockingReadsMultiReader.java (from the distributedlog project, Apache License 2.0)

@Test(timeout = 60000)
public void testMultiReaders() throws Exception {
    String name = "distrlog-multireaders";
    final RateLimiter limiter = RateLimiter.create(1000);

    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(true);

    DistributedLogManager dlmwrite = createNewDLM(confLocal, name);

    final AsyncLogWriter writer = dlmwrite.startAsyncLogSegmentNonPartitioned();
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(0)));
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(1)));
    final AtomicInteger writeCount = new AtomicInteger(2);

    DistributedLogManager dlmread = createNewDLM(conf, name);

    BKSyncLogReader reader0 = (BKSyncLogReader) dlmread.getInputStream(0);

    try {
        ReaderThread[] readerThreads = new ReaderThread[1];
        readerThreads[0] = new ReaderThread("reader0-non-blocking", reader0, false);
        // readerThreads[1] = new ReaderThread("reader1-non-blocking", reader0, false);

        final AtomicBoolean running = new AtomicBoolean(true);
        Thread writerThread = new Thread("WriteThread") {
            @Override
            public void run() {
                try {
                    long txid = 2;
                    DLSN dlsn = DLSN.InvalidDLSN;
                    while (running.get()) {
                        limiter.acquire();
                        long curTxId = txid++;
                        dlsn = Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(curTxId)));
                        writeCount.incrementAndGet();
                        if (curTxId % 1000 == 0) {
                            LOG.info("writer write {}", curTxId);
                        }
                    }
                    LOG.info("Completed writing record at {}", dlsn);
                    Utils.close(writer);
                } catch (DLInterruptedException die) {
                    Thread.currentThread().interrupt();
                } catch (Exception e) {
                }
            }
        };

        for (ReaderThread rt : readerThreads) {
            rt.start();
        }

        writerThread.start();

        TimeUnit.SECONDS.sleep(5);

        LOG.info("Stopping writer");

        running.set(false);
        writerThread.join();

        LOG.info("Writer stopped after writing {} records, waiting for reader to complete", writeCount.get());
        while (writeCount.get() > (readerThreads[0].getReadCount())) {
            LOG.info("Write Count = {}, Read Count = {}",
                    new Object[] { writeCount.get(), readerThreads[0].getReadCount() });
            TimeUnit.MILLISECONDS.sleep(100);
        }
        assertEquals(writeCount.get(), (readerThreads[0].getReadCount()));

        for (ReaderThread readerThread : readerThreads) {
            readerThread.stopReading();
        }
    } finally {
        dlmwrite.close();
        reader0.close();
        dlmread.close();
    }
}
Example 16
Source File: TestInterleavedReaders.java (from the distributedlog project, Apache License 2.0)

@Test(timeout = 60000)
public void testInterleavedReadersWithCleanup() throws Exception {
    String name = "distrlog-interleaved-cleanup";
    BKDistributedLogManager dlmwrite0 = createNewDLM(conf, name + "-0");
    BKDistributedLogManager dlmwrite1 = createNewDLM(conf, name + "-1");
    long txid = 1;
    Long retentionPeriodOverride = null;

    BKAsyncLogWriter writer0 = dlmwrite0.startAsyncLogSegmentNonPartitioned();
    BKAsyncLogWriter writer1 = dlmwrite1.startAsyncLogSegmentNonPartitioned();
    for (long j = 1; j <= 4; j++) {
        for (int k = 1; k <= 10; k++) {
            if (k == 5) {
                writer0.setForceRolling(true);
                writer0.overRideMinTimeStampToKeep(retentionPeriodOverride);
                writer1.setForceRolling(true);
                writer1.overRideMinTimeStampToKeep(retentionPeriodOverride);
            }
            DLSN dlsn1 = Utils.ioResult(writer1.write(DLMTestUtil.getLogRecordInstance(txid++)));
            LOG.info("writer1 write record {}", dlsn1);
            DLSN dlsn0 = Utils.ioResult(writer0.write(DLMTestUtil.getLogRecordInstance(txid++)));
            LOG.info("writer0 write record {}", dlsn0);
            if (k == 5) {
                writer0.setForceRolling(false);
                writer1.setForceRolling(false);
                retentionPeriodOverride = System.currentTimeMillis();
            }
            Thread.sleep(5);
        }
        Utils.ioResult(writer1.writeControlRecord(DLMTestUtil.getLogRecordInstance(txid - 1)));
        Utils.ioResult(writer0.writeControlRecord(DLMTestUtil.getLogRecordInstance(txid - 1)));
    }
    writer0.close();
    writer1.close();

    DistributedLogManager dlmreader0 = createNewDLM(conf, name + "-0");
    DistributedLogManager dlmreader1 = createNewDLM(conf, name + "-1");
    LogReader reader0 = dlmreader0.getInputStream(1);
    LogReader reader1 = dlmreader1.getInputStream(1);

    int numTrans = drainStreams(reader0, 15, reader1, 15);
    assertEquals(30, numTrans);

    reader0.close();
    reader1.close();
    dlmreader0.close();
    dlmwrite0.close();
    dlmreader1.close();
    dlmwrite1.close();
}
Example 17
Source File: TestRollLogSegments.java (from the distributedlog project, Apache License 2.0)

@Test(timeout = 600000)
public void testLastDLSNInRollingLogSegments() throws Exception {
    final Map<Long, DLSN> lastDLSNs = new HashMap<Long, DLSN>();
    String name = "distrlog-lastdlsn-in-rolling-log-segments";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setOutputBufferSize(0);
    confLocal.setLogSegmentRollingIntervalMinutes(0);
    confLocal.setMaxLogSegmentBytes(40);

    int numEntries = 100;

    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) dlm.startAsyncLogSegmentNonPartitioned();

    final CountDownLatch latch = new CountDownLatch(numEntries);

    // send requests in parallel to have outstanding requests
    for (int i = 1; i <= numEntries; i++) {
        final int entryId = i;
        CompletableFuture<DLSN> writeFuture = writer.write(DLMTestUtil.getLogRecordInstance(entryId))
                .whenComplete(new FutureEventListener<DLSN>() {

                    @Override
                    public void onSuccess(DLSN value) {
                        logger.info("Completed entry {} : {}.", entryId, value);
                        synchronized (lastDLSNs) {
                            DLSN lastDLSN = lastDLSNs.get(value.getLogSegmentSequenceNo());
                            if (null == lastDLSN || lastDLSN.compareTo(value) < 0) {
                                lastDLSNs.put(value.getLogSegmentSequenceNo(), value);
                            }
                        }
                        latch.countDown();
                    }

                    @Override
                    public void onFailure(Throwable cause) {
                    }
                });
        if (i == 1) {
            // wait for first log segment created
            Utils.ioResult(writeFuture);
        }
    }
    latch.await();

    // make sure all ensure blocks were executed.
    writer.closeAndComplete();

    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    logger.info("lastDLSNs after writes {} {}", lastDLSNs.size(), lastDLSNs);
    logger.info("segments after writes {} {}", segments.size(), segments);
    assertTrue(segments.size() >= 2);
    assertTrue(lastDLSNs.size() >= 2);
    assertEquals(lastDLSNs.size(), segments.size());
    for (LogSegmentMetadata segment : segments) {
        DLSN dlsnInMetadata = segment.getLastDLSN();
        DLSN dlsnSeen = lastDLSNs.get(segment.getLogSegmentSequenceNumber());
        assertNotNull(dlsnInMetadata);
        assertNotNull(dlsnSeen);
        if (dlsnInMetadata.compareTo(dlsnSeen) != 0) {
            logger.error("Last dlsn recorded in log segment {} is different from the one already seen {}.",
                    dlsnInMetadata, dlsnSeen);
        }
        assertEquals(0, dlsnInMetadata.compareTo(dlsnSeen));
    }
    dlm.close();
}
Example 18
Source File: StreamTransformer.java (from the distributedlog project, Apache License 2.0)

public static void main(String[] args) throws Exception {
    if (3 != args.length) {
        System.out.println(HELP);
        return;
    }

    String dlUriStr = args[0];
    final String srcStreamName = args[1];
    final String targetStreamName = args[2];

    URI uri = URI.create(dlUriStr);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    conf.setOutputBufferSize(16 * 1024); // 16KB
    conf.setPeriodicFlushFrequencyMilliSeconds(5); // 5ms
    Namespace namespace = NamespaceBuilder.newBuilder()
            .conf(conf)
            .uri(uri)
            .build();

    // open the dlm
    System.out.println("Opening log stream " + srcStreamName);
    DistributedLogManager srcDlm = namespace.openLog(srcStreamName);
    System.out.println("Opening log stream " + targetStreamName);
    DistributedLogManager targetDlm = namespace.openLog(targetStreamName);

    Transformer<byte[], byte[]> replicationTransformer = new IdenticalTransformer<byte[]>();

    LogRecordWithDLSN lastTargetRecord;
    DLSN srcDlsn;
    try {
        lastTargetRecord = targetDlm.getLastLogRecord();
        TransformedRecord lastTransformedRecord = new TransformedRecord();
        try {
            lastTransformedRecord.read(protocolFactory.getProtocol(
                    new TIOStreamTransport(new ByteArrayInputStream(lastTargetRecord.getPayload()))));
            srcDlsn = DLSN.deserializeBytes(lastTransformedRecord.getSrcDlsn());
            System.out.println("Last transformed record is " + srcDlsn);
        } catch (TException e) {
            System.err.println("Error on reading last transformed record");
            e.printStackTrace(System.err);
            srcDlsn = DLSN.InitialDLSN;
        }
    } catch (LogNotFoundException lnfe) {
        srcDlsn = DLSN.InitialDLSN;
    } catch (LogEmptyException lee) {
        srcDlsn = DLSN.InitialDLSN;
    }

    AsyncLogWriter targetWriter = FutureUtils.result(targetDlm.openAsyncLogWriter());
    try {
        readLoop(srcDlm, srcDlsn, targetWriter, replicationTransformer);
    } finally {
        FutureUtils.result(targetWriter.asyncClose(), 5, TimeUnit.SECONDS);
        targetDlm.close();
        srcDlm.close();
        namespace.close();
    }
}
Example 19
Source File: TestAsyncReaderLock.java (from the distributedlog project, Apache License 2.0)

@Test(timeout = 60000)
public void testReaderLockSessionExpires() throws Exception {
    String name = runtime.getMethodName();
    URI uri = createDLMURI("/" + name);
    ensureURICreated(uri);
    Namespace ns0 = NamespaceBuilder.newBuilder()
            .conf(conf)
            .uri(uri)
            .build();
    DistributedLogManager dlm0 = ns0.openLog(name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm0.startAsyncLogSegmentNonPartitioned());
    writer.write(DLMTestUtil.getLogRecordInstance(1L));
    writer.write(DLMTestUtil.getLogRecordInstance(2L));
    writer.closeAndComplete();

    Namespace ns1 = NamespaceBuilder.newBuilder()
            .conf(conf)
            .uri(uri)
            .build();
    DistributedLogManager dlm1 = ns1.openLog(name);
    CompletableFuture<AsyncLogReader> futureReader1 = dlm1.getAsyncLogReaderWithLock(DLSN.InitialDLSN);
    AsyncLogReader reader1 = Utils.ioResult(futureReader1);
    ZooKeeperClientUtils.expireSession(((BKNamespaceDriver) ns1.getNamespaceDriver()).getWriterZKC(),
            zkServers, 1000);

    // The result of expireSession is somewhat non-deterministic with this lock.
    // It may fail with LockingException or it may successfully reacquire, so for
    // the moment rather than make it deterministic we accept either result.
    boolean success = false;
    try {
        Utils.ioResult(reader1.readNext());
        success = true;
    } catch (LockingException ex) {
    }
    if (success) {
        Utils.ioResult(reader1.readNext());
    }

    Utils.close(reader1);
    dlm0.close();
    ns0.close();
    dlm1.close();
    ns1.close();
}
Example 20
Source File: TestLogSegmentsZK.java (from the distributedlog project, Apache License 2.0)

/**
 * Create a log segment while the max sequence number doesn't match the list of log segments.
 */
@Test(timeout = 60000)
public void testCreateLogSegmentUnmatchMaxSequenceNumber() throws Exception {
    URI uri = createURI();
    String streamName = testName.getMethodName();
    DistributedLogConfiguration conf = new DistributedLogConfiguration()
            .setLockTimeout(99999)
            .setOutputBufferSize(0)
            .setImmediateFlushEnabled(true)
            .setEnableLedgerAllocatorPool(true)
            .setLedgerAllocatorPoolName("test");
    Namespace namespace = NamespaceBuilder.newBuilder().conf(conf).uri(uri).build();

    namespace.createLog(streamName);

    MaxLogSegmentSequenceNo max1 = getMaxLogSegmentSequenceNo(getZooKeeperClient(namespace), uri, streamName, conf);
    assertEquals(DistributedLogConstants.UNASSIGNED_LOGSEGMENT_SEQNO, max1.getSequenceNumber());

    DistributedLogManager dlm = namespace.openLog(streamName);
    final int numSegments = 3;
    for (int i = 0; i < numSegments; i++) {
        BKSyncLogWriter out = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
        out.write(DLMTestUtil.getLogRecordInstance(i));
        out.closeAndComplete();
    }

    MaxLogSegmentSequenceNo max2 = getMaxLogSegmentSequenceNo(getZooKeeperClient(namespace), uri, streamName, conf);
    assertEquals(3, max2.getSequenceNumber());

    // update the max ledger sequence number
    updateMaxLogSegmentSequenceNo(getZooKeeperClient(namespace), uri, streamName, conf,
            DLUtils.serializeLogSegmentSequenceNumber(99));

    DistributedLogManager dlm1 = namespace.openLog(streamName);
    try {
        BKSyncLogWriter out1 = (BKSyncLogWriter) dlm1.startLogSegmentNonPartitioned();
        out1.write(DLMTestUtil.getLogRecordInstance(numSegments + 1));
        out1.closeAndComplete();
        fail("Should fail creating new log segment when encountered unmatch max ledger sequence number");
    } catch (DLIllegalStateException lse) {
        // expected
    } finally {
        dlm1.close();
    }

    DistributedLogManager dlm2 = namespace.openLog(streamName);
    List<LogSegmentMetadata> segments = dlm2.getLogSegments();
    try {
        assertEquals(3, segments.size());
        assertEquals(1L, segments.get(0).getLogSegmentSequenceNumber());
        assertEquals(2L, segments.get(1).getLogSegmentSequenceNumber());
        assertEquals(3L, segments.get(2).getLogSegmentSequenceNumber());
    } finally {
        dlm2.close();
    }

    dlm.close();
    namespace.close();
}