Java Code Examples for com.twitter.util.Future#addEventListener()
The following examples show how to use com.twitter.util.Future#addEventListener(). All of the examples below are taken from the Apache DistributedLog project; the source file and license are noted above each example.
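Before looking at the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: register a FutureEventListener on a com.twitter.util.Future and react to completion in onSuccess/onFailure. The class and variable names in this sketch are illustrative only and do not come from DistributedLog.

import com.twitter.util.Future;
import com.twitter.util.FutureEventListener;
import com.twitter.util.Promise;

public class AddEventListenerSketch {

    public static void main(String[] args) {
        // A Promise is a Future that can be completed manually.
        Promise<String> promise = new Promise<String>();
        Future<String> future = promise;

        // addEventListener() registers the callbacks and returns the future,
        // so the call can also be chained or returned directly
        // (as Examples 7 and 18 below do).
        future.addEventListener(new FutureEventListener<String>() {
            @Override
            public void onSuccess(String value) {
                System.out.println("completed with: " + value);
            }

            @Override
            public void onFailure(Throwable cause) {
                System.err.println("failed: " + cause);
            }
        });

        // Completing the promise fires onSuccess above.
        promise.setValue("hello");
    }
}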
Example 1
Source File: FutureUtils.java (from distributedlog, Apache License 2.0)

/**
 * Ignore exception from the <i>future</i> and log <i>errorMsg</i> on exceptions.
 *
 * @param future the original future
 * @param errorMsg the error message to log on exceptions
 * @return a transformed future ignores exceptions
 */
public static <T> Promise<Void> ignore(Future<T> future, final String errorMsg) {
    final Promise<Void> promise = new Promise<Void>();
    future.addEventListener(new FutureEventListener<T>() {
        @Override
        public void onSuccess(T value) {
            setValue(promise, null);
        }

        @Override
        public void onFailure(Throwable cause) {
            if (null != errorMsg) {
                logger.error(errorMsg, cause);
            }
            setValue(promise, null);
        }
    });
    return promise;
}
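As a usage sketch (not taken from FutureUtils.java; writer, record, and streamName are hypothetical names), a caller that only cares that an asynchronous operation finished, not whether it succeeded, might wrap it like this:

// Hypothetical caller: failures are logged inside ignore() and never propagated.
Future<DLSN> writeFuture = writer.write(record);
Future<Void> done = FutureUtils.ignore(writeFuture, "Failed to write record to " + streamName);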
Example 2
Source File: WriterWorker.java (from distributedlog, Apache License 2.0)

@Override
public void run() {
    LOG.info("Started writer {}.", idx);
    while (running) {
        rateLimiter.getLimiter().acquire(batchSize);
        String streamName = streamNames.get(random.nextInt(numStreams));
        final long requestMillis = System.currentTimeMillis();
        final List<ByteBuffer> data = buildBufferList(batchSize, requestMillis, messageSizeBytes);
        if (null == data) {
            break;
        }
        List<Future<DLSN>> results = dlc.writeBulk(streamName, data);
        for (Future<DLSN> result : results) {
            result.addEventListener(new TimedRequestHandler(streamName, requestMillis));
        }
    }
    dlc.close();
}
Example 3
Source File: FederatedZKLogMetadataStore.java (from distributedlog, Apache License 2.0)

private <T> Future<T> postStateCheck(Future<T> future) {
    final Promise<T> postCheckedPromise = new Promise<T>();
    future.addEventListener(new FutureEventListener<T>() {
        @Override
        public void onSuccess(T value) {
            if (duplicatedLogFound.get()) {
                postCheckedPromise.setException(new UnexpectedException("Duplicate log found under " + namespace));
            } else {
                postCheckedPromise.setValue(value);
            }
        }

        @Override
        public void onFailure(Throwable cause) {
            postCheckedPromise.setException(cause);
        }
    });
    return postCheckedPromise;
}
Example 4
Source File: FutureUtils.java (from distributedlog, Apache License 2.0)

/**
 * Ignore exception from the <i>future</i> and log <i>errorMsg</i> on exceptions
 *
 * @param future the original future
 * @param errorMsg the error message to log on exceptions
 * @return a transformed future ignores exceptions
 */
public static <T> Promise<Void> ignore(Future<T> future, final String errorMsg) {
    final Promise<Void> promise = new Promise<Void>();
    future.addEventListener(new FutureEventListener<T>() {
        @Override
        public void onSuccess(T value) {
            setValue(promise, null);
        }

        @Override
        public void onFailure(Throwable cause) {
            if (null != errorMsg) {
                logger.error(errorMsg, cause);
            }
            setValue(promise, null);
        }
    });
    return promise;
}
Example 5
Source File: ZKDistributedLock.java (from distributedlog, Apache License 2.0)

void interruptTryLock(final Future<LockWaiter> tryLockFuture,
                      final Promise<Void> closePromise) {
    if (null == tryLockFuture) {
        unlockInternalLock(closePromise);
    } else {
        tryLockFuture.addEventListener(OrderedFutureEventListener.of(
            new FutureEventListener<LockWaiter>() {
                @Override
                public void onSuccess(LockWaiter waiter) {
                    closeWaiter(waiter, closePromise);
                }

                @Override
                public void onFailure(Throwable cause) {
                    unlockInternalLock(closePromise);
                }
            }, lockStateExecutor, lockPath));
        FutureUtils.cancel(tryLockFuture);
    }
}
Example 6
Source File: WriterWorker.java (from distributedlog, Apache License 2.0)

@Override
public void run() {
    LOG.info("Started writer {}.", idx);
    while (running) {
        rateLimiter.getLimiter().acquire(batchSize);
        String streamName = streamNames.get(random.nextInt(numStreams));
        final long requestMillis = System.currentTimeMillis();
        final List<ByteBuffer> data = buildBufferList(batchSize, requestMillis, messageSizeBytes);
        if (null == data) {
            break;
        }
        List<Future<DLSN>> results = dlc.writeBulk(streamName, data);
        for (Future<DLSN> result : results) {
            result.addEventListener(new TimedRequestHandler(streamName, requestMillis));
        }
    }
    dlc.close();
}
Example 7
Source File: AppendOnlyStreamWriter.java (from distributedlog, Apache License 2.0)

public Future<DLSN> write(byte[] data) {
    requestPos += data.length;
    Future<DLSN> writeResult = logWriter.write(new LogRecord(requestPos, data));
    return writeResult.addEventListener(new WriteCompleteListener(requestPos));
}
Example 8
Source File: TestReader.java (from distributedlog, Apache License 2.0)

private void readNext() {
    Future<LogRecordWithDLSN> record = reader.readNext();
    record.addEventListener(this);
}
Example 9
Source File: TestAsyncReaderWriter.java (from distributedlog, Apache License 2.0)

/**
 * Test Case: Simple Async Writes. Writes 30 records. They should be written correctly.
 * @throws Exception
 */
@Test(timeout = 60000)
public void testSimpleAsyncWrite() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(1024);

    int numLogSegments = 3;
    int numRecordsPerLogSegment = 10;

    DistributedLogManager dlm = createNewDLM(confLocal, name);

    final CountDownLatch syncLatch = new CountDownLatch(numLogSegments * numRecordsPerLogSegment);
    final AtomicBoolean errorsFound = new AtomicBoolean(false);
    final AtomicReference<DLSN> maxDLSN = new AtomicReference<DLSN>(DLSN.InvalidDLSN);
    int txid = 1;
    for (long i = 0; i < numLogSegments; i++) {
        final long currentLogSegmentSeqNo = i + 1;
        BKAsyncLogWriter writer = (BKAsyncLogWriter)(dlm.startAsyncLogSegmentNonPartitioned());
        for (long j = 0; j < numRecordsPerLogSegment; j++) {
            final long currentEntryId = j;
            final LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
            Future<DLSN> dlsnFuture = writer.write(record);
            dlsnFuture.addEventListener(new FutureEventListener<DLSN>() {
                @Override
                public void onSuccess(DLSN value) {
                    if (value.getLogSegmentSequenceNo() != currentLogSegmentSeqNo) {
                        LOG.debug("LogSegmentSequenceNumber: {}, Expected {}",
                            value.getLogSegmentSequenceNo(), currentLogSegmentSeqNo);
                        errorsFound.set(true);
                    }
                    if (value.getEntryId() != currentEntryId) {
                        LOG.debug("EntryId: {}, Expected {}", value.getEntryId(), currentEntryId);
                        errorsFound.set(true);
                    }
                    if (value.compareTo(maxDLSN.get()) > 0) {
                        maxDLSN.set(value);
                    }
                    syncLatch.countDown();
                    LOG.debug("SyncLatch: {}", syncLatch.getCount());
                }

                @Override
                public void onFailure(Throwable cause) {
                    LOG.error("Encountered exception on writing record {} in log segment {}",
                        currentEntryId, currentLogSegmentSeqNo);
                    errorsFound.set(true);
                }
            });
        }
        writer.closeAndComplete();
    }

    syncLatch.await();
    assertFalse("Should not encounter any errors for async writes", errorsFound.get());

    LogRecordWithDLSN last = dlm.getLastLogRecord();
    assertEquals("Last DLSN" + last.getDlsn() + " isn't the maximum DLSN " + maxDLSN.get(),
        last.getDlsn(), maxDLSN.get());
    assertEquals(last.getDlsn(), dlm.getLastDLSN());
    assertEquals(last.getDlsn(), Await.result(dlm.getLastDLSNAsync()));
    DLMTestUtil.verifyLargeLogRecord(last);
    dlm.close();
}
Example 10
Source File: TestAsyncReaderWriter.java (from distributedlog, Apache License 2.0)

private static void readNext(final AsyncLogReader reader,
                             final DLSN startPosition,
                             final long startSequenceId,
                             final boolean monotonic,
                             final CountDownLatch syncLatch,
                             final CountDownLatch completionLatch,
                             final AtomicBoolean errorsFound) {
    Future<LogRecordWithDLSN> record = reader.readNext();
    record.addEventListener(new FutureEventListener<LogRecordWithDLSN>() {
        @Override
        public void onSuccess(LogRecordWithDLSN value) {
            try {
                if (monotonic) {
                    assertEquals(startSequenceId, value.getSequenceId());
                } else {
                    assertTrue(value.getSequenceId() < 0);
                    assertTrue(value.getSequenceId() > startSequenceId);
                }
                LOG.debug("Recevied record {} from {}", value.getDlsn(), reader.getStreamName());
                assertTrue(!value.isControl());
                assertTrue(value.getDlsn().getSlotId() == 0);
                assertTrue(value.getDlsn().compareTo(startPosition) >= 0);
                DLMTestUtil.verifyLargeLogRecord(value);
            } catch (Exception exc) {
                LOG.debug("Exception Encountered when verifying log record {} : ", value.getDlsn(), exc);
                errorsFound.set(true);
                completionLatch.countDown();
                return;
            }
            syncLatch.countDown();
            if (syncLatch.getCount() <= 0) {
                completionLatch.countDown();
            } else {
                TestAsyncReaderWriter.readNext(
                    reader,
                    value.getDlsn().getNextDLSN(),
                    monotonic ? value.getSequenceId() + 1 : value.getSequenceId(),
                    monotonic,
                    syncLatch,
                    completionLatch,
                    errorsFound);
            }
        }

        @Override
        public void onFailure(Throwable cause) {
            LOG.debug("Encountered Exception on reading {}", reader.getStreamName(), cause);
            errorsFound.set(true);
            completionLatch.countDown();
        }
    });
}
Example 11
Source File: TestAsyncReaderWriter.java (from distributedlog, Apache License 2.0)

void testSimpleAsyncReadWriteInternal(String name, boolean immediateFlush, int logSegmentVersion) throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setReadAheadWaitTime(10);
    confLocal.setReadAheadBatchSize(10);
    confLocal.setOutputBufferSize(1024);
    confLocal.setDLLedgerMetadataLayoutVersion(logSegmentVersion);
    confLocal.setImmediateFlushEnabled(immediateFlush);
    DistributedLogManager dlm = createNewDLM(confLocal, name);

    int numLogSegments = 3;
    int numRecordsPerLogSegment = 10;

    final CountDownLatch readLatch = new CountDownLatch(numLogSegments * numRecordsPerLogSegment);
    final CountDownLatch readDoneLatch = new CountDownLatch(1);
    final AtomicBoolean readErrors = new AtomicBoolean(false);
    final CountDownLatch writeLatch = new CountDownLatch(numLogSegments * numRecordsPerLogSegment);
    final AtomicBoolean writeErrors = new AtomicBoolean(false);
    final AsyncLogReader reader = dlm.getAsyncLogReader(DLSN.InvalidDLSN);
    assertEquals(name, reader.getStreamName());

    int txid = 1;
    for (long i = 0; i < 3; i++) {
        final long currentLogSegmentSeqNo = i + 1;
        BKAsyncLogWriter writer = (BKAsyncLogWriter)(dlm.startAsyncLogSegmentNonPartitioned());
        for (long j = 0; j < 10; j++) {
            final long currentEntryId = j;
            final LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
            Future<DLSN> dlsnFuture = writer.write(record);
            dlsnFuture.addEventListener(new WriteFutureEventListener(
                record, currentLogSegmentSeqNo, currentEntryId, writeLatch, writeErrors, true));
            if (i == 0 && j == 0) {
                boolean monotonic = LogSegmentMetadata.supportsSequenceId(logSegmentVersion);
                TestAsyncReaderWriter.readNext(
                    reader,
                    DLSN.InvalidDLSN,
                    monotonic ? 0L : Long.MIN_VALUE,
                    monotonic,
                    readLatch,
                    readDoneLatch,
                    readErrors);
            }
        }
        writer.closeAndComplete();
    }

    writeLatch.await();
    assertFalse("All writes should succeed", writeErrors.get());

    readDoneLatch.await();
    assertFalse("All reads should succeed", readErrors.get());
    readLatch.await();

    Utils.close(reader);
    dlm.close();
}
Example 12
Source File: TestAsyncReaderWriter.java (from distributedlog, Apache License 2.0)

/**
 * Test Case: starting reading when the streams don't exist.
 *
 * @throws Exception
 */
@Test(timeout = 60000)
public void testSimpleAsyncReadWriteStartEmpty() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setReadAheadWaitTime(10);
    confLocal.setReadAheadBatchSize(10);
    confLocal.setOutputBufferSize(1024);

    int numLogSegments = 3;
    int numRecordsPerLogSegment = 10;

    DistributedLogManager dlm = createNewDLM(confLocal, name);

    final CountDownLatch readerReadyLatch = new CountDownLatch(1);
    final CountDownLatch readerDoneLatch = new CountDownLatch(1);
    final CountDownLatch readerSyncLatch = new CountDownLatch(numLogSegments * numRecordsPerLogSegment);

    final TestReader reader = new TestReader(
            "test-reader",
            dlm,
            DLSN.InitialDLSN,
            false,
            0,
            readerReadyLatch,
            readerSyncLatch,
            readerDoneLatch);

    reader.start();

    // Increase the probability of reader failure and retry
    Thread.sleep(500);

    final AtomicBoolean writeErrors = new AtomicBoolean(false);
    final CountDownLatch writeLatch = new CountDownLatch(30);

    int txid = 1;
    for (long i = 0; i < 3; i++) {
        final long currentLogSegmentSeqNo = i + 1;
        BKAsyncLogWriter writer = (BKAsyncLogWriter)(dlm.startAsyncLogSegmentNonPartitioned());
        for (long j = 0; j < 10; j++) {
            final long currentEntryId = j;
            final LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
            Future<DLSN> dlsnFuture = writer.write(record);
            dlsnFuture.addEventListener(new WriteFutureEventListener(
                record, currentLogSegmentSeqNo, currentEntryId, writeLatch, writeErrors, true));
        }
        writer.closeAndComplete();
    }

    writeLatch.await();
    assertFalse("All writes should succeed", writeErrors.get());

    readerDoneLatch.await();
    assertFalse("Should not encounter errors during reading", reader.areErrorsFound());
    readerSyncLatch.await();

    assertTrue("Should position reader at least once", reader.getNumReaderPositions().get() > 1);
    dlm.close();
}
Example 13
Source File: TestAsyncReaderWriter.java (from distributedlog, Apache License 2.0)

/**
 * Test Case: starting reading when the streams don't exist.
 * {@link https://issues.apache.org/jira/browse/DL-42}
 */
@DistributedLogAnnotations.FlakyTest
@Ignore
@Test(timeout = 120000)
public void testSimpleAsyncReadWriteStartEmptyFactory() throws Exception {
    int count = 50;
    String name = runtime.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setReadAheadWaitTime(10);
    confLocal.setReadAheadBatchSize(10);
    confLocal.setOutputBufferSize(1024);

    int numLogSegments = 3;
    int numRecordsPerLogSegment = 1;

    URI uri = createDLMURI("/" + name);
    ensureURICreated(uri);
    DistributedLogNamespace namespace = DistributedLogNamespaceBuilder.newBuilder()
            .conf(confLocal).uri(uri).build();
    final DistributedLogManager[] dlms = new DistributedLogManager[count];
    final TestReader[] readers = new TestReader[count];
    final CountDownLatch readyLatch = new CountDownLatch(count);
    final CountDownLatch[] syncLatches = new CountDownLatch[count];
    final CountDownLatch[] readerDoneLatches = new CountDownLatch[count];
    for (int s = 0; s < count; s++) {
        dlms[s] = namespace.openLog(name + String.format("%d", s));
        readerDoneLatches[s] = new CountDownLatch(1);
        syncLatches[s] = new CountDownLatch(numLogSegments * numRecordsPerLogSegment);
        readers[s] = new TestReader("reader-" + s, dlms[s], DLSN.InitialDLSN, false, 0,
                readyLatch, syncLatches[s], readerDoneLatches[s]);
        readers[s].start();
    }

    // wait all readers were positioned at least once
    readyLatch.await();

    final CountDownLatch writeLatch = new CountDownLatch(3 * count);
    final AtomicBoolean writeErrors = new AtomicBoolean(false);

    int txid = 1;
    for (long i = 0; i < 3; i++) {
        final long currentLogSegmentSeqNo = i + 1;
        BKAsyncLogWriter[] writers = new BKAsyncLogWriter[count];
        for (int s = 0; s < count; s++) {
            writers[s] = (BKAsyncLogWriter)(dlms[s].startAsyncLogSegmentNonPartitioned());
        }
        for (long j = 0; j < 1; j++) {
            final long currentEntryId = j;
            final LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
            for (int s = 0; s < count; s++) {
                Future<DLSN> dlsnFuture = writers[s].write(record);
                dlsnFuture.addEventListener(new WriteFutureEventListener(
                    record, currentLogSegmentSeqNo, currentEntryId, writeLatch, writeErrors, true));
            }
        }
        for (int s = 0; s < count; s++) {
            writers[s].closeAndComplete();
        }
    }

    writeLatch.await();
    assertFalse("All writes should succeed", writeErrors.get());

    for (int s = 0; s < count; s++) {
        readerDoneLatches[s].await();
        assertFalse("Reader " + s + " should not encounter errors", readers[s].areErrorsFound());
        syncLatches[s].await();
        assertEquals(numLogSegments * numRecordsPerLogSegment, readers[s].getNumReads().get());
        assertTrue("Reader " + s + " should position at least once",
                readers[s].getNumReaderPositions().get() > 0);
    }

    for (int s = 0; s < count; s++) {
        readers[s].stop();
        dlms[s].close();
    }
}
Example 14
Source File: TestAsyncReaderWriter.java (from distributedlog, Apache License 2.0)

/**
 * Flaky test fixed: readers need to be added to the pendingReaders
 * @throws Exception
 */
@Test(timeout = 300000)
public void testSimpleAsyncReadWriteSimulateErrors() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setReadAheadWaitTime(10);
    confLocal.setReadAheadBatchSize(10);
    confLocal.setOutputBufferSize(1024);
    DistributedLogManager dlm = createNewDLM(confLocal, name);

    int numLogSegments = 20;
    int numRecordsPerLogSegment = 10;

    final CountDownLatch doneLatch = new CountDownLatch(1);
    final CountDownLatch syncLatch = new CountDownLatch(numLogSegments * numRecordsPerLogSegment);

    TestReader reader = new TestReader(
            "test-reader",
            dlm,
            DLSN.InitialDLSN,
            true,
            0,
            new CountDownLatch(1),
            syncLatch,
            doneLatch);

    reader.start();

    final CountDownLatch writeLatch = new CountDownLatch(200);
    final AtomicBoolean writeErrors = new AtomicBoolean(false);

    int txid = 1;
    for (long i = 0; i < numLogSegments; i++) {
        final long currentLogSegmentSeqNo = i + 1;
        BKAsyncLogWriter writer = (BKAsyncLogWriter)(dlm.startAsyncLogSegmentNonPartitioned());
        for (long j = 0; j < numRecordsPerLogSegment; j++) {
            final long currentEntryId = j;
            final LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
            Future<DLSN> dlsnFuture = writer.write(record);
            dlsnFuture.addEventListener(new WriteFutureEventListener(
                record, currentLogSegmentSeqNo, currentEntryId, writeLatch, writeErrors, true));
        }
        writer.closeAndComplete();
    }

    writeLatch.await();
    assertFalse("All writes should succeed", writeErrors.get());

    doneLatch.await();
    assertFalse("Should not encounter errors during reading", reader.areErrorsFound());
    syncLatch.await();

    assertTrue("Should position reader at least once", reader.getNumReaderPositions().get() > 1);
    dlm.close();
}
Example 15
Source File: TestAsyncReaderWriter.java (from distributedlog, Apache License 2.0)

@Test(timeout = 60000)
public void testSimpleAsyncReadWritePiggyBack() throws Exception {
    String name = runtime.getMethodName();

    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setEnableReadAhead(true);
    confLocal.setReadAheadWaitTime(500);
    confLocal.setReadAheadBatchSize(10);
    confLocal.setReadAheadMaxRecords(100);
    confLocal.setOutputBufferSize(1024);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(100);
    DistributedLogManager dlm = createNewDLM(confLocal, name);

    final AsyncLogReader reader = dlm.getAsyncLogReader(DLSN.InvalidDLSN);

    int numLogSegments = 3;
    int numRecordsPerLogSegment = 10;

    final CountDownLatch readLatch = new CountDownLatch(30);
    final CountDownLatch readDoneLatch = new CountDownLatch(1);
    final AtomicBoolean readErrors = new AtomicBoolean(false);
    final CountDownLatch writeLatch = new CountDownLatch(30);
    final AtomicBoolean writeErrors = new AtomicBoolean(false);

    int txid = 1;
    for (long i = 0; i < numLogSegments; i++) {
        final long currentLogSegmentSeqNo = i + 1;
        BKAsyncLogWriter writer = (BKAsyncLogWriter)(dlm.startAsyncLogSegmentNonPartitioned());
        for (long j = 0; j < numRecordsPerLogSegment; j++) {
            Thread.sleep(50);
            final LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
            Future<DLSN> dlsnFuture = writer.write(record);
            dlsnFuture.addEventListener(new WriteFutureEventListener(
                record, currentLogSegmentSeqNo, j, writeLatch, writeErrors, false));
            if (i == 0 && j == 0) {
                boolean monotonic = LogSegmentMetadata.supportsSequenceId(confLocal.getDLLedgerMetadataLayoutVersion());
                TestAsyncReaderWriter.readNext(
                    reader,
                    DLSN.InvalidDLSN,
                    monotonic ? 0L : Long.MIN_VALUE,
                    monotonic,
                    readLatch,
                    readDoneLatch,
                    readErrors);
            }
        }
        writer.closeAndComplete();
    }

    writeLatch.await();
    assertFalse("All writes should succeed", writeErrors.get());

    readDoneLatch.await();
    assertFalse("All reads should succeed", readErrors.get());
    readLatch.await();

    Utils.close(reader);
    dlm.close();
}
Example 16
Source File: TestAsyncReaderWriter.java (from distributedlog, Apache License 2.0)

@Test(timeout = 60000)
public void testAsyncWriteWithMinDelayBetweenFlushes() throws Exception {
    String name = "distrlog-asyncwrite-mindelay";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setMinDelayBetweenImmediateFlushMs(100);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    final Thread currentThread = Thread.currentThread();
    final int COUNT = 5000;
    final CountDownLatch syncLatch = new CountDownLatch(COUNT);

    int txid = 1;
    BKAsyncLogWriter writer = (BKAsyncLogWriter)(dlm.startAsyncLogSegmentNonPartitioned());
    Stopwatch executionTime = Stopwatch.createStarted();
    for (long i = 0; i < COUNT; i++) {
        Thread.sleep(1);
        final LogRecord record = DLMTestUtil.getLogRecordInstance(txid++);
        Future<DLSN> dlsnFuture = writer.write(record);
        dlsnFuture.addEventListener(new FutureEventListener<DLSN>() {
            @Override
            public void onSuccess(DLSN value) {
                syncLatch.countDown();
                LOG.debug("SyncLatch: {} ; DLSN: {} ", syncLatch.getCount(), value);
            }

            @Override
            public void onFailure(Throwable cause) {
                currentThread.interrupt();
            }
        });
    }

    boolean success = false;
    if (!(Thread.interrupted())) {
        try {
            success = syncLatch.await(10, TimeUnit.SECONDS);
        } catch (InterruptedException exc) {
            Thread.currentThread().interrupt();
        }
    }

    // Abort, not graceful close, since the latter will
    // flush as well, and may add an entry.
    writer.abort();
    executionTime.stop();

    assert(!(Thread.interrupted()));
    assert(success);

    LogRecordWithDLSN last = dlm.getLastLogRecord();
    LOG.info("Last Entry {}; elapsed time {}",
        last.getDlsn().getEntryId(), executionTime.elapsed(TimeUnit.MILLISECONDS));

    // Regardless of how many records we wrote; the number of BK entries should always be bounded by the min delay.
    // Since there are two flush processes--data flush and control flush, and since control flush may also end up flushing
    // data if data is available, the upper bound is 2*(time/min_delay + 1)
    assertTrue(last.getDlsn().getEntryId() <=
        ((executionTime.elapsed(TimeUnit.MILLISECONDS) / confLocal.getMinDelayBetweenImmediateFlushMs() + 1)) * 2);
    DLMTestUtil.verifyLogRecord(last);

    dlm.close();
}
Example 17
Source File: StreamImpl.java (from distributedlog, Apache License 2.0)

/**
 * Shouldn't call close directly. The callers should call #requestClose instead
 *
 * @param shouldAbort shall we abort the stream instead of closing
 */
private Future<Void> close(boolean shouldAbort) {
    boolean abort;
    closeLock.writeLock().lock();
    try {
        if (StreamStatus.CLOSED == status) {
            return closePromise;
        }
        abort = shouldAbort || (StreamStatus.INITIALIZED != status && StreamStatus.CLOSING != status);
        status = StreamStatus.CLOSED;
        streamManager.notifyReleased(this);
    } finally {
        closeLock.writeLock().unlock();
    }
    logger.info("Closing stream {} ...", name);
    running = false;
    // stop any outstanding ownership acquire actions first
    synchronized (this) {
        if (null != tryAcquireScheduledFuture) {
            tryAcquireScheduledFuture.cancel(true);
        }
    }
    logger.info("Stopped threads of stream {}.", name);

    // Close the writers to release the locks before failing the requests
    Future<Void> closeWriterFuture;
    if (abort) {
        closeWriterFuture = Abortables.asyncAbort(writer, true);
    } else {
        closeWriterFuture = Utils.asyncClose(writer, true);
    }
    // close the manager and error out pending requests after close writer
    closeWriterFuture.addEventListener(FutureUtils.OrderedFutureEventListener.of(
        new FutureEventListener<Void>() {
            @Override
            public void onSuccess(Void value) {
                closeManagerAndErrorOutPendingRequests();
                FutureUtils.setValue(closePromise, null);
            }

            @Override
            public void onFailure(Throwable cause) {
                closeManagerAndErrorOutPendingRequests();
                FutureUtils.setValue(closePromise, null);
            }
        }, scheduler, name));
    return closePromise;
}
Example 18
Source File: FutureUtils.java (from distributedlog, Apache License 2.0)

/**
 * Add a event listener over <i>result</i> for collecting the operation stats.
 *
 * @param result result to listen on
 * @param opStatsLogger stats logger to record operations stats
 * @param stopwatch stop watch to time operation
 * @param <T>
 * @return result after registered the event listener
 */
public static <T> Future<T> stats(Future<T> result, OpStatsLogger opStatsLogger, Stopwatch stopwatch) {
    return result.addEventListener(new OpStatsListener<T>(opStatsLogger, stopwatch));
}
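As a usage sketch (writer, record, and writeOpStats are hypothetical names, not part of FutureUtils.java), the helper can wrap an asynchronous call so that its operation stats are collected on an OpStatsLogger:

// Hypothetical usage: time an asynchronous write and collect its operation stats.
Stopwatch stopwatch = Stopwatch.createStarted();
Future<DLSN> timedWrite = FutureUtils.stats(writer.write(record), writeOpStats, stopwatch);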