com.twitter.util.Future Java Examples
The following examples show how to use com.twitter.util.Future.
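Before diving into the project examples, the following minimal sketch shows the pattern most of them share: wrap a callback-style API in a com.twitter.util.Promise, hand it back as a Future, and then either attach a FutureEventListener or block with Await.result. Only the com.twitter.util classes (Future, Promise, FutureEventListener, Await, Duration) are real here; the callback interface and the fetchAsync/fetch methods are hypothetical scaffolding invented for illustration, not code from any of the projects below.

import com.twitter.util.Await;
import com.twitter.util.Duration;
import com.twitter.util.Future;
import com.twitter.util.FutureEventListener;
import com.twitter.util.Promise;

public class FutureSketch {

    // Hypothetical callback interface standing in for any callback-style async API.
    interface LookupCallback {
        void onComplete(int rc, String value);
    }

    // Hypothetical async API to adapt; it invokes the callback on another thread.
    static void fetchAsync(String key, LookupCallback cb) {
        new Thread(() -> cb.onComplete(0, "value-of-" + key)).start();
    }

    // Adapt the callback API to a Future by completing a Promise.
    static Future<String> fetch(String key) {
        final Promise<String> promise = new Promise<String>();
        fetchAsync(key, (rc, value) -> {
            if (rc == 0) {
                promise.setValue(value);
            } else {
                promise.setException(new RuntimeException("lookup failed, rc=" + rc));
            }
        });
        return promise;
    }

    public static void main(String[] args) throws Exception {
        Future<String> result = fetch("key-1");

        // React asynchronously, as most of the examples below do.
        result.addEventListener(new FutureEventListener<String>() {
            @Override
            public void onSuccess(String value) {
                System.out.println("got " + value);
            }

            @Override
            public void onFailure(Throwable cause) {
                cause.printStackTrace();
            }
        });

        // Or block for the result, as the tests below do with Await.result.
        System.out.println(Await.result(result, Duration.fromSeconds(10)));
    }
}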
Example #1
Source File: LedgerHandleCache.java From distributedlog with Apache License 2.0

/**
 * Async Read Entries
 *
 * @param ledgerDesc
 *          ledger descriptor
 * @param first
 *          first entry
 * @param last
 *          last entry
 */
public Future<Enumeration<LedgerEntry>> asyncReadEntries(
        LedgerDescriptor ledgerDesc, long first, long last) {
    RefCountedLedgerHandle refHandle = handlesMap.get(ledgerDesc);
    if (null == refHandle) {
        LOG.error("Accessing ledger {} without opening.", ledgerDesc);
        return Future.exception(BKException.create(
                BKException.Code.UnexpectedConditionException));
    }
    final Promise<Enumeration<LedgerEntry>> promise =
            new Promise<Enumeration<LedgerEntry>>();
    refHandle.handle.asyncReadEntries(first, last, new AsyncCallback.ReadCallback() {
        @Override
        public void readComplete(int rc, LedgerHandle lh,
                                 Enumeration<LedgerEntry> entries, Object ctx) {
            if (BKException.Code.OK == rc) {
                promise.setValue(entries);
            } else {
                promise.setException(BKException.create(rc));
            }
        }
    }, null);
    return promise;
}
Example #2
Source File: BKLogHandler.java From distributedlog with Apache License 2.0

protected Future<List<LogSegmentMetadata>> asyncForceGetLedgerList(
        final Comparator<LogSegmentMetadata> comparator,
        final LogSegmentFilter segmentFilter,
        final boolean throwOnEmpty) {
    final Promise<List<LogSegmentMetadata>> promise =
            new Promise<List<LogSegmentMetadata>>();
    final Stopwatch stopwatch = Stopwatch.createStarted();
    asyncGetLedgerListWithRetries(comparator, segmentFilter, null)
        .addEventListener(new FutureEventListener<List<LogSegmentMetadata>>() {
            @Override
            public void onSuccess(List<LogSegmentMetadata> ledgers) {
                forceGetListStat.registerSuccessfulEvent(
                        stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
                if (ledgers.isEmpty() && throwOnEmpty) {
                    promise.setException(new LogEmptyException(
                            "Log " + getFullyQualifiedName() + " is empty"));
                } else {
                    promise.setValue(ledgers);
                }
            }

            @Override
            public void onFailure(Throwable cause) {
                forceGetListStat.registerFailedEvent(
                        stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
                promise.setException(cause);
            }
        });
    return promise;
}
Example #3
Source File: SafeQueueingFuturePool.java From distributedlog with Apache License 2.0

public synchronized Future<T> apply(final Function0<T> fn) {
    Preconditions.checkNotNull(fn);
    if (closed) {
        return Future.exception(new RejectedExecutionException(
                "Operation submitted to closed SafeQueueingFuturePool"));
    }
    ++outstanding;
    queue.add(fn);
    Future<T> result = orderedFuturePool.apply(new Function0<T>() {
        @Override
        public T apply() {
            return queue.poll().apply();
        }
        @Override
        public String toString() {
            return fn.toString();
        }
    }).ensure(new Function0<BoxedUnit>() {
        public BoxedUnit apply() {
            if (decrOutstandingAndCheckDone()) {
                applyAll();
            }
            return null;
        }
    });
    return result;
}
Example #4
Source File: HeartbeatOp.java From distributedlog with Apache License 2.0

@Override
protected Future<WriteResponse> executeOp(AsyncLogWriter writer,
                                          Sequencer sequencer,
                                          Object txnLock) {
    // write a control record if heartbeat is the first request of the recovered log segment.
    if (writeControlRecord) {
        long txnId;
        Future<DLSN> writeResult;
        synchronized (txnLock) {
            txnId = sequencer.nextId();
            LogRecord hbRecord = new LogRecord(txnId, HEARTBEAT_DATA);
            hbRecord.setControl();
            writeResult = newTFuture(writer.write(hbRecord));
        }
        return writeResult.map(new AbstractFunction1<DLSN, WriteResponse>() {
            @Override
            public WriteResponse apply(DLSN value) {
                return ResponseUtils.writeSuccess().setDlsn(value.serialize(dlsnVersion));
            }
        });
    } else {
        return Future.value(ResponseUtils.writeSuccess());
    }
}
Example #5
Source File: HeartbeatOp.java From distributedlog with Apache License 2.0

@Override
protected Future<WriteResponse> executeOp(AsyncLogWriter writer,
                                          Sequencer sequencer,
                                          Object txnLock) {
    // write a control record if heartbeat is the first request of the recovered log segment.
    if (writeControlRecord) {
        long txnId;
        Future<DLSN> writeResult;
        synchronized (txnLock) {
            txnId = sequencer.nextId();
            writeResult = ((BKAsyncLogWriter) writer).writeControlRecord(
                    new LogRecord(txnId, HEARTBEAT_DATA));
        }
        return writeResult.map(new AbstractFunction1<DLSN, WriteResponse>() {
            @Override
            public WriteResponse apply(DLSN value) {
                return ResponseUtils.writeSuccess().setDlsn(value.serialize(dlsnVersion));
            }
        });
    } else {
        return Future.value(ResponseUtils.writeSuccess());
    }
}
Example #6
Source File: Utils.java From distributedlog with Apache License 2.0

/**
 * Asynchronously create zookeeper path recursively and optimistically.
 *
 * @param zkc Zookeeper client
 * @param pathToCreate Zookeeper full path
 * @param data Zookeeper data
 * @param acl Acl of the zk path
 * @param createMode Create mode of zk path
 */
public static Future<BoxedUnit> zkAsyncCreateFullPathOptimistic(
        final ZooKeeperClient zkc,
        final String pathToCreate,
        final byte[] data,
        final List<ACL> acl,
        final CreateMode createMode) {
    Optional<String> parentPathShouldNotCreate = Optional.absent();
    return zkAsyncCreateFullPathOptimistic(
            zkc, pathToCreate, parentPathShouldNotCreate, data, acl, createMode);
}
Example #7
Source File: StreamManagerImpl.java From distributedlog with Apache License 2.0

/**
 * Must be enqueued to an executor to avoid deadlocks (close and execute-op both
 * try to acquire the same read-write lock).
 */
@Override
public Future<Void> closeAndRemoveAsync(final String streamName) {
    final Promise<Void> releasePromise = new Promise<Void>();
    java.util.concurrent.Future<?> scheduleFuture = schedule(new Runnable() {
        @Override
        public void run() {
            releasePromise.become(doCloseAndRemoveAsync(streamName));
        }
    }, 0);
    if (null == scheduleFuture) {
        return Future.exception(
                new ServiceUnavailableException("Couldn't schedule a release task."));
    }
    return releasePromise;
}
Example #8
Source File: WriterWorker.java From distributedlog with Apache License 2.0

@Override
public void run() {
    LOG.info("Started writer {}.", idx);
    while (running) {
        rateLimiter.getLimiter().acquire(batchSize);
        String streamName = streamNames.get(random.nextInt(numStreams));
        final long requestMillis = System.currentTimeMillis();
        final List<ByteBuffer> data = buildBufferList(batchSize, requestMillis, messageSizeBytes);
        if (null == data) {
            break;
        }
        List<Future<DLSN>> results = dlc.writeBulk(streamName, data);
        for (Future<DLSN> result : results) {
            result.addEventListener(new TimedRequestHandler(streamName, requestMillis));
        }
    }
    dlc.close();
}
Example #9
Source File: TestDistributedLogServerBase.java From distributedlog with Apache License 2.0

@Test(timeout = 60000)
public void testBulkWriteEmptyBuffer() throws Exception {
    String name = String.format("dlserver-bulk-write-%s", "empty");
    dlClient.routingService.addHost(name, dlServer.getAddress());

    List<ByteBuffer> writes = new ArrayList<ByteBuffer>();
    writes.add(ByteBuffer.wrap(("").getBytes()));
    writes.add(ByteBuffer.wrap(("").getBytes()));

    List<Future<DLSN>> futures = dlClient.dlClient.writeBulk(name, writes);
    assertEquals(2, futures.size());
    for (Future<DLSN> future : futures) {
        // No throw == pass
        DLSN dlsn = Await.result(future, Duration.fromSeconds(10));
    }
}
Example #10
Source File: StatsFilter.java From distributedlog with Apache License 2.0

@Override
public Future<Rep> apply(Req req, Service<Req, Rep> service) {
    Future<Rep> result = null;
    outstandingAsync.inc();
    final Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        result = service.apply(req);
        serviceExec.registerSuccessfulEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
    } finally {
        outstandingAsync.dec();
        if (null == result) {
            serviceExec.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        }
    }
    return result;
}
Example #11
Source File: TestSafeQueueingFuturePool.java From distributedlog with Apache License 2.0

@Test
public void testFailedDueToClosed() throws Exception {
    TestFuturePool<Void> pool = new TestFuturePool<Void>();
    pool.wrapper.close();
    Future<Void> future = pool.wrapper.apply(new Function0<Void>() {
        public Void apply() {
            throw new RuntimeException("failed");
        }
    });
    try {
        Await.result(future);
        fail("should have thrown");
    } catch (RejectedExecutionException ex) {
    }
    pool.shutdown();
}
Example #12
Source File: TestBKLogReadHandler.java From distributedlog with Apache License 2.0

@Test(timeout = 60000)
public void testGetLogRecordCountWithSingleInProgressLedger() throws Exception {
    String streamName = runtime.getMethodName();
    BKDistributedLogManager bkdlm = (BKDistributedLogManager) createNewDLM(conf, streamName);

    AsyncLogWriter out = bkdlm.startAsyncLogSegmentNonPartitioned();
    int txid = 1;

    Await.result(out.write(DLMTestUtil.getLargeLogRecordInstance(txid++, false)));
    Await.result(out.write(DLMTestUtil.getLargeLogRecordInstance(txid++, false)));
    Await.result(out.write(DLMTestUtil.getLargeLogRecordInstance(txid++, false)));

    BKLogReadHandler readHandler = bkdlm.createReadHandler();
    List<LogSegmentMetadata> ledgerList = readHandler.getLedgerList(
            false, false, LogSegmentMetadata.COMPARATOR, false);
    assertEquals(1, ledgerList.size());
    assertTrue(ledgerList.get(0).isInProgress());

    Future<Long> count = null;
    count = readHandler.asyncGetLogRecordCount(new DLSN(1, 0, 0));
    assertEquals(2, Await.result(count).longValue());

    Utils.close(out);
}
Example #13
Source File: FutureUtils.java From distributedlog with Apache License 2.0

/**
 * Ignore exception from the <i>future</i> and log <i>errorMsg</i> on exceptions.
 *
 * @param future the original future
 * @param errorMsg the error message to log on exceptions
 * @return a transformed future that ignores exceptions
 */
public static <T> Promise<Void> ignore(Future<T> future, final String errorMsg) {
    final Promise<Void> promise = new Promise<Void>();
    future.addEventListener(new FutureEventListener<T>() {
        @Override
        public void onSuccess(T value) {
            setValue(promise, null);
        }

        @Override
        public void onFailure(Throwable cause) {
            if (null != errorMsg) {
                logger.error(errorMsg, cause);
            }
            setValue(promise, null);
        }
    });
    return promise;
}
Example #14
Source File: TestDistributedLogMultiStreamWriter.java From distributedlog with Apache License 2.0

@Test(timeout = 20000)
public void testFailRequestAfterRetriedAllStreams() throws Exception {
    DistributedLogClient client = mock(DistributedLogClient.class);
    when(client.writeRecordSet((String) any(), (LogRecordSetBuffer) any()))
            .thenReturn(new Promise<DLSN>());
    DistributedLogMultiStreamWriter writer = DistributedLogMultiStreamWriter.newBuilder()
            .streams(Lists.newArrayList("stream1", "stream2"))
            .client(client)
            .compressionCodec(CompressionCodec.Type.LZ4)
            .firstSpeculativeTimeoutMs(10)
            .maxSpeculativeTimeoutMs(20)
            .speculativeBackoffMultiplier(2)
            .requestTimeoutMs(5000000)
            .flushIntervalMs(10)
            .bufferSize(Integer.MAX_VALUE)
            .build();

    byte[] data = "test-test".getBytes(UTF_8);
    ByteBuffer buffer = ByteBuffer.wrap(data);
    Future<DLSN> writeFuture = writer.write(buffer);
    try {
        Await.result(writeFuture);
        fail("Should fail the request after retries all streams");
    } catch (IndividualRequestTimeoutException e) {
        long timeoutMs = e.timeout().inMilliseconds();
        assertTrue(timeoutMs >= (10 + 20) && timeoutMs < 5000000);
    }
    writer.close();
}
Example #15
Source File: TestBKLogReadHandler.java From distributedlog with Apache License 2.0

@Test(timeout = 60000)
public void testGetFirstDLSNWithOpenLedger() throws Exception {
    String dlName = runtime.getMethodName();

    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setOutputBufferSize(0);

    int numEntriesPerSegment = 100;
    DistributedLogManager dlm1 = createNewDLM(confLocal, dlName);
    long txid = 1;

    ArrayList<Future<DLSN>> futures = new ArrayList<Future<DLSN>>(numEntriesPerSegment);
    AsyncLogWriter out = dlm1.startAsyncLogSegmentNonPartitioned();
    for (int eid = 0; eid < numEntriesPerSegment; ++eid) {
        futures.add(out.write(DLMTestUtil.getLogRecordInstance(txid)));
        ++txid;
    }
    for (Future<DLSN> future : futures) {
        Await.result(future);
    }

    BKLogReadHandler readHandler = ((BKDistributedLogManager) dlm1).createReadHandler();

    DLSN last = dlm1.getLastDLSN();
    assertEquals(new DLSN(1, 99, 0), last);
    DLSN first = Await.result(dlm1.getFirstDLSNAsync());
    assertEquals(new DLSN(1, 0, 0), first);
    Utils.close(out);
}
Example #16
Source File: LedgerAllocatorDelegator.java From distributedlog with Apache License 2.0

@Override
public Future<Void> asyncClose() {
    if (ownAllocator) {
        return this.allocator.asyncClose();
    } else {
        return Future.value(null);
    }
}
Example #17
Source File: TestDistributedLogServer.java From distributedlog with Apache License 2.0

@Test(timeout = 60000)
public void testBulkWriteTotalFailureLostLock() throws Exception {
    String name = String.format("dlserver-bulk-write-%s", "lost-lock");
    dlClient.routingService.addHost(name, dlServer.getAddress());

    final int writeCount = 8;
    List<ByteBuffer> writes = new ArrayList<ByteBuffer>(writeCount + 1);
    ByteBuffer buf = ByteBuffer.allocate(8);
    writes.add(buf);
    for (long i = 1; i <= writeCount; i++) {
        writes.add(ByteBuffer.wrap(("" + i).getBytes()));
    }

    // Warm it up with a write.
    Await.result(dlClient.dlClient.write(name, ByteBuffer.allocate(8)));

    // Failpoint a lost lock, make sure the failure gets promoted to an operation failure.
    DistributedLogServiceImpl svcImpl = (DistributedLogServiceImpl) dlServer.dlServer.getLeft();
    try {
        FailpointUtils.setFailpoint(
            FailpointUtils.FailPointName.FP_WriteInternalLostLock,
            FailpointUtils.FailPointActions.FailPointAction_Default
        );
        Future<BulkWriteResponse> futures = svcImpl.writeBulkWithContext(name, writes, new WriteContext());
        assertEquals(StatusCode.LOCKING_EXCEPTION, Await.result(futures).header.code);
    } finally {
        FailpointUtils.removeFailpoint(
            FailpointUtils.FailPointName.FP_WriteInternalLostLock
        );
    }
}
Example #18
Source File: BKDistributedLogManager.java From distributedlog with Apache License 2.0

private Future<LogRecordWithDLSN> getLastLogRecordAsyncInternal(final boolean recover,
                                                                final boolean includeEndOfStream) {
    return processReaderOperation(new Function<BKLogReadHandler, Future<LogRecordWithDLSN>>() {
        @Override
        public Future<LogRecordWithDLSN> apply(final BKLogReadHandler ledgerHandler) {
            return ledgerHandler.getLastLogRecordAsync(recover, includeEndOfStream);
        }
    });
}
Example #19
Source File: BKLogWriteHandler.java From distributedlog with Apache License 2.0

Future<List<LogSegmentMetadata>> setLogSegmentsOlderThanDLSNTruncated(final DLSN dlsn) {
    if (DLSN.InvalidDLSN == dlsn) {
        List<LogSegmentMetadata> emptyList = new ArrayList<LogSegmentMetadata>(0);
        return Future.value(emptyList);
    }
    scheduleGetAllLedgersTaskIfNeeded();
    return asyncGetFullLedgerList(false, false).flatMap(
        new AbstractFunction1<List<LogSegmentMetadata>, Future<List<LogSegmentMetadata>>>() {
            @Override
            public Future<List<LogSegmentMetadata>> apply(List<LogSegmentMetadata> logSegments) {
                return setLogSegmentsOlderThanDLSNTruncated(logSegments, dlsn);
            }
        });
}
Example #20
Source File: StreamImpl.java From distributedlog with Apache License 2.0

Future<Void> requestClose(String reason, boolean uncache) {
    final boolean abort;
    closeLock.writeLock().lock();
    try {
        if (StreamStatus.CLOSING == status
            || StreamStatus.CLOSED == status) {
            return closePromise;
        }
        logger.info("Request to close stream {} : {}", getStreamName(), reason);
        // if the stream isn't closed from INITIALIZED state, we abort the stream instead of closing it.
        abort = StreamStatus.INITIALIZED != status;
        status = StreamStatus.CLOSING;
        streamManager.notifyReleased(this);
    } finally {
        closeLock.writeLock().unlock();
    }
    // we will fail the requests that are coming in between closing and closed only
    // after the async writer is closed. so we could clear up the lock before redirect
    // them.
    close(abort);
    if (uncache) {
        closePromise.onSuccess(new AbstractFunction1<Void, BoxedUnit>() {
            @Override
            public BoxedUnit apply(Void result) {
                if (streamManager.notifyRemoved(StreamImpl.this)) {
                    logger.info("Removed cached stream {} after closed.", name);
                }
                return BoxedUnit.UNIT;
            }
        });
    }
    return closePromise;
}
Example #21
Source File: FederatedZKLogMetadataStore.java From distributedlog with Apache License 2.0

@Override
public Future<Iterator<String>> getLogs() {
    if (duplicatedLogFound.get()) {
        return duplicatedLogException(duplicatedLogName.get());
    }
    return postStateCheck(retrieveLogs().map(
        new AbstractFunction1<List<Set<String>>, Iterator<String>>() {
            @Override
            public Iterator<String> apply(List<Set<String>> resultList) {
                return getIterator(resultList);
            }
        }));
}
Example #22
Source File: ServerTracingFilterInterceptor.java From skywalking with Apache License 2.0

@Override
public Object afterMethodImpl(EnhancedInstance enhancedInstance, Method method, Object[] objects,
                              Class<?>[] classes, Object ret) throws Throwable {
    final AbstractSpan finagleSpan = getSpan();
    getLocalContextHolder().remove(FinagleCtxs.SW_SPAN);

    /*
     * If the intercepted method throws an exception, ret will be null.
     */
    if (ret == null) {
        ContextManager.stopSpan(finagleSpan);
    } else {
        finagleSpan.prepareForAsync();
        ContextManager.stopSpan(finagleSpan);
        ((Future<?>) ret).addEventListener(new FutureEventListener<Object>() {
            @Override
            public void onSuccess(Object value) {
                finagleSpan.asyncFinish();
            }

            @Override
            public void onFailure(Throwable cause) {
                finagleSpan.errorOccurred();
                finagleSpan.log(cause);
                finagleSpan.asyncFinish();
            }
        });
    }
    return ret;
}
Example #23
Source File: DeleteOp.java From distributedlog with Apache License 2.0

@Override
protected Future<WriteResponse> executeOp(AsyncLogWriter writer,
                                          Sequencer sequencer,
                                          Object txnLock) {
    Future<Void> result = streamManager.deleteAndRemoveAsync(streamName());
    return result.map(new AbstractFunction1<Void, WriteResponse>() {
        @Override
        public WriteResponse apply(Void value) {
            return ResponseUtils.writeSuccess();
        }
    });
}
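As an aside, this example and Examples #4, #5, #19, and #21 all transform a Future through scala.runtime.AbstractFunction1, because com.twitter.util.Future exposes Scala-style map/flatMap to Java callers. The following small, self-contained sketch shows that pattern with literal values only; it is illustrative and not taken from any of the projects in this list.

import com.twitter.util.Await;
import com.twitter.util.Future;
import scala.runtime.AbstractFunction1;

public class MapSketch {
    public static void main(String[] args) throws Exception {
        // map: transform the eventual value synchronously.
        Future<Integer> length = Future.value("hello")
                .map(new AbstractFunction1<String, Integer>() {
                    @Override
                    public Integer apply(String value) {
                        return value.length();
                    }
                });

        // flatMap: chain another asynchronous step that itself returns a Future.
        Future<Integer> doubled = length
                .flatMap(new AbstractFunction1<Integer, Future<Integer>>() {
                    @Override
                    public Future<Integer> apply(Integer value) {
                        return Future.value(value * 2);
                    }
                });

        System.out.println(Await.result(doubled)); // prints 10
    }
}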
Example #24
Source File: FutureUtils.java From distributedlog with Apache License 2.0

/**
 * Wait for the result of a lock operation.
 *
 * @param result the result to wait on
 * @param lockPath path of the lock
 * @return the result
 * @throws LockingException if the lock operation encountered an exception
 */
public static <T> T lockResult(Future<T> result, String lockPath) throws LockingException {
    try {
        return Await.result(result);
    } catch (LockingException le) {
        throw le;
    } catch (Exception e) {
        throw new LockingException(lockPath, "Encountered exception on locking ", e);
    }
}
Example #25
Source File: TestAsyncReaderWriter.java From distributedlog with Apache License 2.0

@Test(timeout = 60000)
public void testAsyncWritePendingWritesAbortedWhenLedgerRollTriggerFails() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(1024);
    confLocal.setMaxLogSegmentBytes(1024);
    confLocal.setLogSegmentRollingIntervalMinutes(0);

    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());

    // Write one record larger than max seg size. Ledger doesn't roll until next write.
    int txid = 1;
    LogRecord record = DLMTestUtil.getLogRecordInstance(txid++, 2048);
    Future<DLSN> result = writer.write(record);
    DLSN dlsn = Await.result(result, Duration.fromSeconds(10));
    assertEquals(1, dlsn.getLogSegmentSequenceNo());

    record = DLMTestUtil.getLogRecordInstance(txid++, MAX_LOGRECORD_SIZE + 1);
    result = writer.write(record);
    validateFutureFailed(result, LogRecordTooLongException.class);

    record = DLMTestUtil.getLogRecordInstance(txid++, MAX_LOGRECORD_SIZE + 1);
    result = writer.write(record);
    validateFutureFailed(result, WriteException.class);

    record = DLMTestUtil.getLogRecordInstance(txid++, MAX_LOGRECORD_SIZE + 1);
    result = writer.write(record);
    validateFutureFailed(result, WriteException.class);

    writer.closeAndComplete();
    dlm.close();
}
Example #26
Source File: BKLogSegmentWriter.java From distributedlog with Apache License 2.0

@Override
public Future<Long> apply(Integer transmitRc) {
    if (BKException.Code.OK == transmitRc) {
        return Future.value(getLastTxIdAcknowledged());
    } else {
        return Future.exception(new BKTransmitException("Failed to transmit entry", transmitRc));
    }
}
Example #27
Source File: FutureUtilTest.java From terrapin with Apache License 2.0

private Future<Integer> getFuture(final long futureExecutionTimeMs,
                                  final Integer futureValue,
                                  final boolean isFutureSuccessful) {
    return timer.doLater(
        Duration.fromMilliseconds(futureExecutionTimeMs),
        new Function0<Integer>() {
            public Integer apply() {
                if (isFutureSuccessful) {
                    return futureValue;
                } else {
                    throw new RuntimeException(EXCEPTION_MSG);
                }
            }
        });
}
Example #28
Source File: DistributedLogClientImpl.java From distributedlog with Apache License 2.0

@Override
public List<Future<DLSN>> writeBulk(String stream, List<ByteBuffer> data) {
    if (data.size() > 0) {
        final BulkWriteOp op = new BulkWriteOp(stream, data);
        sendRequest(op);
        return op.result();
    } else {
        return Collections.emptyList();
    }
}
Example #29
Source File: LindenController.java From linden with Apache License 2.0

@RequestMapping(value = "/delete", method = RequestMethod.POST)
@ResponseBody
public String delete(@RequestParam("bql") String bql) {
    Response response;
    try {
        Future<Response> future = LindenAdmin.getService().handleClusterDeleteRequest(bql);
        response = Await.result(future, Duration.apply(30000, TimeUnit.MILLISECONDS));
    } catch (Exception e) {
        response = new Response();
        response.setSuccess(false).setError(Throwables.getStackTraceAsString(e));
    }
    return ThriftToJSON(response);
}
Example #30
Source File: LogSegmentMetadataStoreUpdater.java From distributedlog with Apache License 2.0

/**
 * Change the truncation status of a <i>log segment</i> to be active.
 *
 * @param segment log segment to change truncation status to active.
 * @return new log segment
 */
@Override
public Future<LogSegmentMetadata> setLogSegmentActive(LogSegmentMetadata segment) {
    final LogSegmentMetadata newSegment = segment.mutator()
        .setTruncationStatus(LogSegmentMetadata.TruncationStatus.ACTIVE)
        .build();
    return addNewSegmentAndDeleteOldSegment(newSegment, segment);
}