com.google.common.collect.Queues Java Examples
The following examples show how to use com.google.common.collect.Queues. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
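Before the individual examples, here is a compact sketch of the Queues factory methods that recur below. The factory calls are Guava's real API; the class name and element values are invented for illustration.

import com.google.common.collect.Queues;

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class QueuesFactoryTour {
  public static void main(String[] args) {
    // Unbounded, non-thread-safe deque; the usual choice for the BFS work lists below.
    ArrayDeque<String> deque = Queues.newArrayDeque();

    // Lock-free queue for handing work between threads (Examples #9 and #13, among others).
    ConcurrentLinkedQueue<String> clq = Queues.newConcurrentLinkedQueue();

    // Blocking queues: unbounded (Examples #11, #16) and capacity-bounded (Examples #12, #14).
    LinkedBlockingQueue<String> unbounded = Queues.newLinkedBlockingQueue();
    ArrayBlockingQueue<String> bounded = Queues.newArrayBlockingQueue(128);

    // Wrap a plain queue so every method is synchronized (Example #17).
    Queue<String> synced = Queues.synchronizedQueue(new ArrayDeque<>());

    deque.add("a");
    clq.add("b");
    unbounded.add("c");
    bounded.add("d");
    synced.add("e");
  }
}

Each factory simply returns the corresponding JDK collection type, so the examples below interchange them freely with the Queue, Deque, and BlockingQueue interfaces.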
Example #1
Source File: BatchedPermitsRequesterTest.java From incubator-gobblin with Apache License 2.0
@Test
public void testRetriableFail() throws Exception {
  Queue<RequestAndCallback> queue = Queues.newArrayDeque();
  BatchedPermitsRequester container = BatchedPermitsRequester.builder().resourceId("resource")
      .requestorIdentifier("requestor").requestSender(new TestRequestSender(queue, false))
      .maxTimeoutMillis(1000).build();

  try (ParallelRequester requester = new ParallelRequester(container)) {
    Future<Boolean> future = requester.request(10);

    for (int i = 0; i < BatchedPermitsRequester.MAX_RETRIES; i++) { // container will fail 5 times
      await(new QueueSize(queue, 1), 1000);
      Assert.assertFalse(future.isDone());
      failRequestBuilder().requestAndCallback(queue.poll()).fail();
    }

    // should return a failure
    Assert.assertFalse(future.get());
    // should not make any more request
    Assert.assertEquals(queue.size(), 0);
  }
}
Example #2
Source File: JsonSerializationTest.java From SpinalTap with Apache License 2.0
@Test
public void testSerializeStateHistory() throws Exception {
  SourceState firstState = new SourceState(15L, 20L, -1L, BINLOG_FILE_POS);
  SourceState secondState = new SourceState(16L, 21L, -1L, BINLOG_FILE_POS);
  SourceState thirdState = new SourceState(17L, 22L, -1L, BINLOG_FILE_POS);

  Deque<SourceState> stateHistory = Queues.newArrayDeque();
  stateHistory.addLast(firstState);
  stateHistory.addLast(secondState);
  stateHistory.addLast(thirdState);

  Collection<SourceState> states =
      JsonUtil.OBJECT_MAPPER.readValue(
          JsonUtil.OBJECT_MAPPER.writeValueAsString(stateHistory),
          new TypeReference<Collection<SourceState>>() {});

  stateHistory = Queues.newArrayDeque(states);

  assertEquals(3, states.size());
  assertEquals(thirdState, stateHistory.removeLast());
  assertEquals(secondState, stateHistory.removeLast());
  assertEquals(firstState, stateHistory.removeLast());
}
Example #3
Source File: FilteredTargetMap.java From intellij with Apache License 2.0
private ImmutableSet<TargetIdeInfo> targetsForSourceFilesImpl(
    ImmutableMultimap<TargetKey, TargetKey> rdepsMap, Collection<File> sourceFiles) {
  ImmutableSet.Builder<TargetIdeInfo> result = ImmutableSet.builder();
  Set<TargetKey> roots =
      sourceFiles.stream()
          .flatMap(f -> rootsMap.get(f).stream())
          .collect(ImmutableSet.toImmutableSet());

  Queue<TargetKey> todo = Queues.newArrayDeque();
  todo.addAll(roots);
  Set<TargetKey> seen = Sets.newHashSet();
  while (!todo.isEmpty()) {
    TargetKey targetKey = todo.remove();
    if (!seen.add(targetKey)) {
      continue;
    }
    TargetIdeInfo target = targetMap.get(targetKey);
    if (filter.test(target)) {
      result.add(target);
    }
    todo.addAll(rdepsMap.get(targetKey));
  }
  return result.build();
}
Example #4
Source File: ConsumerAuditUtil.java From apollo with Apache License 2.0
@Override
public void afterPropertiesSet() throws Exception {
  auditExecutorService.submit(() -> {
    while (!auditStopped.get() && !Thread.currentThread().isInterrupted()) {
      List<ConsumerAudit> toAudit = Lists.newArrayList();
      try {
        Queues.drain(audits, toAudit, BATCH_SIZE, BATCH_TIMEOUT, BATCH_TIMEUNIT);
        if (!toAudit.isEmpty()) {
          consumerService.createConsumerAudits(toAudit);
        }
      } catch (Throwable ex) {
        Tracer.logError(ex);
      }
    }
  });
}
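The Queues.drain call above is what turns individual audit records into batches: it blocks until either BATCH_SIZE elements have arrived or the timeout elapses, whichever comes first, and returns the number of elements it actually moved. A standalone sketch of the same pattern (the queue contents and batch settings here are illustrative, not Apollo's):

import com.google.common.collect.Queues;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class DrainDemo {
  public static void main(String[] args) throws InterruptedException {
    BlockingQueue<String> events = new LinkedBlockingQueue<>();
    events.add("e1");
    events.add("e2");

    List<String> batch = new ArrayList<>();
    // Blocks until 100 elements arrive or 500 ms pass; returns the number drained (2 here,
    // after waiting out the timeout since fewer than 100 elements ever show up).
    int drained = Queues.drain(events, batch, 100, 500, TimeUnit.MILLISECONDS);
    System.out.println(drained + " -> " + batch);
  }
}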
Example #5
Source File: TaskManagerImpl.java From ganttproject with GNU General Public License v3.0
@Override
public void breadthFirstSearch(Task root, Predicate<Pair<Task, Task>> predicate) {
  Preconditions.checkNotNull(root);
  Queue<Task> queue = Queues.newArrayDeque();
  if (predicate.apply(Pair.create((Task) null, root))) {
    queue.add(root);
  }
  while (!queue.isEmpty()) {
    Task head = queue.poll();
    for (Task child : head.getNestedTasks()) {
      if (predicate.apply(Pair.create(head, child))) {
        queue.add(child);
      }
    }
  }
}
Example #6
Source File: DefaultDocumentTreeService.java From atomix with Apache License 2.0
@Override
public void clear() {
  Queue<DocumentPath> toClearQueue = Queues.newArrayDeque();
  Map<String, Versioned<byte[]>> topLevelChildren = docTree.getChildren(DocumentPath.ROOT);
  toClearQueue.addAll(topLevelChildren.keySet()
      .stream()
      .map(name -> new DocumentPath(name, DocumentPath.ROOT))
      .collect(Collectors.toList()));
  while (!toClearQueue.isEmpty()) {
    DocumentPath path = toClearQueue.remove();
    Map<String, Versioned<byte[]>> children = docTree.getChildren(path);
    if (children.size() == 0) {
      docTree.remove(path);
    } else {
      children.keySet().forEach(name -> toClearQueue.add(new DocumentPath(name, path)));
      toClearQueue.add(path);
    }
  }
}
Example #7
Source File: Http2To1ProxyRequestQueue.java From xio with Apache License 2.0
public void onRequestWriteOrEnqueue(
    ChannelHandlerContext ctx, Integer streamId, Object request, ChannelPromise promise) {
  if (streamId == null || streamId == Message.H1_STREAM_ID_NONE) {
    log.debug("writing request {}", request);
    ctx.write(request, promise);
  } else {
    boolean shouldWrite =
        currentProxiedH2StreamId().map(id -> id.equals(streamId)).orElse(Boolean.TRUE);
    Queue<PendingRequest> queue =
        streamQueue.computeIfAbsent(streamId, k -> Queues.newArrayDeque());
    if (shouldWrite) {
      log.debug("writing h2-h1 proxy request {}", request);
      ctx.write(request, promise);
    } else {
      log.debug("enqueuing h2-h1 proxy request {}", request);
      queue.offer(new PendingRequest(request, promise));
    }
  }
}
Example #8
Source File: BatchedPermitsRequesterTest.java From incubator-gobblin with Apache License 2.0
@Test
public void testForwardingOfRequests() throws Exception {
  Queue<RequestAndCallback> queue = Queues.newArrayDeque();
  BatchedPermitsRequester container = BatchedPermitsRequester.builder().resourceId("resource")
      .requestorIdentifier("requestor").requestSender(new TestRequestSender(queue, false)).build();
  try (ParallelRequester requester = new ParallelRequester(container)) {
    Future<Boolean> future = requester.request(10);
    await(new QueueSize(queue, 1), 1000);
    Assert.assertEquals(queue.size(), 1);
    satisfyRequestBuilder().requestAndCallback(queue.poll()).satisfy();

    future.get(1, TimeUnit.SECONDS);
    Assert.assertTrue(future.isDone());
    Assert.assertTrue(future.get());
  }
}
Example #9
Source File: ParallelRunnerTest.java From incubator-gobblin with Apache License 2.0
@Test(dependsOnMethods = "testSerializeToSequenceFile")
public void testDeserializeFromSequenceFile() throws IOException {
  Queue<WorkUnitState> workUnitStates = Queues.newConcurrentLinkedQueue();

  Path seqPath1 = new Path(this.outputPath, "seq1");
  Path seqPath2 = new Path(this.outputPath, "seq2");

  try (ParallelRunner parallelRunner = new ParallelRunner(2, this.fs)) {
    parallelRunner.deserializeFromSequenceFile(Text.class, WorkUnitState.class, seqPath1, workUnitStates, true);
    parallelRunner.deserializeFromSequenceFile(Text.class, WorkUnitState.class, seqPath2, workUnitStates, true);
  }

  Assert.assertFalse(this.fs.exists(seqPath1));
  Assert.assertFalse(this.fs.exists(seqPath2));

  Assert.assertEquals(workUnitStates.size(), 2);

  for (WorkUnitState workUnitState : workUnitStates) {
    TestWatermark watermark = new Gson().fromJson(workUnitState.getActualHighWatermark(), TestWatermark.class);
    Assert.assertTrue(watermark.getLongWatermark() == 10L || watermark.getLongWatermark() == 100L);
  }
}
Example #10
Source File: PubSubClient.java From tracing-framework with BSD 3-Clause "New" or "Revised" License
PubSubClient(String hostname, int port, int maxPendingMessages) throws IOException {
  this.hostname = hostname;
  this.port = port;
  this.maxPendingMessages = maxPendingMessages;
  if (maxPendingMessages <= 0) {
    this.pending = Queues.newLinkedBlockingDeque();
  } else {
    this.pending = Queues.newLinkedBlockingDeque(maxPendingMessages);
  }
  this.selector = Selector.open();
  Runtime.getRuntime().addShutdownHook(new Thread() {
    public void run() {
      close();
    }
  });
}
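The constructor above treats maxPendingMessages <= 0 as "unbounded": the no-argument Queues.newLinkedBlockingDeque() has effectively unlimited capacity (Integer.MAX_VALUE), while the one-argument overload enforces a hard bound and rejects offers once full. A quick sketch of the difference (the capacity and element values are illustrative):

import com.google.common.collect.Queues;

import java.util.concurrent.BlockingDeque;

public class BoundedDequeDemo {
  public static void main(String[] args) {
    BlockingDeque<Integer> bounded = Queues.newLinkedBlockingDeque(2);
    System.out.println(bounded.offer(1)); // true
    System.out.println(bounded.offer(2)); // true
    System.out.println(bounded.offer(3)); // false -- capacity reached, element rejected

    BlockingDeque<Integer> unbounded = Queues.newLinkedBlockingDeque();
    for (int i = 0; i < 10_000; i++) {
      unbounded.offer(i); // always succeeds, bounded only by memory
    }
    System.out.println(unbounded.size()); // 10000
  }
}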
Example #11
Source File: EventReporter.java From incubator-gobblin with Apache License 2.0
public EventReporter(Builder builder) {
  super(builder.context, builder.name, builder.filter, builder.rateUnit, builder.durationUnit);

  this.closer = Closer.create();
  this.immediateReportExecutor = MoreExecutors.getExitingExecutorService(
      (ThreadPoolExecutor) Executors.newFixedThreadPool(1,
          ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("EventReporter-" + builder.name + "-%d"))),
      5, TimeUnit.MINUTES);

  this.metricContext = builder.context;
  this.notificationTargetKey = builder.context.addNotificationTarget(new Function<Notification, Void>() {
    @Nullable
    @Override
    public Void apply(Notification notification) {
      notificationCallback(notification);
      return null;
    }
  });
  this.reportingQueue = Queues.newLinkedBlockingQueue(QUEUE_CAPACITY);
}
Example #12
Source File: SelectDispatcher.java From newts with Apache License 2.0
SelectDispatcher(SelectConfig config) {
  super(config);

  m_config = config;

  CassandraSession session = new CassandraSessionImpl(
      config.getCassandraKeyspace(),
      config.getCassandraHost(),
      config.getCassandraPort(),
      config.getCassandraCompression(),
      config.getCassandraUsername(),
      config.getCassandraPassword(),
      config.getCassandraSsl());

  m_repository = new CassandraSampleRepository(
      session,
      Config.CASSANDRA_TTL,
      m_metricRegistry,
      new DefaultSampleProcessorService(1),
      new ContextConfigurations());

  m_queryQueue = Queues.newArrayBlockingQueue(config.getThreads() * 10);
}
Example #13
Source File: HadoopUtils.java From incubator-gobblin with Apache License 2.0
/**
 * This method is an additive implementation of the {@link FileSystem#rename(Path, Path)} method. It moves all the
 * files/directories under 'from' path to the 'to' path without overwriting existing directories in the 'to' path.
 *
 * <p>
 * The rename operation happens at the first non-existent sub-directory. If a directory at the destination path
 * already exists, it recursively tries to move sub-directories. If all the sub-directories also exist at the
 * destination, a file-level move is done.
 * </p>
 *
 * @param fileSystem on which the data needs to be moved
 * @param from path of the data to be moved
 * @param to path of the data to be moved
 */
public static void renameRecursively(FileSystem fileSystem, Path from, Path to) throws IOException {
  log.info(String.format("Recursively renaming %s in %s to %s.", from, fileSystem.getUri(), to));

  FileSystem throttledFS = getOptionallyThrottledFileSystem(fileSystem, 10000);

  ExecutorService executorService = ScalingThreadPoolExecutor.newScalingThreadPool(1, 100, 100,
      ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("rename-thread-%d")));
  Queue<Future<?>> futures = Queues.newConcurrentLinkedQueue();

  try {
    if (!fileSystem.exists(from)) {
      throw new IOException("Trying to rename a path that does not exist! " + from);
    }

    futures.add(executorService
        .submit(new RenameRecursively(throttledFS, fileSystem.getFileStatus(from), to, executorService, futures)));
    int futuresUsed = 0;
    while (!futures.isEmpty()) {
      try {
        futures.poll().get();
        futuresUsed++;
      } catch (ExecutionException | InterruptedException ee) {
        throw new IOException(ee.getCause());
      }
    }

    log.info(String.format("Recursive renaming of %s to %s. (details: used %d futures)", from, to, futuresUsed));
  } finally {
    ExecutorsUtils.shutdownExecutorService(executorService, Optional.of(log), 1, TimeUnit.SECONDS);
  }
}
Example #14
Source File: BoundedBlockingRecordQueue.java From incubator-gobblin with Apache License 2.0
private BoundedBlockingRecordQueue(Builder<T> builder) {
  Preconditions.checkArgument(builder.capacity > 0, "Invalid queue capacity");
  Preconditions.checkArgument(builder.timeout > 0, "Invalid timeout time");

  this.capacity = builder.capacity;
  this.timeout = builder.timeout;
  this.timeoutTimeUnit = builder.timeoutTimeUnit;
  this.blockingQueue = Queues.newArrayBlockingQueue(builder.capacity);
  this.queueStats = builder.ifCollectStats ? Optional.of(new QueueStats()) : Optional.<QueueStats>absent();
}
Example #15
Source File: ManagedLedgerImpl.java From pulsar with Apache License 2.0
public ManagedLedgerImpl(ManagedLedgerFactoryImpl factory, BookKeeper bookKeeper, MetaStore store,
    ManagedLedgerConfig config, OrderedScheduler scheduledExecutor, OrderedExecutor orderedExecutor,
    final String name, final Supplier<Boolean> mlOwnershipChecker) {
  this.factory = factory;
  this.bookKeeper = bookKeeper;
  this.config = config;
  this.store = store;
  this.name = name;
  this.ledgerMetadata = LedgerMetadataUtils.buildBaseManagedLedgerMetadata(name);
  this.digestType = BookKeeper.DigestType.fromApiDigestType(config.getDigestType());
  this.scheduledExecutor = scheduledExecutor;
  this.executor = orderedExecutor;
  TOTAL_SIZE_UPDATER.set(this, 0);
  NUMBER_OF_ENTRIES_UPDATER.set(this, 0);
  ENTRIES_ADDED_COUNTER_UPDATER.set(this, 0);
  STATE_UPDATER.set(this, State.None);

  this.ledgersStat = null;
  this.mbean = new ManagedLedgerMBeanImpl(this);
  this.entryCache = factory.getEntryCacheManager().getEntryCache(this);
  this.waitingCursors = Queues.newConcurrentLinkedQueue();
  this.uninitializedCursors = Maps.newHashMap();
  this.clock = config.getClock();

  // Get the next rollover time. Add a random value of up to 5% to avoid rolling over multiple ledgers at the same time.
  this.maximumRolloverTimeMs = (long) (config.getMaximumRolloverTimeMs() * (1 + random.nextDouble() * 5 / 100.0));
  this.mlOwnershipChecker = mlOwnershipChecker;
  this.propertiesMap = Maps.newHashMap();
}
Example #16
Source File: NodeStatusUpdater.java From tajo with Apache License 2.0
@Override
public void serviceInit(Configuration conf) throws Exception {
  this.systemConf = TUtil.checkTypeAndGet(conf, TajoConf.class);
  this.rpcParams = RpcParameterFactory.get(this.systemConf);
  this.heartBeatRequestQueue = Queues.newLinkedBlockingQueue();
  this.serviceTracker = ServiceTrackerFactory.get(systemConf);
  this.workerContext.getNodeResourceManager().getDispatcher().register(NodeStatusEvent.EventType.class, this);
  this.heartBeatInterval = systemConf.getIntVar(TajoConf.ConfVars.WORKER_HEARTBEAT_IDLE_INTERVAL);
  this.updaterThread = new StatusUpdaterThread();
  this.updaterThread.setName("NodeStatusUpdater");
  super.serviceInit(conf);
}
Example #17
Source File: WebHdfsPersistReader.java From streams with Apache License 2.0
@Override
public void prepare(Object configurationObject) {
  LOGGER.debug("Prepare");
  lineReaderUtil = LineReadWriteUtil.getInstance(hdfsConfiguration);
  connectToWebHDFS();
  String pathString = hdfsConfiguration.getPath() + "/" + hdfsConfiguration.getReaderPath();
  LOGGER.info("Path : {}", pathString);
  path = new Path(pathString);
  try {
    if (client.isFile(path)) {
      LOGGER.info("Found File");
      FileStatus fileStatus = client.getFileStatus(path);
      status = new FileStatus[1];
      status[0] = fileStatus;
    } else if (client.isDirectory(path)) {
      status = client.listStatus(path);
      List<FileStatus> statusList = Arrays.asList(status);
      Collections.sort(statusList);
      status = statusList.toArray(new FileStatus[0]);
      LOGGER.info("Found Directory : {} files", status.length);
    } else {
      LOGGER.error("Neither file nor directory, wtf");
    }
  } catch (IOException ex) {
    LOGGER.error("IOException", ex);
  }
  streamsConfiguration = StreamsConfigurator.detectConfiguration();
  persistQueue = Queues.synchronizedQueue(new LinkedBlockingQueue<StreamsDatum>(streamsConfiguration.getBatchSize().intValue()));
  executor = Executors.newSingleThreadExecutor();
  mapper = StreamsJacksonMapper.getInstance();
}
Example #18
Source File: SerializeTest.java From ig-json-parser with MIT License
@Test
public void stringSerializeTest() throws IOException {
  final int intValue = 25;
  final int integerValue = 37;
  final String stringValue = "hello world\r\n\'\"";
  final List<Integer> integerList = Lists.newArrayList(1, 2, 3, 4);
  final Queue<Integer> integerQueue = Queues.newArrayDeque(Arrays.asList(1, 2, 3, 4));
  final Set<Integer> integerSet = Sets.newHashSet(1, 2, 3, 4);
  final int subIntValue = 30;

  SimpleParseUUT source = new SimpleParseUUT();
  source.intField = intValue;
  source.integerField = integerValue;
  source.stringField = stringValue;
  source.integerListField = integerList;
  source.integerQueueField = integerQueue;
  source.integerSetField = integerSet;
  source.subobjectField = new SimpleParseUUT.SubobjectParseUUT();
  source.subobjectField.intField = subIntValue;

  String serialized = SimpleParseUUT__JsonHelper.serializeToJson(source);
  SimpleParseUUT parsed = SimpleParseUUT__JsonHelper.parseFromJson(serialized);

  assertSame(source.intField, parsed.intField);
  assertEquals(source.integerField, parsed.integerField);
  assertEquals(source.stringField, parsed.stringField);
  assertEquals(source.integerListField, parsed.integerListField);
  // NOTE: this is because ArrayDeque hilariously does not implement .equals()/.hashCode().
  assertEquals(
      Lists.newArrayList(source.integerQueueField),
      Lists.newArrayList(parsed.integerQueueField));
  assertEquals(source.integerSetField, parsed.integerSetField);
  assertSame(source.subobjectField.intField, parsed.subobjectField.intField);
}
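The NOTE in the assertions above is worth underlining: java.util.ArrayDeque inherits Object's identity-based equals()/hashCode(), so two deques with identical contents do not compare equal, and the test has to compare them as lists instead. A minimal demonstration of the gotcha:

import com.google.common.collect.Lists;
import com.google.common.collect.Queues;

import java.util.ArrayDeque;
import java.util.Arrays;

public class ArrayDequeEqualsGotcha {
  public static void main(String[] args) {
    ArrayDeque<Integer> a = Queues.newArrayDeque(Arrays.asList(1, 2, 3));
    ArrayDeque<Integer> b = Queues.newArrayDeque(Arrays.asList(1, 2, 3));

    System.out.println(a.equals(b));                                         // false -- identity equality
    System.out.println(Lists.newArrayList(a).equals(Lists.newArrayList(b))); // true -- compare as lists
  }
}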
Example #19
Source File: ExecutorWithLambdasIT.java From glowroot with Apache License 2.0
@Override
public void executeApp() throws Exception {
  executor = new ThreadPoolExecutor(1, 1, 60, MILLISECONDS, Queues.newLinkedBlockingQueue());
  // need to pre-create threads, otherwise lambda execution will be captured by the
  // initial thread run, and won't really test lambda execution capture
  executor.prestartAllCoreThreads();
  transactionMarker();
}
Example #20
Source File: AsyncAppender.java From bither-desktop-java with Apache License 2.0
private AsyncAppender(Appender<ILoggingEvent> delegate) {
  this.delegate = delegate;
  this.queue = Queues.newLinkedBlockingQueue();
  this.batch = Lists.newArrayListWithCapacity(BATCH_SIZE);
  this.dispatcher = THREAD_FACTORY.newThread(this);
  setContext(delegate.getContext());
}
Example #21
Source File: TreeUtil.java From ganttproject with GNU General Public License v3.0
public static void breadthFirstSearch(MutableTreeTableNode root,
    Predicate<Pair<MutableTreeTableNode, MutableTreeTableNode>> predicate) {
  final Queue<MutableTreeTableNode> queue = Queues.newArrayDeque();
  if (predicate.apply(Pair.create((MutableTreeTableNode) null, root))) {
    queue.add(root);
  }
  while (!queue.isEmpty()) {
    MutableTreeTableNode head = queue.poll();
    for (int i = 0; i < head.getChildCount(); i++) {
      MutableTreeTableNode child = (MutableTreeTableNode) head.getChildAt(i);
      if (predicate.apply(Pair.create(head, child))) {
        queue.add(child);
      }
    }
  }
}
Example #22
Source File: DistributedCacheManagerDecoratorTest.java From rice with Educational Community License v2.0
/**
 * Test that duplicate cache flushes are filtered by
 * DistributedCacheManagerDecorator.CacheMessageSendingTransactionSynchronization.exhaustQueue(...)
 * to just the unique set, and that duplicate cache entry flushes are filtered to just the unique set as well.
 */
@Test
public void testDuplicateCacheRemovalCase2() {
  Queue<CacheTarget> targets = Queues.newLinkedBlockingQueue();

  // duplicate caches, we expect these to be filtered to the unique set
  targets.add(CacheTarget.entireCache(ROLE_TYPE_CACHE));
  targets.add(CacheTarget.entireCache(ROLE_TYPE_CACHE));
  targets.add(CacheTarget.entireCache(DELEGATE_TYPE_CACHE));
  targets.add(CacheTarget.entireCache(DELEGATE_TYPE_CACHE));
  targets.add(CacheTarget.entireCache(PERMISSION_TYPE));
  targets.add(CacheTarget.entireCache(PERMISSION_TYPE));

  // cache entries -- we expect no filtering, since (1) the caches these entries are in are not being
  // flushed in their entirety, and (2) the cache + key combinations are unique
  targets.add(CacheTarget.singleEntry(ROLE_MEMBER_TYPE, "key1"));
  targets.add(CacheTarget.singleEntry(ROLE_MEMBER_TYPE, "key2"));
  targets.add(CacheTarget.singleEntry(ROLE_RESPONSIBILITY_CACHE, "key3"));
  targets.add(CacheTarget.singleEntry(ROLE_RESPONSIBILITY_CACHE, "key4"));

  // the expected result is the unique set of caches, and each of the specified cache entries
  ArrayList<CacheTarget> correctResults = Lists.newArrayList(
      CacheTarget.entireCache(ROLE_TYPE_CACHE),
      CacheTarget.entireCache(DELEGATE_TYPE_CACHE),
      CacheTarget.entireCache(PERMISSION_TYPE),
      CacheTarget.singleEntry(ROLE_MEMBER_TYPE, "key1"),
      CacheTarget.singleEntry(ROLE_MEMBER_TYPE, "key2"),
      CacheTarget.singleEntry(ROLE_RESPONSIBILITY_CACHE, "key3"),
      CacheTarget.singleEntry(ROLE_RESPONSIBILITY_CACHE, "key4"));

  Collection<CacheTarget> results = new ArrayList<CacheTarget>(invokeExhaustQueue(targets));

  assertTrue(CollectionUtils.diff(correctResults, results).isEmpty());
}
Example #23
Source File: VplsOperationManager.java From onos with Apache License 2.0
/**
 * Adds a VPLS operation to the queue of pending operations.
 *
 * @param vplsOperation the VPLS operation to add
 */
private void addVplsOperation(VplsOperation vplsOperation) {
  VplsData vplsData = vplsOperation.vpls();
  pendingVplsOperations.compute(vplsData.name(), (name, opQueue) -> {
    opQueue = opQueue == null ? Queues.newArrayDeque() : opQueue;
    // If the operation already exists in the queue, ignore it.
    if (opQueue.contains(vplsOperation)) {
      return opQueue;
    }
    opQueue.add(vplsOperation);
    return opQueue;
  });
}
Example #24
Source File: Futures.java From codebuff with BSD 2-Clause "Simplified" License
/**
 * Returns a list of delegate futures that correspond to the futures received in the order that
 * they complete. Delegate futures return the same value or throw the same exception as the
 * corresponding input future returns/throws.
 *
 * <p>Cancelling a delegate future has no effect on any input future, since the delegate future
 * does not correspond to a specific input future until the appropriate number of input futures
 * have completed. At that point, it is too late to cancel the input future. The input future's
 * result, which cannot be stored into the cancelled delegate future, is ignored.
 *
 * @since 17.0
 */
@Beta
@GwtIncompatible // TODO
public static <T> ImmutableList<ListenableFuture<T>> inCompletionOrder(
    Iterable<? extends ListenableFuture<? extends T>> futures) {
  // A CLQ may be overkill here. We could save some pointers/memory by synchronizing on an
  // ArrayDeque
  final ConcurrentLinkedQueue<SettableFuture<T>> delegates = Queues.newConcurrentLinkedQueue();
  ImmutableList.Builder<ListenableFuture<T>> listBuilder = ImmutableList.builder();
  // Using SerializingExecutor here will ensure that each CompletionOrderListener executes
  // atomically and therefore that each returned future is guaranteed to be in completion order.
  // N.B. there are some cases where the use of this executor could have possibly surprising
  // effects when input futures finish at approximately the same time _and_ the output futures
  // have directExecutor listeners. In this situation, the listeners may end up running on a
  // different thread than if they were attached to the corresponding input future. We believe
  // this to be a negligible cost since:
  // 1. Using the directExecutor implies that your callback is safe to run on any thread.
  // 2. This would likely only be noticeable if you were doing something expensive or blocking on
  //    a directExecutor listener on one of the output futures which is an antipattern anyway.
  SerializingExecutor executor = new SerializingExecutor(directExecutor());
  for (final ListenableFuture<? extends T> future : futures) {
    SettableFuture<T> delegate = SettableFuture.create();
    // Must make sure to add the delegate to the queue first in case the future is already done
    delegates.add(delegate);
    future.addListener(
        new Runnable() {
          @Override
          public void run() {
            delegates.remove().setFuture(future);
          }
        },
        executor);
    listBuilder.add(delegate);
  }
  return listBuilder.build();
}
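To make the method's purpose concrete, here is a hedged usage sketch: processing asynchronous results in finish order rather than submission order. The executor setup, sleep durations, and task bodies are invented for illustration; Futures.inCompletionOrder itself is the Guava API documented above.

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class InCompletionOrderDemo {
  public static void main(String[] args) throws Exception {
    ListeningExecutorService pool =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

    ListenableFuture<String> slow = pool.submit(() -> { Thread.sleep(300); return "slow"; });
    ListenableFuture<String> fast = pool.submit(() -> { Thread.sleep(50); return "fast"; });

    // Delegates complete in finish order, so "fast" prints before "slow"
    // even though "slow" was submitted first.
    for (ListenableFuture<String> f : Futures.inCompletionOrder(ImmutableList.of(slow, fast))) {
      System.out.println(f.get());
    }

    pool.shutdown();
    pool.awaitTermination(1, TimeUnit.SECONDS);
  }
}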
Example #25
Source File: ClientCnx.java From pulsar with Apache License 2.0
public ClientCnx(ClientConfigurationData conf, EventLoopGroup eventLoopGroup, int protocolVersion) {
  super(conf.getKeepAliveIntervalSeconds(), TimeUnit.SECONDS);
  checkArgument(conf.getMaxLookupRequest() > conf.getConcurrentLookupRequest());
  this.pendingLookupRequestSemaphore = new Semaphore(conf.getConcurrentLookupRequest(), false);
  this.maxLookupRequestSemaphore =
      new Semaphore(conf.getMaxLookupRequest() - conf.getConcurrentLookupRequest(), false);
  this.waitingLookupRequests = Queues.newConcurrentLinkedQueue();
  this.authentication = conf.getAuthentication();
  this.eventLoopGroup = eventLoopGroup;
  this.maxNumberOfRejectedRequestPerConnection = conf.getMaxNumberOfRejectedRequestPerConnection();
  this.operationTimeoutMs = conf.getOperationTimeoutMs();
  this.state = State.None;
  this.isTlsHostnameVerificationEnable = conf.isTlsHostnameVerificationEnable();
  this.protocolVersion = protocolVersion;
}
Example #26
Source File: ConsumerBase.java From pulsar with Apache License 2.0
private void pendingBatchReceiveTask(Timeout timeout) throws Exception {
  if (timeout.isCancelled()) {
    return;
  }

  long timeToWaitMs;

  synchronized (this) {
    // If it's closing/closed we need to ignore this timeout and not schedule next timeout.
    if (getState() == State.Closing || getState() == State.Closed) {
      return;
    }
    if (pendingBatchReceives == null) {
      pendingBatchReceives = Queues.newConcurrentLinkedQueue();
    }
    OpBatchReceive<T> firstOpBatchReceive = pendingBatchReceives.peek();
    timeToWaitMs = batchReceivePolicy.getTimeoutMs();

    while (firstOpBatchReceive != null) {
      // If there is at least one batch receive, calculate the diff between the batch receive timeout
      // and the elapsed time since the operation was created.
      long diff = batchReceivePolicy.getTimeoutMs()
          - TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - firstOpBatchReceive.createdAt);
      if (diff <= 0) {
        // The diff is less than or equal to zero, meaning that the batch receive has been timed out.
        // complete the OpBatchReceive and continue to check the next OpBatchReceive in pendingBatchReceives.
        OpBatchReceive<T> op = pendingBatchReceives.poll();
        completeOpBatchReceive(op);
        firstOpBatchReceive = pendingBatchReceives.peek();
      } else {
        // The diff is greater than zero, set the timeout to the diff value
        timeToWaitMs = diff;
        break;
      }
    }
    batchReceiveTimeout = client.timer().newTimeout(this::pendingBatchReceiveTask, timeToWaitMs, TimeUnit.MILLISECONDS);
  }
}
Example #27
Source File: ConsumerImpl.java From pulsar with Apache License 2.0
@Override
protected CompletableFuture<Messages<T>> internalBatchReceiveAsync() {
  CompletableFuture<Messages<T>> result = new CompletableFuture<>();
  try {
    lock.writeLock().lock();
    if (pendingBatchReceives == null) {
      pendingBatchReceives = Queues.newConcurrentLinkedQueue();
    }
    if (hasEnoughMessagesForBatchReceive()) {
      MessagesImpl<T> messages = getNewMessagesImpl();
      Message<T> msgPeeked = incomingMessages.peek();
      while (msgPeeked != null && messages.canAdd(msgPeeked)) {
        Message<T> msg = incomingMessages.poll();
        if (msg != null) {
          messageProcessed(msg);
          Message<T> interceptMsg = beforeConsume(msg);
          messages.add(interceptMsg);
        }
        msgPeeked = incomingMessages.peek();
      }
      result.complete(messages);
    } else {
      pendingBatchReceives.add(OpBatchReceive.of(result));
    }
  } finally {
    lock.writeLock().unlock();
  }
  return result;
}
Example #28
Source File: VersionIdGenerator.java From quantumdb with Apache License 2.0
private Set<String> index() {
  Set<String> visited = Sets.newHashSet();
  Queue<Version> toVisit = Queues.newLinkedBlockingDeque();
  toVisit.add(rootVersion);
  while (!toVisit.isEmpty()) {
    Version current = toVisit.poll();
    if (visited.add(current.getId()) && current.getChild() != null) {
      toVisit.add(current.getChild());
    }
  }
  return visited;
}
Example #29
Source File: MultiTopicsConsumerImpl.java From pulsar with Apache License 2.0
@Override
protected CompletableFuture<Messages<T>> internalBatchReceiveAsync() {
  CompletableFuture<Messages<T>> result = new CompletableFuture<>();
  try {
    lock.writeLock().lock();
    if (pendingBatchReceives == null) {
      pendingBatchReceives = Queues.newConcurrentLinkedQueue();
    }
    if (hasEnoughMessagesForBatchReceive()) {
      MessagesImpl<T> messages = getNewMessagesImpl();
      Message<T> msgPeeked = incomingMessages.peek();
      while (msgPeeked != null && messages.canAdd(msgPeeked)) {
        Message<T> msg = incomingMessages.poll();
        if (msg != null) {
          INCOMING_MESSAGES_SIZE_UPDATER.addAndGet(this, -msg.getData().length);
          Message<T> interceptMsg = beforeConsume(msg);
          messages.add(interceptMsg);
        }
        msgPeeked = incomingMessages.peek();
      }
      result.complete(messages);
    } else {
      pendingBatchReceives.add(OpBatchReceive.of(result));
    }
    resumeReceivingFromPausedConsumersIfNeeded();
  } finally {
    lock.writeLock().unlock();
  }
  return result;
}