Java Code Examples for java.util.ArrayDeque#poll()
The following examples show how to use java.util.ArrayDeque#poll().
You can go to the original project or source file by following the links above each example.
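Before diving into the project examples, here is a minimal standalone sketch of poll()'s contract: it retrieves and removes the head of the deque, returning null when the deque is empty (unlike remove(), which throws NoSuchElementException).

import java.util.ArrayDeque;

public class PollDemo {
    public static void main(String[] args) {
        ArrayDeque<String> deque = new ArrayDeque<>();
        deque.add("first");
        deque.add("second");

        // poll() removes and returns the head of the deque
        System.out.println(deque.poll()); // first
        System.out.println(deque.poll()); // second

        // on an empty deque, poll() returns null instead of throwing
        System.out.println(deque.poll()); // null
    }
}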
Example 1
Source File: World.java From es6draft with MIT License

/**
 * Executes the queue of pending jobs.
 *
 * @param jobSource
 *            the job source
 * @throws InterruptedException
 *             if interrupted while waiting
 */
public void runEventLoop(JobSource jobSource) throws InterruptedException {
    ArrayDeque<Job> scriptJobs = this.scriptJobs;
    ArrayDeque<Job> promiseJobs = this.promiseJobs;
    ArrayDeque<Job> finalizerJobs = this.finalizerJobs;
    ConcurrentLinkedDeque<Job> asyncJobs = this.asyncJobs;
    ArrayDeque<Object> unhandledRejections = this.unhandledRejections;
    for (;;) {
        while (!(scriptJobs.isEmpty() && promiseJobs.isEmpty() && finalizerJobs.isEmpty() && asyncJobs.isEmpty())) {
            executeJobs(scriptJobs);
            executeJobs(promiseJobs);
            executeJobs(finalizerJobs);
            executeJobs(asyncJobs);
        }
        if (!unhandledRejections.isEmpty()) {
            throw new UnhandledRejectionException(unhandledRejections.poll());
        }
        Job job = jobSource.nextJob();
        if (job == null) {
            break;
        }
        enqueueScriptJob(job);
    }
}
Example 2
Source File: DeserializationUtils.java From Flink-CEPplus with Apache License 2.0

/**
 * Iterates over the provided records to deserialize, verifies the results and counts
 * the number of full records.
 *
 * @param records records to be deserialized
 * @param deserializer the record deserializer
 * @return the number of fully deserialized records
 */
public static int deserializeRecords(
        ArrayDeque<SerializationTestType> records,
        RecordDeserializer<SerializationTestType> deserializer) throws Exception {
    int deserializedRecords = 0;

    while (!records.isEmpty()) {
        SerializationTestType expected = records.poll();
        SerializationTestType actual = expected.getClass().newInstance();

        if (deserializer.getNextRecord(actual).isFullRecord()) {
            Assert.assertEquals(expected, actual);
            deserializedRecords++;
        } else {
            records.addFirst(expected);
            break;
        }
    }

    return deserializedRecords;
}
Example 3
Source File: NavDeepLinkBuilder.java From android_9.0.0_r45 with Apache License 2.0

private void fillInIntent() {
    NavDestination node = null;
    ArrayDeque<NavDestination> possibleDestinations = new ArrayDeque<>();
    possibleDestinations.add(mGraph);
    while (!possibleDestinations.isEmpty() && node == null) {
        NavDestination destination = possibleDestinations.poll();
        if (destination.getId() == mDestId) {
            node = destination;
        } else if (destination instanceof NavGraph) {
            for (NavDestination child : (NavGraph) destination) {
                possibleDestinations.add(child);
            }
        }
    }
    if (node == null) {
        final String dest = NavDestination.getDisplayName(mContext, mDestId);
        throw new IllegalArgumentException("navigation destination " + dest
                + " is unknown to this NavController");
    }
    mIntent.putExtra(NavController.KEY_DEEP_LINK_IDS, node.buildDeepLinkIds());
}
Example 4
Source File: KontalkIOProcessor.java From tigase-extension with GNU General Public License v3.0

@Override
public void ack(int value) {
    int count = get() - value;

    if (count < 0) {
        count = (Integer.MAX_VALUE - value) + get() + 1;
    }

    ArrayDeque<Entry> queue = getQueue();

    if (log.isLoggable(Level.FINEST)) {
        log.log(Level.FINEST, "acking {0} packets", new Object[] { queue.size() - count });
    }

    while (count < queue.size()) {
        Entry entry = queue.poll();
        Packet packet = entry.getPacketWithStamp();
        if (shouldRequestAck(packet)) {
            if (log.isLoggable(Level.FINEST)) {
                log.log(Level.FINEST, "acking message: {0}", packet.toString());
            }
            messagesWaiting--;
        }
    }
}
Example 5
Source File: DeserializationUtils.java From flink with Apache License 2.0

/**
 * Iterates over the provided records to deserialize, verifies the results and counts
 * the number of full records.
 *
 * @param records records to be deserialized
 * @param deserializer the record deserializer
 * @return the number of fully deserialized records
 */
public static int deserializeRecords(
        ArrayDeque<SerializationTestType> records,
        RecordDeserializer<SerializationTestType> deserializer) throws Exception {
    int deserializedRecords = 0;

    while (!records.isEmpty()) {
        SerializationTestType expected = records.poll();
        SerializationTestType actual = expected.getClass().newInstance();

        if (deserializer.getNextRecord(actual).isFullRecord()) {
            Assert.assertEquals(expected, actual);
            deserializedRecords++;
        } else {
            records.addFirst(expected);
            break;
        }
    }

    return deserializedRecords;
}
Example 6
Source File: BufferManager.java From flink with Apache License 2.0

/**
 * Recycles all the exclusive and floating buffers from the given buffer queue.
 */
void releaseAllBuffers(ArrayDeque<Buffer> buffers) throws IOException {
    // Gather all exclusive buffers and recycle them to global pool in batch, because
    // we do not want to trigger redistribution of buffers after each recycle.
    final List<MemorySegment> exclusiveRecyclingSegments = new ArrayList<>();

    Buffer buffer;
    while ((buffer = buffers.poll()) != null) {
        if (buffer.getRecycler() == this) {
            exclusiveRecyclingSegments.add(buffer.getMemorySegment());
        } else {
            buffer.recycleBuffer();
        }
    }

    synchronized (bufferQueue) {
        bufferQueue.releaseAll(exclusiveRecyclingSegments);
        bufferQueue.notifyAll();
    }

    if (exclusiveRecyclingSegments.size() > 0) {
        globalPool.recycleMemorySegments(exclusiveRecyclingSegments);
    }
}
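The while ((buffer = buffers.poll()) != null) loop above is the canonical deque-draining idiom: since ArrayDeque does not permit null elements, poll() returns null exactly when the queue has been emptied. A minimal sketch of the same idiom (names are illustrative, not from the Flink source):

import java.util.ArrayDeque;

class DrainDemo {
    static int drainAndSum(ArrayDeque<Integer> queue) {
        int sum = 0;
        Integer head;
        // poll() yields null once the deque is empty, terminating the loop
        while ((head = queue.poll()) != null) {
            sum += head;
        }
        return sum;
    }
}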
Example 7
Source File: SpillableSubpartitionView.java From Flink-CEPplus with Apache License 2.0

SpillableSubpartitionView(
        SpillableSubpartition parent,
        ArrayDeque<BufferConsumer> buffers,
        IOManager ioManager,
        int memorySegmentSize,
        BufferAvailabilityListener listener) {

    this.parent = checkNotNull(parent);
    this.buffers = checkNotNull(buffers);
    this.ioManager = checkNotNull(ioManager);
    this.memorySegmentSize = memorySegmentSize;
    this.listener = checkNotNull(listener);

    synchronized (buffers) {
        numBuffers = buffers.size();
        nextBuffer = buffers.poll();
    }

    if (nextBuffer != null) {
        listener.notifyDataAvailable();
    }
}
Example 8
Source File: ByteBufferProxy.java From lmdbjava with Apache License 2.0

@Override
protected final ByteBuffer allocate() {
    // try to reuse a pooled buffer before allocating a new one
    final ArrayDeque<ByteBuffer> queue = BUFFERS.get();
    final ByteBuffer buffer = queue.poll();

    if (buffer != null && buffer.capacity() >= 0) {
        return buffer;
    } else {
        return allocateDirect(0);
    }
}
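Here poll() implements a simple free list: it either hands back a previously released buffer or returns null, signalling that a fresh one must be allocated. ByteBufProxy in Example 11 uses the identical pattern. A minimal sketch of such a deque-backed pool (class and method names are hypothetical, not lmdbjava API):

import java.util.ArrayDeque;

class BufferPool {
    private final ArrayDeque<byte[]> free = new ArrayDeque<>();

    byte[] acquire(int size) {
        // poll() returns a pooled buffer, or null when the pool is empty
        byte[] buf = free.poll();
        return (buf != null && buf.length >= size) ? buf : new byte[size];
    }

    void release(byte[] buf) {
        free.offer(buf); // hand the buffer back for reuse
    }
}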
Example 9
Source File: PublisherTakeLast.java From reactive-streams-commons with Apache License 2.0

@Override
public void onNext(T t) {
    ArrayDeque<T> bs = buffer;

    // keep only the last n elements: drop the oldest once the buffer is full
    if (bs.size() == n) {
        bs.poll();
    }

    bs.offer(t);
}
Example 10
Source File: SchemaModifier.java From requery with Apache License 2.0

private ArrayList<Type<?>> sortTypes() {
    // sort the types in table creation order to avoid referencing a not-yet-created table
    // via a reference (could also add constraints at the end, but SQLite doesn't support that)
    ArrayDeque<Type<?>> queue = new ArrayDeque<>(model.getTypes());
    ArrayList<Type<?>> sorted = new ArrayList<>();
    while (!queue.isEmpty()) {
        Type<?> type = queue.poll();
        if (type.isView()) {
            continue;
        }
        Set<Type<?>> referencing = referencedTypesOf(type);
        for (Type<?> referenced : referencing) {
            Set<Type<?>> backReferences = referencedTypesOf(referenced);
            if (backReferences.contains(type)) {
                throw new CircularReferenceException("circular reference detected between "
                        + type.getName() + " and " + referenced.getName());
            }
        }
        if (referencing.isEmpty() || sorted.containsAll(referencing)) {
            sorted.add(type);
            queue.remove(type);
        } else {
            queue.offer(type); // put back
        }
    }
    return sorted;
}
Example 11
Source File: ByteBufProxy.java From lmdbjava with Apache License 2.0

@Override
protected ByteBuf allocate() {
    // try to reuse a pooled buffer before creating a new one
    final ArrayDeque<ByteBuf> queue = BUFFERS.get();
    final ByteBuf buffer = queue.poll();

    if (buffer != null && buffer.capacity() >= 0) {
        return buffer;
    } else {
        return createBuffer();
    }
}
Example 12
Source File: ModuleHashesBuilder.java From Bytecoder with Apache License 2.0

/**
 * Returns all nodes reachable from the given set of roots.
 */
public Set<T> dfs(Set<T> roots) {
    ArrayDeque<T> todo = new ArrayDeque<>(roots);
    Set<T> visited = new HashSet<>();
    T u;
    while ((u = todo.poll()) != null) {
        if (visited.add(u) && contains(u)) {
            adjacentNodes(u).stream()
                .filter(v -> !visited.contains(v))
                .forEach(todo::push);
        }
    }
    return visited;
}
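Although the method is named dfs, note the pairing: poll() removes from the head of the deque while todo::push inserts at the head, so the ArrayDeque behaves as a LIFO stack and the traversal really is depth-first. Pairing poll() with add() or offer() instead yields a FIFO queue and a breadth-first traversal, as in Examples 3 and 15.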
Example 13
Source File: StockPerformance.java From kafka-streams-in-action with Apache License 2.0

private double calculateNewAverage(double newValue, double currentAverage, ArrayDeque<Double> deque) {
    if (deque.size() < MAX_LOOK_BACK) {
        deque.add(newValue);

        if (deque.size() == MAX_LOOK_BACK) {
            currentAverage = deque.stream().reduce(0.0, Double::sum) / MAX_LOOK_BACK;
        }

    } else {
        double oldestValue = deque.poll();
        deque.add(newValue);
        currentAverage = (currentAverage + (newValue / MAX_LOOK_BACK)) - oldestValue / MAX_LOOK_BACK;
    }
    return currentAverage;
}
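The else branch is the standard O(1) sliding-window update: poll() evicts the value leaving the window, and the running average is adjusted by adding newValue / MAX_LOOK_BACK and subtracting oldestValue / MAX_LOOK_BACK, avoiding a full recomputation over the deque.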
Example 14
Source File: PrimitiveArrayPool.java From incubator-iotdb with Apache License 2.0

public synchronized Object getPrimitiveDataListByType(TSDataType dataType) {
    ArrayDeque<Object> dataListQueue = primitiveArraysMap.computeIfAbsent(dataType, k -> new ArrayDeque<>());
    Object dataArray = dataListQueue.poll();
    switch (dataType) {
        case BOOLEAN:
            if (dataArray == null) {
                dataArray = new boolean[ARRAY_SIZE];
            }
            break;
        case INT32:
            if (dataArray == null) {
                dataArray = new int[ARRAY_SIZE];
            }
            break;
        case INT64:
            if (dataArray == null) {
                dataArray = new long[ARRAY_SIZE];
            }
            break;
        case FLOAT:
            if (dataArray == null) {
                dataArray = new float[ARRAY_SIZE];
            }
            break;
        case DOUBLE:
            if (dataArray == null) {
                dataArray = new double[ARRAY_SIZE];
            }
            break;
        case TEXT:
            if (dataArray == null) {
                dataArray = new Binary[ARRAY_SIZE];
            }
            break;
        default:
            throw new UnSupportedDataTypeException("DataType: " + dataType);
    }
    return dataArray;
}
Example 15
Source File: BinaryTree.java From JImageHash with MIT License

/**
 * Return all elements of the tree whose hamming distance is smaller than or
 * equal to the supplied max distance.
 *
 * If the tree is configured to ensureHashConsistency this function will throw
 * an unchecked IllegalStateException if the checked hash does not comply with
 * the first hash added to the tree.
 *
 * @param hash        The hash to search for
 * @param maxDistance The maximal hamming distance deviation all found hashes
 *                    may possess. A distance of 0 will return all objects added
 *                    whose hash is exactly the hash supplied as the first
 *                    argument
 *
 * @return Search results containing objects and distances matching the search
 *         criteria. The results returned are ordered closest match first.
 */
@Override
public PriorityQueue<Result<T>> getElementsWithinHammingDistance(Hash hash, int maxDistance) {

    if (ensureHashConsistency && algoId != hash.getAlgorithmId()) {
        throw new IllegalStateException("Tried to add an incompatible hash to the binary tree");
    }

    // Iterative implementation. Recursion might get too expensive if the key length
    // increases and we need to be aware of the stack depth
    PriorityQueue<Result<T>> result = new PriorityQueue<Result<T>>();

    BigInteger hashValue = hash.getHashValue();
    int treeDepth = hash.getBitResolution();

    ArrayDeque<NodeInfo<T>> queue = new ArrayDeque<>();

    // Breadth first search
    // Begin search at the root
    queue.add(new NodeInfo<T>(root, 0, treeDepth));

    while (!queue.isEmpty()) {

        NodeInfo<T> info = queue.poll();

        // We reached a leaf
        if (info.depth == 0) {
            @SuppressWarnings("unchecked")
            Leaf<T> leaf = (Leaf<T>) info.node;
            for (T o : leaf.getData()) {
                result.add(new Result<T>(o, info.distance, info.distance / (double) treeDepth));
            }
            continue;
        }

        /*
         * else { System.out.printf("%-8s Depth: %d Distance: %d Next Bit: %s%n",
         * info.curPath, info.depth, info.distance, hashValue.testBit(info.depth - 1) ?
         * "1" : "0"); }
         */

        // Next bit
        boolean bit = hashValue.testBit(info.depth - 1);

        // Are children of the current node present?
        Node correctChild = info.node.getChild(bit);
        if (correctChild != null) {
            queue.add(new NodeInfo<T>(correctChild, info.distance, info.depth - 1));
        }

        if (info.distance + 1 <= maxDistance) {
            Node failedChild = info.node.getChild(!bit);
            // Maybe the child does not exist
            if (failedChild != null) {
                queue.add(new NodeInfo<T>(failedChild, info.distance + 1, info.depth - 1));
            }
        }
    }
    return result;
}
Example 16
Source File: SpanningRecordSerializationTest.java From flink with Apache License 2.0

/**
 * Iterates over the provided records and tests whether {@link SpanningRecordSerializer} and {@link RecordDeserializer}
 * interact as expected.
 *
 * <p>Only a single {@link MemorySegment} will be allocated.
 *
 * @param records records to test
 * @param segmentSize size for the {@link MemorySegment}
 */
private static void testSerializationRoundTrip(
        Iterable<SerializationTestType> records,
        int segmentSize,
        RecordSerializer<SerializationTestType> serializer,
        RecordDeserializer<SerializationTestType> deserializer) throws Exception {
    final ArrayDeque<SerializationTestType> serializedRecords = new ArrayDeque<>();

    // -----------------------------------------------------------------------------------------------------

    BufferAndSerializerResult serializationResult = setNextBufferForSerializer(serializer, segmentSize);

    int numRecords = 0;
    for (SerializationTestType record : records) {

        serializedRecords.add(record);

        numRecords++;

        // serialize record
        serializer.serializeRecord(record);
        if (serializer.copyToBufferBuilder(serializationResult.getBufferBuilder()).isFullBuffer()) {
            // buffer is full => start deserializing
            deserializer.setNextBuffer(serializationResult.buildBuffer());

            numRecords -= DeserializationUtils.deserializeRecords(serializedRecords, deserializer);

            // move buffers as long as necessary (for long records)
            while ((serializationResult = setNextBufferForSerializer(serializer, segmentSize)).isFullBuffer()) {
                deserializer.setNextBuffer(serializationResult.buildBuffer());
            }
        }
    }

    // deserialize left over records
    deserializer.setNextBuffer(serializationResult.buildBuffer());

    while (!serializedRecords.isEmpty()) {
        SerializationTestType expected = serializedRecords.poll();

        SerializationTestType actual = expected.getClass().newInstance();
        RecordDeserializer.DeserializationResult result = deserializer.getNextRecord(actual);

        Assert.assertTrue(result.isFullRecord());
        Assert.assertEquals(expected, actual);
        numRecords--;
    }

    // assert that all records have been serialized and deserialized
    Assert.assertEquals(0, numRecords);
    Assert.assertFalse(serializer.hasSerializedData());
    Assert.assertFalse(deserializer.hasUnfinishedData());
}
Example 17
Source File: SpanningRecordSerializationTest.java From Flink-CEPplus with Apache License 2.0

/**
 * Iterates over the provided records and tests whether {@link SpanningRecordSerializer} and {@link RecordDeserializer}
 * interact as expected.
 *
 * <p>Only a single {@link MemorySegment} will be allocated.
 *
 * @param records records to test
 * @param segmentSize size for the {@link MemorySegment}
 */
private static void testSerializationRoundTrip(
        Iterable<SerializationTestType> records,
        int segmentSize,
        RecordSerializer<SerializationTestType> serializer,
        RecordDeserializer<SerializationTestType> deserializer) throws Exception {
    final ArrayDeque<SerializationTestType> serializedRecords = new ArrayDeque<>();

    // -----------------------------------------------------------------------------------------------------

    BufferAndSerializerResult serializationResult = setNextBufferForSerializer(serializer, segmentSize);

    int numRecords = 0;
    for (SerializationTestType record : records) {

        serializedRecords.add(record);

        numRecords++;

        // serialize record
        serializer.serializeRecord(record);
        if (serializer.copyToBufferBuilder(serializationResult.getBufferBuilder()).isFullBuffer()) {
            // buffer is full => start deserializing
            deserializer.setNextBuffer(serializationResult.buildBuffer());

            numRecords -= DeserializationUtils.deserializeRecords(serializedRecords, deserializer);

            // move buffers as long as necessary (for long records)
            while ((serializationResult = setNextBufferForSerializer(serializer, segmentSize)).isFullBuffer()) {
                deserializer.setNextBuffer(serializationResult.buildBuffer());
            }
        }
    }

    // deserialize left over records
    deserializer.setNextBuffer(serializationResult.buildBuffer());

    while (!serializedRecords.isEmpty()) {
        SerializationTestType expected = serializedRecords.poll();

        SerializationTestType actual = expected.getClass().newInstance();
        RecordDeserializer.DeserializationResult result = deserializer.getNextRecord(actual);

        Assert.assertTrue(result.isFullRecord());
        Assert.assertEquals(expected, actual);
        numRecords--;
    }

    // assert that all records have been serialized and deserialized
    Assert.assertEquals(0, numRecords);
    Assert.assertFalse(serializer.hasSerializedData());
    Assert.assertFalse(deserializer.hasUnfinishedData());
}
Example 18
Source File: PublisherSkipLast.java From reactive-streams-commons with Apache License 2.0

@Override
public void onNext(T t) {
    ArrayDeque<T> bs = buffer;

    // once the buffer holds n elements, emit the oldest so that
    // the last n elements of the sequence are withheld
    if (bs.size() == n) {
        T v = bs.poll();
        actual.onNext(v);
    }
    bs.offer(t);
}
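Examples 9 and 18 are mirror images of the same bounded-buffer idiom: both cap the deque at n elements, but takeLast discards the element evicted by poll() so that only the final n survive, while skipLast forwards the evicted element downstream so that the last n are never emitted.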