org.apache.flink.util.MathUtils Java Examples
The following examples show how to use
org.apache.flink.util.MathUtils.
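Most of the examples below revolve around a handful of MathUtils helpers: log2strict and isPowerOf2 for working with power-of-two sizes, jenkinsHash for scrambling hash codes before bucket selection, roundDownToPowerOf2 for sizing hash tables, checkedDownCast for safely narrowing a long to an int, and flipSignBit for order-preserving serialization of signed longs. As a minimal, self-contained sketch (the inputs are arbitrary and the expected outputs in the comments are for illustration only):

import org.apache.flink.util.MathUtils;

public class MathUtilsTour {
    public static void main(String[] args) {
        System.out.println(MathUtils.isPowerOf2(4096));          // true
        System.out.println(MathUtils.log2strict(4096));          // 12; throws for values that are not powers of two
        System.out.println(MathUtils.roundDownToPowerOf2(1000)); // 512
        System.out.println(MathUtils.checkedDownCast(42L));      // 42; throws if the value does not fit into an int
        System.out.println(MathUtils.jenkinsHash(12345));        // a well-scrambled hash of the input
        System.out.println(Long.toHexString(MathUtils.flipSignBit(-1L))); // 7fffffffffffffff
    }
}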
Example #1
Source File: KeyMap.java From flink with Apache License 2.0 | 6 votes |
/**
 * Creates a new table with a capacity tailored to the given expected number of elements.
 *
 * @param expectedNumberOfElements The number of elements to tailor the capacity to.
 */
public KeyMap(int expectedNumberOfElements) {
    if (expectedNumberOfElements < 0) {
        throw new IllegalArgumentException("Invalid capacity: " + expectedNumberOfElements);
    }

    // round up to the next power of two
    // guard against too small capacity and integer overflows
    int capacity = Integer.highestOneBit(expectedNumberOfElements) << 1;
    capacity = capacity >= 0 ? Math.max(MIN_CAPACITY, capacity) : MAX_CAPACITY;

    // this also acts as a sanity check
    log2size = MathUtils.log2strict(capacity);
    shift = FULL_BIT_RANGE - log2size;
    table = allocateTable(capacity);
    rehashThreshold = getRehashThreshold(capacity);
}
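The constructor first rounds the expected size up to a power of two (Integer.highestOneBit(...) << 1) and then calls MathUtils.log2strict, which returns the exact base-2 logarithm and throws for non-powers of two, so it doubles as a sanity check. A hedged, stand-alone illustration of the same idea (MIN_CAPACITY below is a made-up constant, not the one from KeyMap):

import org.apache.flink.util.MathUtils;

public class CapacityDemo {
    private static final int MIN_CAPACITY = 64; // hypothetical lower bound, for illustration only

    static int tailoredCapacity(int expected) {
        int capacity = Integer.highestOneBit(expected) << 1;          // next power of two above 'expected'
        capacity = capacity >= 0 ? Math.max(MIN_CAPACITY, capacity) : (1 << 30); // cap on overflow, illustrative
        return capacity;
    }

    public static void main(String[] args) {
        int capacity = tailoredCapacity(1000);                        // 1024
        System.out.println(capacity + " -> log2 = " + MathUtils.log2strict(capacity)); // 1024 -> log2 = 10
    }
}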
Example #2
Source File: InPlaceMutableHashTable.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
private void allocateBucketSegments(int numBucketSegments) {
    if (numBucketSegments < 1) {
        throw new RuntimeException("Bug in InPlaceMutableHashTable");
    }

    bucketSegments = new MemorySegment[numBucketSegments];
    for (int i = 0; i < bucketSegments.length; i++) {
        bucketSegments[i] = forcedAllocateSegment();
        // Init all pointers in all buckets to END_OF_LIST
        for (int j = 0; j < numBucketsPerSegment; j++) {
            bucketSegments[i].putLong(j << bucketSizeBits, END_OF_LIST);
        }
    }

    numBuckets = numBucketSegments * numBucketsPerSegment;
    numBucketsMask = (1 << MathUtils.log2strict(numBuckets)) - 1;
}
Example #3
Source File: InPlaceMutableHashTable.java From flink with Apache License 2.0 | 6 votes |
/**
 * Inserts the given record into the hash table.
 * Note: this method doesn't care about whether a record with the same key is already present.
 *
 * @param record The record to insert.
 * @throws IOException (EOFException specifically, if memory ran out)
 */
@Override
public void insert(T record) throws IOException {
    if (closed) {
        return;
    }

    final int hashCode = MathUtils.jenkinsHash(buildSideComparator.hash(record));
    final int bucket = hashCode & numBucketsMask;
    final int bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits; // which segment contains the bucket
    final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
    final int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // offset of the bucket in the segment
    final long firstPointer = bucketSegment.getLong(bucketOffset);

    try {
        final long newFirstPointer = recordArea.appendPointerAndRecord(firstPointer, record);
        bucketSegment.putLong(bucketOffset, newFirstPointer);
    } catch (EOFException ex) {
        compactOrThrow();
        insert(record);
        return;
    }

    numElements++;
    resizeTableIfNecessary();
}
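Here the comparator's hash is scrambled with MathUtils.jenkinsHash and then mapped to a bucket with a bit mask, which only works because the bucket count is a power of two; the shift/mask arithmetic that follows splits the bucket index into a segment index and a byte offset inside that segment. A simplified sketch of the same addressing scheme, with hypothetical constants rather than the table's real ones:

import org.apache.flink.util.MathUtils;

public class BucketAddressingDemo {
    public static void main(String[] args) {
        int numBucketsPerSegmentBits = 12;                        // hypothetical: 4096 buckets per segment
        int numBucketsPerSegmentMask = (1 << numBucketsPerSegmentBits) - 1;
        int bucketSizeBits = 3;                                   // hypothetical: 8 bytes per bucket slot
        int numBuckets = 1 << 16;                                 // must be a power of two for masking to work
        int numBucketsMask = numBuckets - 1;

        int hashCode = MathUtils.jenkinsHash("some-key".hashCode());
        int bucket = hashCode & numBucketsMask;                   // bucket index in [0, numBuckets)
        int segmentIndex = bucket >>> numBucketsPerSegmentBits;   // which memory segment holds the bucket
        int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // byte offset inside the segment

        System.out.println("bucket=" + bucket + ", segment=" + segmentIndex + ", offset=" + bucketOffset);
    }
}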
Example #4
Source File: InPlaceMutableHashTable.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/**
 * Inserts the given record into the hash table.
 * Note: this method doesn't care about whether a record with the same key is already present.
 *
 * @param record The record to insert.
 * @throws IOException (EOFException specifically, if memory ran out)
 */
@Override
public void insert(T record) throws IOException {
    if (closed) {
        return;
    }

    final int hashCode = MathUtils.jenkinsHash(buildSideComparator.hash(record));
    final int bucket = hashCode & numBucketsMask;
    final int bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits; // which segment contains the bucket
    final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
    final int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // offset of the bucket in the segment
    final long firstPointer = bucketSegment.getLong(bucketOffset);

    try {
        final long newFirstPointer = recordArea.appendPointerAndRecord(firstPointer, record);
        bucketSegment.putLong(bucketOffset, newFirstPointer);
    } catch (EOFException ex) {
        compactOrThrow();
        insert(record);
        return;
    }

    numElements++;
    resizeTableIfNecessary();
}
Example #5
Source File: CompactingHashTable.java From flink with Apache License 2.0 | 6 votes |
@Override
public final void insert(T record) throws IOException {
    if (this.closed) {
        return;
    }

    final int hashCode = MathUtils.jenkinsHash(this.buildSideComparator.hash(record));
    final int posHashCode = hashCode % this.numBuckets;

    // get the bucket for the given hash code
    final int bucketArrayPos = posHashCode >>> this.bucketsPerSegmentBits;
    final int bucketInSegmentPos = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
    final MemorySegment bucket = this.buckets[bucketArrayPos];

    // get the basic characteristics of the bucket
    final int partitionNumber = bucket.get(bucketInSegmentPos + HEADER_PARTITION_OFFSET);
    InMemoryPartition<T> partition = this.partitions.get(partitionNumber);

    long pointer = insertRecordIntoPartition(record, partition, false);
    insertBucketEntryFromStart(bucket, bucketInSegmentPos, hashCode, pointer, partitionNumber);
}
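Unlike the mask-based tables above, CompactingHashTable maps the hash to a bucket with a modulo operation, so its bucket count does not have to be a power of two; the trade-off is that a bitwise AND is cheaper than %, which is why the power-of-two variants exist. Note that this code relies on the hash being non-negative, since the result of % is used directly as an index. A small comparison of both mappings (bucket counts are illustrative):

import org.apache.flink.util.MathUtils;

public class BucketMappingDemo {
    public static void main(String[] args) {
        int hash = MathUtils.jenkinsHash(987654321);

        int numBucketsModulo = 1000;                 // arbitrary bucket count, modulo mapping
        int bucketByModulo = hash % numBucketsModulo;

        int numBucketsMasked = 1024;                 // power-of-two bucket count, mask mapping
        int bucketByMask = hash & (numBucketsMasked - 1);

        System.out.println("modulo bucket: " + bucketByModulo + ", masked bucket: " + bucketByMask);
    }
}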
Example #6
Source File: ConfigurationParserUtils.java From flink with Apache License 2.0 | 6 votes |
/**
 * Parses the configuration to get the page size and validates the value.
 *
 * @param configuration configuration object
 * @return size of memory segment
 */
public static int getPageSize(Configuration configuration) {
    final int pageSize = checkedDownCast(
        configuration.get(TaskManagerOptions.MEMORY_SEGMENT_SIZE).getBytes());

    // check page size for minimum size
    checkConfigParameter(
        pageSize >= MemoryManager.MIN_PAGE_SIZE,
        pageSize,
        TaskManagerOptions.MEMORY_SEGMENT_SIZE.key(),
        "Minimum memory segment size is " + MemoryManager.MIN_PAGE_SIZE);

    // check page size for power of two
    checkConfigParameter(
        MathUtils.isPowerOf2(pageSize),
        pageSize,
        TaskManagerOptions.MEMORY_SEGMENT_SIZE.key(),
        "Memory segment size must be a power of 2.");

    return pageSize;
}
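MathUtils.isPowerOf2 validates the configured memory segment (page) size here; as the other examples show, Flink derives shift and mask values from the page size, which only works when it is a power of two. A trivial illustration of the check:

import org.apache.flink.util.MathUtils;

public class PageSizeCheckDemo {
    public static void main(String[] args) {
        long[] candidates = {32 * 1024, 48 * 1024, 64 * 1024};    // sample page sizes in bytes
        for (long pageSize : candidates) {
            System.out.println(pageSize + " bytes -> power of two? " + MathUtils.isPowerOf2(pageSize));
        }
        // prints: true, false, true
    }
}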
Example #7
Source File: HashPartition.java From flink with Apache License 2.0 | 6 votes |
/**
 * Constructor creating a partition from a spilled partition file that could be read in one because it was
 * known to completely fit into memory.
 *
 * @param buildSideAccessors The data type accessors for the build side data-type.
 * @param probeSideAccessors The data type accessors for the probe side data-type.
 * @param partitionNumber The number of the partition.
 * @param recursionLevel The recursion level of the partition.
 * @param buffers The memory segments holding the records.
 * @param buildSideRecordCounter The number of records in the buffers.
 * @param segmentSize The size of the memory segments.
 */
HashPartition(TypeSerializer<BT> buildSideAccessors, TypeSerializer<PT> probeSideAccessors,
        int partitionNumber, int recursionLevel, List<MemorySegment> buffers,
        long buildSideRecordCounter, int segmentSize, int lastSegmentLimit) {
    super(0);

    this.buildSideSerializer = buildSideAccessors;
    this.probeSideSerializer = probeSideAccessors;
    this.partitionNumber = partitionNumber;
    this.recursionLevel = recursionLevel;

    this.memorySegmentSize = segmentSize;
    this.segmentSizeBits = MathUtils.log2strict(segmentSize);
    this.finalBufferLimit = lastSegmentLimit;

    this.partitionBuffers = (MemorySegment[]) buffers.toArray(new MemorySegment[buffers.size()]);
    this.buildSideRecordCounter = buildSideRecordCounter;

    this.overflowSegments = new MemorySegment[2];
    this.numOverflowSegments = 0;
    this.nextOverflowBucket = 0;
}
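Storing segmentSizeBits = MathUtils.log2strict(segmentSize) lets the partition translate a flat byte position into a (segment index, offset-in-segment) pair with a shift and a mask instead of a division and a modulo. A minimal sketch of that translation, using a hypothetical 32 KiB segment size:

import org.apache.flink.util.MathUtils;

public class PointerSplitDemo {
    public static void main(String[] args) {
        int segmentSize = 32 * 1024;                               // hypothetical page size (must be a power of two)
        int segmentSizeBits = MathUtils.log2strict(segmentSize);   // 15
        int segmentSizeMask = segmentSize - 1;

        long flatPosition = 100_000L;                              // some byte position across all segments
        long segmentIndex = flatPosition >>> segmentSizeBits;      // 3
        long offsetInSegment = flatPosition & segmentSizeMask;     // 100000 - 3 * 32768 = 1696

        System.out.println("segment " + segmentIndex + ", offset " + offsetInSegment);
    }
}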
Example #8
Source File: LongHashPartition.java From flink with Apache License 2.0 | 6 votes |
/**
 * Entrance 3: dense mode for just data search (bucket in LongHybridHashTable of dense mode).
 */
LongHashPartition(
        LongHybridHashTable longTable,
        BinaryRowSerializer buildSideSerializer,
        MemorySegment[] partitionBuffers) {
    super(0);
    this.longTable = longTable;
    this.buildSideSerializer = buildSideSerializer;
    this.buildReuseRow = buildSideSerializer.createInstance();
    this.segmentSize = longTable.pageSize();
    Preconditions.checkArgument(segmentSize % 16 == 0);
    this.partitionBuffers = partitionBuffers;
    this.segmentSizeBits = MathUtils.log2strict(segmentSize);
    this.segmentSizeMask = segmentSize - 1;
    this.finalBufferLimit = segmentSize;
    this.iterator = new MatchIterator();
}
Example #9
Source File: BinaryHashPartition.java From flink with Apache License 2.0 | 6 votes |
/**
 * Creates a new partition, initially in memory, with one buffer for the build side. The
 * partition is initialized to expect record insertions for the build side.
 *
 * @param partitionNumber The number of the partition.
 * @param recursionLevel The recursion level - zero for partitions from the initial build,
 *                       <i>n + 1</i> for partitions that are created from spilled partition
 *                       with recursion level <i>n</i>.
 * @param initialBuffer The initial buffer for this partition.
 */
BinaryHashPartition(BinaryHashBucketArea bucketArea, BinaryRowSerializer buildSideAccessors,
        BinaryRowSerializer probeSideAccessors, int partitionNumber, int recursionLevel,
        MemorySegment initialBuffer, MemorySegmentPool memPool, int segmentSize,
        boolean compressionEnable, BlockCompressionFactory compressionCodecFactory,
        int compressionBlockSize) {
    super(0);
    this.bucketArea = bucketArea;
    this.buildSideSerializer = buildSideAccessors;
    this.probeSideSerializer = probeSideAccessors;
    this.partitionNumber = partitionNumber;
    this.recursionLevel = recursionLevel;
    this.memorySegmentSize = segmentSize;
    this.segmentSizeBits = MathUtils.log2strict(segmentSize);
    this.compressionEnable = compressionEnable;
    this.compressionCodecFactory = compressionCodecFactory;
    this.compressionBlockSize = compressionBlockSize;
    this.buildSideWriteBuffer = new BuildSideBuffer(initialBuffer, memPool);
    this.memPool = memPool;
}
Example #10
Source File: InPlaceMutableHashTable.java From flink with Apache License 2.0 | 6 votes |
private void allocateBucketSegments(int numBucketSegments) {
    if (numBucketSegments < 1) {
        throw new RuntimeException("Bug in InPlaceMutableHashTable");
    }

    bucketSegments = new MemorySegment[numBucketSegments];
    for (int i = 0; i < bucketSegments.length; i++) {
        bucketSegments[i] = forcedAllocateSegment();
        // Init all pointers in all buckets to END_OF_LIST
        for (int j = 0; j < numBucketsPerSegment; j++) {
            bucketSegments[i].putLong(j << bucketSizeBits, END_OF_LIST);
        }
    }

    numBuckets = numBucketSegments * numBucketsPerSegment;
    numBucketsMask = (1 << MathUtils.log2strict(numBuckets)) - 1;
}
Example #11
Source File: BytesHashMap.java From flink with Apache License 2.0 | 5 votes |
private int calcNumBucketSegments(LogicalType[] keyTypes, LogicalType[] valueTypes) {
    int calcRecordLength = reusedValue.getFixedLengthPartSize() + getVariableLength(valueTypes) +
            reusedKey.getFixedLengthPartSize() + getVariableLength(keyTypes);
    // We aim for a 200% utilization of the bucket table.
    double averageBucketSize = BUCKET_SIZE / LOAD_FACTOR;
    double fraction = averageBucketSize / (averageBucketSize + calcRecordLength + RECORD_EXTRA_LENGTH);

    // We make the number of buckets a power of 2 so that taking modulo is efficient.
    // To avoid rehashing as far as possible, start with as many bucket segments as the reserved
    // buffers allow, rounded down to a power of two.
    int ret = Math.max(1, MathUtils.roundDownToPowerOf2((int) (reservedNumBuffers * fraction)));

    // We can't handle more than Integer.MAX_VALUE buckets (e.g. because hash functions return int)
    if ((long) ret * numBucketsPerSegment > Integer.MAX_VALUE) {
        ret = MathUtils.roundDownToPowerOf2(Integer.MAX_VALUE / numBucketsPerSegment);
    }
    return ret;
}
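MathUtils.roundDownToPowerOf2 keeps the final count a power of two (for cheap modulo via masking) while never exceeding the memory reserved for buckets, and the second call caps the total bucket count below Integer.MAX_VALUE. A small numeric sketch with made-up figures:

import org.apache.flink.util.MathUtils;

public class BucketCountDemo {
    public static void main(String[] args) {
        int reservedNumBuffers = 700;          // hypothetical number of reserved bucket buffers
        double fraction = 0.6;                 // hypothetical share of the memory that goes to buckets

        int numBucketSegments = Math.max(1, MathUtils.roundDownToPowerOf2((int) (reservedNumBuffers * fraction)));
        System.out.println(numBucketSegments); // 700 * 0.6 = 420 -> rounded down to 256

        int numBucketsPerSegment = 4096;       // hypothetical
        if ((long) numBucketSegments * numBucketsPerSegment > Integer.MAX_VALUE) {
            numBucketSegments = MathUtils.roundDownToPowerOf2(Integer.MAX_VALUE / numBucketsPerSegment);
        }
        System.out.println(numBucketSegments); // unchanged here; the cap only matters for huge tables
    }
}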
Example #12
Source File: LongHashPartition.java From flink with Apache License 2.0 | 5 votes |
private void resize() throws IOException {
    MemorySegment[] oldBuckets = this.buckets;
    int oldNumBuckets = numBuckets;
    int newNumSegs = oldBuckets.length * 2;
    int newNumBuckets = MathUtils.roundDownToPowerOf2(newNumSegs * segmentSize / 16);

    // request new buckets.
    MemorySegment[] newBuckets = new MemorySegment[newNumSegs];
    for (int i = 0; i < newNumSegs; i++) {
        MemorySegment seg = longTable.getNextBuffer();
        if (seg == null) {
            final int spilledPart = longTable.spillPartition();
            if (spilledPart == partitionNum) {
                // this bucket is no longer in-memory
                // free new segments.
                longTable.returnAll(Arrays.asList(newBuckets));
                return;
            }
            seg = longTable.getNextBuffer();
            if (seg == null) {
                throw new RuntimeException(
                        "Bug in HybridHashJoin: No memory became available after spilling a partition.");
            }
        }
        newBuckets[i] = seg;
    }

    setNewBuckets(newBuckets, newNumBuckets);
    reHash(oldBuckets, oldNumBuckets);
}
Example #13
Source File: TimerSerializer.java From flink with Apache License 2.0 | 5 votes |
@Override
public TimerHeapInternalTimer<K, N> deserialize(DataInputView source) throws IOException {
    long timestamp = MathUtils.flipSignBit(source.readLong());
    K key = keySerializer.deserialize(source);
    N namespace = namespaceSerializer.deserialize(source);
    return new TimerHeapInternalTimer<>(timestamp, key, namespace);
}
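The timestamp was written with MathUtils.flipSignBit (a matching serialize call appears in a later example): flipping the sign bit turns a signed long into a value whose unsigned, byte-wise order matches the original signed order, which is what lexicographically ordered stores such as RocksDB need, and applying the same flip on read restores the original value. A small round-trip sketch:

import org.apache.flink.util.MathUtils;

public class FlipSignBitDemo {
    public static void main(String[] args) {
        long[] timestamps = {Long.MIN_VALUE, -1L, 0L, 1L, Long.MAX_VALUE};
        for (long ts : timestamps) {
            long encoded = MathUtils.flipSignBit(ts);
            // After flipping, comparing the encoded values as unsigned numbers preserves the signed order,
            // and flipping again restores the original timestamp.
            System.out.printf("%20d -> %016x -> back: %d%n", ts, encoded, MathUtils.flipSignBit(encoded));
        }
    }
}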
Example #14
Source File: MutableHashTable.java From flink with Apache License 2.0 | 5 votes |
/**
 * The level parameter is needed so that we can have different hash functions when we recursively apply
 * the partitioning, so that the working set eventually fits into memory.
 */
public static int hash(int code, int level) {
    final int rotation = level * 11;
    code = Integer.rotateLeft(code, rotation);
    return MathUtils.jenkinsHash(code);
}
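Each recursion level rotates the hash code by a different amount before applying MathUtils.jenkinsHash, so keys that collide into the same partition at level n are spread differently at level n + 1. A quick illustration of how the level changes the resulting bucket (the bucket count is arbitrary):

import org.apache.flink.util.MathUtils;

public class LeveledHashDemo {
    static int hash(int code, int level) {
        final int rotation = level * 11;           // same scheme as the example above
        code = Integer.rotateLeft(code, rotation);
        return MathUtils.jenkinsHash(code);
    }

    public static void main(String[] args) {
        int key = 123456;
        for (int level = 0; level < 3; level++) {
            System.out.println("level " + level + ": bucket " + (hash(key, level) & 1023)); // 1024 buckets, illustrative
        }
    }
}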
Example #15
Source File: HashTableBloomFilter.java From flink with Apache License 2.0 | 5 votes |
HashTableBloomFilter(MemorySegment[] buffers, long numRecords) {
    checkArgument(buffers != null && buffers.length > 0);
    this.buffers = buffers;
    this.numBuffers = buffers.length;
    checkArgument(MathUtils.isPowerOf2(numBuffers));
    this.numBuffersMask = numBuffers - 1;
    int bufferSize = buffers[0].size();
    this.filter = new BloomFilter((int) (numRecords / numBuffers), buffers[0].size());
    filter.setBitsLocation(buffers[0], 0);

    // We assume that a BloomFilter can contain up to 2.44 elements per byte,
    // which gives a false positive probability of roughly 0.2.
    this.maxSize = (int) ((numBuffers * bufferSize) * 2.44);
}
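The constructor spreads records over a power-of-two number of buffers (again so a mask can select the buffer) and sizes the filter from the rule of thumb in the comment: about 2.44 elements per byte at a false positive rate of roughly 0.2. With the hypothetical figures below, 8 buffers of 32 KiB give room for roughly 640,000 elements:

import org.apache.flink.util.MathUtils;

public class BloomFilterSizingDemo {
    public static void main(String[] args) {
        int numBuffers = 8;                        // hypothetical, must be a power of two
        int bufferSize = 32 * 1024;                // hypothetical segment size in bytes

        if (!MathUtils.isPowerOf2(numBuffers)) {
            throw new IllegalArgumentException("numBuffers must be a power of two");
        }
        int numBuffersMask = numBuffers - 1;

        int maxSize = (int) ((numBuffers * (long) bufferSize) * 2.44); // ~2.44 elements per byte
        System.out.println("mask=" + numBuffersMask + ", approximate capacity=" + maxSize); // ~639631
    }
}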
Example #16
Source File: LongHashPartition.java From flink with Apache License 2.0 | 5 votes |
private void setNewBuckets(MemorySegment[] buckets, int numBuckets) {
    for (MemorySegment segment : buckets) {
        for (int i = 0; i < segmentSize; i += 16) {
            // Initializing the key slot may not be strictly necessary, because the address is always verified.
            segment.putLong(i, 0);
            segment.putLong(i + 8, INVALID_ADDRESS);
        }
    }
    this.buckets = buckets;
    checkArgument(MathUtils.isPowerOf2(numBuckets));
    this.numBuckets = numBuckets;
    this.numBucketsMask = numBuckets - 1;
    this.numKeys = 0;
}
Example #17
Source File: InPlaceMutableHashTable.java From flink with Apache License 2.0 | 5 votes |
/**
 * Searches the hash table for the record with the given key.
 * (If there would be multiple matches, only one is returned.)
 *
 * @param record The record whose key we are searching for
 * @param targetForMatch If a match is found, it will be written here
 * @return targetForMatch if a match is found, otherwise null.
 */
@Override
public T getMatchFor(PT record, T targetForMatch) {
    if (closed) {
        return null;
    }

    final int hashCode = MathUtils.jenkinsHash(probeTypeComparator.hash(record));
    final int bucket = hashCode & numBucketsMask;
    bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits; // which segment contains the bucket
    final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
    bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // offset of the bucket in the segment

    curElemPtr = bucketSegment.getLong(bucketOffset);

    pairComparator.setReference(record);

    T currentRecordInList = targetForMatch;

    prevElemPtr = INVALID_PREV_POINTER;
    try {
        while (curElemPtr != END_OF_LIST && !closed) {
            recordArea.setReadPosition(curElemPtr);
            nextPtr = recordArea.readPointer();
            currentRecordInList = recordArea.readRecord(currentRecordInList);
            recordEnd = recordArea.getReadPosition();
            if (pairComparator.equalToReference(currentRecordInList)) {
                // we found an element with a matching key, and not just a hash collision
                return currentRecordInList;
            }

            prevElemPtr = curElemPtr;
            curElemPtr = nextPtr;
        }
    } catch (IOException ex) {
        throw new RuntimeException("Error deserializing record from the hashtable: " + ex.getMessage(), ex);
    }
    return null;
}
Example #18
Source File: SimpleCollectingOutputView.java From flink with Apache License 2.0 | 5 votes |
public SimpleCollectingOutputView(List<MemorySegment> fullSegmentTarget,
        MemorySegmentSource memSource, int segmentSize) {
    super(memSource.nextSegment(), segmentSize, 0);

    this.segmentSizeBits = MathUtils.log2strict(segmentSize);
    this.fullSegments = fullSegmentTarget;
    this.memorySource = memSource;
    this.fullSegments.add(getCurrentSegment());
}
Example #19
Source File: SeekableFileChannelInputView.java From flink with Apache License 2.0 | 5 votes |
public void seek(long position) throws IOException {
    final int block = MathUtils.checkedDownCast(position / segmentSize);
    final int positionInBlock = (int) (position % segmentSize);

    if (position < 0 || block >= numBlocksTotal ||
            (block == numBlocksTotal - 1 && positionInBlock > sizeOfLastBlock)) {
        throw new IllegalArgumentException("Position is out of range");
    }

    clear();
    if (reader != null) {
        reader.close();
    }

    reader = ioManager.createBlockChannelReader(channelId);
    if (block > 0) {
        reader.seekToPosition(((long) block) * segmentSize);
    }

    this.numBlocksRemaining = this.numBlocksTotal - block;
    this.numRequestsRemaining = numBlocksRemaining;

    for (int i = 0; i < memory.size(); i++) {
        sendReadRequest(memory.get(i));
    }

    numBlocksRemaining--;
    seekInput(reader.getNextReturnedBlock(), positionInBlock,
            numBlocksRemaining == 0 ? sizeOfLastBlock : segmentSize);
}
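seek converts the byte position into a block index with MathUtils.checkedDownCast, which narrows a long to an int and throws if the value does not fit, instead of silently truncating the way a plain (int) cast would. A small demonstration with made-up sizes:

import org.apache.flink.util.MathUtils;

public class CheckedDownCastDemo {
    public static void main(String[] args) {
        long segmentSize = 32 * 1024;                                  // hypothetical block size
        long position = 5L * 1024 * 1024 * 1024;                       // 5 GiB into the channel

        int block = MathUtils.checkedDownCast(position / segmentSize); // 163840, fits comfortably in an int
        System.out.println("block index: " + block);

        try {
            MathUtils.checkedDownCast(Long.MAX_VALUE);                 // does not fit into an int
        } catch (RuntimeException e) {
            System.out.println("overflow rejected: " + e.getMessage());
        }
    }
}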
Example #20
Source File: TimerSerializer.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public TimerHeapInternalTimer<K, N> deserialize(DataInputView source) throws IOException {
    long timestamp = MathUtils.flipSignBit(source.readLong());
    K key = keySerializer.deserialize(source);
    N namespace = namespaceSerializer.deserialize(source);
    return new TimerHeapInternalTimer<>(timestamp, key, namespace);
}
Example #21
Source File: HadoopS3RecoverableWriterITCase.java From flink with Apache License 2.0 | 5 votes |
private static String createBigDataChunk(String pattern, long size) {
    final StringBuilder stringBuilder = new StringBuilder();

    int sampleLength = bytesOf(pattern).length;
    int repeats = MathUtils.checkedDownCast(size) / sampleLength + 100;

    for (int i = 0; i < repeats; i++) {
        stringBuilder.append(pattern);
    }
    return stringBuilder.toString();
}
Example #22
Source File: BinaryHashBucketArea.java From flink with Apache License 2.0 | 5 votes |
private void setNewBuckets(MemorySegment[] buckets, int numBuckets, int threshold) {
    this.buckets = buckets;
    checkArgument(MathUtils.isPowerOf2(buckets.length));
    this.numBuckets = numBuckets;
    this.numBucketsMask = numBuckets - 1;
    this.overflowSegments = new MemorySegment[2];
    this.numOverflowSegments = 0;
    this.nextOverflowBucket = 0;
    this.threshold = threshold;
}
Example #23
Source File: SeekableFileChannelInputView.java From flink with Apache License 2.0 | 5 votes |
public SeekableFileChannelInputView(IOManager ioManager, FileIOChannel.ID channelId,
        MemoryManager memManager, List<MemorySegment> memory, int sizeOfLastBlock) throws IOException {
    super(0);

    checkNotNull(ioManager);
    checkNotNull(channelId);
    checkNotNull(memManager);
    checkNotNull(memory);

    this.ioManager = ioManager;
    this.channelId = channelId;
    this.memManager = memManager;
    this.memory = memory;
    this.sizeOfLastBlock = sizeOfLastBlock;
    this.segmentSize = memManager.getPageSize();

    this.reader = ioManager.createBlockChannelReader(channelId);

    try {
        final long channelLength = reader.getSize();

        final int blockCount = MathUtils.checkedDownCast(channelLength / segmentSize);
        this.numBlocksTotal = (channelLength % segmentSize == 0) ? blockCount : blockCount + 1;

        this.numBlocksRemaining = this.numBlocksTotal;
        this.numRequestsRemaining = numBlocksRemaining;

        for (int i = 0; i < memory.size(); i++) {
            sendReadRequest(memory.get(i));
        }

        advance();
    } catch (IOException e) {
        memManager.release(memory);
        throw e;
    }
}
Example #24
Source File: InternalPriorityQueueTestBase.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public void serialize(TestElement record, DataOutputView target) throws IOException {
    // serialize priority first, so that we have correct order in RocksDB. We flip the sign bit for correct
    // lexicographic order.
    target.writeLong(MathUtils.flipSignBit(record.getPriority()));
    target.writeLong(record.getKey());
}
Example #25
Source File: FileChannelInputView.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
public FileChannelInputView(BlockChannelReader<MemorySegment> reader, MemoryManager memManager,
        List<MemorySegment> memory, int sizeOfLastBlock) throws IOException {
    super(0);

    checkNotNull(reader);
    checkNotNull(memManager);
    checkNotNull(memory);
    checkArgument(!reader.isClosed());
    checkArgument(memory.size() > 0);

    this.reader = reader;
    this.memManager = memManager;
    this.memory = memory;
    this.sizeOfLastBlock = sizeOfLastBlock;

    try {
        final long channelLength = reader.getSize();
        final int segmentSize = memManager.getPageSize();

        this.numBlocksRemaining = MathUtils.checkedDownCast(channelLength / segmentSize);
        if (channelLength % segmentSize != 0) {
            this.numBlocksRemaining++;
        }

        this.numRequestsRemaining = numBlocksRemaining;

        for (int i = 0; i < memory.size(); i++) {
            sendReadRequest(memory.get(i));
        }

        advance();
    } catch (IOException e) {
        memManager.release(memory);
        throw e;
    }
}
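Both file-channel views compute the total block count as a ceiling division: checkedDownCast(channelLength / segmentSize) gives the number of full blocks, and the remainder check adds one more block for a partially filled tail. The same arithmetic in isolation (sizes are illustrative):

import org.apache.flink.util.MathUtils;

public class BlockCountDemo {
    public static void main(String[] args) {
        long channelLength = 100_000L;   // hypothetical file size in bytes
        int segmentSize = 32 * 1024;     // hypothetical page size

        int numBlocks = MathUtils.checkedDownCast(channelLength / segmentSize);
        if (channelLength % segmentSize != 0) {
            numBlocks++;                 // a partially filled last block still needs to be read
        }
        System.out.println(numBlocks);   // 100000 / 32768 = 3 full blocks, plus 1 partial -> 4
    }
}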