Java Code Examples for org.apache.flink.core.memory.MemorySegment#putInt()
The following examples show how to use org.apache.flink.core.memory.MemorySegment#putInt(). They are drawn from open-source projects; the source file, project, and license are noted above each example.
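Before the project examples, a minimal self-contained sketch of the method itself may help (illustrative only, not taken from the Flink sources): putInt(int index, int value) writes a 4-byte int at an absolute byte offset inside the segment, and getInt(int index) reads it back. The allocation call is the same factory method several test examples below use.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class PutIntRoundTrip {
    public static void main(String[] args) {
        // allocate a small unpooled heap segment
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(64);

        // write two ints at absolute byte offsets 0 and 4
        segment.putInt(0, 42);
        segment.putInt(4, -7);

        // read them back from the same offsets
        System.out.println(segment.getInt(0)); // prints 42
        System.out.println(segment.getInt(4)); // prints -7
    }
}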
Example 1
Source File: IOManagerAsyncTest.java From flink with Apache License 2.0
@Test
public void channelReadWriteOneSegment() {
    final int NUM_IOS = 1111;

    try {
        final FileIOChannel.ID channelID = this.ioManager.createChannel();
        final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channelID);

        MemorySegment memSeg = MemorySegmentFactory.allocateUnpooledSegment(32 * 1024);

        for (int i = 0; i < NUM_IOS; i++) {
            for (int pos = 0; pos < memSeg.size(); pos += 4) {
                memSeg.putInt(pos, i);
            }

            writer.writeBlock(memSeg);
            memSeg = writer.getNextReturnedBlock();
        }

        writer.close();

        final BlockChannelReader<MemorySegment> reader = this.ioManager.createBlockChannelReader(channelID);
        for (int i = 0; i < NUM_IOS; i++) {
            reader.readBlock(memSeg);
            memSeg = reader.getNextReturnedBlock();

            for (int pos = 0; pos < memSeg.size(); pos += 4) {
                if (memSeg.getInt(pos) != i) {
                    fail("Read memory segment contains invalid data.");
                }
            }
        }

        reader.closeAndDelete();
    }
    catch (Exception ex) {
        ex.printStackTrace();
        fail("Test encountered an exception: " + ex.getMessage());
    }
}
Example 2
Source File: CompactingHashTable.java From Flink-CEPplus with Apache License 2.0
private void initTable(int numBuckets, byte numPartitions) {
    final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
    final int numSegs = (numBuckets >>> this.bucketsPerSegmentBits) + ((numBuckets & this.bucketsPerSegmentMask) == 0 ? 0 : 1);
    final MemorySegment[] table = new MemorySegment[numSegs];

    // go over all segments that are part of the table
    for (int i = 0, bucket = 0; i < numSegs && bucket < numBuckets; i++) {
        final MemorySegment seg = getNextBuffer();

        // go over all buckets in the segment
        for (int k = 0; k < bucketsPerSegment && bucket < numBuckets; k++, bucket++) {
            final int bucketOffset = k * HASH_BUCKET_SIZE;

            // compute the partition that the bucket corresponds to
            final byte partition = assignPartition(bucket, numPartitions);

            // initialize the header fields
            seg.put(bucketOffset + HEADER_PARTITION_OFFSET, partition);
            seg.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0);
            seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
        }

        table[i] = seg;
    }

    this.buckets = table;
    this.numBuckets = numBuckets;
}
Example 3
Source File: ChannelWriterOutputView.java From Flink-CEPplus with Apache License 2.0
private void writeSegment(MemorySegment segment, int writePosition, boolean lastSegment) throws IOException {
    segment.putShort(0, HEADER_MAGIC_NUMBER);
    segment.putShort(HEADER_FLAGS_OFFSET, lastSegment ? FLAG_LAST_BLOCK : 0);
    segment.putInt(HEAD_BLOCK_LENGTH_OFFSET, writePosition);

    this.writer.writeBlock(segment);
    this.bytesBeforeSegment += writePosition - HEADER_LENGTH;
}
Example 4
Source File: SerializedUpdateBuffer.java From flink with Apache License 2.0
@Override
protected MemorySegment nextSegment(MemorySegment current, int positionInCurrent) throws IOException {
    current.putInt(0, positionInCurrent);

    // check if we keep the segment in memory, or if we spill it
    if (emptyBuffers.size() > numSegmentsSpillingThreshold) {
        // keep buffer in memory
        fullBuffers.addLast(current);
    } else {
        // spill all buffers up to now
        // check, whether we have a channel already
        if (currentWriter == null) {
            currentWriter = ioManager.createBlockChannelWriter(channelEnumerator.next(), emptyBuffers);
        }

        // spill all elements gathered up to now
        numBuffersSpilled += fullBuffers.size();
        while (fullBuffers.size() > 0) {
            currentWriter.writeBlock(fullBuffers.removeFirst());
        }
        currentWriter.writeBlock(current);
        numBuffersSpilled++;
    }

    try {
        return emptyBuffers.take();
    } catch (InterruptedException iex) {
        throw new RuntimeException("Spilling Fifo Queue was interrupted while waiting for next buffer.");
    }
}
Example 5
Source File: BinaryMap.java From flink with Apache License 2.0
public static BinaryMap valueOf(BinaryArray key, BinaryArray value) {
    checkArgument(key.getSegments().length == 1 && value.getSegments().length == 1);
    byte[] bytes = new byte[4 + key.getSizeInBytes() + value.getSizeInBytes()];
    MemorySegment segment = MemorySegmentFactory.wrap(bytes);
    segment.putInt(0, key.getSizeInBytes());
    key.getSegments()[0].copyTo(key.getOffset(), segment, 4, key.getSizeInBytes());
    value.getSegments()[0].copyTo(
            value.getOffset(), segment, 4 + key.getSizeInBytes(), value.getSizeInBytes());
    BinaryMap map = new BinaryMap();
    map.pointTo(segment, 0, bytes.length);
    return map;
}
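The layout produced by valueOf is a 4-byte header holding the length of the key section, followed by the key bytes and then the value bytes. A hypothetical read-back of that header, just to illustrate the framing (getSegments() on the map is assumed here to expose the single backing segment established by the checkArgument above):

// hypothetical: recover the key-section length from a BinaryMap built by valueOf
MemorySegment segment = map.getSegments()[0]; // single backing segment
int keySectionLen = segment.getInt(0);        // equals key.getSizeInBytes()
// key bytes occupy offsets [4, 4 + keySectionLen); the value bytes follow immediately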
Example 6
Source File: IntNormalizedKeyComputer.java From flink with Apache License 2.0
@Override
public void putKey(BaseRow record, MemorySegment target, int offset) {
    // write first null byte.
    if (record.isNullAt(0)) {
        SortUtil.minNormalizedKey(target, offset, 5);
    } else {
        target.put(offset, (byte) 1);
        SortUtil.putIntNormalizedKey(record.getInt(0), target, offset + 1, 4);
    }

    // revert 4 bytes to compare easier.
    target.putInt(offset, Integer.reverseBytes(target.getInt(offset)));
}
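A note on the final line: SortUtil.putIntNormalizedKey stores the key in a byte-wise comparable form, and the trailing Integer.reverseBytes adapts that stored form to how this computer reads ints back during comparison (the in-source comment hints at this with "revert 4 bytes to compare easier"). The byte reversal itself, shown standalone as a plain-Java sketch:

// Integer.reverseBytes swaps the byte order of a 32-bit value
int x = 0x11223344;
int r = Integer.reverseBytes(x);      // 0x44332211
assert Integer.reverseBytes(r) == x;  // reversing twice restores the original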
Example 7
Source File: BufferFileWriterReaderTest.java From flink with Apache License 2.0
static int fillBufferWithAscendingNumbers(Buffer buffer, int currentNumber, int size) {
    checkArgument(size % 4 == 0);

    MemorySegment segment = buffer.getMemorySegment();

    for (int i = 0; i < size; i += 4) {
        segment.putInt(i, currentNumber++);
    }

    buffer.setSize(size);

    return currentNumber;
}
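A matching read-back helper is not part of the example, but a hypothetical counterpart that verifies a buffer filled this way could look as follows (assertEquals from JUnit assumed):

// hypothetical counterpart to fillBufferWithAscendingNumbers
static void verifyBufferWithAscendingNumbers(Buffer buffer, int startNumber, int size) {
    MemorySegment segment = buffer.getMemorySegment();
    for (int i = 0; i < size; i += 4) {
        assertEquals(startNumber++, segment.getInt(i)); // same stride, same order
    }
}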
Example 8
Source File: IntNormalizedKeyComputer.java From flink with Apache License 2.0
@Override
public void swapKey(MemorySegment segI, int offsetI, MemorySegment segJ, int offsetJ) {
    int temp0 = segI.getInt(offsetI);
    segI.putInt(offsetI, segJ.getInt(offsetJ));
    segJ.putInt(offsetJ, temp0);

    byte temp1 = segI.get(offsetI + 4);
    segI.put(offsetI + 4, segJ.get(offsetJ + 4));
    segJ.put(offsetJ + 4, temp1);
}
Example 9
Source File: IntNormalizedKeyComputer.java From flink with Apache License 2.0
@Override
public void putKey(RowData record, MemorySegment target, int offset) {
    // write first null byte.
    if (record.isNullAt(0)) {
        SortUtil.minNormalizedKey(target, offset, 5);
    } else {
        target.put(offset, (byte) 1);
        SortUtil.putIntNormalizedKey(record.getInt(0), target, offset + 1, 4);
    }

    // revert 4 bytes to compare easier.
    target.putInt(offset, Integer.reverseBytes(target.getInt(offset)));
}
Example 10
Source File: CompactingHashTable.java From Flink-CEPplus with Apache License 2.0
/**
 * IMPORTANT!!! We pass only the partition number, because we must make sure we get a fresh
 * partition reference. The partition reference used during search for the key may have become
 * invalid during the compaction.
 */
private void insertBucketEntryFromSearch(
        MemorySegment originalBucket,
        MemorySegment currentBucket,
        int originalBucketOffset,
        int currentBucketOffset,
        int countInCurrentBucket,
        long originalForwardPointer,
        int hashCode,
        long pointer,
        int partitionNumber) throws IOException {

    boolean checkForResize = false;
    if (countInCurrentBucket < NUM_ENTRIES_PER_BUCKET) {
        // we are good in our current bucket, put the values
        currentBucket.putInt(currentBucketOffset + BUCKET_HEADER_LENGTH + (countInCurrentBucket * HASH_CODE_LEN), hashCode); // hash code
        currentBucket.putLong(currentBucketOffset + BUCKET_POINTER_START_OFFSET + (countInCurrentBucket * POINTER_LEN), pointer); // pointer
        currentBucket.putInt(currentBucketOffset + HEADER_COUNT_OFFSET, countInCurrentBucket + 1); // update count
    } else {
        // we go to a new overflow bucket
        final InMemoryPartition<T> partition = this.partitions.get(partitionNumber);
        MemorySegment overflowSeg;
        final int overflowSegmentNum;
        final int overflowBucketOffset;

        // first, see if there is space for an overflow bucket remaining in the last overflow segment
        if (partition.nextOverflowBucket == 0) {
            // no space left in last bucket, or no bucket yet, so create an overflow segment
            overflowSeg = getNextBuffer();
            overflowBucketOffset = 0;
            overflowSegmentNum = partition.numOverflowSegments;

            // add the new overflow segment
            if (partition.overflowSegments.length <= partition.numOverflowSegments) {
                MemorySegment[] newSegsArray = new MemorySegment[partition.overflowSegments.length * 2];
                System.arraycopy(partition.overflowSegments, 0, newSegsArray, 0, partition.overflowSegments.length);
                partition.overflowSegments = newSegsArray;
            }
            partition.overflowSegments[partition.numOverflowSegments] = overflowSeg;
            partition.numOverflowSegments++;
            checkForResize = true;
        } else {
            // there is space in the last overflow segment
            overflowSegmentNum = partition.numOverflowSegments - 1;
            overflowSeg = partition.overflowSegments[overflowSegmentNum];
            overflowBucketOffset = partition.nextOverflowBucket << NUM_INTRA_BUCKET_BITS;
        }

        // next overflow bucket is one ahead. if the segment is full, the next will be at the beginning
        // of a new segment
        partition.nextOverflowBucket = (partition.nextOverflowBucket == this.bucketsPerSegmentMask ? 0 : partition.nextOverflowBucket + 1);

        // insert the new overflow bucket in the chain of buckets
        // 1) set the old forward pointer
        // 2) let the bucket in the main table point to this one
        overflowSeg.putLong(overflowBucketOffset + HEADER_FORWARD_OFFSET, originalForwardPointer);
        final long pointerToNewBucket = (((long) overflowSegmentNum) << 32) | ((long) overflowBucketOffset);
        originalBucket.putLong(originalBucketOffset + HEADER_FORWARD_OFFSET, pointerToNewBucket);

        // finally, insert the values into the overflow buckets
        overflowSeg.putInt(overflowBucketOffset + BUCKET_HEADER_LENGTH, hashCode); // hash code
        overflowSeg.putLong(overflowBucketOffset + BUCKET_POINTER_START_OFFSET, pointer); // pointer

        // set the count to one
        overflowSeg.putInt(overflowBucketOffset + HEADER_COUNT_OFFSET, 1);

        if (checkForResize && !this.isResizing) {
            // check if we should resize buckets
            if (this.buckets.length <= getOverflowSegmentCount()) {
                resizeHashTable();
            }
        }
    }
}
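One detail of the overflow path worth spelling out: pointerToNewBucket packs the overflow segment number into the upper 32 bits of a long and the bucket's byte offset into the lower 32 bits. The inverse decomposition (an illustrative sketch, not a method of the class):

// given: pointerToNewBucket = (((long) overflowSegmentNum) << 32) | ((long) overflowBucketOffset)
int segmentNum   = (int) (pointerToNewBucket >>> 32); // upper 32 bits: overflow segment index
int bucketOffset = (int) pointerToNewBucket;          // lower 32 bits: byte offset within that segment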
Example 11
Source File: SkipListUtils.java From flink with Apache License 2.0
/**
 * Puts the version of value to value space.
 *
 * @param memorySegment memory segment for value space.
 * @param offset offset of value space in memory segment.
 * @param version version of value.
 */
public static void putValueVersion(MemorySegment memorySegment, int offset, int version) {
    memorySegment.putInt(offset + VALUE_VERSION_OFFSET, version);
}
Example 12
Source File: SkipListUtils.java From flink with Apache License 2.0
/**
 * Puts the level and status to the key space.
 *
 * @param memorySegment memory segment for key space.
 * @param offset offset of key space in the memory segment.
 * @param level the level.
 * @param status the status.
 */
public static void putLevelAndNodeStatus(MemorySegment memorySegment, int offset, int level, NodeStatus status) {
    int data = ((status.getValue() & BYTE_MASK) << 8) | level;
    memorySegment.putInt(offset + SkipListUtils.KEY_META_OFFSET, data);
}
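The packed int places the node status in bits 8 through 15 and the level in the low byte. Decoding it is the mirror image (a sketch of the inverse, assuming the level fits in the low byte as the encoding implies):

// inverse of: data = ((status.getValue() & BYTE_MASK) << 8) | level
int data = memorySegment.getInt(offset + SkipListUtils.KEY_META_OFFSET);
int level = data & 0xFF;                          // low byte: the level
byte statusValue = (byte) ((data >>> 8) & 0xFF);  // next byte: the status value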
Example 13
Source File: SkipListUtils.java From flink with Apache License 2.0
/**
 * Puts the length of key to the key space.
 *
 * @param memorySegment memory segment for key space.
 * @param offset offset of key space in the memory segment.
 * @param keyLen length of key.
 */
public static void putKeyLen(MemorySegment memorySegment, int offset, int keyLen) {
    memorySegment.putInt(offset + KEY_LEN_OFFSET, keyLen);
}