Java Code Examples for org.apache.flink.core.memory.MemorySegment#putShort()
The following examples show how to use org.apache.flink.core.memory.MemorySegment#putShort().
You can go to the original project or source file via the reference above each example.
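Before the project examples, here is a minimal stand-alone sketch of putShort() and its getShort() counterpart. It assumes a heap-backed segment obtained from org.apache.flink.core.memory.MemorySegmentFactory; the size, offsets, and values are purely illustrative.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class PutShortSketch {

    public static void main(String[] args) {
        // allocate a small unpooled heap segment (32 bytes, chosen arbitrarily)
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(32);

        // write two 2-byte values at byte offsets 0 and 2
        segment.putShort(0, (short) 0xC0DE);
        segment.putShort(2, (short) 42);

        // read them back with the matching getShort() calls
        short first = segment.getShort(0);   // -16162, i.e. (short) 0xC0DE
        short second = segment.getShort(2);  // 42
        System.out.println(first + " " + second);
    }
}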
Example 1
Source File: MutableHashTable.java From Flink-CEPplus with Apache License 2.0
protected void initTable(int numBuckets, byte numPartitions) {
    final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
    final int numSegs = (numBuckets >>> this.bucketsPerSegmentBits) + ( (numBuckets & this.bucketsPerSegmentMask) == 0 ? 0 : 1);
    final MemorySegment[] table = new MemorySegment[numSegs];

    ensureNumBuffersReturned(numSegs);

    // go over all segments that are part of the table
    for (int i = 0, bucket = 0; i < numSegs && bucket < numBuckets; i++) {
        final MemorySegment seg = getNextBuffer();

        // go over all buckets in the segment
        for (int k = 0; k < bucketsPerSegment && bucket < numBuckets; k++, bucket++) {
            final int bucketOffset = k * HASH_BUCKET_SIZE;

            // compute the partition that the bucket corresponds to
            final byte partition = assignPartition(bucket, numPartitions);

            // initialize the header fields
            seg.put(bucketOffset + HEADER_PARTITION_OFFSET, partition);
            seg.put(bucketOffset + HEADER_STATUS_OFFSET, BUCKET_STATUS_IN_MEMORY);
            seg.putShort(bucketOffset + HEADER_COUNT_OFFSET, (short) 0);
            seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
            seg.putShort(bucketOffset + HEADER_PROBED_FLAGS_OFFSET, (short) 0);
        }

        table[i] = seg;
    }

    this.buckets = table;
    this.numBuckets = numBuckets;

    if (useBloomFilters) {
        initBloomFilter(numBuckets);
    }
}
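initTable() above only zeroes the 2-byte bucket count with putShort(). As a hedged illustration of how such a field could later be read back and bumped, the following sketch uses getShort() and putShort() together; the countOffset parameter merely stands in for the HEADER_COUNT_OFFSET constant referenced above and the method is not taken from MutableHashTable.

// Sketch only: 'countOffset' plays the role of HEADER_COUNT_OFFSET from the example above.
static void incrementBucketCount(MemorySegment seg, int bucketOffset, int countOffset) {
    short count = seg.getShort(bucketOffset + countOffset);        // current number of entries in the bucket
    seg.putShort(bucketOffset + countOffset, (short) (count + 1)); // store the incremented count
}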
Example 2
Source File: ChannelWriterOutputView.java From Flink-CEPplus with Apache License 2.0
private void writeSegment(MemorySegment segment, int writePosition, boolean lastSegment) throws IOException {
    segment.putShort(0, HEADER_MAGIC_NUMBER);
    segment.putShort(HEADER_FLAGS_OFFSET, lastSegment ? FLAG_LAST_BLOCK : 0);
    segment.putInt(HEAD_BLOCK_LENGTH_OFFSET, writePosition);

    this.writer.writeBlock(segment);
    this.bytesBeforeSegment += writePosition - HEADER_LENGTH;
}
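writeSegment() above lays out a small block header: a magic number and a flags word written with putShort(), followed by the block length written with putInt(). A reader-side sketch that validates and decodes such a header might look like the following; the offsets are passed in as parameters because this sketch does not reference the actual Flink header constants, and it is not taken from a Flink reader class.

// Sketch only: flagsOffset and blockLengthOffset are assumed to mirror the
// HEADER_FLAGS_OFFSET / HEAD_BLOCK_LENGTH_OFFSET constants used by the writer above.
static void inspectBlockHeader(MemorySegment segment, short expectedMagic,
        int flagsOffset, int blockLengthOffset) {
    if (segment.getShort(0) != expectedMagic) {
        throw new IllegalStateException("Unexpected magic number in block header.");
    }
    short flags = segment.getShort(flagsOffset);          // e.g. the FLAG_LAST_BLOCK bit
    int blockLength = segment.getInt(blockLengthOffset);  // the writePosition stored by writeSegment()
    System.out.println("flags=" + flags + ", blockLength=" + blockLength);
}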
Example 3
Source File: MutableHashTable.java From flink with Apache License 2.0
protected void initTable(int numBuckets, byte numPartitions) {
    final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
    final int numSegs = (numBuckets >>> this.bucketsPerSegmentBits) + ( (numBuckets & this.bucketsPerSegmentMask) == 0 ? 0 : 1);
    final MemorySegment[] table = new MemorySegment[numSegs];

    ensureNumBuffersReturned(numSegs);

    // go over all segments that are part of the table
    for (int i = 0, bucket = 0; i < numSegs && bucket < numBuckets; i++) {
        final MemorySegment seg = getNextBuffer();

        // go over all buckets in the segment
        for (int k = 0; k < bucketsPerSegment && bucket < numBuckets; k++, bucket++) {
            final int bucketOffset = k * HASH_BUCKET_SIZE;

            // compute the partition that the bucket corresponds to
            final byte partition = assignPartition(bucket, numPartitions);

            // initialize the header fields
            seg.put(bucketOffset + HEADER_PARTITION_OFFSET, partition);
            seg.put(bucketOffset + HEADER_STATUS_OFFSET, BUCKET_STATUS_IN_MEMORY);
            seg.putShort(bucketOffset + HEADER_COUNT_OFFSET, (short) 0);
            seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
            seg.putShort(bucketOffset + HEADER_PROBED_FLAGS_OFFSET, (short) 0);
        }

        table[i] = seg;
    }

    this.buckets = table;
    this.numBuckets = numBuckets;

    if (useBloomFilters) {
        initBloomFilter(numBuckets);
    }
}
Example 4
Source File: ChannelWriterOutputView.java From flink with Apache License 2.0
private void writeSegment(MemorySegment segment, int writePosition, boolean lastSegment) throws IOException {
    segment.putShort(0, HEADER_MAGIC_NUMBER);
    segment.putShort(HEADER_FLAGS_OFFSET, lastSegment ? FLAG_LAST_BLOCK : 0);
    segment.putInt(HEAD_BLOCK_LENGTH_OFFSET, writePosition);

    this.writer.writeBlock(segment);
    this.bytesBeforeSegment += writePosition - HEADER_LENGTH;
}
Example 5
Source File: HashTableITCase.java From Flink-CEPplus with Apache License 2.0
@Test
public void testBucketsNotFulfillSegment() throws Exception {
    final int NUM_KEYS = 10000;
    final int BUILD_VALS_PER_KEY = 3;
    final int PROBE_VALS_PER_KEY = 10;

    // create a build input that gives 30000 pairs with 3 values sharing the same key
    MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);

    // create a probe input that gives 100000 pairs with 10 values sharing a key
    MutableObjectIterator<IntPair> probeInput = new UniformIntPairGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        // 33 is the minimum number of pages required to perform the hash join on these inputs
        memSegments = this.memManager.allocatePages(MEM_OWNER, 33);
    }
    catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    // For FLINK-2545, the bucket data may not fill its buffer: for example, the buffer may contain 256 buckets
    // while the hash table assigns only 250 buckets to it. The unused buffer bytes may contain arbitrary data,
    // which can corrupt the hash table if it is not skipped. To mock this, put invalid bucket data
    // (partition=1, inMemory=true, count=-1) at the end of the buffer.
    for (MemorySegment segment : memSegments) {
        int newBucketOffset = segment.size() - 128;
        // initialize the header fields
        segment.put(newBucketOffset + 0, (byte) 0);
        segment.put(newBucketOffset + 1, (byte) 0);
        segment.putShort(newBucketOffset + 2, (short) -1);
        segment.putLong(newBucketOffset + 4, ~0x0L);
    }

    // ----------------------------------------------------------------------------------------

    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
            this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
            this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator,
            memSegments, ioManager);
    join.open(buildInput, probeInput);

    final IntPair recordReuse = new IntPair();
    int numRecordsInJoinResult = 0;

    while (join.nextRecord()) {
        MutableObjectIterator<IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.",
            NUM_KEYS * BUILD_VALS_PER_KEY * PROBE_VALS_PER_KEY, numRecordsInJoinResult);

    join.close();
    this.memManager.release(join.getFreedMemory());
}
Example 6
Source File: BinaryHashTableTest.java From flink with Apache License 2.0
@Test
public void testBucketsNotFulfillSegment() throws Exception {
    final int numKeys = 10000;
    final int buildValsPerKey = 3;
    final int probeValsPerKey = 10;

    // create a build input that gives 30000 pairs with 3 values sharing the same key
    MutableObjectIterator<BinaryRow> buildInput = new UniformBinaryRowGenerator(numKeys, buildValsPerKey, false);

    // create a probe input that gives 100000 pairs with 10 values sharing a key
    MutableObjectIterator<BinaryRow> probeInput = new UniformBinaryRowGenerator(numKeys, probeValsPerKey, true);

    // allocate the memory for the HashTable
    MemoryManager memManager = new MemoryManager(35 * PAGE_SIZE, 1);

    // ----------------------------------------------------------------------------------------

    final BinaryHashTable table = new BinaryHashTable(conf, new Object(),
            this.buildSideSerializer, this.probeSideSerializer,
            new MyProjection(), new MyProjection(),
            memManager, 35 * PAGE_SIZE, ioManager, 24, 200000,
            true, HashJoinType.INNER, null, false, new boolean[]{true}, false);

    // For FLINK-2545, the bucket data may not fill its buffer: for example, the buffer may contain 256 buckets
    // while the hash table assigns only 250 buckets to it. The unused buffer bytes may contain arbitrary data,
    // which can corrupt the hash table if it is not skipped. To mock this, put invalid bucket data
    // (partition=1, inMemory=true, count=-1) at the end of the buffer.
    for (MemorySegment segment : table.getFreedMemory()) {
        int newBucketOffset = segment.size() - 128;
        // initialize the header fields
        segment.put(newBucketOffset, (byte) 0);
        segment.put(newBucketOffset + 1, (byte) 0);
        segment.putShort(newBucketOffset + 2, (short) -1);
        segment.putLong(newBucketOffset + 4, ~0x0L);
    }

    int numRecordsInJoinResult = join(table, buildInput, probeInput);
    Assert.assertEquals("Wrong number of records in join result.",
            numKeys * buildValsPerKey * probeValsPerKey, numRecordsInJoinResult);

    table.close();
    table.free();
}
Example 7
Source File: HashTableITCase.java From flink with Apache License 2.0
@Test
public void testBucketsNotFulfillSegment() throws Exception {
    final int NUM_KEYS = 10000;
    final int BUILD_VALS_PER_KEY = 3;
    final int PROBE_VALS_PER_KEY = 10;

    // create a build input that gives 30000 pairs with 3 values sharing the same key
    MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);

    // create a probe input that gives 100000 pairs with 10 values sharing a key
    MutableObjectIterator<IntPair> probeInput = new UniformIntPairGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        // 33 is the minimum number of pages required to perform the hash join on these inputs
        memSegments = this.memManager.allocatePages(MEM_OWNER, 33);
    }
    catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    // For FLINK-2545, the bucket data may not fill its buffer: for example, the buffer may contain 256 buckets
    // while the hash table assigns only 250 buckets to it. The unused buffer bytes may contain arbitrary data,
    // which can corrupt the hash table if it is not skipped. To mock this, put invalid bucket data
    // (partition=1, inMemory=true, count=-1) at the end of the buffer.
    for (MemorySegment segment : memSegments) {
        int newBucketOffset = segment.size() - 128;
        // initialize the header fields
        segment.put(newBucketOffset + 0, (byte) 0);
        segment.put(newBucketOffset + 1, (byte) 0);
        segment.putShort(newBucketOffset + 2, (short) -1);
        segment.putLong(newBucketOffset + 4, ~0x0L);
    }

    // ----------------------------------------------------------------------------------------

    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
            this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
            this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator,
            memSegments, ioManager);
    join.open(buildInput, probeInput);

    final IntPair recordReuse = new IntPair();
    int numRecordsInJoinResult = 0;

    while (join.nextRecord()) {
        MutableObjectIterator<IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.",
            NUM_KEYS * BUILD_VALS_PER_KEY * PROBE_VALS_PER_KEY, numRecordsInJoinResult);

    join.close();
    this.memManager.release(join.getFreedMemory());
}
Example 8
Source File: BinaryHashTableTest.java From flink with Apache License 2.0
@Test
public void testBucketsNotFulfillSegment() throws Exception {
    final int numKeys = 10000;
    final int buildValsPerKey = 3;
    final int probeValsPerKey = 10;

    // create a build input that gives 30000 pairs with 3 values sharing the same key
    MutableObjectIterator<BinaryRowData> buildInput = new UniformBinaryRowGenerator(numKeys, buildValsPerKey, false);

    // create a probe input that gives 100000 pairs with 10 values sharing a key
    MutableObjectIterator<BinaryRowData> probeInput = new UniformBinaryRowGenerator(numKeys, probeValsPerKey, true);

    // allocate the memory for the HashTable
    MemoryManager memManager = MemoryManagerBuilder.newBuilder().setMemorySize(35 * PAGE_SIZE).build();

    // ----------------------------------------------------------------------------------------

    final BinaryHashTable table = new BinaryHashTable(conf, new Object(),
            this.buildSideSerializer, this.probeSideSerializer,
            new MyProjection(), new MyProjection(),
            memManager, 35 * PAGE_SIZE, ioManager, 24, 200000,
            true, HashJoinType.INNER, null, false, new boolean[]{true}, false);

    // For FLINK-2545, the bucket data may not fill its buffer: for example, the buffer may contain 256 buckets
    // while the hash table assigns only 250 buckets to it. The unused buffer bytes may contain arbitrary data,
    // which can corrupt the hash table if it is not skipped. To mock this, put invalid bucket data
    // (partition=1, inMemory=true, count=-1) at the end of the buffer.
    int totalPages = table.getInternalPool().freePages();
    for (int i = 0; i < totalPages; i++) {
        MemorySegment segment = table.getInternalPool().nextSegment();
        int newBucketOffset = segment.size() - 128;
        // initialize the header fields
        segment.put(newBucketOffset, (byte) 0);
        segment.put(newBucketOffset + 1, (byte) 0);
        segment.putShort(newBucketOffset + 2, (short) -1);
        segment.putLong(newBucketOffset + 4, ~0x0L);
        table.returnPage(segment);
    }

    int numRecordsInJoinResult = join(table, buildInput, probeInput);
    Assert.assertEquals("Wrong number of records in join result.",
            numKeys * buildValsPerKey * probeValsPerKey, numRecordsInJoinResult);

    table.close();
    table.free();
}