org.apache.flink.api.common.typeutils.SameTypePairComparator Java Examples
The following examples show how to use org.apache.flink.api.common.typeutils.SameTypePairComparator, a TypePairComparator that compares two records of the same type through a single wrapped TypeComparator. The examples are drawn from open source projects; the originating project and source file are noted above each example.
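Before the project examples, here is a minimal, self-contained sketch of the comparator on its own. It is illustrative only (the class name SameTypePairComparatorSketch and the printed values are not taken from the examples below): a SameTypePairComparator wraps one TypeComparator<T> so that probe-side and build-side records of the same type T can be compared pairwise, which is how the hash tables in the examples use it when building a prober.

import org.apache.flink.api.common.typeutils.SameTypePairComparator;
import org.apache.flink.api.common.typeutils.base.IntComparator;

public class SameTypePairComparatorSketch {

    public static void main(String[] args) {
        // Wrap a single TypeComparator; the same comparison logic is applied to both sides of the pair.
        SameTypePairComparator<Integer> pairComparator =
                new SameTypePairComparator<>(new IntComparator(true));

        // Set the reference record (the "probe side") ...
        pairComparator.setReference(42);

        // ... then test candidate records (the "build side") against it.
        System.out.println(pairComparator.equalToReference(42));  // true
        System.out.println(pairComparator.equalToReference(7));   // false
        System.out.println(pairComparator.compareToReference(7)); // non-zero, since 7 != 42
    }
}

In the examples that follow, the same pattern appears as new SameTypePairComparator<>(buildSideComparator) (or new SameTypePairComparator<>(comparator)) whenever a hash table prober is created via getProber(...).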
Example #1
Source File: InPlaceMutableHashTable.java From Flink-CEPplus with Apache License 2.0
public ReduceFacade(ReduceFunction<T> reducer, Collector<T> outputCollector, boolean objectReuseEnabled) {
    this.reducer = reducer;
    this.outputCollector = outputCollector;
    this.objectReuseEnabled = objectReuseEnabled;
    this.prober = getProber(buildSideComparator, new SameTypePairComparator<>(buildSideComparator));
    this.reuse = buildSideSerializer.createInstance();
}
Example #2
Source File: InPlaceMutableHashTableTest.java From flink with Apache License 2.0
/**
 * The records are larger than one segment. Additionally, there is just barely enough memory,
 * so lots of compactions will happen.
 */
@Test
public void testLargeRecordsWithManyCompactions() {
    try {
        final int numElements = 1000;

        final String longString1 = getLongString(100000), longString2 = getLongString(110000);
        List<MemorySegment> memory = getMemory(3800, 32 * 1024);

        InPlaceMutableHashTable<Tuple2<Long, String>> table =
                new InPlaceMutableHashTable<>(serializer, comparator, memory);
        table.open();

        // first, we insert some elements
        for (long i = 0; i < numElements; i++) {
            table.insertOrReplaceRecord(Tuple2.of(i, longString1));
        }

        // now, we replace the same elements with larger ones, causing fragmentation
        for (long i = 0; i < numElements; i++) {
            table.insertOrReplaceRecord(Tuple2.of(i, longString2));
        }

        // check the results
        InPlaceMutableHashTable<Tuple2<Long, String>>.HashTableProber<Tuple2<Long, String>> prober =
                table.getProber(comparator, new SameTypePairComparator<>(comparator));
        Tuple2<Long, String> reuse = new Tuple2<>();
        for (long i = 0; i < numElements; i++) {
            assertNotNull(prober.getMatchFor(Tuple2.of(i, longString2), reuse));
        }

        table.close();
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #3
Source File: InPlaceMutableHashTable.java From flink with Apache License 2.0
public ReduceFacade(ReduceFunction<T> reducer, Collector<T> outputCollector, boolean objectReuseEnabled) {
    this.reducer = reducer;
    this.outputCollector = outputCollector;
    this.objectReuseEnabled = objectReuseEnabled;
    this.prober = getProber(buildSideComparator, new SameTypePairComparator<>(buildSideComparator));
    this.reuse = buildSideSerializer.createInstance();
}
Example #4
Source File: InPlaceMutableHashTable.java From flink with Apache License 2.0
public InPlaceMutableHashTable(TypeSerializer<T> serializer, TypeComparator<T> comparator, List<MemorySegment> memory) {
    super(serializer, comparator);
    this.numAllMemorySegments = memory.size();
    this.freeMemorySegments = new ArrayList<>(memory);

    // some sanity checks first
    if (freeMemorySegments.size() < MIN_NUM_MEMORY_SEGMENTS) {
        throw new IllegalArgumentException("Too few memory segments provided. InPlaceMutableHashTable needs at least " +
            MIN_NUM_MEMORY_SEGMENTS + " memory segments.");
    }

    // Get the size of the first memory segment and record it. All further buffers must have the same size.
    // the size must also be a power of 2
    segmentSize = freeMemorySegments.get(0).size();
    if ((segmentSize & (segmentSize - 1)) != 0) {
        throw new IllegalArgumentException("Hash Table requires buffers whose size is a power of 2.");
    }

    this.numBucketsPerSegment = segmentSize / bucketSize;
    this.numBucketsPerSegmentBits = MathUtils.log2strict(this.numBucketsPerSegment);
    this.numBucketsPerSegmentMask = (1 << this.numBucketsPerSegmentBits) - 1;

    recordArea = new RecordArea(segmentSize);

    stagingSegments = new ArrayList<>();
    stagingSegments.add(forcedAllocateSegment());
    stagingSegmentsInView = new RandomAccessInputView(stagingSegments, segmentSize);
    stagingSegmentsOutView = new StagingOutputView(stagingSegments, segmentSize);

    prober = new HashTableProber<>(buildSideComparator, new SameTypePairComparator<>(buildSideComparator));

    enableResize = buildSideSerializer.getLength() == -1;
}
Example #5
Source File: InPlaceMutableHashTableTest.java From Flink-CEPplus with Apache License 2.0
/**
 * The records are larger than one segment. Additionally, there is just barely enough memory,
 * so lots of compactions will happen.
 */
@Test
public void testLargeRecordsWithManyCompactions() {
    try {
        final int numElements = 1000;

        final String longString1 = getLongString(100000), longString2 = getLongString(110000);
        List<MemorySegment> memory = getMemory(3800, 32 * 1024);

        InPlaceMutableHashTable<Tuple2<Long, String>> table =
                new InPlaceMutableHashTable<>(serializer, comparator, memory);
        table.open();

        // first, we insert some elements
        for (long i = 0; i < numElements; i++) {
            table.insertOrReplaceRecord(Tuple2.of(i, longString1));
        }

        // now, we replace the same elements with larger ones, causing fragmentation
        for (long i = 0; i < numElements; i++) {
            table.insertOrReplaceRecord(Tuple2.of(i, longString2));
        }

        // check the results
        InPlaceMutableHashTable<Tuple2<Long, String>>.HashTableProber<Tuple2<Long, String>> prober =
                table.getProber(comparator, new SameTypePairComparator<>(comparator));
        Tuple2<Long, String> reuse = new Tuple2<>();
        for (long i = 0; i < numElements; i++) {
            assertNotNull(prober.getMatchFor(Tuple2.of(i, longString2), reuse));
        }

        table.close();
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #6
Source File: InPlaceMutableHashTable.java From Flink-CEPplus with Apache License 2.0
public InPlaceMutableHashTable(TypeSerializer<T> serializer, TypeComparator<T> comparator, List<MemorySegment> memory) {
    super(serializer, comparator);
    this.numAllMemorySegments = memory.size();
    this.freeMemorySegments = new ArrayList<>(memory);

    // some sanity checks first
    if (freeMemorySegments.size() < MIN_NUM_MEMORY_SEGMENTS) {
        throw new IllegalArgumentException("Too few memory segments provided. InPlaceMutableHashTable needs at least " +
            MIN_NUM_MEMORY_SEGMENTS + " memory segments.");
    }

    // Get the size of the first memory segment and record it. All further buffers must have the same size.
    // the size must also be a power of 2
    segmentSize = freeMemorySegments.get(0).size();
    if ((segmentSize & (segmentSize - 1)) != 0) {
        throw new IllegalArgumentException("Hash Table requires buffers whose size is a power of 2.");
    }

    this.numBucketsPerSegment = segmentSize / bucketSize;
    this.numBucketsPerSegmentBits = MathUtils.log2strict(this.numBucketsPerSegment);
    this.numBucketsPerSegmentMask = (1 << this.numBucketsPerSegmentBits) - 1;

    recordArea = new RecordArea(segmentSize);

    stagingSegments = new ArrayList<>();
    stagingSegments.add(forcedAllocateSegment());
    stagingSegmentsInView = new RandomAccessInputView(stagingSegments, segmentSize);
    stagingSegmentsOutView = new StagingOutputView(stagingSegments, segmentSize);

    prober = new HashTableProber<>(buildSideComparator, new SameTypePairComparator<>(buildSideComparator));

    enableResize = buildSideSerializer.getLength() == -1;
}
Example #7
Source File: CompactingHashTableTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testTripleResize() {
    // Only CompactingHashTable
    try {
        final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
        final Random rnd = new Random(RANDOM_SEED);
        final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

        List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
        CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
        table.open();

        for (int i = 0; i < NUM_PAIRS; i++) {
            table.insert(pairs[i]);
        }

        AbstractHashTableProber<IntPair, IntPair> prober =
            table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
        IntPair target = new IntPair();

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        // make sure there is enough memory for resize
        memory.addAll(getMemory(2 * ADDITIONAL_MEM));
        b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        table.close();
        assertEquals("Memory lost", NUM_MEM_PAGES + 4 * ADDITIONAL_MEM, table.getFreeMemory().size());
    } catch (Exception e) {
        e.printStackTrace();
        fail("Error: " + e.getMessage());
    }
}
Example #8
Source File: CompactingHashTableTest.java From flink with Apache License 2.0
/**
 * This test validates that new inserts (rather than updates) in "insertOrReplace()" properly
 * react to out of memory conditions.
 */
@Test
public void testInsertsWithInsertOrReplace() {
    try {
        final int numElements = 1000;

        final String longString = getLongString(10000);
        List<MemorySegment> memory = getMemory(1000, 32 * 1024);

        // we create a hash table that thinks the records are super large. that makes it choose initially
        // a lot of memory for the partition buffers, and start with a smaller hash table. that way
        // we trigger a hash table growth early.
        CompactingHashTable<Tuple2<Long, String>> table = new CompactingHashTable<>(
                tuple2LongStringSerializer, tuple2LongStringComparator, memory, 100);
        table.open();

        // first, we insert some elements
        for (long i = 0; i < numElements; i++) {
            table.insertOrReplaceRecord(Tuple2.of(i, longString));
        }

        // now, we replace the same elements, causing fragmentation
        for (long i = 0; i < numElements; i++) {
            table.insertOrReplaceRecord(Tuple2.of(i, longString));
        }

        // now we insert an additional set of elements. without compaction during this insertion,
        // the memory will run out
        for (long i = 0; i < numElements; i++) {
            table.insertOrReplaceRecord(Tuple2.of(i + numElements, longString));
        }

        // check the results
        CompactingHashTable<Tuple2<Long, String>>.HashTableProber<Tuple2<Long, String>> prober =
                table.getProber(tuple2LongStringComparator, new SameTypePairComparator<>(tuple2LongStringComparator));
        Tuple2<Long, String> reuse = new Tuple2<>();

        for (long i = 0; i < numElements; i++) {
            assertNotNull(prober.getMatchFor(Tuple2.of(i, longString), reuse));
            assertNotNull(prober.getMatchFor(Tuple2.of(i + numElements, longString), reuse));
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #9
Source File: CompactingHashTableTest.java From flink with Apache License 2.0
@Test
public void testResize() {
    // Only CompactingHashTable
    try {
        final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
        final Random rnd = new Random(RANDOM_SEED);
        final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

        List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
        CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
        table.open();

        for (int i = 0; i < NUM_PAIRS; i++) {
            table.insert(pairs[i]);
        }

        AbstractHashTableProber<IntPair, IntPair> prober =
            table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
        IntPair target = new IntPair();

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        table.close();
        assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM, table.getFreeMemory().size());
    } catch (Exception e) {
        e.printStackTrace();
        fail("Error: " + e.getMessage());
    }
}
Example #10
Source File: CompactingHashTableTest.java From flink with Apache License 2.0
@Test
public void testDoubleResize() {
    // Only CompactingHashTable
    try {
        final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
        final Random rnd = new Random(RANDOM_SEED);
        final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

        List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
        CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
        table.open();

        for (int i = 0; i < NUM_PAIRS; i++) {
            table.insert(pairs[i]);
        }

        AbstractHashTableProber<IntPair, IntPair> prober =
            table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
        IntPair target = new IntPair();

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        table.close();
        assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM + ADDITIONAL_MEM, table.getFreeMemory().size());
    } catch (Exception e) {
        e.printStackTrace();
        fail("Error: " + e.getMessage());
    }
}
Example #11
Source File: CompactingHashTableTest.java From flink with Apache License 2.0
@Test
public void testTripleResize() {
    // Only CompactingHashTable
    try {
        final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
        final Random rnd = new Random(RANDOM_SEED);
        final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

        List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
        CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
        table.open();

        for (int i = 0; i < NUM_PAIRS; i++) {
            table.insert(pairs[i]);
        }

        AbstractHashTableProber<IntPair, IntPair> prober =
            table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
        IntPair target = new IntPair();

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        // make sure there is enough memory for resize
        memory.addAll(getMemory(2 * ADDITIONAL_MEM));
        b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        table.close();
        assertEquals("Memory lost", NUM_MEM_PAGES + 4 * ADDITIONAL_MEM, table.getFreeMemory().size());
    } catch (Exception e) {
        e.printStackTrace();
        fail("Error: " + e.getMessage());
    }
}
Example #12
Source File: CompactingHashTableTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testDoubleResize() {
    // Only CompactingHashTable
    try {
        final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
        final Random rnd = new Random(RANDOM_SEED);
        final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

        List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
        CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
        table.open();

        for (int i = 0; i < NUM_PAIRS; i++) {
            table.insert(pairs[i]);
        }

        AbstractHashTableProber<IntPair, IntPair> prober =
            table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
        IntPair target = new IntPair();

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        table.close();
        assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM + ADDITIONAL_MEM, table.getFreeMemory().size());
    } catch (Exception e) {
        e.printStackTrace();
        fail("Error: " + e.getMessage());
    }
}
Example #13
Source File: CompactingHashTableTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testResize() {
    // Only CompactingHashTable
    try {
        final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
        final Random rnd = new Random(RANDOM_SEED);
        final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

        List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
        CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
        table.open();

        for (int i = 0; i < NUM_PAIRS; i++) {
            table.insert(pairs[i]);
        }

        AbstractHashTableProber<IntPair, IntPair> prober =
            table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
        IntPair target = new IntPair();

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);

        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }

        table.close();
        assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM, table.getFreeMemory().size());
    } catch (Exception e) {
        e.printStackTrace();
        fail("Error: " + e.getMessage());
    }
}
Example #14
Source File: CompactingHashTableTest.java From Flink-CEPplus with Apache License 2.0
/**
 * This test validates that new inserts (rather than updates) in "insertOrReplace()" properly
 * react to out of memory conditions.
 */
@Test
public void testInsertsWithInsertOrReplace() {
    try {
        final int numElements = 1000;

        final String longString = getLongString(10000);
        List<MemorySegment> memory = getMemory(1000, 32 * 1024);

        // we create a hash table that thinks the records are super large. that makes it choose initially
        // a lot of memory for the partition buffers, and start with a smaller hash table. that way
        // we trigger a hash table growth early.
        CompactingHashTable<Tuple2<Long, String>> table = new CompactingHashTable<>(
                tuple2LongStringSerializer, tuple2LongStringComparator, memory, 100);
        table.open();

        // first, we insert some elements
        for (long i = 0; i < numElements; i++) {
            table.insertOrReplaceRecord(Tuple2.of(i, longString));
        }

        // now, we replace the same elements, causing fragmentation
        for (long i = 0; i < numElements; i++) {
            table.insertOrReplaceRecord(Tuple2.of(i, longString));
        }

        // now we insert an additional set of elements. without compaction during this insertion,
        // the memory will run out
        for (long i = 0; i < numElements; i++) {
            table.insertOrReplaceRecord(Tuple2.of(i + numElements, longString));
        }

        // check the results
        CompactingHashTable<Tuple2<Long, String>>.HashTableProber<Tuple2<Long, String>> prober =
                table.getProber(tuple2LongStringComparator, new SameTypePairComparator<>(tuple2LongStringComparator));
        Tuple2<Long, String> reuse = new Tuple2<>();

        for (long i = 0; i < numElements; i++) {
            assertNotNull(prober.getMatchFor(Tuple2.of(i, longString), reuse));
            assertNotNull(prober.getMatchFor(Tuple2.of(i + numElements, longString), reuse));
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}