org.apache.flink.runtime.io.network.util.TestPooledBufferProvider Java Examples
The following examples show how to use org.apache.flink.runtime.io.network.util.TestPooledBufferProvider.
The source project and file are noted above each example.
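Before the examples, here is a minimal sketch of how TestPooledBufferProvider is typically used in the tests below: it is constructed with a maximum pool size and, optionally, a buffer size, and exposes counters such as getNumberOfCreatedBuffers() and getNumberOfAvailableBuffers(). This sketch is not taken from any single source file; the test class name is made up, and the counter semantics are inferred from the examples on this page.

// Minimal usage sketch (hypothetical test class; counter semantics inferred
// from the examples on this page rather than from the provider's documentation).
import static org.junit.Assert.assertEquals;

import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.runtime.io.network.util.TestPooledBufferProvider;
import org.junit.Test;

public class TestPooledBufferProviderUsageSketch {

    @Test
    public void requestAndRecycleBuffers() throws Exception {
        // A pool of at most 2 buffers, each 16 bytes large.
        TestPooledBufferProvider bufferProvider = new TestPooledBufferProvider(2, 16);

        // Requesting a buffer creates it lazily in the pool.
        Buffer buffer = bufferProvider.requestBuffer();
        assertEquals(1, bufferProvider.getNumberOfCreatedBuffers());

        // Recycling returns the buffer to the pool, making it available again.
        buffer.recycleBuffer();
        assertEquals(1, bufferProvider.getNumberOfAvailableBuffers());
    }
}

In the examples that follow, the provider usually plays the role of a BufferProvider handed to a test ResultPartitionWriter, while the tests assert on its created/available counters.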
Example #1
Source File: RecordWriterTest.java (from Flink-CEPplus, Apache License 2.0)
@Test
public void testSerializerClearedAfterClearBuffers() throws Exception {
    ResultPartitionWriter partitionWriter = spy(new RecyclingPartitionWriter(
        new TestPooledBufferProvider(1, 16)));

    RecordWriter<IntValue> recordWriter = new RecordWriter<>(partitionWriter);

    // Fill a buffer, but don't write it out.
    recordWriter.emit(new IntValue(0));

    // Clear all buffers.
    recordWriter.clearBuffers();

    // This should not throw an Exception iff the serializer state
    // has been cleared as expected.
    recordWriter.flushAll();
}
Example #2
Source File: RecordWriterTest.java (from flink, Apache License 2.0)
private void verifyBroadcastBufferOrEventIndependence(boolean broadcastEvent) throws Exception {
    @SuppressWarnings("unchecked")
    ArrayDeque<BufferConsumer>[] queues = new ArrayDeque[]{new ArrayDeque(), new ArrayDeque()};

    ResultPartitionWriter partition =
        new CollectingPartitionWriter(queues, new TestPooledBufferProvider(Integer.MAX_VALUE));
    RecordWriter<IntValue> writer = createRecordWriter(partition);

    if (broadcastEvent) {
        writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);
    } else {
        writer.broadcastEmit(new IntValue(0));
    }

    // verify added to all queues
    assertEquals(1, queues[0].size());
    assertEquals(1, queues[1].size());

    // these two buffers may share the memory but not the indices!
    Buffer buffer1 = buildSingleBuffer(queues[0].remove());
    Buffer buffer2 = buildSingleBuffer(queues[1].remove());
    assertEquals(0, buffer1.getReaderIndex());
    assertEquals(0, buffer2.getReaderIndex());

    buffer1.setReaderIndex(1);
    assertEquals("Buffer 2 shares the same reader index as buffer 1", 0, buffer2.getReaderIndex());
}
Example #3
Source File: RecordWriterTest.java (from Flink-CEPplus, Apache License 2.0)
/**
 * Tests that broadcasted events' buffers are independent (in their (reader) indices) once they
 * are put into the queue for Netty when broadcasting events to multiple channels.
 */
@Test
public void testBroadcastEventBufferIndependence() throws Exception {
    @SuppressWarnings("unchecked")
    ArrayDeque<BufferConsumer>[] queues = new ArrayDeque[]{new ArrayDeque(), new ArrayDeque()};

    ResultPartitionWriter partition =
        new CollectingPartitionWriter(queues, new TestPooledBufferProvider(Integer.MAX_VALUE));
    RecordWriter<?> writer = new RecordWriter<>(partition);

    writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);

    // Verify added to all queues
    assertEquals(1, queues[0].size());
    assertEquals(1, queues[1].size());

    // these two buffers may share the memory but not the indices!
    Buffer buffer1 = buildSingleBuffer(queues[0].remove());
    Buffer buffer2 = buildSingleBuffer(queues[1].remove());
    assertEquals(0, buffer1.getReaderIndex());
    assertEquals(0, buffer2.getReaderIndex());

    buffer1.setReaderIndex(1);
    assertEquals("Buffer 2 shares the same reader index as buffer 1", 0, buffer2.getReaderIndex());
}
Example #4
Source File: RecordWriterTest.java (from Flink-CEPplus, Apache License 2.0)
/**
 * Tests that broadcasted records' buffers are independent (in their (reader) indices) once they
 * are put into the queue for Netty when broadcasting events to multiple channels.
 */
@Test
public void testBroadcastEmitBufferIndependence() throws Exception {
    @SuppressWarnings("unchecked")
    ArrayDeque<BufferConsumer>[] queues = new ArrayDeque[]{new ArrayDeque(), new ArrayDeque()};

    ResultPartitionWriter partition =
        new CollectingPartitionWriter(queues, new TestPooledBufferProvider(Integer.MAX_VALUE));
    RecordWriter<IntValue> writer = new RecordWriter<>(partition);

    writer.broadcastEmit(new IntValue(0));
    writer.flushAll();

    // Verify added to all queues
    assertEquals(1, queues[0].size());
    assertEquals(1, queues[1].size());

    // these two buffers may share the memory but not the indices!
    Buffer buffer1 = buildSingleBuffer(queues[0].remove());
    Buffer buffer2 = buildSingleBuffer(queues[1].remove());
    assertEquals(0, buffer1.getReaderIndex());
    assertEquals(0, buffer2.getReaderIndex());

    buffer1.setReaderIndex(1);
    assertEquals("Buffer 2 shares the same reader index as buffer 1", 0, buffer2.getReaderIndex());
}
Example #5
Source File: RecordWriterTest.java (from flink, Apache License 2.0)
@Test
public void testSerializerClearedAfterClearBuffers() throws Exception {
    ResultPartitionWriter partitionWriter = spy(new RecyclingPartitionWriter(
        new TestPooledBufferProvider(1, 16)));

    RecordWriter<IntValue> recordWriter = createRecordWriter(partitionWriter);

    // Fill a buffer, but don't write it out.
    recordWriter.emit(new IntValue(0));

    // Clear all buffers.
    recordWriter.clearBuffers();

    // This should not throw an Exception iff the serializer state
    // has been cleared as expected.
    recordWriter.flushAll();
}
Example #6
Source File: RecordWriterTest.java (from flink, Apache License 2.0)
@Test
public void testSerializerClearedAfterClearBuffers() throws Exception {
    ResultPartitionWriter partitionWriter = spy(new RecyclingPartitionWriter(
        new TestPooledBufferProvider(1, 16)));

    RecordWriter<IntValue> recordWriter = new RecordWriterBuilder().build(partitionWriter);

    // Fill a buffer, but don't write it out.
    recordWriter.emit(new IntValue(0));

    // Clear all buffers.
    recordWriter.clearBuffers();

    // This should not throw an Exception iff the serializer state
    // has been cleared as expected.
    recordWriter.flushAll();
}
Example #7
Source File: RecordWriterTest.java (from flink, Apache License 2.0)
private void verifyBroadcastBufferOrEventIndependence(boolean broadcastEvent) throws Exception {
    @SuppressWarnings("unchecked")
    ArrayDeque<BufferConsumer>[] queues = new ArrayDeque[]{new ArrayDeque(), new ArrayDeque()};

    ResultPartitionWriter partition =
        new CollectingPartitionWriter(queues, new TestPooledBufferProvider(Integer.MAX_VALUE));
    RecordWriter<IntValue> writer = new RecordWriterBuilder().build(partition);

    if (broadcastEvent) {
        writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);
    } else {
        writer.broadcastEmit(new IntValue(0));
    }

    // verify added to all queues
    assertEquals(1, queues[0].size());
    assertEquals(1, queues[1].size());

    // these two buffers may share the memory but not the indices!
    Buffer buffer1 = buildSingleBuffer(queues[0].remove());
    Buffer buffer2 = buildSingleBuffer(queues[1].remove());
    assertEquals(0, buffer1.getReaderIndex());
    assertEquals(0, buffer2.getReaderIndex());

    buffer1.setReaderIndex(1);
    assertEquals("Buffer 2 shares the same reader index as buffer 1", 0, buffer2.getReaderIndex());
}
Example #8
Source File: StreamMockEnvironment.java (from flink, Apache License 2.0)
public <T> void addOutput(final Collection<Object> outputList, final TypeSerializer<T> serializer) {
    try {
        outputs.add(new RecordOrEventCollectingResultPartitionWriter<T>(
            outputList,
            new TestPooledBufferProvider(Integer.MAX_VALUE),
            serializer));
    } catch (Throwable t) {
        t.printStackTrace();
        fail(t.getMessage());
    }
}
Example #9
Source File: MockEnvironment.java (from Flink-CEPplus, Apache License 2.0)
public void addOutput(final List<Record> outputList) {
    try {
        outputs.add(new RecordCollectingResultPartitionWriter(
            outputList,
            new TestPooledBufferProvider(Integer.MAX_VALUE)));
    } catch (Throwable t) {
        t.printStackTrace();
        fail(t.getMessage());
    }
}
Example #10
Source File: RecordWriterTest.java (from flink, Apache License 2.0)
/**
 * Tests that records are broadcast via {@link RecordWriter#broadcastEmit(IOReadableWritable)}.
 */
@Test
public void testBroadcastEmitRecord() throws Exception {
    final int numberOfChannels = 4;
    final int bufferSize = 32;
    final int numValues = 8;
    final int serializationLength = 4;

    @SuppressWarnings("unchecked")
    final Queue<BufferConsumer>[] queues = new Queue[numberOfChannels];
    for (int i = 0; i < numberOfChannels; i++) {
        queues[i] = new ArrayDeque<>();
    }

    final TestPooledBufferProvider bufferProvider =
        new TestPooledBufferProvider(Integer.MAX_VALUE, bufferSize);
    final ResultPartitionWriter partitionWriter = new CollectingPartitionWriter(queues, bufferProvider);
    final RecordWriter<SerializationTestType> writer = createRecordWriter(partitionWriter);
    final RecordDeserializer<SerializationTestType> deserializer =
        new SpillingAdaptiveSpanningRecordDeserializer<>(
            new String[]{ tempFolder.getRoot().getAbsolutePath() });

    final ArrayDeque<SerializationTestType> serializedRecords = new ArrayDeque<>();
    final Iterable<SerializationTestType> records =
        Util.randomRecords(numValues, SerializationTestTypeFactory.INT);
    for (SerializationTestType record : records) {
        serializedRecords.add(record);
        writer.broadcastEmit(record);
    }

    final int numRequiredBuffers = numValues / (bufferSize / (4 + serializationLength));
    if (isBroadcastWriter) {
        assertEquals(numRequiredBuffers, bufferProvider.getNumberOfCreatedBuffers());
    } else {
        assertEquals(numRequiredBuffers * numberOfChannels, bufferProvider.getNumberOfCreatedBuffers());
    }

    for (int i = 0; i < numberOfChannels; i++) {
        assertEquals(numRequiredBuffers, queues[i].size());
        verifyDeserializationResults(
            queues[i], deserializer, serializedRecords.clone(), numRequiredBuffers, numValues);
    }
}
Example #11
Source File: RecordWriterTest.java (from flink, Apache License 2.0)
/**
 * Tests that event buffers are properly recycled when broadcasting events
 * to multiple channels.
 */
@Test
public void testBroadcastEventBufferReferenceCounting() throws Exception {
    @SuppressWarnings("unchecked")
    ArrayDeque<BufferConsumer>[] queues = new ArrayDeque[] { new ArrayDeque(), new ArrayDeque() };

    ResultPartitionWriter partition =
        new CollectingPartitionWriter(queues, new TestPooledBufferProvider(Integer.MAX_VALUE));
    RecordWriter<?> writer = createRecordWriter(partition);

    writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);

    // Verify added to all queues
    assertEquals(1, queues[0].size());
    assertEquals(1, queues[1].size());

    // get references to buffer consumers (copies from the original event buffer consumer)
    BufferConsumer bufferConsumer1 = queues[0].getFirst();
    BufferConsumer bufferConsumer2 = queues[1].getFirst();

    // process all collected events (recycles the buffer)
    for (int i = 0; i < queues.length; i++) {
        assertTrue(parseBuffer(queues[i].remove(), i).isEvent());
    }

    assertTrue(bufferConsumer1.isRecycled());
    assertTrue(bufferConsumer2.isRecycled());
}
Example #12
Source File: RecordWriterTest.java (from flink, Apache License 2.0)
/**
 * Tests broadcasting events when no records have been emitted yet.
 */
@Test
public void testBroadcastEventNoRecords() throws Exception {
    int numberOfChannels = 4;
    int bufferSize = 32;

    @SuppressWarnings("unchecked")
    Queue<BufferConsumer>[] queues = new Queue[numberOfChannels];
    for (int i = 0; i < numberOfChannels; i++) {
        queues[i] = new ArrayDeque<>();
    }

    TestPooledBufferProvider bufferProvider = new TestPooledBufferProvider(Integer.MAX_VALUE, bufferSize);
    ResultPartitionWriter partitionWriter = new CollectingPartitionWriter(queues, bufferProvider);
    RecordWriter<ByteArrayIO> writer = createRecordWriter(partitionWriter);
    CheckpointBarrier barrier = new CheckpointBarrier(
        Integer.MAX_VALUE + 919192L,
        Integer.MAX_VALUE + 18828228L,
        CheckpointOptions.forCheckpointWithDefaultLocation());

    // No records emitted yet, broadcast should not request a buffer
    writer.broadcastEvent(barrier);

    assertEquals(0, bufferProvider.getNumberOfCreatedBuffers());

    for (int i = 0; i < numberOfChannels; i++) {
        assertEquals(1, queues[i].size());
        BufferOrEvent boe = parseBuffer(queues[i].remove(), i);
        assertTrue(boe.isEvent());
        assertEquals(barrier, boe.getEvent());
        assertEquals(0, queues[i].size());
    }
}
Example #13
Source File: BroadcastRecordWriterTest.java (from flink, Apache License 2.0)
/**
 * FLINK-17780: Tests that a shared buffer (or memory segment) of a buffer builder is only freed
 * when all consumers are closed.
 */
@Test
public void testRandomEmitAndBufferRecycling() throws Exception {
    int recordSize = 8;

    final TestPooledBufferProvider bufferProvider = new TestPooledBufferProvider(2, 2 * recordSize);
    final KeepingPartitionWriter partitionWriter = new KeepingPartitionWriter(bufferProvider) {
        @Override
        public int getNumberOfSubpartitions() {
            return 2;
        }
    };
    final BroadcastRecordWriter<SerializationTestType> writer =
        new BroadcastRecordWriter<>(partitionWriter, 0, "test");

    // force materialization of both buffers for easier availability tests
    List<Buffer> buffers = Arrays.asList(bufferProvider.requestBuffer(), bufferProvider.requestBuffer());
    buffers.forEach(Buffer::recycleBuffer);
    assertEquals(2, bufferProvider.getNumberOfAvailableBuffers());

    // fill first buffer
    writer.randomEmit(new IntType(1), 0);
    writer.broadcastEmit(new IntType(2));
    assertEquals(1, bufferProvider.getNumberOfAvailableBuffers());

    // simulate consumption of first buffer consumer; this should not free buffers
    assertEquals(1, partitionWriter.getAddedBufferConsumers(0).size());
    closeConsumer(partitionWriter, 0, 2 * recordSize);
    assertEquals(1, bufferProvider.getNumberOfAvailableBuffers());

    // use second buffer
    writer.broadcastEmit(new IntType(3));
    assertEquals(0, bufferProvider.getNumberOfAvailableBuffers());

    // fully free first buffer
    assertEquals(2, partitionWriter.getAddedBufferConsumers(1).size());
    closeConsumer(partitionWriter, 1, recordSize);
    assertEquals(1, bufferProvider.getNumberOfAvailableBuffers());
}
Example #14
Source File: RecordWriterDelegateTest.java (from flink, Apache License 2.0)
private RecordWriter createRecordWriter(ArrayDeque<BufferConsumer>[] queues) {
    final ResultPartitionWriter partition = new RecordWriterTest.CollectingPartitionWriter(
        queues,
        new TestPooledBufferProvider(1));

    return new RecordWriterBuilder().build(partition);
}
Example #15
Source File: MockEnvironment.java (from flink, Apache License 2.0)
public void addOutput(final List<Record> outputList) {
    try {
        outputs.add(new RecordCollectingResultPartitionWriter(
            outputList,
            new TestPooledBufferProvider(Integer.MAX_VALUE)));
    } catch (Throwable t) {
        t.printStackTrace();
        fail(t.getMessage());
    }
}
Example #16
Source File: StreamMockEnvironment.java (from flink, Apache License 2.0)
public <T> void addOutput(final Collection<Object> outputList, final TypeSerializer<T> serializer) {
    try {
        outputs.add(new RecordOrEventCollectingResultPartitionWriter<T>(
            outputList,
            new TestPooledBufferProvider(Integer.MAX_VALUE),
            serializer));
    } catch (Throwable t) {
        t.printStackTrace();
        fail(t.getMessage());
    }
}
Example #17
Source File: RecordWriterTest.java (from Flink-CEPplus, Apache License 2.0)
/**
 * Tests broadcasting events when no records have been emitted yet.
 */
@Test
public void testBroadcastEventNoRecords() throws Exception {
    int numberOfChannels = 4;
    int bufferSize = 32;

    @SuppressWarnings("unchecked")
    Queue<BufferConsumer>[] queues = new Queue[numberOfChannels];
    for (int i = 0; i < numberOfChannels; i++) {
        queues[i] = new ArrayDeque<>();
    }

    TestPooledBufferProvider bufferProvider = new TestPooledBufferProvider(Integer.MAX_VALUE, bufferSize);
    ResultPartitionWriter partitionWriter = new CollectingPartitionWriter(queues, bufferProvider);
    RecordWriter<ByteArrayIO> writer = new RecordWriter<>(partitionWriter);
    CheckpointBarrier barrier = new CheckpointBarrier(
        Integer.MAX_VALUE + 919192L,
        Integer.MAX_VALUE + 18828228L,
        CheckpointOptions.forCheckpointWithDefaultLocation());

    // No records emitted yet, broadcast should not request a buffer
    writer.broadcastEvent(barrier);

    assertEquals(0, bufferProvider.getNumberOfCreatedBuffers());

    for (int i = 0; i < numberOfChannels; i++) {
        assertEquals(1, queues[i].size());
        BufferOrEvent boe = parseBuffer(queues[i].remove(), i);
        assertTrue(boe.isEvent());
        assertEquals(barrier, boe.getEvent());
        assertEquals(0, queues[i].size());
    }
}
Example #18
Source File: RecordWriterTest.java (from flink, Apache License 2.0)
/**
 * Tests broadcasting events when no records have been emitted yet.
 */
@Test
public void testBroadcastEventNoRecords() throws Exception {
    int numberOfChannels = 4;
    int bufferSize = 32;

    @SuppressWarnings("unchecked")
    Queue<BufferConsumer>[] queues = new Queue[numberOfChannels];
    for (int i = 0; i < numberOfChannels; i++) {
        queues[i] = new ArrayDeque<>();
    }

    TestPooledBufferProvider bufferProvider = new TestPooledBufferProvider(Integer.MAX_VALUE, bufferSize);
    ResultPartitionWriter partitionWriter = new CollectingPartitionWriter(queues, bufferProvider);
    RecordWriter<ByteArrayIO> writer = new RecordWriterBuilder().build(partitionWriter);
    CheckpointBarrier barrier = new CheckpointBarrier(
        Integer.MAX_VALUE + 919192L,
        Integer.MAX_VALUE + 18828228L,
        CheckpointOptions.forCheckpointWithDefaultLocation());

    // No records emitted yet, broadcast should not request a buffer
    writer.broadcastEvent(barrier);

    assertEquals(0, bufferProvider.getNumberOfCreatedBuffers());

    for (int i = 0; i < numberOfChannels; i++) {
        assertEquals(1, queues[i].size());
        BufferOrEvent boe = parseBuffer(queues[i].remove(), i);
        assertTrue(boe.isEvent());
        assertEquals(barrier, boe.getEvent());
        assertEquals(0, queues[i].size());
    }
}
Example #19
Source File: MockEnvironment.java (from flink, Apache License 2.0)
public void addOutput(final List<Record> outputList) {
    try {
        outputs.add(new RecordCollectingResultPartitionWriter(
            outputList,
            new TestPooledBufferProvider(Integer.MAX_VALUE)));
    } catch (Throwable t) {
        t.printStackTrace();
        fail(t.getMessage());
    }
}
Example #20
Source File: StreamMockEnvironment.java (from Flink-CEPplus, Apache License 2.0)
public <T> void addOutput(final Collection<Object> outputList, final TypeSerializer<T> serializer) {
    try {
        outputs.add(new RecordOrEventCollectingResultPartitionWriter<T>(
            outputList,
            new TestPooledBufferProvider(Integer.MAX_VALUE),
            serializer));
    } catch (Throwable t) {
        t.printStackTrace();
        fail(t.getMessage());
    }
}
Example #21
Source File: RecordWriterTest.java (from Flink-CEPplus, Apache License 2.0)
/**
 * Tests that event buffers are properly recycled when broadcasting events
 * to multiple channels.
 */
@Test
public void testBroadcastEventBufferReferenceCounting() throws Exception {
    @SuppressWarnings("unchecked")
    ArrayDeque<BufferConsumer>[] queues = new ArrayDeque[] { new ArrayDeque(), new ArrayDeque() };

    ResultPartitionWriter partition =
        new CollectingPartitionWriter(queues, new TestPooledBufferProvider(Integer.MAX_VALUE));
    RecordWriter<?> writer = new RecordWriter<>(partition);

    writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);

    // Verify added to all queues
    assertEquals(1, queues[0].size());
    assertEquals(1, queues[1].size());

    // get references to buffer consumers (copies from the original event buffer consumer)
    BufferConsumer bufferConsumer1 = queues[0].getFirst();
    BufferConsumer bufferConsumer2 = queues[1].getFirst();

    // process all collected events (recycles the buffer)
    for (int i = 0; i < queues.length; i++) {
        assertTrue(parseBuffer(queues[i].remove(), i).isEvent());
    }

    assertTrue(bufferConsumer1.isRecycled());
    assertTrue(bufferConsumer2.isRecycled());
}
Example #22
Source File: RecordWriterTest.java (from flink, Apache License 2.0)
/**
 * Tests that event buffers are properly recycled when broadcasting events
 * to multiple channels.
 */
@Test
public void testBroadcastEventBufferReferenceCounting() throws Exception {
    @SuppressWarnings("unchecked")
    ArrayDeque<BufferConsumer>[] queues = new ArrayDeque[] { new ArrayDeque(), new ArrayDeque() };

    ResultPartitionWriter partition =
        new CollectingPartitionWriter(queues, new TestPooledBufferProvider(Integer.MAX_VALUE));
    RecordWriter<?> writer = new RecordWriterBuilder().build(partition);

    writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);

    // Verify added to all queues
    assertEquals(1, queues[0].size());
    assertEquals(1, queues[1].size());

    // get references to buffer consumers (copies from the original event buffer consumer)
    BufferConsumer bufferConsumer1 = queues[0].getFirst();
    BufferConsumer bufferConsumer2 = queues[1].getFirst();

    // process all collected events (recycles the buffer)
    for (int i = 0; i < queues.length; i++) {
        assertTrue(parseBuffer(queues[i].remove(), i).isEvent());
    }

    assertTrue(bufferConsumer1.isRecycled());
    assertTrue(bufferConsumer2.isRecycled());
}
Example #23
Source File: RecordWriterTest.java (from flink, Apache License 2.0)
/**
 * Tests a fix for FLINK-2089.
 *
 * @see <a href="https://issues.apache.org/jira/browse/FLINK-2089">FLINK-2089</a>
 */
@Test
public void testClearBuffersAfterInterruptDuringBlockingBufferRequest() throws Exception {
    ExecutorService executor = null;

    try {
        executor = Executors.newSingleThreadExecutor();

        TestPooledBufferProvider bufferProvider = new TestPooledBufferProvider(1);

        KeepingPartitionWriter partitionWriter = new KeepingPartitionWriter(bufferProvider);

        final RecordWriter<IntValue> recordWriter = createRecordWriter(partitionWriter);

        CountDownLatch waitLock = new CountDownLatch(1);
        Future<?> result = executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                IntValue val = new IntValue(0);

                try {
                    recordWriter.emit(val);
                    recordWriter.flushAll();
                    waitLock.countDown();
                    recordWriter.emit(val);
                } catch (InterruptedException e) {
                    recordWriter.clearBuffers();
                }

                return null;
            }
        });

        waitLock.await();

        // Interrupt the Thread.
        //
        // The second emit call requests a new buffer and blocks the thread.
        // When interrupting the thread at this point, clearing the buffers
        // should not recycle any buffer.
        result.cancel(true);

        recordWriter.clearBuffers();

        // Verify that the written out buffer has only been recycled once
        // (by the partition writer), so no buffer recycled.
        assertEquals(0, bufferProvider.getNumberOfAvailableBuffers());

        partitionWriter.close();
        assertEquals(1, bufferProvider.getNumberOfAvailableBuffers());
    } finally {
        if (executor != null) {
            executor.shutdown();
        }
    }
}
Example #24
Source File: SubtaskCheckpointCoordinatorTest.java (from flink, Apache License 2.0)
@Test
public void testBroadcastCancelCheckpointMarkerOnAbortingFromCoordinator() throws Exception {
    OneInputStreamTaskTestHarness<String, String> testHarness = new OneInputStreamTaskTestHarness<>(
        OneInputStreamTask::new,
        1,
        1,
        BasicTypeInfo.STRING_TYPE_INFO,
        BasicTypeInfo.STRING_TYPE_INFO);

    testHarness.setupOutputForSingletonOperatorChain();
    StreamConfig streamConfig = testHarness.getStreamConfig();
    streamConfig.setStreamOperator(new MapOperator());

    testHarness.invoke();
    testHarness.waitForTaskRunning();

    MockEnvironment mockEnvironment = MockEnvironment.builder().build();
    SubtaskCheckpointCoordinator subtaskCheckpointCoordinator = new MockSubtaskCheckpointCoordinatorBuilder()
        .setEnvironment(mockEnvironment)
        .build();

    TestPooledBufferProvider bufferProvider = new TestPooledBufferProvider(1, 4096);
    ArrayList<Object> recordOrEvents = new ArrayList<>();
    StreamElementSerializer<String> stringStreamElementSerializer =
        new StreamElementSerializer<>(StringSerializer.INSTANCE);
    ResultPartitionWriter resultPartitionWriter = new RecordOrEventCollectingResultPartitionWriter<>(
        recordOrEvents, bufferProvider, stringStreamElementSerializer);
    mockEnvironment.addOutputs(Collections.singletonList(resultPartitionWriter));

    OneInputStreamTask<String, String> task = testHarness.getTask();
    OperatorChain<String, OneInputStreamOperator<String, String>> operatorChain = new OperatorChain<>(
        task, StreamTask.createRecordWriterDelegate(streamConfig, mockEnvironment));
    long checkpointId = 42L;

    // notify checkpoint aborted before execution.
    subtaskCheckpointCoordinator.notifyCheckpointAborted(checkpointId, operatorChain, () -> true);
    subtaskCheckpointCoordinator.checkpointState(
        new CheckpointMetaData(checkpointId, System.currentTimeMillis()),
        CheckpointOptions.forCheckpointWithDefaultLocation(),
        new CheckpointMetrics(),
        operatorChain,
        () -> true);

    assertEquals(1, recordOrEvents.size());
    Object recordOrEvent = recordOrEvents.get(0);

    // ensure CancelCheckpointMarker is broadcast downstream.
    assertTrue(recordOrEvent instanceof CancelCheckpointMarker);
    assertEquals(checkpointId, ((CancelCheckpointMarker) recordOrEvent).getCheckpointId());

    testHarness.endInput();
    testHarness.waitForTaskCompletion();
}
Example #25
Source File: ServerTransportErrorHandlingTest.java (from flink, Apache License 2.0)
/**
 * Verifies that remote closes trigger the release of all resources.
 */
@Test
public void testRemoteClose() throws Exception {
    final TestPooledBufferProvider outboundBuffers = new TestPooledBufferProvider(16);

    final CountDownLatch sync = new CountDownLatch(1);

    final ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);

    when(partitionManager
        .createSubpartitionView(any(ResultPartitionID.class), anyInt(), any(BufferAvailabilityListener.class)))
        .thenAnswer(new Answer<ResultSubpartitionView>() {
            @Override
            public ResultSubpartitionView answer(InvocationOnMock invocationOnMock) throws Throwable {
                BufferAvailabilityListener listener = (BufferAvailabilityListener) invocationOnMock.getArguments()[2];
                listener.notifyDataAvailable();
                return new CancelPartitionRequestTest.InfiniteSubpartitionView(outboundBuffers, sync);
            }
        });

    NettyProtocol protocol = new NettyProtocol(partitionManager, mock(TaskEventDispatcher.class)) {
        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new ChannelHandler[]{
                new NettyMessage.NettyMessageEncoder(),
                // Close on read
                new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                        ctx.channel().close();
                    }
                }
            };
        }
    };

    NettyTestUtil.NettyServerAndClient serverAndClient = null;

    try {
        serverAndClient = initServerAndClient(protocol, createConfig());

        Channel ch = connect(serverAndClient);

        // Write something to trigger close by server
        ch.writeAndFlush(new NettyMessage.PartitionRequest(
            new ResultPartitionID(), 0, new InputChannelID(), Integer.MAX_VALUE));

        // Wait for the notification
        if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
            fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis() +
                " ms to be notified about released partition.");
        }
    } finally {
        shutdown(serverAndClient);
    }
}
Example #26
Source File: CancelPartitionRequestTest.java (from flink, Apache License 2.0)
@Test
public void testDuplicateCancel() throws Exception {
    NettyServerAndClient serverAndClient = null;

    try {
        final TestPooledBufferProvider outboundBuffers = new TestPooledBufferProvider(16);

        ResultPartitionManager partitions = mock(ResultPartitionManager.class);

        ResultPartitionID pid = new ResultPartitionID();

        final CountDownLatch sync = new CountDownLatch(1);

        final ResultSubpartitionView view = spy(new InfiniteSubpartitionView(outboundBuffers, sync));

        // Return infinite subpartition
        when(partitions.createSubpartitionView(eq(pid), eq(0), any(BufferAvailabilityListener.class)))
            .thenAnswer(new Answer<ResultSubpartitionView>() {
                @Override
                public ResultSubpartitionView answer(InvocationOnMock invocationOnMock) throws Throwable {
                    BufferAvailabilityListener listener = (BufferAvailabilityListener) invocationOnMock.getArguments()[2];
                    listener.notifyDataAvailable();
                    return view;
                }
            });

        NettyProtocol protocol = new NettyProtocol(partitions, mock(TaskEventDispatcher.class));

        serverAndClient = initServerAndClient(protocol);

        Channel ch = connect(serverAndClient);

        // Request for non-existing input channel => results in cancel request
        InputChannelID inputChannelId = new InputChannelID();
        ch.writeAndFlush(new PartitionRequest(pid, 0, inputChannelId, Integer.MAX_VALUE)).await();

        // Wait for the notification
        if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
            fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis() +
                " ms to be notified about cancelled partition.");
        }

        ch.writeAndFlush(new CancelPartitionRequest(inputChannelId)).await();

        ch.close();

        NettyTestUtil.awaitClose(ch);

        verify(view, times(1)).releaseAllResources();
    } finally {
        shutdown(serverAndClient);
    }
}
Example #27
Source File: CancelPartitionRequestTest.java (from flink, Apache License 2.0)
/**
 * Verifies that requests for non-existing (failed/cancelled) input channels are properly
 * cancelled. The receiver receives data, but there is no input channel to receive the data.
 * This should cancel the request.
 */
@Test
public void testCancelPartitionRequest() throws Exception {
    NettyServerAndClient serverAndClient = null;

    try {
        TestPooledBufferProvider outboundBuffers = new TestPooledBufferProvider(16);

        ResultPartitionManager partitions = mock(ResultPartitionManager.class);

        ResultPartitionID pid = new ResultPartitionID();

        CountDownLatch sync = new CountDownLatch(1);

        final ResultSubpartitionView view = spy(new InfiniteSubpartitionView(outboundBuffers, sync));

        // Return infinite subpartition
        when(partitions.createSubpartitionView(eq(pid), eq(0), any(BufferAvailabilityListener.class)))
            .thenAnswer(new Answer<ResultSubpartitionView>() {
                @Override
                public ResultSubpartitionView answer(InvocationOnMock invocationOnMock) throws Throwable {
                    BufferAvailabilityListener listener = (BufferAvailabilityListener) invocationOnMock.getArguments()[2];
                    listener.notifyDataAvailable();
                    return view;
                }
            });

        NettyProtocol protocol = new NettyProtocol(partitions, mock(TaskEventDispatcher.class));

        serverAndClient = initServerAndClient(protocol);

        Channel ch = connect(serverAndClient);

        // Request for non-existing input channel => results in cancel request
        ch.writeAndFlush(new PartitionRequest(pid, 0, new InputChannelID(), Integer.MAX_VALUE)).await();

        // Wait for the notification
        if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
            fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis() +
                " ms to be notified about cancelled partition.");
        }

        verify(view, times(1)).releaseAllResources();
    } finally {
        shutdown(serverAndClient);
    }
}
Example #28
Source File: RecordWriterTest.java (from Flink-CEPplus, Apache License 2.0)
/**
 * Tests broadcasting events when records have been emitted. The emitted
 * records cover all three {@link SerializationResult} types.
 */
@Test
public void testBroadcastEventMixedRecords() throws Exception {
    Random rand = new XORShiftRandom();
    int numberOfChannels = 4;
    int bufferSize = 32;
    int lenBytes = 4; // serialized length

    @SuppressWarnings("unchecked")
    Queue<BufferConsumer>[] queues = new Queue[numberOfChannels];
    for (int i = 0; i < numberOfChannels; i++) {
        queues[i] = new ArrayDeque<>();
    }

    TestPooledBufferProvider bufferProvider = new TestPooledBufferProvider(Integer.MAX_VALUE, bufferSize);
    ResultPartitionWriter partitionWriter = new CollectingPartitionWriter(queues, bufferProvider);
    RecordWriter<ByteArrayIO> writer = new RecordWriter<>(partitionWriter);
    CheckpointBarrier barrier = new CheckpointBarrier(
        Integer.MAX_VALUE + 1292L,
        Integer.MAX_VALUE + 199L,
        CheckpointOptions.forCheckpointWithDefaultLocation());

    // Emit records on some channels first (requesting buffers), then
    // broadcast the event. The record buffers should be emitted first, then
    // the event. After the event, no new buffer should be requested.

    // (i) Smaller than the buffer size (single buffer request => 1)
    byte[] bytes = new byte[bufferSize / 2];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));

    // (ii) Larger than the buffer size (two buffer requests => 1 + 2)
    bytes = new byte[bufferSize + 1];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));

    // (iii) Exactly the buffer size (single buffer request => 1 + 2 + 1)
    bytes = new byte[bufferSize - lenBytes];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));

    // (iv) Nothing on the 4th channel (no buffer request => 1 + 2 + 1 + 0 = 4)

    // (v) Broadcast the event
    writer.broadcastEvent(barrier);

    assertEquals(4, bufferProvider.getNumberOfCreatedBuffers());

    BufferOrEvent boe;
    assertEquals(2, queues[0].size()); // 1 buffer + 1 event
    assertTrue(parseBuffer(queues[0].remove(), 0).isBuffer());
    assertEquals(3, queues[1].size()); // 2 buffers + 1 event
    assertTrue(parseBuffer(queues[1].remove(), 1).isBuffer());
    assertTrue(parseBuffer(queues[1].remove(), 1).isBuffer());
    assertEquals(2, queues[2].size()); // 1 buffer + 1 event
    assertTrue(parseBuffer(queues[2].remove(), 2).isBuffer());
    assertEquals(1, queues[3].size()); // 0 buffers + 1 event

    // every queue's last element should be the event
    for (int i = 0; i < numberOfChannels; i++) {
        boe = parseBuffer(queues[i].remove(), i);
        assertTrue(boe.isEvent());
        assertEquals(barrier, boe.getEvent());
    }
}
Example #29
Source File: RecordWriterTest.java (from Flink-CEPplus, Apache License 2.0)
/**
 * The results of emitting records via BroadcastPartitioner or broadcasting records directly are the same,
 * that is, all the target channels can receive the whole output.
 *
 * @param isBroadcastEmit whether using {@link RecordWriter#broadcastEmit(IOReadableWritable)} or not
 */
private void emitRecordWithBroadcastPartitionerOrBroadcastEmitRecord(boolean isBroadcastEmit) throws Exception {
    final int numberOfChannels = 4;
    final int bufferSize = 32;
    final int numValues = 8;
    final int serializationLength = 4;

    @SuppressWarnings("unchecked")
    final Queue<BufferConsumer>[] queues = new Queue[numberOfChannels];
    for (int i = 0; i < numberOfChannels; i++) {
        queues[i] = new ArrayDeque<>();
    }

    final TestPooledBufferProvider bufferProvider =
        new TestPooledBufferProvider(Integer.MAX_VALUE, bufferSize);
    final ResultPartitionWriter partitionWriter = new CollectingPartitionWriter(queues, bufferProvider);
    final ChannelSelector selector = new OutputEmitter(ShipStrategyType.BROADCAST, 0);
    final RecordWriter<SerializationTestType> writer =
        RecordWriter.createRecordWriter(partitionWriter, selector, 0, "test");
    final RecordDeserializer<SerializationTestType> deserializer =
        new SpillingAdaptiveSpanningRecordDeserializer<>(
            new String[]{ tempFolder.getRoot().getAbsolutePath() });

    final ArrayDeque<SerializationTestType> serializedRecords = new ArrayDeque<>();
    final Iterable<SerializationTestType> records =
        Util.randomRecords(numValues, SerializationTestTypeFactory.INT);
    for (SerializationTestType record : records) {
        serializedRecords.add(record);

        if (isBroadcastEmit) {
            writer.broadcastEmit(record);
        } else {
            writer.emit(record);
        }
    }

    final int requiredBuffers = numValues / (bufferSize / (4 + serializationLength));
    for (int i = 0; i < numberOfChannels; i++) {
        assertEquals(requiredBuffers, queues[i].size());

        final ArrayDeque<SerializationTestType> expectedRecords = serializedRecords.clone();
        int assertRecords = 0;
        for (int j = 0; j < requiredBuffers; j++) {
            Buffer buffer = buildSingleBuffer(queues[i].remove());
            deserializer.setNextBuffer(buffer);

            assertRecords += DeserializationUtils.deserializeRecords(expectedRecords, deserializer);
        }
        Assert.assertEquals(numValues, assertRecords);
    }
}
Example #30
Source File: CancelPartitionRequestTest.java (from Flink-CEPplus, Apache License 2.0)
/**
 * Verifies that requests for non-existing (failed/cancelled) input channels are properly
 * cancelled. The receiver receives data, but there is no input channel to receive the data.
 * This should cancel the request.
 */
@Test
public void testCancelPartitionRequest() throws Exception {
    NettyServerAndClient serverAndClient = null;

    try {
        TestPooledBufferProvider outboundBuffers = new TestPooledBufferProvider(16);

        ResultPartitionManager partitions = mock(ResultPartitionManager.class);

        ResultPartitionID pid = new ResultPartitionID();

        CountDownLatch sync = new CountDownLatch(1);

        final ResultSubpartitionView view = spy(new InfiniteSubpartitionView(outboundBuffers, sync));

        // Return infinite subpartition
        when(partitions.createSubpartitionView(eq(pid), eq(0), any(BufferAvailabilityListener.class)))
            .thenAnswer(new Answer<ResultSubpartitionView>() {
                @Override
                public ResultSubpartitionView answer(InvocationOnMock invocationOnMock) throws Throwable {
                    BufferAvailabilityListener listener = (BufferAvailabilityListener) invocationOnMock.getArguments()[2];
                    listener.notifyDataAvailable();
                    return view;
                }
            });

        NettyProtocol protocol = new NettyProtocol(
            partitions, mock(TaskEventDispatcher.class), true);

        serverAndClient = initServerAndClient(protocol);

        Channel ch = connect(serverAndClient);

        // Request for non-existing input channel => results in cancel request
        ch.writeAndFlush(new PartitionRequest(pid, 0, new InputChannelID(), Integer.MAX_VALUE)).await();

        // Wait for the notification
        if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
            fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis() +
                " ms to be notified about cancelled partition.");
        }

        verify(view, times(1)).releaseAllResources();
        verify(view, times(0)).notifySubpartitionConsumed();
    } finally {
        shutdown(serverAndClient);
    }
}