Java Code Examples for org.apache.flink.runtime.io.network.api.writer.RecordWriter#flushAll()
The following examples show how to use
org.apache.flink.runtime.io.network.api.writer.RecordWriter#flushAll() .
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: NetworkStackThroughputITCase.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override public void invoke() throws Exception { RecordReader<SpeedTestRecord> reader = new RecordReader<>( getEnvironment().getInputGate(0), SpeedTestRecord.class, getEnvironment().getTaskManagerInfo().getTmpDirectories()); RecordWriter<SpeedTestRecord> writer = new RecordWriter<>(getEnvironment().getWriter(0)); try { SpeedTestRecord record; while ((record = reader.next()) != null) { writer.emit(record); } } finally { reader.clearBuffers(); writer.clearBuffers(); writer.flushAll(); } }
Example 2
Source File: SlotCountExceedingParallelismTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override public void invoke() throws Exception { RecordWriter<IntValue> writer = new RecordWriter<>(getEnvironment().getWriter(0)); final int numberOfTimesToSend = getTaskConfiguration().getInteger(CONFIG_KEY, 0); final IntValue subtaskIndex = new IntValue( getEnvironment().getTaskInfo().getIndexOfThisSubtask()); try { for (int i = 0; i < numberOfTimesToSend; i++) { writer.emit(subtaskIndex); } writer.flushAll(); } finally { writer.clearBuffers(); } }
Example 3
Source File: NetworkStackThroughputITCase.java From flink with Apache License 2.0 | 6 votes |
@Override public void invoke() throws Exception { RecordReader<SpeedTestRecord> reader = new RecordReader<>( getEnvironment().getInputGate(0), SpeedTestRecord.class, getEnvironment().getTaskManagerInfo().getTmpDirectories()); RecordWriter<SpeedTestRecord> writer = new RecordWriterBuilder().build(getEnvironment().getWriter(0)); try { SpeedTestRecord record; while ((record = reader.next()) != null) { writer.emit(record); } } finally { reader.clearBuffers(); writer.clearBuffers(); writer.flushAll(); } }
Example 4
Source File: ShuffleCompressionITCase.java From flink with Apache License 2.0 | 6 votes |
@Override public void invoke() throws Exception { ResultPartitionWriter resultPartitionWriter = getEnvironment().getWriter(0); RecordWriterBuilder<LongValue> recordWriterBuilder = new RecordWriterBuilder<>(); if (getEnvironment().getExecutionConfig().getExecutionMode() == ExecutionMode.PIPELINED) { // enable output flush for pipeline mode recordWriterBuilder.setTimeout(100); } if (useBroadcastPartitioner) { recordWriterBuilder.setChannelSelector(new BroadcastPartitioner()); } RecordWriter<LongValue> writer = recordWriterBuilder.build(resultPartitionWriter); for (int i = 0; i < NUM_RECORDS_TO_SEND; ++i) { writer.broadcastEmit(RECORD_TO_SEND); } writer.flushAll(); writer.clearBuffers(); }
Example 5
Source File: NetworkStackThroughputITCase.java From flink with Apache License 2.0 | 6 votes |
@Override public void invoke() throws Exception { RecordReader<SpeedTestRecord> reader = new RecordReader<>( getEnvironment().getInputGate(0), SpeedTestRecord.class, getEnvironment().getTaskManagerInfo().getTmpDirectories()); RecordWriter<SpeedTestRecord> writer = new RecordWriterBuilder<SpeedTestRecord>().build(getEnvironment().getWriter(0)); try { SpeedTestRecord record; while ((record = reader.next()) != null) { writer.emit(record); } } finally { reader.clearBuffers(); writer.clearBuffers(); writer.flushAll(); } }
Example 6
Source File: SlotCountExceedingParallelismTest.java From flink with Apache License 2.0 | 6 votes |
@Override public void invoke() throws Exception { RecordWriter<IntValue> writer = new RecordWriterBuilder<IntValue>().build(getEnvironment().getWriter(0)); final int numberOfTimesToSend = getTaskConfiguration().getInteger(CONFIG_KEY, 0); final IntValue subtaskIndex = new IntValue( getEnvironment().getTaskInfo().getIndexOfThisSubtask()); try { for (int i = 0; i < numberOfTimesToSend; i++) { writer.emit(subtaskIndex); } writer.flushAll(); } finally { writer.clearBuffers(); } }
Example 7
Source File: ScheduleOrUpdateConsumersTest.java From flink with Apache License 2.0 | 5 votes |
@Override public void invoke() throws Exception { List<RecordWriter<IntValue>> writers = Lists.newArrayListWithCapacity(2); // The order of intermediate result creation in the job graph specifies which produced // result partition is pipelined/blocking. final RecordWriter<IntValue> pipelinedWriter = new RecordWriterBuilder<IntValue>().build(getEnvironment().getWriter(0)); final RecordWriter<IntValue> blockingWriter = new RecordWriterBuilder<IntValue>().build(getEnvironment().getWriter(1)); writers.add(pipelinedWriter); writers.add(blockingWriter); final int numberOfTimesToSend = getTaskConfiguration().getInteger(CONFIG_KEY, 0); final IntValue subtaskIndex = new IntValue( getEnvironment().getTaskInfo().getIndexOfThisSubtask()); // Produce the first intermediate result and then the second in a serial fashion. for (RecordWriter<IntValue> writer : writers) { try { for (int i = 0; i < numberOfTimesToSend; i++) { writer.emit(subtaskIndex); } writer.flushAll(); } finally { writer.clearBuffers(); } } }
Example 8
Source File: NetworkStackThroughputITCase.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override public void invoke() throws Exception { RecordWriter<SpeedTestRecord> writer = new RecordWriter<>(getEnvironment().getWriter(0)); try { // Determine the amount of data to send per subtask int dataVolumeGb = getTaskConfiguration().getInteger(NetworkStackThroughputITCase.DATA_VOLUME_GB_CONFIG_KEY, 1); long dataMbPerSubtask = (dataVolumeGb * 10) / getCurrentNumberOfSubtasks(); long numRecordsToEmit = (dataMbPerSubtask * 1024 * 1024) / SpeedTestRecord.RECORD_SIZE; LOG.info(String.format("%d/%d: Producing %d records (each record: %d bytes, total: %.2f GB)", getIndexInSubtaskGroup() + 1, getCurrentNumberOfSubtasks(), numRecordsToEmit, SpeedTestRecord.RECORD_SIZE, dataMbPerSubtask / 1024.0)); boolean isSlow = getTaskConfiguration().getBoolean(IS_SLOW_SENDER_CONFIG_KEY, false); int numRecords = 0; SpeedTestRecord record = new SpeedTestRecord(); for (long i = 0; i < numRecordsToEmit; i++) { if (isSlow && (numRecords++ % IS_SLOW_EVERY_NUM_RECORDS) == 0) { Thread.sleep(IS_SLOW_SLEEP_MS); } writer.emit(record); } } finally { writer.clearBuffers(); writer.flushAll(); } }
Example 9
Source File: TestingAbstractInvokables.java From flink with Apache License 2.0 | 5 votes |
@Override public void invoke() throws Exception { final RecordWriter<IntValue> writer = new RecordWriterBuilder<IntValue>().build(getEnvironment().getWriter(0)); try { writer.emit(new IntValue(42)); writer.emit(new IntValue(1337)); writer.flushAll(); } finally { writer.clearBuffers(); } }
Example 10
Source File: OutputCollector.java From flink with Apache License 2.0 | 5 votes |
@Override public void close() { for (RecordWriter<?> writer : writers) { writer.clearBuffers(); writer.flushAll(); } }
Example 11
Source File: FileBufferReaderITCase.java From flink with Apache License 2.0 | 5 votes |
@Override public void invoke() throws Exception { final RecordWriter<ByteArrayType> writer = new RecordWriterBuilder<ByteArrayType>().build(getEnvironment().getWriter(0)); final ByteArrayType bytes = new ByteArrayType(dataSource); int counter = 0; while (counter++ < numRecords) { try { writer.emit(bytes); writer.flushAll(); } finally { writer.clearBuffers(); } } }
Example 12
Source File: NetworkStackThroughputITCase.java From flink with Apache License 2.0 | 5 votes |
@Override public void invoke() throws Exception { RecordWriter<SpeedTestRecord> writer = new RecordWriterBuilder<SpeedTestRecord>().build(getEnvironment().getWriter(0)); try { // Determine the amount of data to send per subtask int dataVolumeGb = getTaskConfiguration().getInteger(NetworkStackThroughputITCase.DATA_VOLUME_GB_CONFIG_KEY, 1); long dataMbPerSubtask = (dataVolumeGb * 10) / getCurrentNumberOfSubtasks(); long numRecordsToEmit = (dataMbPerSubtask * 1024 * 1024) / SpeedTestRecord.RECORD_SIZE; LOG.info(String.format("%d/%d: Producing %d records (each record: %d bytes, total: %.2f GB)", getIndexInSubtaskGroup() + 1, getCurrentNumberOfSubtasks(), numRecordsToEmit, SpeedTestRecord.RECORD_SIZE, dataMbPerSubtask / 1024.0)); boolean isSlow = getTaskConfiguration().getBoolean(IS_SLOW_SENDER_CONFIG_KEY, false); int numRecords = 0; SpeedTestRecord record = new SpeedTestRecord(); for (long i = 0; i < numRecordsToEmit; i++) { if (isSlow && (numRecords++ % IS_SLOW_EVERY_NUM_RECORDS) == 0) { Thread.sleep(IS_SLOW_SLEEP_MS); } writer.emit(record); } } finally { writer.clearBuffers(); writer.flushAll(); } }
Example 13
Source File: ScheduleOrUpdateConsumersTest.java From flink with Apache License 2.0 | 5 votes |
@Override public void invoke() throws Exception { List<RecordWriter<IntValue>> writers = Lists.newArrayListWithCapacity(2); // The order of intermediate result creation in the job graph specifies which produced // result partition is pipelined/blocking. final RecordWriter<IntValue> pipelinedWriter = new RecordWriterBuilder().build(getEnvironment().getWriter(0)); final RecordWriter<IntValue> blockingWriter = new RecordWriterBuilder().build(getEnvironment().getWriter(1)); writers.add(pipelinedWriter); writers.add(blockingWriter); final int numberOfTimesToSend = getTaskConfiguration().getInteger(CONFIG_KEY, 0); final IntValue subtaskIndex = new IntValue( getEnvironment().getTaskInfo().getIndexOfThisSubtask()); // Produce the first intermediate result and then the second in a serial fashion. for (RecordWriter<IntValue> writer : writers) { try { for (int i = 0; i < numberOfTimesToSend; i++) { writer.emit(subtaskIndex); } writer.flushAll(); } finally { writer.clearBuffers(); } } }
Example 14
Source File: TestingAbstractInvokables.java From flink with Apache License 2.0 | 5 votes |
@Override public void invoke() throws Exception { final RecordWriter<IntValue> writer = new RecordWriterBuilder().build(getEnvironment().getWriter(0)); try { writer.emit(new IntValue(42)); writer.emit(new IntValue(1337)); writer.flushAll(); } finally { writer.clearBuffers(); } }
Example 15
Source File: OutputCollector.java From flink with Apache License 2.0 | 5 votes |
@Override public void close() { for (RecordWriter<?> writer : writers) { writer.clearBuffers(); writer.flushAll(); } }
Example 16
Source File: FileBufferReaderITCase.java From flink with Apache License 2.0 | 5 votes |
@Override public void invoke() throws Exception { final RecordWriter<ByteArrayType> writer = new RecordWriterBuilder().build(getEnvironment().getWriter(0)); final ByteArrayType bytes = new ByteArrayType(dataSource); int counter = 0; while (counter++ < numRecords) { try { writer.emit(bytes); writer.flushAll(); } finally { writer.clearBuffers(); } } }
Example 17
Source File: NetworkStackThroughputITCase.java From flink with Apache License 2.0 | 5 votes |
@Override public void invoke() throws Exception { RecordWriter<SpeedTestRecord> writer = new RecordWriterBuilder().build(getEnvironment().getWriter(0)); try { // Determine the amount of data to send per subtask int dataVolumeGb = getTaskConfiguration().getInteger(NetworkStackThroughputITCase.DATA_VOLUME_GB_CONFIG_KEY, 1); long dataMbPerSubtask = (dataVolumeGb * 10) / getCurrentNumberOfSubtasks(); long numRecordsToEmit = (dataMbPerSubtask * 1024 * 1024) / SpeedTestRecord.RECORD_SIZE; LOG.info(String.format("%d/%d: Producing %d records (each record: %d bytes, total: %.2f GB)", getIndexInSubtaskGroup() + 1, getCurrentNumberOfSubtasks(), numRecordsToEmit, SpeedTestRecord.RECORD_SIZE, dataMbPerSubtask / 1024.0)); boolean isSlow = getTaskConfiguration().getBoolean(IS_SLOW_SENDER_CONFIG_KEY, false); int numRecords = 0; SpeedTestRecord record = new SpeedTestRecord(); for (long i = 0; i < numRecordsToEmit; i++) { if (isSlow && (numRecords++ % IS_SLOW_EVERY_NUM_RECORDS) == 0) { Thread.sleep(IS_SLOW_SLEEP_MS); } writer.emit(record); } } finally { writer.clearBuffers(); writer.flushAll(); } }
Example 18
Source File: ScheduleOrUpdateConsumersTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override public void invoke() throws Exception { List<RecordWriter<IntValue>> writers = Lists.newArrayListWithCapacity(2); // The order of intermediate result creation in the job graph specifies which produced // result partition is pipelined/blocking. final RecordWriter<IntValue> pipelinedWriter = new RecordWriter<>(getEnvironment().getWriter(0)); final RecordWriter<IntValue> blockingWriter = new RecordWriter<>(getEnvironment().getWriter(1)); writers.add(pipelinedWriter); writers.add(blockingWriter); final int numberOfTimesToSend = getTaskConfiguration().getInteger(CONFIG_KEY, 0); final IntValue subtaskIndex = new IntValue( getEnvironment().getTaskInfo().getIndexOfThisSubtask()); // Produce the first intermediate result and then the second in a serial fashion. for (RecordWriter<IntValue> writer : writers) { try { for (int i = 0; i < numberOfTimesToSend; i++) { writer.emit(subtaskIndex); } writer.flushAll(); } finally { writer.clearBuffers(); } } }
Example 19
Source File: TestingAbstractInvokables.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override public void invoke() throws Exception { final RecordWriter<IntValue> writer = new RecordWriter<>(getEnvironment().getWriter(0)); try { writer.emit(new IntValue(42)); writer.emit(new IntValue(1337)); writer.flushAll(); } finally { writer.clearBuffers(); } }
Example 20
Source File: OutputCollector.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override public void close() { for (RecordWriter<?> writer : writers) { writer.clearBuffers(); writer.flushAll(); } }