org.apache.flink.runtime.plugable.SerializationDelegate Java Examples
The following examples show how to use
org.apache.flink.runtime.plugable.SerializationDelegate.
Each example is taken from an open-source project; the source file and license are noted above each snippet.
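Before the examples, a minimal sketch of the underlying pattern may help: a SerializationDelegate wraps a TypeSerializer once and is then reused for many records by swapping the current value in with setInstance(), which lets the network stack treat every record as an IOReadableWritable without a per-record wrapper allocation. The sketch only assumes the core classes that also appear in the examples below (SerializationDelegate, IntSerializer, DataOutputSerializer); the surrounding main method is illustrative scaffolding, not Flink code.

import org.apache.flink.api.common.typeutils.base.IntSerializer;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.apache.flink.runtime.plugable.SerializationDelegate;

public class SerializationDelegateSketch {

    public static void main(String[] args) throws Exception {
        // created once: the delegate only holds the serializer and the current instance
        SerializationDelegate<Integer> delegate =
                new SerializationDelegate<>(IntSerializer.INSTANCE);

        DataOutputSerializer out = new DataOutputSerializer(64);

        // per record: swap the value in, then hand the delegate to the output view
        for (int value : new int[] {1, 2, 3}) {
            delegate.setInstance(value);
            delegate.write(out); // forwards to IntSerializer.serialize(value, out)
        }

        System.out.println("bytes written: " + out.length());
    }
}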
Example #1
Source File: RecordWriterOutput.java From flink with Apache License 2.0
@SuppressWarnings("unchecked")
public RecordWriterOutput(
        RecordWriter<SerializationDelegate<StreamRecord<OUT>>> recordWriter,
        TypeSerializer<OUT> outSerializer,
        OutputTag outputTag,
        StreamStatusProvider streamStatusProvider) {
    checkNotNull(recordWriter);
    this.outputTag = outputTag;
    // generic hack: cast the writer to generic Object type so we can use it
    // with multiplexed records and watermarks
    this.recordWriter = (RecordWriter<SerializationDelegate<StreamElement>>)
            (RecordWriter<?>) recordWriter;

    TypeSerializer<StreamElement> outRecordSerializer = new StreamElementSerializer<>(outSerializer);

    if (outSerializer != null) {
        serializationDelegate = new SerializationDelegate<StreamElement>(outRecordSerializer);
    }

    this.streamStatusProvider = checkNotNull(streamStatusProvider);
}
Example #2
Source File: StreamTask.java From Flink-CEPplus with Apache License 2.0
@VisibleForTesting
public static <OUT> List<RecordWriter<SerializationDelegate<StreamRecord<OUT>>>> createRecordWriters(
        StreamConfig configuration,
        Environment environment) {
    List<RecordWriter<SerializationDelegate<StreamRecord<OUT>>>> recordWriters = new ArrayList<>();
    List<StreamEdge> outEdgesInOrder = configuration.getOutEdgesInOrder(environment.getUserClassLoader());
    Map<Integer, StreamConfig> chainedConfigs =
            configuration.getTransitiveChainedTaskConfigsWithSelf(environment.getUserClassLoader());

    for (int i = 0; i < outEdgesInOrder.size(); i++) {
        StreamEdge edge = outEdgesInOrder.get(i);
        recordWriters.add(
            createRecordWriter(
                edge,
                i,
                environment,
                environment.getTaskInfo().getTaskName(),
                chainedConfigs.get(edge.getSourceId()).getBufferTimeout()));
    }
    return recordWriters;
}
Example #3
Source File: StreamTask.java From Flink-CEPplus with Apache License 2.0
private static <OUT> RecordWriter<SerializationDelegate<StreamRecord<OUT>>> createRecordWriter(
        StreamEdge edge,
        int outputIndex,
        Environment environment,
        String taskName,
        long bufferTimeout) {
    @SuppressWarnings("unchecked")
    StreamPartitioner<OUT> outputPartitioner = (StreamPartitioner<OUT>) edge.getPartitioner();

    LOG.debug("Using partitioner {} for output {} of task {}", outputPartitioner, outputIndex, taskName);

    ResultPartitionWriter bufferWriter = environment.getWriter(outputIndex);

    // we initialize the partitioner here with the number of key groups (aka max. parallelism)
    if (outputPartitioner instanceof ConfigurableStreamPartitioner) {
        int numKeyGroups = bufferWriter.getNumTargetKeyGroups();
        if (0 < numKeyGroups) {
            ((ConfigurableStreamPartitioner) outputPartitioner).configure(numKeyGroups);
        }
    }

    RecordWriter<SerializationDelegate<StreamRecord<OUT>>> output =
            RecordWriter.createRecordWriter(bufferWriter, outputPartitioner, bufferTimeout, taskName);
    output.setMetricGroup(environment.getMetricGroup().getIOMetricGroup());
    return output;
}
Example #4
Source File: OutputEmitterTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testPartitionHash() {
    // Test for IntValue
    verifyPartitionHashSelectedChannels(50000, 100, RecordType.INTEGER);
    // Test for StringValue
    verifyPartitionHashSelectedChannels(10000, 100, RecordType.STRING);

    // Test hash corner cases
    final TestIntComparator testIntComp = new TestIntComparator();
    final ChannelSelector<SerializationDelegate<Integer>> selector = createChannelSelector(
        ShipStrategyType.PARTITION_HASH, testIntComp, 100);
    final SerializationDelegate<Integer> serializationDelegate = new SerializationDelegate<>(new IntSerializer());

    assertPartitionHashSelectedChannels(selector, serializationDelegate, Integer.MIN_VALUE, 100);
    assertPartitionHashSelectedChannels(selector, serializationDelegate, -1, 100);
    assertPartitionHashSelectedChannels(selector, serializationDelegate, 0, 100);
    assertPartitionHashSelectedChannels(selector, serializationDelegate, 1, 100);
    assertPartitionHashSelectedChannels(selector, serializationDelegate, Integer.MAX_VALUE, 100);
}
Example #5
Source File: RecordWriterOutput.java From flink with Apache License 2.0
@SuppressWarnings("unchecked")
public RecordWriterOutput(
        RecordWriter<SerializationDelegate<StreamRecord<OUT>>> recordWriter,
        TypeSerializer<OUT> outSerializer,
        OutputTag outputTag,
        StreamStatusProvider streamStatusProvider) {
    checkNotNull(recordWriter);
    this.outputTag = outputTag;
    // generic hack: cast the writer to generic Object type so we can use it
    // with multiplexed records and watermarks
    this.recordWriter = (RecordWriter<SerializationDelegate<StreamElement>>)
            (RecordWriter<?>) recordWriter;

    TypeSerializer<StreamElement> outRecordSerializer = new StreamElementSerializer<>(outSerializer);

    if (outSerializer != null) {
        serializationDelegate = new SerializationDelegate<StreamElement>(outRecordSerializer);
    }

    this.streamStatusProvider = checkNotNull(streamStatusProvider);
}
Example #6
Source File: OperatorChain.java From Flink-CEPplus with Apache License 2.0
private RecordWriterOutput<OUT> createStreamOutput(
        RecordWriter<SerializationDelegate<StreamRecord<OUT>>> recordWriter,
        StreamEdge edge,
        StreamConfig upStreamConfig,
        Environment taskEnvironment) {
    OutputTag sideOutputTag = edge.getOutputTag(); // OutputTag, return null if not sideOutput

    TypeSerializer outSerializer = null;

    if (edge.getOutputTag() != null) {
        // side output
        outSerializer = upStreamConfig.getTypeSerializerSideOut(
                edge.getOutputTag(), taskEnvironment.getUserClassLoader());
    } else {
        // main output
        outSerializer = upStreamConfig.getTypeSerializerOut(taskEnvironment.getUserClassLoader());
    }

    return new RecordWriterOutput<>(recordWriter, outSerializer, sideOutputTag, this);
}
Example #7
Source File: OutputEmitterTest.java From Flink-CEPplus with Apache License 2.0
private boolean verifyWrongPartitionHashKey(int position, int fieldNum) {
    final TypeComparator<Record> comparator = new RecordComparatorFactory(
        new int[] {position}, new Class[] {IntValue.class}).createComparator();
    final ChannelSelector<SerializationDelegate<Record>> selector = createChannelSelector(
        ShipStrategyType.PARTITION_HASH, comparator, 100);
    final SerializationDelegate<Record> delegate = new SerializationDelegate<>(new RecordSerializerFactory().getSerializer());

    Record record = new Record(2);
    record.setField(fieldNum, new IntValue(1));
    delegate.setInstance(record);

    try {
        selector.selectChannel(delegate);
    } catch (NullKeyFieldException re) {
        Assert.assertEquals(position, re.getFieldNumber());
        return true;
    }
    return false;
}
Example #8
Source File: RecordWriterOutput.java From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("unchecked")
public RecordWriterOutput(
        RecordWriter<SerializationDelegate<StreamRecord<OUT>>> recordWriter,
        TypeSerializer<OUT> outSerializer,
        OutputTag outputTag,
        StreamStatusProvider streamStatusProvider) {
    checkNotNull(recordWriter);
    this.outputTag = outputTag;
    // generic hack: cast the writer to generic Object type so we can use it
    // with multiplexed records and watermarks
    this.recordWriter = (RecordWriter<SerializationDelegate<StreamElement>>)
            (RecordWriter<?>) recordWriter;

    TypeSerializer<StreamElement> outRecordSerializer = new StreamElementSerializer<>(outSerializer);

    if (outSerializer != null) {
        serializationDelegate = new SerializationDelegate<StreamElement>(outRecordSerializer);
    }

    this.streamStatusProvider = checkNotNull(streamStatusProvider);
}
Example #9
Source File: OperatorChain.java From flink with Apache License 2.0
private RecordWriterOutput<OUT> createStreamOutput(
        RecordWriter<SerializationDelegate<StreamRecord<OUT>>> recordWriter,
        StreamEdge edge,
        StreamConfig upStreamConfig,
        Environment taskEnvironment) {
    OutputTag sideOutputTag = edge.getOutputTag(); // OutputTag, return null if not sideOutput

    TypeSerializer outSerializer = null;

    if (edge.getOutputTag() != null) {
        // side output
        outSerializer = upStreamConfig.getTypeSerializerSideOut(
                edge.getOutputTag(), taskEnvironment.getUserClassLoader());
    } else {
        // main output
        outSerializer = upStreamConfig.getTypeSerializerOut(taskEnvironment.getUserClassLoader());
    }

    return new RecordWriterOutput<>(recordWriter, outSerializer, sideOutputTag, this);
}
Example #10
Source File: StreamTask.java From flink with Apache License 2.0
private static <OUT> List<RecordWriter<SerializationDelegate<StreamRecord<OUT>>>> createRecordWriters(
        StreamConfig configuration,
        Environment environment) {
    List<RecordWriter<SerializationDelegate<StreamRecord<OUT>>>> recordWriters = new ArrayList<>();
    List<StreamEdge> outEdgesInOrder = configuration.getOutEdgesInOrder(environment.getUserClassLoader());
    Map<Integer, StreamConfig> chainedConfigs =
            configuration.getTransitiveChainedTaskConfigsWithSelf(environment.getUserClassLoader());

    for (int i = 0; i < outEdgesInOrder.size(); i++) {
        StreamEdge edge = outEdgesInOrder.get(i);
        recordWriters.add(
            createRecordWriter(
                edge,
                i,
                environment,
                environment.getTaskInfo().getTaskName(),
                chainedConfigs.get(edge.getSourceId()).getBufferTimeout()));
    }
    return recordWriters;
}
Example #11
Source File: OutputEmitterTest.java From flink with Apache License 2.0
@Test
public void testPartitionHash() {
    // Test for IntValue
    verifyPartitionHashSelectedChannels(50000, 100, RecordType.INTEGER);
    // Test for StringValue
    verifyPartitionHashSelectedChannels(10000, 100, RecordType.STRING);

    // Test hash corner cases
    final TestIntComparator testIntComp = new TestIntComparator();
    final ChannelSelector<SerializationDelegate<Integer>> selector = createChannelSelector(
        ShipStrategyType.PARTITION_HASH, testIntComp, 100);
    final SerializationDelegate<Integer> serializationDelegate = new SerializationDelegate<>(new IntSerializer());

    assertPartitionHashSelectedChannels(selector, serializationDelegate, Integer.MIN_VALUE, 100);
    assertPartitionHashSelectedChannels(selector, serializationDelegate, -1, 100);
    assertPartitionHashSelectedChannels(selector, serializationDelegate, 0, 100);
    assertPartitionHashSelectedChannels(selector, serializationDelegate, 1, 100);
    assertPartitionHashSelectedChannels(selector, serializationDelegate, Integer.MAX_VALUE, 100);
}
Example #12
Source File: OutputEmitter.java From Flink-CEPplus with Apache License 2.0
@Override
public final int selectChannel(SerializationDelegate<T> record) {
    switch (strategy) {
        case FORWARD:
            return forward();
        case PARTITION_RANDOM:
        case PARTITION_FORCED_REBALANCE:
            return robin(numberOfChannels);
        case PARTITION_HASH:
            return hashPartitionDefault(record.getInstance(), numberOfChannels);
        case PARTITION_CUSTOM:
            return customPartition(record.getInstance(), numberOfChannels);
        case PARTITION_RANGE:
            return rangePartition(record.getInstance(), numberOfChannels);
        default:
            throw new UnsupportedOperationException("Unsupported distribution strategy: " + strategy.name());
    }
}
Example #13
Source File: OutputEmitter.java From flink with Apache License 2.0
@Override
public final int selectChannel(SerializationDelegate<T> record) {
    switch (strategy) {
        case FORWARD:
            return forward();
        case PARTITION_RANDOM:
        case PARTITION_FORCED_REBALANCE:
            return robin(numberOfChannels);
        case PARTITION_HASH:
            return hashPartitionDefault(record.getInstance(), numberOfChannels);
        case PARTITION_CUSTOM:
            return customPartition(record.getInstance(), numberOfChannels);
        case PARTITION_RANGE:
            return rangePartition(record.getInstance(), numberOfChannels);
        default:
            throw new UnsupportedOperationException("Unsupported distribution strategy: " + strategy.name());
    }
}
Example #14
Source File: OutputEmitterTest.java From flink with Apache License 2.0
@Test
public void testPartitionHash() {
    // Test for IntValue
    verifyPartitionHashSelectedChannels(50000, 100, RecordType.INTEGER);
    // Test for StringValue
    verifyPartitionHashSelectedChannels(10000, 100, RecordType.STRING);

    // Test hash corner cases
    final TestIntComparator testIntComp = new TestIntComparator();
    final ChannelSelector<SerializationDelegate<Integer>> selector = createChannelSelector(
        ShipStrategyType.PARTITION_HASH, testIntComp, 100);
    final SerializationDelegate<Integer> serializationDelegate = new SerializationDelegate<>(new IntSerializer());

    assertPartitionHashSelectedChannels(selector, serializationDelegate, Integer.MIN_VALUE, 100);
    assertPartitionHashSelectedChannels(selector, serializationDelegate, -1, 100);
    assertPartitionHashSelectedChannels(selector, serializationDelegate, 0, 100);
    assertPartitionHashSelectedChannels(selector, serializationDelegate, 1, 100);
    assertPartitionHashSelectedChannels(selector, serializationDelegate, Integer.MAX_VALUE, 100);
}
Example #15
Source File: OperatorChain.java From flink with Apache License 2.0
private RecordWriterOutput<OUT> createStreamOutput(
        RecordWriter<SerializationDelegate<StreamRecord<OUT>>> recordWriter,
        StreamEdge edge,
        StreamConfig upStreamConfig,
        Environment taskEnvironment) {
    OutputTag sideOutputTag = edge.getOutputTag(); // OutputTag, return null if not sideOutput

    TypeSerializer outSerializer = null;

    if (edge.getOutputTag() != null) {
        // side output
        outSerializer = upStreamConfig.getTypeSerializerSideOut(
                edge.getOutputTag(), taskEnvironment.getUserClassLoader());
    } else {
        // main output
        outSerializer = upStreamConfig.getTypeSerializerOut(taskEnvironment.getUserClassLoader());
    }

    return new RecordWriterOutput<>(recordWriter, outSerializer, sideOutputTag, this);
}
Example #16
Source File: StreamTask.java From flink with Apache License 2.0
@VisibleForTesting
public static <OUT> List<RecordWriter<SerializationDelegate<StreamRecord<OUT>>>> createRecordWriters(
        StreamConfig configuration,
        Environment environment) {
    List<RecordWriter<SerializationDelegate<StreamRecord<OUT>>>> recordWriters = new ArrayList<>();
    List<StreamEdge> outEdgesInOrder = configuration.getOutEdgesInOrder(environment.getUserClassLoader());
    Map<Integer, StreamConfig> chainedConfigs =
            configuration.getTransitiveChainedTaskConfigsWithSelf(environment.getUserClassLoader());

    for (int i = 0; i < outEdgesInOrder.size(); i++) {
        StreamEdge edge = outEdgesInOrder.get(i);
        recordWriters.add(
            createRecordWriter(
                edge,
                i,
                environment,
                environment.getTaskInfo().getTaskName(),
                chainedConfigs.get(edge.getSourceId()).getBufferTimeout()));
    }
    return recordWriters;
}
Example #17
Source File: OutputEmitterTest.java From flink with Apache License 2.0
private boolean verifyWrongPartitionHashKey(int position, int fieldNum) {
    final TypeComparator<Record> comparator = new RecordComparatorFactory(
        new int[] {position}, new Class[] {IntValue.class}).createComparator();
    final ChannelSelector<SerializationDelegate<Record>> selector = createChannelSelector(
        ShipStrategyType.PARTITION_HASH, comparator, 100);
    final SerializationDelegate<Record> delegate = new SerializationDelegate<>(new RecordSerializerFactory().getSerializer());

    Record record = new Record(2);
    record.setField(fieldNum, new IntValue(1));
    delegate.setInstance(record);

    try {
        selector.selectChannel(delegate);
    } catch (NullKeyFieldException re) {
        Assert.assertEquals(position, re.getFieldNumber());
        return true;
    }
    return false;
}
Example #18
Source File: OutputEmitterTest.java From flink with Apache License 2.0
private boolean verifyWrongPartitionHashKey(int position, int fieldNum) {
    final TypeComparator<Record> comparator = new RecordComparatorFactory(
        new int[] {position}, new Class[] {IntValue.class}).createComparator();
    final ChannelSelector<SerializationDelegate<Record>> selector = createChannelSelector(
        ShipStrategyType.PARTITION_HASH, comparator, 100);
    final SerializationDelegate<Record> delegate = new SerializationDelegate<>(new RecordSerializerFactory().getSerializer());

    Record record = new Record(2);
    record.setField(fieldNum, new IntValue(1));
    delegate.setInstance(record);

    try {
        selector.selectChannel(delegate);
    } catch (NullKeyFieldException re) {
        Assert.assertEquals(position, re.getFieldNumber());
        return true;
    }
    return false;
}
Example #19
Source File: OutputEmitter.java From flink with Apache License 2.0
@Override
public final int selectChannel(SerializationDelegate<T> record) {
    switch (strategy) {
        case FORWARD:
            return forward();
        case PARTITION_RANDOM:
        case PARTITION_FORCED_REBALANCE:
            return robin(numberOfChannels);
        case PARTITION_HASH:
            return hashPartitionDefault(record.getInstance(), numberOfChannels);
        case PARTITION_CUSTOM:
            return customPartition(record.getInstance(), numberOfChannels);
        case PARTITION_RANGE:
            return rangePartition(record.getInstance(), numberOfChannels);
        default:
            throw new UnsupportedOperationException("Unsupported distribution strategy: " + strategy.name());
    }
}
Example #20
Source File: OutputEmitterTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testMultiKeys() {
    final int numberOfChannels = 100;
    final int numRecords = 5000;
    final TypeComparator<Record> multiComp = new RecordComparatorFactory(
        new int[] {0, 1, 3},
        new Class[] {IntValue.class, StringValue.class, DoubleValue.class}).createComparator();

    final ChannelSelector<SerializationDelegate<Record>> selector = createChannelSelector(
        ShipStrategyType.PARTITION_HASH, multiComp, numberOfChannels);
    final SerializationDelegate<Record> delegate = new SerializationDelegate<>(new RecordSerializerFactory().getSerializer());

    int[] hits = new int[numberOfChannels];
    for (int i = 0; i < numRecords; i++) {
        Record record = new Record(4);
        record.setField(0, new IntValue(i));
        record.setField(1, new StringValue("AB" + i + "CD" + i));
        record.setField(3, new DoubleValue(i * 3.141d));
        delegate.setInstance(record);

        int channel = selector.selectChannel(delegate);
        hits[channel]++;
    }

    int totalHitCount = 0;
    for (int hit : hits) {
        assertTrue(hit > 0);
        totalHitCount += hit;
    }
    assertTrue(totalHitCount == numRecords);
}
Example #21
Source File: OutputEmitterTest.java From flink with Apache License 2.0
private int[] getSelectedChannelsHitCount(
        ShipStrategyType shipStrategyType,
        int numRecords,
        int numberOfChannels,
        Enum recordType) {
    final TypeComparator<Record> comparator = new RecordComparatorFactory(
        new int[] {0},
        new Class[] {recordType == RecordType.INTEGER ? IntValue.class : StringValue.class}).createComparator();
    final ChannelSelector<SerializationDelegate<Record>> selector =
        createChannelSelector(shipStrategyType, comparator, numberOfChannels);
    final SerializationDelegate<Record> delegate = new SerializationDelegate<>(new RecordSerializerFactory().getSerializer());

    return getSelectedChannelsHitCount(selector, delegate, recordType, numRecords, numberOfChannels);
}
Example #22
Source File: OutputEmitterTest.java From flink with Apache License 2.0
@Test
public void testMultiKeys() {
    final int numberOfChannels = 100;
    final int numRecords = 5000;
    final TypeComparator<Record> multiComp = new RecordComparatorFactory(
        new int[] {0, 1, 3},
        new Class[] {IntValue.class, StringValue.class, DoubleValue.class}).createComparator();

    final ChannelSelector<SerializationDelegate<Record>> selector = createChannelSelector(
        ShipStrategyType.PARTITION_HASH, multiComp, numberOfChannels);
    final SerializationDelegate<Record> delegate = new SerializationDelegate<>(new RecordSerializerFactory().getSerializer());

    int[] hits = new int[numberOfChannels];
    for (int i = 0; i < numRecords; i++) {
        Record record = new Record(4);
        record.setField(0, new IntValue(i));
        record.setField(1, new StringValue("AB" + i + "CD" + i));
        record.setField(3, new DoubleValue(i * 3.141d));
        delegate.setInstance(record);

        int channel = selector.selectChannel(delegate);
        hits[channel]++;
    }

    int totalHitCount = 0;
    for (int hit : hits) {
        assertTrue(hit > 0);
        totalHitCount += hit;
    }
    assertTrue(totalHitCount == numRecords);
}
Example #23
Source File: OutputEmitterTest.java From Flink-CEPplus with Apache License 2.0
private int[] getSelectedChannelsHitCount(
        ShipStrategyType shipStrategyType,
        int numRecords,
        int numberOfChannels,
        Enum recordType) {
    final TypeComparator<Record> comparator = new RecordComparatorFactory(
        new int[] {0},
        new Class[] {recordType == RecordType.INTEGER ? IntValue.class : StringValue.class}).createComparator();
    final ChannelSelector<SerializationDelegate<Record>> selector =
        createChannelSelector(shipStrategyType, comparator, numberOfChannels);
    final SerializationDelegate<Record> delegate = new SerializationDelegate<>(new RecordSerializerFactory().getSerializer());

    return getSelectedChannelsHitCount(selector, delegate, recordType, numRecords, numberOfChannels);
}
Example #24
Source File: OutputEmitterTest.java From flink with Apache License 2.0
private void assertPartitionHashSelectedChannels(
        ChannelSelector selector,
        SerializationDelegate<Integer> serializationDelegate,
        int record,
        int numberOfChannels) {
    serializationDelegate.setInstance(record);
    int selectedChannel = selector.selectChannel(serializationDelegate);
    assertTrue(selectedChannel >= 0 && selectedChannel <= numberOfChannels - 1);
}
Example #25
Source File: StreamTaskNetworkInputTest.java From flink with Apache License 2.0
private void serializeRecord(long value, BufferBuilder bufferBuilder) throws IOException {
    RecordSerializer<SerializationDelegate<StreamElement>> serializer = new SpanningRecordSerializer<>();
    SerializationDelegate<StreamElement> serializationDelegate = new SerializationDelegate<>(
        new StreamElementSerializer<>(LongSerializer.INSTANCE));
    serializationDelegate.setInstance(new StreamRecord<>(value));
    serializer.serializeRecord(serializationDelegate);

    assertFalse(serializer.copyToBufferBuilder(bufferBuilder).isFullBuffer());
}
Example #26
Source File: StreamTaskNetworkInputTest.java From flink with Apache License 2.0
private void serializeRecord(long value, BufferBuilder bufferBuilder) throws IOException {
    RecordSerializer<SerializationDelegate<StreamElement>> serializer = new SpanningRecordSerializer<>();
    SerializationDelegate<StreamElement> serializationDelegate = new SerializationDelegate<>(
        new StreamElementSerializer<>(LongSerializer.INSTANCE));
    serializationDelegate.setInstance(new StreamRecord<>(value));
    serializer.serializeRecord(serializationDelegate);

    assertFalse(serializer.copyToBufferBuilder(bufferBuilder).isFullBuffer());
}
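The two serializeRecord helpers above exercise only the write path through a SpanningRecordSerializer. For orientation, here is a hedged round-trip sketch that pairs SerializationDelegate with its read-side counterpart from the same package, NonReusingDeserializationDelegate. It assumes DataOutputSerializer#getCopyOfBuffer is available in the Flink version at hand; everything else mirrors calls already visible in the examples.

import org.apache.flink.api.common.typeutils.base.LongSerializer;
import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.apache.flink.runtime.plugable.NonReusingDeserializationDelegate;
import org.apache.flink.runtime.plugable.SerializationDelegate;
import org.apache.flink.streaming.runtime.streamrecord.StreamElement;
import org.apache.flink.streaming.runtime.streamrecord.StreamElementSerializer;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;

public class DelegateRoundTripSketch {

    public static void main(String[] args) throws Exception {
        StreamElementSerializer<Long> serializer =
                new StreamElementSerializer<>(LongSerializer.INSTANCE);

        // write side: the serialization delegate wraps the outgoing element
        SerializationDelegate<StreamElement> writeDelegate = new SerializationDelegate<>(serializer);
        writeDelegate.setInstance(new StreamRecord<>(42L));

        DataOutputSerializer buffer = new DataOutputSerializer(128);
        writeDelegate.write(buffer);

        // read side: the deserialization delegate rebuilds the element from the bytes
        NonReusingDeserializationDelegate<StreamElement> readDelegate =
                new NonReusingDeserializationDelegate<>(serializer);
        readDelegate.read(new DataInputDeserializer(buffer.getCopyOfBuffer()));

        StreamRecord<Long> roundTripped = readDelegate.getInstance().asRecord();
        System.out.println(roundTripped.getValue()); // prints 42
    }
}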
Example #27
Source File: DynamicPartitioner.java From flink-siddhi with Apache License 2.0
@Override
public int selectChannel(SerializationDelegate<StreamRecord<Tuple2<StreamRoute, Object>>> streamRecordSerializationDelegate) {
    Tuple2<StreamRoute, Object> value = streamRecordSerializationDelegate.getInstance().getValue();
    if (value.f0.getPartitionKey() == -1) {
        // random partition
        return random.nextInt(numberOfChannels);
    } else {
        return partitioner.partition(value.f0.getPartitionKey(), numberOfChannels);
    }
}
Example #28
Source File: RescalePartitioner.java From flink with Apache License 2.0
@Override
public int selectChannel(SerializationDelegate<StreamRecord<T>> record) {
    if (++nextChannelToSendTo >= numberOfChannels) {
        nextChannelToSendTo = 0;
    }
    return nextChannelToSendTo;
}
Example #29
Source File: OutputEmitterTest.java From flink with Apache License 2.0
@Test
public void testMultiKeys() {
    final int numberOfChannels = 100;
    final int numRecords = 5000;
    final TypeComparator<Record> multiComp = new RecordComparatorFactory(
        new int[] {0, 1, 3},
        new Class[] {IntValue.class, StringValue.class, DoubleValue.class}).createComparator();

    final ChannelSelector<SerializationDelegate<Record>> selector = createChannelSelector(
        ShipStrategyType.PARTITION_HASH, multiComp, numberOfChannels);
    final SerializationDelegate<Record> delegate = new SerializationDelegate<>(new RecordSerializerFactory().getSerializer());

    int[] hits = new int[numberOfChannels];
    for (int i = 0; i < numRecords; i++) {
        Record record = new Record(4);
        record.setField(0, new IntValue(i));
        record.setField(1, new StringValue("AB" + i + "CD" + i));
        record.setField(3, new DoubleValue(i * 3.141d));
        delegate.setInstance(record);

        int channel = selector.selectChannel(delegate);
        hits[channel]++;
    }

    int totalHitCount = 0;
    for (int hit : hits) {
        assertTrue(hit > 0);
        totalHitCount += hit;
    }
    assertTrue(totalHitCount == numRecords);
}
Example #30
Source File: KeyGroupStreamPartitioner.java From flink with Apache License 2.0
@Override
public int selectChannel(SerializationDelegate<StreamRecord<T>> record) {
    K key;
    try {
        key = keySelector.getKey(record.getInstance().getValue());
    } catch (Exception e) {
        throw new RuntimeException("Could not extract key from " + record.getInstance().getValue(), e);
    }
    return KeyGroupRangeAssignment.assignKeyToParallelOperator(key, maxParallelism, numberOfChannels);
}
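The partitioner examples (DynamicPartitioner, RescalePartitioner, KeyGroupStreamPartitioner) all follow the same shape: selectChannel receives the SerializationDelegate, unwraps the user value via getInstance().getValue(), and maps it to a channel index. As a closing illustration, here is a hedged sketch of a custom partitioner in that style. The class name is hypothetical, and the exact set of abstract methods on StreamPartitioner varies between Flink versions, so treat this as a pattern rather than a drop-in implementation.

import org.apache.flink.runtime.plugable.SerializationDelegate;
import org.apache.flink.streaming.runtime.partitioner.StreamPartitioner;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;

/**
 * Illustrative partitioner: routes each record by the hash of its toString(),
 * using the same delegate -> StreamRecord -> value unwrapping as the examples above.
 */
public class HashByStringPartitioner<T> extends StreamPartitioner<T> {

    private static final long serialVersionUID = 1L;

    @Override
    public int selectChannel(SerializationDelegate<StreamRecord<T>> record) {
        // unwrap: delegate -> StreamRecord -> user value
        T value = record.getInstance().getValue();
        int hash = (value == null) ? 0 : value.toString().hashCode();
        // numberOfChannels is inherited from StreamPartitioner (set via setup())
        return Math.abs(hash % numberOfChannels);
    }

    @Override
    public StreamPartitioner<T> copy() {
        return this;
    }

    @Override
    public String toString() {
        return "HASH_BY_STRING";
    }
}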