Java Code Examples for org.apache.flink.runtime.state.KeyGroupRangeAssignment#assignKeyToParallelOperator()
The following examples show how to use org.apache.flink.runtime.state.KeyGroupRangeAssignment#assignKeyToParallelOperator(). They are drawn from several open-source projects; the original source file and license are noted above each example.
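For context on what the method does: assignKeyToParallelOperator(key, maxParallelism, parallelism) hashes the key into one of maxParallelism key groups, then maps that key group onto one of parallelism operator instances. Below is a minimal sketch of this two-step behavior; the demo class name and the constant values are made up for illustration, and it assumes flink-runtime is on the classpath. KeyGroupRangeAssignment exposes both intermediate steps as public static methods.

import org.apache.flink.runtime.state.KeyGroupRangeAssignment;

public class AssignKeyDemo {

    public static void main(String[] args) {
        int maxParallelism = 128; // hypothetical values, chosen for illustration
        int parallelism = 4;
        String key = "user-42";

        // direct assignment: key -> operator index in [0, parallelism)
        int operatorIndex =
            KeyGroupRangeAssignment.assignKeyToParallelOperator(key, maxParallelism, parallelism);

        // equivalent two-step computation:
        // 1. hash the key into one of maxParallelism key groups
        int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(key, maxParallelism);
        // 2. map the key group onto a parallel operator instance
        int sameIndex =
            KeyGroupRangeAssignment.computeOperatorIndexForKeyGroup(maxParallelism, parallelism, keyGroup);

        System.out.println(operatorIndex + " == " + sameIndex); // both values agree
    }
}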
Example 1
Source File: KafkaShuffleTestBase.java From flink with Apache License 2.0
@Override
public void processElement(
        Tuple3<Integer, Long, Integer> in,
        Context ctx,
        Collector<Tuple3<Integer, Long, Integer>> out) throws Exception {
    int expectedPartition = KeyGroupRangeAssignment
        .assignKeyToParallelOperator(keySelector.getKey(in), numberOfPartitions, numberOfPartitions);
    int indexOfThisSubtask = getRuntimeContext().getIndexOfThisSubtask();
    KafkaTopicPartition partition = new KafkaTopicPartition(topic, expectedPartition);

    // this is how Kafka assigns partitions to subtasks
    boolean rightAssignment =
        KafkaTopicPartitionAssigner.assign(partition, numberOfPartitions) == indexOfThisSubtask;
    boolean samePartition = (previousPartition == expectedPartition) || (previousPartition == -1);
    previousPartition = expectedPartition;

    if (!(rightAssignment && samePartition)) {
        throw new Exception("Error: Kafka partition assignment error");
    }
    out.collect(in);
}
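Passing numberOfPartitions for both the maxParallelism and the parallelism arguments makes the returned operator index coincide with the key group, so each key maps deterministically to one Kafka partition; the test then asserts that this matches the partition-to-subtask assignment performed by KafkaTopicPartitionAssigner.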
Example 2
Source File: RocksDBSerializedCompositeKeyBuilderTest.java From Flink-CEPplus with Apache License 2.0
private <K> int setKeyAndReturnKeyGroup(
        RocksDBSerializedCompositeKeyBuilder<K> compositeKeyBuilder,
        K key,
        int maxParallelism) {
    int keyGroup = KeyGroupRangeAssignment.assignKeyToParallelOperator(key, maxParallelism, maxParallelism);
    compositeKeyBuilder.setKeyAndKeyGroup(key, keyGroup);
    return keyGroup;
}
Example 3
Source File: KeyGroupStreamPartitioner.java From Flink-CEPplus with Apache License 2.0
@Override
public int selectChannel(SerializationDelegate<StreamRecord<T>> record) {
    K key;
    try {
        key = keySelector.getKey(record.getInstance().getValue());
    } catch (Exception e) {
        throw new RuntimeException("Could not extract key from " + record.getInstance().getValue(), e);
    }
    return KeyGroupRangeAssignment.assignKeyToParallelOperator(key, maxParallelism, numberOfChannels);
}
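This is the partitioner behind keyBy(): the key extracted from each record is assigned to a key group bounded by maxParallelism, and the key group determines which of the numberOfChannels downstream subtasks receives the record, so records and the keyed state for their key always land on the same subtask.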
Example 4
Source File: RegionFailoverITCase.java From flink with Apache License 2.0
@Override
public void run(SourceContext<Tuple2<Integer, Integer>> ctx) throws Exception {
    if (index < 0) {
        // not been restored, so initialize
        index = 0;
    }
    int subTaskIndex = getRuntimeContext().getIndexOfThisSubtask();
    while (isRunning && index < numElements) {
        synchronized (ctx.getCheckpointLock()) {
            int key = index / 2;
            int forwardTaskIndex =
                KeyGroupRangeAssignment.assignKeyToParallelOperator(key, MAX_PARALLELISM, NUM_OF_REGIONS);
            // pre-partition output keys
            if (forwardTaskIndex == subTaskIndex) {
                // we would send data with the same key twice
                ctx.collect(Tuple2.of(key, index));
            }
            index += 1;
        }
        if (numCompletedCheckpoints.get() < 3) {
            // not yet completed enough checkpoints, so slow down
            if (index < checkpointLatestAt) {
                // mild slow down
                Thread.sleep(1);
            } else {
                // wait until the checkpoints are completed
                while (isRunning && numCompletedCheckpoints.get() < 3) {
                    Thread.sleep(300);
                }
            }
        }
        if (jobFailedCnt.get() < NUM_OF_RESTARTS) {
            // slow down if job has not failed for 'NUM_OF_RESTARTS' times
            Thread.sleep(1);
        }
    }
}
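Here the source pre-partitions its own output: each subtask emits only the keys that assignKeyToParallelOperator would route to the subtask with the same index (with NUM_OF_REGIONS acting as the parallelism), so each key's records stay within a single failover region.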
Example 5
Source File: RocksDBSerializedCompositeKeyBuilderTest.java From flink with Apache License 2.0
private <K> int setKeyAndReturnKeyGroup(
        RocksDBSerializedCompositeKeyBuilder<K> compositeKeyBuilder,
        K key,
        int maxParallelism) {
    int keyGroup = KeyGroupRangeAssignment.assignKeyToParallelOperator(key, maxParallelism, maxParallelism);
    compositeKeyBuilder.setKeyAndKeyGroup(key, keyGroup);
    return keyGroup;
}
Example 6
Source File: KeyGroupStreamPartitioner.java From flink with Apache License 2.0
@Override
public int selectChannel(SerializationDelegate<StreamRecord<T>> record) {
    K key;
    try {
        key = keySelector.getKey(record.getInstance().getValue());
    } catch (Exception e) {
        throw new RuntimeException("Could not extract key from " + record.getInstance().getValue(), e);
    }
    return KeyGroupRangeAssignment.assignKeyToParallelOperator(key, maxParallelism, numberOfChannels);
}
Example 7
Source File: FlinkStreamingPipelineTranslator.java From beam with Apache License 2.0
private Map<Integer, ShardedKey<Integer>> generateShardedKeys(int key, int shardCount) {
    Map<Integer, ShardedKey<Integer>> shardedKeys = new HashMap<>();
    for (int shard = 0; shard < shardCount; shard++) {
        int salt = -1;
        while (true) {
            if (salt++ == Integer.MAX_VALUE) {
                throw new RuntimeException(
                    "Failed to find sharded key in [ " + Integer.MAX_VALUE + " ] iterations");
            }
            ShardedKey<Integer> shk = ShardedKey.of(Objects.hash(key, salt), shard);
            int targetPartition = shard % parallelism;

            // create the effective key the same way Beam/Flink will, so we can check
            // whether it gets allocated to the partition we want
            ByteBuffer effectiveKey = FlinkKeyUtils.encodeKey(shk, shardedKeyCoder);
            int partition =
                KeyGroupRangeAssignment.assignKeyToParallelOperator(
                    effectiveKey, maxParallelism, parallelism);

            if (partition == targetPartition) {
                shardedKeys.put(shard, shk);
                break;
            }
        }
    }
    return shardedKeys;
}
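Since Flink hashes the opaque encoded key, the assignment cannot be inverted; the loop therefore brute-forces a salt until Objects.hash(key, salt) happens to produce an effective key that lands on the target partition.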
Example 8
Source File: FlinkKafkaShuffleProducer.java From flink with Apache License 2.0
/**
 * This is the function invoked to handle each element.
 *
 * @param transaction Transaction state; elements are written to Kafka in transactions
 *                    to guarantee different levels of data consistency
 * @param next Element to handle
 * @param context Context needed to handle the element
 * @throws FlinkKafkaException for Kafka errors
 */
@Override
public void invoke(KafkaTransactionState transaction, IN next, Context context) throws FlinkKafkaException {
    checkErroneous();

    // write the timestamp to Kafka if a timestamp is available
    Long timestamp = context.timestamp();

    int[] partitions = getPartitions(transaction);
    int partitionIndex;
    try {
        partitionIndex = KeyGroupRangeAssignment
            .assignKeyToParallelOperator(keySelector.getKey(next), partitions.length, partitions.length);
    } catch (Exception e) {
        throw new RuntimeException("Failed to assign a partition number to the record", e);
    }

    ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(
        defaultTopicId,
        partitionIndex,
        timestamp,
        null,
        kafkaSerializer.serializeRecord(next, timestamp));
    pendingRecords.incrementAndGet();
    transaction.getProducer().send(record, callback);
}
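As in the Kafka shuffle test base of Example 1, partitions.length serves as both the maxParallelism and the parallelism argument, so the computed index equals the key group and each key is written to a fixed Kafka partition.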
Example 9
Source File: FlinkPartitionerTest.java From plog with Apache License 2.0
@Test
public void computePartition() {
    Random random = new Random(42L);
    byte[] id = new byte[16];
    int maxParallelism = 10393;
    int numPartitions = 1983;
    for (int i = 0; i < 40; i++) {
        random.nextBytes(id);
        String encoded = Base64.getEncoder().encodeToString(id);
        int testPartition = FlinkPartitioner.computePartition(encoded, numPartitions, maxParallelism);
        int flinkPartition =
            KeyGroupRangeAssignment.assignKeyToParallelOperator(encoded, maxParallelism, numPartitions);
        assertThat(testPartition, equalTo(flinkPartition));
    }
}
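Note that FlinkPartitioner.computePartition takes its arguments in the opposite order (numPartitions before maxParallelism) from assignKeyToParallelOperator; the test seeds the random generator so the 40 random Base64 keys are reproducible across runs.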