Java Code Examples for org.apache.flink.streaming.connectors.kafka.testutils.TestPartitionDiscoverer#createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn()
The following examples show how to use
org.apache.flink.streaming.connectors.kafka.testutils.TestPartitionDiscoverer#createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn().
The examples are taken from open-source projects; the source file and originating project are noted above each example.
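Every example below follows the same wiring pattern: a fixed list of KafkaTopicPartition objects is passed to createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(), and the resulting mock is handed to the TestPartitionDiscoverer constructor together with a KafkaTopicsDescriptor, the subtask index, the total number of subtasks, and a matching topics mock from createMockGetAllTopicsSequenceFromFixedReturn(). The minimal sketch below shows that pattern in isolation; the topic name and variable names are illustrative and not taken from the examples themselves.

// A fixed set of partitions that every discovery attempt will return.
List<KafkaTopicPartition> fixedPartitions = Arrays.asList(
    new KafkaTopicPartition("example-topic", 0),
    new KafkaTopicPartition("example-topic", 1));

// Discoverer for subtask 0 of 1, backed entirely by the fixed-return mocks.
TestPartitionDiscoverer discoverer = new TestPartitionDiscoverer(
    new KafkaTopicsDescriptor(Collections.singletonList("example-topic"), null),
    0,  // index of this subtask
    1,  // total number of subtasks
    TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(
        Collections.singletonList("example-topic")),
    TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(
        fixedPartitions));

discoverer.open();
List<KafkaTopicPartition> discovered = discoverer.discoverPartitions();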
Example 1
Source File: AbstractPartitionDiscovererTest.java, from Flink-CEPplus (Apache License 2.0)
@Test
public void testPartitionsEqualConsumersFixedPartitions() throws Exception {
    List<KafkaTopicPartition> mockGetAllPartitionsForTopicsReturn = Arrays.asList(
        new KafkaTopicPartition(TEST_TOPIC, 0),
        new KafkaTopicPartition(TEST_TOPIC, 1),
        new KafkaTopicPartition(TEST_TOPIC, 2),
        new KafkaTopicPartition(TEST_TOPIC, 3));

    int numSubtasks = mockGetAllPartitionsForTopicsReturn.size();

    // get the start index; the assertions below will fail if the assignment logic does not meet correct contracts
    int startIndex = KafkaTopicPartitionAssigner.assign(mockGetAllPartitionsForTopicsReturn.get(0), numSubtasks);

    for (int subtaskIndex = 0; subtaskIndex < mockGetAllPartitionsForTopicsReturn.size(); subtaskIndex++) {
        TestPartitionDiscoverer partitionDiscoverer = new TestPartitionDiscoverer(
            topicsDescriptor,
            subtaskIndex,
            mockGetAllPartitionsForTopicsReturn.size(),
            TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(Collections.singletonList(TEST_TOPIC)),
            TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(mockGetAllPartitionsForTopicsReturn));
        partitionDiscoverer.open();

        List<KafkaTopicPartition> initialDiscovery = partitionDiscoverer.discoverPartitions();
        assertEquals(1, initialDiscovery.size());
        assertTrue(contains(mockGetAllPartitionsForTopicsReturn, initialDiscovery.get(0).getPartition()));
        assertEquals(
            getExpectedSubtaskIndex(initialDiscovery.get(0), startIndex, numSubtasks),
            subtaskIndex);

        // subsequent discoveries should not find anything
        List<KafkaTopicPartition> secondDiscovery = partitionDiscoverer.discoverPartitions();
        List<KafkaTopicPartition> thirdDiscovery = partitionDiscoverer.discoverPartitions();
        assertEquals(0, secondDiscovery.size());
        assertEquals(0, thirdDiscovery.size());
    }
}
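The helpers contains(...) and getExpectedSubtaskIndex(...) used above are private utilities of the test class and are not reproduced in this snippet. Given the round-robin contract of KafkaTopicPartitionAssigner.assign(), the expected-subtask computation presumably reduces to a sketch like the one below; this is a hedged reconstruction, not the verbatim Flink helper.

// Hypothetical reconstruction: with a round-robin assigner, the partition with id p
// is expected on subtask (startIndex + p) % numSubtasks.
private static int getExpectedSubtaskIndex(KafkaTopicPartition partition, int startIndex, int numSubtasks) {
    return (startIndex + partition.getPartition()) % numSubtasks;
}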
Example 2
Source File: FlinkKafkaConsumerBaseTest.java, from Flink-CEPplus (Apache License 2.0)
private void checkFilterRestoredPartitionsWithDisovered(
        List<String> restoredKafkaTopics,
        List<String> initKafkaTopics,
        List<String> expectedSubscribedPartitions,
        Boolean disableFiltering) throws Exception {
    final AbstractPartitionDiscoverer discoverer = new TestPartitionDiscoverer(
        new KafkaTopicsDescriptor(initKafkaTopics, null),
        0,
        1,
        TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(initKafkaTopics),
        TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(
            initKafkaTopics.stream()
                .map(topic -> new KafkaTopicPartition(topic, 0))
                .collect(Collectors.toList())));
    final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>(initKafkaTopics, discoverer);
    if (disableFiltering) {
        consumer.disableFilterRestoredPartitionsWithSubscribedTopics();
    }

    final TestingListState<Tuple2<KafkaTopicPartition, Long>> listState = new TestingListState<>();

    for (int i = 0; i < restoredKafkaTopics.size(); i++) {
        listState.add(new Tuple2<>(new KafkaTopicPartition(restoredKafkaTopics.get(i), 0), 12345L));
    }

    setupConsumer(consumer, true, listState, true, 0, 1);

    Map<KafkaTopicPartition, Long> subscribedPartitionsToStartOffsets = consumer.getSubscribedPartitionsToStartOffsets();

    assertEquals(
        new HashSet<>(expectedSubscribedPartitions),
        subscribedPartitionsToStartOffsets
            .keySet()
            .stream()
            .map(partition -> partition.getTopic())
            .collect(Collectors.toSet()));
}
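This method is a private helper rather than a test in its own right; the enclosing @Test methods invoke it with different combinations of restored and currently subscribed topics. A hypothetical invocation (the test name and topic names are illustrative) might look like this:

@Test
public void testRestoredTopicsAreFilteredAgainstSubscribedTopics() throws Exception {
    // The restored state references a topic that is no longer subscribed; with
    // filtering enabled (disableFiltering = false), only "topic-a" should survive.
    checkFilterRestoredPartitionsWithDisovered(
        Arrays.asList("topic-a", "topic-b"),    // topics present in the restored state
        Collections.singletonList("topic-a"),   // topics the consumer currently subscribes to
        Collections.singletonList("topic-a"),   // topics expected after filtering
        false);
}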
Example 3
Source File: AbstractPartitionDiscovererTest.java, from flink (Apache License 2.0)
@Test
public void testPartitionsEqualConsumersFixedPartitions() throws Exception {
    List<KafkaTopicPartition> mockGetAllPartitionsForTopicsReturn = Arrays.asList(
        new KafkaTopicPartition(TEST_TOPIC, 0),
        new KafkaTopicPartition(TEST_TOPIC, 1),
        new KafkaTopicPartition(TEST_TOPIC, 2),
        new KafkaTopicPartition(TEST_TOPIC, 3));

    int numSubtasks = mockGetAllPartitionsForTopicsReturn.size();

    // get the start index; the assertions below will fail if the assignment logic does not meet correct contracts
    int startIndex = KafkaTopicPartitionAssigner.assign(mockGetAllPartitionsForTopicsReturn.get(0), numSubtasks);

    for (int subtaskIndex = 0; subtaskIndex < mockGetAllPartitionsForTopicsReturn.size(); subtaskIndex++) {
        TestPartitionDiscoverer partitionDiscoverer = new TestPartitionDiscoverer(
            topicsDescriptor,
            subtaskIndex,
            mockGetAllPartitionsForTopicsReturn.size(),
            TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(Collections.singletonList(TEST_TOPIC)),
            TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(mockGetAllPartitionsForTopicsReturn));
        partitionDiscoverer.open();

        List<KafkaTopicPartition> initialDiscovery = partitionDiscoverer.discoverPartitions();
        assertEquals(1, initialDiscovery.size());
        assertTrue(contains(mockGetAllPartitionsForTopicsReturn, initialDiscovery.get(0).getPartition()));
        assertEquals(
            getExpectedSubtaskIndex(initialDiscovery.get(0), startIndex, numSubtasks),
            subtaskIndex);

        // subsequent discoveries should not find anything
        List<KafkaTopicPartition> secondDiscovery = partitionDiscoverer.discoverPartitions();
        List<KafkaTopicPartition> thirdDiscovery = partitionDiscoverer.discoverPartitions();
        assertEquals(0, secondDiscovery.size());
        assertEquals(0, thirdDiscovery.size());
    }
}
Example 4
Source File: FlinkKafkaConsumerBaseTest.java, from flink (Apache License 2.0)
private void checkFilterRestoredPartitionsWithDisovered(
        List<String> restoredKafkaTopics,
        List<String> initKafkaTopics,
        List<String> expectedSubscribedPartitions,
        Boolean disableFiltering) throws Exception {
    final AbstractPartitionDiscoverer discoverer = new TestPartitionDiscoverer(
        new KafkaTopicsDescriptor(initKafkaTopics, null),
        0,
        1,
        TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(initKafkaTopics),
        TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(
            initKafkaTopics.stream()
                .map(topic -> new KafkaTopicPartition(topic, 0))
                .collect(Collectors.toList())));
    final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>(initKafkaTopics, discoverer);
    if (disableFiltering) {
        consumer.disableFilterRestoredPartitionsWithSubscribedTopics();
    }

    final TestingListState<Tuple2<KafkaTopicPartition, Long>> listState = new TestingListState<>();

    for (int i = 0; i < restoredKafkaTopics.size(); i++) {
        listState.add(new Tuple2<>(new KafkaTopicPartition(restoredKafkaTopics.get(i), 0), 12345L));
    }

    setupConsumer(consumer, true, listState, true, 0, 1);

    Map<KafkaTopicPartition, Long> subscribedPartitionsToStartOffsets = consumer.getSubscribedPartitionsToStartOffsets();

    assertEquals(
        new HashSet<>(expectedSubscribedPartitions),
        subscribedPartitionsToStartOffsets
            .keySet()
            .stream()
            .map(partition -> partition.getTopic())
            .collect(Collectors.toSet()));
}
Example 5
Source File: AbstractPartitionDiscovererTest.java, from Flink-CEPplus (Apache License 2.0)
@Test
public void testPartitionsFewerThanConsumersFixedPartitions() {
    try {
        List<KafkaTopicPartition> mockGetAllPartitionsForTopicsReturn = Arrays.asList(
            new KafkaTopicPartition(TEST_TOPIC, 0),
            new KafkaTopicPartition(TEST_TOPIC, 1),
            new KafkaTopicPartition(TEST_TOPIC, 2),
            new KafkaTopicPartition(TEST_TOPIC, 3));

        final Set<KafkaTopicPartition> allPartitions = new HashSet<>();
        allPartitions.addAll(mockGetAllPartitionsForTopicsReturn);

        final int numConsumers = 2 * mockGetAllPartitionsForTopicsReturn.size() + 3;

        // get the start index; the assertions below will fail if the assignment logic does not meet correct contracts
        int startIndex = KafkaTopicPartitionAssigner.assign(mockGetAllPartitionsForTopicsReturn.get(0), numConsumers);

        for (int subtaskIndex = 0; subtaskIndex < numConsumers; subtaskIndex++) {
            TestPartitionDiscoverer partitionDiscoverer = new TestPartitionDiscoverer(
                topicsDescriptor,
                subtaskIndex,
                numConsumers,
                TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(Collections.singletonList(TEST_TOPIC)),
                TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(mockGetAllPartitionsForTopicsReturn));
            partitionDiscoverer.open();

            List<KafkaTopicPartition> initialDiscovery = partitionDiscoverer.discoverPartitions();
            assertTrue(initialDiscovery.size() <= 1);

            for (KafkaTopicPartition p : initialDiscovery) {
                // check that the element was actually contained
                assertTrue(allPartitions.remove(p));
                assertEquals(getExpectedSubtaskIndex(p, startIndex, numConsumers), subtaskIndex);
            }

            // subsequent discoveries should not find anything
            List<KafkaTopicPartition> secondDiscovery = partitionDiscoverer.discoverPartitions();
            List<KafkaTopicPartition> thirdDiscovery = partitionDiscoverer.discoverPartitions();
            assertEquals(0, secondDiscovery.size());
            assertEquals(0, thirdDiscovery.size());
        }

        // all partitions must have been assigned
        assertTrue(allPartitions.isEmpty());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example 6
Source File: AbstractPartitionDiscovererTest.java, from Flink-CEPplus (Apache License 2.0)
@Test
public void testDeterministicAssignmentWithDifferentFetchedPartitionOrdering() throws Exception {
    int numSubtasks = 4;

    List<KafkaTopicPartition> mockGetAllPartitionsForTopicsReturn = Arrays.asList(
        new KafkaTopicPartition("test-topic", 0),
        new KafkaTopicPartition("test-topic", 1),
        new KafkaTopicPartition("test-topic", 2),
        new KafkaTopicPartition("test-topic", 3),
        new KafkaTopicPartition("test-topic2", 0),
        new KafkaTopicPartition("test-topic2", 1));

    List<KafkaTopicPartition> mockGetAllPartitionsForTopicsReturnOutOfOrder = Arrays.asList(
        new KafkaTopicPartition("test-topic", 3),
        new KafkaTopicPartition("test-topic", 1),
        new KafkaTopicPartition("test-topic2", 1),
        new KafkaTopicPartition("test-topic", 0),
        new KafkaTopicPartition("test-topic2", 0),
        new KafkaTopicPartition("test-topic", 2));

    for (int subtaskIndex = 0; subtaskIndex < numSubtasks; subtaskIndex++) {
        TestPartitionDiscoverer partitionDiscoverer = new TestPartitionDiscoverer(
            topicsDescriptor,
            subtaskIndex,
            numSubtasks,
            TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(Arrays.asList("test-topic", "test-topic2")),
            TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(mockGetAllPartitionsForTopicsReturn));
        partitionDiscoverer.open();

        TestPartitionDiscoverer partitionDiscovererOutOfOrder = new TestPartitionDiscoverer(
            topicsDescriptor,
            subtaskIndex,
            numSubtasks,
            TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(Arrays.asList("test-topic", "test-topic2")),
            TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(mockGetAllPartitionsForTopicsReturnOutOfOrder));
        partitionDiscovererOutOfOrder.open();

        List<KafkaTopicPartition> discoveredPartitions = partitionDiscoverer.discoverPartitions();
        List<KafkaTopicPartition> discoveredPartitionsOutOfOrder = partitionDiscovererOutOfOrder.discoverPartitions();

        // the subscribed partitions should be identical, regardless of the input partition ordering
        Collections.sort(discoveredPartitions, new KafkaTopicPartition.Comparator());
        Collections.sort(discoveredPartitionsOutOfOrder, new KafkaTopicPartition.Comparator());
        assertEquals(discoveredPartitions, discoveredPartitionsOutOfOrder);
    }
}
Example 7
Source File: AbstractPartitionDiscovererTest.java, from flink (Apache License 2.0)
@Test
public void testPartitionsFewerThanConsumersFixedPartitions() {
    try {
        List<KafkaTopicPartition> mockGetAllPartitionsForTopicsReturn = Arrays.asList(
            new KafkaTopicPartition(TEST_TOPIC, 0),
            new KafkaTopicPartition(TEST_TOPIC, 1),
            new KafkaTopicPartition(TEST_TOPIC, 2),
            new KafkaTopicPartition(TEST_TOPIC, 3));

        final Set<KafkaTopicPartition> allPartitions = new HashSet<>();
        allPartitions.addAll(mockGetAllPartitionsForTopicsReturn);

        final int numConsumers = 2 * mockGetAllPartitionsForTopicsReturn.size() + 3;

        // get the start index; the assertions below will fail if the assignment logic does not meet correct contracts
        int startIndex = KafkaTopicPartitionAssigner.assign(mockGetAllPartitionsForTopicsReturn.get(0), numConsumers);

        for (int subtaskIndex = 0; subtaskIndex < numConsumers; subtaskIndex++) {
            TestPartitionDiscoverer partitionDiscoverer = new TestPartitionDiscoverer(
                topicsDescriptor,
                subtaskIndex,
                numConsumers,
                TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(Collections.singletonList(TEST_TOPIC)),
                TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(mockGetAllPartitionsForTopicsReturn));
            partitionDiscoverer.open();

            List<KafkaTopicPartition> initialDiscovery = partitionDiscoverer.discoverPartitions();
            assertTrue(initialDiscovery.size() <= 1);

            for (KafkaTopicPartition p : initialDiscovery) {
                // check that the element was actually contained
                assertTrue(allPartitions.remove(p));
                assertEquals(getExpectedSubtaskIndex(p, startIndex, numConsumers), subtaskIndex);
            }

            // subsequent discoveries should not find anything
            List<KafkaTopicPartition> secondDiscovery = partitionDiscoverer.discoverPartitions();
            List<KafkaTopicPartition> thirdDiscovery = partitionDiscoverer.discoverPartitions();
            assertEquals(0, secondDiscovery.size());
            assertEquals(0, thirdDiscovery.size());
        }

        // all partitions must have been assigned
        assertTrue(allPartitions.isEmpty());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example 8
Source File: AbstractPartitionDiscovererTest.java, from flink (Apache License 2.0)
@Test
public void testDeterministicAssignmentWithDifferentFetchedPartitionOrdering() throws Exception {
    int numSubtasks = 4;

    List<KafkaTopicPartition> mockGetAllPartitionsForTopicsReturn = Arrays.asList(
        new KafkaTopicPartition("test-topic", 0),
        new KafkaTopicPartition("test-topic", 1),
        new KafkaTopicPartition("test-topic", 2),
        new KafkaTopicPartition("test-topic", 3),
        new KafkaTopicPartition("test-topic2", 0),
        new KafkaTopicPartition("test-topic2", 1));

    List<KafkaTopicPartition> mockGetAllPartitionsForTopicsReturnOutOfOrder = Arrays.asList(
        new KafkaTopicPartition("test-topic", 3),
        new KafkaTopicPartition("test-topic", 1),
        new KafkaTopicPartition("test-topic2", 1),
        new KafkaTopicPartition("test-topic", 0),
        new KafkaTopicPartition("test-topic2", 0),
        new KafkaTopicPartition("test-topic", 2));

    for (int subtaskIndex = 0; subtaskIndex < numSubtasks; subtaskIndex++) {
        TestPartitionDiscoverer partitionDiscoverer = new TestPartitionDiscoverer(
            topicsDescriptor,
            subtaskIndex,
            numSubtasks,
            TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(Arrays.asList("test-topic", "test-topic2")),
            TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(mockGetAllPartitionsForTopicsReturn));
        partitionDiscoverer.open();

        TestPartitionDiscoverer partitionDiscovererOutOfOrder = new TestPartitionDiscoverer(
            topicsDescriptor,
            subtaskIndex,
            numSubtasks,
            TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(Arrays.asList("test-topic", "test-topic2")),
            TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(mockGetAllPartitionsForTopicsReturnOutOfOrder));
        partitionDiscovererOutOfOrder.open();

        List<KafkaTopicPartition> discoveredPartitions = partitionDiscoverer.discoverPartitions();
        List<KafkaTopicPartition> discoveredPartitionsOutOfOrder = partitionDiscovererOutOfOrder.discoverPartitions();

        // the subscribed partitions should be identical, regardless of the input partition ordering
        Collections.sort(discoveredPartitions, new KafkaTopicPartition.Comparator());
        Collections.sort(discoveredPartitionsOutOfOrder, new KafkaTopicPartition.Comparator());
        assertEquals(discoveredPartitions, discoveredPartitionsOutOfOrder);
    }
}