org.apache.flink.shaded.guava18.com.google.common.collect.Iterables Java Examples
The following examples show how to use org.apache.flink.shaded.guava18.com.google.common.collect.Iterables.
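Before the project examples, the snippet below is a minimal, self-contained sketch written for this page rather than taken from any of the projects listed; it assumes the flink-shaded-guava 18 artifact is on the classpath so that the relocated package resolves, and it simply exercises the Iterables methods (getOnlyElement, size, concat, isEmpty, getLast) that recur in the examples that follow.

// Minimal usage sketch (illustrative only, not from the Flink code base).
// Assumes the flink-shaded-guava 18 dependency is available, which relocates
// Guava's Iterables to the package imported below.
import org.apache.flink.shaded.guava18.com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class IterablesUsageSketch {

	public static void main(String[] args) {
		List<String> single = Collections.singletonList("only");
		List<String> several = Arrays.asList("a", "b", "c");

		// Returns the sole element, or the supplied default if the iterable is empty.
		String only = Iterables.getOnlyElement(single, null);

		// Works on any Iterable, not just Collections.
		int count = Iterables.size(several);

		// Lazily chains several iterables into a single view.
		Iterable<String> all = Iterables.concat(single, several);

		// Convenience accessors used by the evictor and slot-pool examples below.
		boolean empty = Iterables.isEmpty(all);
		String last = Iterables.getLast(several);

		System.out.println(only + " " + count + " " + empty + " " + last);
	}
}

Most of the project examples below use getOnlyElement to assert that a collection produced by the code under test contains exactly one item.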
Example #1
Source File: YARNHighAvailabilityITCase.java From flink with Apache License 2.0 | 6 votes |
private static Optional<Metric> getJobMetric(
		final RestClusterClient<ApplicationId> restClusterClient,
		final JobID jobId,
		final String metricName) throws Exception {

	final JobMetricsMessageParameters messageParameters = new JobMetricsMessageParameters();
	messageParameters.jobPathParameter.resolve(jobId);
	messageParameters.metricsFilterParameter.resolveFromString(metricName);

	final Collection<Metric> metrics = restClusterClient.sendRequest(
		JobMetricsHeaders.getInstance(),
		messageParameters,
		EmptyRequestBody.getInstance()).get().getMetrics();

	final Metric metric = Iterables.getOnlyElement(metrics, null);
	checkState(metric == null || metric.getId().equals(metricName));
	return Optional.ofNullable(metric);
}
Example #2
Source File: IntervalJoinOperatorTest.java From flink with Apache License 2.0 | 6 votes |
private void assertOutput(
		Iterable<StreamRecord<Tuple2<TestElem, TestElem>>> expectedOutput,
		Queue<Object> actualOutput) {

	int actualSize = actualOutput.stream()
		.filter(elem -> elem instanceof StreamRecord)
		.collect(Collectors.toList())
		.size();

	int expectedSize = Iterables.size(expectedOutput);

	Assert.assertEquals(
		"Expected and actual size of stream records different",
		expectedSize,
		actualSize
	);

	for (StreamRecord<Tuple2<TestElem, TestElem>> record : expectedOutput) {
		Assert.assertTrue(actualOutput.contains(record));
	}
}
Example #3
Source File: DefaultSchedulerTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void restartFailedTask() {
	final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
	final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);

	final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);

	final ArchivedExecutionVertex archivedExecutionVertex = Iterables.getOnlyElement(scheduler.requestJob().getAllExecutionVertices());
	final ExecutionAttemptID attemptId = archivedExecutionVertex.getCurrentExecutionAttempt().getAttemptId();

	scheduler.updateTaskExecutionState(new TaskExecutionState(jobGraph.getJobID(), attemptId, ExecutionState.FAILED));

	taskRestartExecutor.triggerScheduledTasks();

	final List<ExecutionVertexID> deployedExecutionVertices = testExecutionVertexOperations.getDeployedVertices();
	final ExecutionVertexID executionVertexId = new ExecutionVertexID(onlyJobVertex.getID(), 0);
	assertThat(deployedExecutionVertices, contains(executionVertexId, executionVertexId));
}
Example #4
Source File: DefaultSchedulerTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void failJobIfCannotRestart() throws Exception {
	final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
	testRestartBackoffTimeStrategy.setCanRestart(false);

	final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);

	final ArchivedExecutionVertex onlyExecutionVertex = Iterables.getOnlyElement(scheduler.requestJob().getAllExecutionVertices());
	final ExecutionAttemptID attemptId = onlyExecutionVertex.getCurrentExecutionAttempt().getAttemptId();

	scheduler.updateTaskExecutionState(new TaskExecutionState(jobGraph.getJobID(), attemptId, ExecutionState.FAILED));

	taskRestartExecutor.triggerScheduledTasks();

	waitForTermination(scheduler);
	final JobStatus jobStatus = scheduler.requestJobStatus();
	assertThat(jobStatus, is(equalTo(JobStatus.FAILED)));
}
Example #5
Source File: DefaultSchedulerTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void vertexIsResetBeforeRestarted() throws Exception {
	final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();

	final TestSchedulingStrategy.Factory schedulingStrategyFactory = new TestSchedulingStrategy.Factory();
	final DefaultScheduler scheduler = createScheduler(jobGraph, schedulingStrategyFactory);
	final TestSchedulingStrategy schedulingStrategy = schedulingStrategyFactory.getLastCreatedSchedulingStrategy();
	final SchedulingTopology topology = schedulingStrategy.getSchedulingTopology();

	startScheduling(scheduler);

	final SchedulingExecutionVertex onlySchedulingVertex = Iterables.getOnlyElement(topology.getVertices());
	schedulingStrategy.schedule(Collections.singletonList(onlySchedulingVertex.getId()));

	final ArchivedExecutionVertex onlyExecutionVertex = Iterables.getOnlyElement(scheduler.requestJob().getAllExecutionVertices());
	final ExecutionAttemptID attemptId = onlyExecutionVertex.getCurrentExecutionAttempt().getAttemptId();
	scheduler.updateTaskExecutionState(new TaskExecutionState(jobGraph.getJobID(), attemptId, ExecutionState.FAILED));

	taskRestartExecutor.triggerScheduledTasks();

	assertThat(schedulingStrategy.getReceivedVerticesToRestart(), hasSize(1));
	assertThat(onlySchedulingVertex.getState(), is(equalTo(ExecutionState.CREATED)));
}
Example #6
Source File: DefaultSchedulerTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void scheduleOnlyIfVertexIsCreated() throws Exception {
	final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();

	final TestSchedulingStrategy.Factory schedulingStrategyFactory = new TestSchedulingStrategy.Factory();
	final DefaultScheduler scheduler = createScheduler(jobGraph, schedulingStrategyFactory);
	final TestSchedulingStrategy schedulingStrategy = schedulingStrategyFactory.getLastCreatedSchedulingStrategy();
	final SchedulingTopology topology = schedulingStrategy.getSchedulingTopology();

	startScheduling(scheduler);

	final ExecutionVertexID onlySchedulingVertexId = Iterables.getOnlyElement(topology.getVertices()).getId();

	// Schedule the vertex to get it to a non-CREATED state
	schedulingStrategy.schedule(Collections.singletonList(onlySchedulingVertexId));

	// The scheduling of a non-CREATED vertex will result in IllegalStateException
	try {
		schedulingStrategy.schedule(Collections.singletonList(onlySchedulingVertexId));
		fail("IllegalStateException should happen");
	} catch (IllegalStateException e) {
		// expected exception
	}
}
Example #7
Source File: DefaultSchedulerTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void handleGlobalFailure() {
	final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
	final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);

	final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);

	scheduler.handleGlobalFailure(new Exception("forced failure"));

	final ArchivedExecutionVertex onlyExecutionVertex = Iterables.getOnlyElement(scheduler.requestJob().getAllExecutionVertices());
	final ExecutionAttemptID attemptId = onlyExecutionVertex.getCurrentExecutionAttempt().getAttemptId();
	scheduler.updateTaskExecutionState(new TaskExecutionState(jobGraph.getJobID(), attemptId, ExecutionState.CANCELED));

	taskRestartExecutor.triggerScheduledTasks();

	final List<ExecutionVertexID> deployedExecutionVertices = testExecutionVertexOperations.getDeployedVertices();
	final ExecutionVertexID executionVertexId = new ExecutionVertexID(onlyJobVertex.getID(), 0);
	assertThat(deployedExecutionVertices, contains(executionVertexId, executionVertexId));
}
Example #8
Source File: DefaultSchedulerTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void abortPendingCheckpointsWhenRestartingTasks() throws Exception {
	final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
	enableCheckpointing(jobGraph);
	final CountDownLatch checkpointTriggeredLatch = getCheckpointTriggeredLatch();

	final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);

	final ArchivedExecutionVertex onlyExecutionVertex = Iterables.getOnlyElement(scheduler.requestJob().getAllExecutionVertices());
	final ExecutionAttemptID attemptId = onlyExecutionVertex.getCurrentExecutionAttempt().getAttemptId();
	scheduler.updateTaskExecutionState(new TaskExecutionState(jobGraph.getJobID(), attemptId, ExecutionState.RUNNING));

	final CheckpointCoordinator checkpointCoordinator = getCheckpointCoordinator(scheduler);
	checkpointCoordinator.triggerCheckpoint(false);
	checkpointTriggeredLatch.await();
	assertThat(checkpointCoordinator.getNumberOfPendingCheckpoints(), is(equalTo(1)));

	scheduler.updateTaskExecutionState(new TaskExecutionState(jobGraph.getJobID(), attemptId, ExecutionState.FAILED));
	taskRestartExecutor.triggerScheduledTasks();
	assertThat(checkpointCoordinator.getNumberOfPendingCheckpoints(), is(equalTo(0)));
}
Example #9
Source File: YARNHighAvailabilityITCase.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
private static Optional<Metric> getJobMetric(
		final RestClusterClient<ApplicationId> restClusterClient,
		final JobID jobId,
		final String metricName) throws Exception {

	final JobMetricsMessageParameters messageParameters = new JobMetricsMessageParameters();
	messageParameters.jobPathParameter.resolve(jobId);
	messageParameters.metricsFilterParameter.resolveFromString(metricName);

	final Collection<Metric> metrics = restClusterClient.sendRequest(
		JobMetricsHeaders.getInstance(),
		messageParameters,
		EmptyRequestBody.getInstance()).get().getMetrics();

	final Metric metric = Iterables.getOnlyElement(metrics, null);
	checkState(metric == null || metric.getId().equals(metricName));
	return Optional.ofNullable(metric);
}
Example #10
Source File: DefaultSchedulerTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void failureInfoIsSetAfterTaskFailure() {
	final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
	final JobID jobId = jobGraph.getJobID();
	final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);

	final ArchivedExecutionVertex onlyExecutionVertex = Iterables.getOnlyElement(scheduler.requestJob().getAllExecutionVertices());
	final ExecutionAttemptID attemptId = onlyExecutionVertex.getCurrentExecutionAttempt().getAttemptId();

	final String exceptionMessage = "expected exception";
	scheduler.updateTaskExecutionState(new TaskExecutionState(jobId, attemptId, ExecutionState.FAILED, new RuntimeException(exceptionMessage)));

	final ErrorInfo failureInfo = scheduler.requestJob().getFailureInfo();
	assertThat(failureInfo, is(notNullValue()));
	assertThat(failureInfo.getExceptionAsString(), containsString(exceptionMessage));
}
Example #11
Source File: IntervalJoinOperatorTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
private void assertOutput(
		Iterable<StreamRecord<Tuple2<TestElem, TestElem>>> expectedOutput,
		Queue<Object> actualOutput) {

	int actualSize = actualOutput.stream()
		.filter(elem -> elem instanceof StreamRecord)
		.collect(Collectors.toList())
		.size();

	int expectedSize = Iterables.size(expectedOutput);

	Assert.assertEquals(
		"Expected and actual size of stream records different",
		expectedSize,
		actualSize
	);

	for (StreamRecord<Tuple2<TestElem, TestElem>> record : expectedOutput) {
		Assert.assertTrue(actualOutput.contains(record));
	}
}
Example #12
Source File: TaskExecutorPartitionTrackerImplTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void createClusterPartitionReport() {
	final TaskExecutorPartitionTrackerImpl partitionTracker = new TaskExecutorPartitionTrackerImpl(new NettyShuffleEnvironmentBuilder().build());

	assertThat(partitionTracker.createClusterPartitionReport().getEntries(), is(empty()));

	final IntermediateDataSetID dataSetId = new IntermediateDataSetID();
	final JobID jobId = new JobID();
	final ResultPartitionID clusterPartitionId = new ResultPartitionID();
	final ResultPartitionID jobPartitionId = new ResultPartitionID();
	final int numberOfPartitions = 1;

	partitionTracker.startTrackingPartition(jobId, new TaskExecutorPartitionInfo(clusterPartitionId, dataSetId, numberOfPartitions));
	partitionTracker.startTrackingPartition(jobId, new TaskExecutorPartitionInfo(jobPartitionId, dataSetId, numberOfPartitions + 1));
	partitionTracker.promoteJobPartitions(Collections.singleton(clusterPartitionId));

	final ClusterPartitionReport clusterPartitionReport = partitionTracker.createClusterPartitionReport();

	final ClusterPartitionReport.ClusterPartitionReportEntry reportEntry = Iterables.getOnlyElement(clusterPartitionReport.getEntries());
	assertThat(reportEntry.getDataSetId(), is(dataSetId));
	assertThat(reportEntry.getNumTotalPartitions(), is(numberOfPartitions));
	assertThat(reportEntry.getHostedPartitions(), hasItems(clusterPartitionId));
}
Example #13
Source File: InputProcessorUtil.java From flink with Apache License 2.0 | 6 votes |
@SuppressWarnings("unchecked")
public static CheckpointedInputGate createCheckpointedInputGate(
		AbstractInvokable toNotifyOnCheckpoint,
		StreamConfig config,
		SubtaskCheckpointCoordinator checkpointCoordinator,
		IndexedInputGate[] inputGates,
		TaskIOMetricGroup taskIOMetricGroup,
		String taskName) {

	CheckpointedInputGate[] checkpointedInputGates = createCheckpointedMultipleInputGate(
		toNotifyOnCheckpoint,
		config,
		checkpointCoordinator,
		taskIOMetricGroup,
		taskName,
		Arrays.asList(inputGates));

	return Iterables.getOnlyElement(Arrays.asList(checkpointedInputGates));
}
Example #14
Source File: StreamingJobGraphGeneratorTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void testManagedMemoryFractionForUnknownResourceSpec() throws Exception {
	final ResourceSpec resource = ResourceSpec.UNKNOWN;
	final List<ResourceSpec> resourceSpecs = Arrays.asList(resource, resource, resource, resource);
	final List<Integer> managedMemoryWeights = Arrays.asList(1, 2, 3, 4);

	// v1(source -> map1), v2(map2) are in the same slot sharing group, v3(map3) is in a different group
	final JobGraph jobGraph = createJobGraphForManagedMemoryFractionTest(resourceSpecs, managedMemoryWeights);
	final JobVertex vertex1 = jobGraph.getVerticesSortedTopologicallyFromSources().get(0);
	final JobVertex vertex2 = jobGraph.getVerticesSortedTopologicallyFromSources().get(1);
	final JobVertex vertex3 = jobGraph.getVerticesSortedTopologicallyFromSources().get(2);

	final StreamConfig sourceConfig = new StreamConfig(vertex1.getConfiguration());
	assertEquals(1.0 / 6, sourceConfig.getManagedMemoryFraction(), 0.000001);

	final StreamConfig map1Config = Iterables.getOnlyElement(
		sourceConfig.getTransitiveChainedTaskConfigs(StreamingJobGraphGeneratorTest.class.getClassLoader()).values());
	assertEquals(2.0 / 6, map1Config.getManagedMemoryFraction(), 0.000001);

	final StreamConfig map2Config = new StreamConfig(vertex2.getConfiguration());
	assertEquals(3.0 / 6, map2Config.getManagedMemoryFraction(), 0.000001);

	final StreamConfig map3Config = new StreamConfig(vertex3.getConfiguration());
	assertEquals(1.0, map3Config.getManagedMemoryFraction(), 0.000001);
}
Example #15
Source File: FlinkKafkaInternalProducerITCase.java From flink with Apache License 2.0 | 5 votes |
private void assertRecord(String topicName, String expectedKey, String expectedValue) {
	try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(extraProperties)) {
		kafkaConsumer.subscribe(Collections.singletonList(topicName));
		ConsumerRecords<String, String> records = kafkaConsumer.poll(10000);

		ConsumerRecord<String, String> record = Iterables.getOnlyElement(records);

		assertEquals(expectedKey, record.key());
		assertEquals(expectedValue, record.value());
	}
}
Example #16
Source File: SlotPoolImpl.java From flink with Apache License 2.0 | 5 votes |
@Override
public AllocatedSlotReport createAllocatedSlotReport(ResourceID taskManagerId) {
	final Set<AllocatedSlot> availableSlotsForTaskManager = availableSlots.getSlotsForTaskManager(taskManagerId);
	final Set<AllocatedSlot> allocatedSlotsForTaskManager = allocatedSlots.getSlotsForTaskManager(taskManagerId);

	List<AllocatedSlotInfo> allocatedSlotInfos = new ArrayList<>(
			availableSlotsForTaskManager.size() + allocatedSlotsForTaskManager.size());
	for (AllocatedSlot allocatedSlot : Iterables.concat(availableSlotsForTaskManager, allocatedSlotsForTaskManager)) {
		allocatedSlotInfos.add(
				new AllocatedSlotInfo(allocatedSlot.getPhysicalSlotNumber(), allocatedSlot.getAllocationId()));
	}
	return new AllocatedSlotReport(jobId, allocatedSlotInfos);
}
Example #17
Source File: FlinkKafkaProducerITCase.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
private void assertRecord(String topicName, String expectedKey, String expectedValue) {
	try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(extraProperties)) {
		kafkaConsumer.subscribe(Collections.singletonList(topicName));
		ConsumerRecords<String, String> records = kafkaConsumer.poll(10000);

		ConsumerRecord<String, String> record = Iterables.getOnlyElement(records);

		assertEquals(expectedKey, record.key());
		assertEquals(expectedValue, record.value());
	}
}
Example #18
Source File: TestHarnessUtil.java From flink with Apache License 2.0 | 5 votes |
/**
 * Compare the two queues containing operator/task output by converting them to an array first.
 */
public static void assertOutputEqualsSorted(String message, Iterable<Object> expected, Iterable<Object> actual, Comparator<Object> comparator) {
	assertEquals(Iterables.size(expected), Iterables.size(actual));

	// first, compare only watermarks, their position should be deterministic
	Iterator<Object> exIt = expected.iterator();
	Iterator<Object> actIt = actual.iterator();
	while (exIt.hasNext()) {
		Object nextEx = exIt.next();
		Object nextAct = actIt.next();
		if (nextEx instanceof Watermark) {
			assertEquals(nextEx, nextAct);
		}
	}

	List<Object> expectedRecords = new ArrayList<>();
	List<Object> actualRecords = new ArrayList<>();

	for (Object ex: expected) {
		if (ex instanceof StreamRecord) {
			expectedRecords.add(ex);
		}
	}

	for (Object act: actual) {
		if (act instanceof StreamRecord) {
			actualRecords.add(act);
		}
	}

	Object[] sortedExpected = expectedRecords.toArray();
	Object[] sortedActual = actualRecords.toArray();

	Arrays.sort(sortedExpected, comparator);
	Arrays.sort(sortedActual, comparator);

	Assert.assertArrayEquals(message, sortedExpected, sortedActual);
}
Example #19
Source File: FlinkKafkaProducerITCase.java From flink with Apache License 2.0 | 5 votes |
private void assertRecord(String topicName, String expectedKey, String expectedValue) {
	try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(extraProperties)) {
		kafkaConsumer.subscribe(Collections.singletonList(topicName));
		ConsumerRecords<String, String> records = kafkaConsumer.poll(10000);

		ConsumerRecord<String, String> record = Iterables.getOnlyElement(records);

		assertEquals(expectedKey, record.key());
		assertEquals(expectedValue, record.value());
	}
}
Example #20
Source File: InputDependencyConstraintChecker.java From flink with Apache License 2.0 | 5 votes |
public boolean check(final SchedulingExecutionVertex schedulingExecutionVertex) {
	if (Iterables.isEmpty(schedulingExecutionVertex.getConsumedResults())) {
		return true;
	}

	final InputDependencyConstraint inputConstraint = schedulingExecutionVertex.getInputDependencyConstraint();
	switch (inputConstraint) {
		case ANY:
			return checkAny(schedulingExecutionVertex);
		case ALL:
			return checkAll(schedulingExecutionVertex);
		default:
			throw new IllegalStateException("Unknown InputDependencyConstraint " + inputConstraint);
	}
}
Example #21
Source File: DefaultLogicalTopologyTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testGetVertices() {
	// vertices from getVertices() should be topologically sorted
	final Iterable<JobVertex> jobVertices = jobGraph.getVerticesSortedTopologicallyFromSources();
	final Iterable<DefaultLogicalVertex> logicalVertices = logicalTopology.getVertices();

	assertEquals(Iterables.size(jobVertices), Iterables.size(logicalVertices));

	final Iterator<JobVertex> jobVertexIterator = jobVertices.iterator();
	final Iterator<DefaultLogicalVertex> logicalVertexIterator = logicalVertices.iterator();
	while (jobVertexIterator.hasNext()) {
		assertVertexAndConnectedResultsEquals(jobVertexIterator.next(), logicalVertexIterator.next());
	}
}
Example #22
Source File: DeltaEvictor.java From flink with Apache License 2.0 | 5 votes |
private void evict(Iterable<TimestampedValue<T>> elements, int size, EvictorContext ctx) {
	TimestampedValue<T> lastElement = Iterables.getLast(elements);
	for (Iterator<TimestampedValue<T>> iterator = elements.iterator(); iterator.hasNext();) {
		TimestampedValue<T> element = iterator.next();
		if (deltaFunction.getDelta(element.getValue(), lastElement.getValue()) >= this.threshold) {
			iterator.remove();
		}
	}
}
Example #23
Source File: DefaultExecutionTopologyTest.java From flink with Apache License 2.0 | 5 votes |
private static void assertPartitionsEquals(
		Iterable<IntermediateResultPartition> originalResultPartitions,
		Iterable<DefaultResultPartition> adaptedResultPartitions) {

	assertEquals(Iterables.size(originalResultPartitions), Iterables.size(adaptedResultPartitions));

	for (IntermediateResultPartition originalPartition : originalResultPartitions) {
		DefaultResultPartition adaptedPartition = IterableUtils.toStream(adaptedResultPartitions)
			.filter(adapted -> adapted.getId().equals(originalPartition.getPartitionId()))
			.findAny()
			.orElseThrow(() -> new AssertionError("Could not find matching adapted partition for " + originalPartition));

		assertPartitionEquals(originalPartition, adaptedPartition);

		List<ExecutionVertex> originalConsumers = originalPartition.getConsumers().stream()
			.flatMap(Collection::stream)
			.map(ExecutionEdge::getTarget)
			.collect(Collectors.toList());
		Iterable<DefaultExecutionVertex> adaptedConsumers = adaptedPartition.getConsumers();

		for (ExecutionVertex originalConsumer : originalConsumers) {
			// it is sufficient to verify that some vertex exists with the correct ID here,
			// since deep equality is verified later in the main loop
			// this DOES rely on an implicit assumption that the vertices objects returned by the topology are
			// identical to those stored in the partition
			ExecutionVertexID originalId = originalConsumer.getID();
			assertTrue(IterableUtils.toStream(adaptedConsumers).anyMatch(adaptedConsumer -> adaptedConsumer.getId().equals(originalId)));
		}
	}
}
Example #24
Source File: DefaultSchedulingPipelinedRegionTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Tests if the consumed inputs of the pipelined regions are computed
 * correctly using the Job graph below.
 * <pre>
 *          c
 *        /  X
 * a -+- b   e
 *        \  /
 *          d
 * </pre>
 * Pipelined regions: {a}, {b, c, d, e}
 */
@Test
public void returnsIncidentBlockingPartitions() throws Exception {
	final JobVertex a = ExecutionGraphTestUtils.createNoOpVertex(1);
	final JobVertex b = ExecutionGraphTestUtils.createNoOpVertex(1);
	final JobVertex c = ExecutionGraphTestUtils.createNoOpVertex(1);
	final JobVertex d = ExecutionGraphTestUtils.createNoOpVertex(1);
	final JobVertex e = ExecutionGraphTestUtils.createNoOpVertex(1);

	b.connectNewDataSetAsInput(a, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
	c.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	d.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	e.connectNewDataSetAsInput(c, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
	e.connectNewDataSetAsInput(d, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	final ExecutionGraph simpleTestGraph = ExecutionGraphTestUtils.createSimpleTestGraph(a, b, c, d, e);
	final DefaultExecutionTopology topology = new DefaultExecutionTopology(simpleTestGraph);

	final DefaultSchedulingPipelinedRegion firstPipelinedRegion = topology.getPipelinedRegionOfVertex(new ExecutionVertexID(a.getID(), 0));
	final DefaultSchedulingPipelinedRegion secondPipelinedRegion = topology.getPipelinedRegionOfVertex(new ExecutionVertexID(e.getID(), 0));

	final DefaultExecutionVertex vertexB0 = topology.getVertex(new ExecutionVertexID(b.getID(), 0));
	final IntermediateResultPartitionID b0ConsumedResultPartition = Iterables.getOnlyElement(vertexB0.getConsumedResults()).getId();

	final Set<IntermediateResultPartitionID> secondPipelinedRegionConsumedResults = IterableUtils.toStream(secondPipelinedRegion.getConsumedResults())
		.map(DefaultResultPartition::getId)
		.collect(Collectors.toSet());

	assertThat(firstPipelinedRegion.getConsumedResults().iterator().hasNext(), is(false));
	assertThat(secondPipelinedRegionConsumedResults, contains(b0ConsumedResultPartition));
}
Example #25
Source File: DefaultSchedulerTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void restoreStateWhenRestartingTasks() throws Exception {
	final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
	enableCheckpointing(jobGraph);
	final CountDownLatch checkpointTriggeredLatch = getCheckpointTriggeredLatch();

	final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);

	final ArchivedExecutionVertex onlyExecutionVertex = Iterables.getOnlyElement(scheduler.requestJob().getAllExecutionVertices());
	final ExecutionAttemptID attemptId = onlyExecutionVertex.getCurrentExecutionAttempt().getAttemptId();
	scheduler.updateTaskExecutionState(new TaskExecutionState(jobGraph.getJobID(), attemptId, ExecutionState.RUNNING));

	final CheckpointCoordinator checkpointCoordinator = getCheckpointCoordinator(scheduler);

	// register a stateful master hook to help verify state restore
	final TestMasterHook masterHook = TestMasterHook.fromId("testHook");
	checkpointCoordinator.addMasterHook(masterHook);

	// complete one checkpoint for state restore
	checkpointCoordinator.triggerCheckpoint(false);
	checkpointTriggeredLatch.await();
	final long checkpointId = checkpointCoordinator.getPendingCheckpoints().keySet().iterator().next();
	acknowledgePendingCheckpoint(scheduler, checkpointId);

	scheduler.updateTaskExecutionState(new TaskExecutionState(jobGraph.getJobID(), attemptId, ExecutionState.FAILED));
	taskRestartExecutor.triggerScheduledTasks();
	assertThat(masterHook.getRestoreCount(), is(equalTo(1)));
}
Example #26
Source File: DefaultSchedulerTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void failGlobalWhenRestoringStateFails() throws Exception {
	final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
	final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);
	enableCheckpointing(jobGraph);
	final CountDownLatch checkpointTriggeredLatch = getCheckpointTriggeredLatch();

	final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);

	final ArchivedExecutionVertex onlyExecutionVertex = Iterables.getOnlyElement(scheduler.requestJob().getAllExecutionVertices());
	final ExecutionAttemptID attemptId = onlyExecutionVertex.getCurrentExecutionAttempt().getAttemptId();
	scheduler.updateTaskExecutionState(new TaskExecutionState(jobGraph.getJobID(), attemptId, ExecutionState.RUNNING));

	final CheckpointCoordinator checkpointCoordinator = getCheckpointCoordinator(scheduler);

	// register a master hook to fail state restore
	final TestMasterHook masterHook = TestMasterHook.fromId("testHook");
	masterHook.enableFailOnRestore();
	checkpointCoordinator.addMasterHook(masterHook);

	// complete one checkpoint for state restore
	checkpointCoordinator.triggerCheckpoint(false);
	checkpointTriggeredLatch.await();
	final long checkpointId = checkpointCoordinator.getPendingCheckpoints().keySet().iterator().next();
	acknowledgePendingCheckpoint(scheduler, checkpointId);

	scheduler.updateTaskExecutionState(new TaskExecutionState(jobGraph.getJobID(), attemptId, ExecutionState.FAILED));
	taskRestartExecutor.triggerScheduledTasks();
	final List<ExecutionVertexID> deployedExecutionVertices = testExecutionVertexOperations.getDeployedVertices();

	// the first task failover should be skipped on state restore failure
	final ExecutionVertexID executionVertexId = new ExecutionVertexID(onlyJobVertex.getID(), 0);
	assertThat(deployedExecutionVertices, contains(executionVertexId));

	// a global failure should be triggered on state restore failure
	masterHook.disableFailOnRestore();
	taskRestartExecutor.triggerScheduledTasks();
	assertThat(deployedExecutionVertices, contains(executionVertexId, executionVertexId));
}