Java Code Examples for org.apache.flink.api.common.time.Time#seconds()
The following examples show how to use org.apache.flink.api.common.time.Time#seconds().
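Before the project examples, here is a minimal, self-contained sketch of what Time.seconds() returns. It assumes only the documented behavior of Flink's Time class (a duration with a size and a TimeUnit, convertible to milliseconds); the class name TimeSecondsSketch is illustrative and not part of any project below.

import org.apache.flink.api.common.time.Time;

import java.util.concurrent.TimeUnit;

public class TimeSecondsSketch {

    public static void main(String[] args) {
        // Create a 10-second duration, the most common pattern in the examples below,
        // where it is typically passed to REST handlers and graph builders as a timeout.
        Time timeout = Time.seconds(10);

        // A Time value carries its size and unit and can be converted to milliseconds.
        System.out.println(timeout.getSize());         // 10
        System.out.println(timeout.getUnit());         // SECONDS (a java.util.concurrent.TimeUnit)
        System.out.println(timeout.toMilliseconds());  // 10000
    }
}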
Example 1
Source File: JarRunHandlerParameterTest.java From Flink-CEPplus with Apache License 2.0
@BeforeClass
public static void setup() throws Exception {
    init();
    final GatewayRetriever<TestingDispatcherGateway> gatewayRetriever = () -> CompletableFuture.completedFuture(restfulGateway);
    final Time timeout = Time.seconds(10);
    final Map<String, String> responseHeaders = Collections.emptyMap();
    final Executor executor = TestingUtils.defaultExecutor();

    handler = new JarRunHandler(
        gatewayRetriever,
        timeout,
        responseHeaders,
        JarRunHeaders.getInstance(),
        jarDir,
        new Configuration(),
        executor);
}
Example 2
Source File: JobVertexBackPressureHandlerTest.java From Flink-CEPplus with Apache License 2.0
@Before
public void setUp() {
    restfulGateway = TestingRestfulGateway.newBuilder().setRequestOperatorBackPressureStatsFunction(
        (jobId, jobVertexId) -> {
            if (jobId.equals(TEST_JOB_ID_BACK_PRESSURE_STATS_AVAILABLE)) {
                return CompletableFuture.completedFuture(OperatorBackPressureStatsResponse.of(new OperatorBackPressureStats(
                    4711,
                    Integer.MAX_VALUE,
                    new double[]{1.0, 0.5, 0.1}
                )));
            } else if (jobId.equals(TEST_JOB_ID_BACK_PRESSURE_STATS_ABSENT)) {
                return CompletableFuture.completedFuture(OperatorBackPressureStatsResponse.of(null));
            } else {
                throw new AssertionError();
            }
        }
    ).build();

    jobVertexBackPressureHandler = new JobVertexBackPressureHandler(
        () -> CompletableFuture.completedFuture(restfulGateway),
        Time.seconds(10),
        Collections.emptyMap(),
        JobVertexBackPressureHeaders.getInstance()
    );
}
Example 3
Source File: StateAssignmentOperationTest.java From flink with Apache License 2.0
private ExecutionJobVertex buildExecutionJobVertex(OperatorID operatorID, OperatorID userDefinedOperatorId, int parallelism) throws JobException, JobExecutionException {
    ExecutionGraph graph = TestingExecutionGraphBuilder.newBuilder().build();
    JobVertex jobVertex = new JobVertex(
        operatorID.toHexString(),
        new JobVertexID(),
        singletonList(OperatorIDPair.of(operatorID, userDefinedOperatorId)));
    return new ExecutionJobVertex(graph, jobVertex, parallelism, 1, Time.seconds(1), 1L, 1L);
}
Example 4
Source File: GlobalModVersionTest.java From Flink-CEPplus with Apache License 2.0
private ExecutionGraph createSampleGraph(FailoverStrategy failoverStrategy) throws Exception {
    final JobID jid = new JobID();
    final int parallelism = new Random().nextInt(10) + 1;

    final SimpleSlotProvider slotProvider = new SimpleSlotProvider(jid, parallelism);

    // build a simple execution graph with one job vertex
    final ExecutionGraph graph = new ExecutionGraph(
        new DummyJobInformation(
            jid,
            "test job"),
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        Time.seconds(10),
        new InfiniteDelayRestartStrategy(),
        new CustomStrategy(failoverStrategy),
        slotProvider);

    graph.start(TestingComponentMainThreadExecutorServiceAdapter.forMainThread());

    JobVertex jv = new JobVertex("test vertex");
    jv.setInvokableClass(NoOpInvokable.class);
    jv.setParallelism(parallelism);

    JobGraph jg = new JobGraph(jid, "testjob", jv);
    graph.attachJobGraph(jg.getVerticesSortedTopologicallyFromSources());

    return graph;
}
Example 5
Source File: JarHandlerParameterTest.java From flink with Apache License 2.0
static void init() throws Exception {
    jarDir = TMP.newFolder().toPath();

    // properties are set by the surefire plugin
    final String parameterProgramJarName = System.getProperty("parameterJarName") + ".jar";
    final String parameterProgramWithoutManifestJarName = System.getProperty("parameterJarWithoutManifestName") + ".jar";
    final Path jarLocation = Paths.get(System.getProperty("targetDir"));

    jarWithManifest = Files.copy(
        jarLocation.resolve(parameterProgramJarName),
        jarDir.resolve("program-with-manifest.jar"));

    jarWithoutManifest = Files.copy(
        jarLocation.resolve(parameterProgramWithoutManifestJarName),
        jarDir.resolve("program-without-manifest.jar"));

    restfulGateway = new TestingDispatcherGateway.Builder()
        .setBlobServerPort(BLOB_SERVER_RESOURCE.getBlobServerPort())
        .setSubmitFunction(jobGraph -> {
            LAST_SUBMITTED_JOB_GRAPH_REFERENCE.set(jobGraph);
            return CompletableFuture.completedFuture(Acknowledge.get());
        })
        .build();

    gatewayRetriever = () -> CompletableFuture.completedFuture(restfulGateway);
    localAddressFuture = CompletableFuture.completedFuture("shazam://localhost:12345");
    timeout = Time.seconds(10);
    responseHeaders = Collections.emptyMap();
    executor = TestingUtils.defaultExecutor();
}
Example 6
Source File: MetricFetcherTest.java From flink with Apache License 2.0
@Nonnull
private MetricFetcher createMetricFetcher(long updateInterval, RestfulGateway restfulGateway) {
    return new MetricFetcherImpl<>(
        () -> CompletableFuture.completedFuture(restfulGateway),
        address -> null,
        Executors.directExecutor(),
        Time.seconds(10L),
        updateInterval);
}
Example 7
Source File: ExecutionVertexLocalityTest.java From flink with Apache License 2.0
/**
 * Creates a simple 2 vertex graph with a parallel source and a parallel target.
 */
private ExecutionGraph createTestGraph(int parallelism, boolean allToAll) throws Exception {

    JobVertex source = new JobVertex("source", sourceVertexId);
    source.setParallelism(parallelism);
    source.setInvokableClass(NoOpInvokable.class);

    JobVertex target = new JobVertex("source", targetVertexId);
    target.setParallelism(parallelism);
    target.setInvokableClass(NoOpInvokable.class);

    DistributionPattern connectionPattern = allToAll ? DistributionPattern.ALL_TO_ALL : DistributionPattern.POINTWISE;
    target.connectNewDataSetAsInput(source, connectionPattern, ResultPartitionType.PIPELINED);

    JobGraph testJob = new JobGraph(jobId, "test job", source, target);

    final Time timeout = Time.seconds(10L);
    return ExecutionGraphBuilder.buildGraph(
        null,
        testJob,
        new Configuration(),
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        mock(SlotProvider.class),
        getClass().getClassLoader(),
        new StandaloneCheckpointRecoveryFactory(),
        timeout,
        new FixedDelayRestartStrategy(10, 0L),
        new UnregisteredMetricsGroup(),
        VoidBlobWriter.getInstance(),
        timeout,
        log,
        NettyShuffleMaster.INSTANCE,
        NoOpPartitionTracker.INSTANCE);
}
Example 8
Source File: MetricFetcherTest.java From Flink-CEPplus with Apache License 2.0
@Nonnull
private MetricFetcher createMetricFetcher(long updateInterval, RestfulGateway restfulGateway) {
    return new MetricFetcherImpl<>(
        () -> CompletableFuture.completedFuture(restfulGateway),
        path -> new CompletableFuture<>(),
        Executors.directExecutor(),
        Time.seconds(10L),
        updateInterval);
}
Example 9
Source File: JarUploadHandlerTest.java From Flink-CEPplus with Apache License 2.0
@Before
public void setUp() throws Exception {
    MockitoAnnotations.initMocks(this);
    jarDir = temporaryFolder.newFolder().toPath();
    jarUploadHandler = new JarUploadHandler(
        () -> CompletableFuture.completedFuture(mockDispatcherGateway),
        Time.seconds(10),
        Collections.emptyMap(),
        JarUploadHeaders.getInstance(),
        jarDir,
        Executors.directExecutor());
}
Example 10
Source File: JobVertexWatermarksHandlerTest.java From flink with Apache License 2.0
@Before
public void before() throws Exception {
    taskMetricStore = Mockito.mock(MetricStore.TaskMetricStore.class);

    MetricStore metricStore = Mockito.mock(MetricStore.class);
    Mockito.when(metricStore.getTaskMetricStore(TEST_JOB_ID.toString(), TEST_VERTEX_ID.toString()))
        .thenReturn(taskMetricStore);

    metricFetcher = Mockito.mock(MetricFetcher.class);
    Mockito.when(metricFetcher.getMetricStore()).thenReturn(metricStore);

    watermarkHandler = new JobVertexWatermarksHandler(
        Mockito.mock(LeaderGatewayRetriever.class),
        Time.seconds(1),
        Collections.emptyMap(),
        metricFetcher,
        NoOpExecutionGraphCache.INSTANCE,
        Mockito.mock(Executor.class));

    final Map<String, String> pathParameters = new HashMap<>();
    pathParameters.put(JobIDPathParameter.KEY, TEST_JOB_ID.toString());
    pathParameters.put(JobVertexIdPathParameter.KEY, TEST_VERTEX_ID.toString());

    request = new HandlerRequest<>(EmptyRequestBody.getInstance(),
        new JobVertexMessageParameters(), pathParameters, Collections.emptyMap());

    vertex = Mockito.mock(AccessExecutionJobVertex.class);
    Mockito.when(vertex.getJobVertexId()).thenReturn(TEST_VERTEX_ID);

    AccessExecutionVertex firstTask = Mockito.mock(AccessExecutionVertex.class);
    AccessExecutionVertex secondTask = Mockito.mock(AccessExecutionVertex.class);
    Mockito.when(firstTask.getParallelSubtaskIndex()).thenReturn(0);
    Mockito.when(secondTask.getParallelSubtaskIndex()).thenReturn(1);

    AccessExecutionVertex[] accessExecutionVertices = {firstTask, secondTask};
    Mockito.when(vertex.getTaskVertices()).thenReturn(accessExecutionVertices);
}
Example 11
Source File: CheckpointSettingsSerializableTest.java From flink with Apache License 2.0
@Test
public void testDeserializationOfUserCodeWithUserClassLoader() throws Exception {
    final ClassLoaderUtils.ObjectAndClassLoader<Serializable> outsideClassLoading = ClassLoaderUtils.createSerializableObjectFromNewClassLoader();
    final ClassLoader classLoader = outsideClassLoading.getClassLoader();
    final Serializable outOfClassPath = outsideClassLoading.getObject();

    final MasterTriggerRestoreHook.Factory[] hooks = { new TestFactory(outOfClassPath) };
    final SerializedValue<MasterTriggerRestoreHook.Factory[]> serHooks = new SerializedValue<>(hooks);

    final JobCheckpointingSettings checkpointingSettings = new JobCheckpointingSettings(
        Collections.<JobVertexID>emptyList(),
        Collections.<JobVertexID>emptyList(),
        Collections.<JobVertexID>emptyList(),
        new CheckpointCoordinatorConfiguration(
            1000L,
            10000L,
            0L,
            1,
            CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
            true,
            false,
            false,
            0),
        new SerializedValue<StateBackend>(new CustomStateBackend(outOfClassPath)),
        serHooks);

    final JobGraph jobGraph = new JobGraph(new JobID(), "test job");
    jobGraph.setSnapshotSettings(checkpointingSettings);

    // to serialize/deserialize the job graph to see if the behavior is correct under
    // distributed execution
    final JobGraph copy = CommonTestUtils.createCopySerializable(jobGraph);

    final Time timeout = Time.seconds(10L);
    final ExecutionGraph eg = ExecutionGraphBuilder.buildGraph(
        null,
        copy,
        new Configuration(),
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        mock(SlotProvider.class),
        classLoader,
        new StandaloneCheckpointRecoveryFactory(),
        timeout,
        new NoRestartStrategy(),
        new UnregisteredMetricsGroup(),
        VoidBlobWriter.getInstance(),
        timeout,
        log,
        NettyShuffleMaster.INSTANCE,
        NoOpJobMasterPartitionTracker.INSTANCE);

    assertEquals(1, eg.getCheckpointCoordinator().getNumberOfRegisteredMasterHooks());
    assertTrue(jobGraph.getCheckpointingSettings().getDefaultStateBackend().deserializeValue(classLoader) instanceof CustomStateBackend);
}
Example 12
Source File: MetricFetcherTest.java From flink with Apache License 2.0
@Test
public void testUpdate() {
    final Time timeout = Time.seconds(10L);

    // ========= setup TaskManager =========
    JobID jobID = new JobID();
    ResourceID tmRID = ResourceID.generate();

    // ========= setup QueryServices =========
    final MetricQueryServiceGateway jmQueryService = new TestingMetricQueryServiceGateway.Builder()
        .setQueryMetricsSupplier(() -> CompletableFuture.completedFuture(new MetricDumpSerialization.MetricSerializationResult(new byte[0], new byte[0], new byte[0], new byte[0], 0, 0, 0, 0)))
        .build();

    MetricDumpSerialization.MetricSerializationResult requestMetricsAnswer = createRequestDumpAnswer(tmRID, jobID);
    final MetricQueryServiceGateway tmQueryService = new TestingMetricQueryServiceGateway.Builder()
        .setQueryMetricsSupplier(() -> CompletableFuture.completedFuture(requestMetricsAnswer))
        .build();

    // ========= setup JobManager =========
    final TestingRestfulGateway restfulGateway = new TestingRestfulGateway.Builder()
        .setRequestMultipleJobDetailsSupplier(() -> CompletableFuture.completedFuture(new MultipleJobsDetails(Collections.emptyList())))
        .setRequestMetricQueryServiceGatewaysSupplier(() -> CompletableFuture.completedFuture(Collections.singleton(jmQueryService.getAddress())))
        .setRequestTaskManagerMetricQueryServiceGatewaysSupplier(() -> CompletableFuture.completedFuture(Collections.singleton(Tuple2.of(tmRID, tmQueryService.getAddress()))))
        .build();

    final GatewayRetriever<RestfulGateway> retriever = () -> CompletableFuture.completedFuture(restfulGateway);

    // ========= start MetricFetcher testing =========
    MetricFetcher fetcher = new MetricFetcherImpl<>(
        retriever,
        address -> CompletableFuture.completedFuture(tmQueryService),
        Executors.directExecutor(),
        timeout,
        MetricOptions.METRIC_FETCHER_UPDATE_INTERVAL.defaultValue());

    // verify that update fetches metrics and updates the store
    fetcher.update();
    MetricStore store = fetcher.getMetricStore();
    synchronized (store) {
        assertEquals("7", store.getJobManagerMetricStore().getMetric("abc.hist_min"));
        assertEquals("6", store.getJobManagerMetricStore().getMetric("abc.hist_max"));
        assertEquals("4.0", store.getJobManagerMetricStore().getMetric("abc.hist_mean"));
        assertEquals("0.5", store.getJobManagerMetricStore().getMetric("abc.hist_median"));
        assertEquals("5.0", store.getJobManagerMetricStore().getMetric("abc.hist_stddev"));
        assertEquals("0.75", store.getJobManagerMetricStore().getMetric("abc.hist_p75"));
        assertEquals("0.9", store.getJobManagerMetricStore().getMetric("abc.hist_p90"));
        assertEquals("0.95", store.getJobManagerMetricStore().getMetric("abc.hist_p95"));
        assertEquals("0.98", store.getJobManagerMetricStore().getMetric("abc.hist_p98"));
        assertEquals("0.99", store.getJobManagerMetricStore().getMetric("abc.hist_p99"));
        assertEquals("0.999", store.getJobManagerMetricStore().getMetric("abc.hist_p999"));
        assertEquals("x", store.getTaskManagerMetricStore(tmRID.toString()).metrics.get("abc.gauge"));
        assertEquals("5.0", store.getJobMetricStore(jobID.toString()).metrics.get("abc.jc"));
        assertEquals("2", store.getTaskMetricStore(jobID.toString(), "taskid").metrics.get("2.abc.tc"));
        assertEquals("1", store.getTaskMetricStore(jobID.toString(), "taskid").metrics.get("2.opname.abc.oc"));
    }
}
Example 13
Source File: CheckpointSettingsSerializableTest.java From flink with Apache License 2.0
@Test
public void testDeserializationOfUserCodeWithUserClassLoader() throws Exception {
    final CommonTestUtils.ObjectAndClassLoader outsideClassLoading = CommonTestUtils.createObjectFromNewClassLoader();
    final ClassLoader classLoader = outsideClassLoading.getClassLoader();
    final Serializable outOfClassPath = outsideClassLoading.getObject();

    final MasterTriggerRestoreHook.Factory[] hooks = { new TestFactory(outOfClassPath) };
    final SerializedValue<MasterTriggerRestoreHook.Factory[]> serHooks = new SerializedValue<>(hooks);

    final JobCheckpointingSettings checkpointingSettings = new JobCheckpointingSettings(
        Collections.<JobVertexID>emptyList(),
        Collections.<JobVertexID>emptyList(),
        Collections.<JobVertexID>emptyList(),
        new CheckpointCoordinatorConfiguration(
            1000L,
            10000L,
            0L,
            1,
            CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
            true,
            false,
            0),
        new SerializedValue<StateBackend>(new CustomStateBackend(outOfClassPath)),
        serHooks);

    final JobGraph jobGraph = new JobGraph(new JobID(), "test job");
    jobGraph.setSnapshotSettings(checkpointingSettings);

    // to serialize/deserialize the job graph to see if the behavior is correct under
    // distributed execution
    final JobGraph copy = CommonTestUtils.createCopySerializable(jobGraph);

    final Time timeout = Time.seconds(10L);
    final ExecutionGraph eg = ExecutionGraphBuilder.buildGraph(
        null,
        copy,
        new Configuration(),
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        mock(SlotProvider.class),
        classLoader,
        new StandaloneCheckpointRecoveryFactory(),
        timeout,
        new NoRestartStrategy(),
        new UnregisteredMetricsGroup(),
        VoidBlobWriter.getInstance(),
        timeout,
        log,
        NettyShuffleMaster.INSTANCE,
        NoOpPartitionTracker.INSTANCE);

    assertEquals(1, eg.getCheckpointCoordinator().getNumberOfRegisteredMasterHooks());
    assertTrue(jobGraph.getCheckpointingSettings().getDefaultStateBackend().deserializeValue(classLoader) instanceof CustomStateBackend);
}
Example 14
Source File: MetricRegistryImpl.java From flink with Apache License 2.0
/**
 * Shuts down this registry and the associated {@link MetricReporter}.
 *
 * <p>NOTE: This operation is asynchronous and returns a future which is completed
 * once the shutdown operation has been completed.
 *
 * @return Future which is completed once the {@link MetricRegistryImpl}
 * is shut down.
 */
public CompletableFuture<Void> shutdown() {
    synchronized (lock) {
        if (isShutdown) {
            return terminationFuture;
        } else {
            isShutdown = true;
            final Collection<CompletableFuture<Void>> terminationFutures = new ArrayList<>(3);
            final Time gracePeriod = Time.seconds(1L);

            if (metricQueryServiceRpcService != null) {
                final CompletableFuture<Void> metricQueryServiceRpcServiceTerminationFuture = metricQueryServiceRpcService.stopService();
                terminationFutures.add(metricQueryServiceRpcServiceTerminationFuture);
            }

            Throwable throwable = null;
            for (MetricReporter reporter : reporters) {
                try {
                    reporter.close();
                } catch (Throwable t) {
                    throwable = ExceptionUtils.firstOrSuppressed(t, throwable);
                }
            }
            reporters.clear();

            if (throwable != null) {
                terminationFutures.add(
                    FutureUtils.completedExceptionally(
                        new FlinkException("Could not shut down the metric reporters properly.", throwable)));
            }

            final CompletableFuture<Void> executorShutdownFuture = ExecutorUtils.nonBlockingShutdown(
                gracePeriod.toMilliseconds(),
                TimeUnit.MILLISECONDS,
                executor);
            terminationFutures.add(executorShutdownFuture);

            FutureUtils
                .completeAll(terminationFutures)
                .whenComplete(
                    (Void ignored, Throwable error) -> {
                        if (error != null) {
                            terminationFuture.completeExceptionally(error);
                        } else {
                            terminationFuture.complete(null);
                        }
                    });

            return terminationFuture;
        }
    }
}
Example 15
Source File: TestingSlotProviderStrategy.java From flink with Apache License 2.0
public static TestingSlotProviderStrategy from(SlotProvider slotProvider, boolean allowQueuedScheduling) {
    return new TestingSlotProviderStrategy(slotProvider, Time.seconds(10L), allowQueuedScheduling);
}
Example 16
Source File: MetricRegistryImpl.java From flink with Apache License 2.0
/**
 * Shuts down this registry and the associated {@link MetricReporter}.
 *
 * <p>NOTE: This operation is asynchronous and returns a future which is completed
 * once the shutdown operation has been completed.
 *
 * @return Future which is completed once the {@link MetricRegistryImpl}
 * is shut down.
 */
public CompletableFuture<Void> shutdown() {
    synchronized (lock) {
        if (isShutdown) {
            return terminationFuture;
        } else {
            isShutdown = true;
            final Collection<CompletableFuture<Void>> terminationFutures = new ArrayList<>(3);
            final Time gracePeriod = Time.seconds(1L);

            if (metricQueryServiceRpcService != null) {
                final CompletableFuture<Void> metricQueryServiceRpcServiceTerminationFuture = metricQueryServiceRpcService.stopService();
                terminationFutures.add(metricQueryServiceRpcServiceTerminationFuture);
            }

            Throwable throwable = null;
            for (ReporterAndSettings reporterAndSettings : reporters) {
                try {
                    reporterAndSettings.getReporter().close();
                } catch (Throwable t) {
                    throwable = ExceptionUtils.firstOrSuppressed(t, throwable);
                }
            }
            reporters.clear();

            if (throwable != null) {
                terminationFutures.add(
                    FutureUtils.completedExceptionally(
                        new FlinkException("Could not shut down the metric reporters properly.", throwable)));
            }

            final CompletableFuture<Void> executorShutdownFuture = ExecutorUtils.nonBlockingShutdown(
                gracePeriod.toMilliseconds(),
                TimeUnit.MILLISECONDS,
                executor);
            terminationFutures.add(executorShutdownFuture);

            FutureUtils
                .completeAll(terminationFutures)
                .whenComplete(
                    (Void ignored, Throwable error) -> {
                        if (error != null) {
                            terminationFuture.completeExceptionally(error);
                        } else {
                            terminationFuture.complete(null);
                        }
                    });

            return terminationFuture;
        }
    }
}
Example 17
Source File: ExecutionGraphDeploymentTest.java From flink with Apache License 2.0
private ExecutionGraph createExecutionGraph(Configuration configuration) throws Exception {
    final ScheduledExecutorService executor = TestingUtils.defaultExecutor();

    final JobID jobId = new JobID();
    final JobGraph jobGraph = new JobGraph(jobId, "test");
    jobGraph.setSnapshotSettings(
        new JobCheckpointingSettings(
            Collections.<JobVertexID>emptyList(),
            Collections.<JobVertexID>emptyList(),
            Collections.<JobVertexID>emptyList(),
            new CheckpointCoordinatorConfiguration(
                100,
                10 * 60 * 1000,
                0,
                1,
                CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
                false,
                false,
                false,
                0),
            null));

    final Time timeout = Time.seconds(10L);

    return ExecutionGraphBuilder.buildGraph(
        null,
        jobGraph,
        configuration,
        executor,
        executor,
        new ProgrammedSlotProvider(1),
        getClass().getClassLoader(),
        new StandaloneCheckpointRecoveryFactory(),
        timeout,
        new NoRestartStrategy(),
        new UnregisteredMetricsGroup(),
        blobWriter,
        timeout,
        LoggerFactory.getLogger(getClass()),
        NettyShuffleMaster.INSTANCE,
        NoOpJobMasterPartitionTracker.INSTANCE);
}
Example 18
Source File: JarSubmissionITCase.java From Flink-CEPplus with Apache License 2.0
JarHandlers(final Path jarDir, final TestingDispatcherGateway restfulGateway) {
    final GatewayRetriever<TestingDispatcherGateway> gatewayRetriever = () -> CompletableFuture.completedFuture(restfulGateway);
    final Time timeout = Time.seconds(10);
    final Map<String, String> responseHeaders = Collections.emptyMap();
    final Executor executor = TestingUtils.defaultExecutor();

    uploadHandler = new JarUploadHandler(
        gatewayRetriever,
        timeout,
        responseHeaders,
        JarUploadHeaders.getInstance(),
        jarDir,
        executor);

    listHandler = new JarListHandler(
        gatewayRetriever,
        timeout,
        responseHeaders,
        JarListHeaders.getInstance(),
        CompletableFuture.completedFuture("shazam://localhost:12345"),
        jarDir.toFile(),
        executor);

    planHandler = new JarPlanHandler(
        gatewayRetriever,
        timeout,
        responseHeaders,
        JarPlanHeaders.getInstance(),
        jarDir,
        new Configuration(),
        executor);

    runHandler = new JarRunHandler(
        gatewayRetriever,
        timeout,
        responseHeaders,
        JarRunHeaders.getInstance(),
        jarDir,
        new Configuration(),
        executor);

    deleteHandler = new JarDeleteHandler(
        gatewayRetriever,
        timeout,
        responseHeaders,
        JarDeleteHeaders.getInstance(),
        jarDir,
        executor);
}
Example 19
Source File: WikipediaEditsSourceTest.java From Flink-CEPplus with Apache License 2.0
/**
 * We first check the connection to the IRC server. If it fails, this test is ignored.
 */
@Test
@RetryOnFailure(times = 1)
public void testWikipediaEditsSource() throws Exception {
    if (canConnect(1, TimeUnit.SECONDS)) {
        final Time testTimeout = Time.seconds(60);
        final WikipediaEditsSource wikipediaEditsSource = new WikipediaEditsSource();

        ExecutorService executorService = null;
        try {
            executorService = Executors.newSingleThreadExecutor();
            BlockingQueue<Object> collectedEvents = new ArrayBlockingQueue<>(1);
            AtomicReference<Exception> asyncError = new AtomicReference<>();

            // Execute the source in a different thread and collect events into the queue.
            // We do this in a separate thread in order to not block the main test thread
            // indefinitely in case that something bad happens (like not receiving any
            // events)
            executorService.execute(() -> {
                try {
                    wikipediaEditsSource.run(new CollectingSourceContext<>(collectedEvents));
                } catch (Exception e) {
                    boolean interrupted = e instanceof InterruptedException;
                    if (!interrupted) {
                        LOG.warn("Failure in WikipediaEditsSource", e);
                    }
                    asyncError.compareAndSet(null, e);
                }
            });

            long deadline = deadlineNanos(testTimeout);

            Object event = null;
            Exception error = null;

            // Check event or error
            while (event == null && error == null && System.nanoTime() < deadline) {
                event = collectedEvents.poll(1, TimeUnit.SECONDS);
                error = asyncError.get();
            }

            if (error != null) {
                // We don't use assertNull, because we want to include the error message
                fail("Failure in WikipediaEditsSource: " + error.getMessage());
            }

            assertNotNull("Did not receive a WikipediaEditEvent within the desired timeout", event);
            assertTrue("Received unexpected event " + event, event instanceof WikipediaEditEvent);
        } finally {
            wikipediaEditsSource.cancel();

            if (executorService != null) {
                executorService.shutdownNow();
                executorService.awaitTermination(1, TimeUnit.SECONDS);
            }
        }
    } else {
        LOG.info("Skipping test, because not able to connect to IRC server.");
    }
}