org.apache.flink.runtime.akka.AkkaUtils Java Examples
The following examples show how to use org.apache.flink.runtime.akka.AkkaUtils. Each example is taken from an open-source project; the source file and license are noted above the code.
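Before the individual examples, the snippet below is a minimal sketch of the AkkaUtils calls that recur throughout this page: reading the default RPC timeout, starting a local actor system from a Flink Configuration, and shutting it down again. The class name is illustrative, and the sketch assumes a Flink version matching the examples below, where getDefaultTimeout() returns a Time and terminateActorSystem() returns a CompletableFuture<Void>.

import akka.actor.ActorSystem;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.akka.AkkaUtils;

public class AkkaUtilsOverviewSketch {

    public static void main(String[] args) throws Exception {
        // Default "ask" timeout used for RPCs; assumed to return a Time in the
        // Flink versions these examples were taken from.
        Time timeout = AkkaUtils.getDefaultTimeout();
        System.out.println("Default RPC timeout: " + timeout);

        // Start a local (non-remote) actor system from a Flink configuration.
        Configuration flinkConfig = new Configuration();
        ActorSystem actorSystem = AkkaUtils.createLocalActorSystem(flinkConfig);

        // ... use the actor system, e.g. to back an AkkaRpcService ...

        // Shut the actor system down again; terminateActorSystem() completes
        // its future once termination has finished.
        AkkaUtils.terminateActorSystem(actorSystem).get();
    }
}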
Example #1
Source File: ExecutionVertexCancelTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void testCancelFromCreated() {
    try {
        final JobVertexID jid = new JobVertexID();
        final ExecutionJobVertex ejv = getExecutionJobVertex(jid);

        final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());

        assertEquals(ExecutionState.CREATED, vertex.getExecutionState());

        vertex.cancel();

        assertEquals(ExecutionState.CANCELED, vertex.getExecutionState());

        assertNull(vertex.getFailureCause());

        assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELING) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELED) > 0);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #2
Source File: ClusterClient.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
public ClusterClient(
        Configuration flinkConfig,
        HighAvailabilityServices highAvailabilityServices,
        boolean sharedHaServices,
        ActorSystemLoader actorSystemLoader) {
    this.flinkConfig = Preconditions.checkNotNull(flinkConfig);
    this.compiler = new Optimizer(new DataStatistics(), new DefaultCostEstimator(), flinkConfig);

    this.timeout = AkkaUtils.getClientTimeout(flinkConfig);
    this.lookupTimeout = AkkaUtils.getLookupTimeout(flinkConfig);

    this.actorSystemLoader = Preconditions.checkNotNull(actorSystemLoader);

    this.highAvailabilityServices = Preconditions.checkNotNull(highAvailabilityServices);

    this.sharedHaServices = sharedHaServices;
}
Example #3
Source File: CliFrontend.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
public CliFrontend(
        Configuration configuration,
        List<CustomCommandLine<?>> customCommandLines) throws Exception {
    this.configuration = Preconditions.checkNotNull(configuration);
    this.customCommandLines = Preconditions.checkNotNull(customCommandLines);

    try {
        FileSystem.initialize(this.configuration);
    } catch (IOException e) {
        throw new Exception("Error while setting the default " +
            "filesystem scheme from configuration.", e);
    }

    this.customCommandLineOptions = new Options();

    for (CustomCommandLine<?> customCommandLine : customCommandLines) {
        customCommandLine.addGeneralOptions(customCommandLineOptions);
        customCommandLine.addRunOptions(customCommandLineOptions);
    }

    this.clientTimeout = AkkaUtils.getClientTimeout(this.configuration);
    this.defaultParallelism = configuration.getInteger(CoreOptions.DEFAULT_PARALLELISM);
}
Example #4
Source File: MiniCluster.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
private CompletableFuture<Void> closeMetricSystem() {
    synchronized (lock) {
        final ArrayList<CompletableFuture<Void>> terminationFutures = new ArrayList<>(2);

        // metrics shutdown
        if (metricRegistry != null) {
            terminationFutures.add(metricRegistry.shutdown());
            metricRegistry = null;
        }

        if (metricQueryServiceActorSystem != null) {
            terminationFutures.add(AkkaUtils.terminateActorSystem(metricQueryServiceActorSystem));
        }

        return FutureUtils.completeAll(terminationFutures);
    }
}
Example #5
Source File: BootstrapTools.java From flink with Apache License 2.0 | 6 votes |
/**
 * Starts a remote ActorSystem at given address and specific port range.
 *
 * @param configuration The Flink configuration
 * @param externalAddress The external address to access the ActorSystem.
 * @param externalPortRange The choosing range of the external port to access the ActorSystem.
 * @param logger The logger to output log information.
 * @return The ActorSystem which has been started
 * @throws Exception Thrown when actor system cannot be started in specified port range
 */
@VisibleForTesting
public static ActorSystem startRemoteActorSystem(
        Configuration configuration,
        String externalAddress,
        String externalPortRange,
        Logger logger) throws Exception {
    return startRemoteActorSystem(
        configuration,
        AkkaUtils.getFlinkActorSystemName(),
        externalAddress,
        externalPortRange,
        NetUtils.getWildcardIPAddress(),
        Optional.empty(),
        logger,
        ForkJoinExecutorConfiguration.fromConfiguration(configuration),
        null);
}
Example #6
Source File: MiniCluster.java From flink with Apache License 2.0 | 6 votes |
/**
 * Factory method to instantiate the RPC service.
 *
 * @param akkaRpcServiceConfig
 *      The configuration of the Akka RPC service, including the default RPC timeout for asynchronous "ask" requests.
 * @param remoteEnabled
 *      True, if the RPC service should be reachable from other (remote) RPC services.
 * @param bindAddress
 *      The address to bind the RPC service to. Only relevant when "remoteEnabled" is true.
 *
 * @return The instantiated RPC service
 */
protected RpcService createRpcService(
        AkkaRpcServiceConfiguration akkaRpcServiceConfig,
        boolean remoteEnabled,
        String bindAddress) {

    final Config akkaConfig;

    if (remoteEnabled) {
        akkaConfig = AkkaUtils.getAkkaConfig(akkaRpcServiceConfig.getConfiguration(), bindAddress, 0);
    } else {
        akkaConfig = AkkaUtils.getAkkaConfig(akkaRpcServiceConfig.getConfiguration());
    }

    final Config effectiveAkkaConfig = AkkaUtils.testDispatcherConfig().withFallback(akkaConfig);

    final ActorSystem actorSystem = AkkaUtils.createActorSystem(effectiveAkkaConfig);

    return new AkkaRpcService(actorSystem, akkaRpcServiceConfig);
}
Example #7
Source File: LeaderConnectionInfo.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
public LeaderConnectionInfo(String address, UUID leaderSessionID) throws FlinkException {
    this.address = address;
    this.leaderSessionID = leaderSessionID;

    final Address akkaAddress;
    // this only works as long as the address is Akka based
    try {
        akkaAddress = AkkaUtils.getAddressFromAkkaURL(address);
    } catch (MalformedURLException e) {
        throw new FlinkException("Could not extract the hostname from the given address \'" +
            address + "\'.", e);
    }

    if (akkaAddress.host().isDefined()) {
        hostname = akkaAddress.host().get();
    } else {
        hostname = "localhost";
    }

    if (akkaAddress.port().isDefined()) {
        port = (int) akkaAddress.port().get();
    } else {
        port = -1;
    }
}
Example #8
Source File: LeaderRetrievalUtils.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override
public void notifyLeaderAddress(final String leaderAddress, final UUID leaderSessionID) {
    if (leaderAddress != null && !leaderAddress.equals("") && !futureActorGateway.isCompleted()) {
        AkkaUtils.getActorRefFuture(leaderAddress, actorSystem, timeout)
            .map(new Mapper<ActorRef, ActorGateway>() {
                public ActorGateway apply(ActorRef ref) {
                    return new AkkaActorGateway(ref, leaderSessionID);
                }
            }, actorSystem.dispatcher())
            .onComplete(new OnComplete<ActorGateway>() {
                @Override
                public void onComplete(Throwable failure, ActorGateway success) throws Throwable {
                    if (failure == null) {
                        completePromise(success);
                    } else {
                        LOG.debug("Could not retrieve the leader for address " + leaderAddress + ".", failure);
                    }
                }
            }, actorSystem.dispatcher());
    }
}
Example #9
Source File: AkkaRpcService.java From flink with Apache License 2.0 | 6 votes |
public AkkaRpcService(final ActorSystem actorSystem, final AkkaRpcServiceConfiguration configuration) {
    this.actorSystem = checkNotNull(actorSystem, "actor system");
    this.configuration = checkNotNull(configuration, "akka rpc service configuration");

    Address actorSystemAddress = AkkaUtils.getAddress(actorSystem);

    if (actorSystemAddress.host().isDefined()) {
        address = actorSystemAddress.host().get();
    } else {
        address = "";
    }

    if (actorSystemAddress.port().isDefined()) {
        port = (Integer) actorSystemAddress.port().get();
    } else {
        port = -1;
    }

    internalScheduledExecutor = new ActorSystemScheduledExecutorAdapter(actorSystem);

    terminationFuture = new CompletableFuture<>();

    stopped = false;
}
Example #10
Source File: ExecutionGraphTestUtils.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
public static ExecutionJobVertex getExecutionVertex(
        JobVertexID id, ScheduledExecutorService executor) throws Exception {

    JobVertex ajv = new JobVertex("TestVertex", id);
    ajv.setInvokableClass(mock(AbstractInvokable.class).getClass());

    ExecutionGraph graph = new ExecutionGraph(
        executor,
        executor,
        new JobID(),
        "test job",
        new Configuration(),
        new SerializedValue<>(new ExecutionConfig()),
        AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(),
        new TestingSlotProvider(ignored -> new CompletableFuture<>()));

    graph.start(TestingComponentMainThreadExecutorServiceAdapter.forMainThread());

    return spy(new ExecutionJobVertex(graph, ajv, 1, AkkaUtils.getDefaultTimeout()));
}
Example #11
Source File: ExecutionVertexSchedulingTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Test
public void testScheduleToDeploying() {
    try {
        final ExecutionJobVertex ejv = getExecutionVertex(new JobVertexID());
        final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());

        final LogicalSlot slot = new TestingLogicalSlot();
        CompletableFuture<LogicalSlot> future = CompletableFuture.completedFuture(slot);

        assertEquals(ExecutionState.CREATED, vertex.getExecutionState());

        // try to deploy to the slot
        vertex.scheduleForExecution(
            new TestingSlotProvider(ignore -> future),
            false,
            LocationPreferenceConstraint.ALL,
            Collections.emptySet());

        assertEquals(ExecutionState.DEPLOYING, vertex.getExecutionState());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #12
Source File: ExecutionVertexSchedulingTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void testScheduleToDeploying() {
    try {
        final ExecutionJobVertex ejv = getExecutionVertex(new JobVertexID());
        final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());

        final LogicalSlot slot = new TestingLogicalSlotBuilder().createTestingLogicalSlot();
        CompletableFuture<LogicalSlot> future = CompletableFuture.completedFuture(slot);

        assertEquals(ExecutionState.CREATED, vertex.getExecutionState());

        // try to deploy to the slot
        vertex.scheduleForExecution(
            TestingSlotProviderStrategy.from(new TestingSlotProvider(ignore -> future), false),
            LocationPreferenceConstraint.ALL,
            Collections.emptySet());

        assertEquals(ExecutionState.DEPLOYING, vertex.getExecutionState());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #13
Source File: MetricRegistryImplTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/**
 * Tests that the query actor will be stopped when the MetricRegistry is shut down.
 */
@Test
public void testQueryActorShutdown() throws Exception {
    final FiniteDuration timeout = new FiniteDuration(10L, TimeUnit.SECONDS);

    MetricRegistryImpl registry = new MetricRegistryImpl(MetricRegistryConfiguration.defaultMetricRegistryConfiguration());

    final ActorSystem actorSystem = AkkaUtils.createDefaultActorSystem();

    registry.startQueryService(actorSystem, null);

    ActorRef queryServiceActor = registry.getQueryService();

    registry.shutdown().get();

    try {
        Await.result(actorSystem.actorSelection(queryServiceActor.path()).resolveOne(timeout), timeout);

        fail("The query actor should be terminated resulting in a ActorNotFound exception.");
    } catch (ActorNotFound e) {
        // we expect the query actor to be shut down
    }
}
Example #14
Source File: JobMasterConfiguration.java From flink with Apache License 2.0 | 6 votes |
public static JobMasterConfiguration fromConfiguration(Configuration configuration) {

    final Time rpcTimeout = AkkaUtils.getTimeoutAsTime(configuration);

    final Time slotRequestTimeout = Time.milliseconds(configuration.getLong(JobManagerOptions.SLOT_REQUEST_TIMEOUT));

    final String tmpDirectory = ConfigurationUtils.parseTempDirectories(configuration)[0];

    final RetryingRegistrationConfiguration retryingRegistrationConfiguration = RetryingRegistrationConfiguration.fromConfiguration(configuration);

    return new JobMasterConfiguration(
        rpcTimeout,
        slotRequestTimeout,
        tmpDirectory,
        retryingRegistrationConfiguration,
        configuration);
}
Example #15
Source File: QueryableWindowOperatorEvicting.java From yahoo-streaming-benchmark with Apache License 2.0 | 6 votes |
private static void initializeActorSystem(String hostname) throws UnknownHostException {
    synchronized (actorSystemLock) {
        if (actorSystem == null) {
            Configuration config = new Configuration();
            Option<scala.Tuple2<String, Object>> remoting = new Some<>(new scala.Tuple2<String, Object>(hostname, 0));

            Config akkaConfig = AkkaUtils.getAkkaConfig(config, remoting);

            LOG.info("Starting actor system.");
            actorSystem = ActorSystem.create("queryableWindow", akkaConfig);
            actorSystemUsers = 1;
        } else {
            LOG.info("Actor system has already been started.");
            actorSystemUsers++;
        }
    }
}
Example #16
Source File: ExecutionGraphRestartTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
private static ExecutionGraph newExecutionGraph(RestartStrategy restartStrategy, SlotProvider slotProvider) throws IOException {
    final ExecutionGraph executionGraph = new ExecutionGraph(
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        new JobID(),
        "Test job",
        new Configuration(),
        new SerializedValue<>(new ExecutionConfig()),
        AkkaUtils.getDefaultTimeout(),
        restartStrategy,
        slotProvider);

    executionGraph.start(TestingComponentMainThreadExecutorServiceAdapter.forMainThread());

    return executionGraph;
}
Example #17
Source File: ExecutionVertexDeploymentTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void testFailExternallyDuringDeploy() {
    try {
        final JobVertexID jid = new JobVertexID();

        final ExecutionJobVertex ejv = getExecutionVertex(jid, new DirectScheduledExecutorService());
        final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());

        TestingLogicalSlot testingLogicalSlot = new TestingLogicalSlot(new SubmitBlockingSimpleAckingTaskManagerGateway());

        assertEquals(ExecutionState.CREATED, vertex.getExecutionState());
        vertex.deployToSlot(testingLogicalSlot);
        assertEquals(ExecutionState.DEPLOYING, vertex.getExecutionState());

        Exception testError = new Exception("test error");
        vertex.fail(testError);

        assertEquals(ExecutionState.FAILED, vertex.getExecutionState());
        assertEquals(testError, vertex.getFailureCause());

        assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.DEPLOYING) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.FAILED) > 0);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #18
Source File: BootstrapTools.java From flink with Apache License 2.0 | 5 votes |
/**
 * Starts an Actor System at a specific port.
 *
 * @param configuration The Flink configuration.
 * @param actorSystemName Name of the started {@link ActorSystem}
 * @param listeningAddress The address to listen at.
 * @param listeningPort The port to listen at.
 * @param logger the logger to output log information.
 * @param actorSystemExecutorConfiguration configuration for the ActorSystem's underlying executor
 * @return The ActorSystem which has been started.
 * @throws Exception
 */
public static ActorSystem startActorSystem(
        Configuration configuration,
        String actorSystemName,
        String listeningAddress,
        int listeningPort,
        Logger logger,
        ActorSystemExecutorConfiguration actorSystemExecutorConfiguration) throws Exception {

    String hostPortUrl = NetUtils.unresolvedHostAndPortToNormalizedString(listeningAddress, listeningPort);
    logger.info("Trying to start actor system at {}", hostPortUrl);

    try {
        Config akkaConfig = AkkaUtils.getAkkaConfig(
            configuration,
            new Some<>(new Tuple2<>(listeningAddress, listeningPort)),
            actorSystemExecutorConfiguration.getAkkaConfig());

        logger.debug("Using akka configuration\n {}", akkaConfig);

        ActorSystem actorSystem = AkkaUtils.createActorSystem(actorSystemName, akkaConfig);

        logger.info("Actor system started at {}", AkkaUtils.getAddress(actorSystem));
        return actorSystem;
    } catch (Throwable t) {
        if (t instanceof ChannelException) {
            Throwable cause = t.getCause();
            if (cause != null && t.getCause() instanceof BindException) {
                throw new IOException("Unable to create ActorSystem at address " + hostPortUrl +
                    " : " + cause.getMessage(), t);
            }
        }
        throw new Exception("Could not create actor system", t);
    }
}
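For orientation, here is a hedged usage sketch of the helper above. It is not taken from the original test code: the actor system name is made up, and it assumes BootstrapTools lives in org.apache.flink.runtime.clusterframework and that its nested ForkJoinExecutorConfiguration (seen in Example #5) can serve as the executor configuration.

import akka.actor.ActorSystem;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.akka.AkkaUtils;
import org.apache.flink.runtime.clusterframework.BootstrapTools;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class StartActorSystemSketch {

    private static final Logger LOG = LoggerFactory.getLogger(StartActorSystemSketch.class);

    public static void main(String[] args) throws Exception {
        Configuration flinkConfig = new Configuration();

        // The actor system name is illustrative; listening port 0 lets the OS pick a free port.
        ActorSystem actorSystem = BootstrapTools.startActorSystem(
            flinkConfig,
            "example-actor-system",
            "localhost",
            0,
            LOG,
            BootstrapTools.ForkJoinExecutorConfiguration.fromConfiguration(flinkConfig));

        // ... use the actor system ...

        AkkaUtils.terminateActorSystem(actorSystem).get();
    }
}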
Example #19
Source File: ExecutionVertexCancelTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void testCancelFromRunning() {
    try {
        final JobVertexID jid = new JobVertexID();
        final ExecutionJobVertex ejv = getExecutionVertex(jid, new DirectScheduledExecutorService());

        final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());

        LogicalSlot slot = new TestingLogicalSlot(new CancelSequenceSimpleAckingTaskManagerGateway(1));

        setVertexResource(vertex, slot);
        setVertexState(vertex, ExecutionState.RUNNING);

        assertEquals(ExecutionState.RUNNING, vertex.getExecutionState());

        vertex.cancel();
        vertex.getCurrentExecutionAttempt().completeCancelling(); // response by task manager once actually canceled

        assertEquals(ExecutionState.CANCELED, vertex.getExecutionState());

        assertFalse(slot.isAlive());

        assertNull(vertex.getFailureCause());

        assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELING) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELED) > 0);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #20
Source File: PointwisePatternTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
private ExecutionGraph getDummyExecutionGraph() throws Exception {
    return new ExecutionGraph(
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        new JobID(),
        "Test Job Sample Name",
        new Configuration(),
        new SerializedValue<>(new ExecutionConfig()),
        AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(),
        new TestingSlotProvider(ignored -> new CompletableFuture<>()));
}
Example #21
Source File: ExecutionVertexDeploymentTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testDeployFailedAsynchronously() {
    try {
        final JobVertexID jid = new JobVertexID();
        final ExecutionJobVertex ejv = getExecutionJobVertex(jid);
        final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());

        final LogicalSlot slot = new TestingLogicalSlotBuilder().setTaskManagerGateway(new SubmitFailingSimpleAckingTaskManagerGateway()).createTestingLogicalSlot();

        assertEquals(ExecutionState.CREATED, vertex.getExecutionState());

        vertex.deployToSlot(slot);

        // wait until the state transition must be done
        for (int i = 0; i < 100; i++) {
            if (vertex.getExecutionState() == ExecutionState.FAILED && vertex.getFailureCause() != null) {
                break;
            } else {
                Thread.sleep(10);
            }
        }

        assertEquals(ExecutionState.FAILED, vertex.getExecutionState());
        assertNotNull(vertex.getFailureCause());
        assertTrue(vertex.getFailureCause().getMessage().contains(ERROR_MESSAGE));

        assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.DEPLOYING) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.FAILED) > 0);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #22
Source File: ExecutionVertexDeploymentTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void testDeployFailedSynchronous() {
    try {
        final JobVertexID jid = new JobVertexID();
        final ExecutionJobVertex ejv = getExecutionVertex(jid, new DirectScheduledExecutorService());
        final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());

        final LogicalSlot slot = new TestingLogicalSlot(new SubmitFailingSimpleAckingTaskManagerGateway());

        assertEquals(ExecutionState.CREATED, vertex.getExecutionState());

        vertex.deployToSlot(slot);

        assertEquals(ExecutionState.FAILED, vertex.getExecutionState());
        assertNotNull(vertex.getFailureCause());
        assertTrue(vertex.getFailureCause().getMessage().contains(ERROR_MESSAGE));

        assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.DEPLOYING) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.FAILED) > 0);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #23
Source File: ExecutionVertexCancelTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testCancelFromRunning() {
    try {
        final JobVertexID jid = new JobVertexID();
        final ExecutionJobVertex ejv = ExecutionGraphTestUtils.getExecutionJobVertex(jid, new DirectScheduledExecutorService());

        final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());

        LogicalSlot slot = new TestingLogicalSlotBuilder().setTaskManagerGateway(new CancelSequenceSimpleAckingTaskManagerGateway(1)).createTestingLogicalSlot();

        setVertexResource(vertex, slot);
        setVertexState(vertex, ExecutionState.RUNNING);

        assertEquals(ExecutionState.RUNNING, vertex.getExecutionState());

        vertex.cancel();
        vertex.getCurrentExecutionAttempt().completeCancelling(); // response by task manager once actually canceled

        assertEquals(ExecutionState.CANCELED, vertex.getExecutionState());

        assertFalse(slot.isAlive());

        assertNull(vertex.getFailureCause());

        assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELING) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELED) > 0);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #24
Source File: ExecutionVertexDeploymentTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testDeployFailedSynchronous() {
    try {
        final JobVertexID jid = new JobVertexID();
        final ExecutionJobVertex ejv = ExecutionGraphTestUtils.getExecutionJobVertex(jid, new DirectScheduledExecutorService());
        final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());

        final LogicalSlot slot = new TestingLogicalSlotBuilder().setTaskManagerGateway(new SubmitFailingSimpleAckingTaskManagerGateway()).createTestingLogicalSlot();

        assertEquals(ExecutionState.CREATED, vertex.getExecutionState());

        vertex.deployToSlot(slot);

        assertEquals(ExecutionState.FAILED, vertex.getExecutionState());
        assertNotNull(vertex.getFailureCause());
        assertTrue(vertex.getFailureCause().getMessage().contains(ERROR_MESSAGE));

        assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.DEPLOYING) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.FAILED) > 0);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #25
Source File: MesosServicesUtils.java From flink with Apache License 2.0 | 5 votes |
/**
 * Creates a {@link MesosServices} instance depending on the high availability settings.
 *
 * @param configuration containing the high availability settings
 * @param hostname the hostname to advertise to remote clients
 * @return a mesos services instance
 * @throws Exception if the mesos services instance could not be created
 */
public static MesosServices createMesosServices(Configuration configuration, String hostname) throws Exception {

    ActorSystem localActorSystem = AkkaUtils.createLocalActorSystem(configuration);

    MesosArtifactServer artifactServer = createArtifactServer(configuration, hostname);

    HighAvailabilityMode highAvailabilityMode = HighAvailabilityMode.fromConfig(configuration);

    switch (highAvailabilityMode) {
        case NONE:
            return new StandaloneMesosServices(localActorSystem, artifactServer);

        case ZOOKEEPER:
            final String zkMesosRootPath = configuration.getString(
                HighAvailabilityOptions.HA_ZOOKEEPER_MESOS_WORKERS_PATH);

            ZooKeeperUtilityFactory zooKeeperUtilityFactory = new ZooKeeperUtilityFactory(
                configuration,
                zkMesosRootPath);

            return new ZooKeeperMesosServices(localActorSystem, artifactServer, zooKeeperUtilityFactory);

        default:
            throw new Exception("High availability mode " + highAvailabilityMode + " is not supported.");
    }
}
Example #26
Source File: ExecutionGraphRescalingTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Verifies that building an {@link ExecutionGraph} from a {@link JobGraph} with
 * parallelism higher than the maximum parallelism fails.
 */
@Test
public void testExecutionGraphConstructionFailsRescaleDopExceedMaxParallelism() throws Exception {

    final Configuration config = new Configuration();

    final int initialParallelism = 1;
    final int maxParallelism = 10;
    final JobVertex[] jobVertices = createVerticesForSimpleBipartiteJobGraph(initialParallelism, maxParallelism);
    final JobGraph jobGraph = new JobGraph(jobVertices);

    for (JobVertex jv : jobVertices) {
        jv.setParallelism(maxParallelism + 1);
    }

    try {
        // this should fail since we set the parallelism to maxParallelism + 1
        ExecutionGraphBuilder.buildGraph(
            null,
            jobGraph,
            config,
            TestingUtils.defaultExecutor(),
            TestingUtils.defaultExecutor(),
            new TestingSlotProvider(ignore -> new CompletableFuture<>()),
            Thread.currentThread().getContextClassLoader(),
            new StandaloneCheckpointRecoveryFactory(),
            AkkaUtils.getDefaultTimeout(),
            new NoRestartStrategy(),
            new UnregisteredMetricsGroup(),
            VoidBlobWriter.getInstance(),
            AkkaUtils.getDefaultTimeout(),
            TEST_LOGGER,
            NettyShuffleMaster.INSTANCE,
            NoOpJobMasterPartitionTracker.INSTANCE);

        fail("Building the ExecutionGraph with a parallelism higher than the max parallelism should fail.");
    } catch (JobException e) {
        // expected, ignore
    }
}
Example #27
Source File: ExecutionVertexSchedulingTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void testSlotReleasedWhenScheduledImmediately() {
    try {
        final ExecutionJobVertex ejv = getExecutionVertex(new JobVertexID());
        final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0], AkkaUtils.getDefaultTimeout());

        // a slot that cannot be deployed to
        final LogicalSlot slot = new TestingLogicalSlot();
        slot.releaseSlot(new Exception("Test Exception"));
        assertFalse(slot.isAlive());

        CompletableFuture<LogicalSlot> future = new CompletableFuture<>();
        future.complete(slot);

        assertEquals(ExecutionState.CREATED, vertex.getExecutionState());

        // try to deploy to the slot
        vertex.scheduleForExecution(new TestingSlotProvider((i) -> future), false, LocationPreferenceConstraint.ALL, Collections.emptySet());

        // will have failed
        assertEquals(ExecutionState.FAILED, vertex.getExecutionState());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #28
Source File: BootstrapToolsTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Tests that we can concurrently create two {@link ActorSystem} without port conflicts.
 * This effectively tests that we don't open a socket to check for a port's availability.
 * See FLINK-10580 for more details.
 */
@Test
public void testConcurrentActorSystemCreation() throws Exception {
    final int concurrentCreations = 10;
    final ExecutorService executorService = Executors.newFixedThreadPool(concurrentCreations);
    final CyclicBarrier cyclicBarrier = new CyclicBarrier(concurrentCreations);

    try {
        final List<CompletableFuture<Void>> actorSystemFutures = IntStream.range(0, concurrentCreations)
            .mapToObj(
                ignored ->
                    CompletableFuture.supplyAsync(
                        CheckedSupplier.unchecked(() -> {
                            cyclicBarrier.await();

                            return BootstrapTools.startActorSystem(
                                new Configuration(),
                                "localhost",
                                "0",
                                LOG);
                        }),
                        executorService))
            .map(
                // terminate ActorSystems
                actorSystemFuture -> actorSystemFuture.thenCompose(AkkaUtils::terminateActorSystem)
            ).collect(Collectors.toList());

        FutureUtils.completeAll(actorSystemFutures).get();
    } finally {
        ExecutorUtils.gracefulShutdown(10000L, TimeUnit.MILLISECONDS, executorService);
    }
}
Example #29
Source File: TestingRpcService.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Creates a new {@code TestingRpcService}, using the given configuration.
 */
public TestingRpcService(Configuration configuration) {
    super(AkkaUtils.createLocalActorSystem(configuration), AkkaRpcServiceConfiguration.fromConfiguration(configuration));

    this.registeredConnections = new ConcurrentHashMap<>();
}
Example #30
Source File: BootstrapTools.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Starts an ActorSystem with the given configuration listening at the address/ports.
 *
 * @param configuration The Flink configuration
 * @param listeningAddress The address to listen at.
 * @param portRangeDefinition The port range to choose a port from.
 * @param logger The logger to output log information.
 * @param actorSystemExecutorConfiguration configuration for the ActorSystem's underlying executor
 * @return The ActorSystem which has been started
 * @throws Exception Thrown when actor system cannot be started in specified port range
 */
public static ActorSystem startActorSystem(
        Configuration configuration,
        String listeningAddress,
        String portRangeDefinition,
        Logger logger,
        @Nonnull ActorSystemExecutorConfiguration actorSystemExecutorConfiguration) throws Exception {
    return startActorSystem(
        configuration,
        AkkaUtils.getFlinkActorSystemName(),
        listeningAddress,
        portRangeDefinition,
        logger,
        actorSystemExecutorConfiguration);
}