Java Code Examples for org.apache.flink.configuration.Configuration#setString()
The following examples show how to use
org.apache.flink.configuration.Configuration#setString(). The examples are taken from open source projects; the source file and originating project are noted above each example.
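Before the project examples, here is a minimal sketch of the two setString overloads that appear throughout: one taking a raw String key and one taking a typed ConfigOption<String>. The wrapping class, the main method, and the concrete values are illustrative only; the key "s3.access.key" and the option RestOptions.ADDRESS are the same ones used in the examples below.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;

public class SetStringSketch {

    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Overload 1: raw String key (used for filesystem credentials in the examples below)
        conf.setString("s3.access.key", "my-access-key"); // illustrative value

        // Overload 2: typed ConfigOption<String> (used for most built-in Flink options)
        conf.setString(RestOptions.ADDRESS, "localhost");

        // Values can be read back with the matching getString overloads
        System.out.println(conf.getString(RestOptions.ADDRESS));
        System.out.println(conf.getString("s3.access.key", "default-value"));
    }
}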
Example 1
Source File: PrestoS3RecoverableWriterTest.java From flink with Apache License 2.0
@BeforeClass
public static void checkCredentialsAndSetup() throws IOException {
    // check whether credentials exist
    S3TestCredentials.assumeCredentialsAvailable();

    // initialize configuration with valid credentials
    final Configuration conf = new Configuration();
    conf.setString("s3.access.key", S3TestCredentials.getS3AccessKey());
    conf.setString("s3.secret.key", S3TestCredentials.getS3SecretKey());

    conf.setLong(PART_UPLOAD_MIN_SIZE, PART_UPLOAD_MIN_SIZE_VALUE);
    conf.setInteger(MAX_CONCURRENT_UPLOADS, MAX_CONCURRENT_UPLOADS_VALUE);

    final String defaultTmpDir = conf.getString(CoreOptions.TMP_DIRS) + "s3_tmp_dir";
    conf.setString(CoreOptions.TMP_DIRS, defaultTmpDir);

    FileSystem.initialize(conf);
}
Example 2
Source File: HadoopS3RecoverableWriterExceptionITCase.java From Flink-CEPplus with Apache License 2.0
@BeforeClass
public static void checkCredentialsAndSetup() throws IOException {
    // check whether credentials exist
    S3TestCredentials.assumeCredentialsAvailable();

    basePath = new Path(S3TestCredentials.getTestBucketUri() + "tests-" + UUID.randomUUID());

    // initialize configuration with valid credentials
    final Configuration conf = new Configuration();
    conf.setString("s3.access.key", S3TestCredentials.getS3AccessKey());
    conf.setString("s3.secret.key", S3TestCredentials.getS3SecretKey());

    conf.setLong(PART_UPLOAD_MIN_SIZE, PART_UPLOAD_MIN_SIZE_VALUE);
    conf.setInteger(MAX_CONCURRENT_UPLOADS, MAX_CONCURRENT_UPLOADS_VALUE);

    final String defaultTmpDir = TEMP_FOLDER.getRoot().getAbsolutePath() + "s3_tmp_dir";
    conf.setString(CoreOptions.TMP_DIRS, defaultTmpDir);

    FileSystem.initialize(conf);

    skipped = false;
}
Example 3
Source File: WebMonitorUtilsTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Tests dynamic loading of handlers such as {@link JarUploadHandler}.
 */
@Test
public void testLoadWebSubmissionExtension() throws Exception {
    final Configuration configuration = new Configuration();
    configuration.setString(JobManagerOptions.ADDRESS, "localhost");

    final WebMonitorExtension webMonitorExtension = WebMonitorUtils.loadWebSubmissionExtension(
        CompletableFuture::new,
        Time.seconds(10),
        Collections.emptyMap(),
        CompletableFuture.completedFuture("localhost:12345"),
        Paths.get("/tmp"),
        Executors.directExecutor(),
        configuration);

    assertThat(webMonitorExtension, is(not(nullValue())));
}
Example 4
Source File: TaskMetricGroupTest.java From flink with Apache License 2.0
@Test
public void testGenerateScopeWilcard() throws Exception {
    Configuration cfg = new Configuration();
    cfg.setString(MetricOptions.SCOPE_NAMING_TASK, "*.<task_attempt_id>.<subtask_index>");
    MetricRegistryImpl registry = new MetricRegistryImpl(MetricRegistryConfiguration.fromConfiguration(cfg));

    AbstractID executionId = new AbstractID();

    TaskManagerMetricGroup tmGroup = new TaskManagerMetricGroup(registry, "theHostName", "test-tm-id");
    TaskManagerJobMetricGroup jmGroup = new TaskManagerJobMetricGroup(registry, tmGroup, new JobID(), "myJobName");

    TaskMetricGroup taskGroup = new TaskMetricGroup(
        registry, jmGroup, new JobVertexID(), executionId, "aTaskName", 13, 1);

    assertArrayEquals(
        new String[]{"theHostName", "taskmanager", "test-tm-id", "myJobName", executionId.toString(), "13"},
        taskGroup.getScopeComponents());

    assertEquals(
        "theHostName.taskmanager.test-tm-id.myJobName." + executionId + ".13.name",
        taskGroup.getMetricIdentifier("name"));

    registry.shutdown().get();
}
Example 5
Source File: ZooKeeperLeaderRetrievalTest.java From flink with Apache License 2.0
@Before
public void before() throws Exception {
    testingServer = new TestingServer();

    config = new Configuration();
    config.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");
    config.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, testingServer.getConnectString());

    CuratorFramework client = ZooKeeperUtils.startCuratorFramework(config);

    highAvailabilityServices = new ZooKeeperHaServices(
        client,
        TestingUtils.defaultExecutor(),
        config,
        new VoidBlobStore());
}
Example 6
Source File: SSLUtilsTest.java From flink with Apache License 2.0
private static void addSslProviderConfig(Configuration config, String sslProvider) {
    if (sslProvider.equalsIgnoreCase("OPENSSL")) {
        assertTrue("openSSL not available", OpenSsl.isAvailable());

        // Flink's default algorithm set is not available for openSSL - choose a different one:
        config.setString(SecurityOptions.SSL_ALGORITHMS,
            "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384");
    }
    config.setString(SecurityOptions.SSL_PROVIDER, sslProvider);
}
Example 7
Source File: FileInputFormatTest.java From flink with Apache License 2.0
@Test
public void testSetFileViaConfiguration() {
    final DummyFileInputFormat format = new DummyFileInputFormat();
    final String filePath = "file:///some/none/existing/directory/";
    Configuration conf = new Configuration();
    conf.setString("input.file.path", filePath);
    format.configure(conf);

    Assert.assertEquals("Paths should be equal.", new Path(filePath), format.getFilePath());
}
Example 8
Source File: RocksDBStateBackendConfigTest.java From flink with Apache License 2.0
@Test
public void testConfigureTimerService() throws Exception {

    final Environment env = getMockEnvironment(tempFolder.newFolder());

    // Fix the option key string
    Assert.assertEquals("state.backend.rocksdb.timer-service.factory", RocksDBOptions.TIMER_SERVICE_FACTORY.key());

    // Fix the option value string and ensure all are covered
    Assert.assertEquals(2, RocksDBStateBackend.PriorityQueueStateType.values().length);
    Assert.assertEquals("ROCKSDB", RocksDBStateBackend.PriorityQueueStateType.ROCKSDB.toString());
    Assert.assertEquals("HEAP", RocksDBStateBackend.PriorityQueueStateType.HEAP.toString());

    // Fix the default
    Assert.assertEquals(
        RocksDBStateBackend.PriorityQueueStateType.HEAP.toString(),
        RocksDBOptions.TIMER_SERVICE_FACTORY.defaultValue());

    RocksDBStateBackend rocksDbBackend = new RocksDBStateBackend(tempFolder.newFolder().toURI().toString());

    RocksDBKeyedStateBackend<Integer> keyedBackend = createKeyedStateBackend(rocksDbBackend, env);
    Assert.assertEquals(HeapPriorityQueueSetFactory.class, keyedBackend.getPriorityQueueFactory().getClass());
    keyedBackend.dispose();

    Configuration conf = new Configuration();
    conf.setString(
        RocksDBOptions.TIMER_SERVICE_FACTORY,
        RocksDBStateBackend.PriorityQueueStateType.ROCKSDB.toString());

    rocksDbBackend = rocksDbBackend.configure(conf, Thread.currentThread().getContextClassLoader());
    keyedBackend = createKeyedStateBackend(rocksDbBackend, env);
    Assert.assertEquals(
        RocksDBPriorityQueueSetFactory.class,
        keyedBackend.getPriorityQueueFactory().getClass());
    keyedBackend.dispose();
}
Example 9
Source File: SSLUtilsTest.java From flink with Apache License 2.0
/**
 * Tests that {@link SSLHandlerFactory} is created correctly.
 */
@Test
public void testCreateSSLEngineFactory() throws Exception {
    Configuration serverConfig = createInternalSslConfigWithKeyAndTrustStores();

    final String[] sslAlgorithms;
    final String[] expectedSslProtocols;
    if (sslProvider.equalsIgnoreCase("OPENSSL")) {
        // openSSL does not support the same set of cipher algorithms!
        sslAlgorithms = new String[] {"TLS_RSA_WITH_AES_128_GCM_SHA256", "TLS_RSA_WITH_AES_256_GCM_SHA384"};
        expectedSslProtocols = new String[] {"SSLv2Hello", "TLSv1"};
    } else {
        sslAlgorithms = new String[] {"TLS_DHE_RSA_WITH_AES_128_CBC_SHA", "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256"};
        expectedSslProtocols = new String[] {"TLSv1"};
    }

    // set custom protocol and cipher suites
    serverConfig.setString(SecurityOptions.SSL_PROTOCOL, "TLSv1");
    serverConfig.setString(SecurityOptions.SSL_ALGORITHMS, String.join(",", sslAlgorithms));

    final SSLHandlerFactory serverSSLHandlerFactory = SSLUtils.createInternalServerSSLEngineFactory(serverConfig);
    final SslHandler sslHandler = serverSSLHandlerFactory.createNettySSLHandler(UnpooledByteBufAllocator.DEFAULT);

    assertEquals(expectedSslProtocols.length, sslHandler.engine().getEnabledProtocols().length);
    assertThat(
        sslHandler.engine().getEnabledProtocols(),
        arrayContainingInAnyOrder(expectedSslProtocols));

    assertEquals(sslAlgorithms.length, sslHandler.engine().getEnabledCipherSuites().length);
    assertThat(
        sslHandler.engine().getEnabledCipherSuites(),
        arrayContainingInAnyOrder(sslAlgorithms));
}
Example 10
Source File: BlobCacheRetriesTest.java From flink with Apache License 2.0
/**
 * A test where the connection fails twice and then the get operation succeeds
 * (job-related blob).
 */
@Test
public void testBlobForJobFetchRetries() throws IOException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    testBlobFetchRetries(config, new VoidBlobStore(), new JobID(), TRANSIENT_BLOB);
}
Example 11
Source File: UtilsTest.java From flink with Apache License 2.0
@Test
public void testGetEnvironmentVariables() {
    Configuration testConf = new Configuration();
    testConf.setString("yarn.application-master.env.LD_LIBRARY_PATH", "/usr/lib/native");

    Map<String, String> res = Utils.getEnvironmentVariables("yarn.application-master.env.", testConf);

    Assert.assertEquals(1, res.size());
    Map.Entry<String, String> entry = res.entrySet().iterator().next();
    Assert.assertEquals("LD_LIBRARY_PATH", entry.getKey());
    Assert.assertEquals("/usr/lib/native", entry.getValue());
}
Example 12
Source File: PythonPlanBinder.java From Flink-CEPplus with Apache License 2.0
public PythonPlanBinder(Configuration globalConfig) {
    String configuredPlanTmpPath = globalConfig.getString(PythonOptions.PLAN_TMP_DIR);
    tmpPlanFilesDir = configuredPlanTmpPath != null
        ? configuredPlanTmpPath
        : System.getProperty("java.io.tmpdir") + File.separator + "flink_plan_" + UUID.randomUUID();

    operatorConfig = new Configuration();
    operatorConfig.setString(PythonOptions.PYTHON_BINARY_PATH, globalConfig.getString(PythonOptions.PYTHON_BINARY_PATH));
    String configuredTmpDataDir = globalConfig.getString(PythonOptions.DATA_TMP_DIR);
    if (configuredTmpDataDir != null) {
        operatorConfig.setString(PythonOptions.DATA_TMP_DIR, configuredTmpDataDir);
    }
    operatorConfig.setLong(PythonOptions.MMAP_FILE_SIZE, globalConfig.getLong(PythonOptions.MMAP_FILE_SIZE));
}
Example 13
Source File: FlinkYarnSessionCliTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Tests that the command line arguments override the configuration settings
 * when the {@link ClusterSpecification} is created.
 */
@Test
public void testCommandLineClusterSpecification() throws Exception {
    final Configuration configuration = new Configuration();
    final int jobManagerMemory = 1337;
    final int taskManagerMemory = 7331;
    final int slotsPerTaskManager = 30;
    configuration.setString(JobManagerOptions.JOB_MANAGER_HEAP_MEMORY, jobManagerMemory + "m");
    configuration.setString(TaskManagerOptions.TASK_MANAGER_HEAP_MEMORY, taskManagerMemory + "m");
    configuration.setInteger(TaskManagerOptions.NUM_TASK_SLOTS, slotsPerTaskManager);

    final String[] args = {
        "-yjm", String.valueOf(jobManagerMemory) + "m",
        "-ytm", String.valueOf(taskManagerMemory) + "m",
        "-ys", String.valueOf(slotsPerTaskManager)};
    final FlinkYarnSessionCli flinkYarnSessionCli = new FlinkYarnSessionCli(
        configuration,
        tmp.getRoot().getAbsolutePath(),
        "y",
        "yarn");

    CommandLine commandLine = flinkYarnSessionCli.parseCommandLineOptions(args, false);

    final ClusterSpecification clusterSpecification = flinkYarnSessionCli.getClusterSpecification(commandLine);

    assertThat(clusterSpecification.getMasterMemoryMB(), is(jobManagerMemory));
    assertThat(clusterSpecification.getTaskManagerMemoryMB(), is(taskManagerMemory));
    assertThat(clusterSpecification.getSlotsPerTaskManager(), is(slotsPerTaskManager));
}
Example 14
Source File: MiniClusterConfiguration.java From flink with Apache License 2.0
public MiniClusterConfiguration build() {
    final Configuration modifiedConfiguration = new Configuration(configuration);
    modifiedConfiguration.setInteger(TaskManagerOptions.NUM_TASK_SLOTS, numSlotsPerTaskManager);
    modifiedConfiguration.setString(
        RestOptions.ADDRESS,
        modifiedConfiguration.getString(RestOptions.ADDRESS, "localhost"));

    return new MiniClusterConfiguration(
        modifiedConfiguration,
        numTaskManagers,
        rpcServiceSharing,
        commonBindAddress);
}
Example 15
Source File: OperatorGroupTest.java From flink with Apache License 2.0
@Test
public void testGenerateScopeCustom() throws Exception {
    Configuration cfg = new Configuration();
    cfg.setString(MetricOptions.SCOPE_NAMING_OPERATOR, "<tm_id>.<job_id>.<task_id>.<operator_name>.<operator_id>");
    MetricRegistryImpl registry = new MetricRegistryImpl(MetricRegistryConfiguration.fromConfiguration(cfg));
    try {
        String tmID = "test-tm-id";
        JobID jid = new JobID();
        JobVertexID vertexId = new JobVertexID();
        OperatorID operatorID = new OperatorID();
        String operatorName = "operatorName";

        OperatorMetricGroup operatorGroup = new TaskManagerMetricGroup(registry, "theHostName", tmID)
            .addTaskForJob(jid, "myJobName", vertexId, new ExecutionAttemptID(), "aTaskname", 13, 2)
            .getOrAddOperator(operatorID, operatorName);

        assertArrayEquals(
            new String[]{tmID, jid.toString(), vertexId.toString(), operatorName, operatorID.toString()},
            operatorGroup.getScopeComponents());

        assertEquals(
            String.format("%s.%s.%s.%s.%s.name", tmID, jid, vertexId, operatorName, operatorID),
            operatorGroup.getMetricIdentifier("name"));
    } finally {
        registry.shutdown().get();
    }
}
Example 16
Source File: ZooKeeperRegistryTest.java From flink with Apache License 2.0
/**
 * Tests the ZooKeeperRegistry functions setJobRunning(), setJobFinished(), and isJobRunning().
 */
@Test
public void testZooKeeperRegistry() throws Exception {
    Configuration configuration = new Configuration();
    configuration.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, testingServer.getConnectString());
    configuration.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");

    final HighAvailabilityServices zkHaService = new ZooKeeperHaServices(
        ZooKeeperUtils.startCuratorFramework(configuration),
        Executors.directExecutor(),
        configuration,
        new VoidBlobStore());

    final RunningJobsRegistry zkRegistry = zkHaService.getRunningJobsRegistry();

    try {
        JobID jobID = JobID.generate();
        assertEquals(JobSchedulingStatus.PENDING, zkRegistry.getJobSchedulingStatus(jobID));

        zkRegistry.setJobRunning(jobID);
        assertEquals(JobSchedulingStatus.RUNNING, zkRegistry.getJobSchedulingStatus(jobID));

        zkRegistry.setJobFinished(jobID);
        assertEquals(JobSchedulingStatus.DONE, zkRegistry.getJobSchedulingStatus(jobID));

        zkRegistry.clearJob(jobID);
        assertEquals(JobSchedulingStatus.PENDING, zkRegistry.getJobSchedulingStatus(jobID));
    } finally {
        zkHaService.close();
    }
}
Example 17
Source File: DispatcherResourceCleanupTest.java From flink with Apache License 2.0
@Before
public void setup() throws Exception {
    final JobVertex testVertex = new JobVertex("testVertex");
    testVertex.setInvokableClass(NoOpInvokable.class);
    jobId = new JobID();
    jobGraph = new JobGraph(jobId, "testJob", testVertex);

    configuration = new Configuration();
    configuration.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    highAvailabilityServices = new TestingHighAvailabilityServices();
    clearedJobLatch = new OneShotLatch();
    runningJobsRegistry = new SingleRunningJobsRegistry(jobId, clearedJobLatch);
    highAvailabilityServices.setRunningJobsRegistry(runningJobsRegistry);

    storedHABlobFuture = new CompletableFuture<>();
    deleteAllHABlobsFuture = new CompletableFuture<>();

    final TestingBlobStore testingBlobStore = new TestingBlobStoreBuilder()
        .setPutFunction(
            putArguments -> storedHABlobFuture.complete(putArguments.f2))
        .setDeleteAllFunction(deleteAllHABlobsFuture::complete)
        .createTestingBlobStore();

    cleanupJobFuture = new CompletableFuture<>();

    blobServer = new TestingBlobServer(configuration, testingBlobStore, cleanupJobFuture);

    // upload a blob to the blob server
    permanentBlobKey = blobServer.putPermanent(jobId, new byte[256]);
    jobGraph.addUserJarBlobKey(permanentBlobKey);

    blobFile = blobServer.getStorageLocation(jobId, permanentBlobKey);

    assertThat(blobFile.exists(), is(true));

    // verify that we stored the blob also in the BlobStore
    assertThat(storedHABlobFuture.get(), equalTo(permanentBlobKey));
}
Example 18
Source File: MetricRegistryImplTest.java From flink with Apache License 2.0
@Test
public void testConfigurableDelimiterForReportersInGroup() throws Exception {
    MetricConfig config1 = new MetricConfig();
    config1.setProperty(ConfigConstants.METRICS_REPORTER_SCOPE_DELIMITER, "_");

    MetricConfig config2 = new MetricConfig();
    config2.setProperty(ConfigConstants.METRICS_REPORTER_SCOPE_DELIMITER, "-");

    MetricConfig config3 = new MetricConfig();
    config3.setProperty(ConfigConstants.METRICS_REPORTER_SCOPE_DELIMITER, "AA");

    Configuration config = new Configuration();
    config.setString(MetricOptions.SCOPE_NAMING_TM, "A.B");
    config.setString(ConfigConstants.METRICS_REPORTER_PREFIX + "test1." + ConfigConstants.METRICS_REPORTER_SCOPE_DELIMITER, "_");
    config.setString(ConfigConstants.METRICS_REPORTER_PREFIX + "test1." + ConfigConstants.METRICS_REPORTER_CLASS_SUFFIX, TestReporter8.class.getName());
    config.setString(ConfigConstants.METRICS_REPORTER_PREFIX + "test2." + ConfigConstants.METRICS_REPORTER_SCOPE_DELIMITER, "-");
    config.setString(ConfigConstants.METRICS_REPORTER_PREFIX + "test2." + ConfigConstants.METRICS_REPORTER_CLASS_SUFFIX, TestReporter8.class.getName());
    config.setString(ConfigConstants.METRICS_REPORTER_PREFIX + "test3." + ConfigConstants.METRICS_REPORTER_SCOPE_DELIMITER, "AA");
    config.setString(ConfigConstants.METRICS_REPORTER_PREFIX + "test3." + ConfigConstants.METRICS_REPORTER_CLASS_SUFFIX, TestReporter8.class.getName());
    config.setString(ConfigConstants.METRICS_REPORTER_PREFIX + "test4." + ConfigConstants.METRICS_REPORTER_CLASS_SUFFIX, TestReporter8.class.getName());

    MetricRegistryImpl registry = new MetricRegistryImpl(
        MetricRegistryConfiguration.fromConfiguration(config),
        Arrays.asList(
            ReporterSetup.forReporter("test1", config1, new TestReporter8()),
            ReporterSetup.forReporter("test2", config2, new TestReporter8()),
            ReporterSetup.forReporter("test3", config3, new TestReporter8()),
            ReporterSetup.forReporter("test4", new TestReporter8())));
    List<MetricReporter> reporters = registry.getReporters();
    ((TestReporter8) reporters.get(0)).expectedDelimiter = '_'; // test1 reporter
    ((TestReporter8) reporters.get(1)).expectedDelimiter = '-'; // test2 reporter
    ((TestReporter8) reporters.get(2)).expectedDelimiter = GLOBAL_DEFAULT_DELIMITER; // test3 reporter, because 'AA' is not a valid delimiter
    ((TestReporter8) reporters.get(3)).expectedDelimiter = GLOBAL_DEFAULT_DELIMITER; // test4 reporter uses the global delimiter

    TaskManagerMetricGroup group = new TaskManagerMetricGroup(registry, "host", "id");
    group.counter("C");
    group.close();
    registry.shutdown().get();
    assertEquals(4, TestReporter8.numCorrectDelimitersForRegister);
    assertEquals(4, TestReporter8.numCorrectDelimitersForUnregister);
}
Example 19
Source File: YarnEntrypointUtils.java From flink with Apache License 2.0
public static Configuration loadConfiguration(String workingDirectory, Map<String, String> env) {
    Configuration configuration = GlobalConfiguration.loadConfiguration(workingDirectory);

    final String keytabPrincipal = env.get(YarnConfigKeys.KEYTAB_PRINCIPAL);
    final String zooKeeperNamespace = env.get(YarnConfigKeys.ENV_ZOOKEEPER_NAMESPACE);
    final String hostname = env.get(ApplicationConstants.Environment.NM_HOST.key());
    Preconditions.checkState(
        hostname != null,
        "ApplicationMaster hostname variable %s not set",
        ApplicationConstants.Environment.NM_HOST.key());

    configuration.setString(JobManagerOptions.ADDRESS, hostname);
    configuration.setString(RestOptions.ADDRESS, hostname);

    if (zooKeeperNamespace != null) {
        configuration.setString(HighAvailabilityOptions.HA_CLUSTER_ID, zooKeeperNamespace);
    }

    // if a web monitor shall be started, set the port to random binding
    if (configuration.getInteger(WebOptions.PORT, 0) >= 0) {
        configuration.setInteger(WebOptions.PORT, 0);
    }

    if (!configuration.contains(RestOptions.BIND_PORT)) {
        // set the REST port to 0 to select it randomly
        configuration.setString(RestOptions.BIND_PORT, "0");
    }

    // if the user has set the deprecated YARN-specific config keys, we add the
    // corresponding generic config keys instead. that way, later code needs not
    // deal with deprecated config keys
    BootstrapTools.substituteDeprecatedConfigPrefix(configuration,
        ConfigConstants.YARN_APPLICATION_MASTER_ENV_PREFIX,
        ResourceManagerOptions.CONTAINERIZED_MASTER_ENV_PREFIX);
    BootstrapTools.substituteDeprecatedConfigPrefix(configuration,
        ConfigConstants.YARN_TASK_MANAGER_ENV_PREFIX,
        ResourceManagerOptions.CONTAINERIZED_TASK_MANAGER_ENV_PREFIX);

    final String keytabPath = Utils.resolveKeytabPath(workingDirectory, env.get(YarnConfigKeys.LOCAL_KEYTAB_PATH));

    if (keytabPath != null && keytabPrincipal != null) {
        configuration.setString(SecurityOptions.KERBEROS_LOGIN_KEYTAB, keytabPath);
        configuration.setString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL, keytabPrincipal);
    }

    final String localDirs = env.get(ApplicationConstants.Environment.LOCAL_DIRS.key());
    BootstrapTools.updateTmpDirectoriesInConfiguration(configuration, localDirs);

    return configuration;
}
Example 20
Source File: AccumulatorErrorITCase.java From flink with Apache License 2.0
public static Configuration getConfiguration() {
    Configuration config = new Configuration();
    config.setString(TaskManagerOptions.MANAGED_MEMORY_SIZE, "12m");
    return config;
}