org.apache.flink.configuration.CoreOptions Java Examples
The following examples show how to use
org.apache.flink.configuration.CoreOptions.
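All of the examples follow the same basic pattern: CoreOptions is a class of typed ConfigOption constants that are written to and read from a Configuration. A minimal, self-contained sketch of that pattern (the class name is illustrative):

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.CoreOptions;

public class CoreOptionsBasics {
    public static void main(String[] args) {
        Configuration config = new Configuration();

        // write a typed option
        config.setInteger(CoreOptions.DEFAULT_PARALLELISM, 4);

        // read it back; an unset option falls back to the option's defaultValue()
        int parallelism = config.getInteger(CoreOptions.DEFAULT_PARALLELISM);

        // every ConfigOption also exposes its string key, e.g. for log messages
        System.out.println(CoreOptions.DEFAULT_PARALLELISM.key() + " = " + parallelism);
    }
}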
Example #1
Source File: Optimizer.java From Flink-CEPplus with Apache License 2.0
/**
 * Creates a new optimizer instance that uses the statistics object to determine properties about the input.
 * Given those statistics, the optimizer can make better choices for the execution strategies.
 *
 * The optimizer uses the given cost estimator to compute the costs of the individual operations.
 *
 * @param stats
 *        The statistics to be used to determine the input properties.
 * @param estimator
 *        The <tt>CostEstimator</tt> to use to cost the individual operations.
 */
public Optimizer(DataStatistics stats, CostEstimator estimator, Configuration config) {
    this.statistics = stats;
    this.costEstimator = estimator;

    // determine the default parallelism
    this.defaultParallelism = config.getInteger(CoreOptions.DEFAULT_PARALLELISM);
    if (defaultParallelism < 1) {
        // log the invalid value before overwriting it, so the warning shows what was actually configured
        LOG.warn("Config value {} for option {} is invalid. Ignoring and using a value of {}.",
            defaultParallelism, CoreOptions.DEFAULT_PARALLELISM.key(), CoreOptions.DEFAULT_PARALLELISM.defaultValue());
        this.defaultParallelism = CoreOptions.DEFAULT_PARALLELISM.defaultValue();
    }
}
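For context, a caller typically wires the optimizer up with empty statistics and Flink's default cost estimator, roughly like this (a minimal sketch, assuming DefaultCostEstimator from the optimizer package; the wiring is illustrative, not taken from the file above):

Configuration config = new Configuration();
config.setInteger(CoreOptions.DEFAULT_PARALLELISM, 8);

// hypothetical wiring; the stats and estimator choices are illustrative
Optimizer optimizer = new Optimizer(new DataStatistics(), new DefaultCostEstimator(), config);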
Example #2
Source File: StreamTableEnvironmentTest.java From flink with Apache License 2.0
@Test
public void testPassingExecutionParameters() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    EnvironmentSettings settings = EnvironmentSettings.newInstance().useOldPlanner().build();
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

    tEnv.getConfig().addConfiguration(
        new Configuration()
            .set(CoreOptions.DEFAULT_PARALLELISM, 128)
            .set(PipelineOptions.AUTO_WATERMARK_INTERVAL, Duration.ofMillis(800))
            .set(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL, Duration.ofSeconds(30))
    );

    tEnv.createTemporaryView("test", env.fromElements(1, 2, 3));

    // trigger translation
    Table table = tEnv.sqlQuery("SELECT * FROM test");
    tEnv.toAppendStream(table, Row.class);

    assertEquals(128, env.getParallelism());
    assertEquals(800, env.getConfig().getAutoWatermarkInterval());
    assertEquals(30000, env.getCheckpointConfig().getCheckpointInterval());
}
Example #3
Source File: ExecutionContext.java From flink with Apache License 2.0
private TableConfig createTableConfig() {
    final TableConfig config = new TableConfig();
    config.addConfiguration(flinkConfig);
    Configuration conf = config.getConfiguration();
    environment.getConfiguration().asMap().forEach(conf::setString);

    ExecutionEntry execution = environment.getExecution();
    config.setIdleStateRetentionTime(
        Time.milliseconds(execution.getMinStateRetention()),
        Time.milliseconds(execution.getMaxStateRetention()));

    conf.set(CoreOptions.DEFAULT_PARALLELISM, execution.getParallelism());
    conf.set(PipelineOptions.MAX_PARALLELISM, execution.getMaxParallelism());
    conf.set(StreamPipelineOptions.TIME_CHARACTERISTIC, execution.getTimeCharacteristic());
    if (execution.getTimeCharacteristic() == TimeCharacteristic.EventTime) {
        conf.set(
            PipelineOptions.AUTO_WATERMARK_INTERVAL,
            Duration.ofMillis(execution.getPeriodicWatermarksInterval()));
    }

    setRestartStrategy(conf);
    return config;
}
Example #4
Source File: HadoopS3RecoverableWriterITCase.java From Flink-CEPplus with Apache License 2.0
@BeforeClass
public static void checkCredentialsAndSetup() throws IOException {
    // check whether credentials exist
    S3TestCredentials.assumeCredentialsAvailable();

    basePath = new Path(S3TestCredentials.getTestBucketUri() + "tests-" + UUID.randomUUID());

    // initialize configuration with valid credentials
    final Configuration conf = new Configuration();
    conf.setString("s3.access.key", S3TestCredentials.getS3AccessKey());
    conf.setString("s3.secret.key", S3TestCredentials.getS3SecretKey());

    conf.setLong(PART_UPLOAD_MIN_SIZE, PART_UPLOAD_MIN_SIZE_VALUE);
    conf.setInteger(MAX_CONCURRENT_UPLOADS, MAX_CONCURRENT_UPLOADS_VALUE);

    final String defaultTmpDir = TEMP_FOLDER.getRoot().getAbsolutePath() + "s3_tmp_dir";
    conf.setString(CoreOptions.TMP_DIRS, defaultTmpDir);

    FileSystem.initialize(conf);

    skipped = false;
}
Example #5
Source File: PrestoS3RecoverableWriterTest.java From Flink-CEPplus with Apache License 2.0
@BeforeClass
public static void checkCredentialsAndSetup() throws IOException {
    // check whether credentials exist
    S3TestCredentials.assumeCredentialsAvailable();

    // initialize configuration with valid credentials
    final Configuration conf = new Configuration();
    conf.setString("s3.access.key", S3TestCredentials.getS3AccessKey());
    conf.setString("s3.secret.key", S3TestCredentials.getS3SecretKey());

    conf.setLong(PART_UPLOAD_MIN_SIZE, PART_UPLOAD_MIN_SIZE_VALUE);
    conf.setInteger(MAX_CONCURRENT_UPLOADS, MAX_CONCURRENT_UPLOADS_VALUE);

    final String defaultTmpDir = conf.getString(CoreOptions.TMP_DIRS) + "s3_tmp_dir";
    conf.setString(CoreOptions.TMP_DIRS, defaultTmpDir);

    FileSystem.initialize(conf);
}
Example #6
Source File: HadoopS3RecoverableWriterITCase.java From flink with Apache License 2.0
@BeforeClass
public static void checkCredentialsAndSetup() throws IOException {
    // check whether credentials exist
    S3TestCredentials.assumeCredentialsAvailable();

    basePath = new Path(S3TestCredentials.getTestBucketUri() + "tests-" + UUID.randomUUID());

    // initialize configuration with valid credentials
    final Configuration conf = new Configuration();
    conf.setString("s3.access.key", S3TestCredentials.getS3AccessKey());
    conf.setString("s3.secret.key", S3TestCredentials.getS3SecretKey());

    conf.setLong(PART_UPLOAD_MIN_SIZE, PART_UPLOAD_MIN_SIZE_VALUE);
    conf.setInteger(MAX_CONCURRENT_UPLOADS, MAX_CONCURRENT_UPLOADS_VALUE);

    final String defaultTmpDir = TEMP_FOLDER.getRoot().getAbsolutePath() + "s3_tmp_dir";
    conf.setString(CoreOptions.TMP_DIRS, defaultTmpDir);

    FileSystem.initialize(conf);

    skipped = false;
}
Example #7
Source File: EnvironmentTest.java From flink with Apache License 2.0
@Test
public void testPassingExecutionParameters() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

    tEnv.getConfig().addConfiguration(
        new Configuration()
            .set(CoreOptions.DEFAULT_PARALLELISM, 128)
            .set(PipelineOptions.AUTO_WATERMARK_INTERVAL, Duration.ofMillis(800))
            .set(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL, Duration.ofSeconds(30))
    );

    tEnv.createTemporaryView("test", env.fromElements(1, 2, 3));

    // trigger translation
    Table table = tEnv.sqlQuery("SELECT * FROM test");
    tEnv.toAppendStream(table, Row.class);

    assertEquals(128, env.getParallelism());
    assertEquals(800, env.getConfig().getAutoWatermarkInterval());
    assertEquals(30000, env.getCheckpointConfig().getCheckpointInterval());
}
Example #8
Source File: HadoopS3RecoverableWriterExceptionITCase.java From flink with Apache License 2.0
@BeforeClass
public static void checkCredentialsAndSetup() throws IOException {
    // check whether credentials exist
    S3TestCredentials.assumeCredentialsAvailable();

    basePath = new Path(S3TestCredentials.getTestBucketUri() + "tests-" + UUID.randomUUID());

    // initialize configuration with valid credentials
    final Configuration conf = new Configuration();
    conf.setString("s3.access.key", S3TestCredentials.getS3AccessKey());
    conf.setString("s3.secret.key", S3TestCredentials.getS3SecretKey());

    conf.setLong(PART_UPLOAD_MIN_SIZE, PART_UPLOAD_MIN_SIZE_VALUE);
    conf.setInteger(MAX_CONCURRENT_UPLOADS, MAX_CONCURRENT_UPLOADS_VALUE);

    final String defaultTmpDir = TEMP_FOLDER.getRoot().getAbsolutePath() + "s3_tmp_dir";
    conf.setString(CoreOptions.TMP_DIRS, defaultTmpDir);

    FileSystem.initialize(conf);

    skipped = false;
}
Example #9
Source File: SnapshotDirectoryTest.java From flink with Apache License 2.0
/**
 * Tests that we always use the local file system even if we have specified a different default
 * file system. See FLINK-12042.
 */
@Test
public void testLocalFileSystemIsUsedForTemporary() throws Exception {
    // ensure that snapshot directory will always use the local file system instead of the default file system
    Configuration configuration = new Configuration();
    configuration.setString(CoreOptions.DEFAULT_FILESYSTEM_SCHEME, "nonexistfs:///");
    FileSystem.initialize(configuration);

    final File folderRoot = temporaryFolder.getRoot();

    try {
        File folderB = new File(folderRoot, String.valueOf(UUID.randomUUID()));
        // only pass the path and leave the scheme missing
        SnapshotDirectory snapshotDirectoryB = SnapshotDirectory.temporary(folderB);
        Assert.assertEquals(snapshotDirectoryB.getFileSystem(), FileSystem.getLocalFileSystem());
    } finally {
        // restore the FileSystem configuration
        FileSystem.initialize(new Configuration());
    }
}
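What DEFAULT_FILESYSTEM_SCHEME normally does is supply the scheme for paths that do not specify one, which is exactly the behavior the test above has to guard against. A minimal sketch of that effect (the HDFS address and method name are made-up examples):

// Assumed setup: a scheme-less path now resolves against HDFS, not file://
static void defaultSchemeDemo() throws Exception {
    Configuration conf = new Configuration();
    conf.setString(CoreOptions.DEFAULT_FILESYSTEM_SCHEME, "hdfs://namenode:9000/");
    FileSystem.initialize(conf);

    FileSystem fs = new Path("/data/input").getFileSystem();
    System.out.println(fs.getUri()); // would print the HDFS URI, not the local one
}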
Example #10
Source File: ClassPathJobGraphRetrieverTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testJobGraphRetrieval() throws FlinkException {
    final int parallelism = 42;
    final Configuration configuration = new Configuration();
    configuration.setInteger(CoreOptions.DEFAULT_PARALLELISM, parallelism);
    final JobID jobId = new JobID();

    final ClassPathJobGraphRetriever classPathJobGraphRetriever = new ClassPathJobGraphRetriever(
        jobId,
        SavepointRestoreSettings.none(),
        PROGRAM_ARGUMENTS,
        TestJob.class.getCanonicalName());

    final JobGraph jobGraph = classPathJobGraphRetriever.retrieveJobGraph(configuration);

    assertThat(jobGraph.getName(), is(equalTo(TestJob.class.getCanonicalName() + "-suffix")));
    assertThat(jobGraph.getMaximumParallelism(), is(parallelism));
    assertEquals(jobGraph.getJobID(), jobId);
}
Example #11
Source File: ClassPathJobGraphRetriever.java From Flink-CEPplus with Apache License 2.0
@Override
public JobGraph retrieveJobGraph(Configuration configuration) throws FlinkException {
    final PackagedProgram packagedProgram = createPackagedProgram();
    final int defaultParallelism = configuration.getInteger(CoreOptions.DEFAULT_PARALLELISM);
    try {
        final JobGraph jobGraph = PackagedProgramUtils.createJobGraph(
            packagedProgram,
            configuration,
            defaultParallelism,
            jobId);
        jobGraph.setAllowQueuedScheduling(true);
        jobGraph.setSavepointRestoreSettings(savepointRestoreSettings);

        return jobGraph;
    } catch (Exception e) {
        throw new FlinkException("Could not create the JobGraph from the provided user code jar.", e);
    }
}
Example #12
Source File: BootstrapToolsTest.java From flink with Apache License 2.0
@Test
public void testUpdateTmpDirectoriesInConfiguration() {
    Configuration config = new Configuration();

    // test that the default value is taken
    BootstrapTools.updateTmpDirectoriesInConfiguration(config, "default/directory/path");
    assertEquals(config.getString(CoreOptions.TMP_DIRS), "default/directory/path");

    // test that we ignore the default value if a value was set before
    BootstrapTools.updateTmpDirectoriesInConfiguration(config, "not/default/directory/path");
    assertEquals(config.getString(CoreOptions.TMP_DIRS), "default/directory/path");

    // test that an empty value is not treated as a magic string
    config.setString(CoreOptions.TMP_DIRS, "");
    BootstrapTools.updateTmpDirectoriesInConfiguration(config, "some/new/path");
    assertEquals(config.getString(CoreOptions.TMP_DIRS), "");
}
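BootstrapTools.updateTmpDirectoriesInConfiguration itself is not shown here; a plausible re-implementation consistent with the three assertions above (an assumption, not the actual Flink source) applies the fallback only when the option was never set:

// Hypothetical sketch of the behavior under test.
static void updateTmpDirectoriesInConfiguration(Configuration config, String defaultDirs) {
    if (!config.contains(CoreOptions.TMP_DIRS)) {
        config.setString(CoreOptions.TMP_DIRS, defaultDirs);
    }
    // an explicitly configured value -- even the empty string -- is left untouched
}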
Example #13
Source File: JavaCmdJobManagerDecoratorTest.java From flink with Apache License 2.0
@Test
public void testStartCommandWithLogAndJVMOpts() throws IOException {
    KubernetesTestUtils.createTemporyFile("some data", flinkConfDir, "log4j.properties");
    KubernetesTestUtils.createTemporyFile("some data", flinkConfDir, "logback.xml");

    flinkConfig.set(CoreOptions.FLINK_JVM_OPTIONS, jvmOpts);
    final Container resultMainContainer =
        javaCmdJobManagerDecorator.decorateFlinkPod(baseFlinkPod).getMainContainer();

    assertEquals(Collections.singletonList(KUBERNETES_ENTRY_PATH), resultMainContainer.getCommand());

    final String expectedCommand = getJobManagerExpectedCommand(jvmOpts, logback + " " + log4j);
    final List<String> expectedArgs = Arrays.asList("/bin/bash", "-c", expectedCommand);
    assertEquals(expectedArgs, resultMainContainer.getArgs());
}
Example #14
Source File: FlinkExecutionEnvironments.java From beam with Apache License 2.0
private static int determineParallelism(
        final int pipelineOptionsParallelism,
        final int envParallelism,
        final Configuration configuration) {
    if (pipelineOptionsParallelism > 0) {
        return pipelineOptionsParallelism;
    }
    if (envParallelism > 0) {
        // If the user supplies a parallelism on the command-line, this is set on the execution
        // environment during creation
        return envParallelism;
    }

    final int flinkConfigParallelism =
        configuration.getInteger(CoreOptions.DEFAULT_PARALLELISM.key(), -1);
    if (flinkConfigParallelism > 0) {
        return flinkConfigParallelism;
    }
    LOG.warn(
        "No default parallelism could be found. Defaulting to parallelism 1. "
            + "Please set an explicit parallelism with --parallelism");
    return 1;
}
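The precedence is therefore: Beam's --parallelism pipeline option, then the parallelism already set on the execution environment, then parallelism.default from the Flink configuration, and finally a hard fallback of 1. Illustrated with hypothetical values (0 stands for "not set" here):

// Assumed call sites, purely for illustration of the fallback chain.
Configuration flinkConf = new Configuration();
flinkConf.setInteger(CoreOptions.DEFAULT_PARALLELISM.key(), 4);

assert determineParallelism(0, 0, flinkConf) == 4;   // falls through to flink-conf
assert determineParallelism(0, 16, flinkConf) == 16; // environment beats flink-conf
assert determineParallelism(8, 16, flinkConf) == 8;  // pipeline option beats everything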
Example #15
Source File: ClassPathPackagedProgramRetrieverTest.java From flink with Apache License 2.0
@Test
public void testJobGraphRetrieval() throws IOException, FlinkException, ProgramInvocationException {
    final int parallelism = 42;
    final JobID jobId = new JobID();

    final Configuration configuration = new Configuration();
    configuration.setInteger(CoreOptions.DEFAULT_PARALLELISM, parallelism);
    configuration.set(PipelineOptionsInternal.PIPELINE_FIXED_JOB_ID, jobId.toHexString());

    final ClassPathPackagedProgramRetriever retrieverUnderTest =
        ClassPathPackagedProgramRetriever.newBuilder(PROGRAM_ARGUMENTS)
            .setJobClassName(TestJob.class.getCanonicalName())
            .build();

    final JobGraph jobGraph = retrieveJobGraph(retrieverUnderTest, configuration);

    assertThat(jobGraph.getName(), is(equalTo(TestJob.class.getCanonicalName() + "-suffix")));
    assertThat(jobGraph.getSavepointRestoreSettings(), is(equalTo(SavepointRestoreSettings.none())));
    assertThat(jobGraph.getMaximumParallelism(), is(parallelism));
    assertEquals(jobGraph.getJobID(), jobId);
}
Example #16
Source File: BootstrapToolsTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testUpdateTmpDirectoriesInConfiguration() {
    Configuration config = new Configuration();

    // test that the default value is taken
    BootstrapTools.updateTmpDirectoriesInConfiguration(config, "default/directory/path");
    assertEquals(config.getString(CoreOptions.TMP_DIRS), "default/directory/path");

    // test that we ignore the default value if a value was set before
    BootstrapTools.updateTmpDirectoriesInConfiguration(config, "not/default/directory/path");
    assertEquals(config.getString(CoreOptions.TMP_DIRS), "default/directory/path");

    // test that an empty value is not treated as a magic string
    config.setString(CoreOptions.TMP_DIRS, "");
    BootstrapTools.updateTmpDirectoriesInConfiguration(config, "some/new/path");
    assertEquals(config.getString(CoreOptions.TMP_DIRS), "");
}
Example #17
Source File: JaasModuleTest.java From flink with Apache License 2.0
/**
 * Test that the jaas config file is created in the working directory.
 */
@Test
public void testJaasModuleFilePath() throws IOException {
    File file = folder.newFolder();
    String workingDir = file.toPath().toString();
    Configuration configuration = new Configuration();
    // set the string for CoreOptions.TMP_DIRS to mock the working directory.
    configuration.setString(CoreOptions.TMP_DIRS, workingDir);
    SecurityConfiguration sc = new SecurityConfiguration(configuration);
    JaasModule module = new JaasModule(sc);

    module.install();

    assertJaasFileLocateInRightDirectory(workingDir);
}
Example #18
Source File: JavaCmdTaskManagerDecoratorTest.java From flink with Apache License 2.0
@Test
public void testStartCommandWithLogAndJVMOpts() throws IOException {
    KubernetesTestUtils.createTemporyFile("some data", flinkConfDir, "log4j.properties");
    KubernetesTestUtils.createTemporyFile("some data", flinkConfDir, "logback.xml");

    flinkConfig.set(CoreOptions.FLINK_JVM_OPTIONS, jvmOpts);
    final Container resultMainContainer =
        javaCmdTaskManagerDecorator.decorateFlinkPod(baseFlinkPod).getMainContainer();

    assertEquals(Collections.singletonList(KUBERNETES_ENTRY_PATH), resultMainContainer.getCommand());

    final String expectedCommand = getTaskManagerExpectedCommand(jvmOpts, logback + " " + log4j);
    final List<String> expectedArgs = Arrays.asList("/bin/bash", "-c", expectedCommand);
    assertEquals(expectedArgs, resultMainContainer.getArgs());
}
Example #19
Source File: FileSystemTest.java From flink with Apache License 2.0
@Test
public void testKnownFSWithoutPluginsAndException() throws Exception {
    try {
        final Configuration config = new Configuration();
        config.set(CoreOptions.ALLOWED_FALLBACK_FILESYSTEMS, "s3;wasb");
        FileSystem.initialize(config);

        // capture the exception thrown for the unsupported scheme
        Exception e = null;
        try {
            getFileSystemWithoutSafetyNet("s3://authority/");
        } catch (Exception ex) {
            e = ex;
        }
        assertThat(e, Matchers.instanceOf(UnsupportedFileSystemSchemeException.class));
        /*
        exception should be:
        org.apache.flink.core.fs.UnsupportedFileSystemSchemeException: Could not find a file
        system implementation for scheme 's3'. The scheme is not directly supported by Flink
        and no Hadoop file system to support this scheme could be loaded.
        */
        assertThat(e.getMessage(), containsString("not directly supported"));
    } finally {
        FileSystem.initialize(new Configuration());
    }
}
Example #20
Source File: AvroKryoClassloadingTest.java From flink with Apache License 2.0
@Test
public void testKryoInChildClasspath() throws Exception {
    final Class<?> avroClass = AvroKryoSerializerUtils.class;

    final URL avroLocation = avroClass.getProtectionDomain().getCodeSource().getLocation();
    final URL kryoLocation = Kryo.class.getProtectionDomain().getCodeSource().getLocation();

    final ClassLoader parentClassLoader = new FilteredClassLoader(
        avroClass.getClassLoader(), AvroKryoSerializerUtils.class.getName());

    final ClassLoader userAppClassLoader = FlinkUserCodeClassLoaders.childFirst(
        new URL[] { avroLocation, kryoLocation },
        parentClassLoader,
        CoreOptions.ALWAYS_PARENT_FIRST_LOADER_PATTERNS.defaultValue().split(";"));

    final Class<?> userLoadedAvroClass = Class.forName(avroClass.getName(), false, userAppClassLoader);
    assertNotEquals(avroClass, userLoadedAvroClass);

    // call the 'addAvroGenericDataArrayRegistration(...)' method
    final Method m = userLoadedAvroClass.getMethod("addAvroGenericDataArrayRegistration", LinkedHashMap.class);

    final LinkedHashMap<String, ?> map = new LinkedHashMap<>();
    m.invoke(userLoadedAvroClass.newInstance(), map);

    assertEquals(1, map.size());
}
Example #21
Source File: RocksDBStateBackendConfigTest.java From flink with Apache License 2.0
@Test
public void testWithDefaultFsSchemeAbsoluteStoragePath() throws Exception {
    final File folder = tempFolder.newFolder();
    final String dbStoragePath = folder.getAbsolutePath();

    try {
        // set the default file system scheme
        Configuration config = new Configuration();
        config.setString(CoreOptions.DEFAULT_FILESYSTEM_SCHEME, "s3://mydomain.com:8020/flink");
        FileSystem.initialize(config);

        testLocalDbPaths(dbStoragePath, folder);
    } finally {
        FileSystem.initialize(new Configuration());
    }
}
Example #22
Source File: PrestoS3RecoverableWriterTest.java From flink with Apache License 2.0
@BeforeClass
public static void checkCredentialsAndSetup() throws IOException {
    // check whether credentials exist
    S3TestCredentials.assumeCredentialsAvailable();

    // initialize configuration with valid credentials
    final Configuration conf = new Configuration();
    conf.setString("s3.access.key", S3TestCredentials.getS3AccessKey());
    conf.setString("s3.secret.key", S3TestCredentials.getS3SecretKey());

    conf.setLong(PART_UPLOAD_MIN_SIZE, PART_UPLOAD_MIN_SIZE_VALUE);
    conf.setInteger(MAX_CONCURRENT_UPLOADS, MAX_CONCURRENT_UPLOADS_VALUE);

    final String defaultTmpDir = conf.getString(CoreOptions.TMP_DIRS) + "s3_tmp_dir";
    conf.setString(CoreOptions.TMP_DIRS, defaultTmpDir);

    FileSystem.initialize(conf);
}
Example #23
Source File: StatefulFunctionsJobGraphRetriever.java From stateful-functions with Apache License 2.0
@Override
public JobGraph retrieveJobGraph(Configuration configuration) throws FlinkException {
    final PackagedProgram packagedProgram = createPackagedProgram();

    final int defaultParallelism = configuration.getInteger(CoreOptions.DEFAULT_PARALLELISM);
    try {
        final JobGraph jobGraph =
            PackagedProgramUtils.createJobGraph(
                packagedProgram, configuration, defaultParallelism, jobId, false);
        jobGraph.setSavepointRestoreSettings(savepointRestoreSettings);

        return jobGraph;
    } catch (Exception e) {
        throw new FlinkException("Could not create the JobGraph from the provided user code jar.", e);
    }
}
Example #24
Source File: CliFrontend.java From flink with Apache License 2.0
public CliFrontend(
        Configuration configuration,
        ClusterClientServiceLoader clusterClientServiceLoader,
        List<CustomCommandLine> customCommandLines) {
    this.configuration = checkNotNull(configuration);
    this.customCommandLines = checkNotNull(customCommandLines);
    this.clusterClientServiceLoader = checkNotNull(clusterClientServiceLoader);

    FileSystem.initialize(configuration, PluginUtils.createPluginManagerFromRootFolder(configuration));

    this.customCommandLineOptions = new Options();

    for (CustomCommandLine customCommandLine : customCommandLines) {
        customCommandLine.addGeneralOptions(customCommandLineOptions);
        customCommandLine.addRunOptions(customCommandLineOptions);
    }

    this.clientTimeout = configuration.get(ClientOptions.CLIENT_TIMEOUT);
    this.defaultParallelism = configuration.getInteger(CoreOptions.DEFAULT_PARALLELISM);
}
Example #25
Source File: ClassPathJobGraphRetrieverTest.java From flink with Apache License 2.0
@Test
public void testJobGraphRetrieval() throws FlinkException {
    final int parallelism = 42;
    final Configuration configuration = new Configuration();
    configuration.setInteger(CoreOptions.DEFAULT_PARALLELISM, parallelism);
    final JobID jobId = new JobID();

    final ClassPathJobGraphRetriever classPathJobGraphRetriever = new ClassPathJobGraphRetriever(
        jobId,
        SavepointRestoreSettings.none(),
        PROGRAM_ARGUMENTS,
        TestJob.class.getCanonicalName());

    final JobGraph jobGraph = classPathJobGraphRetriever.retrieveJobGraph(configuration);

    assertThat(jobGraph.getName(), is(equalTo(TestJob.class.getCanonicalName() + "-suffix")));
    assertThat(jobGraph.getMaximumParallelism(), is(parallelism));
    assertEquals(jobGraph.getJobID(), jobId);
}
Example #26
Source File: ClassPathJobGraphRetriever.java From flink with Apache License 2.0
@Override
public JobGraph retrieveJobGraph(Configuration configuration) throws FlinkException {
    final PackagedProgram packagedProgram = createPackagedProgram();
    final int defaultParallelism = configuration.getInteger(CoreOptions.DEFAULT_PARALLELISM);
    try {
        final JobGraph jobGraph = PackagedProgramUtils.createJobGraph(
            packagedProgram,
            configuration,
            defaultParallelism,
            jobId);
        jobGraph.setAllowQueuedScheduling(true);
        jobGraph.setSavepointRestoreSettings(savepointRestoreSettings);

        return jobGraph;
    } catch (Exception e) {
        throw new FlinkException("Could not create the JobGraph from the provided user code jar.", e);
    }
}
Example #27
Source File: JavaCmdTaskManagerDecoratorTest.java From flink with Apache License 2.0
@Test
public void testContainerStartCommandTemplate2() throws IOException {
    KubernetesTestUtils.createTemporyFile("some data", flinkConfDir, "log4j.properties");
    KubernetesTestUtils.createTemporyFile("some data", flinkConfDir, "logback.xml");

    final String containerStartCommandTemplate =
        "%java% %jvmmem% %logging% %jvmopts% %class% %args% %redirects%";
    this.flinkConfig.set(
        KubernetesConfigOptions.CONTAINER_START_COMMAND_TEMPLATE,
        containerStartCommandTemplate);

    final String tmJvmOpts = "-DjmJvm";
    this.flinkConfig.setString(CoreOptions.FLINK_JVM_OPTIONS, jvmOpts);
    this.flinkConfig.setString(CoreOptions.FLINK_TM_JVM_OPTIONS, tmJvmOpts);

    final Container resultMainContainer =
        javaCmdTaskManagerDecorator.decorateFlinkPod(baseFlinkPod).getMainContainer();

    assertEquals(Collections.singletonList(KUBERNETES_ENTRY_PATH), resultMainContainer.getCommand());

    final String expectedCommand = java + " " + tmJvmMem
        + " " + tmLogfile + " " + logback + " " + log4j
        + " " + jvmOpts + " " + tmJvmOpts
        + " " + mainClass + " " + mainClassArgs + " " + tmLogRedirects;
    final List<String> expectedArgs = Arrays.asList("/bin/bash", "-c", expectedCommand);
    assertEquals(expectedArgs, resultMainContainer.getArgs());
}
Example #28
Source File: BlobUtilsTest.java From flink with Apache License 2.0
/**
 * Tests {@link BlobUtils#initLocalStorageDirectory}'s fallback to the default value of
 * {@link CoreOptions#TMP_DIRS}.
 */
@Test
public void testTaskManagerFallbackFallbackBlobStorageDirectory1() throws IOException {
    Configuration config = new Configuration();

    File dir = BlobUtils.initLocalStorageDirectory(config);
    assertThat(dir.getAbsolutePath(), startsWith(CoreOptions.TMP_DIRS.defaultValue()));
}
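The default value being asserted against here is, as far as we can tell, the JVM's temporary directory (CoreOptions.TMP_DIRS falls back to java.io.tmpdir when nothing is configured; treat this as an assumption rather than a guarantee):

// Sketch: resolving the fallback directory an empty Configuration would produce.
String fallback = CoreOptions.TMP_DIRS.defaultValue();
System.out.println(fallback); // e.g. /tmp on a typical Linux setup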
Example #29
Source File: StatefulFunctionsConfigValidator.java From flink-statefun with Apache License 2.0
private static void validateParentFirstClassloaderPatterns(Configuration configuration) {
    final Set<String> parentFirstClassloaderPatterns =
        parentFirstClassloaderPatterns(configuration);
    if (!parentFirstClassloaderPatterns.containsAll(PARENT_FIRST_CLASSLOADER_PATTERNS)) {
        throw new StatefulFunctionsInvalidConfigException(
            CoreOptions.ALWAYS_PARENT_FIRST_LOADER_PATTERNS_ADDITIONAL,
            "Must contain all of " + String.join(", ", PARENT_FIRST_CLASSLOADER_PATTERNS));
    }
}
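The parentFirstClassloaderPatterns helper is not part of the snippet; a plausible sketch (assumed, not taken from the flink-statefun source) splits the semicolon-separated option value into a set:

// Hypothetical helper: collect the configured additional parent-first patterns.
private static Set<String> parentFirstClassloaderPatterns(Configuration configuration) {
    final String patterns =
        configuration.getString(CoreOptions.ALWAYS_PARENT_FIRST_LOADER_PATTERNS_ADDITIONAL);
    return new HashSet<>(Arrays.asList(patterns.split(";")));
}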