Java Code Examples for org.apache.flink.configuration.Configuration#get()
The following examples show how to use org.apache.flink.configuration.Configuration#get().
The original project and source file are noted above each example.
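Before the examples, here is a minimal, self-contained sketch of the typed-access pattern they all rely on: a ConfigOption<T> describes a key, value type, and default, and Configuration#get(ConfigOption<T>) returns the stored value or the option's default. The option name example.retry-count and its default below are hypothetical, made up purely for illustration:

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;

public class ConfigurationGetExample {

    // A hypothetical option, defined only for this illustration.
    private static final ConfigOption<Integer> RETRY_COUNT =
        ConfigOptions.key("example.retry-count")
            .intType()
            .defaultValue(3);

    public static void main(String[] args) {
        Configuration configuration = new Configuration();

        // Nothing has been set yet, so get() falls back to the default (3).
        int defaultRetries = configuration.get(RETRY_COUNT);

        // After set(), get() returns the stored typed value.
        configuration.set(RETRY_COUNT, 5);
        int retries = configuration.get(RETRY_COUNT);

        System.out.println(defaultRetries + " -> " + retries); // 3 -> 5
    }
}

The examples below use the same pattern with project-defined options such as PythonOptions.PYTHON_EXECUTABLE or TaskManagerOptions.TASK_HEAP_MEMORY.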
Example 1
Source File: StatefulFunctionsConfig.java From flink-statefun with Apache License 2.0
/**
 * Create a new configuration object based on the values set in flink-conf.
 *
 * @param configuration a configuration to read the values from
 */
public StatefulFunctionsConfig(Configuration configuration) {
    StatefulFunctionsConfigValidator.validate(configuration);
    this.factoryType = configuration.get(USER_MESSAGE_SERIALIZER);
    this.flinkJobName = configuration.get(FLINK_JOB_NAME);
    this.feedbackBufferSize = configuration.get(TOTAL_MEMORY_USED_FOR_FEEDBACK_CHECKPOINTING);
    this.maxAsyncOperationsPerTask = configuration.get(ASYNC_MAX_OPERATIONS_PER_TASK);

    for (String key : configuration.keySet()) {
        if (key.startsWith(MODULE_CONFIG_PREFIX)) {
            String value = configuration.get(ConfigOptions.key(key).stringType().noDefaultValue());
            String userKey = key.substring(MODULE_CONFIG_PREFIX.length());
            globalConfigurations.put(userKey, value);
        }
    }
}
Example 2
Source File: StandaloneApplicationClusterConfigurationParserFactoryTest.java From flink with Apache License 2.0
@Test
public void testEntrypointClusterConfigWOSavepointSettingsToConfigurationParsing() throws FlinkParseException {
    final JobID jobID = JobID.generate();
    final String[] args = {"-c", confDirPath, "--job-id", jobID.toHexString()};

    final StandaloneApplicationClusterConfiguration clusterConfiguration = commandLineParser.parse(args);
    final Configuration configuration = StandaloneApplicationClusterEntryPoint
            .loadConfigurationFromClusterConfig(clusterConfiguration);

    final String strJobId = configuration.get(PipelineOptionsInternal.PIPELINE_FIXED_JOB_ID);
    assertThat(JobID.fromHexString(strJobId), is(equalTo(jobID)));
    assertThat(SavepointRestoreSettings.fromConfiguration(configuration), is(equalTo(SavepointRestoreSettings.none())));
}
Example 3
Source File: YARNITCase.java From flink with Apache License 2.0
private void checkStagingDirectory(Configuration flinkConfig, ApplicationId appId) throws IOException {
    final List<String> providedLibDirs = flinkConfig.get(YarnConfigOptions.PROVIDED_LIB_DIRS);
    final boolean isProvidedLibDirsConfigured = providedLibDirs != null && !providedLibDirs.isEmpty();

    try (final FileSystem fs = FileSystem.get(YARN_CONFIGURATION)) {
        final Path stagingDirectory = new Path(fs.getHomeDirectory(), ".flink/" + appId.toString());
        if (isProvidedLibDirsConfigured) {
            assertFalse(
                "The provided lib dirs is set, so the lib directory should not be uploaded to staging directory.",
                fs.exists(new Path(stagingDirectory, flinkLibFolder.getName())));
        } else {
            assertTrue(
                "The lib directory should be uploaded to staging directory.",
                fs.exists(new Path(stagingDirectory, flinkLibFolder.getName())));
        }
    }
}
Example 4
Source File: PythonConfig.java From flink with Apache License 2.0
public PythonConfig(Configuration config) {
    maxBundleSize = config.get(PythonOptions.MAX_BUNDLE_SIZE);
    maxBundleTimeMills = config.get(PythonOptions.MAX_BUNDLE_TIME_MILLS);
    maxArrowBatchSize = config.get(PythonOptions.MAX_ARROW_BATCH_SIZE);
    pythonFrameworkMemorySize = config.get(PythonOptions.PYTHON_FRAMEWORK_MEMORY_SIZE);
    pythonDataBufferMemorySize = config.get(PythonOptions.PYTHON_DATA_BUFFER_MEMORY_SIZE);
    pythonFilesInfo = config.getOptional(PythonDependencyUtils.PYTHON_FILES).orElse(new HashMap<>());
    pythonRequirementsFileInfo = config.getOptional(PythonDependencyUtils.PYTHON_REQUIREMENTS_FILE)
        .orElse(new HashMap<>())
        .get(PythonDependencyUtils.FILE);
    pythonRequirementsCacheDirInfo = config.getOptional(PythonDependencyUtils.PYTHON_REQUIREMENTS_FILE)
        .orElse(new HashMap<>())
        .get(PythonDependencyUtils.CACHE);
    pythonArchivesInfo = config.getOptional(PythonDependencyUtils.PYTHON_ARCHIVES).orElse(new HashMap<>());
    pythonExec = config.get(PythonOptions.PYTHON_EXECUTABLE);
    metricEnabled = config.getBoolean(PythonOptions.PYTHON_METRIC_ENABLED);
    isUsingManagedMemory = config.getBoolean(PythonOptions.USE_MANAGED_MEMORY);
}
Example 5
Source File: HiveTableSource.java From flink with Apache License 2.0
private DataStream<RowData> createStreamSourceForPartitionTable(
        StreamExecutionEnvironment execEnv,
        TypeInformation<RowData> typeInfo,
        HiveTableInputFormat inputFormat) {
    Configuration configuration = new Configuration();
    catalogTable.getOptions().forEach(configuration::setString);

    String consumeOrderStr = configuration.get(STREAMING_SOURCE_CONSUME_ORDER);
    ConsumeOrder consumeOrder = ConsumeOrder.getConsumeOrder(consumeOrderStr);
    String consumeOffset = configuration.get(STREAMING_SOURCE_CONSUME_START_OFFSET);
    String extractorKind = configuration.get(PARTITION_TIME_EXTRACTOR_KIND);
    String extractorClass = configuration.get(PARTITION_TIME_EXTRACTOR_CLASS);
    String extractorPattern = configuration.get(PARTITION_TIME_EXTRACTOR_TIMESTAMP_PATTERN);
    Duration monitorInterval = configuration.get(STREAMING_SOURCE_MONITOR_INTERVAL);

    HiveContinuousMonitoringFunction monitoringFunction = new HiveContinuousMonitoringFunction(
            hiveShim,
            jobConf,
            tablePath,
            catalogTable,
            execEnv.getParallelism(),
            consumeOrder,
            consumeOffset,
            extractorKind,
            extractorClass,
            extractorPattern,
            monitorInterval.toMillis());

    ContinuousFileReaderOperatorFactory<RowData, TimestampedHiveInputSplit> factory =
            new ContinuousFileReaderOperatorFactory<>(inputFormat);

    String sourceName = "HiveMonitoringFunction";
    SingleOutputStreamOperator<RowData> source = execEnv
            .addSource(monitoringFunction, sourceName)
            .transform("Split Reader: " + sourceName, typeInfo, factory);

    return new DataStreamSource<>(source);
}
Example 6
Source File: TaskExecutorResourceUtils.java From flink with Apache License 2.0
static TaskExecutorResourceSpec resourceSpecFromConfig(Configuration config) {
    try {
        checkTaskExecutorResourceConfigSet(config);
    } catch (IllegalConfigurationException e) {
        throw new IllegalConfigurationException("Failed to create TaskExecutorResourceSpec", e);
    }
    return new TaskExecutorResourceSpec(
        new CPUResource(config.getDouble(TaskManagerOptions.CPU_CORES)),
        config.get(TaskManagerOptions.TASK_HEAP_MEMORY),
        config.get(TaskManagerOptions.TASK_OFF_HEAP_MEMORY),
        config.get(TaskManagerOptions.NETWORK_MEMORY_MIN),
        config.get(TaskManagerOptions.MANAGED_MEMORY_SIZE));
}
Example 7
Source File: YarnLogConfigUtil.java From flink with Apache License 2.0
@VisibleForTesting
public static Configuration setLogConfigFileInConfig(
        final Configuration configuration,
        final String configurationDirectory) {
    if (configuration.get(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE) != null) {
        return configuration;
    }

    discoverLogConfigFile(configurationDirectory).ifPresent(file ->
        configuration.set(YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, file.getPath()));
    return configuration;
}
Example 8
Source File: PythonOptionsTest.java From flink with Apache License 2.0
@Test
public void testPythonClientExecutable() {
    final Configuration configuration = new Configuration();
    final Optional<String> defaultPythonClientExecutable =
        configuration.getOptional(PythonOptions.PYTHON_CLIENT_EXECUTABLE);
    assertThat(defaultPythonClientExecutable, is(equalTo(Optional.empty())));

    final String expectedPythonClientExecutable = "tmp_dir/test1.py,tmp_dir/test2.py";
    configuration.set(PythonOptions.PYTHON_CLIENT_EXECUTABLE, expectedPythonClientExecutable);
    final String actualPythonClientExecutable = configuration.get(PythonOptions.PYTHON_CLIENT_EXECUTABLE);

    assertThat(actualPythonClientExecutable, is(equalTo(expectedPythonClientExecutable)));
}
Example 9
Source File: PythonOptionsTest.java From flink with Apache License 2.0
@Test
public void testPythonExecutable() {
    final Configuration configuration = new Configuration();
    final Optional<String> defaultPythonExecutable =
        configuration.getOptional(PythonOptions.PYTHON_EXECUTABLE);
    assertThat(defaultPythonExecutable, is(equalTo(Optional.empty())));

    final String expectedPythonExecutable = "venv/py37/bin/python";
    configuration.set(PythonOptions.PYTHON_EXECUTABLE, expectedPythonExecutable);
    final String actualPythonExecutable = configuration.get(PythonOptions.PYTHON_EXECUTABLE);

    assertThat(actualPythonExecutable, is(equalTo(expectedPythonExecutable)));
}
Example 10
Source File: PythonOptionsTest.java From flink with Apache License 2.0
@Test
public void testPythonArchives() {
    final Configuration configuration = new Configuration();
    final Optional<String> defaultPythonArchives =
        configuration.getOptional(PythonOptions.PYTHON_ARCHIVES);
    assertThat(defaultPythonArchives, is(equalTo(Optional.empty())));

    final String expectedPythonArchives = "tmp_dir/py37.zip#venv,tmp_dir/data.zip";
    configuration.set(PythonOptions.PYTHON_ARCHIVES, expectedPythonArchives);
    final String actualPythonArchives = configuration.get(PythonOptions.PYTHON_ARCHIVES);

    assertThat(actualPythonArchives, is(equalTo(expectedPythonArchives)));
}
Example 11
Source File: PythonOptionsTest.java From flink with Apache License 2.0
@Test
public void testPythonRequirements() {
    final Configuration configuration = new Configuration();
    final Optional<String> defaultPythonRequirements =
        configuration.getOptional(PythonOptions.PYTHON_REQUIREMENTS);
    assertThat(defaultPythonRequirements, is(equalTo(Optional.empty())));

    final String expectedPythonRequirements = "tmp_dir/requirements.txt#tmp_dir/cache";
    configuration.set(PythonOptions.PYTHON_REQUIREMENTS, expectedPythonRequirements);
    final String actualPythonRequirements = configuration.get(PythonOptions.PYTHON_REQUIREMENTS);

    assertThat(actualPythonRequirements, is(equalTo(expectedPythonRequirements)));
}
Example 12
Source File: PythonOptionsTest.java From flink with Apache License 2.0
@Test
public void testPythonFiles() {
    final Configuration configuration = new Configuration();
    final Optional<String> defaultPythonFiles =
        configuration.getOptional(PythonOptions.PYTHON_FILES);
    assertThat(defaultPythonFiles, is(equalTo(Optional.empty())));

    final String expectedPythonFiles = "tmp_dir/test1.py,tmp_dir/test2.py";
    configuration.set(PythonOptions.PYTHON_FILES, expectedPythonFiles);
    final String actualPythonFiles = configuration.get(PythonOptions.PYTHON_FILES);

    assertThat(actualPythonFiles, is(equalTo(expectedPythonFiles)));
}
Example 13
Source File: FileSystemTableFactory.java From flink with Apache License 2.0
@Override
public TableSink<RowData> createTableSink(TableSinkFactory.Context context) {
    Configuration conf = new Configuration();
    context.getTable().getOptions().forEach(conf::setString);

    return new FileSystemTableSink(
        context.getObjectIdentifier(),
        context.isBounded(),
        context.getTable().getSchema(),
        getPath(conf),
        context.getTable().getPartitionKeys(),
        conf.get(PARTITION_DEFAULT_NAME),
        context.getTable().getOptions());
}
Example 14
Source File: FileSystemTableFactory.java From flink with Apache License 2.0
@Override
public TableSource<RowData> createTableSource(TableSourceFactory.Context context) {
    Configuration conf = new Configuration();
    context.getTable().getOptions().forEach(conf::setString);

    return new FileSystemTableSource(
        context.getTable().getSchema(),
        getPath(conf),
        context.getTable().getPartitionKeys(),
        conf.get(PARTITION_DEFAULT_NAME),
        context.getTable().getProperties());
}
Example 15
Source File: HiveTableSource.java From flink with Apache License 2.0
private DataStream<RowData> createStreamSourceForNonPartitionTable(
        StreamExecutionEnvironment execEnv,
        TypeInformation<RowData> typeInfo,
        HiveTableInputFormat inputFormat,
        HiveTablePartition hiveTable) {
    HiveTableFileInputFormat fileInputFormat = new HiveTableFileInputFormat(inputFormat, hiveTable);

    Configuration configuration = new Configuration();
    catalogTable.getOptions().forEach(configuration::setString);
    String consumeOrderStr = configuration.get(STREAMING_SOURCE_CONSUME_ORDER);
    ConsumeOrder consumeOrder = ConsumeOrder.getConsumeOrder(consumeOrderStr);
    if (consumeOrder != ConsumeOrder.CREATE_TIME_ORDER) {
        throw new UnsupportedOperationException(
                "Only " + ConsumeOrder.CREATE_TIME_ORDER + " is supported for non partition table.");
    }

    String consumeOffset = configuration.get(STREAMING_SOURCE_CONSUME_START_OFFSET);
    // to Local zone mills instead of UTC mills
    long currentReadTime = TimestampData.fromLocalDateTime(toLocalDateTime(consumeOffset))
            .toTimestamp().getTime();

    Duration monitorInterval = configuration.get(STREAMING_SOURCE_MONITOR_INTERVAL);

    ContinuousFileMonitoringFunction<RowData> monitoringFunction =
            new ContinuousFileMonitoringFunction<>(
                    fileInputFormat,
                    FileProcessingMode.PROCESS_CONTINUOUSLY,
                    execEnv.getParallelism(),
                    monitorInterval.toMillis(),
                    currentReadTime);

    ContinuousFileReaderOperatorFactory<RowData, TimestampedFileInputSplit> factory =
            new ContinuousFileReaderOperatorFactory<>(fileInputFormat);

    String sourceName = "HiveFileMonitoringFunction";
    SingleOutputStreamOperator<RowData> source = execEnv.addSource(monitoringFunction, sourceName)
            .transform("Split Reader: " + sourceName, typeInfo, factory);

    return new DataStreamSource<>(source);
}
Example 16
Source File: ExternalResourceUtils.java From flink with Apache License 2.0
/**
 * Get the enabled external resource list from configuration.
 */
private static Set<String> getExternalResourceSet(Configuration config) {
    return new HashSet<>(config.get(ExternalResourceOptions.EXTERNAL_RESOURCE_LIST));
}
Example 17
Source File: SecurityConfiguration.java From flink with Apache License 2.0
/**
 * Create a security configuration from the global configuration.
 *
 * @param flinkConf the Flink global configuration.
 */
public SecurityConfiguration(Configuration flinkConf) {
    this(flinkConf,
        flinkConf.get(SECURITY_CONTEXT_FACTORY_CLASSES),
        flinkConf.get(SECURITY_MODULE_FACTORY_CLASSES));
}
Example 18
Source File: NettyShuffleEnvironmentConfiguration.java From flink with Apache License 2.0
/**
 * Utility method to extract network related parameters from the configuration and to
 * sanity check them.
 *
 * @param configuration configuration object
 * @param networkMemorySize the size of memory reserved for shuffle environment
 * @param localTaskManagerCommunication true, to skip initializing the network stack
 * @param taskManagerAddress identifying the IP address under which the TaskManager will be accessible
 * @return NettyShuffleEnvironmentConfiguration
 */
public static NettyShuffleEnvironmentConfiguration fromConfiguration(
        Configuration configuration,
        MemorySize networkMemorySize,
        boolean localTaskManagerCommunication,
        InetAddress taskManagerAddress) {

    final int dataBindPort = getDataBindPort(configuration);

    final int pageSize = ConfigurationParserUtils.getPageSize(configuration);

    final NettyConfig nettyConfig = createNettyConfig(
        configuration, localTaskManagerCommunication, taskManagerAddress, dataBindPort);

    final int numberOfNetworkBuffers = calculateNumberOfNetworkBuffers(
        configuration, networkMemorySize, pageSize);

    int initialRequestBackoff = configuration.getInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_INITIAL);
    int maxRequestBackoff = configuration.getInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_MAX);

    int buffersPerChannel = configuration.getInteger(NettyShuffleEnvironmentOptions.NETWORK_BUFFERS_PER_CHANNEL);
    int extraBuffersPerGate = configuration.getInteger(NettyShuffleEnvironmentOptions.NETWORK_EXTRA_BUFFERS_PER_GATE);
    int maxBuffersPerChannel = configuration.getInteger(NettyShuffleEnvironmentOptions.NETWORK_MAX_BUFFERS_PER_CHANNEL);

    boolean isNetworkDetailedMetrics = configuration.getBoolean(NettyShuffleEnvironmentOptions.NETWORK_DETAILED_METRICS);

    String[] tempDirs = ConfigurationUtils.parseTempDirectories(configuration);

    Duration requestSegmentsTimeout = Duration.ofMillis(configuration.getLong(
        NettyShuffleEnvironmentOptions.NETWORK_EXCLUSIVE_BUFFERS_REQUEST_TIMEOUT_MILLISECONDS));

    BoundedBlockingSubpartitionType blockingSubpartitionType = getBlockingSubpartitionType(configuration);

    boolean forcePartitionReleaseOnConsumption =
        configuration.getBoolean(NettyShuffleEnvironmentOptions.FORCE_PARTITION_RELEASE_ON_CONSUMPTION);

    boolean blockingShuffleCompressionEnabled =
        configuration.get(NettyShuffleEnvironmentOptions.BLOCKING_SHUFFLE_COMPRESSION_ENABLED);
    String compressionCodec = configuration.getString(NettyShuffleEnvironmentOptions.SHUFFLE_COMPRESSION_CODEC);

    return new NettyShuffleEnvironmentConfiguration(
        numberOfNetworkBuffers,
        pageSize,
        initialRequestBackoff,
        maxRequestBackoff,
        buffersPerChannel,
        extraBuffersPerGate,
        requestSegmentsTimeout,
        isNetworkDetailedMetrics,
        nettyConfig,
        tempDirs,
        blockingSubpartitionType,
        forcePartitionReleaseOnConsumption,
        blockingShuffleCompressionEnabled,
        compressionCodec,
        maxBuffersPerChannel);
}
Example 19
Source File: ApplicationConfiguration.java From flink with Apache License 2.0
public static ApplicationConfiguration fromConfiguration(final Configuration configuration) {
    checkNotNull(configuration);

    final List<String> programArgsList = ConfigUtils.decodeListFromConfig(configuration, APPLICATION_ARGS, String::new);
    final String[] programArgs = programArgsList.toArray(new String[0]);
    final String applicationClassName = configuration.get(APPLICATION_MAIN_CLASS);

    return new ApplicationConfiguration(programArgs, applicationClassName);
}
Example 20
Source File: AkkaRpcServiceConfiguration.java From flink with Apache License 2.0
public static AkkaRpcServiceConfiguration fromConfiguration(Configuration configuration) {
    final Time timeout = AkkaUtils.getTimeoutAsTime(configuration);

    final long maximumFramesize = AkkaRpcServiceUtils.extractMaximumFramesize(configuration);

    final boolean captureAskCallStacks = configuration.get(AkkaOptions.CAPTURE_ASK_CALLSTACK);

    return new AkkaRpcServiceConfiguration(configuration, timeout, maximumFramesize, captureAskCallStacks);
}