Java Code Examples for org.apache.flink.core.fs.FileSystem#initialize()
The following examples show how to use org.apache.flink.core.fs.FileSystem#initialize().
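Before the individual examples, here is a minimal sketch of the pattern they all share: build a Flink Configuration, pass it to FileSystem.initialize() so that scheme defaults and credentials take effect globally, and later reset the state by calling initialize() again with an empty Configuration. The class name and the "hdfs://namenode:9000/" scheme value below are placeholders for illustration only; the API calls themselves (setString, DEFAULT_FILESYSTEM_SCHEME, initialize, getDefaultFsUri) are the ones used throughout the examples.

    import org.apache.flink.configuration.Configuration;
    import org.apache.flink.configuration.CoreOptions;
    import org.apache.flink.core.fs.FileSystem;

    public class FileSystemInitializeSketch {

        public static void main(String[] args) throws Exception {
            // Build a Flink configuration and set a default filesystem scheme.
            // "hdfs://namenode:9000/" is a placeholder value for illustration.
            Configuration conf = new Configuration();
            conf.setString(CoreOptions.DEFAULT_FILESYSTEM_SCHEME, "hdfs://namenode:9000/");

            // Apply the settings to the global FileSystem registry.
            FileSystem.initialize(conf);

            // Paths without an explicit scheme now resolve against the default filesystem.
            System.out.println(FileSystem.getDefaultFsUri());

            // Reset to the built-in defaults, as many of the test examples below do in their cleanup.
            FileSystem.initialize(new Configuration());
        }
    }

Newer Flink versions also accept a PluginManager as a second argument so that filesystem implementations can be loaded as plugins, as Examples 17 and 20 show.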
Example 1
Source File: FilesystemSchemeConfigTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testExplicitlySetToOther() throws Exception {
    final Configuration conf = new Configuration();
    conf.setString(CoreOptions.DEFAULT_FILESYSTEM_SCHEME, "otherFS://localhost:1234/");
    FileSystem.initialize(conf);

    URI justPath = new URI(tempFolder.newFile().toURI().getPath());
    assertNull(justPath.getScheme());

    try {
        FileSystem.get(justPath);
        fail("should have failed with an exception");
    } catch (UnsupportedFileSystemSchemeException e) {
        assertTrue(e.getMessage().contains("otherFS"));
    }
}
Example 2
Source File: HadoopS3RecoverableWriterITCase.java From flink with Apache License 2.0
@BeforeClass
public static void checkCredentialsAndSetup() throws IOException {
    // check whether credentials exist
    S3TestCredentials.assumeCredentialsAvailable();

    basePath = new Path(S3TestCredentials.getTestBucketUri() + "tests-" + UUID.randomUUID());

    // initialize configuration with valid credentials
    final Configuration conf = new Configuration();
    conf.setString("s3.access.key", S3TestCredentials.getS3AccessKey());
    conf.setString("s3.secret.key", S3TestCredentials.getS3SecretKey());

    conf.setLong(PART_UPLOAD_MIN_SIZE, PART_UPLOAD_MIN_SIZE_VALUE);
    conf.setInteger(MAX_CONCURRENT_UPLOADS, MAX_CONCURRENT_UPLOADS_VALUE);

    final String defaultTmpDir = TEMP_FOLDER.getRoot().getAbsolutePath() + "s3_tmp_dir";
    conf.setString(CoreOptions.TMP_DIRS, defaultTmpDir);

    FileSystem.initialize(conf);

    skipped = false;
}
Example 3
Source File: TaskManagerRunnerConfigurationTest.java From flink with Apache License 2.0
@Test
public void testDefaultFsParameterLoading() throws Exception {
    try {
        final File tmpDir = temporaryFolder.newFolder();
        final File confFile = new File(tmpDir, GlobalConfiguration.FLINK_CONF_FILENAME);

        final URI defaultFS = new URI("otherFS", null, "localhost", 1234, null, null, null);

        final PrintWriter pw1 = new PrintWriter(confFile);
        pw1.println("fs.default-scheme: " + defaultFS);
        pw1.close();

        String[] args = new String[] {"--configDir", tmpDir.toString()};
        Configuration configuration = TaskManagerRunner.loadConfiguration(args);
        FileSystem.initialize(configuration);

        assertEquals(defaultFS, FileSystem.getDefaultFsUri());
    } finally {
        // reset FS settings
        FileSystem.initialize(new Configuration());
    }
}
Example 4
Source File: HadoopS3RecoverableWriterExceptionITCase.java From flink with Apache License 2.0
@BeforeClass
public static void checkCredentialsAndSetup() throws IOException {
    // check whether credentials exist
    S3TestCredentials.assumeCredentialsAvailable();

    basePath = new Path(S3TestCredentials.getTestBucketUri() + "tests-" + UUID.randomUUID());

    // initialize configuration with valid credentials
    final Configuration conf = new Configuration();
    conf.setString("s3.access.key", S3TestCredentials.getS3AccessKey());
    conf.setString("s3.secret.key", S3TestCredentials.getS3SecretKey());

    conf.setLong(PART_UPLOAD_MIN_SIZE, PART_UPLOAD_MIN_SIZE_VALUE);
    conf.setInteger(MAX_CONCURRENT_UPLOADS, MAX_CONCURRENT_UPLOADS_VALUE);

    final String defaultTmpDir = TEMP_FOLDER.getRoot().getAbsolutePath() + "s3_tmp_dir";
    conf.setString(CoreOptions.TMP_DIRS, defaultTmpDir);

    FileSystem.initialize(conf);

    skipped = false;
}
Example 5
Source File: HadoopS3RecoverableWriterExceptionITCase.java From flink with Apache License 2.0
@AfterClass
public static void cleanUp() throws Exception {
    if (!skipped) {
        getFileSystem().delete(basePath, true);
    }
    FileSystem.initialize(new Configuration());
}
Example 6
Source File: TaskManagerRunner.java From Flink-CEPplus with Apache License 2.0
public static void main(String[] args) throws Exception {
    // startup checks and logging
    EnvironmentInformation.logEnvironmentInfo(LOG, "TaskManager", args);
    SignalHandler.register(LOG);
    JvmShutdownSafeguard.installAsShutdownHook(LOG);

    long maxOpenFileHandles = EnvironmentInformation.getOpenFileHandlesLimit();

    if (maxOpenFileHandles != -1L) {
        LOG.info("Maximum number of open file descriptors is {}.", maxOpenFileHandles);
    } else {
        LOG.info("Cannot determine the maximum number of open file descriptors");
    }

    final Configuration configuration = loadConfiguration(args);

    try {
        FileSystem.initialize(configuration);
    } catch (IOException e) {
        throw new IOException("Error while setting the default " +
            "filesystem scheme from configuration.", e);
    }

    SecurityUtils.install(new SecurityConfiguration(configuration));

    try {
        SecurityUtils.getInstalledContext().runSecured(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                runTaskManager(configuration, ResourceID.generate());
                return null;
            }
        });
    } catch (Throwable t) {
        final Throwable strippedThrowable = ExceptionUtils.stripException(t, UndeclaredThrowableException.class);
        LOG.error("TaskManager initialization failed.", strippedThrowable);
        System.exit(STARTUP_FAILURE_RETURN_CODE);
    }
}
Example 7
Source File: RocksDBStateBackendConfigTest.java From flink with Apache License 2.0
@Test
public void testWithDefaultFsSchemeNoStoragePath() throws Exception {
    try {
        // set the default file system scheme
        Configuration config = new Configuration();
        config.setString(CoreOptions.DEFAULT_FILESYSTEM_SCHEME, "s3://mydomain.com:8020/flink");
        FileSystem.initialize(config);

        testLocalDbPaths(null, tempFolder.getRoot());
    } finally {
        FileSystem.initialize(new Configuration());
    }
}
Example 8
Source File: PrestoS3FileSystemTest.java From flink with Apache License 2.0
@Test
public void testConfigPropagation() throws Exception {
    final Configuration conf = new Configuration();
    conf.setString("s3.access-key", "test_access_key_id");
    conf.setString("s3.secret-key", "test_secret_access_key");

    FileSystem.initialize(conf);

    FileSystem fs = FileSystem.get(new URI("s3://test"));
    validateBasicCredentials(fs);
}
Example 9
Source File: AbstractHadoopFileSystemITTest.java From flink with Apache License 2.0
@AfterClass
public static void teardown() throws IOException, InterruptedException {
    try {
        if (fs != null) {
            cleanupDirectoryWithRetry(fs, basePath, consistencyToleranceNS);
        }
    } finally {
        FileSystem.initialize(new Configuration());
    }
}
Example 10
Source File: YarnFileStageTestS3ITCase.java From Flink-CEPplus with Apache License 2.0
/**
 * Create a Hadoop config file containing S3 access credentials.
 *
 * <p>Note that we cannot use them as part of the URL since this may fail if the credentials
 * contain a "/" (see <a href="https://issues.apache.org/jira/browse/HADOOP-3733">HADOOP-3733</a>).
 */
private static void setupCustomHadoopConfig() throws IOException {
    File hadoopConfig = TEMP_FOLDER.newFile();
    Map<String /* key */, String /* value */> parameters = new HashMap<>();

    // set all different S3 fs implementation variants' configuration keys
    parameters.put("fs.s3a.access.key", S3TestCredentials.getS3AccessKey());
    parameters.put("fs.s3a.secret.key", S3TestCredentials.getS3SecretKey());

    parameters.put("fs.s3.awsAccessKeyId", S3TestCredentials.getS3AccessKey());
    parameters.put("fs.s3.awsSecretAccessKey", S3TestCredentials.getS3SecretKey());

    parameters.put("fs.s3n.awsAccessKeyId", S3TestCredentials.getS3AccessKey());
    parameters.put("fs.s3n.awsSecretAccessKey", S3TestCredentials.getS3SecretKey());

    try (PrintStream out = new PrintStream(new FileOutputStream(hadoopConfig))) {
        out.println("<?xml version=\"1.0\"?>");
        out.println("<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>");
        out.println("<configuration>");
        for (Map.Entry<String, String> entry : parameters.entrySet()) {
            out.println("\t<property>");
            out.println("\t\t<name>" + entry.getKey() + "</name>");
            out.println("\t\t<value>" + entry.getValue() + "</value>");
            out.println("\t</property>");
        }
        out.println("</configuration>");
    }

    final Configuration conf = new Configuration();
    conf.setString(ConfigConstants.HDFS_SITE_CONFIG, hadoopConfig.getAbsolutePath());

    FileSystem.initialize(conf);
}
Example 11
Source File: LimitedConnectionsConfigurationTest.java From flink with Apache License 2.0
@Test
public void testConfiguration() throws Exception {
    // nothing configured, we should get a regular file system
    FileSystem hdfs = FileSystem.get(URI.create("hdfs://localhost:12345/a/b/c"));
    FileSystem ftpfs = FileSystem.get(URI.create("ftp://localhost:12345/a/b/c"));

    assertFalse(hdfs instanceof LimitedConnectionsFileSystem);
    assertFalse(ftpfs instanceof LimitedConnectionsFileSystem);

    // configure some limits, which should cause "fsScheme" to be limited
    final Configuration config = new Configuration();
    config.setInteger("fs.hdfs.limit.total", 40);
    config.setInteger("fs.hdfs.limit.input", 39);
    config.setInteger("fs.hdfs.limit.output", 38);
    config.setInteger("fs.hdfs.limit.timeout", 23456);
    config.setInteger("fs.hdfs.limit.stream-timeout", 34567);

    try {
        FileSystem.initialize(config);

        hdfs = FileSystem.get(URI.create("hdfs://localhost:12345/a/b/c"));
        ftpfs = FileSystem.get(URI.create("ftp://localhost:12345/a/b/c"));

        assertTrue(hdfs instanceof LimitedConnectionsFileSystem);
        assertFalse(ftpfs instanceof LimitedConnectionsFileSystem);

        LimitedConnectionsFileSystem limitedFs = (LimitedConnectionsFileSystem) hdfs;
        assertEquals(40, limitedFs.getMaxNumOpenStreamsTotal());
        assertEquals(39, limitedFs.getMaxNumOpenInputStreams());
        assertEquals(38, limitedFs.getMaxNumOpenOutputStreams());
        assertEquals(23456, limitedFs.getStreamOpenTimeout());
        assertEquals(34567, limitedFs.getStreamInactivityTimeout());
    } finally {
        // clear all settings
        FileSystem.initialize(new Configuration());
    }
}
Example 12
Source File: YarnTaskExecutorRunner.java From Flink-CEPplus with Apache License 2.0
/**
 * The instance entry point for the YARN task executor. Obtains user group information and calls
 * the main work method {@link TaskManagerRunner#runTaskManager(Configuration, ResourceID)} as a
 * privileged action.
 *
 * @param args The command line arguments.
 */
private static void run(String[] args) {
    try {
        LOG.debug("All environment variables: {}", ENV);

        final String currDir = ENV.get(Environment.PWD.key());
        LOG.info("Current working Directory: {}", currDir);

        final Configuration configuration = GlobalConfiguration.loadConfiguration(currDir);

        FileSystem.initialize(configuration);

        setupConfigurationAndInstallSecurityContext(configuration, currDir, ENV);

        final String containerId = ENV.get(YarnResourceManager.ENV_FLINK_CONTAINER_ID);
        Preconditions.checkArgument(containerId != null,
            "ContainerId variable %s not set", YarnResourceManager.ENV_FLINK_CONTAINER_ID);

        SecurityUtils.getInstalledContext().runSecured((Callable<Void>) () -> {
            TaskManagerRunner.runTaskManager(configuration, new ResourceID(containerId));
            return null;
        });
    } catch (Throwable t) {
        final Throwable strippedThrowable = ExceptionUtils.stripException(t, UndeclaredThrowableException.class);
        // make sure that everything whatever ends up in the log
        LOG.error("YARN TaskManager initialization failed.", strippedThrowable);
        System.exit(INIT_ERROR_EXIT_CODE);
    }
}
Example 13
Source File: PrestoS3FileSystemBehaviorITCase.java From flink with Apache License 2.0
@BeforeClass
public static void checkCredentialsAndSetup() throws IOException {
    // check whether credentials exist
    S3TestCredentials.assumeCredentialsAvailable();

    // initialize configuration with valid credentials
    final Configuration conf = new Configuration();
    conf.setString("s3.access.key", S3TestCredentials.getS3AccessKey());
    conf.setString("s3.secret.key", S3TestCredentials.getS3SecretKey());
    FileSystem.initialize(conf);
}
Example 14
Source File: HadoopOSSFileSystemITCase.java From Flink-CEPplus with Apache License 2.0
@BeforeClass
public static void setup() throws IOException {
    OSSTestCredentials.assumeCredentialsAvailable();

    final Configuration conf = new Configuration();
    conf.setString("fs.oss.endpoint", OSSTestCredentials.getOSSEndpoint());
    conf.setString("fs.oss.accessKeyId", OSSTestCredentials.getOSSAccessKey());
    conf.setString("fs.oss.accessKeySecret", OSSTestCredentials.getOSSSecretKey());
    FileSystem.initialize(conf);

    basePath = new Path(OSSTestCredentials.getTestBucketUri() + TEST_DATA_DIR);
    fs = basePath.getFileSystem();
    deadline = 0;
}
Example 15
Source File: HadoopS3RecoverableWriterITCase.java From flink with Apache License 2.0
@AfterClass
public static void cleanUp() throws Exception {
    if (!skipped) {
        getFileSystem().delete(basePath, true);
    }
    FileSystem.initialize(new Configuration());
}
Example 16
Source File: AzureFileSystemBehaviorITCase.java From flink with Apache License 2.0
@BeforeClass
public static void checkCredentialsAndSetup() throws IOException {
    // check whether credentials and container details exist
    Assume.assumeTrue("Azure container not configured, skipping test...",
        !StringUtils.isNullOrWhitespaceOnly(CONTAINER));
    Assume.assumeTrue("Azure access key not configured, skipping test...",
        !StringUtils.isNullOrWhitespaceOnly(ACCESS_KEY));

    // initialize configuration with valid credentials
    final Configuration conf = new Configuration();
    // fs.azure.account.key.youraccount.blob.core.windows.net = ACCESS_KEY
    conf.setString("fs.azure.account.key." + ACCOUNT + ".blob.core.windows.net", ACCESS_KEY);
    FileSystem.initialize(conf);
}
Example 17
Source File: ClusterEntrypoint.java From flink with Apache License 2.0
private void configureFileSystems(Configuration configuration, PluginManager pluginManager) {
    LOG.info("Install default filesystem.");
    FileSystem.initialize(configuration, pluginManager);
}
Example 18
Source File: PrestoS3FileSystemBehaviorITCase.java From flink with Apache License 2.0
@AfterClass
public static void clearFsConfig() throws IOException {
    FileSystem.initialize(new Configuration());
}
Example 19
Source File: PrestoS3RecoverableWriterTest.java From Flink-CEPplus with Apache License 2.0
@AfterClass
public static void cleanUp() throws IOException {
    FileSystem.initialize(new Configuration());
}
Example 20
Source File: ClusterEntrypoint.java From flink with Apache License 2.0
private void configureFileSystems(Configuration configuration) {
    LOG.info("Install default filesystem.");
    FileSystem.initialize(configuration, PluginUtils.createPluginManagerFromRootFolder(configuration));
}