Java Code Examples for com.google.cloud.hadoop.gcsio.GoogleCloudStorageFileSystem#delete()
The following examples show how to use com.google.cloud.hadoop.gcsio.GoogleCloudStorageFileSystem#delete().
Each example notes the project and source file it was taken from.
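Before the examples, here is a minimal sketch of the call contract they exercise. It assumes an already-configured GoogleCloudStorageFileSystem instance (construction varies by environment; the tests below obtain one through helpers such as newGcsFs), and the gs:// paths are hypothetical placeholders. Only the two calls that appear in the examples are used: delete(URI, boolean) and exists(URI).

import com.google.cloud.hadoop.gcsio.GoogleCloudStorageFileSystem;
import java.io.IOException;
import java.net.URI;

class DeleteSketch {
  // `gcsFs` is assumed to be an already-configured GoogleCloudStorageFileSystem;
  // the gs:// URIs below are hypothetical placeholders.
  static void deleteExamples(GoogleCloudStorageFileSystem gcsFs) throws IOException {
    URI fileUri = URI.create("gs://example-bucket/dir/file");
    URI dirUri = URI.create("gs://example-bucket/dir/");

    // Non-recursive delete removes a single object.
    gcsFs.delete(fileUri, /* recursive= */ false);

    // Recursive delete removes a directory together with its contents.
    gcsFs.delete(dirUri, /* recursive= */ true);

    // After the recursive delete, the directory should no longer be visible.
    System.out.println("dir still exists: " + gcsFs.exists(dirUri));
  }
}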
Example 1
Source File: GoogleHadoopFileSystemTestBase.java from hadoop-connectors (Apache License 2.0)
@Test
public void testRepairDirectory_afterFileDelete() throws IOException {
  GoogleHadoopFileSystemBase myghfs = (GoogleHadoopFileSystemBase) ghfs;
  GoogleCloudStorageFileSystem gcsfs = myghfs.getGcsFs();
  GoogleCloudStorage gcs = gcsfs.getGcs();
  URI seedUri = GoogleCloudStorageFileSystemIntegrationTest.getTempFilePath();
  Path dirPath = ghfsHelper.castAsHadoopPath(seedUri);
  URI dirUri = myghfs.getGcsPath(dirPath);

  // An object path gs://<bucket>/<generated-tempdir>/empty-object where
  // gs://<bucket>/<generated-tempdir> does not exist yet.
  Path emptyObject = new Path(dirPath, "empty-object");
  URI objUri = myghfs.getGcsPath(emptyObject);
  StorageResourceId resource = StorageResourceId.fromUriPath(objUri, false);
  gcs.createEmptyObject(resource);

  boolean inferImplicitDirectories =
      gcsfs.getOptions().getCloudStorageOptions().isInferImplicitDirectoriesEnabled();
  boolean autoRepairImplicitDirectories =
      gcsfs.getOptions().getCloudStorageOptions().isAutoRepairImplicitDirectoriesEnabled();

  // Before the delete, the parent directory is visible only if implicit
  // directory inference is enabled.
  assertDirectory(gcsfs, dirUri, /* exists= */ inferImplicitDirectories);

  gcsfs.delete(objUri, false);

  // After the sole object in the directory is deleted, an explicit directory
  // entry is created for the parent only if auto-repair is enabled.
  assertDirectory(gcsfs, dirUri, /* exists= */ autoRepairImplicitDirectories);

  ghfsHelper.clearBucket(resource.getBucketName());
}
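assertDirectory is a helper defined in the test base class and is not shown in these excerpts. Judging only from its call sites (a filesystem, a URI, and an expected-existence flag), a plausible reconstruction might look like the following; getFileInfo and FileInfo.isDirectory() are part of the gcsio API, but the exact helper body is an assumption.

import static com.google.common.truth.Truth.assertThat;

import com.google.cloud.hadoop.gcsio.FileInfo;
import com.google.cloud.hadoop.gcsio.GoogleCloudStorageFileSystem;
import java.io.IOException;
import java.net.URI;

// Hypothetical reconstruction: asserts that `path` exists (or not) and, when
// it exists, that it resolves to a directory.
static void assertDirectory(GoogleCloudStorageFileSystem gcsfs, URI path, boolean exists)
    throws IOException {
  assertThat(gcsfs.exists(path)).isEqualTo(exists);
  if (exists) {
    FileInfo info = gcsfs.getFileInfo(path);
    assertThat(info.isDirectory()).isTrue();
  }
}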
Example 2
Source File: GoogleHadoopFileSystemTestBase.java from hadoop-connectors (Apache License 2.0)
@Test
public void testRepairDirectory_afterSubdirectoryDelete() throws IOException {
  GoogleHadoopFileSystemBase myghfs = (GoogleHadoopFileSystemBase) ghfs;
  GoogleCloudStorageFileSystem gcsfs = myghfs.getGcsFs();
  GoogleCloudStorage gcs = gcsfs.getGcs();

  // A directory without an explicit directory entry can be deleted without a
  // FileNotFoundException only if implicit directory inference is enabled.
  assumeTrue(gcsfs.getOptions().getCloudStorageOptions().isInferImplicitDirectoriesEnabled());

  URI seedUri = GoogleCloudStorageFileSystemIntegrationTest.getTempFilePath();
  Path dirPath = ghfsHelper.castAsHadoopPath(seedUri);
  URI dirUri = myghfs.getGcsPath(dirPath);
  Path subDir = new Path(dirPath, "subdir");
  URI subdirUri = myghfs.getGcsPath(subDir);

  // An object path gs://<bucket>/<generated-tempdir>/subdir/empty-object where
  // neither the subdir nor gs://<bucket>/<generated-tempdir> exist yet.
  Path emptyObject = new Path(subDir, "empty-object");
  URI objUri = myghfs.getGcsPath(emptyObject);
  StorageResourceId resource = StorageResourceId.fromUriPath(objUri, false);
  gcs.createEmptyObject(resource);

  boolean autoRepairImplicitDirectories =
      gcsfs.getOptions().getCloudStorageOptions().isAutoRepairImplicitDirectoriesEnabled();

  assertDirectory(gcsfs, dirUri, /* exists= */ true);
  assertDirectory(gcsfs, subdirUri, /* exists= */ true);

  gcsfs.delete(subdirUri, true);

  // After the subdirectory is deleted, an explicit directory entry is created
  // for the parent only if auto-repair is enabled.
  assertDirectory(gcsfs, dirUri, /* exists= */ autoRepairImplicitDirectories);

  ghfsHelper.clearBucket(resource.getBucketName());
}
Example 3
Source File: CoopLockRepairIntegrationTest.java from hadoop-connectors (Apache License 2.0)
@Test
public void failedDirectoryDelete_noLockFile_checkSucceeds() throws Exception {
  String bucketName = gcsfsIHelper.createUniqueBucket("coop-delete-check-no-lock-failed");
  URI bucketUri = new URI("gs://" + bucketName + "/");
  String fileName = "file";
  URI dirUri = bucketUri.resolve("delete_" + UUID.randomUUID() + "/");

  // create file to delete
  gcsfsIHelper.writeTextFile(bucketName, dirUri.resolve(fileName).getPath(), "file_content");

  GoogleCloudStorageFileSystemOptions gcsFsOptions = newGcsFsOptions();
  failDeleteOperation(gcsFsOptions, bucketName, dirUri);

  GoogleCloudStorageFileSystem gcsFs = newGcsFs(gcsFsOptions, httpRequestInitializer);

  // delete operation lock file
  List<URI> lockFile =
      gcsFs.listFileInfo(bucketUri.resolve(LOCK_DIRECTORY)).stream()
          .map(FileInfo::getPath)
          .filter(p -> !p.toString().endsWith("/all.lock") && p.toString().endsWith(".lock"))
          .collect(toImmutableList());
  gcsFs.delete(Iterables.getOnlyElement(lockFile), /* recursive= */ false);

  assertThat(gcsFs.exists(dirUri)).isTrue();
  assertThat(gcsFs.exists(dirUri.resolve(fileName))).isTrue();

  CoopLockFsck fsck = new CoopLockFsck();
  fsck.setConf(getTestConfiguration());
  fsck.run(new String[] {"--check", "gs://" + bucketName});

  assertThat(gcsFs.exists(dirUri)).isTrue();
  assertThat(gcsFs.exists(dirUri.resolve(fileName))).isTrue();

  // Validate lock files
  List<URI> lockFiles =
      gcsFs.listFileInfo(bucketUri.resolve(LOCK_DIRECTORY)).stream()
          .map(FileInfo::getPath)
          .collect(toList());

  assertThat(lockFiles).hasSize(2);
  assertThat(matchFile(lockFiles, "all\\.lock")).isNotNull();
  String filenamePattern = String.format(OPERATION_FILENAME_PATTERN_FORMAT, DELETE);
  URI logFileUri = matchFile(lockFiles, filenamePattern + "\\.log").get();
  assertThat(gcsfsIHelper.readTextFile(bucketName, logFileUri.getPath()))
      .isEqualTo(dirUri.resolve(fileName) + "\n" + dirUri + "\n");
}
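The matchFile helper is part of the test class rather than the gcsio API, and its implementation is not shown in these excerpts. Based solely on how it is called in these examples (a list of URIs plus a file-name regex, with an Optional<URI> return, as implied by the .get() and isEmpty() call sites), a plausible reconstruction might be:

import java.net.URI;
import java.util.List;
import java.util.Optional;

// Hypothetical reconstruction of the test helper used above: returns the first
// URI whose last path segment matches the given regular expression.
static Optional<URI> matchFile(List<URI> files, String pattern) {
  return files.stream()
      .filter(f -> f.getPath().substring(f.getPath().lastIndexOf('/') + 1).matches(pattern))
      .findFirst();
}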
Example 4
Source File: CoopLockRepairIntegrationTest.java from hadoop-connectors (Apache License 2.0)
@Test
public void failedDirectoryDelete_noLogFile_checkSucceeds() throws Exception {
  String bucketName = gcsfsIHelper.createUniqueBucket("coop-delete-check-no-log-failed");
  URI bucketUri = new URI("gs://" + bucketName + "/");
  String fileName = "file";
  URI dirUri = bucketUri.resolve("delete_" + UUID.randomUUID() + "/");

  // create file to delete
  gcsfsIHelper.writeTextFile(bucketName, dirUri.resolve(fileName).getPath(), "file_content");

  GoogleCloudStorageFileSystemOptions gcsFsOptions = newGcsFsOptions();
  failDeleteOperation(gcsFsOptions, bucketName, dirUri);

  GoogleCloudStorageFileSystem gcsFs = newGcsFs(gcsFsOptions, httpRequestInitializer);

  // delete operation log file
  List<URI> logFile =
      gcsFs.listFileInfo(bucketUri.resolve(LOCK_DIRECTORY)).stream()
          .map(FileInfo::getPath)
          .filter(p -> p.toString().endsWith(".log"))
          .collect(toImmutableList());
  gcsFs.delete(Iterables.getOnlyElement(logFile), /* recursive= */ false);

  assertThat(gcsFs.exists(dirUri)).isTrue();
  assertThat(gcsFs.exists(dirUri.resolve(fileName))).isTrue();

  CoopLockFsck fsck = new CoopLockFsck();
  fsck.setConf(getTestConfiguration());
  fsck.run(new String[] {"--check", "gs://" + bucketName});

  assertThat(gcsFs.exists(dirUri)).isTrue();
  assertThat(gcsFs.exists(dirUri.resolve(fileName))).isTrue();

  // Validate lock files
  List<URI> lockFiles =
      gcsFs.listFileInfo(bucketUri.resolve(LOCK_DIRECTORY)).stream()
          .map(FileInfo::getPath)
          .collect(toList());

  assertThat(lockFiles).hasSize(2);
  assertThat(matchFile(lockFiles, "all\\.lock")).isNotNull();
  String filenamePattern = String.format(OPERATION_FILENAME_PATTERN_FORMAT, DELETE);
  assertThat(matchFile(lockFiles, filenamePattern + "\\.log")).isEmpty();
}
Example 5
Source File: CoopLockRepairIntegrationTest.java from hadoop-connectors (Apache License 2.0)
@Test
public void successfulDirectoryDelete_rollForward() throws Exception {
  String bucketName = gcsfsIHelper.createUniqueBucket("coop-delete-forward-successful");
  URI bucketUri = new URI("gs://" + bucketName + "/");
  String fileName = "file";
  URI dirUri = bucketUri.resolve("delete_" + UUID.randomUUID() + "/");

  // create file to delete
  gcsfsIHelper.writeTextFile(bucketName, dirUri.resolve(fileName).getPath(), "file_content");

  GoogleCloudStorageFileSystemOptions gcsFsOptions = newGcsFsOptions();
  GoogleCloudStorageFileSystem gcsFs = newGcsFs(gcsFsOptions, httpRequestInitializer);

  assertThat(gcsFs.exists(dirUri)).isTrue();
  assertThat(gcsFs.exists(dirUri.resolve(fileName))).isTrue();

  gcsFs.delete(dirUri, /* recursive= */ true);

  assertThat(gcsFs.exists(dirUri)).isFalse();
  assertThat(gcsFs.exists(dirUri.resolve(fileName))).isFalse();

  CoopLockFsck fsck = new CoopLockFsck();
  fsck.setConf(getTestConfiguration());
  fsck.run(new String[] {"--rollForward", "gs://" + bucketName, "all"});

  assertThat(gcsFs.exists(dirUri)).isFalse();
  assertThat(gcsFs.exists(dirUri.resolve(fileName))).isFalse();

  // Validate lock files
  List<URI> lockFiles =
      gcsFs.listFileInfo(bucketUri.resolve(LOCK_DIRECTORY)).stream()
          .map(FileInfo::getPath)
          .collect(toList());

  assertThat(lockFiles).hasSize(2);
  String filenamePattern = String.format(OPERATION_FILENAME_PATTERN_FORMAT, DELETE);
  URI lockFileUri = matchFile(lockFiles, filenamePattern + "\\.lock").get();
  URI logFileUri = matchFile(lockFiles, filenamePattern + "\\.log").get();
  String lockContent = gcsfsIHelper.readTextFile(bucketName, lockFileUri.getPath());
  assertThat(GSON.fromJson(lockContent, DeleteOperation.class).setLockExpiration(null))
      .isEqualTo(new DeleteOperation().setLockExpiration(null).setResource(dirUri.toString()));
  assertThat(gcsfsIHelper.readTextFile(bucketName, logFileUri.getPath()))
      .isEqualTo(dirUri.resolve(fileName) + "\n" + dirUri + "\n");
}
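A note on the final assertion in this example: the lock expiration is a volatile timestamp that differs from run to run, so the test calls setLockExpiration(null) on both the deserialized and the expected DeleteOperation before comparing them. The equality check therefore effectively covers only the stable resource field, which records the directory URI that the delete operation targeted.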