Java Code Examples for org.apache.hadoop.fs.permission.FsAction#ALL
The following examples show how to use org.apache.hadoop.fs.permission.FsAction#ALL. They are drawn from open-source projects; the source file, project, and license are noted above each snippet.
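Before the examples, a quick orientation may help: FsAction is Hadoop's enum of POSIX-style permission bit sets (NONE, EXECUTE, WRITE, …, ALL), and FsPermission combines one FsAction each for the user, group, and other classes. The minimal sketch below shows how FsAction.ALL maps to the familiar rwx/octal notation; it uses only standard Hadoop classes that also appear in the examples, and the values in the comments are what the usual Hadoop encoding would print.

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsActionAllDemo {
  public static void main(String[] args) {
    // FsAction.ALL is the full rwx bit set for one permission class.
    FsPermission open = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
    FsPermission restricted = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE);

    System.out.println(open);                 // rwxrwxrwx
    System.out.println(open.toShort());       // 511, i.e. octal 777
    System.out.println(restricted);           // rwxr-x---
    System.out.println(restricted.toShort()); // 488, i.e. octal 750

    // FsAction values behave like bit sets, which permission checks rely on.
    System.out.println(FsAction.ALL.implies(FsAction.READ_WRITE)); // true
  }
}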
Example 1
Source File: TarArchiveInputStreamDataWriterTest.java, from incubator-gobblin (Apache License 2.0)
/**
 * Find the test compressed file <code>filePath</code> in the classpath and read it as a {@link FileAwareInputStream}.
 */
private FileAwareInputStream getCompressedInputStream(final String filePath, final String newFileName) throws Exception {
  UnGzipConverter converter = new UnGzipConverter();
  FileSystem fs = FileSystem.getLocal(new Configuration());

  String fullPath = getClass().getClassLoader().getResource(filePath).getFile();
  FileStatus status = fs.getFileStatus(testTempPath);
  OwnerAndPermission ownerAndPermission = new OwnerAndPermission(status.getOwner(), status.getGroup(),
      new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  CopyableFile cf = CopyableFileUtils.getTestCopyableFile(filePath, new Path(testTempPath, newFileName).toString(),
      newFileName, ownerAndPermission);

  FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
      .inputStream(fs.open(new Path(fullPath))).build();
  Iterable<FileAwareInputStream> iterable =
      converter.convertRecord("outputSchema", fileAwareInputStream, new WorkUnitState());
  return Iterables.getFirst(iterable, null);
}
Example 2
Source File: HDFSIO.java, from EasyML (Apache License 2.0)
/**
 * Make a directory at the given URI position.
 * @param uri target position
 * @return whether the operation succeeded
 * @throws IOException
 */
public static boolean mkdirs(String uri) throws IOException {
  Path path = new Path(Constants.NAME_NODE + "/" + uri);
  System.out.println("[mkdirs]" + path.toString());
  FsPermission dirPerm = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
  boolean flag = fs.mkdirs(path);
  if (flag) {
    fs.setPermission(path, new FsPermission(dirPerm));
  }
  return flag;
}
Example 3
Source File: TestSnapshot.java, from hadoop (Apache License 2.0)
/**
 * @return a random FsPermission
 */
private FsPermission genRandomPermission() {
  // randomly select between "rwx" and "rw-"
  FsAction u = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  FsAction g = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  FsAction o = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  return new FsPermission(u, g, o);
}
Example 4
Source File: TestCopyMapper.java, from hadoop (Apache License 2.0)
private static void changeUserGroup(String user, String group) throws IOException {
  FileSystem fs = cluster.getFileSystem();
  FsPermission changedPermission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
  for (Path path : pathList) {
    if (fs.isFile(path)) {
      fs.setOwner(path, user, group);
      fs.setPermission(path, changedPermission);
    }
  }
}
Example 5
Source File: TestAclCommands.java, from hadoop (Apache License 2.0)
@Override
public FileStatus[] listStatus(Path f) throws IOException {
  FsPermission perm = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);
  Path path = new Path("/foo");
  FileStatus stat = new FileStatus(1000, true, 3, 1000, 0, 0, perm, "owner", "group", path);
  return new FileStatus[] { stat };
}
Example 6
Source File: MetricsFileSystemInstrumentationTest.java, from incubator-gobblin (Apache License 2.0)
@Test(enabled = false)
public void testCreate7() throws IOException, URISyntaxException {
  HDFSRoot hdfsRoot = new HDFSRoot("/tmp/create");
  MetricsFileSystemInstrumentation fs =
      (MetricsFileSystemInstrumentation) FileSystem.get(new URI(instrumentedURI), new Configuration());
  FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.READ);
  Path newFile = new Path("/tmp/create/newFile");
  FSDataOutputStream fstream = fs.create(newFile, permission, true, 100, (short) 2, 1048576, null);
  Assert.assertEquals(fs.createTimer.getCount(), 1);
  fstream.close();
  hdfsRoot.cleanupRoot();
}
Example 7
Source File: TestSnapshot.java, from big-c (Apache License 2.0)
/**
 * @return a random FsPermission
 */
private FsPermission genRandomPermission() {
  // randomly select between "rwx" and "rw-"
  FsAction u = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  FsAction g = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  FsAction o = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  return new FsPermission(u, g, o);
}
Example 8
Source File: TestCopyMapper.java, from big-c (Apache License 2.0)
private static void changeUserGroup(String user, String group) throws IOException {
  FileSystem fs = cluster.getFileSystem();
  FsPermission changedPermission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
  for (Path path : pathList) {
    if (fs.isFile(path)) {
      fs.setOwner(path, user, group);
      fs.setPermission(path, changedPermission);
    }
  }
}
Example 9
Source File: TestAclCommands.java, from big-c (Apache License 2.0)
@Override
public FileStatus[] listStatus(Path f) throws IOException {
  FsPermission perm = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);
  Path path = new Path("/foo");
  FileStatus stat = new FileStatus(1000, true, 3, 1000, 0, 0, perm, "owner", "group", path);
  return new FileStatus[] { stat };
}
Example 10
Source File: YarnApplicationFileUploader.java, from flink (Apache License 2.0)
private Path getApplicationDir(final ApplicationId applicationId) throws IOException {
  final Path applicationDir = getApplicationDirPath(homeDir, applicationId);
  if (!fileSystem.exists(applicationDir)) {
    final FsPermission permission = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
    fileSystem.mkdirs(applicationDir, permission);
  }
  return applicationDir;
}
Example 11
Source File: RestorableHivePartitionDataset.java, from incubator-gobblin (Apache License 2.0)
public void restore() throws IOException {
  State state = new State(this.state);
  this.datasetOwnerFs = ProxyUtils.getOwnerFs(state, this.datasetOwner);
  try (HiveProxyQueryExecutor queryExecutor = ProxyUtils
      .getQueryExecutor(state, this.datasetOwner, this.datasetToRestoreOwner, this.trashOwner)) {
    if (this.state.getPropAsBoolean(ComplianceConfigurationKeys.COMPLIANCE_JOB_SIMULATE,
        ComplianceConfigurationKeys.DEFAULT_COMPLIANCE_JOB_SIMULATE)) {
      log.info("Simulating restore of " + datasetURN() + " with " + this.datasetToRestore.datasetURN());
      return;
    }

    Path trashPartitionLocation = getTrashPartitionLocation();
    executeTrashTableQueries(queryExecutor);
    this.datasetOwnerFs.mkdirs(trashPartitionLocation.getParent());
    this.datasetOwnerFs.rename(getLocation(), trashPartitionLocation);
    FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE);
    HadoopUtils.setPermissions(trashPartitionLocation.getParent(), this.datasetOwner, this.trashOwner,
        this.datasetOwnerFs, permission);
    log.info("Moved dataset " + datasetURN() + " from " + getLocation() + " to trash location "
        + trashPartitionLocation);

    fsMove(this.datasetToRestore.getLocation(), getLocation());
    HadoopUtils.setPermissions(getLocation().getParent(), this.datasetOwner, this.trashOwner,
        this.datasetOwnerFs, permission);
    log.info("Moved data from backup " + this.datasetToRestore.getLocation() + " to location " + getLocation());

    executeDropPartitionQueries(queryExecutor);
  }
}
Example 12
Source File: MetricsFileSystemInstrumentationTest.java, from incubator-gobblin (Apache License 2.0)
@Test(enabled = false)
public void testSetPermission() throws IOException, URISyntaxException {
  HDFSRoot hdfsRoot = new HDFSRoot("/tmp/permission");
  MetricsFileSystemInstrumentation fs =
      (MetricsFileSystemInstrumentation) FileSystem.get(new URI(instrumentedURI), new Configuration());
  FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.READ);
  fs.setPermission(hdfsRoot.getFilePath8(), permission);
  Assert.assertEquals(fs.setPermissionTimer.getCount(), 1);
  hdfsRoot.cleanupRoot();
}
Example 13
Source File: FileAwareInputStreamDataWriterTest.java, from incubator-gobblin (Apache License 2.0)
@Test
public void testBlockWrite() throws Exception {
  String streamString = "testContents";

  FileStatus status = fs.getFileStatus(testTempPath);
  OwnerAndPermission ownerAndPermission = new OwnerAndPermission(status.getOwner(), status.getGroup(),
      new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  CopyableFile cf = CopyableFileUtils.getTestCopyableFile(ownerAndPermission);
  CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));

  WorkUnitState state = TestUtils.createTestWorkUnitState();
  state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
  state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
  state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
  state.setProp(DistcpFileSplitter.SPLIT_ENABLED, true);
  CopySource.serializeCopyEntity(state, cf);
  CopySource.serializeCopyableDataset(state, metadata);

  FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, 1, 0);

  long splitLen = 4;
  int splits = (int) (streamString.length() / splitLen + 1);
  DistcpFileSplitter.Split split = new DistcpFileSplitter.Split(0, splitLen, 0, splits,
      String.format("%s.__PART%d__", cf.getDestination().getName(), 0));
  FSDataInputStream dataInputStream = StreamUtils.convertStream(IOUtils.toInputStream(streamString));
  dataInputStream.seek(split.getLowPosition());
  FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
      .inputStream(dataInputStream)
      .split(Optional.of(split))
      .build();
  dataWriter.write(fileAwareInputStream);
  dataWriter.commit();

  Path writtenFilePath = new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
      cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
  Assert.assertEquals(IOUtils.toString(new FileInputStream(writtenFilePath.toString())),
      streamString.substring(0, (int) splitLen));
}
Example 14
Source File: FileAwareInputStreamDataWriterTest.java, from incubator-gobblin (Apache License 2.0)
@Test
public void testWriteWithEncryption() throws Exception {
  byte[] streamString = "testEncryptedContents".getBytes("UTF-8");
  byte[] expectedContents = new byte[streamString.length];
  for (int i = 0; i < streamString.length; i++) {
    expectedContents[i] = (byte) ((streamString[i] + 1) % 256);
  }

  FileStatus status = fs.getFileStatus(testTempPath);
  OwnerAndPermission ownerAndPermission = new OwnerAndPermission(status.getOwner(), status.getGroup(),
      new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  CopyableFile cf = CopyableFileUtils.getTestCopyableFile(ownerAndPermission);
  CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));

  WorkUnitState state = TestUtils.createTestWorkUnitState();
  state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
  state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
  state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
  state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, "insecure_shift");
  CopySource.serializeCopyEntity(state, cf);
  CopySource.serializeCopyableDataset(state, metadata);

  FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, 1, 0);
  FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
      .inputStream(StreamUtils.convertStream(new ByteArrayInputStream(streamString))).build();
  dataWriter.write(fileAwareInputStream);
  dataWriter.commit();

  Path writtenFilePath = new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
      cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
  Assert.assertTrue(writtenFilePath.getName().endsWith("insecure_shift"),
      "Expected encryption name to be appended to destination");
  Assert.assertEquals(IOUtils.toByteArray(new FileInputStream(writtenFilePath.toString())), expectedContents);
}
Example 15
Source File: FileBasedJobLockFactory.java, from incubator-gobblin (Apache License 2.0)
protected static FsPermission getDefaultDirPermissions() {
  return new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE);
}
Example 16
Source File: FileAwareInputStreamDataWriterTest.java, from incubator-gobblin (Apache License 2.0)
@Test
public void testWriteWithGPGAsymmetricEncryption() throws Exception {
  byte[] streamString = "testEncryptedContents".getBytes("UTF-8");

  FileStatus status = fs.getFileStatus(testTempPath);
  OwnerAndPermission ownerAndPermission = new OwnerAndPermission(status.getOwner(), status.getGroup(),
      new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  CopyableFile cf = CopyableFileUtils.getTestCopyableFile(ownerAndPermission);
  CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));

  WorkUnitState state = TestUtils.createTestWorkUnitState();
  state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
  state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
  state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
  state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, "gpg");

  File publicKeyFile = new File(testTempPath.toString(), "public.key");
  FileUtils.copyInputStreamToFile(GPGFileEncryptor.class.getResourceAsStream(GPGFileEncryptorTest.PUBLIC_KEY),
      publicKeyFile);
  state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_KEYSTORE_PATH_KEY,
      publicKeyFile.getAbsolutePath());
  state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_KEYSTORE_PASSWORD_KEY,
      GPGFileEncryptorTest.PASSPHRASE);
  state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_KEY_NAME, GPGFileEncryptorTest.KEY_ID);

  CopySource.serializeCopyEntity(state, cf);
  CopySource.serializeCopyableDataset(state, metadata);

  FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, 1, 0);
  FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
      .inputStream(StreamUtils.convertStream(new ByteArrayInputStream(streamString))).build();
  dataWriter.write(fileAwareInputStream);
  dataWriter.commit();

  Path writtenFilePath = new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
      cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
  Assert.assertTrue(writtenFilePath.getName().endsWith("gpg"),
      "Expected encryption name to be appended to destination");

  byte[] encryptedContent = IOUtils.toByteArray(new FileInputStream(writtenFilePath.toString()));
  byte[] decryptedContent = new byte[streamString.length];
  IOUtils.readFully(GPGFileDecryptor.decryptFile(new FileInputStream(writtenFilePath.toString()),
      GPGFileEncryptor.class.getResourceAsStream(GPGFileEncryptorTest.PRIVATE_KEY),
      GPGFileEncryptorTest.PASSPHRASE), decryptedContent);

  // encrypted string should not be the same as the plaintext
  Assert.assertNotEquals(encryptedContent, streamString);
  // decrypted string should be the same as the plaintext
  Assert.assertEquals(decryptedContent, streamString);
}
Example 17
Source File: FileAwareInputStreamDataWriterTest.java, from incubator-gobblin (Apache License 2.0)
@Test
public void testWriteWithGPGSymmetricEncryption() throws Exception {
  byte[] streamString = "testEncryptedContents".getBytes("UTF-8");

  FileStatus status = fs.getFileStatus(testTempPath);
  OwnerAndPermission ownerAndPermission = new OwnerAndPermission(status.getOwner(), status.getGroup(),
      new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  CopyableFile cf = CopyableFileUtils.getTestCopyableFile(ownerAndPermission);
  CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));

  WorkUnitState state = TestUtils.createTestWorkUnitState();
  state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
  state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
  state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
  state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, "gpg");
  state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_KEYSTORE_PASSWORD_KEY, "testPassword");

  CopySource.serializeCopyEntity(state, cf);
  CopySource.serializeCopyableDataset(state, metadata);

  FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, 1, 0);
  FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
      .inputStream(StreamUtils.convertStream(new ByteArrayInputStream(streamString))).build();
  dataWriter.write(fileAwareInputStream);
  dataWriter.commit();

  Path writtenFilePath = new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
      cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
  Assert.assertTrue(writtenFilePath.getName().endsWith("gpg"),
      "Expected encryption name to be appended to destination");

  byte[] encryptedContent = IOUtils.toByteArray(new FileInputStream(writtenFilePath.toString()));
  byte[] decryptedContent = new byte[streamString.length];
  IOUtils.readFully(GPGFileDecryptor.decryptFile(new FileInputStream(writtenFilePath.toString()), "testPassword"),
      decryptedContent);

  // encrypted string should not be the same as the plaintext
  Assert.assertNotEquals(encryptedContent, streamString);
  // decrypted string should be the same as the plaintext
  Assert.assertEquals(decryptedContent, streamString);
}
Example 18
Source File: HivePartitionVersionRetentionReaper.java, from incubator-gobblin (Apache License 2.0)
/**
 * If simulate is set to true, simply returns.
 * If a version points to a non-existing location, drops the partition and closes the JDBC connection.
 * If a version points to the same location as the dataset, drops the partition and closes the JDBC connection.
 * If a version is staging, its data is deleted and its metadata is dropped.
 * If a version is a backup, its data is moved to a backup dir, its current metadata is dropped, and it is
 * registered in the backup db.
 */
@Override
public void clean() throws IOException {
  Path versionLocation = ((HivePartitionRetentionVersion) this.datasetVersion).getLocation();
  Path datasetLocation = ((CleanableHivePartitionDataset) this.cleanableDataset).getLocation();
  String completeName = ((HivePartitionRetentionVersion) this.datasetVersion).datasetURN();
  State state = new State(this.state);
  this.versionOwnerFs = ProxyUtils.getOwnerFs(state, this.versionOwner);
  try (HiveProxyQueryExecutor queryExecutor = ProxyUtils
      .getQueryExecutor(state, this.versionOwner, this.backUpOwner)) {
    if (!this.versionOwnerFs.exists(versionLocation)) {
      log.info("Data versionLocation doesn't exist. Metadata will be dropped for the version " + completeName);
    } else if (datasetLocation.toString().equalsIgnoreCase(versionLocation.toString())) {
      log.info("Dataset location is same as version location. Won't delete the data but metadata will be dropped "
          + "for the version " + completeName);
    } else if (this.simulate) {
      log.info("Simulate is set to true. Won't move the version " + completeName);
      return;
    } else if (completeName.contains(ComplianceConfigurationKeys.STAGING)) {
      log.info("Deleting data from version " + completeName);
      this.versionOwnerFs.delete(versionLocation, true);
    } else if (completeName.contains(ComplianceConfigurationKeys.BACKUP)) {
      executeAlterQueries(queryExecutor);
      Path newVersionLocationParent = getNewVersionLocation().getParent();
      log.info("Creating new dir " + newVersionLocationParent.toString());
      this.versionOwnerFs.mkdirs(newVersionLocationParent);
      log.info("Moving data from " + versionLocation + " to " + getNewVersionLocation());
      fsMove(versionLocation, getNewVersionLocation());
      FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE);
      HadoopUtils.setPermissions(newVersionLocationParent, this.versionOwner, this.backUpOwner,
          this.versionOwnerFs, permission);
    }
    executeDropVersionQueries(queryExecutor);
  }
}
Example 19
Source File: TestSaveNamespace.java, from big-c (Apache License 2.0)
/**
 * Verify that a saveNamespace command brings faulty directories
 * in fs.name.dir and fs.edit.dir back online.
 */
@Test(timeout = 30000)
public void testReinsertnamedirsInSavenamespace() throws Exception {
  // create a configuration with the key to restore error
  // directories in fs.name.dir
  Configuration conf = getConf();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);

  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  // Replace the FSImage with a spy
  FSImage originalImage = fsn.getFSImage();
  NNStorage storage = originalImage.getStorage();

  FSImage spyImage = spy(originalImage);
  Whitebox.setInternalState(fsn, "fsImage", spyImage);

  FileSystem fs = FileSystem.getLocal(conf);
  File rootDir = storage.getStorageDir(0).getRoot();
  Path rootPath = new Path(rootDir.getPath(), "current");
  final FsPermission permissionNone = new FsPermission((short) 0);
  final FsPermission permissionAll = new FsPermission(
      FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);
  fs.setPermission(rootPath, permissionNone);

  try {
    doAnEdit(fsn, 1);
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);

    // Save namespace - should mark the first storage dir as faulty
    // since it's not traversable.
    LOG.info("Doing the first savenamespace.");
    fsn.saveNamespace();
    LOG.info("First savenamespace successful.");

    assertTrue("Savenamespace should have marked one directory as bad."
        + " But found " + storage.getRemovedStorageDirs().size()
        + " bad directories.",
        storage.getRemovedStorageDirs().size() == 1);

    fs.setPermission(rootPath, permissionAll);

    // The next call to savenamespace should try inserting the
    // erroneous directory back to fs.name.dir. This command should
    // be successful.
    LOG.info("Doing the second savenamespace.");
    fsn.saveNamespace();
    LOG.warn("Second savenamespace successful.");

    assertTrue("Savenamespace should have been successful in removing "
        + " bad directories from Image."
        + " But found " + storage.getRemovedStorageDirs().size()
        + " bad directories.",
        storage.getRemovedStorageDirs().size() == 0);

    // Now shut down and restart the namesystem
    LOG.info("Shutting down fsimage.");
    originalImage.close();
    fsn.close();
    fsn = null;

    // Start a new namesystem, which should be able to recover
    // the namespace from the previous incarnation.
    LOG.info("Loading new FSImage from disk.");
    fsn = FSNamesystem.loadFromDisk(conf);

    // Make sure the image loaded including our edit.
    LOG.info("Checking reloaded image.");
    checkEditExists(fsn, 1);
    LOG.info("Reloaded image is good.");
  } finally {
    if (rootDir.exists()) {
      fs.setPermission(rootPath, permissionAll);
    }
    if (fsn != null) {
      try {
        fsn.close();
      } catch (Throwable t) {
        LOG.fatal("Failed to shut down", t);
      }
    }
  }
}
Example 20
Source File: TestSaveNamespace.java, from hadoop (Apache License 2.0)
/**
 * Verify that a saveNamespace command brings faulty directories
 * in fs.name.dir and fs.edit.dir back online.
 */
@Test(timeout = 30000)
public void testReinsertnamedirsInSavenamespace() throws Exception {
  // create a configuration with the key to restore error
  // directories in fs.name.dir
  Configuration conf = getConf();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);

  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  // Replace the FSImage with a spy
  FSImage originalImage = fsn.getFSImage();
  NNStorage storage = originalImage.getStorage();

  FSImage spyImage = spy(originalImage);
  Whitebox.setInternalState(fsn, "fsImage", spyImage);

  FileSystem fs = FileSystem.getLocal(conf);
  File rootDir = storage.getStorageDir(0).getRoot();
  Path rootPath = new Path(rootDir.getPath(), "current");
  final FsPermission permissionNone = new FsPermission((short) 0);
  final FsPermission permissionAll = new FsPermission(
      FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);
  fs.setPermission(rootPath, permissionNone);

  try {
    doAnEdit(fsn, 1);
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);

    // Save namespace - should mark the first storage dir as faulty
    // since it's not traversable.
    LOG.info("Doing the first savenamespace.");
    fsn.saveNamespace();
    LOG.info("First savenamespace successful.");

    assertTrue("Savenamespace should have marked one directory as bad."
        + " But found " + storage.getRemovedStorageDirs().size()
        + " bad directories.",
        storage.getRemovedStorageDirs().size() == 1);

    fs.setPermission(rootPath, permissionAll);

    // The next call to savenamespace should try inserting the
    // erroneous directory back to fs.name.dir. This command should
    // be successful.
    LOG.info("Doing the second savenamespace.");
    fsn.saveNamespace();
    LOG.warn("Second savenamespace successful.");

    assertTrue("Savenamespace should have been successful in removing "
        + " bad directories from Image."
        + " But found " + storage.getRemovedStorageDirs().size()
        + " bad directories.",
        storage.getRemovedStorageDirs().size() == 0);

    // Now shut down and restart the namesystem
    LOG.info("Shutting down fsimage.");
    originalImage.close();
    fsn.close();
    fsn = null;

    // Start a new namesystem, which should be able to recover
    // the namespace from the previous incarnation.
    LOG.info("Loading new FSImage from disk.");
    fsn = FSNamesystem.loadFromDisk(conf);

    // Make sure the image loaded including our edit.
    LOG.info("Checking reloaded image.");
    checkEditExists(fsn, 1);
    LOG.info("Reloaded image is good.");
  } finally {
    if (rootDir.exists()) {
      fs.setPermission(rootPath, permissionAll);
    }
    if (fsn != null) {
      try {
        fsn.close();
      } catch (Throwable t) {
        LOG.fatal("Failed to shut down", t);
      }
    }
  }
}