org.apache.hadoop.fs.DF Java Examples
The following examples show how to use org.apache.hadoop.fs.DF, a utility class that reports disk-space statistics (capacity, used, and available bytes, plus the filesystem and mount point) for the partition containing a given local path.
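As a quick orientation, here is a minimal, self-contained sketch of standalone DF usage; the DFUsageSketch class name and the /tmp path are illustrative choices, not part of the Hadoop API. A DF is constructed over an existing local directory and reports statistics for the partition that contains it.

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;

public class DFUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The path must already exist; /tmp is just an illustrative choice.
    DF df = new DF(new File("/tmp"), conf);
    System.out.println("filesystem: " + df.getFilesystem());
    System.out.println("mount:      " + df.getMount());
    System.out.println("capacity:   " + df.getCapacity());  // bytes
    System.out.println("used:       " + df.getUsed());      // bytes
    System.out.println("available:  " + df.getAvailable()); // bytes
  }
}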
Example #1
Source File: FsVolumeImpl.java From hadoop with Apache License 2.0 | 5 votes |
FsVolumeImpl(FsDatasetImpl dataset, String storageID, File currentDir,
    Configuration conf, StorageType storageType) throws IOException {
  this.dataset = dataset;
  this.storageID = storageID;
  // Bytes to reserve for non-DFS use, from dfs.datanode.du.reserved.
  this.reserved = conf.getLong(
      DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
      DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT);
  this.reservedForRbw = new AtomicLong(0L);
  this.currentDir = currentDir;
  File parent = currentDir.getParentFile();
  // DF watches the partition that holds the volume's parent directory.
  this.usage = new DF(parent, conf);
  this.storageType = storageType;
  this.configuredCapacity = -1;
  cacheExecutor = initializeCacheExecutor(parent);
}
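Here DF is pointed at the parent of the volume's current/ directory, so its capacity and free-space figures describe the whole disk partition backing the volume; elsewhere in FsVolumeImpl the configured reserved bytes are subtracted from that DF-reported capacity before the DataNode advertises available space.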
Example #2
Source File: TestNamenodeCapacityReport.java From RDFS with Apache License 2.0 | 5 votes |
public void testVolumeSizeWithBytes() throws Exception {
  Configuration conf = new Configuration();
  File data_dir = MiniDFSCluster.getDataDirectory(conf);
  // Need to create data_dir, otherwise DF doesn't work on a non-existent dir.
  data_dir.mkdirs();
  DF df = new DF(data_dir, conf);
  // Reserve a fixed number of bytes on the volume.
  long reserved = 10000;
  conf.setLong("dfs.datanode.du.reserved", reserved);
  verifyVolumeSize(conf, reserved, df);
}
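The mkdirs() call matters because DF, in this Hadoop lineage, obtains its numbers by invoking the platform's df utility, which fails when handed a path that does not exist yet.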
Example #3
Source File: TestNamenodeCapacityReport.java From RDFS with Apache License 2.0 | 5 votes |
public void testVolumeSizeWithPercent() throws Exception {
  Configuration conf = new Configuration();
  File data_dir = MiniDFSCluster.getDataDirectory(conf);
  // Need to create data_dir, otherwise DF doesn't work on a non-existent dir.
  data_dir.mkdirs();
  DF df = new DF(data_dir, conf);
  // Reserve 21.5% of the partition's capacity, as measured by DF.
  long reserved = (long) (df.getCapacity() * 0.215);
  conf.setFloat("dfs.datanode.du.reserved.percent", 21.5f);
  verifyVolumeSize(conf, reserved, df);
}
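This variant reserves a fraction of the disk instead of a fixed byte count: the expected reservation is computed from the same DF instance's getCapacity() reading that the cluster will use, and dfs.datanode.du.reserved.percent is the RDFS configuration key for a percentage-based reserve.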
Example #4
Source File: FSDataset.java From RDFS with Apache License 2.0 | 5 votes |
FSVolume(FSDataset dataset, File currentDir, Configuration conf)
    throws IOException {
  this.currentDir = currentDir;
  File parent = currentDir.getParentFile();
  // DF reports statistics for the whole partition holding the volume.
  this.usage = new DF(parent, conf);
  this.reserved = usage.getReserved();
  this.dataset = dataset;
  this.namespaceMap = new NamespaceMap();
  // DU tracks space consumed under this volume's own directory tree.
  this.dfsUsage = new DU(currentDir, conf);
  this.dfsUsage.start();
}
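Note the division of labor: DF reports capacity and free space for the entire partition, while the DU instance started here measures only the space consumed under the volume's own directory, letting the DataNode separate DFS usage from other consumers of the disk.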
Example #5
Source File: NameNodeResourceChecker.java From hadoop with Apache License 2.0 | 4 votes |
public CheckedVolume(File dirToCheck, boolean required) throws IOException {
  // Monitor free space on the partition backing this storage directory.
  df = new DF(dirToCheck, conf);
  this.required = required;
  volume = df.getFilesystem();
}
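NameNodeResourceChecker builds one CheckedVolume per NameNode storage directory; DF supplies the free space on the underlying partition, and getFilesystem() identifies that partition so directories sharing a disk are not double-checked. When a required volume runs low on space, the NameNode can enter safe mode to protect its metadata.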