org.apache.hadoop.hdfs.server.common.Util Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.common.Util.
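Most of the examples below exercise three static helpers from this class: Util.fileAsURI(File), which converts a local directory into an absolute file: URI; Util.stringCollectionAsURIs(Collection<String>), which turns path strings read from a configuration key into URIs; and, in the older RDFS/hadoop-gpu branches, Util.now() as a timestamp source. The following minimal sketch shows the two URI helpers in isolation. It is illustrative only: the class name UtilUriSketch and the /tmp paths are assumptions, not taken from any of the projects listed below.

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;

import org.apache.hadoop.hdfs.server.common.Util;

public class UtilUriSketch {
  public static void main(String[] args) throws IOException {
    // fileAsURI: convert a local File into an absolute, canonical file: URI.
    URI nameDir = Util.fileAsURI(new File("/tmp/example/name"));
    System.out.println(nameDir); // e.g. file:/tmp/example/name

    // stringCollectionAsURIs: convert a collection of path strings
    // (as typically read from a configuration key) into URIs.
    Collection<String> dirNames =
        Arrays.asList("/tmp/example/edits1", "file:///tmp/example/edits2");
    List<URI> dirs = Util.stringCollectionAsURIs(dirNames);
    for (URI dir : dirs) {
      System.out.println(dir);
    }
  }
}

The same pattern, reading a list of directory names from a DFSConfigKeys property and handing it to stringCollectionAsURIs, appears in the FSImage, NameNodeResourceChecker, DataNode, and DiskUtil examples below.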
Example #1
Source File: NNStorage.java From hadoop with Apache License 2.0
/**
 * Return the list of locations being used for a specific purpose.
 * i.e. Image or edit log storage.
 *
 * @param dirType Purpose of locations requested.
 * @throws IOException
 */
Collection<URI> getDirectories(NameNodeDirType dirType) throws IOException {
  ArrayList<URI> list = new ArrayList<URI>();
  Iterator<StorageDirectory> it =
      (dirType == null) ? dirIterator() : dirIterator(dirType);
  for ( ; it.hasNext(); ) {
    StorageDirectory sd = it.next();
    try {
      list.add(Util.fileAsURI(sd.getRoot()));
    } catch (IOException e) {
      throw new IOException("Exception while processing " +
          "StorageDirectory " + sd.getRoot(), e);
    }
  }
  return list;
}
Example #2
Source File: TestBlockReplacement.java From hadoop-gpu with Apache License 2.0
public void testThrottler() throws IOException {
  Configuration conf = new Configuration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  long bandwidthPerSec = 1024*1024L;
  final long TOTAL_BYTES = 6*bandwidthPerSec;
  long bytesToSend = TOTAL_BYTES;
  long start = Util.now();
  BlockTransferThrottler throttler = new BlockTransferThrottler(bandwidthPerSec);
  long totalBytes = 0L;
  long bytesSent = 1024*512L; // 0.5MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  bytesSent = 1024*768L; // 0.75MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  try {
    Thread.sleep(1000);
  } catch (InterruptedException ignored) {}
  throttler.throttle(bytesToSend);
  long end = Util.now();
  assertTrue(totalBytes*1000/(end-start) <= bandwidthPerSec);
}
Example #3
Source File: Balancer.java From RDFS with Apache License 2.0
private long dispatchBlockMoves() throws InterruptedException {
  long bytesLastMoved = bytesMoved.get();

  Future<?>[] futures = new Future<?>[sources.size()];
  int i = 0;
  for (Source source : sources) {
    futures[i++] = dispatcherExecutor.submit(
        source.new BlockMoveDispatcher(Util.now()));
  }

  // wait for all dispatcher threads to finish
  for (Future<?> future : futures) {
    try {
      future.get();
    } catch (ExecutionException e) {
      LOG.warn("Dispatcher thread failed", e.getCause());
    }
  }

  // wait for all block moving to be done
  waitForMoveCompletion();

  return bytesMoved.get() - bytesLastMoved;
}
Example #4
Source File: TestBlockReplacement.java From RDFS with Apache License 2.0
public void testThrottler() throws IOException {
  Configuration conf = new Configuration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  long bandwidthPerSec = 1024*1024L;
  final long TOTAL_BYTES = 6*bandwidthPerSec;
  long bytesToSend = TOTAL_BYTES;
  long start = Util.now();
  DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec);
  long totalBytes = 0L;
  long bytesSent = 1024*512L; // 0.5MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  bytesSent = 1024*768L; // 0.75MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  try {
    Thread.sleep(1000);
  } catch (InterruptedException ignored) {}
  throttler.throttle(bytesToSend);
  long end = Util.now();
  assertTrue(totalBytes*1000/(end-start) <= bandwidthPerSec);
}
Example #5
Source File: OfflineEditsViewerHelper.java From big-c with Apache License 2.0
/**
 * Sets up a MiniDFSCluster, configures it to create one edits file,
 * starts DelegationTokenSecretManager (to get security op codes)
 *
 * @param dfsDir DFS directory (where to setup MiniDFS cluster)
 */
public void startCluster(String dfsDir) throws IOException {
  // same as manageDfsDirs but only one edits file instead of two
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      Util.fileAsURI(new File(dfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      Util.fileAsURI(new File(dfsDir, "namesecondary1")).toString());
  // blocksize for concat (file size must be multiple of blocksize)
  config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  // for security to work (fake JobTracker user)
  config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
      "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
  config.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster =
      new MiniDFSCluster.Builder(config).manageNameDfsDirs(false).build();
  cluster.waitClusterUp();
}
Example #6
Source File: OfflineEditsViewerHelper.java From hadoop with Apache License 2.0
/**
 * Sets up a MiniDFSCluster, configures it to create one edits file,
 * starts DelegationTokenSecretManager (to get security op codes)
 *
 * @param dfsDir DFS directory (where to setup MiniDFS cluster)
 */
public void startCluster(String dfsDir) throws IOException {
  // same as manageDfsDirs but only one edits file instead of two
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      Util.fileAsURI(new File(dfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      Util.fileAsURI(new File(dfsDir, "namesecondary1")).toString());
  // blocksize for concat (file size must be multiple of blocksize)
  config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  // for security to work (fake JobTracker user)
  config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
      "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
  config.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster =
      new MiniDFSCluster.Builder(config).manageNameDfsDirs(false).build();
  cluster.waitClusterUp();
}
Example #7
Source File: NNStorage.java From big-c with Apache License 2.0
/**
 * Return the storage directory corresponding to the passed URI
 * @param uri URI of a storage directory
 * @return The matching storage directory or null if none found
 */
StorageDirectory getStorageDirectory(URI uri) {
  try {
    uri = Util.fileAsURI(new File(uri));
    Iterator<StorageDirectory> it = dirIterator();
    for (; it.hasNext(); ) {
      StorageDirectory sd = it.next();
      if (Util.fileAsURI(sd.getRoot()).equals(uri)) {
        return sd;
      }
    }
  } catch (IOException ioe) {
    LOG.warn("Error converting file to URI", ioe);
  }
  return null;
}
Example #8
Source File: NNStorage.java From big-c with Apache License 2.0
/**
 * Return the list of locations being used for a specific purpose.
 * i.e. Image or edit log storage.
 *
 * @param dirType Purpose of locations requested.
 * @throws IOException
 */
Collection<URI> getDirectories(NameNodeDirType dirType) throws IOException {
  ArrayList<URI> list = new ArrayList<URI>();
  Iterator<StorageDirectory> it =
      (dirType == null) ? dirIterator() : dirIterator(dirType);
  for ( ; it.hasNext(); ) {
    StorageDirectory sd = it.next();
    try {
      list.add(Util.fileAsURI(sd.getRoot()));
    } catch (IOException e) {
      throw new IOException("Exception while processing " +
          "StorageDirectory " + sd.getRoot(), e);
    }
  }
  return list;
}
Example #9
Source File: NNStorage.java From hadoop with Apache License 2.0
/**
 * Return the storage directory corresponding to the passed URI
 * @param uri URI of a storage directory
 * @return The matching storage directory or null if none found
 */
StorageDirectory getStorageDirectory(URI uri) {
  try {
    uri = Util.fileAsURI(new File(uri));
    Iterator<StorageDirectory> it = dirIterator();
    for (; it.hasNext(); ) {
      StorageDirectory sd = it.next();
      if (Util.fileAsURI(sd.getRoot()).equals(uri)) {
        return sd;
      }
    }
  } catch (IOException ioe) {
    LOG.warn("Error converting file to URI", ioe);
  }
  return null;
}
Example #10
Source File: FSImage.java From big-c with Apache License 2.0
static List<URI> getCheckpointEditsDirs(Configuration conf,
    String defaultName) {
  Collection<String> dirNames = conf.getTrimmedStringCollection(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
  if (dirNames.size() == 0 && defaultName != null) {
    dirNames.add(defaultName);
  }
  return Util.stringCollectionAsURIs(dirNames);
}
Example #11
Source File: TestCreateEditsLog.java From big-c with Apache License 2.0
/**
 * Tests that an edits log created using CreateEditsLog is valid and can be
 * loaded successfully by a namenode.
 */
@Test(timeout=60000)
public void testCanLoadCreatedEditsLog() throws Exception {
  // Format namenode.
  HdfsConfiguration conf = new HdfsConfiguration();
  File nameDir = new File(HDFS_DIR, "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, Util.fileAsURI(nameDir).toString());
  DFSTestUtil.formatNameNode(conf);

  // Call CreateEditsLog and move the resulting edits to the name dir.
  CreateEditsLog.main(new String[] { "-f", "1000", "0", "1", "-d",
      TEST_DIR.getAbsolutePath() });
  Path editsWildcard = new Path(TEST_DIR.getAbsolutePath(), "*");
  FileContext localFc = FileContext.getLocalFSFileContext();
  for (FileStatus edits : localFc.util().globStatus(editsWildcard)) {
    Path src = edits.getPath();
    Path dst = new Path(new File(nameDir, "current").getAbsolutePath(),
        src.getName());
    localFc.rename(src, dst);
  }

  // Start a namenode to try to load the edits.
  cluster = new MiniDFSCluster.Builder(conf)
      .format(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .build();
  cluster.waitClusterUp();

  // Test successful, because no exception thrown.
}
Example #12
Source File: NameNodeResourceChecker.java From big-c with Apache License 2.0
/**
 * Create a NameNodeResourceChecker, which will check the edits dirs and any
 * additional dirs to check set in <code>conf</code>.
 */
public NameNodeResourceChecker(Configuration conf) throws IOException {
  this.conf = conf;
  volumes = new HashMap<String, CheckedVolume>();

  duReserved = conf.getLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY,
      DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_DEFAULT);

  Collection<URI> extraCheckedVolumes = Util.stringCollectionAsURIs(conf
      .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY));

  Collection<URI> localEditDirs = Collections2.filter(
      FSNamesystem.getNamespaceEditsDirs(conf),
      new Predicate<URI>() {
        @Override
        public boolean apply(URI input) {
          if (input.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
            return true;
          }
          return false;
        }
      });

  // Add all the local edits dirs, marking some as required if they are
  // configured as such.
  for (URI editsDirToCheck : localEditDirs) {
    addDirToCheck(editsDirToCheck,
        FSNamesystem.getRequiredNamespaceEditsDirs(conf).contains(
            editsDirToCheck));
  }

  // All extra checked volumes are marked "required"
  for (URI extraDirToCheck : extraCheckedVolumes) {
    addDirToCheck(extraDirToCheck, true);
  }

  minimumRedundantVolumes = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,
      DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_DEFAULT);
}
Example #13
Source File: FSImage.java From big-c with Apache License 2.0
/**
 * Retrieve checkpoint dirs from configuration.
 *
 * @param conf the Configuration
 * @param defaultValue a default value for the attribute, if null
 * @return a Collection of URIs representing the values in
 *         dfs.namenode.checkpoint.dir configuration property
 */
static Collection<URI> getCheckpointDirs(Configuration conf,
    String defaultValue) {
  Collection<String> dirNames = conf.getTrimmedStringCollection(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
  if (dirNames.size() == 0 && defaultValue != null) {
    dirNames.add(defaultValue);
  }
  return Util.stringCollectionAsURIs(dirNames);
}
Example #14
Source File: TestCreateEditsLog.java From hadoop with Apache License 2.0
/**
 * Tests that an edits log created using CreateEditsLog is valid and can be
 * loaded successfully by a namenode.
 */
@Test(timeout=60000)
public void testCanLoadCreatedEditsLog() throws Exception {
  // Format namenode.
  HdfsConfiguration conf = new HdfsConfiguration();
  File nameDir = new File(HDFS_DIR, "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, Util.fileAsURI(nameDir).toString());
  DFSTestUtil.formatNameNode(conf);

  // Call CreateEditsLog and move the resulting edits to the name dir.
  CreateEditsLog.main(new String[] { "-f", "1000", "0", "1", "-d",
      TEST_DIR.getAbsolutePath() });
  Path editsWildcard = new Path(TEST_DIR.getAbsolutePath(), "*");
  FileContext localFc = FileContext.getLocalFSFileContext();
  for (FileStatus edits : localFc.util().globStatus(editsWildcard)) {
    Path src = edits.getPath();
    Path dst = new Path(new File(nameDir, "current").getAbsolutePath(),
        src.getName());
    localFc.rename(src, dst);
  }

  // Start a namenode to try to load the edits.
  cluster = new MiniDFSCluster.Builder(conf)
      .format(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .build();
  cluster.waitClusterUp();

  // Test successful, because no exception thrown.
}
Example #15
Source File: NameNodeResourceChecker.java From hadoop with Apache License 2.0
/**
 * Create a NameNodeResourceChecker, which will check the edits dirs and any
 * additional dirs to check set in <code>conf</code>.
 */
public NameNodeResourceChecker(Configuration conf) throws IOException {
  this.conf = conf;
  volumes = new HashMap<String, CheckedVolume>();

  duReserved = conf.getLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY,
      DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_DEFAULT);

  Collection<URI> extraCheckedVolumes = Util.stringCollectionAsURIs(conf
      .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY));

  Collection<URI> localEditDirs = Collections2.filter(
      FSNamesystem.getNamespaceEditsDirs(conf),
      new Predicate<URI>() {
        @Override
        public boolean apply(URI input) {
          if (input.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
            return true;
          }
          return false;
        }
      });

  // Add all the local edits dirs, marking some as required if they are
  // configured as such.
  for (URI editsDirToCheck : localEditDirs) {
    addDirToCheck(editsDirToCheck,
        FSNamesystem.getRequiredNamespaceEditsDirs(conf).contains(
            editsDirToCheck));
  }

  // All extra checked volumes are marked "required"
  for (URI extraDirToCheck : extraCheckedVolumes) {
    addDirToCheck(extraDirToCheck, true);
  }

  minimumRedundantVolumes = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,
      DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_DEFAULT);
}
Example #16
Source File: FSImage.java From hadoop with Apache License 2.0
static List<URI> getCheckpointEditsDirs(Configuration conf,
    String defaultName) {
  Collection<String> dirNames = conf.getTrimmedStringCollection(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
  if (dirNames.size() == 0 && defaultName != null) {
    dirNames.add(defaultName);
  }
  return Util.stringCollectionAsURIs(dirNames);
}
Example #17
Source File: FSImage.java From hadoop with Apache License 2.0
/**
 * Retrieve checkpoint dirs from configuration.
 *
 * @param conf the Configuration
 * @param defaultValue a default value for the attribute, if null
 * @return a Collection of URIs representing the values in
 *         dfs.namenode.checkpoint.dir configuration property
 */
static Collection<URI> getCheckpointDirs(Configuration conf,
    String defaultValue) {
  Collection<String> dirNames = conf.getTrimmedStringCollection(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
  if (dirNames.size() == 0 && defaultValue != null) {
    dirNames.add(defaultValue);
  }
  return Util.stringCollectionAsURIs(dirNames);
}
Example #18
Source File: DiskUtil.java From tajo with Apache License 2.0
public static List<URI> getDataNodeStorageDirs() {
  Configuration conf = new HdfsConfiguration();
  Collection<String> dirNames =
      conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
  return Util.stringCollectionAsURIs(dirNames);
}
Example #19
Source File: Balancer.java From RDFS with Apache License 2.0
private void dispatchBlocks(long startTime) {
  this.blocksToReceive = 2*scheduledSize;
  boolean isTimeUp = false;

  while (!isTimeUp && scheduledSize > 0 &&
      (!srcBlockList.isEmpty() || blocksToReceive > 0)) {

    // check if time is up or not
    // Even if not sent everything the iteration is over
    if (Util.now()-startTime > maxIterationTime) {
      isTimeUp = true;
      continue;
    }

    PendingBlockMove pendingBlock = chooseNextBlockToMove();
    if (pendingBlock != null) {
      // move the block
      pendingBlock.scheduleBlockMove();
      continue;
    }

    /* Since we can not schedule any block to move,
     * filter any moved blocks from the source block list and
     * check if we should fetch more blocks from the namenode
     */
    filterMovedBlocks(); // filter already moved blocks
    if (shouldFetchMoreBlocks()) {
      // fetch new blocks
      try {
        blocksToReceive -= getBlockList();
        continue;
      } catch (IOException e) {
        LOG.warn(StringUtils.stringifyException(e));
        return;
      }
    }

    /* Now we can not schedule any block to move and there are
     * no new blocks added to the source block list, so we wait.
     */
    try {
      synchronized (Balancer.this) {
        Balancer.this.wait(1000); // wait for targets/sources to be idle
      }
    } catch (InterruptedException ignored) {
    }
  }
}
Example #20
Source File: DataNode.java From RDFS with Apache License 2.0
static Collection<URI> getStorageDirs(Configuration conf) {
  Collection<String> dirNames = conf.getStringCollection("dfs.data.dir");
  return Util.stringCollectionAsURIs(dirNames);
}
Example #21
Source File: Balancer.java From hadoop-gpu with Apache License 2.0
private void dispatchBlocks() {
  long startTime = Util.now();
  this.blocksToReceive = 2*scheduledSize;
  boolean isTimeUp = false;

  while (!isTimeUp && scheduledSize > 0 &&
      (!srcBlockList.isEmpty() || blocksToReceive > 0)) {

    PendingBlockMove pendingBlock = chooseNextBlockToMove();
    if (pendingBlock != null) {
      // move the block
      pendingBlock.scheduleBlockMove();
      continue;
    }

    /* Since we can not schedule any block to move,
     * filter any moved blocks from the source block list and
     * check if we should fetch more blocks from the namenode
     */
    filterMovedBlocks(); // filter already moved blocks
    if (shouldFetchMoreBlocks()) {
      // fetch new blocks
      try {
        blocksToReceive -= getBlockList();
        continue;
      } catch (IOException e) {
        LOG.warn(StringUtils.stringifyException(e));
        return;
      }
    }

    // check if time is up or not
    if (Util.now()-startTime > MAX_ITERATION_TIME) {
      isTimeUp = true;
      continue;
    }

    /* Now we can not schedule any block to move and there are
     * no new blocks added to the source block list, so we wait.
     */
    try {
      synchronized (Balancer.this) {
        Balancer.this.wait(1000); // wait for targets/sources to be idle
      }
    } catch (InterruptedException ignored) {
    }
  }
}