org.apache.hadoop.hdfs.util.LightWeightHashSet Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.util.LightWeightHashSet.
Each example is drawn from a real project; the source file, originating project, and license are noted above each snippet.
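LightWeightHashSet is a memory-efficient Set implementation used internally by the HDFS NameNode and DataNode. As orientation before the examples, here is a minimal sketch of the operations they rely on (add, getElement, pollN, isEmpty). It assumes a Hadoop HDFS jar on the classpath; the Block constructor arguments (id, length, generation stamp) are illustrative values, not taken from any example below.

    import java.util.List;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.util.LightWeightHashSet;

    public class LightWeightHashSetDemo {
      public static void main(String[] args) {
        LightWeightHashSet<Block> set = new LightWeightHashSet<Block>();

        // add() returns true only if the element was not already present.
        Block b = new Block(1L, 1024L, 1001L); // id, length, generation stamp
        boolean added = set.add(b);            // true

        // getElement() returns the stored element equal to the probe, or null.
        Block stored = set.getElement(new Block(1L, 1024L, 1001L));

        // pollN() removes and returns up to N elements in one call.
        List<Block> drained = set.pollN(10);

        System.out.println(added + " " + (stored != null) + " " + set.isEmpty());
      }
    }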
Example #1
Source File: InvalidateBlocks.java, from hadoop (Apache License 2.0)
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode);
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode, set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* {}: add {} to {}",
          getClass().getSimpleName(), block, datanode);
    }
  }
}
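The get-or-create step above (look up the per-datanode set, create it on a miss) is a classic map idiom. On a Java 8+ runtime it can be written with Map#computeIfAbsent; this is a hypothetical rewrite for illustration, not code from the Hadoop tree, and it drops the log parameter for brevity:

    // Hypothetical Java 8+ variant; assumes node2blocks is a
    // java.util.Map<DatanodeInfo, LightWeightHashSet<Block>>.
    synchronized void add(final Block block, final DatanodeInfo datanode) {
      LightWeightHashSet<Block> set = node2blocks.computeIfAbsent(
          datanode, k -> new LightWeightHashSet<Block>());
      if (set.add(block)) {
        numBlocks++; // count only first-time insertions
      }
    }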
Example #2
Source File: InvalidateBlocks.java, from hadoop (Apache License 2.0)
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }
  for (Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry
      : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(entry.getKey());
      out.println(blocks);
    }
  }
}
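A hypothetical call site, not from the Hadoop tree, that flushes the report to standard output:

    // invalidateBlocks is a hypothetical InvalidateBlocks instance.
    PrintWriter out = new PrintWriter(System.out, true); // autoflush on println
    invalidateBlocks.dump(out);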
Example #3
Source File: InvalidateBlocks.java, from big-c (Apache License 2.0)
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode);
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode, set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* {}: add {} to {}",
          getClass().getSimpleName(), block, datanode);
    }
  }
}
Example #4
Source File: FSDataset.java, from RDFS (Apache License 2.0)
/**
 * Populate the given blockSet with any child blocks
 * found at this node. With each block, return the full path
 * of the block file.
 */
void getBlockAndFileInfo(LightWeightHashSet<BlockAndFile> blockSet) {
  FSDir[] children = this.getChildren();
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockAndFileInfo(blockSet);
    }
  }

  File blockFiles[] = dir.listFiles();
  String[] blockFilesNames = getFileNames(blockFiles);
  for (int i = 0; i < blockFiles.length; i++) {
    if (Block.isBlockFilename(blockFilesNames[i])) {
      long genStamp = FSDataset.getGenerationStampFromFile(blockFilesNames,
          blockFilesNames[i]);
      Block block = new Block(blockFiles[i], blockFiles[i].length(), genStamp);
      blockSet.add(new BlockAndFile(blockFiles[i].getAbsoluteFile(), block));
    }
  }
}
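This example and Example #6 below follow the same recursive-accumulator pattern: each directory node adds its own matches to a caller-supplied set, then recurses into its children. A self-contained sketch of that pattern using only JDK types (all names here are hypothetical, not from FSDataset):

    import java.io.File;
    import java.util.HashSet;
    import java.util.Set;

    class DirScanner {
      /** Recursively collect absolute paths of regular files under root. */
      static void collectFiles(File root, Set<String> out) {
        File[] entries = root.listFiles();
        if (entries == null) { // not a directory, or an I/O error
          return;
        }
        for (File e : entries) {
          if (e.isDirectory()) {
            collectFiles(e, out); // recurse into the child directory
          } else {
            out.add(e.getAbsolutePath());
          }
        }
      }

      public static void main(String[] args) {
        Set<String> files = new HashSet<>();
        collectFiles(new File("."), files);
        System.out.println(files.size() + " files found");
      }
    }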
Example #5
Source File: InvalidateBlocks.java, from big-c (Apache License 2.0)
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }
  for (Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry
      : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(entry.getKey());
      out.println(blocks);
    }
  }
}
Example #6
Source File: FSDataset.java, from RDFS (Apache License 2.0)
/**
 * Populate the given blockSet with any child blocks
 * found at this node.
 */
public void getBlockInfo(LightWeightHashSet<Block> blockSet) {
  FSDir[] children = this.getChildren();
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockInfo(blockSet);
    }
  }

  File blockFiles[] = dir.listFiles();
  String[] blockFilesNames = getFileNames(blockFiles);
  for (int i = 0; i < blockFiles.length; i++) {
    if (Block.isBlockFilename(blockFilesNames[i])) {
      long genStamp = FSDataset.getGenerationStampFromFile(blockFilesNames,
          blockFilesNames[i]);
      blockSet.add(new Block(blockFiles[i], blockFiles[i].length(), genStamp));
    }
  }
}
Example #7
Source File: DataBlockScanner.java, from RDFS (Apache License 2.0)
void scanNamespace() {
  startNewPeriod();
  // Create a new processedBlocks structure
  processedBlocks = new LightWeightHashSet<Long>();
  if (verificationLog != null) {
    try {
      verificationLog.openCurFile();
    } catch (FileNotFoundException ex) {
      LOG.warn("Could not open current file");
    }
  }
  if (!assignInitialVerificationTimes()) {
    return;
  }
  // Start scanning
  scan();
}
Example #8
Source File: FSDataset.java, from RDFS (Apache License 2.0)
/**
 * Recover blocks that were being written when the datanode
 * was earlier shut down. These blocks get re-inserted into
 * ongoingCreates. Also, send a blockReceived message to the NN
 * for each of these blocks because these are not part of a
 * block report.
 */
private void recoverBlocksBeingWritten(File bbw) throws IOException {
  FSDir fsd = new FSDir(namespaceId, bbw, this.volume);
  LightWeightHashSet<BlockAndFile> blockSet =
      new LightWeightHashSet<BlockAndFile>();
  fsd.getBlockAndFileInfo(blockSet);
  for (BlockAndFile b : blockSet) {
    File f = b.pathfile; // full path name of block file
    lock.writeLock().lock();
    try {
      volumeMap.add(namespaceId, b.block,
          new DatanodeBlockInfo(volume, f, DatanodeBlockInfo.UNFINALIZED));
      volumeMap.addOngoingCreates(namespaceId, b.block,
          ActiveFile.createStartupRecoveryFile(f));
    } finally {
      lock.writeLock().unlock();
    }
    if (DataNode.LOG.isDebugEnabled()) {
      DataNode.LOG.debug("recoverBlocksBeingWritten for block " + b.block
          + " namespaceId: " + namespaceId);
    }
  }
}
Example #9
Source File: FSDataset.java, from RDFS (Apache License 2.0)
/**
 * Return a table of block data for the given namespace.
 */
public Block[] getBlockReport(int namespaceId) {
  // getBlockReport doesn't grab the global lock as we believe it is
  // OK to get some inconsistent partial results. The inconsistent
  // information will finally be fixed by the next incremental block
  // report.
  LightWeightHashSet<Block> blockSet = new LightWeightHashSet<Block>();
  volumes.getBlockInfo(namespaceId, blockSet);
  Block blockTable[] = new Block[blockSet.size()];
  int i = 0;
  for (Iterator<Block> it = blockSet.iterator(); it.hasNext(); i++) {
    blockTable[i] = it.next();
  }
  return blockTable;
}
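If the LightWeightHashSet in your tree implements java.util.Collection (it does in recent Apache Hadoop; verify before relying on this in the RDFS fork), the manual iterator loop above can collapse to a single call:

    // Hedged alternative to the iterator loop above:
    Block[] blockTable = blockSet.toArray(new Block[blockSet.size()]);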
Example #10
Source File: InvalidateBlocks.java, from hadoop (Apache License 2.0)
/**
 * @return true if the given storage has the given block listed for
 * invalidation. Blocks are compared including their generation stamps:
 * if a block is pending invalidation but with a different generation
 * stamp, returns false.
 */
synchronized boolean contains(final DatanodeInfo dn, final Block block) {
  final LightWeightHashSet<Block> s = node2blocks.get(dn);
  if (s == null) {
    return false; // no invalidate blocks for this storage ID
  }
  Block blockInSet = s.getElement(block);
  return blockInSet != null
      && block.getGenerationStamp() == blockInSet.getGenerationStamp();
}
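The explicit generation-stamp comparison is needed because set membership is keyed on the block ID alone, so getElement() can return a stored block whose generation stamp differs from the probe's (the javadoc above exists precisely because of this). A hedged illustration, assuming the Hadoop jar on the classpath; Block constructor arguments are id, length, generation stamp:

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.util.LightWeightHashSet;

    public class GenStampCheck {
      public static void main(String[] args) {
        LightWeightHashSet<Block> s = new LightWeightHashSet<Block>();
        s.add(new Block(42L, 1024L, 1001L)); // id, length, generation stamp

        // Probe with the same ID but an older generation stamp.
        Block probe = new Block(42L, 1024L, 1000L);
        Block inSet = s.getElement(probe); // found by ID, so non-null

        // The extra check from contains() above rejects the stale probe.
        boolean pending = inSet != null
            && probe.getGenerationStamp() == inSet.getGenerationStamp();
        System.out.println(pending); // false
      }
    }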
Example #11
Source File: FSDataset.java, from RDFS (Apache License 2.0)
/**
 * Return a table of data for blocks that are being written.
 * @throws IOException
 */
public Block[] getBlocksBeingWrittenReport(int namespaceId) throws IOException {
  LightWeightHashSet<Block> blockSet = new LightWeightHashSet<Block>();
  volumes.getBlocksBeingWrittenInfo(namespaceId, blockSet);
  Block blockTable[] = new Block[blockSet.size()];
  int i = 0;
  for (Iterator<Block> it = blockSet.iterator(); it.hasNext(); i++) {
    blockTable[i] = it.next();
  }
  return blockTable;
}
Example #12
Source File: FSDataset.java, from RDFS (Apache License 2.0)
private void getBlockInfo(int namespaceId, LightWeightHashSet<Block> blockSet) {
  long startTime = System.currentTimeMillis();
  FSVolume[] volumes = this.getVolumes();

  if (scannersExecutor != null) {
    synchronized (scannersExecutor) {
      List<Future<LightWeightHashSet<Block>>> builders =
          new ArrayList<Future<LightWeightHashSet<Block>>>();
      for (int idx = 0; idx < volumes.length; idx++) {
        builders.add(scannersExecutor.submit(
            new BlockInfoBuilder(volumes[idx], namespaceId)));
      }
      for (Future<LightWeightHashSet<Block>> future : builders) {
        try {
          blockSet.addAll(future.get());
        } catch (ExecutionException ex) {
          DataNode.LOG.error("Error scanning volumes ", ex.getCause());
        } catch (InterruptedException iex) {
          DataNode.LOG.error("Error waiting for scan", iex);
        }
      }
    }
  } else {
    for (int idx = 0; idx < volumes.length; idx++) {
      try {
        volumes[idx].getBlockInfo(namespaceId, blockSet);
      } catch (IOException e) {
        DataNode.LOG.error("Error scanning volumes ", e.getCause());
      }
    }
  }

  long scanTime = (System.currentTimeMillis() - startTime) / 1000;
  DataNode.LOG.info("Finished generating block report for "
      + volumes.length + " volumes in " + scanTime + " seconds");
}
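This example and Example #13 use the same scatter/gather idiom: submit one Callable per volume, let each build a private set (Examples #25 and #26 show the Callables), then merge the futures' results on the calling thread so no locking is needed during the scan. A self-contained sketch of the idiom using only JDK types (all names hypothetical):

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;
    import java.util.concurrent.*;

    class ScatterGather {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        int volumes = 4;

        // Scatter: one scan task per "volume", each builds a private set.
        List<Future<Set<Long>>> futures = new ArrayList<>();
        for (int v = 0; v < volumes; v++) {
          final int id = v;
          futures.add(pool.submit(() -> {
            Set<Long> local = new HashSet<>();
            for (long b = 0; b < 1000; b++) {
              local.add(id * 1_000_000L + b); // fake "block IDs"
            }
            return local;
          }));
        }

        // Gather: merge each task's result on the calling thread.
        Set<Long> merged = new HashSet<>();
        for (Future<Set<Long>> f : futures) {
          try {
            merged.addAll(f.get());
          } catch (ExecutionException e) {
            System.err.println("scan failed: " + e.getCause());
          }
        }
        pool.shutdown();
        System.out.println("merged " + merged.size() + " blocks");
      }
    }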
Example #13
Source File: FSDataset.java, from RDFS (Apache License 2.0)
private void getBlocksBeingWrittenInfo(int namespaceId,
    LightWeightHashSet<Block> blockSet) throws IOException {
  long startTime = System.currentTimeMillis();
  FSVolume[] volumes = this.getVolumes();

  if (scannersExecutor != null) {
    synchronized (scannersExecutor) {
      List<Future<LightWeightHashSet<Block>>> builders =
          new ArrayList<Future<LightWeightHashSet<Block>>>();
      for (int idx = 0; idx < volumes.length; idx++) {
        builders.add(scannersExecutor.submit(
            new BlocksBeingWrittenInfoBuilder(volumes[idx], namespaceId)));
      }
      for (Future<LightWeightHashSet<Block>> future : builders) {
        try {
          blockSet.addAll(future.get());
        } catch (ExecutionException ex) {
          DataNode.LOG.error(
              "Error generating block being written info from volumes ",
              ex.getCause());
          throw new IOException(ex);
        } catch (InterruptedException iex) {
          DataNode.LOG.error(
              "Error waiting for generating block being written info", iex);
          throw new IOException(iex);
        }
      }
    }
  } else {
    for (int idx = 0; idx < volumes.length; idx++) {
      volumes[idx].getBlocksBeingWrittenInfo(namespaceId, blockSet);
    }
  }

  long scanTime = (System.currentTimeMillis() - startTime) / 1000;
  DataNode.LOG.info("Finished generating blocks being written report for "
      + volumes.length + " volumes in " + scanTime + " seconds");
}
Example #14
Source File: FSDataset.java, from RDFS (Apache License 2.0)
void getBlocksBeingWrittenInfo(int namespaceId,
    LightWeightHashSet<Block> blockSet) {
  NamespaceSlice ns = getNamespaceSlice(namespaceId);
  if (ns == null) {
    return;
  }
  ns.getBlocksBeingWrittenInfo(blockSet);
}
Example #15
Source File: FSDataset.java, from RDFS (Apache License 2.0)
void getBlocksBeingWrittenInfo(LightWeightHashSet<Block> blockSet) {
  if (rbwDir == null) {
    return;
  }
  File[] blockFiles = rbwDir.listFiles();
  if (blockFiles == null) {
    return;
  }
  String[] blockFileNames = getFileNames(blockFiles);
  for (int i = 0; i < blockFiles.length; i++) {
    if (!blockFiles[i].isDirectory()) {
      // get each block in the rbwDir directory
      if (Block.isBlockFilename(blockFileNames[i])) {
        long genStamp = FSDataset.getGenerationStampFromFile(
            blockFileNames, blockFileNames[i]);
        Block block =
            new Block(blockFiles[i], blockFiles[i].length(), genStamp);
        // add this block to the block set
        blockSet.add(block);
        if (DataNode.LOG.isDebugEnabled()) {
          DataNode.LOG.debug("recoverBlocksBeingWritten for block " + block);
        }
      }
    }
  }
}
Example #16
Source File: InvalidateBlocks.java, from big-c (Apache License 2.0)
synchronized List<Block> invalidateWork(final DatanodeDescriptor dn) {
  final long delay = getInvalidationDelay();
  if (delay > 0) {
    if (BlockManager.LOG.isDebugEnabled()) {
      BlockManager.LOG.debug("Block deletion is delayed during NameNode startup. "
          + "The deletion will start after " + delay + " ms.");
    }
    return null;
  }
  final LightWeightHashSet<Block> set = node2blocks.get(dn);
  if (set == null) {
    return null;
  }

  // # blocks that can be sent in one message is limited
  final int limit = blockInvalidateLimit;
  final List<Block> toInvalidate = set.pollN(limit);

  // If we send everything in this message, remove this node entry
  if (set.isEmpty()) {
    remove(dn);
  }

  dn.addBlocksToBeInvalidated(toInvalidate);
  numBlocks -= toInvalidate.size();
  return toInvalidate;
}
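pollN(limit) is what caps the size of each invalidation message: it removes and returns at most limit elements, leaving the remainder for later rounds. A minimal hedged sketch of that draining loop, using plain Long elements for brevity and assuming the Hadoop jar on the classpath:

    import java.util.List;
    import org.apache.hadoop.hdfs.util.LightWeightHashSet;

    public class PollNDemo {
      public static void main(String[] args) {
        LightWeightHashSet<Long> pending = new LightWeightHashSet<Long>();
        for (long i = 0; i < 25; i++) {
          pending.add(i);
        }

        int limit = 10; // stands in for blockInvalidateLimit
        while (!pending.isEmpty()) {
          List<Long> batch = pending.pollN(limit);
          System.out.println("sending batch of " + batch.size()); // 10, 10, 5
        }
      }
    }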
Example #17
Source File: InvalidateBlocks.java, from big-c (Apache License 2.0)
/** Remove the block from the specified storage. */
synchronized void remove(final DatanodeInfo dn, final Block block) {
  final LightWeightHashSet<Block> v = node2blocks.get(dn);
  if (v != null && v.remove(block)) {
    numBlocks--;
    if (v.isEmpty()) {
      node2blocks.remove(dn);
    }
  }
}
Example #18
Source File: InvalidateBlocks.java, from big-c (Apache License 2.0)
/** Remove a storage from the invalidatesSet. */
synchronized void remove(final DatanodeInfo dn) {
  final LightWeightHashSet<Block> blocks = node2blocks.remove(dn);
  if (blocks != null) {
    numBlocks -= blocks.size();
  }
}
Example #19
Source File: InvalidateBlocks.java, from big-c (Apache License 2.0)
/**
 * @return true if the given storage has the given block listed for
 * invalidation. Blocks are compared including their generation stamps:
 * if a block is pending invalidation but with a different generation
 * stamp, returns false.
 */
synchronized boolean contains(final DatanodeInfo dn, final Block block) {
  final LightWeightHashSet<Block> s = node2blocks.get(dn);
  if (s == null) {
    return false; // no invalidate blocks for this storage ID
  }
  Block blockInSet = s.getElement(block);
  return blockInSet != null
      && block.getGenerationStamp() == blockInSet.getGenerationStamp();
}
Example #20
Source File: InvalidateBlocks.java, from hadoop (Apache License 2.0)
synchronized List<Block> invalidateWork(final DatanodeDescriptor dn) {
  final long delay = getInvalidationDelay();
  if (delay > 0) {
    if (BlockManager.LOG.isDebugEnabled()) {
      BlockManager.LOG.debug("Block deletion is delayed during NameNode startup. "
          + "The deletion will start after " + delay + " ms.");
    }
    return null;
  }
  final LightWeightHashSet<Block> set = node2blocks.get(dn);
  if (set == null) {
    return null;
  }

  // # blocks that can be sent in one message is limited
  final int limit = blockInvalidateLimit;
  final List<Block> toInvalidate = set.pollN(limit);

  // If we send everything in this message, remove this node entry
  if (set.isEmpty()) {
    remove(dn);
  }

  dn.addBlocksToBeInvalidated(toInvalidate);
  numBlocks -= toInvalidate.size();
  return toInvalidate;
}
Example #21
Source File: InvalidateBlocks.java, from hadoop (Apache License 2.0)
/** Remove the block from the specified storage. */
synchronized void remove(final DatanodeInfo dn, final Block block) {
  final LightWeightHashSet<Block> v = node2blocks.get(dn);
  if (v != null && v.remove(block)) {
    numBlocks--;
    if (v.isEmpty()) {
      node2blocks.remove(dn);
    }
  }
}
Example #22
Source File: InvalidateBlocks.java, from hadoop (Apache License 2.0)
/** Remove a storage from the invalidatesSet. */
synchronized void remove(final DatanodeInfo dn) {
  final LightWeightHashSet<Block> blocks = node2blocks.remove(dn);
  if (blocks != null) {
    numBlocks -= blocks.size();
  }
}
Example #23
Source File: FSDataset.java, from RDFS (Apache License 2.0)
void getBlockInfo(LightWeightHashSet<Block> blocks) {
  dataDir.getBlockInfo(blocks);
}
Example #24
Source File: FSDataset.java, from RDFS (Apache License 2.0)
void getBlockInfo(int namespaceId, LightWeightHashSet<Block> blockSet)
    throws IOException {
  NamespaceSlice ns = getNamespaceSlice(namespaceId);
  ns.getBlockInfo(blockSet);
}
Example #25
Source File: FSDataset.java, from RDFS (Apache License 2.0)
@Override
public LightWeightHashSet<Block> call() throws Exception {
  LightWeightHashSet<Block> result = new LightWeightHashSet<Block>();
  volume.getBlockInfo(namespaceId, result);
  return result;
}
Example #26
Source File: FSDataset.java, from RDFS (Apache License 2.0)
@Override
public LightWeightHashSet<Block> call() throws Exception {
  LightWeightHashSet<Block> result = new LightWeightHashSet<Block>();
  volume.getBlocksBeingWrittenInfo(namespaceId, result);
  return result;
}