Java Code Examples for org.apache.hadoop.hdfs.server.datanode.ReplicaInfo#getMetaFile()
The following examples show how to use org.apache.hadoop.hdfs.server.datanode.ReplicaInfo#getMetaFile().
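getMetaFile() resolves a replica's checksum (meta) file on disk. The meta file sits next to the block file and its name embeds the generation stamp, e.g. blk_1073741825_1001.meta, which is why the path changes whenever the generation stamp changes. The sketch below is not Hadoop code; it only illustrates that naming convention with a hypothetical helper.

import java.io.File;

public class MetaFileNameSketch {
  // Hypothetical helper; mirrors the "<block file name>_<generation stamp>.meta"
  // convention that ReplicaInfo#getMetaFile() resolves in the replica's directory.
  static File metaFileFor(File blockFile, long generationStamp) {
    return new File(blockFile.getParentFile(),
        blockFile.getName() + "_" + generationStamp + ".meta");
  }

  public static void main(String[] args) {
    File blockFile = new File("/data/dn/current/finalized/blk_1073741825");
    // Prints /data/dn/current/finalized/blk_1073741825_1001.meta
    System.out.println(metaFileFor(blockFile, 1001L));
  }
}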
Example 1
Source File: FsDatasetImpl.java From hadoop with Apache License 2.0
/**
 * Bump a replica's generation stamp to a new one.
 * Its on-disk meta file name is renamed to be the new one too.
 *
 * @param replicaInfo a replica
 * @param newGS new generation stamp
 * @throws IOException if rename fails
 */
private void bumpReplicaGS(ReplicaInfo replicaInfo, long newGS)
    throws IOException {
  long oldGS = replicaInfo.getGenerationStamp();
  File oldmeta = replicaInfo.getMetaFile();
  replicaInfo.setGenerationStamp(newGS);
  File newmeta = replicaInfo.getMetaFile();

  // rename meta file to new GS
  if (LOG.isDebugEnabled()) {
    LOG.debug("Renaming " + oldmeta + " to " + newmeta);
  }
  try {
    NativeIO.renameTo(oldmeta, newmeta);
  } catch (IOException e) {
    replicaInfo.setGenerationStamp(oldGS); // restore old GS
    throw new IOException("Block " + replicaInfo + " reopen failed. " +
        " Unable to move meta file " + oldmeta + " to " + newmeta, e);
  }
}
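Note that the example calls getMetaFile() twice: because the meta file name embeds the generation stamp, the path returned before and after setGenerationStamp() differs, so the on-disk file must be renamed to match. Below is a minimal, self-contained sketch of that rename pattern with hypothetical file names, using java.nio.file.Files.move in place of NativeIO.renameTo.

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

public class BumpMetaFileSketch {
  public static void main(String[] args) throws IOException {
    File dir = Files.createTempDirectory("replica").toFile();
    File oldMeta = new File(dir, "blk_42_1001.meta"); // hypothetical old GS = 1001
    File newMeta = new File(dir, "blk_42_1002.meta"); // hypothetical new GS = 1002
    Files.createFile(oldMeta.toPath());
    try {
      // Plays the role of NativeIO.renameTo(oldmeta, newmeta) in the example above.
      Files.move(oldMeta.toPath(), newMeta.toPath());
    } catch (IOException e) {
      // The original example restores the old generation stamp at this point so
      // the in-memory ReplicaInfo still matches the meta file left on disk.
      throw new IOException("Unable to move " + oldMeta + " to " + newMeta, e);
    }
    System.out.println("Renamed meta file to " + newMeta);
  }
}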
Example 2
Source File: TestDatanodeRestart.java From hadoop with Apache License 2.0
private static void createUnlinkTmpFile(ReplicaInfo replicaInfo,
    boolean changeBlockFile, boolean isRename) throws IOException {
  File src;
  if (changeBlockFile) {
    src = replicaInfo.getBlockFile();
  } else {
    src = replicaInfo.getMetaFile();
  }
  File dst = DatanodeUtil.getUnlinkTmpFile(src);
  if (isRename) {
    src.renameTo(dst);
  } else {
    FileInputStream in = new FileInputStream(src);
    try {
      FileOutputStream out = new FileOutputStream(dst);
      try {
        IOUtils.copyBytes(in, out, 1);
      } finally {
        out.close();
      }
    } finally {
      in.close();
    }
  }
}
Example 3
Source File: FsDatasetImpl.java From big-c with Apache License 2.0
/**
 * Bump a replica's generation stamp to a new one.
 * Its on-disk meta file name is renamed to be the new one too.
 *
 * @param replicaInfo a replica
 * @param newGS new generation stamp
 * @throws IOException if rename fails
 */
private void bumpReplicaGS(ReplicaInfo replicaInfo, long newGS)
    throws IOException {
  long oldGS = replicaInfo.getGenerationStamp();
  File oldmeta = replicaInfo.getMetaFile();
  replicaInfo.setGenerationStamp(newGS);
  File newmeta = replicaInfo.getMetaFile();

  // rename meta file to new GS
  if (LOG.isDebugEnabled()) {
    LOG.debug("Renaming " + oldmeta + " to " + newmeta);
  }
  try {
    NativeIO.renameTo(oldmeta, newmeta);
  } catch (IOException e) {
    replicaInfo.setGenerationStamp(oldGS); // restore old GS
    throw new IOException("Block " + replicaInfo + " reopen failed. " +
        " Unable to move meta file " + oldmeta + " to " + newmeta, e);
  }
}
Example 4
Source File: TestDatanodeRestart.java From big-c with Apache License 2.0
private static void createUnlinkTmpFile(ReplicaInfo replicaInfo,
    boolean changeBlockFile, boolean isRename) throws IOException {
  File src;
  if (changeBlockFile) {
    src = replicaInfo.getBlockFile();
  } else {
    src = replicaInfo.getMetaFile();
  }
  File dst = DatanodeUtil.getUnlinkTmpFile(src);
  if (isRename) {
    src.renameTo(dst);
  } else {
    FileInputStream in = new FileInputStream(src);
    try {
      FileOutputStream out = new FileOutputStream(dst);
      try {
        IOUtils.copyBytes(in, out, 1);
      } finally {
        out.close();
      }
    } finally {
      in.close();
    }
  }
}
Example 5
Source File: BlockPoolSlice.java From hadoop with Apache License 2.0
private void deleteReplica(final ReplicaInfo replicaToDelete) {
  // Delete the files on disk. Failure here is okay.
  final File blockFile = replicaToDelete.getBlockFile();
  if (!blockFile.delete()) {
    LOG.warn("Failed to delete block file " + blockFile);
  }
  final File metaFile = replicaToDelete.getMetaFile();
  if (!metaFile.delete()) {
    LOG.warn("Failed to delete meta file " + metaFile);
  }
}
Example 6
Source File: BlockPoolSlice.java From big-c with Apache License 2.0
private void deleteReplica(final ReplicaInfo replicaToDelete) {
  // Delete the files on disk. Failure here is okay.
  final File blockFile = replicaToDelete.getBlockFile();
  if (!blockFile.delete()) {
    LOG.warn("Failed to delete block file " + blockFile);
  }
  final File metaFile = replicaToDelete.getMetaFile();
  if (!metaFile.delete()) {
    LOG.warn("Failed to delete meta file " + metaFile);
  }
}
Example 7
Source File: FsDatasetImpl.java From hadoop with Apache License 2.0
/**
 * Move block files from one storage to another storage.
 * @return Returns the Old replicaInfo
 * @throws IOException
 */
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
    StorageType targetStorageType) throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(block);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
  }
  if (replicaInfo.getNumBytes() != block.getNumBytes()) {
    throw new IOException("Corrupted replica " + replicaInfo
        + " with a length of " + replicaInfo.getNumBytes()
        + " expected length is " + block.getNumBytes());
  }
  if (replicaInfo.getVolume().getStorageType() == targetStorageType) {
    throw new ReplicaAlreadyExistsException("Replica " + replicaInfo
        + " already exists on storage " + targetStorageType);
  }
  if (replicaInfo.isOnTransientStorage()) {
    // Block movement from RAM_DISK will be done by LazyPersist mechanism
    throw new IOException("Replica " + replicaInfo
        + " cannot be moved from storageType : "
        + replicaInfo.getVolume().getStorageType());
  }
  try (FsVolumeReference volumeRef = volumes.getNextVolume(
      targetStorageType, block.getNumBytes())) {
    File oldBlockFile = replicaInfo.getBlockFile();
    File oldMetaFile = replicaInfo.getMetaFile();
    FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
    // Copy files to temp dir first
    File[] blockFiles = copyBlockFiles(block.getBlockId(),
        block.getGenerationStamp(), oldMetaFile, oldBlockFile,
        targetVolume.getTmpDir(block.getBlockPoolId()),
        replicaInfo.isOnTransientStorage());
    ReplicaInfo newReplicaInfo = new ReplicaInPipeline(
        replicaInfo.getBlockId(), replicaInfo.getGenerationStamp(),
        targetVolume, blockFiles[0].getParentFile(), 0);
    newReplicaInfo.setNumBytes(blockFiles[1].length());
    // Finalize the copied files
    newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
    removeOldReplica(replicaInfo, newReplicaInfo, oldBlockFile, oldMetaFile,
        oldBlockFile.length(), oldMetaFile.length(), block.getBlockPoolId());
  }
  // Replace the old block if any to reschedule the scanning.
  return replicaInfo;
}
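Here getMetaFile() captures the old meta file path before the replica is copied to the target volume: the block file and its meta file always travel together. A minimal sketch of that copy-then-promote idea, with hypothetical helper and directory names (not the Hadoop implementation):

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;

public class MoveReplicaSketch {
  // Hypothetical helper illustrating the copy step only; finalization of the new
  // replica and removal of the old files happen afterwards in the real code path.
  static void copyReplicaFiles(File oldBlockFile, File oldMetaFile, File targetTmpDir)
      throws IOException {
    File newBlockFile = new File(targetTmpDir, oldBlockFile.getName());
    File newMetaFile = new File(targetTmpDir, oldMetaFile.getName());
    // Copy into the target volume's temp directory first; only after both copies
    // succeed is the new replica promoted and the old one deleted.
    Files.copy(oldBlockFile.toPath(), newBlockFile.toPath(),
        StandardCopyOption.REPLACE_EXISTING);
    Files.copy(oldMetaFile.toPath(), newMetaFile.toPath(),
        StandardCopyOption.REPLACE_EXISTING);
  }
}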
Example 8
Source File: FsDatasetImpl.java From big-c with Apache License 2.0
/**
 * Move block files from one storage to another storage.
 * @return Returns the Old replicaInfo
 * @throws IOException
 */
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
    StorageType targetStorageType) throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(block);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
  }
  if (replicaInfo.getNumBytes() != block.getNumBytes()) {
    throw new IOException("Corrupted replica " + replicaInfo
        + " with a length of " + replicaInfo.getNumBytes()
        + " expected length is " + block.getNumBytes());
  }
  if (replicaInfo.getVolume().getStorageType() == targetStorageType) {
    throw new ReplicaAlreadyExistsException("Replica " + replicaInfo
        + " already exists on storage " + targetStorageType);
  }
  if (replicaInfo.isOnTransientStorage()) {
    // Block movement from RAM_DISK will be done by LazyPersist mechanism
    throw new IOException("Replica " + replicaInfo
        + " cannot be moved from storageType : "
        + replicaInfo.getVolume().getStorageType());
  }
  try (FsVolumeReference volumeRef = volumes.getNextVolume(
      targetStorageType, block.getNumBytes())) {
    File oldBlockFile = replicaInfo.getBlockFile();
    File oldMetaFile = replicaInfo.getMetaFile();
    FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
    // Copy files to temp dir first
    File[] blockFiles = copyBlockFiles(block.getBlockId(),
        block.getGenerationStamp(), oldMetaFile, oldBlockFile,
        targetVolume.getTmpDir(block.getBlockPoolId()),
        replicaInfo.isOnTransientStorage());
    ReplicaInfo newReplicaInfo = new ReplicaInPipeline(
        replicaInfo.getBlockId(), replicaInfo.getGenerationStamp(),
        targetVolume, blockFiles[0].getParentFile(), 0);
    newReplicaInfo.setNumBytes(blockFiles[1].length());
    // Finalize the copied files
    newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
    removeOldReplica(replicaInfo, newReplicaInfo, oldBlockFile, oldMetaFile,
        oldBlockFile.length(), oldMetaFile.length(), block.getBlockPoolId());
  }
  // Replace the old block if any to reschedule the scanning.
  return replicaInfo;
}