org.apache.hadoop.hdfs.server.datanode.DataStorage Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.datanode.DataStorage.
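Most of the test examples below follow the same pattern: DataStorage is mocked with Mockito and a few of its accessors are stubbed before an FsDatasetImpl is built against it. Here is a minimal, hypothetical sketch of that pattern; it is not taken from any single example below, and the class name and helper method are illustrative assumptions.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Arrays;

import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;

public class DataStorageMockSketch {
  /**
   * Stub a DataStorage that reports the given storage directories
   * without touching any real on-disk state (illustrative helper,
   * same idea as createStorageDirs() in TestFsDatasetImpl below).
   */
  public static DataStorage mockStorage(Storage.StorageDirectory... dirs) {
    DataStorage storage = mock(DataStorage.class);
    when(storage.getNumStorageDirs()).thenReturn(dirs.length);
    for (int i = 0; i < dirs.length; i++) {
      when(storage.getStorageDir(i)).thenReturn(dirs[i]);
    }
    when(storage.dirIterator()).thenReturn(Arrays.asList(dirs).iterator());
    return storage;
  }
}

Against such a mock, an FsDatasetImpl can then be constructed just as in the setUp() methods shown below, i.e. new FsDatasetImpl(datanode, storage, conf), with no real volumes on disk.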
Example #1
Source File: NNThroughputBenchmark.java From big-c with Apache License 2.0 | 6 votes |
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports,
      new BlockReportContext(1, 0, System.nanoTime()));
}
Example #2
Source File: UpgradeUtilities.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * Create a <code>version</code> file for datanode inside the specified parent
 * directory. If such a file already exists, it will be overwritten.
 * The given version string will be written to the file as the layout
 * version. None of the parameters may be null.
 *
 * @param parent directory where the datanode VERSION file is stored
 * @param version StorageInfo to create VERSION file from
 * @param bpid Block pool Id
 * @param bpidToWrite Block pool Id to write into the version file
 */
public static void createDataNodeVersionFile(File[] parent,
    StorageInfo version, String bpid, String bpidToWrite) throws IOException {
  DataStorage storage = new DataStorage(version);
  storage.setDatanodeUuid("FixedDatanodeUuid");

  File[] versionFiles = new File[parent.length];
  for (int i = 0; i < parent.length; i++) {
    File versionFile = new File(parent[i], "VERSION");
    StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
    storage.createStorageID(sd, false);
    storage.writeProperties(versionFile, sd);
    versionFiles[i] = versionFile;
    File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);
    createBlockPoolVersionFile(bpDir, version, bpidToWrite);
  }
}
Example #3
Source File: UpgradeUtilities.java From big-c with Apache License 2.0 | 6 votes |
/**
 * Create a <code>version</code> file for datanode inside the specified parent
 * directory. If such a file already exists, it will be overwritten.
 * The given version string will be written to the file as the layout
 * version. None of the parameters may be null.
 *
 * @param parent directory where the datanode VERSION file is stored
 * @param version StorageInfo to create VERSION file from
 * @param bpid Block pool Id
 * @param bpidToWrite Block pool Id to write into the version file
 */
public static void createDataNodeVersionFile(File[] parent,
    StorageInfo version, String bpid, String bpidToWrite) throws IOException {
  DataStorage storage = new DataStorage(version);
  storage.setDatanodeUuid("FixedDatanodeUuid");

  File[] versionFiles = new File[parent.length];
  for (int i = 0; i < parent.length; i++) {
    File versionFile = new File(parent[i], "VERSION");
    StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
    storage.createStorageID(sd, false);
    storage.writeProperties(versionFile, sd);
    versionFiles[i] = versionFile;
    File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);
    createBlockPoolVersionFile(bpDir, version, bpidToWrite);
  }
}
Example #4
Source File: FsDatasetImpl.java From big-c with Apache License 2.0 | 6 votes |
/**
 * Gets initial volume failure information for all volumes that failed
 * immediately at startup. The method works by determining the set difference
 * between all configured storage locations and the actual storage locations
 * in use after attempting to put all of them into service.
 *
 * @return each storage location that has failed
 */
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
    Collection<StorageLocation> dataLocations, DataStorage storage) {
  Set<String> failedLocationSet = Sets.newHashSetWithExpectedSize(
      dataLocations.size());
  for (StorageLocation sl: dataLocations) {
    failedLocationSet.add(sl.getFile().getAbsolutePath());
  }
  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
       it.hasNext(); ) {
    Storage.StorageDirectory sd = it.next();
    failedLocationSet.remove(sd.getRoot().getAbsolutePath());
  }
  List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(
      failedLocationSet.size());
  long failureDate = Time.now();
  for (String failedStorageLocation: failedLocationSet) {
    volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation,
        failureDate));
  }
  return volumeFailureInfos;
}
Example #5
Source File: FsVolumeImpl.java From big-c with Apache License 2.0 | 6 votes |
boolean isBPDirEmpty(String bpid) throws IOException {
  File volumeCurrentDir = this.getCurrentDir();
  File bpDir = new File(volumeCurrentDir, bpid);
  File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  File finalizedDir = new File(bpCurrentDir,
      DataStorage.STORAGE_DIR_FINALIZED);
  File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
  if (finalizedDir.exists() && !DatanodeUtil.dirNoFilesRecursive(
      finalizedDir)) {
    return false;
  }
  if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) {
    return false;
  }
  return true;
}
Example #6
Source File: TestFsDatasetImpl.java From hadoop with Apache License 2.0 | 6 votes |
@Before
public void setUp() throws IOException {
  datanode = mock(DataNode.class);
  storage = mock(DataStorage.class);
  this.conf = new Configuration();
  this.conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
  final DNConf dnConf = new DNConf(conf);

  when(datanode.getConf()).thenReturn(conf);
  when(datanode.getDnConf()).thenReturn(dnConf);
  final BlockScanner disabledBlockScanner = new BlockScanner(datanode, conf);
  when(datanode.getBlockScanner()).thenReturn(disabledBlockScanner);

  createStorageDirs(storage, conf, NUM_INIT_VOLUMES);
  dataset = new FsDatasetImpl(datanode, storage, conf);
  for (String bpid : BLOCK_POOL_IDS) {
    dataset.addBlockPool(bpid, conf);
  }

  assertEquals(NUM_INIT_VOLUMES, dataset.getVolumes().size());
  assertEquals(0, dataset.getNumFailedVolumes());
}
Example #7
Source File: TestFsDatasetImpl.java From hadoop with Apache License 2.0 | 6 votes |
private static void createStorageDirs(DataStorage storage, Configuration conf,
    int numDirs) throws IOException {
  List<Storage.StorageDirectory> dirs =
      new ArrayList<Storage.StorageDirectory>();
  List<String> dirStrings = new ArrayList<String>();
  for (int i = 0; i < numDirs; i++) {
    File loc = new File(BASE_DIR + "/data" + i);
    dirStrings.add(new Path(loc.toString()).toUri().toString());
    loc.mkdirs();
    dirs.add(createStorageDirectory(loc));
    when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
  }

  String dataDir = StringUtils.join(",", dirStrings);
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
  when(storage.dirIterator()).thenReturn(dirs.iterator());
  when(storage.getNumStorageDirs()).thenReturn(numDirs);
}
Example #8
Source File: NNThroughputBenchmark.java From hadoop with Apache License 2.0 | 6 votes |
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports,
      new BlockReportContext(1, 0, System.nanoTime()));
}
Example #9
Source File: TestFsDatasetImpl.java From big-c with Apache License 2.0 | 6 votes |
@Before
public void setUp() throws IOException {
  datanode = mock(DataNode.class);
  storage = mock(DataStorage.class);
  this.conf = new Configuration();
  this.conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
  final DNConf dnConf = new DNConf(conf);

  when(datanode.getConf()).thenReturn(conf);
  when(datanode.getDnConf()).thenReturn(dnConf);
  final BlockScanner disabledBlockScanner = new BlockScanner(datanode, conf);
  when(datanode.getBlockScanner()).thenReturn(disabledBlockScanner);

  createStorageDirs(storage, conf, NUM_INIT_VOLUMES);
  dataset = new FsDatasetImpl(datanode, storage, conf);
  for (String bpid : BLOCK_POOL_IDS) {
    dataset.addBlockPool(bpid, conf);
  }

  assertEquals(NUM_INIT_VOLUMES, dataset.getVolumes().size());
  assertEquals(0, dataset.getNumFailedVolumes());
}
Example #10
Source File: TestFsDatasetImpl.java From big-c with Apache License 2.0 | 6 votes |
private static void createStorageDirs(DataStorage storage, Configuration conf,
    int numDirs) throws IOException {
  List<Storage.StorageDirectory> dirs =
      new ArrayList<Storage.StorageDirectory>();
  List<String> dirStrings = new ArrayList<String>();
  for (int i = 0; i < numDirs; i++) {
    File loc = new File(BASE_DIR + "/data" + i);
    dirStrings.add(new Path(loc.toString()).toUri().toString());
    loc.mkdirs();
    dirs.add(createStorageDirectory(loc));
    when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
  }

  String dataDir = StringUtils.join(",", dirStrings);
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
  when(storage.dirIterator()).thenReturn(dirs.iterator());
  when(storage.getNumStorageDirs()).thenReturn(numDirs);
}
Example #11
Source File: FsVolumeImpl.java From hadoop with Apache License 2.0 | 6 votes |
boolean isBPDirEmpty(String bpid) throws IOException {
  File volumeCurrentDir = this.getCurrentDir();
  File bpDir = new File(volumeCurrentDir, bpid);
  File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  File finalizedDir = new File(bpCurrentDir,
      DataStorage.STORAGE_DIR_FINALIZED);
  File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
  if (finalizedDir.exists() && !DatanodeUtil.dirNoFilesRecursive(
      finalizedDir)) {
    return false;
  }
  if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) {
    return false;
  }
  return true;
}
Example #12
Source File: FsDatasetImpl.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * Gets initial volume failure information for all volumes that failed
 * immediately at startup. The method works by determining the set difference
 * between all configured storage locations and the actual storage locations
 * in use after attempting to put all of them into service.
 *
 * @return each storage location that has failed
 */
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
    Collection<StorageLocation> dataLocations, DataStorage storage) {
  Set<String> failedLocationSet = Sets.newHashSetWithExpectedSize(
      dataLocations.size());
  for (StorageLocation sl: dataLocations) {
    failedLocationSet.add(sl.getFile().getAbsolutePath());
  }
  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
       it.hasNext(); ) {
    Storage.StorageDirectory sd = it.next();
    failedLocationSet.remove(sd.getRoot().getAbsolutePath());
  }
  List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(
      failedLocationSet.size());
  long failureDate = Time.now();
  for (String failedStorageLocation: failedLocationSet) {
    volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation,
        failureDate));
  }
  return volumeFailureInfos;
}
Example #13
Source File: TestFsDatasetImpl.java From big-c with Apache License 2.0 | 5 votes |
@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
  final int numExistingVolumes = dataset.getVolumes().size();
  List<NamespaceInfo> nsInfos = new ArrayList<>();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
  StorageLocation loc = StorageLocation.parse(newVolumePath);

  Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
  DataStorage.VolumeBuilder builder =
      new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
      anyListOf(NamespaceInfo.class)))
      .thenReturn(builder);

  dataset.addVolume(loc, nsInfos);
  assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());

  when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
  when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
  Set<File> volumesToRemove = new HashSet<>();
  volumesToRemove.add(loc.getFile());
  dataset.removeVolumes(volumesToRemove, true);
  assertEquals(numExistingVolumes, dataset.getVolumes().size());
}
Example #14
Source File: TestFsDatasetImpl.java From big-c with Apache License 2.0 | 5 votes |
@Test
public void testAddVolumes() throws IOException {
  final int numNewVolumes = 3;
  final int numExistingVolumes = dataset.getVolumes().size();
  final int totalVolumes = numNewVolumes + numExistingVolumes;
  Set<String> expectedVolumes = new HashSet<String>();
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  for (int i = 0; i < numNewVolumes; i++) {
    String path = BASE_DIR + "/newData" + i;
    String pathUri = new Path(path).toUri().toString();
    expectedVolumes.add(new File(pathUri).toString());
    StorageLocation loc = StorageLocation.parse(pathUri);
    Storage.StorageDirectory sd = createStorageDirectory(new File(path));
    DataStorage.VolumeBuilder builder =
        new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
        anyListOf(NamespaceInfo.class)))
        .thenReturn(builder);

    dataset.addVolume(loc, nsInfos);
  }

  assertEquals(totalVolumes, dataset.getVolumes().size());
  assertEquals(totalVolumes, dataset.storageMap.size());

  Set<String> actualVolumes = new HashSet<String>();
  for (int i = 0; i < numNewVolumes; i++) {
    actualVolumes.add(
        dataset.getVolumes().get(numExistingVolumes + i).getBasePath());
  }
  assertEquals(actualVolumes.size(), expectedVolumes.size());
  assertTrue(actualVolumes.containsAll(expectedVolumes));
}
Example #15
Source File: TestDFSFinalize.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Verify that the current directory exists and that the previous directory
 * does not exist. Verify that current hasn't been modified by comparing
 * the checksum of all its containing files with their original checksum.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs,
    String bpid) throws Exception {
  List<File> dirs = Lists.newArrayList();
  for (int i = 0; i < nameNodeDirs.length; i++) {
    File curDir = new File(nameNodeDirs[i], "current");
    dirs.add(curDir);
    FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());

  File dnCurDirs[] = new File[dataNodeDirs.length];
  for (int i = 0; i < dataNodeDirs.length; i++) {
    dnCurDirs[i] = new File(dataNodeDirs[i], "current");
    assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i],
        false), UpgradeUtilities.checksumMasterDataNodeContents());
  }
  for (int i = 0; i < nameNodeDirs.length; i++) {
    assertFalse(new File(nameNodeDirs[i], "previous").isDirectory());
  }

  if (bpid == null) {
    for (int i = 0; i < dataNodeDirs.length; i++) {
      assertFalse(new File(dataNodeDirs[i], "previous").isDirectory());
    }
  } else {
    for (int i = 0; i < dataNodeDirs.length; i++) {
      File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, dnCurDirs[i]);
      assertFalse(new File(bpRoot, "previous").isDirectory());

      File bpCurFinalizeDir = new File(bpRoot,
          "current/" + DataStorage.STORAGE_DIR_FINALIZED);
      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
          bpCurFinalizeDir, true),
          UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
    }
  }
}
Example #16
Source File: TestFsDatasetImpl.java From big-c with Apache License 2.0 | 5 votes |
@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
  FsDatasetImpl spyDataset = spy(dataset);
  FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
  File badDir = new File(BASE_DIR, "bad");
  badDir.mkdirs();
  doReturn(mockVolume).when(spyDataset)
      .createFsVolume(anyString(), any(File.class), any(StorageType.class));
  doThrow(new IOException("Failed to getVolumeMap()"))
      .when(mockVolume).getVolumeMap(
          anyString(),
          any(ReplicaMap.class),
          any(RamDiskReplicaLruTracker.class));

  Storage.StorageDirectory sd = createStorageDirectory(badDir);
  sd.lock();
  DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(badDir.getAbsoluteFile()),
      Matchers.<List<NamespaceInfo>>any()))
      .thenReturn(builder);

  StorageLocation location = StorageLocation.parse(badDir.toString());
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }

  try {
    spyDataset.addVolume(location, nsInfos);
    fail("Expect to throw MultipleIOException");
  } catch (MultipleIOException e) {
  }

  FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
Example #17
Source File: TestFsDatasetImpl.java From hadoop with Apache License 2.0 | 5 votes |
@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
  final int numExistingVolumes = dataset.getVolumes().size();
  List<NamespaceInfo> nsInfos = new ArrayList<>();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
  StorageLocation loc = StorageLocation.parse(newVolumePath);

  Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
  DataStorage.VolumeBuilder builder =
      new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
      anyListOf(NamespaceInfo.class)))
      .thenReturn(builder);

  dataset.addVolume(loc, nsInfos);
  assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());

  when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
  when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
  Set<File> volumesToRemove = new HashSet<>();
  volumesToRemove.add(loc.getFile());
  dataset.removeVolumes(volumesToRemove, true);
  assertEquals(numExistingVolumes, dataset.getVolumes().size());
}
Example #18
Source File: TestFsDatasetImpl.java From hadoop with Apache License 2.0 | 5 votes |
@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
  FsDatasetImpl spyDataset = spy(dataset);
  FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
  File badDir = new File(BASE_DIR, "bad");
  badDir.mkdirs();
  doReturn(mockVolume).when(spyDataset)
      .createFsVolume(anyString(), any(File.class), any(StorageType.class));
  doThrow(new IOException("Failed to getVolumeMap()"))
      .when(mockVolume).getVolumeMap(
          anyString(),
          any(ReplicaMap.class),
          any(RamDiskReplicaLruTracker.class));

  Storage.StorageDirectory sd = createStorageDirectory(badDir);
  sd.lock();
  DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(badDir.getAbsoluteFile()),
      Matchers.<List<NamespaceInfo>>any()))
      .thenReturn(builder);

  StorageLocation location = StorageLocation.parse(badDir.toString());
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }

  try {
    spyDataset.addVolume(location, nsInfos);
    fail("Expect to throw MultipleIOException");
  } catch (MultipleIOException e) {
  }

  FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
Example #19
Source File: TestFsDatasetImpl.java From hadoop with Apache License 2.0 | 5 votes |
@Test
public void testAddVolumes() throws IOException {
  final int numNewVolumes = 3;
  final int numExistingVolumes = dataset.getVolumes().size();
  final int totalVolumes = numNewVolumes + numExistingVolumes;
  Set<String> expectedVolumes = new HashSet<String>();
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  for (int i = 0; i < numNewVolumes; i++) {
    String path = BASE_DIR + "/newData" + i;
    String pathUri = new Path(path).toUri().toString();
    expectedVolumes.add(new File(pathUri).toString());
    StorageLocation loc = StorageLocation.parse(pathUri);
    Storage.StorageDirectory sd = createStorageDirectory(new File(path));
    DataStorage.VolumeBuilder builder =
        new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
        anyListOf(NamespaceInfo.class)))
        .thenReturn(builder);

    dataset.addVolume(loc, nsInfos);
  }

  assertEquals(totalVolumes, dataset.getVolumes().size());
  assertEquals(totalVolumes, dataset.storageMap.size());

  Set<String> actualVolumes = new HashSet<String>();
  for (int i = 0; i < numNewVolumes; i++) {
    actualVolumes.add(
        dataset.getVolumes().get(numExistingVolumes + i).getBasePath());
  }
  assertEquals(actualVolumes.size(), expectedVolumes.size());
  assertTrue(actualVolumes.containsAll(expectedVolumes));
}
Example #20
Source File: TestDFSFinalize.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Verify that the current directory exists and that the previous directory
 * does not exist. Verify that current hasn't been modified by comparing
 * the checksum of all its containing files with their original checksum.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs,
    String bpid) throws Exception {
  List<File> dirs = Lists.newArrayList();
  for (int i = 0; i < nameNodeDirs.length; i++) {
    File curDir = new File(nameNodeDirs[i], "current");
    dirs.add(curDir);
    FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());

  File dnCurDirs[] = new File[dataNodeDirs.length];
  for (int i = 0; i < dataNodeDirs.length; i++) {
    dnCurDirs[i] = new File(dataNodeDirs[i], "current");
    assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i],
        false), UpgradeUtilities.checksumMasterDataNodeContents());
  }
  for (int i = 0; i < nameNodeDirs.length; i++) {
    assertFalse(new File(nameNodeDirs[i], "previous").isDirectory());
  }

  if (bpid == null) {
    for (int i = 0; i < dataNodeDirs.length; i++) {
      assertFalse(new File(dataNodeDirs[i], "previous").isDirectory());
    }
  } else {
    for (int i = 0; i < dataNodeDirs.length; i++) {
      File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, dnCurDirs[i]);
      assertFalse(new File(bpRoot, "previous").isDirectory());

      File bpCurFinalizeDir = new File(bpRoot,
          "current/" + DataStorage.STORAGE_DIR_FINALIZED);
      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
          bpCurFinalizeDir, true),
          UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
    }
  }
}
Example #21
Source File: NNThroughputBenchmark.java From RDFS with Apache License 2.0 | 5 votes |
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNode.versionRequest();
  dnRegistration.setStorageInfo(new DataStorage(nsInfo, "", null), "");
  String storageId = DataNode.createNewStorageId(dnRegistration.getPort());
  dnRegistration.setStorageID(storageId);
  // register datanode
  dnRegistration = nameNode.register(dnRegistration,
      DataTransferProtocol.DATA_TRANSFER_VERSION);
}
Example #22
Source File: NNThroughputBenchmark.java From RDFS with Apache License 2.0 | 5 votes |
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNode.versionRequest();
  dnRegistration.setStorageInfo(new DataStorage(nsInfo, "", null), "");
  dnRegistration.storageID =
      DataNode.createNewStorageId(dnRegistration.getPort());
  // register datanode
  dnRegistration = nameNode.register(dnRegistration);
}
Example #23
Source File: NNThroughputBenchmark.java From hadoop-gpu with Apache License 2.0 | 5 votes |
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNode.versionRequest();
  dnRegistration.setStorageInfo(new DataStorage(nsInfo, ""));
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNode.register(dnRegistration);
}
Example #24
Source File: UpgradeUtilities.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Initialize the data structures used by this class.
 * IMPORTANT NOTE: This method must be called once before calling
 * any other public method on this class.
 * <p>
 * Creates a singleton master populated storage
 * directory for a Namenode (contains edits, fsimage,
 * version, and time files) and a Datanode (contains version and
 * block files). This can be a lengthy operation.
 */
public static void initialize() throws Exception {
  createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
  Configuration config = new HdfsConfiguration();
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeStorage.toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeStorage.toString());
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, datanodeStorage.toString());
  MiniDFSCluster cluster = null;
  String bpid = null;
  try {
    // format data-node
    createEmptyDirs(new String[] {datanodeStorage.toString()});

    // format and start NameNode and start DataNode
    DFSTestUtil.formatNameNode(config);
    cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(1)
        .startupOption(StartupOption.REGULAR)
        .format(false)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .build();

    NamenodeProtocols namenode = cluster.getNameNodeRpc();
    namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
    namenodeStorageFsscTime = namenode.versionRequest().getCTime();
    namenodeStorageClusterID = namenode.versionRequest().getClusterID();
    namenodeStorageBlockPoolID = namenode.versionRequest().getBlockPoolID();

    FileSystem fs = FileSystem.get(config);
    Path baseDir = new Path("/TestUpgrade");
    fs.mkdirs(baseDir);

    // write some files
    int bufferSize = 4096;
    byte[] buffer = new byte[bufferSize];
    for (int i = 0; i < bufferSize; i++)
      buffer[i] = (byte)('0' + i % 50);
    writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
    writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);

    // save image
    namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    namenode.saveNamespace();
    namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);

    // write more files
    writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
    writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
    bpid = cluster.getNamesystem(0).getBlockPoolId();
  } finally {
    // shutdown
    if (cluster != null) cluster.shutdown();
    FileUtil.fullyDelete(new File(namenodeStorage, "in_use.lock"));
    FileUtil.fullyDelete(new File(datanodeStorage, "in_use.lock"));
  }
  namenodeStorageChecksum = checksumContents(NAME_NODE,
      new File(namenodeStorage, "current"), false);
  File dnCurDir = new File(datanodeStorage, "current");
  datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir, false);

  File bpCurDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
      "current");
  blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir, false);

  File bpCurFinalizeDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
      "current/" + DataStorage.STORAGE_DIR_FINALIZED);
  blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE,
      bpCurFinalizeDir, true);

  File bpCurRbwDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
      "current/" + DataStorage.STORAGE_DIR_RBW);
  blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir,
      false);
}
Example #25
Source File: DatanodeRegistration.java From hadoop-gpu with Apache License 2.0 | 4 votes |
public void setStorageInfo(DataStorage storage) {
  this.storageInfo = new StorageInfo(storage);
  this.storageID = storage.getStorageID();
}
Example #26
Source File: BlockPoolSlice.java From lucene-solr with Apache License 2.0 | 4 votes |
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs to
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @param timer include methods for getting time
 * @throws IOException Error making directories
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf, Timer timer) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.fileIoProvider = volume.getFileIoProvider();
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  this.cachedDfsUsedCheckTime = conf.getLong(
      DFSConfigKeys.DFS_DN_CACHED_DFSUSED_CHECK_INTERVAL_MS,
      DFSConfigKeys.DFS_DN_CACHED_DFSUSED_CHECK_INTERVAL_DEFAULT_MS);

  this.maxDataLength = conf.getInt(
      CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH,
      CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);

  this.timer = timer;

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    fileIoProvider.fullyDelete(volume, tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);

  // create the rbw and tmp directories if they don't exist.
  fileIoProvider.mkdirs(volume, rbwDir);
  fileIoProvider.mkdirs(volume, tmpDir);

  if (addReplicaThreadPool == null) {
    // initialize add replica fork join pool
    initializeAddReplicaPool(conf);
  }
  // Make the dfs usage to be saved during shutdown.
  shutdownHook = new Runnable() {
    @Override
    public void run() {
      addReplicaThreadPool.shutdownNow();
    }
  };
  ShutdownHookManager.get().addShutdownHook(shutdownHook,
      SHUTDOWN_HOOK_PRIORITY);
}
Example #27
Source File: FsDatasetSpi.java From hadoop with Apache License 2.0 | 4 votes |
/** Create a new object. */
public abstract D newInstance(DataNode datanode, DataStorage storage,
    Configuration conf) throws IOException;
Example #28
Source File: BlockPoolSlice.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs to
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use cached value initially if available. Or the following call will
  // block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Make the dfs usage to be saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
      new Runnable() {
        @Override
        public void run() {
          if (!dfsUsedSaved) {
            saveDfsUsed();
          }
        }
      }, SHUTDOWN_HOOK_PRIORITY);
}
Example #29
Source File: FsDatasetFactory.java From big-c with Apache License 2.0 | 4 votes |
@Override
public FsDatasetImpl newInstance(DataNode datanode,
    DataStorage storage, Configuration conf) throws IOException {
  return new FsDatasetImpl(datanode, storage, conf);
}
Example #30
Source File: FsDatasetImpl.java From big-c with Apache License 2.0 | 4 votes |
/**
 * An FSDataset has a directory where it loads its data files.
 */
FsDatasetImpl(DataNode datanode, DataStorage storage, Configuration conf
    ) throws IOException {
  this.fsRunning = true;
  this.datanode = datanode;
  this.dataStorage = storage;
  this.conf = conf;
  // The number of volumes required for operation is the total number
  // of volumes minus the number of failed volumes we can tolerate.
  final int volFailuresTolerated =
      conf.getInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
          DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);

  String[] dataDirs = conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
  Collection<StorageLocation> dataLocations = DataNode.getStorageLocations(conf);
  List<VolumeFailureInfo> volumeFailureInfos = getInitialVolumeFailureInfos(
      dataLocations, storage);

  int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
  int volsFailed = volumeFailureInfos.size();
  this.validVolsRequired = volsConfigured - volFailuresTolerated;

  if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
    throw new DiskErrorException("Invalid volume failure "
        + " config value: " + volFailuresTolerated);
  }
  if (volsFailed > volFailuresTolerated) {
    throw new DiskErrorException("Too many failed volumes - "
        + "current valid volumes: " + storage.getNumStorageDirs()
        + ", volumes configured: " + volsConfigured
        + ", volumes failed: " + volsFailed
        + ", volume failures tolerated: " + volFailuresTolerated);
  }

  storageMap = new ConcurrentHashMap<String, DatanodeStorage>();
  volumeMap = new ReplicaMap(this);
  ramDiskReplicaTracker = RamDiskReplicaTracker.getInstance(conf, this);

  @SuppressWarnings("unchecked")
  final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
      ReflectionUtils.newInstance(conf.getClass(
          DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
          RoundRobinVolumeChoosingPolicy.class,
          VolumeChoosingPolicy.class), conf);
  volumes = new FsVolumeList(volumeFailureInfos, datanode.getBlockScanner(),
      blockChooserImpl);
  asyncDiskService = new FsDatasetAsyncDiskService(datanode, this);
  asyncLazyPersistService = new RamDiskAsyncLazyPersistService(datanode);
  deletingBlock = new HashMap<String, Set<Long>>();

  for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
    addVolume(dataLocations, storage.getStorageDir(idx));
  }
  setupAsyncLazyPersistThreads();

  cacheManager = new FsDatasetCache(this);

  // Start the lazy writer once we have built the replica maps.
  lazyWriter = new Daemon(new LazyWriter(conf));
  lazyWriter.start();
  registerMBean(datanode.getDatanodeUuid());
  localFS = FileSystem.getLocal(conf);
  blockPinningEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED,
      DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT);
}