org.apache.hadoop.fs.StorageType Java Examples
The following examples show how to use org.apache.hadoop.fs.StorageType.
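Before the examples, here is a minimal, self-contained sketch of the enum's basic API. It is an illustrative assumption rather than an excerpt from the Hadoop sources, and it only relies on members that also appear in the examples below: values(), getTypesSupportingQuota(), supportTypeQuota(), and the DISK/SSD/ARCHIVE/DEFAULT constants. The class name StorageTypeTour is hypothetical.

import org.apache.hadoop.fs.StorageType;

// Hypothetical demo class (not part of Hadoop): prints the storage types
// and whether each one participates in per-type quota accounting.
public class StorageTypeTour {
  public static void main(String[] args) {
    // Enumerate every declared storage medium (DISK, SSD, ARCHIVE, RAM_DISK, ...).
    for (StorageType t : StorageType.values()) {
      System.out.println(t + " supports per-type quota: " + t.supportTypeQuota());
    }
    // The subset of types that can carry a storage-type quota,
    // as consumed by Example #2 (verifyQuotaByStorageType).
    System.out.println("Quota-capable types: " + StorageType.getTypesSupportingQuota());
    // DEFAULT is the fallback medium used when a caller does not specify one,
    // as in Example #19 (createRbw with StorageType.DEFAULT).
    System.out.println("Default type: " + StorageType.DEFAULT);
  }
}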
Example #1
Source File: BlockPlacementPolicyDefault.java From big-c with Apache License 2.0
private DatanodeStorageInfo chooseFromNextRack(Node next,
    Set<Node> excludedNodes,
    long blocksize,
    int maxNodesPerRack,
    List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException {
  final String nextRack = next.getNetworkLocation();
  try {
    return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } catch (NotEnoughReplicasException e) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Failed to choose from the next rack (location = " + nextRack
          + "), retry choosing ramdomly", e);
    }
    // otherwise randomly choose one from the network
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
}
Example #2
Source File: DirectoryWithQuotaFeature.java From big-c with Apache License 2.0
private void verifyQuotaByStorageType(EnumCounters<StorageType> typeDelta)
    throws QuotaByStorageTypeExceededException {
  if (!isQuotaByStorageTypeSet()) {
    return;
  }
  for (StorageType t : StorageType.getTypesSupportingQuota()) {
    if (!isQuotaByStorageTypeSet(t)) {
      continue;
    }
    if (Quota.isViolated(quota.getTypeSpace(t), usage.getTypeSpace(t),
        typeDelta.get(t))) {
      throw new QuotaByStorageTypeExceededException(
          quota.getTypeSpace(t), usage.getTypeSpace(t) + typeDelta.get(t), t);
    }
  }
}
Example #3
Source File: TestQuotaByStorageType.java From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOnChildOn() throws Exception {
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(parent, StorageType.SSD, 2 * BLOCKSIZE);
  dfs.setQuotaByStorageType(child, StorageType.SSD, 3 * BLOCKSIZE);

  // Create file of size 2.5 * BLOCKSIZE under child directory
  // Verify parent Quota applies
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  try {
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
        REPLICATION, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
  }
}
Example #4
Source File: SimulatedFSDataset.java From hadoop with Apache License 2.0
@Override // FsDatasetSpi
public synchronized ReplicaHandler createTemporary(
    StorageType storageType, ExtendedBlock b) throws IOException {
  if (isValidBlock(b)) {
    throw new ReplicaAlreadyExistsException("Block " + b
        + " is valid, and cannot be written to.");
  }
  if (isValidRbw(b)) {
    throw new ReplicaAlreadyExistsException("Block " + b
        + " is being written, and cannot be written to.");
  }
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = new BInfo(b.getBlockPoolId(), b.getLocalBlock(), true);
  map.put(binfo.theBlock, binfo);
  return new ReplicaHandler(binfo, null);
}
Example #5
Source File: FSImageFormatPBINode.java From big-c with Apache License 2.0
private void loadRootINode(INodeSection.INode p) {
  INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
  final QuotaCounts q = root.getQuotaCounts();
  final long nsQuota = q.getNameSpace();
  final long dsQuota = q.getStorageSpace();
  if (nsQuota != -1 || dsQuota != -1) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
  }
  final EnumCounters<StorageType> typeQuotas = q.getTypeSpaces();
  if (typeQuotas.anyGreaterOrEqual(0)) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(typeQuotas);
  }
  dir.rootDir.cloneModificationTime(root);
  dir.rootDir.clonePermissionStatus(root);
  // root dir supports having extended attributes according to POSIX
  final XAttrFeature f = root.getXAttrFeature();
  if (f != null) {
    dir.rootDir.addXAttrFeature(f);
  }
}
Example #6
Source File: TestQuotaByStorageType.java From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOffChildOn() throws Exception {
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(child, StorageType.SSD, 2 * BLOCKSIZE);

  // Create file of size 2.5 * BLOCKSIZE under child directory
  // Since child directory have SSD quota of 2 * BLOCKSIZE,
  // expect an exception when creating files under child directory.
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  try {
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
        REPLICATION, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
  }
}
Example #7
Source File: TestQuotaByStorageType.java From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);

  // Setup a 3-node cluster and configure
  // each node with 1 SSD and 1 DISK without capacity limitation
  cluster = new MiniDFSCluster
      .Builder(conf)
      .numDataNodes(REPLICATION)
      .storageTypes(new StorageType[]{StorageType.SSD, StorageType.DEFAULT})
      .build();
  cluster.waitActive();

  fsdir = cluster.getNamesystem().getFSDirectory();
  dfs = cluster.getFileSystem();
  fsn = cluster.getNamesystem();
}
Example #8
Source File: BlockPlacementPolicyWithNodeGroup.java From hadoop with Apache License 2.0
/**
 * {@inheritDoc}
 */
@Override
protected void chooseRemoteRack(int numOfReplicas,
    DatanodeDescriptor localMachine, Set<Node> excludedNodes,
    long blocksize, int maxReplicasPerRack, List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes)
    throws NotEnoughReplicasException {
  int oldNumOfReplicas = results.size();

  final String rackLocation = NetworkTopology.getFirstHalf(
      localMachine.getNetworkLocation());
  try {
    // randomly choose from remote racks
    chooseRandom(numOfReplicas, "~" + rackLocation, excludedNodes, blocksize,
        maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
  } catch (NotEnoughReplicasException e) {
    // fall back to the local rack
    chooseRandom(numOfReplicas - (results.size() - oldNumOfReplicas),
        rackLocation, excludedNodes, blocksize,
        maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
  }
}
Example #9
Source File: FanOutOneBlockAsyncDFSOutputHelper.java From hbase with Apache License 2.0
private static void initialize(Configuration conf, Channel channel,
    DatanodeInfo dnInfo, StorageType storageType,
    OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs,
    DFSClient client, Token<BlockTokenIdentifier> accessToken,
    Promise<Channel> promise) throws IOException {
  Promise<Void> saslPromise = channel.eventLoop().newPromise();
  trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise);
  saslPromise.addListener(new FutureListener<Void>() {

    @Override
    public void operationComplete(Future<Void> future) throws Exception {
      if (future.isSuccess()) {
        // setup response processing pipeline first, then send request.
        processWriteBlockResponse(channel, dnInfo, promise, timeoutMs);
        requestWriteBlock(channel, storageType, writeBlockProtoBuilder);
      } else {
        promise.tryFailure(future.cause());
      }
    }
  });
}
Example #10
Source File: FSDirectory.java From big-c with Apache License 2.0
private void verifyQuotaForTruncate(INodesInPath iip, INodeFile file,
    long newLength, QuotaCounts delta) throws QuotaExceededException {
  if (!getFSNamesystem().isImageLoaded() || shouldSkipQuotaChecks()) {
    // Do not check quota if edit log is still being processed
    return;
  }
  final long diff = file.computeQuotaDeltaForTruncate(newLength);
  final short repl = file.getBlockReplication();
  delta.addStorageSpace(diff * repl);
  final BlockStoragePolicy policy = getBlockStoragePolicySuite()
      .getPolicy(file.getStoragePolicyID());
  List<StorageType> types = policy.chooseStorageTypes(repl);
  for (StorageType t : types) {
    if (t.supportTypeQuota()) {
      delta.addTypeSpace(t, diff);
    }
  }
  if (diff > 0) {
    readLock();
    try {
      verifyQuota(iip, iip.length() - 1, delta, null);
    } finally {
      readUnlock();
    }
  }
}
Example #11
Source File: Mover.java From hadoop with Apache License 2.0
boolean scheduleMoves4Block(StorageTypeDiff diff, LocatedBlock lb) {
  final List<MLocation> locations = MLocation.toLocations(lb);
  Collections.shuffle(locations);
  final DBlock db = newDBlock(lb.getBlock().getLocalBlock(), locations);

  for (final StorageType t : diff.existing) {
    for (final MLocation ml : locations) {
      final Source source = storages.getSource(ml);
      if (ml.storageType == t && source != null) {
        // try to schedule one replica move.
        if (scheduleMoveReplica(db, source, diff.expected)) {
          return true;
        }
      }
    }
  }
  return false;
}
Example #12
Source File: TestBlockStoragePolicy.java From hadoop with Apache License 2.0
static void checkChooseStorageTypes(BlockStoragePolicy p, short replication,
    List<StorageType> chosen, EnumSet<StorageType> unavailables,
    boolean isNewBlock, StorageType... expected) {
  final List<StorageType> types = p.chooseStorageTypes(replication,
      chosen, unavailables, isNewBlock);
  assertStorageTypes(types, expected);
}
Example #13
Source File: TestFsDatasetImpl.java From big-c with Apache License 2.0
@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
  FsDatasetImpl spyDataset = spy(dataset);
  FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
  File badDir = new File(BASE_DIR, "bad");
  badDir.mkdirs();
  doReturn(mockVolume).when(spyDataset)
      .createFsVolume(anyString(), any(File.class), any(StorageType.class));
  doThrow(new IOException("Failed to getVolumeMap()"))
      .when(mockVolume).getVolumeMap(
          anyString(),
          any(ReplicaMap.class),
          any(RamDiskReplicaLruTracker.class));

  Storage.StorageDirectory sd = createStorageDirectory(badDir);
  sd.lock();
  DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(badDir.getAbsoluteFile()),
      Matchers.<List<NamespaceInfo>>any()))
      .thenReturn(builder);
  StorageLocation location = StorageLocation.parse(badDir.toString());
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }

  try {
    spyDataset.addVolume(location, nsInfos);
    fail("Expect to throw MultipleIOException");
  } catch (MultipleIOException e) {
  }

  FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
Example #14
Source File: TestStorageReport.java From big-c with Apache License 2.0
@Before
public void startUpCluster() throws IOException {
  conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPL_FACTOR)
      .storageTypes(new StorageType[] { storageType, storageType })
      .build();
  fs = cluster.getFileSystem();
  bpid = cluster.getNamesystem().getBlockPoolId();
}
Example #15
Source File: Dispatcher.java From hadoop with Apache License 2.0
/** Decide if the given block is a good candidate to move or not */
private boolean isGoodBlockCandidate(DBlock block) {
  // source and target must have the same storage type
  final StorageType sourceStorageType = getStorageType();
  for (Task t : tasks) {
    if (Dispatcher.this.isGoodBlockCandidate(this, t.target,
        sourceStorageType, block)) {
      return true;
    }
  }
  return false;
}
Example #16
Source File: TestWriteBlockGetsBlockLengthHint.java From hadoop with Apache License 2.0
/**
 * Override createRbw to verify that the block length that is passed
 * is correct. This requires both DFSOutputStream and BlockReceiver to
 * correctly propagate the hint to FsDatasetSpi.
 */
@Override
public synchronized ReplicaHandler createRbw(
    StorageType storageType, ExtendedBlock b, boolean allowLazyPersist)
    throws IOException {
  assertThat(b.getLocalBlock().getNumBytes(), is(EXPECTED_BLOCK_LENGTH));
  return super.createRbw(storageType, b, allowLazyPersist);
}
Example #17
Source File: TestStoragePolicySummary.java From big-c with Apache License 2.0
@Test
public void testMultipleWarmsInDifferentOrder() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy warm = bsps.getPolicy("WARM");

  //DISK:1,ARCHIVE:1
  sts.add(new StorageType[]{StorageType.DISK, StorageType.ARCHIVE}, warm);
  sts.add(new StorageType[]{StorageType.ARCHIVE, StorageType.DISK}, warm);

  //DISK:2,ARCHIVE:1
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.DISK, StorageType.DISK}, warm);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.ARCHIVE, StorageType.DISK}, warm);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.ARCHIVE}, warm);

  //DISK:1,ARCHIVE:2
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.ARCHIVE, StorageType.ARCHIVE}, warm);
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.DISK, StorageType.ARCHIVE}, warm);
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.ARCHIVE, StorageType.DISK}, warm);

  //DISK:2,ARCHIVE:2
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.ARCHIVE, StorageType.DISK, StorageType.DISK}, warm);

  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("WARM|DISK:1,ARCHIVE:1(WARM)", 2l);
  expectedOutput.put("WARM|DISK:2,ARCHIVE:1", 3l);
  expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 3l);
  expectedOutput.put("WARM|DISK:2,ARCHIVE:2", 1l);
  Assert.assertEquals(expectedOutput, actualOutput);
}
Example #18
Source File: TestPBHelper.java From big-c with Apache License 2.0
private static BlockWithLocations getBlockWithLocations(int bid) {
  final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
  final String[] storageIDs = {"s1", "s2", "s3"};
  final StorageType[] storageTypes = {
      StorageType.DISK, StorageType.DISK, StorageType.DISK};
  return new BlockWithLocations(new Block(bid, 0, 1),
      datanodeUuids, storageIDs, storageTypes);
}
Example #19
Source File: BlockReportTestBase.java From hadoop with Apache License 2.0
/**
 * Test writes a file and closes it.
 * Block reported is generated with an extra block.
 * Block report is forced and the check for # of pendingdeletion
 * blocks is performed.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_04() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs, filePath,
      FILE_SIZE, REPL_FACTOR, rand.nextLong());

  DataNode dn = cluster.getDataNodes().get(DN_N0);
  // all blocks belong to the same file, hence same BP
  String poolId = cluster.getNamesystem().getBlockPoolId();

  // Create a bogus new block which will not be present on the namenode.
  ExtendedBlock b = new ExtendedBlock(
      poolId, rand.nextLong(), 1024L, rand.nextLong());
  dn.getFSDataset().createRbw(StorageType.DEFAULT, b, false);

  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
      cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
  assertThat("Wrong number of PendingDeletion blocks",
      cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
}
Example #20
Source File: TestBlockStoragePolicy.java From hadoop with Apache License 2.0
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p, short replication,
    List<StorageType> chosen, StorageType... expected) {
  final List<StorageType> types = p.chooseStorageTypes(replication,
      chosen, none, false);
  assertStorageTypes(types, expected);
}
Example #21
Source File: TestBlockStoragePolicy.java From hadoop with Apache License 2.0
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p, short replication,
    List<StorageType> chosen, StorageType... expected) {
  final List<StorageType> types = p.chooseStorageTypes(replication,
      chosen, both, false);
  assertStorageTypes(types, expected);
}
Example #22
Source File: FsDatasetImpl.java From hadoop with Apache License 2.0
private void addVolume(Collection<StorageLocation> dataLocations,
    Storage.StorageDirectory sd) throws IOException {
  final File dir = sd.getCurrentDir();
  final StorageType storageType =
      getStorageTypeFromLocations(dataLocations, sd.getRoot());

  // If IOException raises from FsVolumeImpl() or getVolumeMap(), there is
  // nothing needed to be rolled back to make various data structures, e.g.,
  // storageMap and asyncDiskService, consistent.
  FsVolumeImpl fsVolume = new FsVolumeImpl(
      this, sd.getStorageUuid(), dir, this.conf, storageType);
  FsVolumeReference ref = fsVolume.obtainReference();
  ReplicaMap tempVolumeMap = new ReplicaMap(this);
  fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);

  synchronized (this) {
    volumeMap.addAll(tempVolumeMap);
    storageMap.put(sd.getStorageUuid(),
        new DatanodeStorage(sd.getStorageUuid(),
            DatanodeStorage.State.NORMAL,
            storageType));
    asyncDiskService.addVolume(sd.getCurrentDir());
    volumes.addVolume(ref);
  }

  LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
}
Example #23
Source File: BlockStoragePolicy.java From hadoop with Apache License 2.0
/**
 * Compute the difference between two lists t and c so that after the diff
 * computation we have: t = t - c;
 * Further, if e is not null, set e = e + c - t;
 */
private static void diff(List<StorageType> t, Iterable<StorageType> c,
    List<StorageType> e) {
  for (StorageType storagetype : c) {
    final int i = t.indexOf(storagetype);
    if (i >= 0) {
      t.remove(i);
    } else if (e != null) {
      e.add(storagetype);
    }
  }
}
Example #24
Source File: PBHelper.java From hadoop with Apache License 2.0
private static List<StorageTypesProto> convert(StorageType[][] types) {
  List<StorageTypesProto> list = Lists.newArrayList();
  if (types != null) {
    for (StorageType[] ts : types) {
      StorageTypesProto.Builder builder = StorageTypesProto.newBuilder();
      builder.addAllStorageTypes(convertStorageTypes(ts));
      list.add(builder.build());
    }
  }
  return list;
}
Example #25
Source File: Mover.java From hadoop with Apache License 2.0
/** @return true if it is necessary to run another round of migration */
private boolean processFile(String fullPath, HdfsLocatedFileStatus status) {
  final byte policyId = status.getStoragePolicy();
  // currently we ignore files with unspecified storage policy
  if (policyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
    return false;
  }
  final BlockStoragePolicy policy = blockStoragePolicies[policyId];
  if (policy == null) {
    LOG.warn("Failed to get the storage policy of file " + fullPath);
    return false;
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());

  final LocatedBlocks locatedBlocks = status.getBlockLocations();
  boolean hasRemaining = false;
  final boolean lastBlkComplete = locatedBlocks.isLastBlockComplete();
  List<LocatedBlock> lbs = locatedBlocks.getLocatedBlocks();
  for (int i = 0; i < lbs.size(); i++) {
    if (i == lbs.size() - 1 && !lastBlkComplete) {
      // last block is incomplete, skip it
      continue;
    }
    LocatedBlock lb = lbs.get(i);
    final StorageTypeDiff diff = new StorageTypeDiff(types,
        lb.getStorageTypes());
    if (!diff.removeOverlap(true)) {
      if (scheduleMoves4Block(diff, lb)) {
        hasRemaining |= (diff.existing.size() > 1 &&
            diff.expected.size() > 1);
      }
    }
  }
  return hasRemaining;
}
Example #26
Source File: ContentSummary.java From big-c with Apache License 2.0
public Builder() {
  this.quota = -1;
  this.spaceQuota = -1;

  typeConsumed = new long[StorageType.values().length];
  typeQuota = new long[StorageType.values().length];
  for (int i = 0; i < typeQuota.length; i++) {
    typeQuota[i] = -1;
  }
}
Example #27
Source File: PBHelper.java From big-c with Apache License 2.0
public static List<StorageTypeProto> convertStorageTypes(
    StorageType[] types, int startIdx) {
  if (types == null) {
    return null;
  }
  final List<StorageTypeProto> protos = new ArrayList<StorageTypeProto>(
      types.length);
  for (int i = startIdx; i < types.length; ++i) {
    protos.add(convertStorageType(types[i]));
  }
  return protos;
}
Example #28
Source File: PBHelper.java From hadoop with Apache License 2.0
public static LocatedBlock convert(LocatedBlockProto proto) {
  if (proto == null) return null;
  List<DatanodeInfoProto> locs = proto.getLocsList();
  DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
  for (int i = 0; i < locs.size(); i++) {
    targets[i] = PBHelper.convert(locs.get(i));
  }

  final StorageType[] storageTypes = convertStorageTypes(
      proto.getStorageTypesList(), locs.size());

  final int storageIDsCount = proto.getStorageIDsCount();
  final String[] storageIDs;
  if (storageIDsCount == 0) {
    storageIDs = null;
  } else {
    Preconditions.checkState(storageIDsCount == locs.size());
    storageIDs = proto.getStorageIDsList().toArray(new String[storageIDsCount]);
  }

  // Set values from the isCached list, re-using references from loc
  List<DatanodeInfo> cachedLocs = new ArrayList<DatanodeInfo>(locs.size());
  List<Boolean> isCachedList = proto.getIsCachedList();
  for (int i = 0; i < isCachedList.size(); i++) {
    if (isCachedList.get(i)) {
      cachedLocs.add(targets[i]);
    }
  }

  LocatedBlock lb = new LocatedBlock(PBHelper.convert(proto.getB()), targets,
      storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt(),
      cachedLocs.toArray(new DatanodeInfo[0]));
  lb.setBlockToken(PBHelper.convert(proto.getBlockToken()));

  return lb;
}
Example #29
Source File: StoragePolicySummary.java From big-c with Apache License 2.0
/**
 * @param storageTypes - sorted array of storageTypes
 * @return Storage Policy which matches the specific storage Combination
 */
private BlockStoragePolicy getStoragePolicy(StorageType[] storageTypes) {
  for (BlockStoragePolicy storagePolicy : storagePolicies) {
    StorageType[] policyStorageTypes = storagePolicy.getStorageTypes();
    policyStorageTypes = Arrays.copyOf(policyStorageTypes, policyStorageTypes.length);
    Arrays.sort(policyStorageTypes);
    if (policyStorageTypes.length <= storageTypes.length) {
      int i = 0;
      for (; i < policyStorageTypes.length; i++) {
        if (policyStorageTypes[i] != storageTypes[i]) {
          break;
        }
      }
      if (i < policyStorageTypes.length) {
        continue;
      }
      int j = policyStorageTypes.length;
      for (; j < storageTypes.length; j++) {
        if (policyStorageTypes[i - 1] != storageTypes[j]) {
          break;
        }
      }
      if (j == storageTypes.length) {
        return storagePolicy;
      }
    }
  }
  return null;
}
Example #30
Source File: TestDiskspaceQuotaUpdate.java From big-c with Apache License 2.0
/**
 * Test append over a specific type of storage quota does not mark file as
 * UC or create a lease
 */
@Test (timeout=60000)
public void testAppendOverTypeQuota() throws Exception {
  final Path dir = new Path("/TestAppendOverTypeQuota");
  final Path file = new Path(dir, "file");

  // create partial block file
  dfs.mkdirs(dir);
  // set the storage policy on dir
  dfs.setStoragePolicy(dir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE/2, REPLICATION, seed);

  // set quota of SSD to 1L
  dfs.setQuotaByStorageType(dir, StorageType.SSD, 1L);
  final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString())
      .asDirectory();
  final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  try {
    DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
    Assert.fail("append didn't fail");
  } catch (RemoteException e) {
    assertTrue(e.getClassName().contains("QuotaByStorageTypeExceededException"));
  }

  // check that the file exists, isn't UC, and has no dangling lease
  INodeFile inode = fsdir.getINode(file.toString()).asFile();
  Assert.assertNotNull(inode);
  Assert.assertFalse("should not be UC", inode.isUnderConstruction());
  Assert.assertNull("should not have a lease", cluster.getNamesystem()
      .getLeaseManager().getLeaseByPath(file.toString()));
  // make sure the quota usage is unchanged
  final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  assertEquals(spaceUsed, newSpaceUsed);
  // make sure edits aren't corrupted
  dfs.recoverLease(file);
  cluster.restartNameNodes();
}