org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException Java Examples
The following examples show how to use org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException. Each example is taken from an open-source project; the source file and project are noted above each one.
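DiskOutOfSpaceException is declared as a nested class of org.apache.hadoop.util.DiskChecker and extends java.io.IOException, so it can be thrown from any method that declares IOException and caught selectively ahead of the broader type. The minimal sketch below shows that basic throw-and-catch pattern; the DiskOutOfSpaceDemo class and its reserve helper are hypothetical, and it assumes hadoop-common is on the classpath.

import java.io.IOException;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;

public class DiskOutOfSpaceDemo {
  // Hypothetical helper: fail fast when a volume cannot hold the requested bytes.
  static void reserve(long availableBytes, long requestedBytes) throws IOException {
    if (availableBytes < requestedBytes) {
      throw new DiskOutOfSpaceException("Out of space: available=" + availableBytes
          + " B, requested=" + requestedBytes + " B");
    }
  }

  public static void main(String[] args) {
    try {
      reserve(500L, 700L);
    } catch (DiskOutOfSpaceException e) {
      // Catch the subtype first to distinguish "disk full" from other I/O errors.
      System.err.println("Caught: " + e.getMessage());
    } catch (IOException e) {
      System.err.println("Other I/O error: " + e.getMessage());
    }
  }
}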
Example #1
Source File: TestRoundRobinVolumeChoosingPolicy.java From hadoop with Apache License 2.0
public static void testRRPolicyExceptionMessage(
    VolumeChoosingPolicy<FsVolumeSpi> policy) throws Exception {
  final List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();

  // First volume, with 500 bytes of space.
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L);

  // Second volume, with 600 bytes of space.
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);

  int blockSize = 700;
  try {
    policy.chooseVolume(volumes, blockSize);
    Assert.fail("expected to throw DiskOutOfSpaceException");
  } catch (DiskOutOfSpaceException e) {
    Assert.assertEquals("Not returning the expected message",
        "Out of space: The volume with the most available space (=" + 600
            + " B) is less than the block size (=" + blockSize + " B).",
        e.getMessage());
  }
}
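The policy is passed in as a parameter so the same assertion can be reused across policy implementations. A plausible call site (hypothetical; the page does not show the no-argument test method):

@Test
public void testRRPolicyExceptionMessage() throws Exception {
  testRRPolicyExceptionMessage(new RoundRobinVolumeChoosingPolicy<FsVolumeSpi>());
}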
Example #2
Source File: TestRoundRobinVolumeChoosingPolicy.java From big-c with Apache License 2.0
public static void testRRPolicyExceptionMessage(
    VolumeChoosingPolicy<FsVolumeSpi> policy) throws Exception {
  final List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();

  // First volume, with 500 bytes of space.
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L);

  // Second volume, with 600 bytes of space.
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);

  int blockSize = 700;
  try {
    policy.chooseVolume(volumes, blockSize);
    Assert.fail("expected to throw DiskOutOfSpaceException");
  } catch (DiskOutOfSpaceException e) {
    Assert.assertEquals("Not returning the expected message",
        "Out of space: The volume with the most available space (=" + 600
            + " B) is less than the block size (=" + blockSize + " B).",
        e.getMessage());
  }
}
Example #3
Source File: RoundRobinVolumeChoosingPolicy.java From big-c with Apache License 2.0
@Override
public synchronized V chooseVolume(final List<V> volumes, long blockSize)
    throws IOException {
  if (volumes.size() < 1) {
    throw new DiskOutOfSpaceException("No more available volumes");
  }

  // since volumes could've been removed because of the failure
  // make sure we are not out of bounds
  if (curVolume >= volumes.size()) {
    curVolume = 0;
  }

  int startVolume = curVolume;
  long maxAvailable = 0;

  while (true) {
    final V volume = volumes.get(curVolume);
    curVolume = (curVolume + 1) % volumes.size();
    long availableVolumeSize = volume.getAvailable();
    if (availableVolumeSize > blockSize) {
      return volume;
    }

    if (availableVolumeSize > maxAvailable) {
      maxAvailable = availableVolumeSize;
    }

    if (curVolume == startVolume) {
      throw new DiskOutOfSpaceException("Out of space: "
          + "The volume with the most available space (=" + maxAvailable
          + " B) is less than the block size (=" + blockSize + " B).");
    }
  }
}
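The cursor curVolume persists across calls, so each invocation resumes the scan where the last one stopped; maxAvailable exists only to make the terminal error message precise once the scan wraps back to startVolume, which is exactly the message asserted in Examples #1 and #2. A self-contained sketch of the same scan over plain capacities (RoundRobinPicker and its pick method are hypothetical names):

import java.util.List;

class RoundRobinPicker {
  private int cur = 0;

  // Returns the index of the first volume, in round-robin order, that can hold blockSize.
  synchronized int pick(List<Long> available, long blockSize) {
    if (available.isEmpty()) {
      throw new IllegalStateException("No more available volumes");
    }
    if (cur >= available.size()) {
      cur = 0; // volumes may have been removed since the last call
    }
    int start = cur;
    long maxAvailable = 0;
    while (true) {
      int candidate = cur;
      cur = (cur + 1) % available.size();
      long free = available.get(candidate);
      if (free > blockSize) {
        return candidate;
      }
      maxAvailable = Math.max(maxAvailable, free);
      if (cur == start) { // scanned every volume without success
        throw new IllegalStateException("Out of space: max available = "
            + maxAvailable + " B, block size = " + blockSize + " B");
      }
    }
  }
}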
Example #4
Source File: DataNode.java From hadoop-gpu with Apache License 2.0
protected void checkDiskError(IOException e) throws IOException {
  if (e.getMessage() != null
      && e.getMessage().startsWith("No space left on device")) {
    throw new DiskOutOfSpaceException("No space left on device");
  } else {
    checkDiskError();
  }
}
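This overload inspects the message of a low-level IOException: an ENOSPC failure from the OS surfaces in Java with a message beginning "No space left on device", and only that case is converted into the typed DiskOutOfSpaceException; any other failure falls through to the full checkDiskError() volume scan. A minimal sketch of that classification on its own (the DiskErrors class and rethrowIfOutOfSpace are hypothetical):

import java.io.IOException;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;

final class DiskErrors {
  // Hypothetical helper: translate the POSIX ENOSPC message into the typed exception.
  static void rethrowIfOutOfSpace(IOException e) throws DiskOutOfSpaceException {
    String msg = e.getMessage();
    if (msg != null && msg.startsWith("No space left on device")) {
      throw new DiskOutOfSpaceException(msg);
    }
  }
}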
Example #5
Source File: FSDataset.java From hadoop-gpu with Apache License 2.0
synchronized FSVolume getNextVolume(long blockSize) throws IOException {
  int startVolume = curVolume;
  while (true) {
    FSVolume volume = volumes[curVolume];
    curVolume = (curVolume + 1) % volumes.length;
    if (volume.getAvailable() > blockSize) {
      return volume;
    }
    if (curVolume == startVolume) {
      throw new DiskOutOfSpaceException("Insufficient space for an additional block");
    }
  }
}
Example #6
Source File: DataNode.java From RDFS with Apache License 2.0
/**
 * Check if there is no space left on disk.
 * @param e the exception that caused this checkDiskError call
 */
protected void checkDiskError(Exception e) throws IOException {
  if (e instanceof ClosedByInterruptException
      || e instanceof java.io.InterruptedIOException) {
    return;
  }
  LOG.warn("checkDiskError: exception: ", e);
  if (e.getMessage() != null
      && e.getMessage().startsWith("No space left on device")) {
    throw new DiskOutOfSpaceException("No space left on device");
  } else {
    checkDiskError();
  }
}
Example #7
Source File: FSDataset.java From RDFS with Apache License 2.0
private FSVolume getNextVolume(long blockSize) throws IOException {
  FSVolume[] volumes = this.getVolumes();
  if (volumes.length < 1) {
    throw new DiskOutOfSpaceException("No more available volumes");
  }

  // since volumes could've been removed because of the failure
  // make sure we are not out of bounds
  if (curVolume >= volumes.length) {
    curVolume = 0;
  }

  int startVolume = curVolume;

  while (true) {
    FSVolume volume = volumes[curVolume];
    curVolume = (curVolume + 1) % volumes.length;
    if (volume.getAvailable() > blockSize) {
      return volume;
    }
    if (curVolume == startVolume) {
      throw new DiskOutOfSpaceException(
          "Insufficient space for an additional block");
    }
  }
}
Example #8
Source File: DataTransferTestUtil.java From big-c with Apache License 2.0
@Override
public void run(DatanodeID id) throws DiskOutOfSpaceException {
  final DataTransferTest test = getDataTransferTest();
  if (test.isNotSuccessAndLastPipelineContains(index, id)
      && countdown.isSatisfied()) {
    final String s = toString(id);
    FiTestUtil.LOG.info(s);
    throw new DiskOutOfSpaceException(s);
  }
}
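Examples #8 through #11 come from Hadoop's fault-injection (FI) test utilities: an action bound to a datanode throws DiskOutOfSpaceException on demand so that pipeline error-recovery paths can be exercised deterministically. The four variants differ only in whether a countdown guard postpones the fault until the action has fired a set number of times. A self-contained stand-in for that guard pattern (CountdownFault is a hypothetical simplification, not the FI framework's API):

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;

// Simplified, hypothetical stand-in for a countdown-guarded fault action.
class CountdownFault {
  private final AtomicInteger remaining;

  CountdownFault(int invocationsBeforeFault) {
    this.remaining = new AtomicInteger(invocationsBeforeFault);
  }

  void run(String datanodeId) throws DiskOutOfSpaceException {
    // Fire only after the configured number of earlier invocations has elapsed.
    if (remaining.decrementAndGet() <= 0) {
      throw new DiskOutOfSpaceException("FI: injected disk-out-of-space at " + datanodeId);
    }
  }
}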
Example #9
Source File: DataTransferTestUtil.java From big-c with Apache License 2.0
@Override
public void run(DatanodeID id) throws DiskOutOfSpaceException {
  final DataTransferTest test = getDataTransferTest();
  if (test.isNotSuccessAndLastPipelineContains(index, id)) {
    final String s = toString(id);
    FiTestUtil.LOG.info(s);
    throw new DiskOutOfSpaceException(s);
  }
}
Example #10
Source File: DataTransferTestUtil.java From hadoop with Apache License 2.0
@Override
public void run(DatanodeID id) throws DiskOutOfSpaceException {
  final DataTransferTest test = getDataTransferTest();
  if (test.isNotSuccessAndLastPipelineContains(index, id)
      && countdown.isSatisfied()) {
    final String s = toString(id);
    FiTestUtil.LOG.info(s);
    throw new DiskOutOfSpaceException(s);
  }
}
Example #11
Source File: DataTransferTestUtil.java From hadoop with Apache License 2.0
@Override
public void run(DatanodeID id) throws DiskOutOfSpaceException {
  final DataTransferTest test = getDataTransferTest();
  if (test.isNotSuccessAndLastPipelineContains(index, id)) {
    final String s = toString(id);
    FiTestUtil.LOG.info(s);
    throw new DiskOutOfSpaceException(s);
  }
}
Example #12
Source File: RoundRobinVolumeChoosingPolicy.java From hadoop with Apache License 2.0
@Override
public synchronized V chooseVolume(final List<V> volumes, long blockSize)
    throws IOException {
  if (volumes.size() < 1) {
    throw new DiskOutOfSpaceException("No more available volumes");
  }

  // since volumes could've been removed because of the failure
  // make sure we are not out of bounds
  if (curVolume >= volumes.size()) {
    curVolume = 0;
  }

  int startVolume = curVolume;
  long maxAvailable = 0;

  while (true) {
    final V volume = volumes.get(curVolume);
    curVolume = (curVolume + 1) % volumes.size();
    long availableVolumeSize = volume.getAvailable();
    if (availableVolumeSize > blockSize) {
      return volume;
    }

    if (availableVolumeSize > maxAvailable) {
      maxAvailable = availableVolumeSize;
    }

    if (curVolume == startVolume) {
      throw new DiskOutOfSpaceException("Out of space: "
          + "The volume with the most available space (=" + maxAvailable
          + " B) is less than the block size (=" + blockSize + " B).");
    }
  }
}
Example #13
Source File: TestRoundRobinVolumeChoosingPolicy.java From hadoop-ozone with Apache License 2.0
@Test
public void testRRPolicyExceptionMessage() throws Exception {
  int blockSize = 300;
  try {
    policy.chooseVolume(volumes, blockSize);
    Assert.fail("expected to throw DiskOutOfSpaceException");
  } catch (DiskOutOfSpaceException e) {
    Assert.assertEquals("Not returning the expected message",
        "Out of space: The volume with the most available space (=" + 200
            + " B) is less than the container size (=" + blockSize + " B).",
        e.getMessage());
  }
}
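The policy and volumes fields here are built in the test class's setUp, which this page does not show. A plausible reconstruction with Mockito, inferred from the asserted 200 B figure (the sizes and the setup method are assumptions, not the project's actual fixture):

@Before
public void setup() throws Exception {
  policy = new RoundRobinVolumeChoosingPolicy();
  volumes = new ArrayList<>();
  // Two volumes, 100 B and 200 B: both below the 300 B container size used above.
  for (long available : new long[] {100L, 200L}) {
    HddsVolume vol = Mockito.mock(HddsVolume.class);
    Mockito.when(vol.getAvailable()).thenReturn(available);
    // getCommittedBytes() is left unstubbed; Mockito mocks return 0L by default.
    volumes.add(vol);
  }
}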
Example #14
Source File: FsDatasetImpl.java From big-c with Apache License 2.0
@Override // FsDatasetSpi
public synchronized ReplicaHandler createRbw(
    StorageType storageType, ExtendedBlock b, boolean allowLazyPersist)
    throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
  if (replicaInfo != null) {
    throw new ReplicaAlreadyExistsException("Block " + b
        + " already exists in state " + replicaInfo.getState()
        + " and thus cannot be created.");
  }
  // create a new block
  FsVolumeReference ref;
  while (true) {
    try {
      if (allowLazyPersist) {
        // First try to place the block on a transient volume.
        ref = volumes.getNextTransientVolume(b.getNumBytes());
        datanode.getMetrics().incrRamDiskBlocksWrite();
      } else {
        ref = volumes.getNextVolume(storageType, b.getNumBytes());
      }
    } catch (DiskOutOfSpaceException de) {
      if (allowLazyPersist) {
        datanode.getMetrics().incrRamDiskBlocksWriteFallback();
        allowLazyPersist = false;
        continue;
      }
      throw de;
    }
    break;
  }
  FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
  // create an rbw file to hold block in the designated volume
  File f;
  try {
    f = v.createRbwFile(b.getBlockPoolId(), b.getLocalBlock());
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(),
      b.getGenerationStamp(), v, f.getParentFile(), b.getNumBytes());
  volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
  return new ReplicaHandler(newReplicaInfo, ref);
}
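The notable control flow is the selection loop: with allowLazyPersist set, the first attempt targets a transient (RAM disk) volume, and a DiskOutOfSpaceException there is non-fatal; the code records a fallback metric, clears the flag, and retries against ordinary volumes, so only a second failure reaches the caller. The same one-shot-fallback pattern in isolation (LazyPersistFallback and VolumePicker are hypothetical names; metrics elided):

import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;

final class LazyPersistFallback {
  interface VolumePicker {
    String pick(long bytes) throws DiskOutOfSpaceException;
  }

  // Try the transient picker first; on DiskOutOfSpaceException fall back once
  // to the regular picker, mirroring createRbw's loop.
  static String choose(VolumePicker transientVolumes, VolumePicker regularVolumes,
      long bytes, boolean allowLazyPersist) throws DiskOutOfSpaceException {
    while (true) {
      try {
        return allowLazyPersist
            ? transientVolumes.pick(bytes)
            : regularVolumes.pick(bytes);
      } catch (DiskOutOfSpaceException de) {
        if (allowLazyPersist) {
          allowLazyPersist = false; // one-shot fallback to ordinary volumes
          continue;
        }
        throw de; // second failure propagates to the writer
      }
    }
  }
}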
Example #15
Source File: RoundRobinVolumeChoosingPolicy.java From hadoop-ozone with Apache License 2.0
@Override
public HddsVolume chooseVolume(List<HddsVolume> volumes,
    long maxContainerSize) throws IOException {

  // No volumes available to choose from
  if (volumes.size() < 1) {
    throw new DiskOutOfSpaceException("No more available volumes");
  }

  // since volumes could've been removed because of the failure
  // make sure we are not out of bounds
  int nextIndex = nextVolumeIndex.get();
  int currentVolumeIndex = nextIndex < volumes.size() ? nextIndex : 0;

  int startVolumeIndex = currentVolumeIndex;
  long maxAvailable = 0;

  while (true) {
    final HddsVolume volume = volumes.get(currentVolumeIndex);
    // adjust for remaining capacity in Open containers
    long availableVolumeSize = volume.getAvailable()
        - volume.getCommittedBytes();

    currentVolumeIndex = (currentVolumeIndex + 1) % volumes.size();

    if (availableVolumeSize > maxContainerSize) {
      nextVolumeIndex.compareAndSet(nextIndex, currentVolumeIndex);
      return volume;
    }

    if (availableVolumeSize > maxAvailable) {
      maxAvailable = availableVolumeSize;
    }

    if (currentVolumeIndex == startVolumeIndex) {
      throw new DiskOutOfSpaceException("Out of space: "
          + "The volume with the most available space (=" + maxAvailable
          + " B) is less than the container size (=" + maxContainerSize
          + " B).");
    }
  }
}
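Unlike the synchronized HDFS policy in Examples #3 and #12, this Ozone variant keeps its cursor in an AtomicInteger and publishes the new position with compareAndSet, so concurrent callers never block; a thread that loses the race simply fails the CAS and the cursor stays where the winner put it. It also subtracts getCommittedBytes(), space already promised to open containers, before comparing against the container size. A sketch of just the lock-free cursor (RoundRobinCursor is a hypothetical name):

import java.util.concurrent.atomic.AtomicInteger;

// Lock-free round-robin cursor; losing a compareAndSet race is harmless.
final class RoundRobinCursor {
  private final AtomicInteger next = new AtomicInteger(0);

  // Returns the index to try now and advances the shared cursor best-effort.
  int advance(int size) {
    int seen = next.get();
    int current = seen < size ? seen : 0; // tolerate a shrinking volume list
    next.compareAndSet(seen, (current + 1) % size);
    return current;
  }
}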
Example #16
Source File: FsDatasetImpl.java From hadoop with Apache License 2.0
@Override // FsDatasetSpi
public synchronized ReplicaHandler createRbw(
    StorageType storageType, ExtendedBlock b, boolean allowLazyPersist)
    throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
  if (replicaInfo != null) {
    throw new ReplicaAlreadyExistsException("Block " + b
        + " already exists in state " + replicaInfo.getState()
        + " and thus cannot be created.");
  }
  // create a new block
  FsVolumeReference ref;
  while (true) {
    try {
      if (allowLazyPersist) {
        // First try to place the block on a transient volume.
        ref = volumes.getNextTransientVolume(b.getNumBytes());
        datanode.getMetrics().incrRamDiskBlocksWrite();
      } else {
        ref = volumes.getNextVolume(storageType, b.getNumBytes());
      }
    } catch (DiskOutOfSpaceException de) {
      if (allowLazyPersist) {
        datanode.getMetrics().incrRamDiskBlocksWriteFallback();
        allowLazyPersist = false;
        continue;
      }
      throw de;
    }
    break;
  }
  FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
  // create an rbw file to hold block in the designated volume
  File f;
  try {
    f = v.createRbwFile(b.getBlockPoolId(), b.getLocalBlock());
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(),
      b.getGenerationStamp(), v, f.getParentFile(), b.getNumBytes());
  volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
  return new ReplicaHandler(newReplicaInfo, ref);
}
Example #17
Source File: MutableVolumeSet.java From hadoop-ozone with Apache License 2.0
/**
 * Add DN volumes configured through ConfigKeys to volumeMap.
 */
private void initializeVolumeSet() throws IOException {
  volumeMap = new ConcurrentHashMap<>();
  failedVolumeMap = new ConcurrentHashMap<>();
  volumeStateMap = new EnumMap<>(StorageType.class);

  Collection<String> rawLocations = getDatanodeStorageDirs(conf);

  for (StorageType storageType : StorageType.values()) {
    volumeStateMap.put(storageType, new ArrayList<>());
  }

  for (String locationString : rawLocations) {
    try {
      StorageLocation location = StorageLocation.parse(locationString);

      HddsVolume hddsVolume = createVolume(location.getUri().getPath(),
          location.getStorageType());

      checkAndSetClusterID(hddsVolume.getClusterID());

      LOG.info("Added Volume : {} to VolumeSet",
          hddsVolume.getHddsRootDir().getPath());

      if (!hddsVolume.getHddsRootDir().mkdirs()
          && !hddsVolume.getHddsRootDir().exists()) {
        throw new IOException("Failed to create HDDS storage dir "
            + hddsVolume.getHddsRootDir());
      }
      volumeMap.put(hddsVolume.getHddsRootDir().getPath(), hddsVolume);
      volumeStateMap.get(hddsVolume.getStorageType()).add(hddsVolume);
    } catch (IOException e) {
      HddsVolume volume = new HddsVolume.Builder(locationString)
          .failedVolume(true).build();
      failedVolumeMap.put(locationString, volume);
      LOG.error("Failed to parse the storage location: " + locationString, e);
    }
  }

  // First checking if we have any volumes, if all volumes are failed the
  // volumeMap size will be zero, and we throw Exception.
  if (volumeMap.size() == 0) {
    throw new DiskOutOfSpaceException("No storage locations configured");
  }

  checkAllVolumes();

  // Ensure volume threads are stopped and scm df is saved during shutdown.
  shutdownHook = () -> {
    saveVolumeSetUsed();
  };
  ShutdownHookManager.get().addShutdownHook(shutdownHook,
      SHUTDOWN_HOOK_PRIORITY);
}