Java Code Examples for org.apache.hadoop.hdfs.util.DataTransferThrottler#throttle()
The following examples show how to use
org.apache.hadoop.hdfs.util.DataTransferThrottler#throttle() .
You can vote up the examples you find helpful or vote down the ones you don't,
and you can open the original project or source file by following the link above each example. Related API usage is listed in the sidebar.
Example 1
Source File: TestBlockReplacement.java From hadoop with Apache License 2.0 | 6 votes |
@Test public void testThrottler() throws IOException { Configuration conf = new HdfsConfiguration(); FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); long bandwidthPerSec = 1024*1024L; final long TOTAL_BYTES =6*bandwidthPerSec; long bytesToSend = TOTAL_BYTES; long start = Time.monotonicNow(); DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec); long totalBytes = 0L; long bytesSent = 1024*512L; // 0.5MB throttler.throttle(bytesSent); bytesToSend -= bytesSent; bytesSent = 1024*768L; // 0.75MB throttler.throttle(bytesSent); bytesToSend -= bytesSent; try { Thread.sleep(1000); } catch (InterruptedException ignored) {} throttler.throttle(bytesToSend); long end = Time.monotonicNow(); assertTrue(totalBytes*1000/(end-start)<=bandwidthPerSec); }
Example 2
Source File: TestBlockReplacement.java From big-c with Apache License 2.0 | 6 votes |
@Test public void testThrottler() throws IOException { Configuration conf = new HdfsConfiguration(); FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); long bandwidthPerSec = 1024*1024L; final long TOTAL_BYTES =6*bandwidthPerSec; long bytesToSend = TOTAL_BYTES; long start = Time.monotonicNow(); DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec); long totalBytes = 0L; long bytesSent = 1024*512L; // 0.5MB throttler.throttle(bytesSent); bytesToSend -= bytesSent; bytesSent = 1024*768L; // 0.75MB throttler.throttle(bytesSent); bytesToSend -= bytesSent; try { Thread.sleep(1000); } catch (InterruptedException ignored) {} throttler.throttle(bytesToSend); long end = Time.monotonicNow(); assertTrue(totalBytes*1000/(end-start)<=bandwidthPerSec); }
Example 3
Source File: TestBlockReplacement.java From RDFS with Apache License 2.0 | 6 votes |
public void testThrottler() throws IOException { Configuration conf = new Configuration(); FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); long bandwidthPerSec = 1024*1024L; final long TOTAL_BYTES =6*bandwidthPerSec; long bytesToSend = TOTAL_BYTES; long start = Util.now(); DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec); long totalBytes = 0L; long bytesSent = 1024*512L; // 0.5MB throttler.throttle(bytesSent); bytesToSend -= bytesSent; bytesSent = 1024*768L; // 0.75MB throttler.throttle(bytesSent); bytesToSend -= bytesSent; try { Thread.sleep(1000); } catch (InterruptedException ignored) {} throttler.throttle(bytesToSend); long end = Util.now(); assertTrue(totalBytes*1000/(end-start)<=bandwidthPerSec); }
Example 4
Source File: KeyValueContainerCheck.java From hadoop-ozone with Apache License 2.0 | 4 votes |
private static void verifyChecksum(BlockData block, ContainerProtos.ChunkInfo chunk, File chunkFile, ChunkLayOutVersion layout, DataTransferThrottler throttler, Canceler canceler) throws IOException { ChecksumData checksumData = ChecksumData.getFromProtoBuf(chunk.getChecksumData()); int checksumCount = checksumData.getChecksums().size(); int bytesPerChecksum = checksumData.getBytesPerChecksum(); Checksum cal = new Checksum(checksumData.getChecksumType(), bytesPerChecksum); ByteBuffer buffer = ByteBuffer.allocate(bytesPerChecksum); long bytesRead = 0; try (FileChannel channel = FileChannel.open(chunkFile.toPath(), ChunkUtils.READ_OPTIONS, ChunkUtils.NO_ATTRIBUTES)) { if (layout == ChunkLayOutVersion.FILE_PER_BLOCK) { channel.position(chunk.getOffset()); } for (int i = 0; i < checksumCount; i++) { // limit last read for FILE_PER_BLOCK, to avoid reading next chunk if (layout == ChunkLayOutVersion.FILE_PER_BLOCK && i == checksumCount - 1 && chunk.getLen() % bytesPerChecksum != 0) { buffer.limit((int) (chunk.getLen() % bytesPerChecksum)); } int v = channel.read(buffer); if (v == -1) { break; } bytesRead += v; buffer.flip(); throttler.throttle(v, canceler); ByteString expected = checksumData.getChecksums().get(i); ByteString actual = cal.computeChecksum(buffer) .getChecksums().get(0); if (!expected.equals(actual)) { throw new OzoneChecksumException(String .format("Inconsistent read for chunk=%s" + " checksum item %d" + " expected checksum %s" + " actual checksum %s" + " for block %s", ChunkInfo.getFromProtoBuf(chunk), i, Arrays.toString(expected.toByteArray()), Arrays.toString(actual.toByteArray()), block.getBlockID())); } } if (bytesRead != chunk.getLen()) { throw new OzoneChecksumException(String .format("Inconsistent read for chunk=%s expected length=%d" + " actual length=%d for block %s", chunk.getChunkName(), chunk.getLen(), bytesRead, block.getBlockID())); } } }
Example 5
Source File: TransferFsImage.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Copies the contents of {@code localfile} (already opened as {@code infile})
 * to {@code out}, optionally throttling bandwidth and honoring cancellation.
 *
 * Fault-injection hooks ({@link CheckpointFaultInjector}) can truncate or
 * corrupt the stream to exercise error paths in tests.
 *
 * @param out       destination stream; closed in the finally block unless the
 *                  client disconnected first
 * @param localfile source file, used for length and fault-injection decisions
 * @param infile    open input stream positioned at the start of the file
 * @param throttler optional bandwidth limiter; may be null
 * @param canceler  optional cancellation signal; may be null
 * @throws IOException on read/write failure
 * @throws SaveNamespaceCancelledException if {@code canceler} fires mid-copy
 */
private static void copyFileToStream(OutputStream out, File localfile,
    FileInputStream infile, DataTransferThrottler throttler,
    Canceler canceler) throws IOException {
  byte buf[] = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
  try {
    CheckpointFaultInjector.getInstance()
        .aboutToSendFile(localfile);

    if (CheckpointFaultInjector.getInstance().
          shouldSendShortFile(localfile)) {
        // Test sending image shorter than localfile
        long len = localfile.length();
        // Shrinking the buffer to half the file length means only one
        // partial read below, simulating a truncated transfer.
        buf = new byte[(int)Math.min(len/2, HdfsConstants.IO_FILE_BUFFER_SIZE)];
        // This will read at most half of the image
        // and the rest of the image will be sent over the wire
        infile.read(buf);
    }
    int num = 1;
    while (num > 0) {
      // Check for cancellation before each chunk so an abort is prompt.
      if (canceler != null && canceler.isCancelled()) {
        throw new SaveNamespaceCancelledException(
          canceler.getCancellationReason());
      }
      num = infile.read(buf);
      if (num <= 0) {
        break;
      }
      if (CheckpointFaultInjector.getInstance()
            .shouldCorruptAByte(localfile)) {
        // Simulate a corrupted byte on the wire
        LOG.warn("SIMULATING A CORRUPT BYTE IN IMAGE TRANSFER!");
        buf[0]++;
      }

      out.write(buf, 0, num);
      if (throttler != null) {
        // Throttle after each write to cap the overall transfer rate.
        throttler.throttle(num, canceler);
      }
    }
  } catch (EofException e) {
    // Client went away mid-transfer; nothing left to write.
    LOG.info("Connection closed by client");
    out = null; // so we don't close in the finally
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
Example 6
Source File: TransferFsImage.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Copies the contents of {@code localfile} (already opened as {@code infile})
 * to {@code out}, optionally throttling bandwidth and honoring cancellation.
 *
 * Fault-injection hooks ({@link CheckpointFaultInjector}) can truncate or
 * corrupt the stream to exercise error paths in tests.
 *
 * @param out       destination stream; closed in the finally block unless the
 *                  client disconnected first
 * @param localfile source file, used for length and fault-injection decisions
 * @param infile    open input stream positioned at the start of the file
 * @param throttler optional bandwidth limiter; may be null
 * @param canceler  optional cancellation signal; may be null
 * @throws IOException on read/write failure
 * @throws SaveNamespaceCancelledException if {@code canceler} fires mid-copy
 */
private static void copyFileToStream(OutputStream out, File localfile,
    FileInputStream infile, DataTransferThrottler throttler,
    Canceler canceler) throws IOException {
  byte buf[] = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
  try {
    CheckpointFaultInjector.getInstance()
        .aboutToSendFile(localfile);

    if (CheckpointFaultInjector.getInstance().
          shouldSendShortFile(localfile)) {
        // Test sending image shorter than localfile
        long len = localfile.length();
        // Shrinking the buffer to half the file length means only one
        // partial read below, simulating a truncated transfer.
        buf = new byte[(int)Math.min(len/2, HdfsConstants.IO_FILE_BUFFER_SIZE)];
        // This will read at most half of the image
        // and the rest of the image will be sent over the wire
        infile.read(buf);
    }
    int num = 1;
    while (num > 0) {
      // Check for cancellation before each chunk so an abort is prompt.
      if (canceler != null && canceler.isCancelled()) {
        throw new SaveNamespaceCancelledException(
          canceler.getCancellationReason());
      }
      num = infile.read(buf);
      if (num <= 0) {
        break;
      }
      if (CheckpointFaultInjector.getInstance()
            .shouldCorruptAByte(localfile)) {
        // Simulate a corrupted byte on the wire
        LOG.warn("SIMULATING A CORRUPT BYTE IN IMAGE TRANSFER!");
        buf[0]++;
      }

      out.write(buf, 0, num);
      if (throttler != null) {
        // Throttle after each write to cap the overall transfer rate.
        throttler.throttle(num, canceler);
      }
    }
  } catch (EofException e) {
    // Client went away mid-transfer; nothing left to write.
    LOG.info("Connection closed by client");
    out = null; // so we don't close in the finally
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
Example 7
Source File: TransferFsImage.java From RDFS with Apache License 2.0 | 4 votes |
/** * A server-side method to respond to a getfile http request * Copies the contents of the local file into the output stream. */ static void getFileServer(OutputStream outstream, File localfile, DataTransferThrottler throttler) throws IOException { byte buf[] = new byte[BUFFER_SIZE]; FileInputStream infile = null; long totalReads = 0, totalSends = 0; try { infile = new FileInputStream(localfile); if (ErrorSimulator.getErrorSimulation(2) && localfile.getAbsolutePath().contains("secondary")) { // throw exception only when the secondary sends its image throw new IOException("If this exception is not caught by the " + "name-node fs image will be truncated."); } if (ErrorSimulator.getErrorSimulation(3) && localfile.getAbsolutePath().contains("fsimage")) { // Test sending image shorter than localfile long len = localfile.length(); buf = new byte[(int)Math.min(len/2, BUFFER_SIZE)]; // This will read at most half of the image // and the rest of the image will be sent over the wire infile.read(buf); } int num = 1; while (num > 0) { long startRead = System.currentTimeMillis(); num = infile.read(buf); if (num <= 0) { break; } outstream.write(buf, 0, num); if (throttler != null) { throttler.throttle(num); } } } finally { if (infile != null) { infile.close(); } } }