org.apache.hadoop.io.SecureIOUtils Java Examples
The following examples show how to use org.apache.hadoop.io.SecureIOUtils, Hadoop's utility class for opening local files only after verifying that they are owned by an expected user (and, optionally, group). Each example notes the project and source file it was taken from.
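As a warm-up, here is a minimal sketch of the core call, SecureIOUtils.openForRead. The file path, owner name, and class name are illustrative assumptions rather than code from any of the projects below.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

import org.apache.hadoop.io.SecureIOUtils;

public class SecureReadSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical log file and expected owner, for illustration only.
    File logFile = new File("/var/log/myapp/container.log");

    // openForRead throws an IOException whose message includes
    // "did not match expected owner" when the file is not owned by "alice".
    // Passing null for the group argument skips the group check.
    try (FileInputStream in = SecureIOUtils.openForRead(logFile, "alice", null)) {
      byte[] buffer = new byte[4096];
      int n = in.read(buffer);
      System.out.println("Read " + n + " bytes");
    }
  }
}

The same owner/group pair is accepted by openForRandomRead and openFSDataInputStream, which the examples below use for random-access and FileSystem-style reads respectively.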
Example #1
Source File: ContainerLogsUtils.java From hadoop with Apache License 2.0 (an identical copy of this method appears in the big-c fork)
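The NodeManager's log viewer opens a container log on behalf of the user who submitted the application. openForRead refuses to return the stream unless the file is owned by that user, and the ownership-mismatch IOException is rethrown here with a clearer message.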
public static FileInputStream openLogFileForRead(String containerIdStr, File logFile,
    Context context) throws IOException {
  ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
  ApplicationId applicationId = containerId.getApplicationAttemptId()
      .getApplicationId();
  String user = context.getApplications().get(applicationId).getUser();

  try {
    return SecureIOUtils.openForRead(logFile, user, null);
  } catch (IOException e) {
    if (e.getMessage().contains(
        "did not match expected owner '" + user + "'")) {
      LOG.error("Exception reading log file " + logFile.getAbsolutePath(), e);
      throw new IOException("Exception reading log file. Application submitted by '"
          + user + "' doesn't own requested log file : " + logFile.getName(), e);
    } else {
      throw new IOException("Exception reading log file. It might be because log "
          + "file was aggregated : " + logFile.getName(), e);
    }
  }
}
Example #2
Source File: SpillRecord.java From hadoop with Apache License 2.0 (an identical copy appears in the big-c fork)
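A map task's spill index file is opened with openFSDataInputStream, which performs the same ownership check; the index records are then read fully and, when a Checksum is supplied, verified against the trailing checksum value.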
public SpillRecord(Path indexFileName, JobConf job, Checksum crc,
                   String expectedIndexOwner) throws IOException {
  final FileSystem rfs = FileSystem.getLocal(job).getRaw();
  final FSDataInputStream in =
      SecureIOUtils.openFSDataInputStream(new File(indexFileName.toUri()
          .getRawPath()), expectedIndexOwner, null);
  try {
    final long length = rfs.getFileStatus(indexFileName).getLen();
    final int partitions = (int) length / MAP_OUTPUT_INDEX_RECORD_LENGTH;
    final int size = partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH;

    buf = ByteBuffer.allocate(size);
    if (crc != null) {
      crc.reset();
      CheckedInputStream chk = new CheckedInputStream(in, crc);
      IOUtils.readFully(chk, buf.array(), 0, size);
      if (chk.getChecksum().getValue() != in.readLong()) {
        throw new ChecksumException("Checksum error reading spill index: "
            + indexFileName, -1);
      }
    } else {
      IOUtils.readFully(in, buf.array(), 0, size);
    }
    entries = buf.asLongBuffer();
  } finally {
    in.close();
  }
}
Example #3
Source File: TaskLog.java From hadoop with Apache License 2.0 (an identical copy appears in the big-c fork)
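TaskLog's Reader locates the physical file that holds the requested log, opens it with openForRead as the log-directory owner, and skips forward to the computed start offset.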
/**
 * Read a log file from start to end positions. The offsets may be negative,
 * in which case they are relative to the end of the file. For example,
 * Reader(taskid, kind, 0, -1) is the entire file and
 * Reader(taskid, kind, -4197, -1) is the last 4196 bytes.
 * @param taskid the id of the task to read the log file for
 * @param kind the kind of log to read
 * @param start the offset to read from (negative is relative to tail)
 * @param end the offset to read upto (negative is relative to tail)
 * @param isCleanup whether the attempt is cleanup attempt or not
 * @throws IOException
 */
public Reader(TaskAttemptID taskid, LogName kind,
              long start, long end, boolean isCleanup) throws IOException {
  // find the right log file
  LogFileDetail fileDetail = getLogFileDetail(taskid, kind, isCleanup);
  // calculate the start and stop
  long size = fileDetail.length;
  if (start < 0) {
    start += size + 1;
  }
  if (end < 0) {
    end += size + 1;
  }
  start = Math.max(0, Math.min(start, size));
  end = Math.max(0, Math.min(end, size));
  start += fileDetail.start;
  end += fileDetail.start;
  bytesRemaining = end - start;
  String owner = obtainLogDirOwner(taskid);
  file = SecureIOUtils.openForRead(new File(fileDetail.location, kind.toString()),
      owner, null);
  // skip upto start
  long pos = 0;
  while (pos < start) {
    long result = file.skip(start - pos);
    if (result < 0) {
      bytesRemaining = 0;
      break;
    }
    pos += result;
  }
}
Example #4
Source File: ShuffleHandler.java From tez with Apache License 2.0
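Tez's shuffle handler serves a single map-output partition over Netty. openForRandomRead verifies ownership of the spill file before the byte range is streamed with a zero-copy DefaultFileRegion.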
protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx, Channel ch,
    String user, String mapId, int reduce, MapOutputInfo mapOutputInfo)
    throws IOException {
  final TezIndexRecord info = mapOutputInfo.indexRecord;
  final ShuffleHeader header =
      new ShuffleHeader(mapId, info.getPartLength(), info.getRawLength(), reduce);
  final DataOutputBuffer dob = new DataOutputBuffer();
  header.write(dob);
  ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
  final File spillfile = new File(mapOutputInfo.mapOutputFileName.toString());
  RandomAccessFile spill;
  try {
    spill = SecureIOUtils.openForRandomRead(spillfile, "r", user, null);
  } catch (FileNotFoundException e) {
    LOG.info(spillfile + " not found");
    return null;
  }
  ChannelFuture writeFuture;
  final DefaultFileRegion partition =
      new DefaultFileRegion(spill.getChannel(), info.getStartOffset(),
          info.getPartLength());
  writeFuture = ch.write(partition);
  writeFuture.addListener(new ChannelFutureListener() {
    // TODO error handling; distinguish IO/connection failures,
    //      attribute to appropriate spill output
    @Override
    public void operationComplete(ChannelFuture future) {
      partition.releaseExternalResources();
    }
  });
  return writeFuture;
}
Example #5
Source File: AggregatedLogFormat.java From hadoop with Apache License 2.0 (an identical copy appears in the big-c fork)
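A test-visible wrapper that opens an aggregated log file with an ownership check against the log's owning user.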
@VisibleForTesting
public FileInputStream secureOpenFile(File logFile) throws IOException {
  return SecureIOUtils.openForRead(logFile, getUser(), null);
}
Example #6
Source File: ShuffleHandler.java From hadoop with Apache License 2.0 (an identical copy appears in the big-c fork)
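Hadoop's shuffle handler performs the same ownership-checked open, then chooses between a zero-copy FadvisedFileRegion on plain HTTP and a FadvisedChunkedFile when an SslHandler is in the pipeline, since HTTPS cannot use zero copy.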
protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx, Channel ch,
    String user, String mapId, int reduce, MapOutputInfo mapOutputInfo)
    throws IOException {
  final IndexRecord info = mapOutputInfo.indexRecord;
  final ShuffleHeader header =
      new ShuffleHeader(mapId, info.partLength, info.rawLength, reduce);
  final DataOutputBuffer dob = new DataOutputBuffer();
  header.write(dob);
  ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
  final File spillfile = new File(mapOutputInfo.mapOutputFileName.toString());
  RandomAccessFile spill;
  try {
    spill = SecureIOUtils.openForRandomRead(spillfile, "r", user, null);
  } catch (FileNotFoundException e) {
    LOG.info(spillfile + " not found");
    return null;
  }
  ChannelFuture writeFuture;
  if (ch.getPipeline().get(SslHandler.class) == null) {
    final FadvisedFileRegion partition = new FadvisedFileRegion(spill,
        info.startOffset, info.partLength, manageOsCache, readaheadLength,
        readaheadPool, spillfile.getAbsolutePath(),
        shuffleBufferSize, shuffleTransferToAllowed);
    writeFuture = ch.write(partition);
    writeFuture.addListener(new ChannelFutureListener() {
      // TODO error handling; distinguish IO/connection failures,
      //      attribute to appropriate spill output
      @Override
      public void operationComplete(ChannelFuture future) {
        if (future.isSuccess()) {
          partition.transferSuccessful();
        }
        partition.releaseExternalResources();
      }
    });
  } else {
    // HTTPS cannot be done with zero copy.
    final FadvisedChunkedFile chunk = new FadvisedChunkedFile(spill,
        info.startOffset, info.partLength, sslFileBufferSize,
        manageOsCache, readaheadLength, readaheadPool,
        spillfile.getAbsolutePath());
    writeFuture = ch.write(chunk);
  }
  metrics.shuffleConnections.incr();
  metrics.shuffleOutputBytes.incr(info.partLength); // optimistic
  return writeFuture;
}
Example #7
Source File: TaskLog.java From hadoop with Apache License 2.0 (an identical copy appears in the big-c fork)
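The per-attempt index file is opened with an ownership check against the log-directory owner, then parsed to find the location, start offset, and length of the requested log kind.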
private static LogFileDetail getLogFileDetail(TaskAttemptID taskid,
    LogName filter, boolean isCleanup) throws IOException {
  File indexFile = getIndexFile(taskid, isCleanup);
  BufferedReader fis = new BufferedReader(new InputStreamReader(
      SecureIOUtils.openForRead(indexFile, obtainLogDirOwner(taskid), null),
      Charsets.UTF_8));
  //the format of the index file is
  //LOG_DIR: <the dir where the task logs are really stored>
  //stdout:<start-offset in the stdout file> <length>
  //stderr:<start-offset in the stderr file> <length>
  //syslog:<start-offset in the syslog file> <length>
  LogFileDetail l = new LogFileDetail();
  String str = null;
  try {
    str = fis.readLine();
    if (str == null) { // the file doesn't have anything
      throw new IOException("Index file for the log of " + taskid
          + " doesn't exist.");
    }
    l.location = str.substring(str.indexOf(LogFileDetail.LOCATION)
        + LogFileDetail.LOCATION.length());
    // special cases are the debugout and profile.out files. They are
    // guaranteed to be associated with each task attempt since jvm reuse is
    // disabled when profiling/debugging is enabled
    if (filter.equals(LogName.DEBUGOUT) || filter.equals(LogName.PROFILE)) {
      l.length = new File(l.location, filter.toString()).length();
      l.start = 0;
      fis.close();
      return l;
    }
    str = fis.readLine();
    while (str != null) {
      // look for the exact line containing the logname
      if (str.contains(filter.toString())) {
        str = str.substring(filter.toString().length() + 1);
        String[] startAndLen = str.split(" ");
        l.start = Long.parseLong(startAndLen[0]);
        l.length = Long.parseLong(startAndLen[1]);
        break;
      }
      str = fis.readLine();
    }
    fis.close();
    fis = null;
  } finally {
    IOUtils.cleanup(LOG, fis);
  }
  return l;
}
Example #8
Source File: ShuffleHandler.java From tez with Apache License 2.0
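A later Tez variant that serves a contiguous range of reduce partitions: a header is written per partition, and a single ownership-checked open covers the combined byte range of the whole spill-file slice.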
protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx, Channel ch,
    String user, String mapId, Range reduceRange, MapOutputInfo outputInfo)
    throws IOException {
  TezIndexRecord firstIndex = null;
  TezIndexRecord lastIndex = null;

  DataOutputBuffer dobRange = new DataOutputBuffer();
  // Indicate how many records will be written
  WritableUtils.writeVInt(dobRange,
      reduceRange.getLast() - reduceRange.getFirst() + 1);
  ch.write(wrappedBuffer(dobRange.getData(), 0, dobRange.getLength()));
  for (int reduce = reduceRange.getFirst();
       reduce <= reduceRange.getLast(); reduce++) {
    TezIndexRecord index = outputInfo.getIndex(reduce);
    // Records are only valid if they have a non-zero part length
    if (index.getPartLength() != 0) {
      if (firstIndex == null) {
        firstIndex = index;
      }
      lastIndex = index;
    }

    ShuffleHeader header = new ShuffleHeader(mapId, index.getPartLength(),
        index.getRawLength(), reduce);
    DataOutputBuffer dob = new DataOutputBuffer();
    header.write(dob);
    // Free the memory needed to store the spill and index records
    ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
  }
  outputInfo.finish();

  final long rangeOffset = firstIndex.getStartOffset();
  final long rangePartLength = lastIndex.getStartOffset()
      + lastIndex.getPartLength() - firstIndex.getStartOffset();
  final File spillFile = new File(outputInfo.mapOutputFileName.toString());
  RandomAccessFile spill;
  try {
    spill = SecureIOUtils.openForRandomRead(spillFile, "r", user, null);
  } catch (FileNotFoundException e) {
    LOG.info(spillFile + " not found");
    return null;
  }
  ChannelFuture writeFuture;
  if (ch.getPipeline().get(SslHandler.class) == null) {
    final FadvisedFileRegion partition = new FadvisedFileRegion(spill,
        rangeOffset, rangePartLength, manageOsCache, readaheadLength,
        readaheadPool, spillFile.getAbsolutePath(),
        shuffleBufferSize, shuffleTransferToAllowed);
    writeFuture = ch.write(partition);
    writeFuture.addListener(new ChannelFutureListener() {
      // TODO error handling; distinguish IO/connection failures,
      //      attribute to appropriate spill output
      @Override
      public void operationComplete(ChannelFuture future) {
        if (future.isSuccess()) {
          partition.transferSuccessful();
        }
        partition.releaseExternalResources();
      }
    });
  } else {
    // HTTPS cannot be done with zero copy.
    final FadvisedChunkedFile chunk = new FadvisedChunkedFile(spill,
        rangeOffset, rangePartLength, sslFileBufferSize, manageOsCache,
        readaheadLength, readaheadPool, spillFile.getAbsolutePath());
    writeFuture = ch.write(chunk);
  }
  metrics.shuffleConnections.incr();
  metrics.shuffleOutputBytes.incr(rangePartLength); // optimistic
  return writeFuture;
}