Java Code Examples for org.apache.hadoop.fs.FileSystem#getLength()
The following examples show how to use org.apache.hadoop.fs.FileSystem#getLength().
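Note that FileSystem#getLength(Path) is deprecated in current Hadoop releases in favor of FileSystem#getFileStatus(Path).getLen(), which is why it appears mainly in older codebases like the ones below. A minimal sketch of both forms (the class name and path are placeholders, not from any of the projects):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileLengthSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/example.dat"); // placeholder path
    FileSystem fs = path.getFileSystem(conf);

    // Deprecated accessor used throughout the examples below:
    long deprecatedLen = fs.getLength(path);

    // Current replacement: ask for the file status and read its length.
    long len = fs.getFileStatus(path).getLen();

    System.out.println(deprecatedLen + " == " + len);
  }
}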
Example 1
Source File: MDSHiveLineInputFormat.java From multiple-dimension-spread with Apache License 2.0
@Override
public RecordReader<NullWritable, ColumnAndIndex> getRecordReader(
    final InputSplit split, final JobConf job, final Reporter reporter) throws IOException {
  FileSplit fileSplit = (FileSplit) split;
  Path path = fileSplit.getPath();
  FileSystem fs = path.getFileSystem(job);
  long fileLength = fs.getLength(path);
  long start = fileSplit.getStart();
  long length = fileSplit.getLength();
  InputStream in = fs.open(path);
  IJobReporter jobReporter = new HadoopJobReporter(reporter);
  jobReporter.setStatus(String.format("Read file : %s", path.toString()));
  HiveReaderSetting hiveConfig = new HiveReaderSetting(fileSplit, job);
  if (hiveConfig.isVectorMode()) {
    IVectorizedReaderSetting vectorizedSetting =
        new HiveVectorizedReaderSetting(fileSplit, job, hiveConfig);
    return (RecordReader) new MDSHiveDirectVectorizedReader(
        in, fileLength, start, length, vectorizedSetting, jobReporter);
  } else {
    return new MDSHiveLineReader(
        in, fileLength, start, length, hiveConfig, jobReporter, spreadCounter);
  }
}
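Here fs.getLength(path) supplies the total size of the underlying file, while start and length describe the slice this split covers; all three are handed to either a vectorized reader or a plain line reader depending on hiveConfig.isVectorMode().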
Example 2
Source File: MDSCombineSpreadReader.java From multiple-dimension-spread with Apache License 2.0
public MDSCombineSpreadReader(final CombineFileSplit split, final TaskAttemptContext context,
    final Integer index) throws IOException {
  Configuration config = context.getConfiguration();
  Path path = split.getPath(index);
  FileSystem fs = path.getFileSystem(config);
  long fileLength = fs.getLength(path);
  InputStream in = fs.open(path);

  innerReader = new MDSSpreadReader();
  innerReader.setStream(in, fileLength, 0, fileLength);
}
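Because this reader is built per file index of the CombineFileSplit, it consumes the whole file: getLength() is passed both as the stream size and as the read length, with offset 0.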
Example 3
Source File: MDSSpreadReader.java From multiple-dimension-spread with Apache License 2.0
@Override
public void initialize(final InputSplit inputSplit, final TaskAttemptContext context)
    throws IOException, InterruptedException {
  FileSplit fileSplit = (FileSplit) inputSplit;
  Configuration config = context.getConfiguration();
  Path path = fileSplit.getPath();
  FileSystem fs = path.getFileSystem(config);
  long fileLength = fs.getLength(path);
  long start = fileSplit.getStart();
  long length = fileSplit.getLength();
  InputStream in = fs.open(path);
  setStream(in, fileLength, start, length);
}
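This is the standard RecordReader initialize path: the same arguments that Example 2 passed to setStream by hand are derived here from the FileSplit.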
Example 4
Source File: UtilsForTests.java From RDFS with Apache License 2.0
static String slurpHadoop(Path p, FileSystem fs) throws IOException {
  int len = (int) fs.getLength(p);
  byte[] buf = new byte[len];
  InputStream in = fs.open(p);
  String contents = null;
  try {
    in.read(buf, 0, len);
    contents = new String(buf, "UTF-8");
  } finally {
    in.close();
  }
  return contents;
}
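One caveat: InputStream.read(buf, 0, len) is not guaranteed to fill the buffer in a single call, so this helper can silently return truncated contents for larger files; the readFully variant in the next example avoids that. The (int) cast also restricts it to files under 2 GB.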
Example 5
Source File: StreamUtil.java From RDFS with Apache License 2.0
static String slurpHadoop(Path p, FileSystem fs) throws IOException {
  int len = (int) fs.getLength(p);
  byte[] buf = new byte[len];
  FSDataInputStream in = fs.open(p);
  String contents = null;
  try {
    in.readFully(in.getPos(), buf);
    contents = new String(buf, "UTF-8");
  } finally {
    in.close();
  }
  return contents;
}
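FSDataInputStream.readFully(position, buffer) is a positioned read: it either fills the whole buffer or throws an EOFException, so unlike Example 4 this version cannot return a partially read string.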
Example 6
Source File: UtilsForTests.java From hadoop-gpu with Apache License 2.0
static String slurpHadoop(Path p, FileSystem fs) throws IOException {
  int len = (int) fs.getLength(p);
  byte[] buf = new byte[len];
  InputStream in = fs.open(p);
  String contents = null;
  try {
    in.read(buf, 0, len);
    contents = new String(buf, "UTF-8");
  } finally {
    in.close();
  }
  return contents;
}
Example 7
Source File: StreamUtil.java From hadoop-gpu with Apache License 2.0
static String slurpHadoop(Path p, FileSystem fs) throws IOException {
  int len = (int) fs.getLength(p);
  byte[] buf = new byte[len];
  FSDataInputStream in = fs.open(p);
  String contents = null;
  try {
    in.readFully(in.getPos(), buf);
    contents = new String(buf, "UTF-8");
  } finally {
    in.close();
  }
  return contents;
}