Java Code Examples for org.apache.hadoop.fs.FSDataInputStream#readInt()
The following examples show how to use org.apache.hadoop.fs.FSDataInputStream#readInt().
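FSDataInputStream extends java.io.DataInputStream, so readInt() consumes the next four bytes of the stream as a big-endian int, per the DataInput contract. As a minimal round-trip sketch before the real-world examples (the scratch path and bare Configuration are placeholders, not taken from any example below), an int written with FSDataOutputStream#writeInt(int) can be read back with readInt():

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadIntDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);          // local FS unless fs.defaultFS says otherwise
    Path path = new Path("/tmp/readint-demo");     // hypothetical scratch path

    // Write a single 4-byte big-endian int.
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.writeInt(42);
    }

    // Read it back; readInt() comes from the DataInput contract.
    try (FSDataInputStream in = fs.open(path)) {
      System.out.println(in.readInt());            // prints 42
    }
  }
}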
Example 1
Source File: CompletedJobStatusStore.java From hadoop-gpu with Apache License 2.0
private TaskCompletionEvent[] readEvents(FSDataInputStream dataIn, int offset, int len)
    throws IOException {
  int size = dataIn.readInt();
  if (offset > size) {
    return TaskCompletionEvent.EMPTY_ARRAY;
  }
  if (offset + len > size) {
    len = size - offset;
  }
  TaskCompletionEvent[] events = new TaskCompletionEvent[len];
  for (int i = 0; i < (offset + len); i++) {
    TaskCompletionEvent event = new TaskCompletionEvent();
    event.readFields(dataIn);
    if (i >= offset) {
      events[i - offset] = event;
    }
  }
  return events;
}
Example 2
Source File: SIObserver.java From spliceengine with GNU Affero General Public License v3.0
private byte[] getToken(String path) throws IOException {
  byte[] token = null;
  FSDataInputStream in = null;
  try {
    FileSystem fs = FileSystem.get(new URI(path), HConfiguration.unwrapDelegate());
    Path p = new Path(path, "_token");
    if (fs.exists(p)) {
      in = fs.open(p);
      int len = in.readInt();
      token = new byte[len];
      in.readFully(token);
    }
    return token;
  } catch (Exception e) {
    throw new IOException(e);
  } finally {
    if (in != null) {
      in.close();
    }
  }
}
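The length-prefixed pattern above (readInt() for the byte count, then readFully(byte[]) for the payload) implies a writer along these lines. This is a hedged sketch, not Splice Machine's actual code, and writeToken is a hypothetical name:

// Hypothetical counterpart to getToken(); assumes the same fs and p as above.
private void writeToken(FileSystem fs, Path p, byte[] token) throws IOException {
  try (FSDataOutputStream out = fs.create(p, true)) {
    out.writeInt(token.length); // length prefix consumed by readInt()
    out.write(token);           // payload consumed by readFully()
  }
}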
Example 3
Source File: CompletedJobStatusStore.java From RDFS with Apache License 2.0
private TaskCompletionEvent[] readEvents(FSDataInputStream dataIn, int offset, int len)
    throws IOException {
  int size = dataIn.readInt();
  if (offset > size) {
    return TaskCompletionEvent.EMPTY_ARRAY;
  }
  if (offset + len > size) {
    len = size - offset;
  }
  TaskCompletionEvent[] events = new TaskCompletionEvent[len];
  for (int i = 0; i < (offset + len); i++) {
    TaskCompletionEvent event = new TaskCompletionEvent();
    event.readFields(dataIn);
    if (i >= offset) {
      events[i - offset] = event;
    }
  }
  return events;
}
Example 4
Source File: FSInputLZ4CompressedColumnReader.java From kylin-on-parquet-v2 with Apache License 2.0
public FSInputLZ4CompressedColumnReader(FSDataInputStream fsInputStream, int columnDataStartOffset,
    int columnDataLength, int rowCount) throws IOException {
  this.rowCount = rowCount;
  this.fsInputStream = fsInputStream;
  int footStartOffset = columnDataStartOffset + columnDataLength - 8;
  fsInputStream.seek(footStartOffset);
  this.numValInBlock = fsInputStream.readInt();
  this.valLen = fsInputStream.readInt();
  fsInputStream.seek(columnDataStartOffset);
  this.currBlockNum = -1;
  this.deCompressor = LZ4Factory.fastestInstance().safeDecompressor();
  this.maxDecompressedLength = numValInBlock * valLen;
}
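This reader assumes an 8-byte footer, two ints (values per block, then value length), sitting at the end of the column data; it seeks there, reads both with readInt(), then seeks back to the data start. Examples 10 through 14 below read the same style of footer. A writer producing such a footer might look like this sketch (writeFooter is a hypothetical name, not taken from Kylin's writer code):

// Hypothetical sketch: append the two-int footer the reader seeks to.
private void writeFooter(FSDataOutputStream out, int numValInBlock, int valLen)
    throws IOException {
  out.writeInt(numValInBlock); // read back by the first readInt()
  out.writeInt(valLen);        // read back by the second readInt()
}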
Example 5
Source File: BackupUtils.java From spliceengine with GNU Affero General Public License v3.0
/**
 * @param fs HBase file system
 * @param rootPath HBase root directory
 * @return true if there exists a successful database backup
 * @throws IOException
 */
public static boolean existsDatabaseBackup(FileSystem fs, Path rootPath) throws IOException {
  boolean ret = false;
  FSDataInputStream in = null;
  try {
    // Open backup record file from file system
    Path backupPath = new Path(rootPath, BackupRestoreConstants.BACKUP_DIR);
    Path p = new Path(backupPath, BackupRestoreConstants.BACKUP_RECORD_FILE_NAME);
    if (fs.exists(p)) {
      in = fs.open(p);
      int n = in.readInt(); // number of records
      BackupDescriptor bd = new BackupDescriptor();
      while (n-- > 0) {
        bd.readExternal(in);
        if (bd.getScope().compareToIgnoreCase("DATABASE") == 0) {
          ret = true;
          break;
        }
      }
    }
    return ret;
  } finally {
    if (in != null)
      in.close();
  }
}
Example 6
Source File: HdfsKeyValueStore.java From incubator-retired-blur with Apache License 2.0
private void loadIndex(Path path) throws IOException {
  FSDataInputStream inputStream = _fileSystem.open(path);
  byte[] buf = new byte[MAGIC.length];
  inputStream.readFully(buf);
  if (!Arrays.equals(MAGIC, buf)) {
    throw new IOException("File [" + path + "] not a " + BLUR_KEY_VALUE + " file.");
  }
  int version = inputStream.readInt();
  if (version == 1) {
    long fileLength = HdfsUtils.getFileLength(_fileSystem, path, inputStream);
    Operation operation = new Operation();
    try {
      while (inputStream.getPos() < fileLength) {
        try {
          operation.readFields(inputStream);
        } catch (IOException e) {
          // End of sync point found
          return;
        }
        loadIndex(path, operation);
      }
    } finally {
      inputStream.close();
    }
  } else {
    throw new IOException("Unknown version [" + version + "]");
  }
}
Example 7
Source File: HdfsDirectory.java From incubator-retired-blur with Apache License 2.0
private void loadCacheFromManifest(Path manifest) throws IOException {
  FSDataInputStream inputStream = _fileSystem.open(manifest);
  int count = inputStream.readInt();
  for (int i = 0; i < count; i++) {
    String name = readString(inputStream);
    long lastMod = inputStream.readLong();
    long length = inputStream.readLong();
    FStat fstat = new FStat(lastMod, length);
    _cache.put(name, fstat);
  }
  inputStream.close();
}
Example 8
Source File: BSTIndex.java From incubator-tajo with Apache License 2.0
private void fillRootIndex(int entryNum, FSDataInputStream in) throws IOException {
  this.dataIndex = new Tuple[entryNum];
  this.offsetIndex = new long[entryNum];
  Tuple keyTuple;
  byte[] buf;
  for (int i = 0; i < entryNum; i++) {
    buf = new byte[in.readInt()];
    Bytes.readFully(in, buf, 0, buf.length);
    keyTuple = RowStoreUtil.RowStoreDecoder.toTuple(keySchema, buf);
    dataIndex[i] = keyTuple;
    this.offsetIndex[i] = in.readLong();
  }
}
Example 9
Source File: SuccinctIndexedFileStream.java From succinct with Apache License 2.0
/**
 * Constructor to map a file containing Succinct data structures via stream.
 *
 * @param filePath Path of the file.
 * @param conf Configuration for the filesystem.
 * @throws IOException
 */
public SuccinctIndexedFileStream(Path filePath, Configuration conf) throws IOException {
  super(filePath, conf);
  FSDataInputStream is = getStream(filePath);
  is.seek(endOfFileStream);
  int len = is.readInt();
  offsets = new int[len];
  for (int i = 0; i < len; i++) {
    offsets[i] = is.readInt();
  }
  endOfIndexedFileStream = is.getPos();
  is.close();
}
Example 10
Source File: FSInputRLECompressedColumnReader.java From kylin with Apache License 2.0
public FSInputRLECompressedColumnReader(FSDataInputStream fsInputStream, int columnDataStartOffset,
    int columnDataLength, int rowCount) throws IOException {
  this.rowCount = rowCount;
  this.fsInputStream = fsInputStream;
  int footStartOffset = columnDataStartOffset + columnDataLength - 8;
  fsInputStream.seek(footStartOffset);
  this.numValInBlock = fsInputStream.readInt();
  this.valLen = fsInputStream.readInt();
  this.fsInputStream.seek(columnDataStartOffset);
  this.currBlockNum = -1;
}
Example 11
Source File: FSInputLZ4CompressedColumnReader.java From kylin with Apache License 2.0
public FSInputLZ4CompressedColumnReader(FSDataInputStream fsInputStream, int columnDataStartOffset,
    int columnDataLength, int rowCount) throws IOException {
  this.rowCount = rowCount;
  this.fsInputStream = fsInputStream;
  int footStartOffset = columnDataStartOffset + columnDataLength - 8;
  fsInputStream.seek(footStartOffset);
  this.numValInBlock = fsInputStream.readInt();
  this.valLen = fsInputStream.readInt();
  fsInputStream.seek(columnDataStartOffset);
  this.currBlockNum = -1;
  this.deCompressor = LZ4Factory.fastestInstance().safeDecompressor();
  this.maxDecompressedLength = numValInBlock * valLen;
}
Example 12
Source File: FSInputGeneralColumnDataReader.java From kylin with Apache License 2.0
public FSInputGeneralColumnDataReader(FSDataInputStream fsInputStream, int dataStartOffset,
    int dataLength) throws IOException {
  this.fsInputStream = fsInputStream;
  fsInputStream.seek(dataStartOffset + dataLength - 4L);
  this.numOfVals = fsInputStream.readInt();
  fsInputStream.seek(dataStartOffset);
}
Example 13
Source File: FSInputRLECompressedColumnReader.java From kylin-on-parquet-v2 with Apache License 2.0
public FSInputRLECompressedColumnReader(FSDataInputStream fsInputStream, int columnDataStartOffset,
    int columnDataLength, int rowCount) throws IOException {
  this.rowCount = rowCount;
  this.fsInputStream = fsInputStream;
  int footStartOffset = columnDataStartOffset + columnDataLength - 8;
  fsInputStream.seek(footStartOffset);
  this.numValInBlock = fsInputStream.readInt();
  this.valLen = fsInputStream.readInt();
  this.fsInputStream.seek(columnDataStartOffset);
  this.currBlockNum = -1;
}
Example 14
Source File: FSInputGeneralColumnDataReader.java From kylin-on-parquet-v2 with Apache License 2.0
public FSInputGeneralColumnDataReader(FSDataInputStream fsInputStream, int dataStartOffset,
    int dataLength) throws IOException {
  this.fsInputStream = fsInputStream;
  fsInputStream.seek(dataStartOffset + dataLength - 4L);
  this.numOfVals = fsInputStream.readInt();
  fsInputStream.seek(dataStartOffset);
}
Example 15
Source File: HistoryReader.java From tajo with Apache License 2.0
public org.apache.tajo.worker.TaskHistory getTaskHistory(String taskAttemptId, long startTime)
    throws IOException {
  FileSystem fs = HistoryWriter.getNonCrcFileSystem(taskHistoryParentPath, tajoConf);
  SimpleDateFormat df = new SimpleDateFormat("yyyyMMddHH");
  Calendar cal = Calendar.getInstance();
  cal.setTime(new Date(startTime));

  // current, current-1, current+1 hour
  String[] targetHistoryFileDates = new String[3];
  targetHistoryFileDates[0] = df.format(cal.getTime());
  cal.add(Calendar.HOUR_OF_DAY, -1);
  targetHistoryFileDates[1] = df.format(cal.getTime());
  cal.setTime(new Date(startTime));
  cal.add(Calendar.HOUR_OF_DAY, 1);
  targetHistoryFileDates[2] = df.format(cal.getTime());

  for (String historyFileDate : targetHistoryFileDates) {
    Path fileParent = new Path(taskHistoryParentPath,
        historyFileDate.substring(0, 8) + "/tasks/" + processName);
    String hour = historyFileDate.substring(8, 10);
    if (!fs.exists(fileParent)) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Task history parent not exists:" + fileParent);
      }
      continue;
    }
    FileStatus[] files = fs.listStatus(fileParent);
    if (files == null || files.length == 0) {
      return null;
    }
    String filePrefix = processName + "_" + hour + "_";
    for (FileStatus eachFile : files) {
      if (eachFile.getPath().getName().indexOf(filePrefix) != 0) {
        continue;
      }
      FSDataInputStream in = null;
      TaskHistoryProto.Builder builder = TaskHistoryProto.newBuilder();
      try {
        FileStatus status = fs.getFileStatus(eachFile.getPath());
        LOG.info("Finding TaskHistory from " + status.getLen() + "," + eachFile.getPath());
        in = fs.open(eachFile.getPath());
        while (true) {
          int len = in.readInt();
          byte[] buf = new byte[len];
          in.readFully(buf, 0, len);
          builder.clear();
          TaskHistoryProto taskHistoryProto = builder.mergeFrom(buf).build();
          TaskAttemptId attemptId = new TaskAttemptId(taskHistoryProto.getTaskAttemptId());
          if (attemptId.toString().equals(taskAttemptId)) {
            return new org.apache.tajo.worker.TaskHistory(taskHistoryProto);
          }
        }
      } catch (EOFException e) {
        // Reached the end of this history file without a match; try the next file.
      } finally {
        if (in != null) {
          in.close();
        }
      }
    }
  }
  return null;
}
Example 16
Source File: RubixFile.java From Cubert with Apache License 2.0
@SuppressWarnings("unchecked") public List<KeyData<K>> getKeyData() throws IOException, ClassNotFoundException { if (keyData != null) return keyData; final FileSystem fs = FileSystem.get(conf); keyData = new ArrayList<KeyData<K>>(); final long filesize = fs.getFileStatus(path).getLen(); FSDataInputStream in = fs.open(path); /* The last long in the file is the start position of the trailer section */ in.seek(filesize - 8); long metaDataStartPos = in.readLong(); in.seek(metaDataStartPos); ObjectMapper mapper = new ObjectMapper(); metadataJson = mapper.readValue(in.readUTF(), JsonNode.class); int keySectionSize = in.readInt(); // load the key section byte[] keySection = new byte[keySectionSize]; in.seek(filesize - keySectionSize - 8); in.read(keySection, 0, keySectionSize); in.close(); ByteArrayInputStream bis = new ByteArrayInputStream(keySection); DataInput dataInput = new DataInputStream(bis); int numberOfBlocks = metadataJson.get("numberOfBlocks").getIntValue(); // load the key section keyClass = (Class<K>) ClassCache.forName(JsonUtils.getText(metadataJson, "keyClass")); valueClass = (Class<V>) ClassCache.forName(JsonUtils.getText(metadataJson, "valueClass")); SerializationFactory serializationFactory = new SerializationFactory(conf); Deserializer<K> deserializer = serializationFactory.getDeserializer(keyClass); deserializer.open(bis); while (bis.available() > 0 && numberOfBlocks > 0) { K key = deserializer.deserialize(null); long offset = dataInput.readLong(); long blockId = dataInput.readLong(); long numRecords = dataInput.readLong(); keyData.add(new KeyData<K>(key, offset, 0, numRecords, blockId)); numberOfBlocks--; } // Assign length to each keydata entry int numEntries = keyData.size(); for (int i = 1; i < numEntries; i++) { KeyData<K> prev = keyData.get(i - 1); KeyData<K> current = keyData.get(i); prev.setLength(current.getOffset() - prev.getOffset()); } if (numEntries > 0) { KeyData<K> last = keyData.get(numEntries - 1); last.setLength(metaDataStartPos - last.offset); } return keyData; }
Example 17
Source File: HdfsDirectory.java From incubator-retired-blur with Apache License 2.0
private String readString(FSDataInputStream inputStream) throws IOException {
  int length = inputStream.readInt();
  byte[] buf = new byte[length];
  inputStream.readFully(buf);
  return new String(buf, UTF_8);
}
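This helper is the string half of the manifest format read in Example 7: a readInt() length prefix followed by that many UTF-8 bytes. A matching writer would reverse the two steps; as a sketch (writeString is a hypothetical counterpart, not shown in the HdfsDirectory excerpts here):

// Hypothetical counterpart to readString(); assumes the same UTF_8 charset constant.
private void writeString(FSDataOutputStream outputStream, String value) throws IOException {
  byte[] buf = value.getBytes(UTF_8); // same charset the reader assumes
  outputStream.writeInt(buf.length);  // length prefix for readInt()
  outputStream.write(buf);
}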