Java Code Examples for org.apache.hadoop.fs.Path#getParent()
The following examples show how to use org.apache.hadoop.fs.Path#getParent(). Each example is taken from an open-source project; the source file, the project it comes from, and its license are noted above the code.
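Before the project examples, here is a minimal standalone sketch (the paths and class name are invented for illustration, not taken from any project below) of what getParent() returns, including the null result once the root is reached:

import org.apache.hadoop.fs.Path;

public class GetParentDemo {
  public static void main(String[] args) {
    Path p = new Path("/user/alice/data/part-00000");
    System.out.println(p.getParent());              // /user/alice/data
    System.out.println(p.getParent().getParent());  // /user/alice

    // Walking upward with getParent() eventually reaches the root ("/"),
    // whose parent is null, which is why the loops below test for null.
    Path cur = p;
    while (cur != null) {
      System.out.println(cur);
      cur = cur.getParent();
    }
  }
}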
Example 1
Source File: LocalCacheDirectoryManager.java From big-c with Apache License 2.0
/**
 * Given a path to a directory within a local cache tree return the
 * root of the cache directory.
 *
 * @param path the directory within a cache directory
 * @return the local cache directory root or null if not found
 */
public static Path getCacheDirectoryRoot(Path path) {
  while (path != null) {
    String name = path.getName();
    if (name.length() != 1) {
      return path;
    }
    int dirnum = DIRECTORIES_PER_LEVEL;
    try {
      dirnum = Integer.parseInt(name, DIRECTORIES_PER_LEVEL);
    } catch (NumberFormatException e) {
    }
    if (dirnum >= DIRECTORIES_PER_LEVEL) {
      return path;
    }
    path = path.getParent();
  }
  return path;
}
Example 2
Source File: DistCpCopierTest.java From circus-train with Apache License 2.0
@Test
public void typicalOneFile() throws Exception {
  Path inputFile = new Path(sourceDataBaseLocation, "sub1/sub2/data");
  Path targetFile = new Path(replicaDataLocation, "output.txt");

  copier = new DistCpCopier(conf, inputFile, Collections.<Path>emptyList(), targetFile, null, registry);
  Metrics metrics = copier.copy();
  assertThat(metrics, not(nullValue()));

  String outputPath = targetFile.toUri().getPath();
  Path parent = targetFile.getParent();
  FileSystem fs = parent.getFileSystem(conf);
  int fileCopyCount = fs.listStatus(parent).length;
  assertThat(fileCopyCount, is(1));

  File outputSub2Data = new File(outputPath);
  assertTrue(outputSub2Data.exists());
  assertThat(Files.asCharSource(outputSub2Data, UTF_8).read(), is("test1"));
}
Example 3
Source File: TezClientUtils.java From tez with Apache License 2.0
private static boolean checkAncestorPermissionsForAllUsers(Configuration conf, Path pathComponent,
    FsAction permission) throws IOException {
  FileSystem fs = pathComponent.getFileSystem(conf);

  if (Shell.WINDOWS && fs instanceof LocalFileSystem) {
    // Relax the requirement for public cache on LFS on Windows since default permissions are
    // "700" all the way up to the drive letter. In this model, the only requirement for a user
    // is to give EVERYONE group permission on the file and the file will be considered public.
    // This code path is only hit when fs.default.name is file:/// (mainly in tests).
    return true;
  }

  if (fs.getFileStatus(pathComponent).isFile()) {
    pathComponent = pathComponent.getParent();
  }

  while (pathComponent != null) {
    if (!fs.getFileStatus(pathComponent).getPermission().getOtherAction().implies(permission)) {
      return false;
    }
    pathComponent = pathComponent.getParent();
  }

  return true;
}
Example 4
Source File: TestFSTableDescriptors.java From hbase with Apache License 2.0
@Test
public void testFormatTableInfoSequenceId() {
  Path p0 = assertWriteAndReadSequenceId(0);
  // Assert p0 has format we expect.
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) {
    sb.append("0");
  }
  assertEquals(FSTableDescriptors.TABLEINFO_FILE_PREFIX + "." + sb.toString(), p0.getName());
  // Check a few more.
  Path p2 = assertWriteAndReadSequenceId(2);
  Path p10000 = assertWriteAndReadSequenceId(10000);
  // Get a .tableinfo that has no sequenceid suffix.
  Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_FILE_PREFIX);
  FileStatus fs = new FileStatus(0, false, 0, 0, 0, p);
  FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0);
  FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2);
  FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000);
  Comparator<FileStatus> comparator = FSTableDescriptors.TABLEINFO_FILESTATUS_COMPARATOR;
  assertTrue(comparator.compare(fs, fs0) > 0);
  assertTrue(comparator.compare(fs0, fs2) > 0);
  assertTrue(comparator.compare(fs2, fs10000) > 0);
}
Example 5
Source File: JobHistory.java From RDFS with Apache License 2.0
/**
 * Helper function to encode the URL of the path of the job-history
 * log file.
 *
 * @param logFile path of the job-history file
 * @return URL encoded path
 * @throws IOException
 */
public static String encodeJobHistoryFilePath(String logFile) throws IOException {
  Path rawPath = new Path(logFile);
  String encodedFileName = null;
  try {
    encodedFileName = URLEncoder.encode(rawPath.getName(), "UTF-8");
  } catch (UnsupportedEncodingException uee) {
    IOException ioe = new IOException();
    ioe.initCause(uee);
    ioe.setStackTrace(uee.getStackTrace());
    throw ioe;
  }
  Path encodedPath = new Path(rawPath.getParent(), encodedFileName);
  return encodedPath.toString();
}
Example 6
Source File: FileSystemRMStateStore.java From big-c with Apache License 2.0
protected void updateFile(Path outputPath, byte[] data, boolean makeUnradableByAdmin)
    throws Exception {
  Path newPath = new Path(outputPath.getParent(), outputPath.getName() + ".new");
  // use writeFileWithRetries to make sure .new file is created atomically
  writeFileWithRetries(newPath, data, makeUnradableByAdmin);
  replaceFile(newPath, outputPath);
}
Example 7
Source File: AcidInfo.java From presto with Apache License 2.0
public Builder addDeleteDelta(Path deleteDeltaPath, long minWriteId, long maxWriteId, int statementId) {
  requireNonNull(deleteDeltaPath, "deleteDeltaPath is null");
  Path partitionPathFromDeleteDelta = deleteDeltaPath.getParent();
  checkArgument(
      partitionLocation.equals(partitionPathFromDeleteDelta),
      "Partition location in DeleteDelta '%s' does not match stored location '%s'",
      deleteDeltaPath.getParent().toString(),
      partitionLocation);
  deleteDeltaInfoBuilder.add(new DeleteDeltaInfo(minWriteId, maxWriteId, statementId));
  return this;
}
Example 8
Source File: JobHistoryEventHandler.java From hadoop with Apache License 2.0
private void moveTmpToDone(Path tmpPath) throws IOException {
  if (tmpPath != null) {
    String tmpFileName = tmpPath.getName();
    String fileName = getFileNameFromTmpFN(tmpFileName);
    Path path = new Path(tmpPath.getParent(), fileName);
    doneDirFS.rename(tmpPath, path);
    LOG.info("Moved tmp to done: " + tmpPath + " to " + path);
  }
}
Example 9
Source File: NativeS3FileSystem.java From big-c with Apache License 2.0
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
  Path absolutePath = makeAbsolute(f);
  List<Path> paths = new ArrayList<Path>();
  do {
    paths.add(0, absolutePath);
    absolutePath = absolutePath.getParent();
  } while (absolutePath != null);

  boolean result = true;
  for (Path path : paths) {
    result &= mkdir(path);
  }
  return result;
}
Example 10
Source File: SemiTransactionalHiveMetastore.java From presto with Apache License 2.0
private static boolean isSameOrParent(Path parent, Path child) {
  int parentDepth = parent.depth();
  int childDepth = child.depth();
  if (parentDepth > childDepth) {
    return false;
  }
  for (int i = childDepth; i > parentDepth; i--) {
    child = child.getParent();
  }
  return parent.equals(child);
}
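To make the depth-walking logic above concrete, here is a hedged standalone sketch; the wrapper class and the warehouse paths are invented for the demo, and the helper simply repeats the method from the example so the snippet compiles on its own. getParent() is called exactly childDepth - parentDepth times, so the two paths are compared at the same depth:

import org.apache.hadoop.fs.Path;

public class IsSameOrParentDemo {
  // Same logic as the example above, copied here so the sketch is self-contained.
  private static boolean isSameOrParent(Path parent, Path child) {
    int parentDepth = parent.depth();
    int childDepth = child.depth();
    if (parentDepth > childDepth) {
      return false;
    }
    for (int i = childDepth; i > parentDepth; i--) {
      child = child.getParent();
    }
    return parent.equals(child);
  }

  public static void main(String[] args) {
    Path warehouse = new Path("/user/hive/warehouse");
    System.out.println(isSameOrParent(warehouse, new Path("/user/hive/warehouse/t1/part=1"))); // true
    System.out.println(isSameOrParent(warehouse, new Path("/user/hive/warehouse")));           // true
    System.out.println(isSameOrParent(warehouse, new Path("/tmp/scratch")));                   // false
  }
}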
Example 11
Source File: YarnOutputFiles.java From hadoop with Apache License 2.0
/**
 * Create a local map output file name on the same volume.
 */
public Path getOutputFileForWriteInVolume(Path existing) {
  Path outputDir = new Path(existing.getParent(), JOB_OUTPUT_DIR);
  Path attemptOutputDir = new Path(outputDir,
      conf.get(JobContext.TASK_ATTEMPT_ID));
  return new Path(attemptOutputDir, MAP_OUTPUT_FILENAME_STRING);
}
Example 12
Source File: LustreFileSystem.java From hadoop-gpu with Apache License 2.0
@Override
public FSDataOutputStream create(Path path, FsPermission permission,
    boolean overwrite, int bufferSize, short replication, long blockSize,
    Progressable progress) throws IOException {
  if (exists(path) && !overwrite) {
    throw new IOException("File already exists: " + path);
  }
  Path parent = path.getParent();
  if (parent != null && !mkdirs(parent)) {
    throw new IOException("Mkdirs failed to create " + parent.toString());
  }
  return new FSDataOutputStream(new BufferedOutputStream(
      new LFSOutputStream(path, false), bufferSize), statistics);
}
Example 13
Source File: TestJobCounters.java From hadoop with Apache License 2.0
private static long getFileSize(Path path) throws IOException {
  FileSystem fs = FileSystem.getLocal(new Configuration());
  long len = 0;
  len += fs.getFileStatus(path).getLen();
  Path crcPath = new Path(path.getParent(), "." + path.getName() + ".crc");
  if (fs.exists(crcPath)) {
    len += fs.getFileStatus(crcPath).getLen();
  }
  return len;
}
Example 14
Source File: NativeS3FileSystem.java From hadoop with Apache License 2.0
private void createParent(Path path) throws IOException {
  Path parent = path.getParent();
  if (parent != null) {
    String key = pathToKey(makeAbsolute(parent));
    if (key.length() > 0) {
      store.storeEmptyFile(key + FOLDER_SUFFIX);
    }
  }
}
Example 15
Source File: AbstractFlagConfig.java From datawave with Apache License 2.0
protected void createTrackedDirs(final FileSystem fs, final InputFile file) throws IOException {
  final Path[] dirs = {file.getFlagged(), file.getFlagging(), file.getLoaded()};
  for (final Path dir : dirs) {
    final Path p = dir.getParent();
    if (!fs.mkdirs(p)) {
      throw new IllegalStateException("unable to create tracked directory (" + dir.getParent() + ")");
    }
  }
}
Example 16
Source File: HadoopUtils.java From incubator-gobblin with Apache License 2.0
public static void deletePathAndEmptyAncestors(FileSystem fs, Path f, boolean recursive) throws IOException {
  deletePath(fs, f, recursive);
  Path parent = f.getParent();
  while (parent != null) {
    if (fs.exists(parent) && fs.listStatus(parent).length == 0) {
      deletePath(fs, parent, true);
      parent = parent.getParent();
    } else {
      break;
    }
  }
}
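A hedged usage sketch of the ancestor-pruning idea above, run against the local file system; the directory layout and class name are invented for the demo, and the pruning loop is inlined rather than calling Gobblin's HadoopUtils:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PruneEmptyAncestorsDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path leaf = new Path("/tmp/prune-demo/a/b/c/file.txt");
    fs.mkdirs(leaf.getParent());
    fs.create(leaf).close();

    // Delete the file, then walk up with getParent(), removing each directory
    // that is now empty and stopping at the first non-empty ancestor.
    fs.delete(leaf, false);
    Path parent = leaf.getParent();
    while (parent != null && fs.exists(parent) && fs.listStatus(parent).length == 0) {
      fs.delete(parent, true);
      parent = parent.getParent();
    }
  }
}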
Example 17
Source File: HdfsFieldManager.java From incubator-retired-blur with Apache License 2.0
@Override
protected boolean tryToStore(FieldTypeDefinition fieldTypeDefinition, String fieldName) throws IOException {
  Tracer trace = Trace.trace("filesystem - tryToStore fieldName", Trace.param("fieldName", fieldName),
      Trace.param("storagePath", _storagePath));
  try {
    // Might want to make this a ZK lock
    _lock.lock();
    try {
      String fieldType = fieldTypeDefinition.getFieldType();
      boolean fieldLessIndexed = fieldTypeDefinition.isFieldLessIndexed();
      boolean sortEnable = fieldTypeDefinition.isSortEnable();
      boolean multiValueField = fieldTypeDefinition.isMultiValueField();
      LOG.info(
          "Attempting to store new field [{0}] with fieldLessIndexing [{1}] with type [{2}] and properties [{3}]",
          fieldName, fieldLessIndexed, fieldType, fieldTypeDefinition.getProperties());
      Properties properties = new Properties();
      setProperty(properties, FAMILY, fieldTypeDefinition.getFamily());
      setProperty(properties, FAMILY, fieldTypeDefinition.getFamily());
      setProperty(properties, COLUMN_NAME, fieldTypeDefinition.getColumnName());
      setProperty(properties, SUB_COLUMN_NAME, fieldTypeDefinition.getSubColumnName());
      setProperty(properties, FIELD_LESS_INDEXING, Boolean.toString(fieldLessIndexed));
      setProperty(properties, SORTENABLED, Boolean.toString(sortEnable));
      setProperty(properties, MULTI_VALUE_FIELD, Boolean.toString(multiValueField));
      setProperty(properties, FIELD_TYPE, fieldType);
      Map<String, String> props = fieldTypeDefinition.getProperties();
      if (props != null) {
        for (Entry<String, String> e : props.entrySet()) {
          properties.setProperty(e.getKey(), e.getValue());
        }
      }
      Path path = getFieldPath(fieldName);
      if (_fileSystem.exists(path)) {
        LOG.info("Field [{0}] already exists.", fieldName);
        return false;
      }
      Path tmpPath = new Path(path.getParent(), UUID.randomUUID().toString() + ".tmp");
      FSDataOutputStream outputStream = _fileSystem.create(tmpPath, false);
      properties.store(outputStream, getComments());
      outputStream.close();
      if (_fileSystem.rename(tmpPath, path)) {
        // @TODO make this configurable
        _fileSystem.setReplication(path, (short) 10);
        return true;
      } else {
        _fileSystem.delete(tmpPath, false);
        LOG.info("Field [{0}] already exists.", fieldName, fieldLessIndexed, fieldType, props);
        return false;
      }
    } finally {
      _lock.unlock();
    }
  } finally {
    trace.done();
  }
}
Example 18
Source File: SwiftNativeFileSystem.java From hadoop with Apache License 2.0
/**
 * @param permission Currently ignored.
 */
@Override
public FSDataOutputStream create(Path file, FsPermission permission,
                                 boolean overwrite, int bufferSize,
                                 short replication, long blockSize,
                                 Progressable progress) throws IOException {
  LOG.debug("SwiftFileSystem.create");

  FileStatus fileStatus = null;
  Path absolutePath = makeAbsolute(file);
  try {
    fileStatus = getFileStatus(absolutePath);
  } catch (FileNotFoundException e) {
    //the file isn't there.
  }

  if (fileStatus != null) {
    //the path exists -action depends on whether or not it is a directory,
    //and what the overwrite policy is.

    //What is clear at this point is that if the entry exists, there's
    //no need to bother creating any parent entries
    if (fileStatus.isDirectory()) {
      //here someone is trying to create a file over a directory
      /* we can't throw an exception here as there is no easy way to distinguish
         a file from the dir
         throw new SwiftPathExistsException("Cannot create a file over a directory:" + file);
       */
      if (LOG.isDebugEnabled()) {
        LOG.debug("Overwriting either an empty file or a directory");
      }
    }
    if (overwrite) {
      //overwrite set -> delete the object.
      store.delete(absolutePath, true);
    } else {
      throw new FileAlreadyExistsException("Path exists: " + file);
    }
  } else {
    // destination does not exist -trigger creation of the parent
    Path parent = file.getParent();
    if (parent != null) {
      if (!mkdirs(parent)) {
        throw new SwiftOperationFailedException("Mkdirs failed to create " + parent);
      }
    }
  }

  SwiftNativeOutputStream out = createSwiftOutputStream(file);
  return new FSDataOutputStream(out, statistics);
}
Example 19
Source File: MROutputFiles.java From hadoop with Apache License 2.0
/**
 * Create a local map output index file name on the same volume.
 */
@Override
public Path getOutputIndexFileForWriteInVolume(Path existing) {
  return new Path(existing.getParent(),
      MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING);
}
Example 20
Source File: IndexedStorage.java From spork with Apache License 2.0
/**
 * Construct index file path for a given data file
 * @param file - Data file
 * @return - Index file path for given data file
 */
private static Path getIndexFileName(Path file) {
  return new Path(file.getParent(), "." + file.getName() + ".index");
}
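A brief hedged illustration (the data path and class name are invented) of the sibling-path pattern used above, placing a hidden index file next to the data file:

import org.apache.hadoop.fs.Path;

public class IndexPathDemo {
  public static void main(String[] args) {
    Path data = new Path("/data/indexed/part-00000");
    // Same construction as getIndexFileName above: a hidden ".<name>.index"
    // file located in data.getParent(), i.e. beside the data file.
    Path index = new Path(data.getParent(), "." + data.getName() + ".index");
    System.out.println(index); // /data/indexed/.part-00000.index
  }
}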