Java Code Examples for org.apache.hadoop.fs.Path#depth()
The following examples show how to use org.apache.hadoop.fs.Path#depth(). They are drawn from a number of open-source projects; the source file, project, and license are noted above each example.
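Before the examples, a quick note on semantics: depth() counts the elements in the path component of the URI, so the root "/" alone has depth 0, and the scheme and authority of a fully qualified path are ignored. A minimal sketch (the class name here is ours, not part of any example below):

import org.apache.hadoop.fs.Path;

public class PathDepthDemo {
  public static void main(String[] args) {
    System.out.println(new Path("/").depth());                    // 0: the root has no elements
    System.out.println(new Path("/a").depth());                   // 1
    System.out.println(new Path("/a/b/c").depth());               // 3
    System.out.println(new Path("hdfs://namenode/a/b").depth());  // 2: scheme/authority don't count
    System.out.println(new Path("a/b").depth());                  // 2: relative paths count the same way
  }
}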
Example 1
Source File: GitFlowGraphMonitor.java From incubator-gobblin with Apache License 2.0
/**
 * check whether the file has the proper naming and hierarchy
 * @param file the relative path from the repo root
 * @return false if the file does not conform
 */
private boolean checkFilePath(String file, int depth) {
  // The file is either a node file or an edge file and needs to be stored at either:
  // flowGraphDir/nodeName/nodeName.properties (if it is a node file), or
  // flowGraphDir/nodeName/nodeName/edgeName.properties (if it is an edge file)
  Path filePath = new Path(file);
  String fileExtension = Files.getFileExtension(filePath.getName());
  if (filePath.depth() != depth || !checkFileLevelRelativeToRoot(filePath, depth)
      || !(this.javaPropsExtensions.contains(fileExtension))) {
    log.warn("Changed file does not conform to directory structure and file name format, skipping: " + filePath);
    return false;
  }
  return true;
}
Example 2
Source File: AbstractHadoopProcessor.java From localization_nifi with Apache License 2.0
/**
 * Returns the relative path of the child that does not include the filename or the root path.
 *
 * @param root
 *            the path to relativize from
 * @param child
 *            the path to relativize
 * @return the relative path
 */
public static String getPathDifference(final Path root, final Path child) {
  final int depthDiff = child.depth() - root.depth();
  if (depthDiff <= 1) {
    return "".intern();
  }
  String lastRoot = root.getName();
  Path childsParent = child.getParent();
  final StringBuilder builder = new StringBuilder();
  builder.append(childsParent.getName());
  for (int i = (depthDiff - 3); i >= 0; i--) {
    childsParent = childsParent.getParent();
    String name = childsParent.getName();
    if (name.equals(lastRoot) && childsParent.toString().endsWith(root.toString())) {
      break;
    }
    builder.insert(0, Path.SEPARATOR).insert(0, name);
  }
  return builder.toString();
}
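To see what this computes, here is a small self-contained harness (the wrapper class is ours; getPathDifference is copied verbatim from the example above, where it is public static on AbstractHadoopProcessor). For root /data/in and child /data/in/2020/01/file.txt the depth difference is 3, so the loop prepends "2020" ahead of "01" and stops before reaching the root:

import org.apache.hadoop.fs.Path;

public class PathDifferenceDemo {
  public static void main(String[] args) {
    Path root = new Path("/data/in");
    Path child = new Path("/data/in/2020/01/file.txt");
    System.out.println(getPathDifference(root, child)); // prints "2020/01"
  }

  // Copied from the example above so this sketch compiles on its own.
  public static String getPathDifference(final Path root, final Path child) {
    final int depthDiff = child.depth() - root.depth();
    if (depthDiff <= 1) {
      return "".intern();
    }
    String lastRoot = root.getName();
    Path childsParent = child.getParent();
    final StringBuilder builder = new StringBuilder();
    builder.append(childsParent.getName());
    for (int i = (depthDiff - 3); i >= 0; i--) {
      childsParent = childsParent.getParent();
      String name = childsParent.getName();
      if (name.equals(lastRoot) && childsParent.toString().endsWith(root.toString())) {
        break;
      }
      builder.insert(0, Path.SEPARATOR).insert(0, name);
    }
    return builder.toString();
  }
}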
Example 3
Source File: GitConfigMonitor.java From incubator-gobblin with Apache License 2.0
/**
 * check whether the file has the proper naming and hierarchy
 * @param configFilePath the relative path from the repo root
 * @return false if the file does not conform
 */
private boolean checkConfigFilePath(String configFilePath) {
  // The config needs to be stored at configDir/flowGroup/flowName.(pull|job|json|conf)
  Path configFile = new Path(configFilePath);
  String fileExtension = Files.getFileExtension(configFile.getName());
  if (configFile.depth() != CONFIG_FILE_DEPTH
      || !configFile.getParent().getParent().getName().equals(folderName)
      || !(PullFileLoader.DEFAULT_JAVA_PROPS_PULL_FILE_EXTENSIONS.contains(fileExtension)
          // Assumption: the HOCON extension set covers the .json/.conf cases named above;
          // the listing repeated the Java-props check here, which could never match them.
          || PullFileLoader.DEFAULT_HOCON_PULL_FILE_EXTENSIONS.contains(fileExtension))) {
    log.warn("Changed file does not conform to directory structure and file name format, skipping: " + configFilePath);
    return false;
  }
  return true;
}
Example 4
Source File: CommonFSUtils.java From hbase with Apache License 2.0
/**
 * Compare path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare the
 * '/a/b/c' part. If you passed in 'hdfs://a/b/c' and 'b/c', it would return true. Does not consider
 * scheme; i.e. if schemes differ but path or subpath matches, the two will equate.
 * @param pathToSearch Path we will be trying to match against
 * @param pathTail what to match
 * @return True if <code>pathTail</code> is tail on the path of <code>pathToSearch</code>
 */
public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
  if (pathToSearch.depth() != pathTail.depth()) {
    return false;
  }
  Path tailPath = pathTail;
  String tailName;
  Path toSearch = pathToSearch;
  String toSearchName;
  boolean result = false;
  do {
    tailName = tailPath.getName();
    if (tailName == null || tailName.length() <= 0) {
      result = true;
      break;
    }
    toSearchName = toSearch.getName();
    if (toSearchName == null || toSearchName.length() <= 0) {
      break;
    }
    // Move up a parent on each path for next go around. Path doesn't let us go off the end.
    tailPath = tailPath.getParent();
    toSearch = toSearch.getParent();
  } while (tailName.equals(toSearchName));
  return result;
}
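A few hedged sanity checks of the tail matching (the demo class is ours; CommonFSUtils.isMatchingTail is the public static method shown above, and the expected values follow from hand-tracing Path.getName()/getParent()):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class MatchingTailDemo {
  public static void main(String[] args) {
    // Scheme and authority are ignored; only the path components are compared.
    System.out.println(CommonFSUtils.isMatchingTail(
        new Path("hdfs://namenode/b/c"), new Path("/b/c")));   // true
    // A differing final component fails on the first comparison.
    System.out.println(CommonFSUtils.isMatchingTail(
        new Path("hdfs://namenode/b/c"), new Path("/b/d")));   // false
    // Unequal depths fail the up-front guard: /a/b/c is depth 3, b/c is depth 2.
    System.out.println(CommonFSUtils.isMatchingTail(
        new Path("/a/b/c"), new Path("b/c")));                 // false
  }
}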
Example 5
Source File: AbstractHadoopProcessor.java From nifi with Apache License 2.0
/**
 * Returns the relative path of the child that does not include the filename or the root path.
 *
 * @param root
 *            the path to relativize from
 * @param child
 *            the path to relativize
 * @return the relative path
 */
public static String getPathDifference(final Path root, final Path child) {
  final int depthDiff = child.depth() - root.depth();
  if (depthDiff <= 1) {
    return "".intern();
  }
  String lastRoot = root.getName();
  Path childsParent = child.getParent();
  final StringBuilder builder = new StringBuilder();
  builder.append(childsParent.getName());
  for (int i = (depthDiff - 3); i >= 0; i--) {
    childsParent = childsParent.getParent();
    String name = childsParent.getName();
    if (name.equals(lastRoot) && childsParent.toString().endsWith(root.toString())) {
      break;
    }
    builder.insert(0, Path.SEPARATOR).insert(0, name);
  }
  return builder.toString();
}
Example 6
Source File: HadoopArchives.java From big-c with Apache License 2.0
private boolean checkValidName(String name) {
  Path tmp = new Path(name);
  if (tmp.depth() != 1) {
    return false;
  }
  if (name.endsWith(".har")) {
    return true;
  }
  return false;
}
Example 7
Source File: Data.java From Hi-WAY with Apache License 2.0
public void stageOut() throws IOException {
  Path localPath = getLocalPath();
  Path hdfsDirectory = getHdfsPath().getParent();
  Path hdfsPath = getHdfsPath();
  if (hdfsDirectory.depth() > 0) {
    mkHdfsDir(hdfsDirectory);
  }
  hdfs.copyFromLocalFile(false, true, localPath, hdfsPath);
}
Example 8
Source File: GitFlowGraphMonitor.java From incubator-gobblin with Apache License 2.0
/**
 * Remove an element (i.e. either a {@link DataNode} or a {@link FlowEdge}) from the {@link FlowGraph} for
 * a renamed or deleted {@link DataNode} or {@link FlowEdge} file.
 * @param change
 */
@Override
public void removeChange(DiffEntry change) {
  Path path = new Path(change.getOldPath());
  if (path.depth() == NODE_FILE_DEPTH) {
    removeDataNode(change);
  } else if (path.depth() == EDGE_FILE_DEPTH) {
    removeFlowEdge(change);
  }
}
Example 9
Source File: FSUtils.java From hbase with Apache License 2.0
/**
 * Compare path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare the
 * '/a/b/c' part. If you passed in 'hdfs://a/b/c' and 'b/c', it would return true. Does not consider
 * scheme; i.e. if schemes differ but path or subpath matches, the two will equate.
 * @param pathToSearch Path we will be trying to match.
 * @param pathTail
 * @return True if <code>pathTail</code> is tail on the path of <code>pathToSearch</code>
 */
public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
  Path tailPath = pathTail;
  String tailName;
  Path toSearch = pathToSearch;
  String toSearchName;
  boolean result = false;
  if (pathToSearch.depth() != pathTail.depth()) {
    return false;
  }
  do {
    tailName = tailPath.getName();
    if (tailName == null || tailName.isEmpty()) {
      result = true;
      break;
    }
    toSearchName = toSearch.getName();
    if (toSearchName == null || toSearchName.isEmpty()) {
      break;
    }
    // Move up a parent on each path for next go around. Path doesn't let us go off the end.
    tailPath = tailPath.getParent();
    toSearch = toSearch.getParent();
  } while (tailName.equals(toSearchName));
  return result;
}
Example 10
Source File: SubmarineJob.java From zeppelin with Apache License 2.0
public void cleanJobDefaultCheckpointPath() {
  String jobCheckpointPath = getJobDefaultCheckpointPath();
  Path notePath = new Path(jobCheckpointPath);
  if (notePath.depth() <= 3) {
    submarineUI.outputLog("ERROR", "Checkpoint path depth must be greater than 3");
    return;
  }
  try {
    String message = "Clean up the checkpoint directory: " + jobCheckpointPath;
    submarineUI.outputLog("", message);
    hdfsClient.delete(notePath);
  } catch (IOException e) {
    LOGGER.error(e.getMessage(), e);
  }
}
Example 11
Source File: HadoopArchives.java From big-c with Apache License 2.0
private Path largestDepth(List<Path> paths) {
  Path deepest = paths.get(0);
  for (Path p : paths) {
    if (p.depth() > deepest.depth()) {
      deepest = p;
    }
  }
  return deepest;
}
Example 12
Source File: BlockIntegrityMonitor.java From RDFS with Apache License 2.0
static boolean doesParityDirExist(FileSystem parityFs, String path) throws IOException {
  // Check if it is impossible to have a parity file. We check if the
  // parent directory of the lost file exists under a parity path.
  // If the directory does not exist, the parity file cannot exist.
  Path fileRaidParent = new Path(path).getParent();
  Path dirRaidParent = (fileRaidParent != null) ? fileRaidParent.getParent() : null;
  boolean parityCanExist = false;
  for (Codec codec : Codec.getCodecs()) {
    Path parityDir = null;
    if (codec.isDirRaid) {
      if (dirRaidParent == null) {
        continue;
      }
      parityDir = (dirRaidParent.depth() == 0)
          ? new Path(codec.getParityPrefix())
          : new Path(codec.getParityPrefix(), RaidNode.makeRelative(dirRaidParent));
    } else {
      parityDir = (fileRaidParent.depth() == 0)
          ? new Path(codec.getParityPrefix())
          : new Path(codec.getParityPrefix(), RaidNode.makeRelative(fileRaidParent));
    }
    if (parityFs.exists(parityDir)) {
      parityCanExist = true;
      break;
    }
  }
  return parityCanExist;
}
Example 13
Source File: TestNameNodeMetrics.java From big-c with Apache License 2.0
/** Test metrics associated with addition of a file */
@Test
public void testFileAdd() throws Exception {
  // Add files with 100 blocks
  final Path file = getTestPath("testFileAdd");
  createFile(file, 3200, (short) 3);
  final long blockCount = 32;
  int blockCapacity = namesystem.getBlockCapacity();
  updateMetrics();
  assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));
  MetricsRecordBuilder rb = getMetrics(NN_METRICS);
  // File create operations is 1
  // Number of files created is depth of <code>file</code> path
  assertCounter("CreateFileOps", 1L, rb);
  assertCounter("FilesCreated", (long) file.depth(), rb);
  updateMetrics();
  long filesTotal = file.depth() + 1; // Add 1 for root
  rb = getMetrics(NS_METRICS);
  assertGauge("FilesTotal", filesTotal, rb);
  assertGauge("BlocksTotal", blockCount, rb);
  fs.delete(file, true);
  filesTotal--; // reduce the filecount for deleted file
  rb = waitForDnMetricValue(NS_METRICS, "FilesTotal", filesTotal);
  assertGauge("BlocksTotal", 0L, rb);
  assertGauge("PendingDeletionBlocks", 0L, rb);
  rb = getMetrics(NN_METRICS);
  // Delete file operations and number of files deleted must be 1
  assertCounter("DeleteFileOps", 1L, rb);
  assertCounter("FilesDeleted", 1L, rb);
}
Example 14
Source File: HadoopArchives.java From hadoop with Apache License 2.0
private Path largestDepth(List<Path> paths) {
  Path deepest = paths.get(0);
  for (Path p : paths) {
    if (p.depth() > deepest.depth()) {
      deepest = p;
    }
  }
  return deepest;
}
Example 15
Source File: HadoopArchives.java From RDFS with Apache License 2.0
private boolean checkValidName(String name) {
  Path tmp = new Path(name);
  if (tmp.depth() != 1) {
    return false;
  }
  if (name.endsWith(".har")) {
    return true;
  }
  return false;
}
Example 16
Source File: TestNameNodeMetrics.java From hadoop with Apache License 2.0
/** Test metrics associated with addition of a file */
@Test
public void testFileAdd() throws Exception {
  // Add files with 100 blocks
  final Path file = getTestPath("testFileAdd");
  createFile(file, 3200, (short) 3);
  final long blockCount = 32;
  int blockCapacity = namesystem.getBlockCapacity();
  updateMetrics();
  assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));
  MetricsRecordBuilder rb = getMetrics(NN_METRICS);
  // File create operations is 1
  // Number of files created is depth of <code>file</code> path
  assertCounter("CreateFileOps", 1L, rb);
  assertCounter("FilesCreated", (long) file.depth(), rb);
  updateMetrics();
  long filesTotal = file.depth() + 1; // Add 1 for root
  rb = getMetrics(NS_METRICS);
  assertGauge("FilesTotal", filesTotal, rb);
  assertGauge("BlocksTotal", blockCount, rb);
  fs.delete(file, true);
  filesTotal--; // reduce the filecount for deleted file
  rb = waitForDnMetricValue(NS_METRICS, "FilesTotal", filesTotal);
  assertGauge("BlocksTotal", 0L, rb);
  assertGauge("PendingDeletionBlocks", 0L, rb);
  rb = getMetrics(NN_METRICS);
  // Delete file operations and number of files deleted must be 1
  assertCounter("DeleteFileOps", 1L, rb);
  assertCounter("FilesDeleted", 1L, rb);
}
Example 17
Source File: SemiTransactionalHiveMetastore.java From presto with Apache License 2.0
private static boolean isSameOrParent(Path parent, Path child) {
  int parentDepth = parent.depth();
  int childDepth = child.depth();
  if (parentDepth > childDepth) {
    return false;
  }
  for (int i = childDepth; i > parentDepth; i--) {
    child = child.getParent();
  }
  return parent.equals(child);
}
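The method climbs the deeper path until both sit at the same depth, then compares. A quick sketch of the semantics (the demo class is ours; the method body is copied from the example above, where it is private static on SemiTransactionalHiveMetastore):

import org.apache.hadoop.fs.Path;

public class SameOrParentDemo {
  public static void main(String[] args) {
    System.out.println(isSameOrParent(new Path("/a/b"), new Path("/a/b/c/d"))); // true: ancestor
    System.out.println(isSameOrParent(new Path("/a/b"), new Path("/a/b")));     // true: same path
    System.out.println(isSameOrParent(new Path("/a/x"), new Path("/a/b/c")));   // false: diverges at x vs b
  }

  // Copied from the example above so this sketch compiles on its own.
  private static boolean isSameOrParent(Path parent, Path child) {
    int parentDepth = parent.depth();
    int childDepth = child.depth();
    if (parentDepth > childDepth) {
      return false;
    }
    for (int i = childDepth; i > parentDepth; i--) {
      child = child.getParent();
    }
    return parent.equals(child);
  }
}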
Example 18
Source File: HadoopArchives.java From RDFS with Apache License 2.0
private Path largestDepth(List<Path> paths) {
  Path deepest = paths.get(0);
  for (Path p : paths) {
    if (p.depth() > deepest.depth()) {
      deepest = p;
    }
  }
  return deepest;
}
Example 19
Source File: NameNodeRpcServer.java From hadoop with Apache License 2.0
/**
 * Check path length does not exceed maximum. Returns true if
 * length and depth are okay. Returns false if length is too long
 * or depth is too great.
 */
private boolean checkPathLength(String src) {
  Path srcPath = new Path(src);
  return (src.length() <= MAX_PATH_LENGTH &&
      srcPath.depth() <= MAX_PATH_DEPTH);
}
Example 20
Source File: NameNode.java From hadoop-gpu with Apache License 2.0
/**
 * Check path length does not exceed maximum. Returns true if
 * length and depth are okay. Returns false if length is too long
 * or depth is too great.
 */
private boolean checkPathLength(String src) {
  Path srcPath = new Path(src);
  return (src.length() <= MAX_PATH_LENGTH &&
      srcPath.depth() <= MAX_PATH_DEPTH);
}