org.apache.hadoop.fs.Path Java Examples
The following examples show how to use org.apache.hadoop.fs.Path. Each example notes the source file and the open-source project it was taken from.
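Before the project examples, here is a minimal, self-contained sketch of the most common Path operations; the directory names and the local-filesystem setup below are chosen purely for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PathBasicsSketch {
  public static void main(String[] args) throws Exception {
    // Build paths from a string or from a parent/child pair
    Path dir = new Path("/tmp/path-basics");
    Path file = new Path(dir, "data.txt");

    System.out.println(file.getName());    // data.txt
    System.out.println(file.getParent());  // /tmp/path-basics
    System.out.println(file.toUri());      // /tmp/path-basics/data.txt

    // Qualify a relative path against a FileSystem's URI and working directory
    FileSystem local = FileSystem.getLocal(new Configuration());
    Path qualified = local.makeQualified(new Path("relative/data.txt"));
    System.out.println(qualified);         // file:/<working dir>/relative/data.txt
  }
}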
Example #1
Source File: HiveTargetPathHelper.java From incubator-gobblin with Apache License 2.0
public HiveTargetPathHelper(HiveDataset dataset) {
  this.dataset = dataset;
  this.relocateDataFiles = Boolean
      .valueOf(this.dataset.getProperties().getProperty(RELOCATE_DATA_FILES_KEY, DEFAULT_RELOCATE_DATA_FILES));
  this.targetTableRoot = this.dataset.getProperties().containsKey(COPY_TARGET_TABLE_ROOT)
      ? Optional.of(resolvePath(this.dataset.getProperties().getProperty(COPY_TARGET_TABLE_ROOT),
          this.dataset.getTable().getDbName(), this.dataset.getTable().getTableName()))
      : Optional.<Path> absent();

  this.targetTablePrefixTobeReplaced =
      this.dataset.getProperties().containsKey(COPY_TARGET_TABLE_PREFIX_TOBE_REPLACED)
          ? Optional.of(new Path(this.dataset.getProperties().getProperty(COPY_TARGET_TABLE_PREFIX_TOBE_REPLACED)))
          : Optional.<Path> absent();

  this.targetTablePrefixReplacement =
      this.dataset.getProperties().containsKey(COPY_TARGET_TABLE_PREFIX_REPLACEMENT)
          ? Optional.of(new Path(this.dataset.getProperties().getProperty(COPY_TARGET_TABLE_PREFIX_REPLACEMENT)))
          : Optional.<Path> absent();
}
Example #2
Source File: ConsistentListingAspect.java From s3mper with Apache License 2.0
/**
 * Check the metastore listing against the s3 listing and return any paths
 * missing from s3.
 *
 * @param metastoreListing
 * @param s3Listing
 * @return the paths present in the metastore but missing from s3
 */
private List<Path> checkListing(List<FileInfo> metastoreListing, FileStatus[] s3Listing) {
  Map<String, FileStatus> s3paths = new HashMap<String, FileStatus>();

  if (s3Listing != null) {
    for (FileStatus fileStatus : s3Listing) {
      s3paths.put(fileStatus.getPath().toUri().normalize().getSchemeSpecificPart(), fileStatus);
    }
  }

  List<Path> missingPaths = new ArrayList<Path>();

  for (FileInfo f : metastoreListing) {
    if (f.isDeleted()) {
      continue;
    }

    if (!s3paths.containsKey(f.getPath().toUri().normalize().getSchemeSpecificPart())) {
      missingPaths.add(f.getPath());
    }
  }

  return missingPaths;
}
Example #3
Source File: TestHadoopDirTreeGenerator.java From hadoop-ozone with Apache License 2.0
private int traverseToLeaf(FileSystem fs, Path dirPath, int depth,
    int expectedDepth, int expectedSpanCnt, int expectedFileCnt,
    int perFileSizeInBytes) throws IOException {
  FileStatus[] fileStatuses = fs.listStatus(dirPath);
  // check the number of peer directories, except for the root and leaf as both
  // have fewer dirs.
  if (depth < expectedDepth - 1) {
    verifyActualSpan(expectedSpanCnt, fileStatuses);
  }
  int actualNumFiles = 0;
  for (FileStatus fileStatus : fileStatuses) {
    if (fileStatus.isDirectory()) {
      ++depth;
      return traverseToLeaf(fs, fileStatus.getPath(), depth, expectedDepth,
          expectedSpanCnt, expectedFileCnt, perFileSizeInBytes);
    } else {
      Assert.assertEquals("Mismatches file len", perFileSizeInBytes,
          fileStatus.getLen());
      actualNumFiles++;
    }
  }
  Assert.assertEquals("Mismatches files count in a directory",
      expectedFileCnt, actualNumFiles);
  return depth;
}
Example #4
Source File: UtilsForTests.java From hadoop with Apache License 2.0
/** Configure a waiting job */
static void configureWaitingJobConf(JobConf jobConf, Path inDir,
    Path outputPath, int numMaps, int numRed, String jobName,
    String mapSignalFilename, String redSignalFilename) throws IOException {
  jobConf.setJobName(jobName);
  jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class);
  jobConf.setOutputFormat(SequenceFileOutputFormat.class);
  FileInputFormat.setInputPaths(jobConf, inDir);
  FileOutputFormat.setOutputPath(jobConf, outputPath);
  jobConf.setMapperClass(UtilsForTests.HalfWaitingMapper.class);
  jobConf.setReducerClass(IdentityReducer.class);
  jobConf.setOutputKeyClass(BytesWritable.class);
  jobConf.setOutputValueClass(BytesWritable.class);
  // note: this second setInputFormat call overrides the one above
  jobConf.setInputFormat(RandomInputFormat.class);
  jobConf.setNumMapTasks(numMaps);
  jobConf.setNumReduceTasks(numRed);
  jobConf.setJar("build/test/mapred/testjar/testjob.jar");
  jobConf.set(getTaskSignalParameter(true), mapSignalFilename);
  jobConf.set(getTaskSignalParameter(false), redSignalFilename);
}
Example #5
Source File: HadoopIgfs20FileSystemAbstractSelfTest.java From ignite with Apache License 2.0
/** @throws Exception If failed. */
@Test
public void testSetPermissionIfOutputStreamIsNotClosed() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path file = new Path(fsHome, "myFile");

  FsPermission perm = new FsPermission((short)123);

  FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(FsPermission.getDefault()));

  fs.setPermission(file, perm);

  os.close();

  assertEquals(perm, fs.getFileStatus(file).getPermission());
}
Example #6
Source File: SimpleFileIODatasetRuntimeTest.java From components with Apache License 2.0
@Test
public void testGetSampleWithSpecialPath() throws Exception {
  RecordSet rs = getSimpleTestData(0);
  writeRandomCsvFile(mini.getFs(), "/user/test/Marketing Customer Contacts US.CSV", rs, "UTF-8");
  String fileSpec = mini.getFs().getUri()
      .resolve(new Path("/user/test/Marketing Customer Contacts US.CSV").toUri()).toString();
  // the call above escapes the spaces; un-escape them here because the path as the
  // customer sets it should not be an escaped one
  fileSpec = fileSpec.replace("%20", " ");

  // Configure the component.
  SimpleFileIODatasetProperties props = createDatasetProperties();
  props.format.setValue(SimpleFileIOFormat.CSV);
  props.path.setValue(fileSpec);

  final List<IndexedRecord> actual = getSample(props, Integer.MAX_VALUE);

  assertThat(actual, hasSize(10));
}
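Separately from the test harness above, here is a minimal sketch (reusing the same illustrative file name) of the escaping behavior that the comment refers to: converting a Path with spaces to a URI percent-encodes them.

import org.apache.hadoop.fs.Path;

public class PathEscapingSketch {
  public static void main(String[] args) {
    // A path containing spaces, purely for illustration
    Path p = new Path("/user/test/Marketing Customer Contacts US.CSV");
    // toUri() renders special characters percent-encoded (%20) ...
    System.out.println(p.toUri().toString());
    // ... while getPath() on the resulting URI returns the decoded form
    System.out.println(p.toUri().getPath());
  }
}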
Example #7
Source File: TestRubixCaching.java From presto with Apache License 2.0
private FileSystem getCachingFileSystem(HdfsContext context, Path path)
    throws IOException {
  HdfsConfigurationInitializer configurationInitializer = new HdfsConfigurationInitializer(config, ImmutableSet.of());
  HiveHdfsConfiguration configuration = new HiveHdfsConfiguration(
      configurationInitializer,
      ImmutableSet.of(
          rubixConfigInitializer,
          (dynamicConfig, ignoredContext, ignoredUri) -> {
            dynamicConfig.set("fs.file.impl", CachingLocalFileSystem.class.getName());
            dynamicConfig.setBoolean("fs.gs.lazy.init.enable", true);
            dynamicConfig.set("fs.azure.account.key", "Zm9vCg==");
            dynamicConfig.set("fs.adl.oauth2.client.id", "test");
            dynamicConfig.set("fs.adl.oauth2.refresh.url", "http://localhost");
            dynamicConfig.set("fs.adl.oauth2.credential", "password");
          }));
  HdfsEnvironment environment = new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());
  return environment.getFileSystem(context, path);
}
Example #8
Source File: TestStoreFileInfo.java From hbase with Apache License 2.0
@Test
public void testOpenErrorMessageHFileLink() throws IOException, IllegalStateException {
  // Test file link exception
  // Try to open nonsense hfilelink. Make sure exception is from HFileLink.
  Path p = new Path("/hbase/test/0123/cf/testtb=4567-abcd");
  try (FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration())) {
    StoreFileInfo sfi = new StoreFileInfo(TEST_UTIL.getConfiguration(), fs, p, true);
    try {
      ReaderContext context = sfi.createReaderContext(false, 1000, ReaderType.PREAD);
      sfi.createReader(context, null);
      throw new IllegalStateException();
    } catch (FileNotFoundException fnfe) {
      assertTrue(fnfe.getMessage().contains(HFileLink.class.getSimpleName()));
    }
  }
}
Example #9
Source File: BenchmarkThroughput.java From hadoop with Apache License 2.0
private void writeAndReadFile(FileSystem fs, String name, Configuration conf,
    long size) throws IOException {
  Path f = null;
  try {
    f = writeFile(fs, name, conf, size);
    readFile(fs, f, name, conf);
  } finally {
    try {
      if (f != null) {
        fs.delete(f, true);
      }
    } catch (IOException ie) {
      // IGNORE
    }
  }
}
Example #10
Source File: ParquetRecordWriter.java From Bats with Apache License 2.0
@Override
public void abort() throws IOException {
  List<String> errors = Lists.newArrayList();

  for (Path location : cleanUpLocations) {
    try {
      if (fs.exists(location)) {
        fs.delete(location, true);
        logger.info("Aborting writer. Location [{}] on file system [{}] is deleted.",
            location.toUri().getPath(), fs.getUri());
      }
    } catch (IOException e) {
      errors.add(location.toUri().getPath());
      logger.error("Failed to delete location [{}] on file system [{}].",
          location, fs.getUri(), e);
    }
  }

  if (!errors.isEmpty()) {
    throw new IOException(String.format("Failed to delete the following locations %s on file system [%s]"
        + " during aborting writer", errors, fs.getUri()));
  }
}
Example #11
Source File: TestHftpFileSystem.java From RDFS with Apache License 2.0
public void readHftpFile(boolean strictContentLength, boolean sendContentLength)
    throws IOException, URISyntaxException {
  int bufSize = 128 * 1024;
  byte[] buf = DFSTestUtil.generateSequentialBytes(0, bufSize);
  final ByteArrayInputStream inputStream = new ByteArrayInputStream(buf);
  final long contentLength = bufSize + 1;
  Configuration conf = new Configuration();

  conf.setBoolean(HftpFileSystem.STRICT_CONTENT_LENGTH, strictContentLength);

  HftpFileSystem fileSystem = new MockHftpFileSystem(
      sendContentLength ? contentLength : null, inputStream, conf);
  FSDataInputStream dataInputStream = fileSystem.open(new Path("dont-care"));
  byte[] readBuf = new byte[1024];

  while (dataInputStream.read(readBuf) > -1) {
    // nothing
  }

  dataInputStream.close();
}
Example #12
Source File: TestHadoopArchives.java From hadoop with Apache License 2.0
@Test
public void testGlobFiles() throws Exception {
  final Path sub1 = new Path(inputPath, "dir1");
  final Path sub2 = new Path(inputPath, "dir2");
  fs.mkdirs(sub1);
  String fileName = "a";
  createFile(inputPath, fs, sub1.getName(), fileName);
  createFile(inputPath, fs, sub2.getName(), fileName);
  createFile(inputPath, fs, sub1.getName(), "b"); // not part of result

  final String glob = "dir{1,2}/a";
  final FsShell shell = new FsShell(conf);
  final List<String> originalPaths = lsr(shell, inputPath.toString(),
      inputPath + "/" + glob);
  System.out.println("originalPaths: " + originalPaths);

  // make the archive:
  final String fullHarPathStr = makeArchive(inputPath, glob);

  // compare results:
  final List<String> harPaths = lsr(shell, fullHarPathStr,
      fullHarPathStr + "/" + glob);
  Assert.assertEquals(originalPaths, harPaths);
}
Example #13
Source File: TestHadoopArchives.java From big-c with Apache License 2.0
@Test
public void testSingleFile() throws Exception {
  final Path sub1 = new Path(inputPath, "dir1");
  fs.mkdirs(sub1);
  String singleFileName = "a";
  createFile(inputPath, fs, sub1.getName(), singleFileName);
  final FsShell shell = new FsShell(conf);

  final List<String> originalPaths = lsr(shell, sub1.toString());
  System.out.println("originalPaths: " + originalPaths);

  // make the archive:
  final String fullHarPathStr = makeArchive(sub1, singleFileName);

  // compare results:
  final List<String> harPaths = lsr(shell, fullHarPathStr);
  Assert.assertEquals(originalPaths, harPaths);
}
Example #14
Source File: TestJobCleanup.java From big-c with Apache License 2.0
@BeforeClass
public static void setUp() throws IOException {
  JobConf conf = new JobConf();
  fileSys = FileSystem.get(conf);
  fileSys.delete(new Path(TEST_ROOT_DIR), true);
  conf.set("mapred.job.tracker.handler.count", "1");
  conf.set("mapred.job.tracker", "127.0.0.1:0");
  conf.set("mapred.job.tracker.http.address", "127.0.0.1:0");
  conf.set("mapred.task.tracker.http.address", "127.0.0.1:0");
  conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR, TEST_ROOT_DIR
      + "/intermediate");
  conf.set(org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
      .SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, "true");

  mr = new MiniMRCluster(1, "file:///", 1, null, null, conf);
  inDir = new Path(TEST_ROOT_DIR, "test-input");
  String input = "The quick brown fox\n" + "has many silly\n"
      + "red fox sox\n";
  DataOutputStream file = fileSys.create(new Path(inDir, "part-" + 0));
  file.writeBytes(input);
  file.close();
  emptyInDir = new Path(TEST_ROOT_DIR, "empty-input");
  fileSys.mkdirs(emptyInDir);
}
Example #15
Source File: TestOzoneFileSystem.java From hadoop-ozone with Apache License 2.0
private void testDeleteCreatesFakeParentDir() throws Exception {
  Path grandparent = new Path("/testDeleteCreatesFakeParentDir");
  Path parent = new Path(grandparent, "parent");
  Path child = new Path(parent, "child");
  ContractTestUtils.touch(fs, child);
  rootItemCount++; // grandparent

  // Verify that parent dir key does not exist
  // Creating a child should not add parent keys to the bucket
  try {
    getKey(parent, true);
  } catch (IOException ex) {
    assertKeyNotFoundException(ex);
  }

  // Delete the child key
  fs.delete(child, false);

  // Deleting the only child should create the parent dir key if it does
  // not exist
  String parentKey = o3fs.pathToKey(parent) + "/";
  OzoneKeyDetails parentKeyInfo = getKey(parent, true);
  assertEquals(parentKey, parentKeyInfo.getName());
}
Example #16
Source File: ProviderUtils.java From hadoop with Apache License 2.0
/**
 * Convert a nested URI to decode the underlying path. The translation takes
 * the authority and parses it into the underlying scheme and authority.
 * For example, "myscheme://hdfs@nn/my/path" is converted to
 * "hdfs://nn/my/path".
 * @param nestedUri the URI from the nested URI
 * @return the unnested path
 */
public static Path unnestUri(URI nestedUri) {
  String[] parts = nestedUri.getAuthority().split("@", 2);
  StringBuilder result = new StringBuilder(parts[0]);
  result.append("://");
  if (parts.length == 2) {
    result.append(parts[1]);
  }
  result.append(nestedUri.getPath());
  if (nestedUri.getQuery() != null) {
    result.append("?");
    result.append(nestedUri.getQuery());
  }
  if (nestedUri.getFragment() != null) {
    result.append("#");
    result.append(nestedUri.getFragment());
  }
  return new Path(result.toString());
}
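A minimal usage sketch of the method above, based on its Javadoc example; it assumes unnestUri is reachable as the public static org.apache.hadoop.security.ProviderUtils.unnestUri, as in Hadoop's source tree.

import java.net.URI;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.ProviderUtils;

public class UnnestUriSketch {
  public static void main(String[] args) {
    // The nested URI from the Javadoc example above
    URI nested = URI.create("myscheme://hdfs@nn/my/path");
    Path unnested = ProviderUtils.unnestUri(nested);
    System.out.println(unnested); // expected: hdfs://nn/my/path
  }
}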
Example #17
Source File: Paths.java From s3committer with Apache License 2.0
private static Path localTemp(Configuration conf, int taskId, int attemptId) {
  String localDirs = conf.get("mapreduce.cluster.local.dir");
  Random rand = new Random(Objects.hashCode(taskId, attemptId));
  String[] dirs = localDirs.split(",");
  String dir = dirs[rand.nextInt(dirs.length)];

  try {
    return FileSystem.getLocal(conf).makeQualified(new Path(dir));
  } catch (IOException e) {
    throw new RuntimeException("Failed to localize path: " + dir, e);
  }
}
Example #18
Source File: BasicRootedOzoneFileSystem.java From hadoop-ozone with Apache License 2.0
/**
 * Deletes the children of the input dir path by iterating through the
 * DeleteIterator.
 *
 * @param f directory path to be deleted
 * @param recursive whether to delete the directory contents recursively
 * @return true if successfully deletes all required keys, false otherwise
 * @throws IOException
 */
private boolean innerDelete(Path f, boolean recursive) throws IOException {
  LOG.trace("delete() path:{} recursive:{}", f, recursive);
  try {
    DeleteIterator iterator = new DeleteIterator(f, recursive);
    return iterator.iterate();
  } catch (FileNotFoundException e) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Couldn't delete {} - does not exist", f);
    }
    return false;
  }
}
Example #19
Source File: HDFSResourceStore.java From kylin with Apache License 2.0
private TreeSet<String> getFilePath(Path p, String resPathPrefix) throws IOException {
  TreeSet<String> fileList = new TreeSet<>();
  for (FileStatus fileStat : fs.listStatus(p)) {
    fileList.add(resPathPrefix + fileStat.getPath().getName());
  }
  return fileList;
}
Example #20
Source File: FileMergerTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void testBlocksPath() {
  Assert.assertEquals("Blocks path not initialized in application context",
      context.getValue(DAGContext.APPLICATION_PATH) + Path.SEPARATOR
          + BlockWriter.DEFAULT_BLOCKS_DIR + Path.SEPARATOR,
      testFM.blocksDir);
}
Example #21
Source File: AbstractContractDeleteTest.java From big-c with Apache License 2.0
@Test
public void testDeleteNonexistentPathNonRecursive() throws Throwable {
  Path path = path("testDeleteNonexistentPathNonRecursive");
  ContractTestUtils.assertPathDoesNotExist(getFileSystem(), "leftover", path);
  ContractTestUtils.rejectRootOperation(path);
  assertFalse("Returned true attempting to recursively delete"
          + " a nonexistent path " + path,
      getFileSystem().delete(path, false));
}
Example #22
Source File: TestLazyOutput.java From big-c with Apache License 2.0
private static void runTestLazyOutput(JobConf job, Path output,
    int numReducers, boolean createLazily) throws Exception {

  job.setJobName("test-lazy-output");

  FileInputFormat.setInputPaths(job, INPUT);
  FileOutputFormat.setOutputPath(job, output);
  job.setInputFormat(TextInputFormat.class);
  job.setMapOutputKeyClass(LongWritable.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputKeyClass(LongWritable.class);
  job.setOutputValueClass(Text.class);

  job.setMapperClass(TestMapper.class);
  job.setReducerClass(TestReducer.class);

  JobClient client = new JobClient(job);
  job.setNumReduceTasks(numReducers);
  if (createLazily) {
    LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);
  } else {
    job.setOutputFormat(TextOutputFormat.class);
  }
  JobClient.runJob(job);
}
Example #23
Source File: TestDataJoin.java From RDFS with Apache License 2.0
private static SequenceFile.Writer[] createWriters(Path testdir,
    JobConf conf, int srcs, Path[] src) throws IOException {
  for (int i = 0; i < srcs; ++i) {
    src[i] = new Path(testdir, Integer.toString(i + 10, 36));
  }
  SequenceFile.Writer out[] = new SequenceFile.Writer[srcs];
  for (int i = 0; i < srcs; ++i) {
    out[i] = new SequenceFile.Writer(testdir.getFileSystem(conf), conf,
        src[i], Text.class, Text.class);
  }
  return out;
}
Example #24
Source File: TestStorageMover.java From hadoop with Apache License 2.0
/**
 * Create files/directories/snapshots.
 */
void prepare(DistributedFileSystem dfs, short repl) throws Exception {
  for (Path d : dirs) {
    dfs.mkdirs(d);
  }
  for (Path file : files) {
    DFSTestUtil.createFile(dfs, file, fileSize, repl, 0L);
  }
  for (Map.Entry<Path, List<String>> entry : snapshotMap.entrySet()) {
    for (String snapshot : entry.getValue()) {
      SnapshotTestHelper.createSnapshot(dfs, entry.getKey(), snapshot);
    }
  }
}
Example #25
Source File: CopyListingTest.java From circus-train with Apache License 2.0
@Test(timeout = 10000)
public void defaultCopyListing() throws Exception {
  S3MapReduceCpOptions options = S3MapReduceCpOptions
      .builder(Arrays.asList(new Path("/tmp/in4")), new URI("/tmp/out4"))
      .build();
  CopyListing listing = CopyListing.getCopyListing(CONFIG, CREDENTIALS, options);
  assertThat(listing, is(instanceOf(SimpleCopyListing.class)));
}
Example #26
Source File: HDFSResourceStore.java From kylin-on-parquet-v2 with Apache License 2.0
private Path getRealHDFSPath(String resourcePath) {
  if (resourcePath.equals("/"))
    return this.hdfsMetaPath;
  if (resourcePath.startsWith("/") && resourcePath.length() > 1)
    resourcePath = resourcePath.substring(1, resourcePath.length());
  return new Path(this.hdfsMetaPath, resourcePath);
}
Example #27
Source File: ViewFs.java From hadoop with Apache License 2.0
@Override
public Path getHomeDirectory() {
  if (homeDir == null) {
    String base = fsState.getHomeDirPrefixValue();
    if (base == null) {
      base = "/user";
    }
    homeDir = (base.equals("/")
        ? this.makeQualified(new Path(base + ugi.getShortUserName()))
        : this.makeQualified(new Path(base + "/" + ugi.getShortUserName())));
  }
  return homeDir;
}
Example #28
Source File: SnapshotTestHelper.java From hadoop with Apache License 2.0
/**
 * Recursively generate the tree based on the height.
 *
 * @param parent The parent node
 * @param level The remaining levels to generate
 * @param fs The FileSystem where to generate the files/dirs
 * @throws Exception
 */
private void genChildren(Node parent, int level, FileSystem fs) throws Exception {
  if (level == 0) {
    return;
  }
  parent.leftChild = new Node(new Path(parent.nodePath,
      "left" + ++id), height - level, parent, fs);
  parent.rightChild = new Node(new Path(parent.nodePath,
      "right" + ++id), height - level, parent, fs);
  addDirNode(parent.leftChild, parent.leftChild.level);
  addDirNode(parent.rightChild, parent.rightChild.level);
  genChildren(parent.leftChild, level - 1, fs);
  genChildren(parent.rightChild, level - 1, fs);
}
Example #29
Source File: HoodieRealtimeRecordReaderUtils.java From hudi with Apache License 2.0
/**
 * Reads the schema from the base file.
 */
public static Schema readSchema(Configuration conf, Path filePath) {
  try {
    HoodieFileReader storageReader = HoodieFileReaderFactory.getFileReader(conf, filePath);
    return storageReader.getSchema();
  } catch (IOException e) {
    throw new HoodieIOException("Failed to read schema from " + filePath, e);
  }
}
Example #30
Source File: TestPseudoDistributedFileSystem.java From dremio-oss with Apache License 2.0
@Test
public void testDeleteUnknownLocalFile() throws IOException {
  doThrow(FileNotFoundException.class).when(mockLocalFS).delete(
      new Path("/foo/unknown"), false);

  Path path = new Path("/foo/10.0.0.1@unknown");
  try {
    fs.delete(path, false);
    fail("Expecting FileNotFoundException");
  } catch (FileNotFoundException e) {
    // nothing
  }
}