Java Code Examples for org.apache.hadoop.fs.FileSystem#setTimes()
The following examples show how to use org.apache.hadoop.fs.FileSystem#setTimes(). They are taken from open-source projects; the source file, originating project, and license are noted above each example.
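FileSystem#setTimes(Path p, long mtime, long atime) sets the modification and access times of a path, both in milliseconds since the epoch; passing -1 for either argument leaves that timestamp unchanged. Before the project examples, here is a minimal, self-contained sketch of typical usage. It is an illustration only: the Configuration defaults and the /tmp/example.txt path are placeholders, not taken from any project below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetTimesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // uses whatever fs.defaultFS is configured
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.txt");   // hypothetical path; must already exist

    // Set the modification time to "now" and leave the access time unchanged (-1).
    long now = System.currentTimeMillis();
    fs.setTimes(path, now, -1);

    // Re-read the status to confirm the new timestamps.
    FileStatus status = fs.getFileStatus(path);
    System.out.println("mtime=" + status.getModificationTime()
        + ", atime=" + status.getAccessTime());

    fs.close();
  }
}

Note that not every FileSystem implementation honors setTimes (the base-class implementation is a no-op), which is why several of the examples below re-read the FileStatus afterwards and assert on the result.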
Example 1
Source File: BaseTestHttpFSWith.java From big-c with Apache License 2.0 | 6 votes |
private void testSetTimes() throws Exception {
  if (!isLocalFS()) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    long at = status1.getAccessTime();
    long mt = status1.getModificationTime();

    fs = getHttpFSFileSystem();
    fs.setTimes(path, mt - 10, at - 20);
    fs.close();

    fs = FileSystem.get(getProxiedFSConf());
    status1 = fs.getFileStatus(path);
    fs.close();
    long atNew = status1.getAccessTime();
    long mtNew = status1.getModificationTime();
    Assert.assertEquals(mtNew, mt - 10);
    Assert.assertEquals(atNew, at - 20);
  }
}
Example 2
Source File: TestAuditLogger.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * Tests that AuditLogger works as expected.
 */
@Test
public void testAuditLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY, DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    fs.setTimes(new Path("/"), time, time);

    assertEquals(1, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
Example 3
Source File: TestDistCpUtils.java From hadoop with Apache License 2.0 | 5 votes |
@Test
public void testPreserveUserOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.USER);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");
  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
Example 4
Source File: Utils.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Copy a local file to a remote file system.
 *
 * @param fs
 *        remote filesystem
 * @param appId
 *        application ID
 * @param localSrcPath
 *        path to the local file
 * @param homedir
 *        remote home directory base (will be extended)
 * @param relativeTargetPath
 *        relative target path of the file (will be prefixed by the full home directory we set up)
 *
 * @return Path to remote file (usually hdfs)
 */
static Tuple2<Path, LocalResource> setupLocalResource(
    FileSystem fs,
    String appId,
    Path localSrcPath,
    Path homedir,
    String relativeTargetPath) throws IOException {

  File localFile = new File(localSrcPath.toUri().getPath());
  if (localFile.isDirectory()) {
    throw new IllegalArgumentException("File to copy must not be a directory: " + localSrcPath);
  }

  // copy resource to HDFS
  String suffix =
      ".flink/" + appId
          + (relativeTargetPath.isEmpty() ? "" : "/" + relativeTargetPath)
          + "/" + localSrcPath.getName();

  Path dst = new Path(homedir, suffix);

  LOG.debug("Copying from {} to {}", localSrcPath, dst);

  fs.copyFromLocalFile(false, true, localSrcPath, dst);

  // Note: If we used registerLocalResource(FileSystem, Path) here, we would access the remote
  // file once again which has problems with eventually consistent read-after-write file
  // systems. Instead, we decide to preserve the modification time at the remote
  // location because this and the size of the resource will be checked by YARN based on
  // the values we provide to #registerLocalResource() below.
  fs.setTimes(dst, localFile.lastModified(), -1);

  // now create the resource instance
  LocalResource resource = registerLocalResource(dst, localFile.length(), localFile.lastModified());
  return Tuple2.of(dst, resource);
}
Example 5
Source File: TestDistCpUtils.java From hadoop with Apache License 2.0 | 5 votes |
@Test
public void testPreserveGroupOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.GROUP);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");
  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
Example 6
Source File: AbstractDistCacheConfigurer.java From titan1withtp3.1 with Apache License 2.0 | 5 votes |
protected Path uploadFileIfNecessary(FileSystem localFS, Path localPath, FileSystem destFS) throws IOException {

  // Fast path for local FS -- DistributedCache + local JobRunner seems to copy/link files automatically
  if (destFS.equals(localFS)) {
    log.debug("Skipping file upload for {} (destination filesystem {} equals local filesystem)",
        localPath, destFS);
    return localPath;
  }

  Path destPath = new Path(destFS.getHomeDirectory() + "/" + HDFS_TMP_LIB_DIR + "/" + localPath.getName());

  Stats fileStats = null;

  try {
    fileStats = compareModtimes(localFS, localPath, destFS, destPath);
  } catch (IOException e) {
    log.warn("Unable to read or stat file: localPath={}, destPath={}, destFS={}",
        localPath, destPath, destFS);
  }

  if (!fileStats.isRemoteCopyCurrent()) {
    log.debug("Copying {} to {}", localPath, destPath);
    destFS.copyFromLocalFile(localPath, destPath);
    if (null != fileStats.local) {
      final long mtime = fileStats.local.getModificationTime();
      log.debug("Setting modtime on {} to {}", destPath, mtime);
      destFS.setTimes(destPath, mtime, -1); // -1 means leave atime alone
    }
  }

  return destPath;
}
Example 7
Source File: TestDistCpUtils.java From hadoop with Apache License 2.0 | 5 votes |
@Test
public void testPreserveNothingOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.noneOf(FileAttribute.class);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");
  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
Example 8
Source File: TestDistCpUtils.java From hadoop with Apache License 2.0 | 5 votes |
@Test
public void testPreserveTimestampOnDirectory() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.TIMES);

  Path dst = new Path("/tmp/abc");
  Path src = new Path("/tmp/src");
  createDirectory(fs, src);
  createDirectory(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
}
Example 9
Source File: TestDistCpUtils.java From big-c with Apache License 2.0 | 5 votes |
@Test
public void testPreserveUserOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.USER);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");
  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
Example 10
Source File: TestDistCpUtils.java From hadoop with Apache License 2.0 | 5 votes |
@Test
public void testPreserveDefaults() throws IOException {
  FileSystem fs = FileSystem.get(config);

  // preserve replication, block size, user, group, permission,
  // checksum type and timestamps
  EnumSet<FileAttribute> attributes = DistCpUtils.unpackAttributes(
      DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.substring(1));

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");
  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertTrue(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
Example 11
Source File: TestDistCpUtils.java From big-c with Apache License 2.0 | 5 votes |
@Test
public void testPreserveGroupOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.GROUP);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");
  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
Example 12
Source File: TestDistCpUtils.java From big-c with Apache License 2.0 | 5 votes |
@Test
public void testPreservePermissionOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.PERMISSION);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");
  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertTrue(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
Example 13
Source File: TestNameNodeMXBean.java From hadoop with Apache License 2.0 | 5 votes |
@Test(timeout = 120000)
public void testTopUsersDisabled() throws Exception {
  final Configuration conf = new Configuration();
  // Disable nntop
  conf.setBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY, false);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    FileSystem fs = cluster.getFileSystem();
    final Path path = new Path("/");
    final int NUM_OPS = 10;
    for (int i = 0; i < NUM_OPS; i++) {
      fs.listStatus(path);
      fs.setTimes(path, 0, 1);
    }
    String topUsers =
        (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
    assertNull("Did not expect to find TopUserOpCounts bean!", topUsers);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 14
Source File: DistCpUtils.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Preserve attribute on file matching that of the file status being sent
 * as argument. Barring the block size, all the other attributes are preserved
 * by this function.
 *
 * @param targetFS - File system
 * @param path - Path that needs to preserve original file status
 * @param srcFileStatus - Original file status
 * @param attributes - Attribute set that needs to be preserved
 * @param preserveRawXattrs if true, raw.* xattrs should be preserved
 * @throws IOException - Exception if any (particularly relating to group/owner
 *   change or any transient error)
 */
public static void preserve(FileSystem targetFS, Path path,
    CopyListingFileStatus srcFileStatus,
    EnumSet<FileAttribute> attributes,
    boolean preserveRawXattrs) throws IOException {

  FileStatus targetFileStatus = targetFS.getFileStatus(path);
  String group = targetFileStatus.getGroup();
  String user = targetFileStatus.getOwner();
  boolean chown = false;

  if (attributes.contains(FileAttribute.ACL)) {
    List<AclEntry> srcAcl = srcFileStatus.getAclEntries();
    List<AclEntry> targetAcl = getAcl(targetFS, targetFileStatus);
    if (!srcAcl.equals(targetAcl)) {
      targetFS.setAcl(path, srcAcl);
    }
    // setAcl doesn't preserve sticky bit, so also call setPermission if needed.
    if (srcFileStatus.getPermission().getStickyBit() !=
        targetFileStatus.getPermission().getStickyBit()) {
      targetFS.setPermission(path, srcFileStatus.getPermission());
    }
  } else if (attributes.contains(FileAttribute.PERMISSION) &&
      !srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) {
    targetFS.setPermission(path, srcFileStatus.getPermission());
  }

  final boolean preserveXAttrs = attributes.contains(FileAttribute.XATTR);
  if (preserveXAttrs || preserveRawXattrs) {
    final String rawNS = StringUtils.toLowerCase(XAttr.NameSpace.RAW.name());
    Map<String, byte[]> srcXAttrs = srcFileStatus.getXAttrs();
    Map<String, byte[]> targetXAttrs = getXAttrs(targetFS, path);
    if (srcXAttrs != null && !srcXAttrs.equals(targetXAttrs)) {
      for (Entry<String, byte[]> entry : srcXAttrs.entrySet()) {
        String xattrName = entry.getKey();
        if (xattrName.startsWith(rawNS) || preserveXAttrs) {
          targetFS.setXAttr(path, xattrName, entry.getValue());
        }
      }
    }
  }

  if (attributes.contains(FileAttribute.REPLICATION) && !targetFileStatus.isDirectory() &&
      (srcFileStatus.getReplication() != targetFileStatus.getReplication())) {
    targetFS.setReplication(path, srcFileStatus.getReplication());
  }

  if (attributes.contains(FileAttribute.GROUP) &&
      !group.equals(srcFileStatus.getGroup())) {
    group = srcFileStatus.getGroup();
    chown = true;
  }

  if (attributes.contains(FileAttribute.USER) &&
      !user.equals(srcFileStatus.getOwner())) {
    user = srcFileStatus.getOwner();
    chown = true;
  }

  if (chown) {
    targetFS.setOwner(path, user, group);
  }

  if (attributes.contains(FileAttribute.TIMES)) {
    targetFS.setTimes(path,
        srcFileStatus.getModificationTime(),
        srcFileStatus.getAccessTime());
  }
}
Example 15
Source File: TestDistCpUtils.java From hadoop with Apache License 2.0 | 4 votes |
@Test
public void testPreserveOnFileDownwardRecursion() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.allOf(FileAttribute.class);
  // Remove ACL because tests run with dfs.namenode.acls.enabled false
  attributes.remove(FileAttribute.ACL);

  Path src = new Path("/tmp/src2");
  Path f0 = new Path("/f0");
  Path f1 = new Path("/d1/f1");
  Path f2 = new Path("/d1/d2/f2");
  Path d1 = new Path("/d1/");
  Path d2 = new Path("/d1/d2/");

  createFile(fs, src);
  createFile(fs, f0);
  createFile(fs, f1);
  createFile(fs, f2);

  fs.setPermission(src, almostFullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(d1, fullPerm);
  fs.setOwner(d1, "anybody", "anybody-group");
  fs.setTimes(d1, 400, 400);
  fs.setReplication(d1, (short) 3);

  fs.setPermission(d2, fullPerm);
  fs.setOwner(d2, "anybody", "anybody-group");
  fs.setTimes(d2, 300, 300);
  fs.setReplication(d2, (short) 3);

  fs.setPermission(f0, fullPerm);
  fs.setOwner(f0, "anybody", "anybody-group");
  fs.setTimes(f0, 200, 200);
  fs.setReplication(f0, (short) 3);

  fs.setPermission(f1, fullPerm);
  fs.setOwner(f1, "anybody", "anybody-group");
  fs.setTimes(f1, 200, 200);
  fs.setReplication(f1, (short) 3);

  fs.setPermission(f2, fullPerm);
  fs.setOwner(f2, "anybody", "anybody-group");
  fs.setTimes(f2, 200, 200);
  fs.setReplication(f2, (short) 3);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, f0, srcStatus, attributes, false);

  cluster.triggerHeartbeats();

  // FileStatus.equals only compares path field, must explicitly compare all fields

  // attributes of src -> f0 ? should be yes
  CopyListingFileStatus f0Status = new CopyListingFileStatus(fs.getFileStatus(f0));
  Assert.assertTrue(srcStatus.getPermission().equals(f0Status.getPermission()));
  Assert.assertTrue(srcStatus.getOwner().equals(f0Status.getOwner()));
  Assert.assertTrue(srcStatus.getGroup().equals(f0Status.getGroup()));
  Assert.assertTrue(srcStatus.getAccessTime() == f0Status.getAccessTime());
  Assert.assertTrue(srcStatus.getModificationTime() == f0Status.getModificationTime());
  Assert.assertTrue(srcStatus.getReplication() == f0Status.getReplication());

  // attributes of src -> f1 ? should be no
  CopyListingFileStatus f1Status = new CopyListingFileStatus(fs.getFileStatus(f1));
  Assert.assertFalse(srcStatus.getPermission().equals(f1Status.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(f1Status.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(f1Status.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == f1Status.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == f1Status.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == f1Status.getReplication());

  // attributes of src -> f2 ? should be no
  CopyListingFileStatus f2Status = new CopyListingFileStatus(fs.getFileStatus(f2));
  Assert.assertFalse(srcStatus.getPermission().equals(f2Status.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(f2Status.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(f2Status.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == f2Status.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == f2Status.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == f2Status.getReplication());

  // attributes of src -> d1 ? should be no
  CopyListingFileStatus d1Status = new CopyListingFileStatus(fs.getFileStatus(d1));
  Assert.assertFalse(srcStatus.getPermission().equals(d1Status.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(d1Status.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(d1Status.getGroup()));
  Assert.assertTrue(d1Status.getAccessTime() == 400);
  Assert.assertTrue(d1Status.getModificationTime() == 400);
  Assert.assertFalse(srcStatus.getReplication() == d1Status.getReplication());

  // attributes of src -> d2 ? should be no
  CopyListingFileStatus d2Status = new CopyListingFileStatus(fs.getFileStatus(d2));
  Assert.assertFalse(srcStatus.getPermission().equals(d2Status.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(d2Status.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(d2Status.getGroup()));
  Assert.assertTrue(d2Status.getAccessTime() == 300);
  Assert.assertTrue(d2Status.getModificationTime() == 300);
  Assert.assertFalse(srcStatus.getReplication() == d2Status.getReplication());
}
Example 16
Source File: TestMasterRegionCompaction.java From hbase with Apache License 2.0 | 4 votes |
@Test
public void test() throws IOException, InterruptedException {
  for (int i = 0; i < compactMin - 1; i++) {
    final int index = i;
    region.update(
      r -> r.put(new Put(Bytes.toBytes(index)).addColumn(CF1, QUALIFIER, Bytes.toBytes(index))
        .addColumn(CF2, QUALIFIER, Bytes.toBytes(index))));
    region.flush(true);
  }
  assertEquals(2 * (compactMin - 1), getStorefilesCount());
  region.update(r -> r.put(new Put(Bytes.toBytes(compactMin - 1)).addColumn(CF1, QUALIFIER,
    Bytes.toBytes(compactMin - 1))));
  region.flusherAndCompactor.requestFlush();
  htu.waitFor(15000, () -> getStorefilesCount() == 2);
  Path store1ArchiveDir = HFileArchiveUtil.getStoreArchivePathForRootDir(htu.getDataTestDir(),
    region.region.getRegionInfo(), CF1);
  Path store2ArchiveDir = HFileArchiveUtil.getStoreArchivePathForRootDir(htu.getDataTestDir(),
    region.region.getRegionInfo(), CF2);
  FileSystem fs = store1ArchiveDir.getFileSystem(htu.getConfiguration());
  // after compaction, the old hfiles should have been compacted
  htu.waitFor(15000, () -> {
    try {
      FileStatus[] fses1 = fs.listStatus(store1ArchiveDir);
      FileStatus[] fses2 = fs.listStatus(store2ArchiveDir);
      return fses1 != null && fses1.length == compactMin && fses2 != null &&
        fses2.length == compactMin - 1;
    } catch (FileNotFoundException e) {
      return false;
    }
  });
  // ttl has not expired, so should not delete any files
  Thread.sleep(1000);
  FileStatus[] compactedHFiles = fs.listStatus(store1ArchiveDir);
  assertEquals(compactMin, compactedHFiles.length);
  assertFileCount(fs, store2ArchiveDir, compactMin - 1);
  Thread.sleep(2000);
  // touch one file
  long currentTime = System.currentTimeMillis();
  fs.setTimes(compactedHFiles[0].getPath(), currentTime, currentTime);
  Thread.sleep(3000);
  // only the touched file is still there after clean up
  FileStatus[] remainingHFiles = fs.listStatus(store1ArchiveDir);
  assertEquals(1, remainingHFiles.length);
  assertEquals(compactedHFiles[0].getPath(), remainingHFiles[0].getPath());
  assertFalse(fs.exists(store2ArchiveDir));
  Thread.sleep(6000);
  // the touched file should also be cleaned up and then the cleaner will delete the parent
  // directory since it is empty.
  assertFalse(fs.exists(store1ArchiveDir));
}
Example 17
Source File: TestNameNodeMXBean.java From hadoop with Apache License 2.0 | 4 votes |
@Test(timeout = 120000)
@SuppressWarnings("unchecked")
public void testTopUsers() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    FileSystem fs = cluster.getFileSystem();
    final Path path = new Path("/");
    final int NUM_OPS = 10;
    for (int i = 0; i < NUM_OPS; i++) {
      fs.listStatus(path);
      fs.setTimes(path, 0, 1);
    }
    String topUsers =
        (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
    ObjectMapper mapper = new ObjectMapper();
    Map<String, Object> map = mapper.readValue(topUsers, Map.class);
    assertTrue("Could not find map key timestamp", map.containsKey("timestamp"));
    assertTrue("Could not find map key windows", map.containsKey("windows"));
    List<Map<String, List<Map<String, Object>>>> windows =
        (List<Map<String, List<Map<String, Object>>>>) map.get("windows");
    assertEquals("Unexpected num windows", 3, windows.size());
    for (Map<String, List<Map<String, Object>>> window : windows) {
      final List<Map<String, Object>> ops = window.get("ops");
      assertEquals("Unexpected num ops", 3, ops.size());
      for (Map<String, Object> op : ops) {
        final long count = Long.parseLong(op.get("totalCount").toString());
        final String opType = op.get("opType").toString();
        final int expected;
        if (opType.equals(TopConf.ALL_CMDS)) {
          expected = 2 * NUM_OPS;
        } else {
          expected = NUM_OPS;
        }
        assertEquals("Unexpected total count", expected, count);
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 18
Source File: TestDistributedFileSystem.java From hadoop with Apache License 2.0 | 4 votes |
@Test
public void testStatistics() throws Exception {
  int lsLimit = 2;
  final Configuration conf = getTestConfiguration();
  conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, lsLimit);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    final FileSystem fs = cluster.getFileSystem();
    Path dir = new Path("/test");
    Path file = new Path(dir, "file");

    int readOps = DFSTestUtil.getStatistics(fs).getReadOps();
    int writeOps = DFSTestUtil.getStatistics(fs).getWriteOps();
    int largeReadOps = DFSTestUtil.getStatistics(fs).getLargeReadOps();

    fs.mkdirs(dir);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);

    FSDataOutputStream out = fs.create(file, (short) 1);
    out.close();
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);

    FileStatus status = fs.getFileStatus(file);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);

    fs.getFileBlockLocations(file, 0, 0);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);

    fs.getFileBlockLocations(status, 0, 0);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);

    FSDataInputStream in = fs.open(file);
    in.close();
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);

    fs.setReplication(file, (short) 2);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);

    Path file1 = new Path(dir, "file1");
    fs.rename(file, file1);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);

    fs.getContentSummary(file1);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);

    // Iterative ls test
    for (int i = 0; i < 10; i++) {
      Path p = new Path(dir, Integer.toString(i));
      fs.mkdirs(p);
      FileStatus[] list = fs.listStatus(dir);
      if (list.length > lsLimit) {
        // if large directory, then count readOps and largeReadOps by
        // number times listStatus iterates
        int iterations = (int) Math.ceil((double) list.length / lsLimit);
        largeReadOps += iterations;
        readOps += iterations;
      } else {
        // Single iteration in listStatus - no large read operation done
        readOps++;
      }

      // writeOps incremented by 1 for mkdirs
      // readOps and largeReadOps incremented by 1 or more
      checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    }

    fs.getStatus(file1);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);

    fs.getFileChecksum(file1);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);

    fs.setPermission(file1, new FsPermission((short) 0777));
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);

    fs.setTimes(file1, 0L, 0L);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);

    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    fs.setOwner(file1, ugi.getUserName(), ugi.getGroupNames()[0]);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);

    fs.delete(dir, true);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
  } finally {
    if (cluster != null) cluster.shutdown();
  }
}
Example 19
Source File: TestDistCpUtils.java From big-c with Apache License 2.0 | 4 votes |
@Test
public void testPreserveOnFileDownwardRecursion() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.allOf(FileAttribute.class);
  // Remove ACL because tests run with dfs.namenode.acls.enabled false
  attributes.remove(FileAttribute.ACL);

  Path src = new Path("/tmp/src2");
  Path f0 = new Path("/f0");
  Path f1 = new Path("/d1/f1");
  Path f2 = new Path("/d1/d2/f2");
  Path d1 = new Path("/d1/");
  Path d2 = new Path("/d1/d2/");

  createFile(fs, src);
  createFile(fs, f0);
  createFile(fs, f1);
  createFile(fs, f2);

  fs.setPermission(src, almostFullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(d1, fullPerm);
  fs.setOwner(d1, "anybody", "anybody-group");
  fs.setTimes(d1, 400, 400);
  fs.setReplication(d1, (short) 3);

  fs.setPermission(d2, fullPerm);
  fs.setOwner(d2, "anybody", "anybody-group");
  fs.setTimes(d2, 300, 300);
  fs.setReplication(d2, (short) 3);

  fs.setPermission(f0, fullPerm);
  fs.setOwner(f0, "anybody", "anybody-group");
  fs.setTimes(f0, 200, 200);
  fs.setReplication(f0, (short) 3);

  fs.setPermission(f1, fullPerm);
  fs.setOwner(f1, "anybody", "anybody-group");
  fs.setTimes(f1, 200, 200);
  fs.setReplication(f1, (short) 3);

  fs.setPermission(f2, fullPerm);
  fs.setOwner(f2, "anybody", "anybody-group");
  fs.setTimes(f2, 200, 200);
  fs.setReplication(f2, (short) 3);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, f0, srcStatus, attributes, false);

  cluster.triggerHeartbeats();

  // FileStatus.equals only compares path field, must explicitly compare all fields

  // attributes of src -> f0 ? should be yes
  CopyListingFileStatus f0Status = new CopyListingFileStatus(fs.getFileStatus(f0));
  Assert.assertTrue(srcStatus.getPermission().equals(f0Status.getPermission()));
  Assert.assertTrue(srcStatus.getOwner().equals(f0Status.getOwner()));
  Assert.assertTrue(srcStatus.getGroup().equals(f0Status.getGroup()));
  Assert.assertTrue(srcStatus.getAccessTime() == f0Status.getAccessTime());
  Assert.assertTrue(srcStatus.getModificationTime() == f0Status.getModificationTime());
  Assert.assertTrue(srcStatus.getReplication() == f0Status.getReplication());

  // attributes of src -> f1 ? should be no
  CopyListingFileStatus f1Status = new CopyListingFileStatus(fs.getFileStatus(f1));
  Assert.assertFalse(srcStatus.getPermission().equals(f1Status.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(f1Status.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(f1Status.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == f1Status.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == f1Status.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == f1Status.getReplication());

  // attributes of src -> f2 ? should be no
  CopyListingFileStatus f2Status = new CopyListingFileStatus(fs.getFileStatus(f2));
  Assert.assertFalse(srcStatus.getPermission().equals(f2Status.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(f2Status.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(f2Status.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == f2Status.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == f2Status.getModificationTime());
  Assert.assertFalse(srcStatus.getReplication() == f2Status.getReplication());

  // attributes of src -> d1 ? should be no
  CopyListingFileStatus d1Status = new CopyListingFileStatus(fs.getFileStatus(d1));
  Assert.assertFalse(srcStatus.getPermission().equals(d1Status.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(d1Status.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(d1Status.getGroup()));
  Assert.assertTrue(d1Status.getAccessTime() == 400);
  Assert.assertTrue(d1Status.getModificationTime() == 400);
  Assert.assertFalse(srcStatus.getReplication() == d1Status.getReplication());

  // attributes of src -> d2 ? should be no
  CopyListingFileStatus d2Status = new CopyListingFileStatus(fs.getFileStatus(d2));
  Assert.assertFalse(srcStatus.getPermission().equals(d2Status.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(d2Status.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(d2Status.getGroup()));
  Assert.assertTrue(d2Status.getAccessTime() == 300);
  Assert.assertTrue(d2Status.getModificationTime() == 300);
  Assert.assertFalse(srcStatus.getReplication() == d2Status.getReplication());
}
Example 20
Source File: FSOperations.java From hadoop with Apache License 2.0 | 2 votes |
/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 *
 * @return void.
 *
 * @throws IOException thrown if an IO error occurred.
 */
@Override
public Void execute(FileSystem fs) throws IOException {
  fs.setTimes(path, mTime, aTime);
  return null;
}