org.apache.hadoop.hdfs.protocol.HdfsConstants Java Examples
The following examples show how to use org.apache.hadoop.hdfs.protocol.HdfsConstants.
Each example notes the source file, project, and license it was taken from.
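Before the examples, a minimal orientation sketch may help. It is not taken from any of the projects below; it assumes a Hadoop 2.x client classpath, an HDFS default file system, superuser privileges for the safe-mode calls, and an illustrative path /tmp/quota-demo (the class name HdfsConstantsDemo is likewise only illustrative). It shows two HdfsConstants members that recur throughout the examples: the SafeModeAction enum and the quota sentinels QUOTA_DONT_SET / QUOTA_RESET.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class HdfsConstantsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("Default file system is not HDFS");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // Toggle NameNode safe mode using the SafeModeAction enum
    // (requires HDFS superuser privileges).
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    boolean inSafeMode =
        dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);
    System.out.println("In safe mode: " + inSafeMode);
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);

    // Clear the space quota on a directory: QUOTA_DONT_SET leaves the
    // namespace quota untouched, QUOTA_RESET removes the space quota.
    Path dir = new Path("/tmp/quota-demo");
    dfs.mkdirs(dir);
    dfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
  }
}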
Example #1
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
        pair.out, HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelper.convert(
        reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
Example #2
Source File: TestHistoryFileManager.java From big-c with Apache License 2.0
@Test
public void testCreateDirsWithFileSystemBecomingAvailBeforeTimeout()
    throws Exception {
  dfsCluster.getFileSystem().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
  new Thread() {
    @Override
    public void run() {
      try {
        Thread.sleep(500);
        dfsCluster.getFileSystem().setSafeMode(
            HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
        // Safe mode has just been left, so the file system should no longer
        // report being in safe mode.
        Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
      } catch (Exception ex) {
        Assert.fail(ex.toString());
      }
    }
  }.start();
  testCreateHistoryDirs(dfsCluster.getConfiguration(0), new SystemClock());
}
Example #3
Source File: TestDatanodeRegister.java From hadoop with Apache License 2.0
@Test
public void testDifferentLayoutVersions() throws Exception {
  // We expect no exceptions to be thrown when the layout versions match.
  assertEquals(HdfsConstants.NAMENODE_LAYOUT_VERSION,
      actor.retrieveNamespaceInfo().getLayoutVersion());

  // A layout version reported by the NN that differs from the DN's should
  // not cause a failure; retrieving the namespace info should still succeed.
  doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION * 1000).when(fakeNsInfo)
      .getLayoutVersion();
  try {
    actor.retrieveNamespaceInfo();
  } catch (IOException e) {
    fail("Should not fail to retrieve NS info from DN with different layout version");
  }
}
Example #4
Source File: TestLazyPersistFiles.java From hadoop with Apache License 2.0
@Test
public void testPolicyPersistenceInFsImage() throws IOException {
  startUpCluster(false, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, 0, true);
  // checkpoint
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  cluster.restartNameNode(true);

  // Stat the file and check that the lazyPersist flag is returned back.
  HdfsFileStatus status = client.getFileInfo(path.toString());
  assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
Example #5
Source File: TestQuotaByStorageType.java From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOnChildOn() throws Exception {
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(parent, StorageType.SSD, 2 * BLOCKSIZE);
  dfs.setQuotaByStorageType(child, StorageType.SSD, 3 * BLOCKSIZE);

  // Create file of size 2.5 * BLOCKSIZE under child directory
  // Verify parent Quota applies
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  try {
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
        REPLICATION, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
  }
}
Example #6
Source File: DFSOutputStream.java From hadoop with Apache License 2.0
/**
 * Create a socket for a write pipeline
 * @param first the first datanode
 * @param length the pipeline length
 * @param client client
 * @return the socket connected to the first datanode
 */
static Socket createSocketForPipeline(final DatanodeInfo first,
    final int length, final DFSClient client) throws IOException {
  final String dnAddr = first.getXferAddr(
      client.getConf().connectToDnViaHostname);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
  }
  final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
  final Socket sock = client.socketFactory.createSocket();
  final int timeout = client.getDatanodeReadTimeout(length);
  NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(),
      client.getConf().socketTimeout);
  sock.setSoTimeout(timeout);
  sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
  }
  return sock;
}
Example #7
Source File: TestTruncateQuotaUpdate.java From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  final Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
      .build();
  cluster.waitActive();

  fsdir = cluster.getNamesystem().getFSDirectory();
  dfs = cluster.getFileSystem();

  dfs.mkdirs(dir);
  dfs.setQuota(dir, Long.MAX_VALUE - 1, DISKQUOTA);
  dfs.setQuotaByStorageType(dir, StorageType.DISK, DISKQUOTA);
  dfs.setStoragePolicy(dir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
}
Example #8
Source File: DistCpSync.java From hadoop with Apache License 2.0
private static Path getSourceSnapshotPath(Path sourceDir, String snapshotName) {
  if (Path.CUR_DIR.equals(snapshotName)) {
    return sourceDir;
  } else {
    return new Path(sourceDir,
        HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + snapshotName);
  }
}
Example #9
Source File: INode.java From hadoop with Apache License 2.0
/**
 * Get the quota set for this inode
 * @return the quota counts. The count is -1 if it is not set.
 */
public QuotaCounts getQuotaCounts() {
  return new QuotaCounts.Builder().
      nameSpace(HdfsConstants.QUOTA_RESET).
      storageSpace(HdfsConstants.QUOTA_RESET).
      typeSpaces(HdfsConstants.QUOTA_RESET).
      build();
}
Example #10
Source File: SecondaryNameNode.java From hadoop with Apache License 2.0
/**
 * Returns the Jetty server that the Namenode is listening on.
 */
private URL getInfoServer() throws IOException {
  URI fsName = FileSystem.getDefaultUri(conf);
  if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
    throw new IOException("This is not a DFS");
  }

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  URI address = DFSUtil.getInfoServerWithDefaultHost(fsName.getHost(), conf,
      scheme);
  LOG.debug("Will connect to NameNode at " + address);
  return address.toURL();
}
Example #11
Source File: FSEditLogOp.java From big-c with Apache License 2.0
/**
 * Similar to decodeOp(), but instead of doing the real decoding, we skip
 * the content of the op if the editlog format records op lengths.
 * @return the last txid of the segment, or INVALID_TXID on exception
 */
public long scanOp() throws IOException {
  if (supportEditLogLength) {
    limiter.setLimit(maxOpSize);
    in.mark(maxOpSize);

    final byte opCodeByte;
    try {
      opCodeByte = in.readByte(); // op code
    } catch (EOFException e) {
      return HdfsConstants.INVALID_TXID;
    }

    FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
    if (opCode == OP_INVALID) {
      verifyTerminator();
      return HdfsConstants.INVALID_TXID;
    }

    int length = in.readInt(); // read the length of the op
    long txid = in.readLong(); // read the txid

    // skip the remaining content
    IOUtils.skipFully(in, length - 8);
    // TODO: do we want to verify checksum for JN? For now we don't.
    return txid;
  } else {
    FSEditLogOp op = decodeOp();
    return op == null ? HdfsConstants.INVALID_TXID : op.getTransactionId();
  }
}
Example #12
Source File: TestQuotaByStorageType.java From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateDelete() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);
  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on directory "foo"
  dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 10);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());

  // Create file of size 2.5 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
      REPLICATION, seed);

  // Verify space consumed and remaining quota
  long storageTypeConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, storageTypeConsumed);

  // Delete file and verify the consumed space of the storage type is updated
  dfs.delete(createdFile1, false);
  storageTypeConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(0, storageTypeConsumed);

  QuotaCounts counts = new QuotaCounts.Builder().build();
  fnode.computeQuotaUsage(fsn.getBlockManager().getStoragePolicySuite(),
      counts, true);
  assertEquals(fnode.dumpTreeRecursively().toString(), 0,
      counts.getTypeSpaces().get(StorageType.SSD));

  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), 0);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), 0);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), 0);
}
Example #13
Source File: Hdfs.java From big-c with Apache License 2.0
/**
 * This constructor has the signature needed by
 * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}
 *
 * @param theUri which must be that of Hdfs
 * @param conf configuration
 * @throws IOException
 */
Hdfs(final URI theUri, final Configuration conf) throws IOException,
    URISyntaxException {
  super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);

  if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
    throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
  }

  String host = theUri.getHost();
  if (host == null) {
    throw new IOException("Incomplete HDFS URI, no host: " + theUri);
  }

  this.dfs = new DFSClient(theUri, conf, getStatistics());
}
Example #14
Source File: TestQuotaByStorageType.java From big-c with Apache License 2.0
private void testQuotaByStorageTypeOrTraditionalQuotaExceededCase(
    long storageSpaceQuotaInBlocks, long ssdQuotaInBlocks,
    long testFileLenInBlocks, short replication) throws Exception {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final Path testDir = new Path(dir, METHOD_NAME);

  dfs.mkdirs(testDir);
  dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  final long ssdQuota = BLOCKSIZE * ssdQuotaInBlocks;
  final long storageSpaceQuota = BLOCKSIZE * storageSpaceQuotaInBlocks;

  dfs.setQuota(testDir, Long.MAX_VALUE - 1, storageSpaceQuota);
  dfs.setQuotaByStorageType(testDir, StorageType.SSD, ssdQuota);

  INode testDirNode = fsdir.getINode4Write(testDir.toString());
  assertTrue(testDirNode.isDirectory());
  assertTrue(testDirNode.isQuotaSet());

  Path createdFile = new Path(testDir, "created_file.data");
  long fileLen = testFileLenInBlocks * BLOCKSIZE;

  try {
    DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
        fileLen, BLOCKSIZE, replication, seed);
    fail("Should have failed with DSQuotaExceededException or " +
        "QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
    long currentSSDConsumed = testDirNode.asDirectory()
        .getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(Math.min(ssdQuota, storageSpaceQuota / replication),
        currentSSDConsumed);
  }
}
Example #15
Source File: TestHDFSFileContextMainOperations.java From hadoop with Apache License 2.0
@Test
public void testOldRenameWithQuota() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "test/testOldRenameWithQuota/srcdir/src1");
  Path src2 = getTestRootPath(fc, "test/testOldRenameWithQuota/srcdir/src2");
  Path dst1 = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst1");
  Path dst2 = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst2");
  createFile(src1);
  createFile(src2);
  fs.setQuota(src1.getParent(), HdfsConstants.QUOTA_DONT_SET,
      HdfsConstants.QUOTA_DONT_SET);
  fc.mkdir(dst1.getParent(), FileContext.DEFAULT_PERM, true);

  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  /*
   * Test1: src does not exceed quota and dst has no quota check and hence
   * accommodates rename
   */
  oldRename(src1, dst1, true, false);

  /*
   * Test2: src does not exceed quota and dst has *no* quota to accommodate
   * rename.
   */
  // dstDir quota = 1 and dst1 already uses it
  oldRename(src2, dst2, false, true);

  /*
   * Test3: src exceeds quota and dst has *no* quota to accommodate rename
   */
  // src1 has no quota to accommodate new rename node
  fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
  oldRename(dst1, src1, false, true);
}
Example #16
Source File: TestDFSUpgradeWithHA.java From hadoop with Apache License 2.0
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal journal1 = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
      .getInternalState(journal1, "committedTxnId");
  return committedTxnId != null ? committedTxnId.get() :
      HdfsConstants.INVALID_TXID;
}
Example #17
Source File: INode.java From big-c with Apache License 2.0
/**
 * Get the quota set for this inode
 * @return the quota counts. The count is -1 if it is not set.
 */
public QuotaCounts getQuotaCounts() {
  return new QuotaCounts.Builder().
      nameSpace(HdfsConstants.QUOTA_RESET).
      storageSpace(HdfsConstants.QUOTA_RESET).
      typeSpaces(HdfsConstants.QUOTA_RESET).
      build();
}
Example #18
Source File: TestDFSUpgradeFromImage.java From hadoop with Apache License 2.0
void upgradeAndVerify(MiniDFSCluster.Builder bld, ClusterVerifier verifier)
    throws IOException {
  MiniDFSCluster cluster = null;
  try {
    bld.format(false).startupOption(StartupOption.UPGRADE)
        .clusterId("testClusterId");
    cluster = bld.build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    DFSClient dfsClient = dfs.dfs;
    // Safemode will be off only after upgrade is complete. Wait for it.
    while (dfsClient.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET)) {
      LOG.info("Waiting for SafeMode to be OFF.");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {}
    }
    recoverAllLeases(dfsClient, new Path("/"));
    verifyFileSystem(dfs);

    if (verifier != null) {
      verifier.verifyClusterPostUpgrade(cluster);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #19
Source File: TestQuotaByStorageType.java From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateAppend() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  // set storage policy on directory "foo" to ONESSD
  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on directory "foo"
  dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
      REPLICATION, seed);

  // Verify space consumed and remaining quota
  long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  // append several blocks
  int appendLen = BLOCKSIZE * 2;
  DFSTestUtil.appendFile(dfs, createdFile1, appendLen);
  file1Len += appendLen;

  ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
Example #20
Source File: TestReplicationPolicyWithNodeGroup.java From hadoop with Apache License 2.0
/**
 * Test replica placement policy in case of boundary topology.
 * Rack 2 has only 1 node group & can't be placed with two replicas
 * The 1st replica will be placed on writer.
 * The 2nd replica should be placed on a different rack
 * The 3rd replica should be placed on the same rack with writer, but on a
 * different node group.
 */
@Test
public void testChooseTargetsOnBoundaryTopology() throws Exception {
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }

  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    cluster.add(dataNodesInBoundaryCase[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    updateHeartbeatWithUsage(dataNodes[0],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,
        0L, 0L, 0L, 0, 0);

    updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,
        0L, 0L, 0L, 0, 0);
  }

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, dataNodesInBoundaryCase[0]);
  assertEquals(targets.length, 0);

  targets = chooseTarget(1, dataNodesInBoundaryCase[0]);
  assertEquals(targets.length, 1);

  targets = chooseTarget(2, dataNodesInBoundaryCase[0]);
  assertEquals(targets.length, 2);
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3, dataNodesInBoundaryCase[0]);
  assertEquals(targets.length, 3);
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}
Example #21
Source File: TestMetadataVersionOutput.java From hadoop with Apache License 2.0
@Test(timeout = 30000)
public void testMetadataVersionOutput() throws IOException {
  initConfig();
  dfsCluster = new MiniDFSCluster.Builder(conf).
      manageNameDfsDirs(false).
      numDataNodes(1).
      checkExitOnShutdown(false).
      build();
  dfsCluster.waitClusterUp();
  dfsCluster.shutdown(false);
  initConfig();
  final PrintStream origOut = System.out;
  final ByteArrayOutputStream baos = new ByteArrayOutputStream();
  final PrintStream stdOut = new PrintStream(baos);
  System.setOut(stdOut);
  try {
    NameNode.createNameNode(new String[] { "-metadataVersion" }, conf);
  } catch (Exception e) {
    assertExceptionContains("ExitException", e);
  }
  /* Check if meta data version is printed correctly. */
  final String verNumStr = HdfsConstants.NAMENODE_LAYOUT_VERSION + "";
  assertTrue(baos.toString("UTF-8").
      contains("HDFS Image Version: " + verNumStr));
  assertTrue(baos.toString("UTF-8").
      contains("Software format version: " + verNumStr));
  System.setOut(origOut);
}
Example #22
Source File: EditLogLedgerMetadata.java From hadoop with Apache License 2.0
EditLogLedgerMetadata(String zkPath, int dataLayoutVersion,
    long ledgerId, long firstTxId) {
  this.zkPath = zkPath;
  this.dataLayoutVersion = dataLayoutVersion;
  this.ledgerId = ledgerId;
  this.firstTxId = firstTxId;
  this.lastTxId = HdfsConstants.INVALID_TXID;
  this.inprogress = true;
}
Example #23
Source File: NamespaceInfo.java From big-c with Apache License 2.0
public NamespaceInfo(int nsID, String clusterID, String bpID,
    long cT, String buildVersion, String softwareVersion,
    long capabilities) {
  super(HdfsConstants.NAMENODE_LAYOUT_VERSION, nsID, clusterID, cT,
      NodeType.NAME_NODE);
  blockPoolID = bpID;
  this.buildVersion = buildVersion;
  this.softwareVersion = softwareVersion;
  this.capabilities = capabilities;
}
Example #24
Source File: SecondaryNameNode.java From big-c with Apache License 2.0
/**
 * Returns the Jetty server that the Namenode is listening on.
 */
private URL getInfoServer() throws IOException {
  URI fsName = FileSystem.getDefaultUri(conf);
  if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
    throw new IOException("This is not a DFS");
  }

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  URI address = DFSUtil.getInfoServerWithDefaultHost(fsName.getHost(), conf,
      scheme);
  LOG.debug("Will connect to NameNode at " + address);
  return address.toURL();
}
Example #25
Source File: DFSAdmin.java From big-c with Apache License 2.0
@Override
public void run(Path path) throws IOException {
  if (type != null) {
    dfs.setQuotaByStorageType(path, type, HdfsConstants.QUOTA_RESET);
  } else {
    dfs.setQuota(path, HdfsConstants.QUOTA_DONT_SET,
        HdfsConstants.QUOTA_RESET);
  }
}
Example #26
Source File: ClusterStatusServlet.java From terrapin with Apache License 2.0
/**
 * Get all data nodes
 * @param hdfsClient client instance for HDFS
 * @return live data nodes
 * @throws IOException if communication with the server fails
 */
public static List<String> getAllNodeNames(DFSClient hdfsClient)
    throws IOException {
  DatanodeInfo[] allNodes =
      hdfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE);
  List<String> allNodeNames = new ArrayList<String>(allNodes.length);
  for (DatanodeInfo nodeInfo : allNodes) {
    allNodeNames.add(TerrapinUtil.getHelixInstanceFromHDFSHost(
        nodeInfo.getHostName()));
  }
  return allNodeNames;
}
Example #27
Source File: TestQuotaByStorageType.java From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOnChildOff() throws Exception {
  short replication = 1;
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(parent, StorageType.SSD, 3 * BLOCKSIZE);

  // Create file of size 2.5 * BLOCKSIZE under child directory
  // Verify parent Quota applies
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
      replication, seed);

  INode fnode = fsdir.getINode4Write(parent.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());
  long currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, currentSSDConsumed);

  // Create the 2nd file of size BLOCKSIZE under child directory and expect
  // quota exceeded exception
  Path createdFile2 = new Path(child, "created_file2.data");
  long file2Len = BLOCKSIZE;

  try {
    DFSTestUtil.createFile(dfs, createdFile2, bufLen, file2Len, BLOCKSIZE,
        replication, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
    currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(file1Len, currentSSDConsumed);
  }
}
Example #28
Source File: FSDirectory.java From hadoop with Apache License 2.0
/** Verify if the inode name is legal. */
void verifyINodeName(byte[] childName) throws HadoopIllegalArgumentException {
  if (Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, childName)) {
    String s = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name.";
    if (!namesystem.isImageLoaded()) {
      s += " Please rename it before upgrade.";
    }
    throw new HadoopIllegalArgumentException(s);
  }
}
Example #29
Source File: RedundantEditLogInputStream.java From big-c with Apache License 2.0
RedundantEditLogInputStream(Collection<EditLogInputStream> streams,
    long startTxId) {
  this.curIdx = 0;
  this.prevTxId = (startTxId == HdfsConstants.INVALID_TXID) ?
      HdfsConstants.INVALID_TXID : (startTxId - 1);
  this.state = (streams.isEmpty()) ? State.EOF : State.SKIP_UNTIL;
  this.prevException = null;
  // EditLogInputStreams in a RedundantEditLogInputStream must be finalized,
  // and can't be pre-transactional.
  EditLogInputStream first = null;
  for (EditLogInputStream s : streams) {
    Preconditions.checkArgument(s.getFirstTxId() != HdfsConstants.INVALID_TXID,
        "invalid first txid in stream: %s", s);
    Preconditions.checkArgument(s.getLastTxId() != HdfsConstants.INVALID_TXID,
        "invalid last txid in stream: %s", s);
    if (first == null) {
      first = s;
    } else {
      Preconditions.checkArgument(s.getFirstTxId() == first.getFirstTxId(),
          "All streams in the RedundantEditLogInputStream must have the same " +
          "start transaction ID! " + first + " had start txId " +
          first.getFirstTxId() + ", but " + s + " had start txId " +
          s.getFirstTxId());
    }
  }
  this.streams = streams.toArray(new EditLogInputStream[0]);

  // We sort the streams here so that the streams that end later come first.
  Arrays.sort(this.streams, new Comparator<EditLogInputStream>() {
    @Override
    public int compare(EditLogInputStream a, EditLogInputStream b) {
      return Longs.compare(b.getLastTxId(), a.getLastTxId());
    }
  });
}
Example #30
Source File: HAUtil.java From big-c with Apache License 2.0
/**
 * Locate a delegation token associated with the given HA cluster URI, and if
 * one is found, clone it to also represent the underlying namenode address.
 * @param ugi the UGI to modify
 * @param haUri the logical URI for the cluster
 * @param nnAddrs collection of NNs in the cluster to which the token
 * applies
 */
public static void cloneDelegationTokenForLogicalUri(
    UserGroupInformation ugi, URI haUri,
    Collection<InetSocketAddress> nnAddrs) {
  // this cloning logic is only used by hdfs
  Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
      HdfsConstants.HDFS_URI_SCHEME);
  Token<DelegationTokenIdentifier> haToken =
      tokenSelector.selectToken(haService, ugi.getTokens());
  if (haToken != null) {
    for (InetSocketAddress singleNNAddr : nnAddrs) {
      // this is a minor hack to prevent physical HA tokens from being
      // exposed to the user via UGI.getCredentials(), otherwise these
      // cloned tokens may be inadvertently propagated to jobs
      Token<DelegationTokenIdentifier> specificToken =
          new Token.PrivateToken<DelegationTokenIdentifier>(haToken);
      SecurityUtil.setTokenService(specificToken, singleNNAddr);
      Text alias = new Text(
          buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
          + "//" + specificToken.getService());
      ugi.addToken(alias, specificToken);
      LOG.debug("Mapped HA service delegation token for logical URI " + haUri
          + " to namenode " + singleNNAddr);
    }
  } else {
    LOG.debug("No HA service delegation token found for logical URI " + haUri);
  }
}