org.apache.hadoop.io.EnumSetWritable Java Examples
The following examples show how to use org.apache.hadoop.io.EnumSetWritable. They are taken from the Apache Hadoop, big-c, and Apache HBase codebases; each example lists its source file and license.
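Before looking at the project code, the sketch below illustrates the basic idea behind the class: EnumSetWritable wraps a java.util.EnumSet so it can travel through Hadoop's Writable serialization, for example as an RPC argument the way the create and append calls below use it. This is a minimal, hypothetical round-trip written for this page rather than taken from any of the projects above; it assumes a Hadoop 2.x-style classpath where CreateFlag, DataOutputBuffer, and DataInputBuffer are available from org.apache.hadoop.fs and org.apache.hadoop.io.

import java.util.EnumSet;

import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.EnumSetWritable;

public class EnumSetWritableRoundTrip {
  public static void main(String[] args) throws Exception {
    // Wrap an EnumSet in a Writable so it can cross an RPC boundary.
    EnumSetWritable<CreateFlag> flags = new EnumSetWritable<>(
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), CreateFlag.class);

    // Serialize into an in-memory buffer.
    DataOutputBuffer out = new DataOutputBuffer();
    flags.write(out);

    // Deserialize into a fresh instance (empty set plus element type).
    EnumSetWritable<CreateFlag> copy = new EnumSetWritable<>(
        EnumSet.noneOf(CreateFlag.class), CreateFlag.class);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    copy.readFields(in);

    // The wrapped set and membership checks survive the round trip.
    System.out.println(copy.get());                        // e.g. [CREATE, OVERWRITE]
    System.out.println(copy.contains(CreateFlag.CREATE));  // true
  }
}

Most of the examples below use the same pattern indirectly: the HDFS client builds an EnumSetWritable<CreateFlag> and passes it to ClientProtocol.create or ClientProtocol.append, while the protobuf translators convert it to and from an integer bit mask (see the PBHelper examples).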
Example #1
Source File: ClientNamenodeProtocolTranslatorPB.java From hadoop with Apache License 2.0

@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws AccessControlException,
    DSQuotaExceededException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
      .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
      .build();
  try {
    AppendResponseProto res = rpcProxy.append(null, req);
    LocatedBlock lastBlock = res.hasBlock() ? PBHelper
        .convert(res.getBlock()) : null;
    HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
        : null;
    return new LastBlockWithStatus(lastBlock, stat);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Example #2
Source File: TestEncryptionZones.java From hadoop with Apache License 2.0

@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp, CipherSuite suite,
    CryptoProtocolVersion version) throws Exception {
  Mockito.doReturn(
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 777), "owner", "group", new byte[0], new byte[0],
          1010, 0, new FileEncryptionInfo(suite, version,
              new byte[suite.getAlgorithmBlockSize()],
              new byte[suite.getAlgorithmBlockSize()],
              "fakeKey", "fakeVersion"),
          (byte) 0))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
}
Example #3
Source File: TestAddBlockRetry.java From hadoop with Apache License 2.0

@Test
public void testAddBlockRetryShouldReturnBlockWithLocations()
    throws Exception {
  final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  // create file
  nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 3, 1024, null);
  // start first addBlock()
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertTrue("Block locations should be present",
      lb1.getLocations().length > 0);
  cluster.restartNameNode();
  nameNodeRpc = cluster.getNameNodeRpc();
  LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
  assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
}
Example #4
Source File: ClientNamenodeProtocolTranslatorPB.java From big-c with Apache License 2.0

@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws AccessControlException,
    DSQuotaExceededException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
      .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
      .build();
  try {
    AppendResponseProto res = rpcProxy.append(null, req);
    LocatedBlock lastBlock = res.hasBlock() ? PBHelper
        .convert(res.getBlock()) : null;
    HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
        : null;
    return new LastBlockWithStatus(lastBlock, stat);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Example #5
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java From big-c with Apache License 2.0

@Override
public AppendResponseProto append(RpcController controller,
    AppendRequestProto req) throws ServiceException {
  try {
    EnumSetWritable<CreateFlag> flags = req.hasFlag() ?
        PBHelper.convertCreateFlag(req.getFlag()) :
        new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
    LastBlockWithStatus result = server.append(req.getSrc(),
        req.getClientName(), flags);
    AppendResponseProto.Builder builder = AppendResponseProto.newBuilder();
    if (result.getLastBlock() != null) {
      builder.setBlock(PBHelper.convert(result.getLastBlock()));
    }
    if (result.getFileStatus() != null) {
      builder.setStat(PBHelper.convert(result.getFileStatus()));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Example #6
Source File: TestNamenodeRetryCache.java From hadoop with Apache License 2.0

/**
 * Test for create file
 */
@Test
public void testCreate() throws Exception {
  String src = "/testNamenodeRetryCache/testCreate/file";
  // Two retried calls succeed
  newCall();
  HdfsFileStatus status = nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 1, BlockSize, null);
  Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 1, BlockSize, null));
  Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 1, BlockSize, null));

  // A non-retried call fails
  newCall();
  try {
    nnRpc.create(src, perm, "holder",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
        (short) 1, BlockSize, null);
    Assert.fail("testCreate - expected exception is not thrown");
  } catch (IOException e) {
    // expected
  }
}
Example #7
Source File: NNThroughputBenchmark.java From hadoop with Apache License 2.0

/**
 * Do file create.
 */
@Override
long executeOp(int daemonId, int inputIdx, String clientName)
    throws IOException {
  long start = Time.now();
  // dummyActionNoSynch(fileIdx);
  nameNodeProto.create(fileNames[daemonId][inputIdx],
      FsPermission.getDefault(), clientName,
      new EnumSetWritable<CreateFlag>(EnumSet
          .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true,
      replication, BLOCK_SIZE, null);
  long end = Time.now();
  for (boolean written = !closeUponCreate; !written;
      written = nameNodeProto.complete(fileNames[daemonId][inputIdx],
          clientName, null, INodeId.GRANDFATHER_INODE_ID));
  return end - start;
}
Example #8
Source File: PBHelper.java From big-c with Apache License 2.0

public static EnumSetWritable<CreateFlag> convertCreateFlag(int flag) {
  EnumSet<CreateFlag> result = EnumSet.noneOf(CreateFlag.class);
  if ((flag & CreateFlagProto.APPEND_VALUE) == CreateFlagProto.APPEND_VALUE) {
    result.add(CreateFlag.APPEND);
  }
  if ((flag & CreateFlagProto.CREATE_VALUE) == CreateFlagProto.CREATE_VALUE) {
    result.add(CreateFlag.CREATE);
  }
  if ((flag & CreateFlagProto.OVERWRITE_VALUE)
      == CreateFlagProto.OVERWRITE_VALUE) {
    result.add(CreateFlag.OVERWRITE);
  }
  if ((flag & CreateFlagProto.LAZY_PERSIST_VALUE)
      == CreateFlagProto.LAZY_PERSIST_VALUE) {
    result.add(CreateFlag.LAZY_PERSIST);
  }
  if ((flag & CreateFlagProto.NEW_BLOCK_VALUE)
      == CreateFlagProto.NEW_BLOCK_VALUE) {
    result.add(CreateFlag.NEW_BLOCK);
  }
  return new EnumSetWritable<CreateFlag>(result, CreateFlag.class);
}
Example #9
Source File: NNThroughputBenchmark.java From big-c with Apache License 2.0

/**
 * Do file create.
 */
@Override
long executeOp(int daemonId, int inputIdx, String clientName)
    throws IOException {
  long start = Time.now();
  // dummyActionNoSynch(fileIdx);
  nameNodeProto.create(fileNames[daemonId][inputIdx],
      FsPermission.getDefault(), clientName,
      new EnumSetWritable<CreateFlag>(EnumSet
          .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true,
      replication, BLOCK_SIZE, null);
  long end = Time.now();
  for (boolean written = !closeUponCreate; !written;
      written = nameNodeProto.complete(fileNames[daemonId][inputIdx],
          clientName, null, INodeId.GRANDFATHER_INODE_ID));
  return end - start;
}
Example #10
Source File: TestNamenodeRetryCache.java From big-c with Apache License 2.0

/**
 * Test for create file
 */
@Test
public void testCreate() throws Exception {
  String src = "/testNamenodeRetryCache/testCreate/file";
  // Two retried calls succeed
  newCall();
  HdfsFileStatus status = nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 1, BlockSize, null);
  Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 1, BlockSize, null));
  Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 1, BlockSize, null));

  // A non-retried call fails
  newCall();
  try {
    nnRpc.create(src, perm, "holder",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
        (short) 1, BlockSize, null);
    Assert.fail("testCreate - expected exception is not thrown");
  } catch (IOException e) {
    // expected
  }
}
Example #11
Source File: PBHelper.java From big-c with Apache License 2.0

public static int convertCreateFlag(EnumSetWritable<CreateFlag> flag) {
  int value = 0;
  if (flag.contains(CreateFlag.APPEND)) {
    value |= CreateFlagProto.APPEND.getNumber();
  }
  if (flag.contains(CreateFlag.CREATE)) {
    value |= CreateFlagProto.CREATE.getNumber();
  }
  if (flag.contains(CreateFlag.OVERWRITE)) {
    value |= CreateFlagProto.OVERWRITE.getNumber();
  }
  if (flag.contains(CreateFlag.LAZY_PERSIST)) {
    value |= CreateFlagProto.LAZY_PERSIST.getNumber();
  }
  if (flag.contains(CreateFlag.NEW_BLOCK)) {
    value |= CreateFlagProto.NEW_BLOCK.getNumber();
  }
  return value;
}
Example #12
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java From hadoop with Apache License 2.0

@Override
public AppendResponseProto append(RpcController controller,
    AppendRequestProto req) throws ServiceException {
  try {
    EnumSetWritable<CreateFlag> flags = req.hasFlag() ?
        PBHelper.convertCreateFlag(req.getFlag()) :
        new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
    LastBlockWithStatus result = server.append(req.getSrc(),
        req.getClientName(), flags);
    AppendResponseProto.Builder builder = AppendResponseProto.newBuilder();
    if (result.getLastBlock() != null) {
      builder.setBlock(PBHelper.convert(result.getLastBlock()));
    }
    if (result.getFileStatus() != null) {
      builder.setStat(PBHelper.convert(result.getFileStatus()));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Example #13
Source File: PBHelper.java From hadoop with Apache License 2.0

public static EnumSetWritable<CreateFlag> convertCreateFlag(int flag) {
  EnumSet<CreateFlag> result = EnumSet.noneOf(CreateFlag.class);
  if ((flag & CreateFlagProto.APPEND_VALUE) == CreateFlagProto.APPEND_VALUE) {
    result.add(CreateFlag.APPEND);
  }
  if ((flag & CreateFlagProto.CREATE_VALUE) == CreateFlagProto.CREATE_VALUE) {
    result.add(CreateFlag.CREATE);
  }
  if ((flag & CreateFlagProto.OVERWRITE_VALUE)
      == CreateFlagProto.OVERWRITE_VALUE) {
    result.add(CreateFlag.OVERWRITE);
  }
  if ((flag & CreateFlagProto.LAZY_PERSIST_VALUE)
      == CreateFlagProto.LAZY_PERSIST_VALUE) {
    result.add(CreateFlag.LAZY_PERSIST);
  }
  if ((flag & CreateFlagProto.NEW_BLOCK_VALUE)
      == CreateFlagProto.NEW_BLOCK_VALUE) {
    result.add(CreateFlag.NEW_BLOCK);
  }
  return new EnumSetWritable<CreateFlag>(result, CreateFlag.class);
}
Example #14
Source File: PBHelper.java From hadoop with Apache License 2.0

public static int convertCreateFlag(EnumSetWritable<CreateFlag> flag) {
  int value = 0;
  if (flag.contains(CreateFlag.APPEND)) {
    value |= CreateFlagProto.APPEND.getNumber();
  }
  if (flag.contains(CreateFlag.CREATE)) {
    value |= CreateFlagProto.CREATE.getNumber();
  }
  if (flag.contains(CreateFlag.OVERWRITE)) {
    value |= CreateFlagProto.OVERWRITE.getNumber();
  }
  if (flag.contains(CreateFlag.LAZY_PERSIST)) {
    value |= CreateFlagProto.LAZY_PERSIST.getNumber();
  }
  if (flag.contains(CreateFlag.NEW_BLOCK)) {
    value |= CreateFlagProto.NEW_BLOCK.getNumber();
  }
  return value;
}
Example #15
Source File: TestAddBlockRetry.java From big-c with Apache License 2.0

@Test
public void testAddBlockRetryShouldReturnBlockWithLocations()
    throws Exception {
  final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  // create file
  nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 3, 1024, null);
  // start first addBlock()
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertTrue("Block locations should be present",
      lb1.getLocations().length > 0);
  cluster.restartNameNode();
  nameNodeRpc = cluster.getNameNodeRpc();
  LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
  assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
}
Example #16
Source File: TestEncryptionZones.java From big-c with Apache License 2.0

@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp, CipherSuite suite,
    CryptoProtocolVersion version) throws Exception {
  Mockito.doReturn(
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 777), "owner", "group", new byte[0], new byte[0],
          1010, 0, new FileEncryptionInfo(suite, version,
              new byte[suite.getAlgorithmBlockSize()],
              new byte[suite.getAlgorithmBlockSize()],
              "fakeKey", "fakeVersion"),
          (byte) 0))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
}
Example #17
Source File: DFSClient.java From hadoop with Apache License 2.0

/** Method to get stream returned by append call */
private DFSOutputStream callAppend(String src, int buffersize,
    EnumSet<CreateFlag> flag, Progressable progress, String[] favoredNodes)
    throws IOException {
  CreateFlag.validateForAppend(flag);
  try {
    LastBlockWithStatus blkWithStatus = namenode.append(src, clientName,
        new EnumSetWritable<>(flag, CreateFlag.class));
    return DFSOutputStream.newStreamForAppend(this, src, flag, buffersize,
        progress, blkWithStatus.getLastBlock(),
        blkWithStatus.getFileStatus(), dfsClientConf.createChecksum(),
        favoredNodes);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        SafeModeException.class,
        DSQuotaExceededException.class,
        UnsupportedOperationException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  }
}
Example #18
Source File: DFSClient.java From big-c with Apache License 2.0

/** Method to get stream returned by append call */
private DFSOutputStream callAppend(String src, int buffersize,
    EnumSet<CreateFlag> flag, Progressable progress, String[] favoredNodes)
    throws IOException {
  CreateFlag.validateForAppend(flag);
  try {
    LastBlockWithStatus blkWithStatus = namenode.append(src, clientName,
        new EnumSetWritable<>(flag, CreateFlag.class));
    return DFSOutputStream.newStreamForAppend(this, src, flag, buffersize,
        progress, blkWithStatus.getLastBlock(),
        blkWithStatus.getFileStatus(), dfsClientConf.createChecksum(),
        favoredNodes);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        SafeModeException.class,
        DSQuotaExceededException.class,
        UnsupportedOperationException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  }
}
Example #19
Source File: TestRetryCacheWithHA.java From big-c with Apache License 2.0

@Override
void invoke() throws Exception {
  EnumSet<CreateFlag> createFlag = EnumSet.of(CreateFlag.CREATE);
  this.status = client.getNamenode().create(fileName,
      FsPermission.getFileDefault(), client.getClientName(),
      new EnumSetWritable<CreateFlag>(createFlag), false, DataNodes,
      BlockSize,
      new CryptoProtocolVersion[] {CryptoProtocolVersion.ENCRYPTION_ZONES});
}
Example #20
Source File: NameNodeRpcServer.java From big-c with Apache License 2.0

@Override // ClientProtocol
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws IOException {
  checkNNStartup();
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.append: file "
        + src + " for " + clientName + " at " + clientMachine);
  }
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
      null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (LastBlockWithStatus) cacheEntry.getPayload();
  }

  LastBlockWithStatus info = null;
  boolean success = false;
  try {
    info = namesystem.appendFile(src, clientName, clientMachine, flag.get(),
        cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success, info);
  }
  metrics.incrFilesAppended();
  return info;
}
Example #21
Source File: ClientNamenodeProtocolTranslatorPB.java From big-c with Apache License 2.0

@Override
public HdfsFileStatus create(String src, FsPermission masked,
    String clientName, EnumSetWritable<CreateFlag> flag,
    boolean createParent, short replication, long blockSize,
    CryptoProtocolVersion[] supportedVersions)
    throws AccessControlException, AlreadyBeingCreatedException,
    DSQuotaExceededException, FileAlreadyExistsException,
    FileNotFoundException, NSQuotaExceededException,
    ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
    IOException {
  CreateRequestProto.Builder builder = CreateRequestProto.newBuilder()
      .setSrc(src)
      .setMasked(PBHelper.convert(masked))
      .setClientName(clientName)
      .setCreateFlag(PBHelper.convertCreateFlag(flag))
      .setCreateParent(createParent)
      .setReplication(replication)
      .setBlockSize(blockSize);
  builder.addAllCryptoProtocolVersion(PBHelper.convert(supportedVersions));
  CreateRequestProto req = builder.build();
  try {
    CreateResponseProto res = rpcProxy.create(null, req);
    return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Example #22
Source File: TestLease.java From big-c with Apache License 2.0

@SuppressWarnings("unchecked")
@Test
public void testFactory() throws Exception {
  final String[] groups = new String[]{"supergroup"};
  final UserGroupInformation[] ugi = new UserGroupInformation[3];
  for (int i = 0; i < ugi.length; i++) {
    ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
  }
  Mockito.doReturn(
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 777), "owner", "group", new byte[0], new byte[0],
          1010, 0, null, (byte) 0)).when(mcp).getFileInfo(anyString());
  Mockito
      .doReturn(
          new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
              (short) 777), "owner", "group", new byte[0], new byte[0],
              1010, 0, null, (byte) 0))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());

  final Configuration conf = new Configuration();
  final DFSClient c1 = createDFSClientAs(ugi[0], conf);
  FSDataOutputStream out1 = createFsOut(c1, "/out1");
  final DFSClient c2 = createDFSClientAs(ugi[0], conf);
  FSDataOutputStream out2 = createFsOut(c2, "/out2");
  Assert.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());
  final DFSClient c3 = createDFSClientAs(ugi[1], conf);
  FSDataOutputStream out3 = createFsOut(c3, "/out3");
  Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
  final DFSClient c4 = createDFSClientAs(ugi[1], conf);
  FSDataOutputStream out4 = createFsOut(c4, "/out4");
  Assert.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());
  final DFSClient c5 = createDFSClientAs(ugi[2], conf);
  FSDataOutputStream out5 = createFsOut(c5, "/out5");
  Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
  Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
}
Example #23
Source File: FanOutOneBlockAsyncDFSOutputHelper.java From hbase with Apache License 2.0

private static FileCreator createFileCreator2() throws NoSuchMethodException {
  Method createMethod = ClientProtocol.class.getMethod("create", String.class,
      FsPermission.class, String.class, EnumSetWritable.class, boolean.class,
      short.class, long.class, CryptoProtocolVersion[].class);

  return (instance, src, masked, clientName, flag, createParent, replication,
      blockSize, supportedVersions) -> {
    return (HdfsFileStatus) createMethod.invoke(instance, src, masked,
        clientName, flag, createParent, replication, blockSize,
        supportedVersions);
  };
}
Example #24
Source File: TestNamenodeRetryCache.java From big-c with Apache License 2.0

/**
 * Test for append
 */
@Test
public void testAppend() throws Exception {
  String src = "/testNamenodeRetryCache/testAppend/src";
  resetCall();
  // Create a file with partial block
  DFSTestUtil.createFile(filesystem, new Path(src), 128, (short) 1, 0L);

  // Retried append requests succeed
  newCall();
  LastBlockWithStatus b = nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
  Assert.assertEquals(b, nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
  Assert.assertEquals(b, nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));

  // non-retried call fails
  newCall();
  try {
    nnRpc.append(src, "holder",
        new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
    Assert.fail("testAppend - expected exception is not thrown");
  } catch (Exception e) {
    // Expected
  }
}
Example #25
Source File: FanOutOneBlockAsyncDFSOutputHelper.java From hbase with Apache License 2.0

default HdfsFileStatus create(ClientProtocol instance, String src,
    FsPermission masked, String clientName, EnumSetWritable<CreateFlag> flag,
    boolean createParent, short replication, long blockSize,
    CryptoProtocolVersion[] supportedVersions) throws Exception {
  try {
    return (HdfsFileStatus) createObject(instance, src, masked, clientName,
        flag, createParent, replication, blockSize, supportedVersions);
  } catch (InvocationTargetException e) {
    if (e.getCause() instanceof Exception) {
      throw (Exception) e.getCause();
    } else {
      throw new RuntimeException(e.getCause());
    }
  }
}
Example #26
Source File: FanOutOneBlockAsyncDFSOutputHelper.java From hbase with Apache License 2.0

private static FileCreator createFileCreator3_3() throws NoSuchMethodException {
  Method createMethod = ClientProtocol.class.getMethod("create", String.class,
      FsPermission.class, String.class, EnumSetWritable.class, boolean.class,
      short.class, long.class, CryptoProtocolVersion[].class, String.class,
      String.class);

  return (instance, src, masked, clientName, flag, createParent, replication,
      blockSize, supportedVersions) -> {
    return (HdfsFileStatus) createMethod.invoke(instance, src, masked,
        clientName, flag, createParent, replication, blockSize,
        supportedVersions, null, null);
  };
}
Example #27
Source File: FanOutOneBlockAsyncDFSOutputHelper.java From hbase with Apache License 2.0

private static FileCreator createFileCreator3() throws NoSuchMethodException {
  Method createMethod = ClientProtocol.class.getMethod("create", String.class,
      FsPermission.class, String.class, EnumSetWritable.class, boolean.class,
      short.class, long.class, CryptoProtocolVersion[].class, String.class);

  return (instance, src, masked, clientName, flag, createParent, replication,
      blockSize, supportedVersions) -> {
    return (HdfsFileStatus) createMethod.invoke(instance, src, masked,
        clientName, flag, createParent, replication, blockSize,
        supportedVersions, null);
  };
}
Example #28
Source File: FanOutOneBlockAsyncDFSOutputHelper.java From hbase with Apache License 2.0

private static EnumSetWritable<CreateFlag> getCreateFlags(boolean overwrite) {
  List<CreateFlag> flags = new ArrayList<>();
  flags.add(CreateFlag.CREATE);
  if (overwrite) {
    flags.add(CreateFlag.OVERWRITE);
  }
  if (SHOULD_REPLICATE_FLAG != null) {
    flags.add(SHOULD_REPLICATE_FLAG);
  }
  return new EnumSetWritable<>(EnumSet.copyOf(flags));
}
Example #29
Source File: NameNodeRpcServer.java From hadoop with Apache License 2.0

@Override // ClientProtocol
public HdfsFileStatus create(String src, FsPermission masked,
    String clientName, EnumSetWritable<CreateFlag> flag,
    boolean createParent, short replication, long blockSize,
    CryptoProtocolVersion[] supportedVersions) throws IOException {
  checkNNStartup();
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.create: file "
        + src + " for " + clientName + " at " + clientMachine);
  }
  if (!checkPathLength(src)) {
    throw new IOException("create: Pathname too long. Limit "
        + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
  }
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
      null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (HdfsFileStatus) cacheEntry.getPayload();
  }

  HdfsFileStatus status = null;
  try {
    PermissionStatus perm = new PermissionStatus(getRemoteUser()
        .getShortUserName(), null, masked);
    status = namesystem.startFile(src, perm, clientName, clientMachine,
        flag.get(), createParent, replication, blockSize, supportedVersions,
        cacheEntry != null);
  } finally {
    RetryCache.setState(cacheEntry, status != null, status);
  }

  metrics.incrFilesCreated();
  metrics.incrCreateFileOps();
  return status;
}
Example #30
Source File: NameNodeRpcServer.java From hadoop with Apache License 2.0

@Override // ClientProtocol
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws IOException {
  checkNNStartup();
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.append: file "
        + src + " for " + clientName + " at " + clientMachine);
  }
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
      null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (LastBlockWithStatus) cacheEntry.getPayload();
  }

  LastBlockWithStatus info = null;
  boolean success = false;
  try {
    info = namesystem.appendFile(src, clientName, clientMachine, flag.get(),
        cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success, info);
  }
  metrics.incrFilesAppended();
  return info;
}