Java Code Examples for org.apache.hadoop.security.UserGroupInformation#createUserForTesting()
The following examples show how to use org.apache.hadoop.security.UserGroupInformation#createUserForTesting().
You can go to the original project or source file by following the link above each example.
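Before the project-specific examples, here is a minimal sketch of the common pattern they all share: createUserForTesting() builds an in-memory UserGroupInformation (no Kerberos or OS account is involved), and doAs() runs an action as that user. The user and group names below are illustrative, not taken from any of the projects that follow.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class CreateUserForTestingSketch {
    public static void main(String[] args) throws Exception {
        // Create an in-memory test user "alice" in group "IT".
        UserGroupInformation alice =
                UserGroupInformation.createUserForTesting("alice", new String[] {"IT"});

        // Run an action as that user; Hadoop permission checks inside run()
        // see "alice" as the current user.
        alice.doAs((PrivilegedExceptionAction<Void>) () -> {
            System.out.println("Current user: "
                    + UserGroupInformation.getCurrentUser().getUserName());
            return null;
        });
    }
}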
Example 1
Source File: TestHadoopSecurityUtil.java From datacollector with Apache License 2.0
@Test
public void testGetProxyUser() throws Exception {
    final UserGroupInformation fooUgi =
            UserGroupInformation.createUserForTesting("foo", new String[] { "all" });
    Stage.Context context = mock(Stage.Context.class);
    com.streamsets.pipeline.api.Configuration configuration =
            mock(com.streamsets.pipeline.api.Configuration.class);
    when(context.getConfiguration()).thenReturn(configuration);
    List<Stage.ConfigIssue> issues = new ArrayList<>();
    UserGroupInformation ugi = HadoopSecurityUtil.getProxyUser(
            "proxy", context, fooUgi, issues, "config", "userName"
    );
    Assert.assertEquals("proxy", ugi.getUserName());
}
Example 2
Source File: HIVERangerAuthorizerTest.java From ranger with Apache License 2.0
@Test
public void testHiveSelectSpecificColumnAsAlice() throws Exception {
    UserGroupInformation ugi =
            UserGroupInformation.createUserForTesting("alice", new String[] {"IT"});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            String url = "jdbc:hive2://localhost:" + port + "/rangerauthz";
            Connection connection = DriverManager.getConnection(url, "alice", "alice");
            Statement statement = connection.createStatement();

            ResultSet resultSet =
                    statement.executeQuery("SELECT count FROM words where count == '100'");
            if (resultSet.next()) {
                Assert.assertEquals(100, resultSet.getInt(1));
            } else {
                Assert.fail("No ResultSet found");
            }

            statement.close();
            connection.close();
            return null;
        }
    });
}
Example 3
Source File: StramClientUtilsTest.java From attic-apex-core with Apache License 2.0
/**
 * apex.dfsRootDirectory set: absolute path with %USER_NAME% and scheme e.g. file:/x/%USER_NAME%/z
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void getApexDFSRootDirWithSchemeAndVar() throws IOException, InterruptedException {
    final Configuration conf = new YarnConfiguration(new Configuration(false));
    conf.set(StramClientUtils.APEX_APP_DFS_ROOT_DIR, "file:/x/%USER_NAME%/z");
    conf.setBoolean(StramUserLogin.DT_APP_PATH_IMPERSONATED, true);
    final FileSystem fs = FileSystem.newInstance(conf);

    UserGroupInformation testUser =
            UserGroupInformation.createUserForTesting("testUser1", new String[]{""});
    UserGroupInformation.setLoginUser(testUser);
    UserGroupInformation doAsUser =
            UserGroupInformation.createUserForTesting("impersonated", new String[]{""});
    doAsUser.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            Path path = StramClientUtils.getApexDFSRootDir(fs, conf);
            Assert.assertEquals("file:/x/impersonated/z", path.toString());
            return null;
        }
    });
}
Example 4
Source File: TestHDFSIntegration.java From incubator-sentry with Apache License 2.0
private void verifyAccessToPath(String user, String group, String path,
        boolean hasPermission) throws Exception {
    Path p = new Path(path);
    UserGroupInformation hadoopUser =
            UserGroupInformation.createUserForTesting(user, new String[] {group});
    FileSystem fs = DFSTestUtil.getFileSystemAs(hadoopUser, hadoopConf);
    try {
        fs.listFiles(p, true);
        if (!hasPermission) {
            // Listing succeeded although the user should not have access.
            Assert.fail("Expected listing files to fail");
        }
    } catch (Exception e) {
        if (hasPermission) {
            throw e;
        }
    }
}
Example 5
Source File: HBaseRangerAuthorizationTest.java From ranger with Apache License 2.0
@Test
public void testReadRowFromColFam2AsGroupIT() throws Exception {
    final Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost");
    conf.set("hbase.zookeeper.property.clientPort", "" + port);
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");

    String user = "public";
    UserGroupInformation ugi =
            UserGroupInformation.createUserForTesting(user, new String[] {"IT"});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Connection conn = ConnectionFactory.createConnection(conf);
            Table table = conn.getTable(TableName.valueOf("temp"));

            // Read a row
            Get get = new Get(Bytes.toBytes("row1"));
            Result result = table.get(get);
            byte[] valResult = result.getValue(Bytes.toBytes("colfam2"), Bytes.toBytes("col1"));
            Assert.assertNull(valResult);

            conn.close();
            return null;
        }
    });
}
Example 6
Source File: TestReadWhileWriting.java From hadoop with Apache License 2.0
static void checkFile(Path p, int expectedsize, final Configuration conf)
        throws IOException, InterruptedException {
    //open the file with another user account
    final String username = UserGroupInformation.getCurrentUser().getShortUserName()
            + "_" + ++userCount;

    UserGroupInformation ugi =
            UserGroupInformation.createUserForTesting(username, new String[] {"supergroup"});

    final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
    final HdfsDataInputStream in = (HdfsDataInputStream) fs.open(p);

    //Check visible length
    Assert.assertTrue(in.getVisibleLength() >= expectedsize);

    //Able to read?
    for (int i = 0; i < expectedsize; i++) {
        Assert.assertEquals((byte) i, (byte) in.read());
    }

    in.close();
}
Example 7
Source File: StramClientUtilsTest.java From attic-apex-core with Apache License 2.0
/**
 * apex.dfsRootDirectory set: absolute path e.g. /x/y/z
 * @throws IOException
 */
@Test
public void getApexDFSRootDirAbsPath() throws IOException {
    Configuration conf = new YarnConfiguration(new Configuration(false));
    conf.set(StramClientUtils.APEX_APP_DFS_ROOT_DIR, "/x/y/z");
    conf.setBoolean(StramUserLogin.DT_APP_PATH_IMPERSONATED, false);
    FileSystem fs = FileSystem.newInstance(conf);

    UserGroupInformation testUser =
            UserGroupInformation.createUserForTesting("testUser1", new String[]{""});
    UserGroupInformation.setLoginUser(testUser);
    Path path = StramClientUtils.getApexDFSRootDir(fs, conf);
    Assert.assertEquals(fs.getHomeDirectory() + "/datatorrent", path.toString());
}
Example 8
Source File: TestHFileArchiving.java From hbase with Apache License 2.0
@Test(expected = IOException.class)
public void testArchiveRegionsWhenPermissionDenied() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    List<HRegion> regions = initTableForArchivingRegions(tableName);

    // now attempt to depose the regions
    Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration());
    Path tableDir = CommonFSUtils.getTableDir(rootDir, regions.get(0).getRegionInfo().getTable());
    List<Path> regionDirList = regions.stream()
            .map(region -> FSUtils.getRegionDirFromTableDir(tableDir, region.getRegionInfo()))
            .collect(Collectors.toList());

    // To create a permission denied error, we do archive regions as a non-current user
    UserGroupInformation ugi =
            UserGroupInformation.createUserForTesting("foo1234", new String[]{"group1"});

    try {
        ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
            FileSystem fs = UTIL.getTestFileSystem();
            HFileArchiver.archiveRegions(UTIL.getConfiguration(), fs, rootDir, tableDir,
                    regionDirList);
            return null;
        });
    } catch (IOException e) {
        assertTrue(e.getCause().getMessage().contains("Permission denied"));
        throw e;
    } finally {
        UTIL.deleteTable(tableName);
    }
}
Example 9
Source File: TestSecureIPC.java From hbase with Apache License 2.0
@Test
public void testRpcFallbackToSimpleAuth() throws Exception {
    String clientUsername = "testuser";
    UserGroupInformation clientUgi =
            UserGroupInformation.createUserForTesting(clientUsername,
                    new String[] { clientUsername });

    // check that the client user is insecure
    assertNotSame(ugi, clientUgi);
    assertEquals(AuthenticationMethod.SIMPLE, clientUgi.getAuthenticationMethod());
    assertEquals(clientUsername, clientUgi.getUserName());

    clientConf.set(User.HBASE_SECURITY_CONF_KEY, "simple");
    serverConf.setBoolean(RpcServer.FALLBACK_TO_INSECURE_CLIENT_AUTH, true);
    callRpcService(User.create(clientUgi));
}
Example 10
Source File: TestLease.java From hadoop with Apache License 2.0
@SuppressWarnings("unchecked") @Test public void testFactory() throws Exception { final String[] groups = new String[]{"supergroup"}; final UserGroupInformation[] ugi = new UserGroupInformation[3]; for(int i = 0; i < ugi.length; i++) { ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups); } Mockito.doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0)).when(mcp).getFileInfo(anyString()); Mockito .doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0)) .when(mcp) .create(anyString(), (FsPermission) anyObject(), anyString(), (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(), anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject()); final Configuration conf = new Configuration(); final DFSClient c1 = createDFSClientAs(ugi[0], conf); FSDataOutputStream out1 = createFsOut(c1, "/out1"); final DFSClient c2 = createDFSClientAs(ugi[0], conf); FSDataOutputStream out2 = createFsOut(c2, "/out2"); Assert.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer()); final DFSClient c3 = createDFSClientAs(ugi[1], conf); FSDataOutputStream out3 = createFsOut(c3, "/out3"); Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer()); final DFSClient c4 = createDFSClientAs(ugi[1], conf); FSDataOutputStream out4 = createFsOut(c4, "/out4"); Assert.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer()); final DFSClient c5 = createDFSClientAs(ugi[2], conf); FSDataOutputStream out5 = createFsOut(c5, "/out5"); Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer()); Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer()); }
Example 11
Source File: GetGroupsTestBase.java From big-c with Apache License 2.0
@Before
public void setUpUsers() throws IOException {
    // Make sure the current user's info is in the list of test users.
    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    UserGroupInformation.createUserForTesting(currentUser.getUserName(),
            currentUser.getGroupNames());

    testUser1 = UserGroupInformation.createUserForTesting("foo", new String[]{"bar", "baz"});
    testUser2 = UserGroupInformation.createUserForTesting("fiz", new String[]{"buz", "boz"});
}
Example 12
Source File: TestEncryptionZones.java From hadoop with Apache License 2.0
/**
 * Test listing encryption zones as a non super user.
 */
@Test(timeout = 60000)
public void testListEncryptionZonesAsNonSuperUser() throws Exception {
    final UserGroupInformation user =
            UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });

    final Path testRoot = new Path("/tmp/TestEncryptionZones");
    final Path superPath = new Path(testRoot, "superuseronly");
    final Path allPath = new Path(testRoot, "accessall");

    fsWrapper.mkdir(superPath, new FsPermission((short) 0700), true);
    dfsAdmin.createEncryptionZone(superPath, TEST_KEY);

    fsWrapper.mkdir(allPath, new FsPermission((short) 0707), true);
    dfsAdmin.createEncryptionZone(allPath, TEST_KEY);

    user.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
            final HdfsAdmin userAdmin =
                    new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
            try {
                userAdmin.listEncryptionZones();
            } catch (AccessControlException e) {
                assertExceptionContains("Superuser privilege is required", e);
            }
            return null;
        }
    });
}
Example 13
Source File: StramClientUtilsTest.java From attic-apex-core with Apache License 2.0
/**
 * apex.dfsRootDirectory set: absolute path with scheme e.g. file:/p/q/r
 * @throws IOException
 */
@Test
public void getApexDFSRootDirScheme() throws IOException {
    Configuration conf = new YarnConfiguration(new Configuration(false));
    conf.set(StramClientUtils.APEX_APP_DFS_ROOT_DIR, "file:/p/q/r");
    conf.setBoolean(StramUserLogin.DT_APP_PATH_IMPERSONATED, false);
    FileSystem fs = FileSystem.newInstance(conf);

    UserGroupInformation testUser =
            UserGroupInformation.createUserForTesting("testUser1", new String[]{""});
    UserGroupInformation.setLoginUser(testUser);
    Path path = StramClientUtils.getApexDFSRootDir(fs, conf);
    Assert.assertEquals(fs.getHomeDirectory() + "/datatorrent", path.toString());
}
Example 14
Source File: RangerHdfsAuthorizerTest.java From ranger with Apache License 2.0
/** * Checks that the <b>directory</b> access is <b>allowed</b> for the given user in the given groups. * Throws an exception, if not. */ public void checkDirAccess(FsAction access, String userName, String... groups) throws AccessControlException { final UserGroupInformation user = UserGroupInformation.createUserForTesting(userName, groups); rangerControlEnforcer.checkPermission(FILE_OWNER, FILE_GROUP, user, Arrays.copyOf(attributes, attributes.length - 1), Arrays.copyOf(nodes, nodes.length - 1), new byte[0][0], SNAPSHOT_ID, path, ancestorIndex - 1, false /* doCheckOwner */, null /* ancestorAccess */, null /* parentAccess */ , access, null /* subAccess */ , false /* ignoreEmptyDir */); }
Example 15
Source File: KafkaRangerTopicCreationTest.java From ranger with Apache License 2.0
@org.junit.BeforeClass
public static void setup() throws Exception {
    String basedir = System.getProperty("basedir");
    if (basedir == null) {
        basedir = new File(".").getCanonicalPath();
    }
    System.out.println("Base Dir " + basedir);

    configureKerby(basedir);

    // JAAS Config file - We need to point to the correct keytab files
    Path path = FileSystems.getDefault().getPath(basedir, "/src/test/resources/kafka_kerberos.jaas");
    String content = new String(Files.readAllBytes(path), StandardCharsets.UTF_8);
    content = content.replaceAll("<basedir>", basedir);
    //content = content.replaceAll("zookeeper/localhost", "zookeeper/" + address);

    Path path2 = FileSystems.getDefault().getPath(basedir, "/target/test-classes/kafka_kerberos.jaas");
    Files.write(path2, content.getBytes(StandardCharsets.UTF_8));

    System.setProperty("java.security.auth.login.config", path2.toString());

    // Set up Zookeeper to require SASL
    Map<String, Object> zookeeperProperties = new HashMap<>();
    zookeeperProperties.put("authProvider.1", "org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
    zookeeperProperties.put("requireClientAuthScheme", "sasl");
    zookeeperProperties.put("jaasLoginRenew", "3600000");

    InstanceSpec instanceSpec =
            new InstanceSpec(null, -1, -1, -1, true, 1, -1, -1, zookeeperProperties, "localhost");

    zkServer = new TestingServer(instanceSpec, true);

    // Get a random port
    ServerSocket serverSocket = new ServerSocket(0);
    port = serverSocket.getLocalPort();
    serverSocket.close();

    tempDir = Files.createTempDirectory("kafka");

    LOG.info("Port is {}", port);
    LOG.info("Temporary directory is at {}", tempDir);

    final Properties props = new Properties();
    props.put("broker.id", 1);
    props.put("host.name", "localhost");
    props.put("port", port);
    props.put("log.dir", tempDir.toString());
    props.put("zookeeper.connect", zkServer.getConnectString());
    props.put("replica.socket.timeout.ms", "1500");
    props.put("controlled.shutdown.enable", Boolean.TRUE.toString());

    // Enable SASL_PLAINTEXT
    props.put("listeners", "SASL_PLAINTEXT://localhost:" + port);
    props.put("security.inter.broker.protocol", "SASL_PLAINTEXT");
    props.put("sasl.enabled.mechanisms", "GSSAPI");
    props.put("sasl.mechanism.inter.broker.protocol", "GSSAPI");
    props.put("sasl.kerberos.service.name", "kafka");
    props.put("offsets.topic.replication.factor", (short) 1);
    props.put("offsets.topic.num.partitions", 1);

    // Plug in Apache Ranger authorizer
    props.put("authorizer.class.name", "org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer");

    // Create users for testing
    UserGroupInformation.createUserForTesting("[email protected]", new String[] {"public"});
    UserGroupInformation.createUserForTesting("kafka/[email protected]", new String[] {"IT"});

    KafkaConfig config = new KafkaConfig(props);
    kafkaServer = new KafkaServerStartable(config);
    kafkaServer.startup();
}
Example 16
Source File: TestSnapshottableDirListing.java From hadoop with Apache License 2.0
/**
 * Test the listing with different user names to make sure only directories
 * that are owned by the user are listed.
 */
@Test(timeout = 60000)
public void testListWithDifferentUser() throws Exception {
    cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

    // first make dir1 and dir2 snapshottable
    hdfs.allowSnapshot(dir1);
    hdfs.allowSnapshot(dir2);
    hdfs.setPermission(root, FsPermission.valueOf("-rwxrwxrwx"));

    // create two dirs and make them snapshottable under the name of user1
    UserGroupInformation ugi1 = UserGroupInformation.createUserForTesting(
            "user1", new String[] { "group1" });
    DistributedFileSystem fs1 = (DistributedFileSystem) DFSTestUtil
            .getFileSystemAs(ugi1, conf);
    Path dir1_user1 = new Path("/dir1_user1");
    Path dir2_user1 = new Path("/dir2_user1");
    fs1.mkdirs(dir1_user1);
    fs1.mkdirs(dir2_user1);
    hdfs.allowSnapshot(dir1_user1);
    hdfs.allowSnapshot(dir2_user1);

    // user2
    UserGroupInformation ugi2 = UserGroupInformation.createUserForTesting(
            "user2", new String[] { "group2" });
    DistributedFileSystem fs2 = (DistributedFileSystem) DFSTestUtil
            .getFileSystemAs(ugi2, conf);
    Path dir_user2 = new Path("/dir_user2");
    Path subdir_user2 = new Path(dir_user2, "subdir");
    fs2.mkdirs(dir_user2);
    fs2.mkdirs(subdir_user2);
    hdfs.allowSnapshot(dir_user2);
    hdfs.allowSnapshot(subdir_user2);

    // super user
    String supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
            DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
    UserGroupInformation superUgi = UserGroupInformation.createUserForTesting(
            "superuser", new String[] { supergroup });
    DistributedFileSystem fs3 = (DistributedFileSystem) DFSTestUtil
            .getFileSystemAs(superUgi, conf);

    // list the snapshottable dirs for superuser
    SnapshottableDirectoryStatus[] dirs = fs3.getSnapshottableDirListing();
    // 6 snapshottable dirs: dir1, dir2, dir1_user1, dir2_user1, dir_user2, and
    // subdir_user2
    assertEquals(6, dirs.length);

    // list the snapshottable dirs for user1
    dirs = fs1.getSnapshottableDirListing();
    // 2 dirs owned by user1: dir1_user1 and dir2_user1
    assertEquals(2, dirs.length);
    assertEquals(dir1_user1, dirs[0].getFullPath());
    assertEquals(dir2_user1, dirs[1].getFullPath());

    // list the snapshottable dirs for user2
    dirs = fs2.getSnapshottableDirListing();
    // 2 dirs owned by user2: dir_user2 and subdir_user2
    assertEquals(2, dirs.length);
    assertEquals(dir_user2, dirs[0].getFullPath());
    assertEquals(subdir_user2, dirs[1].getFullPath());
}
Example 17
Source File: KafkaRangerAuthorizerSASLSSLTest.java From ranger with Apache License 2.0
@org.junit.BeforeClass
public static void setup() throws Exception {
    // JAAS Config file
    String basedir = System.getProperty("basedir");
    if (basedir == null) {
        basedir = new File(".").getCanonicalPath();
    }
    File f = new File(basedir + "/src/test/resources/kafka_plain.jaas");
    System.setProperty("java.security.auth.login.config", f.getPath());

    // Create keys
    String serviceDN = "CN=Service,O=Apache,L=Dublin,ST=Leinster,C=IE";
    String clientDN = "CN=Client,O=Apache,L=Dublin,ST=Leinster,C=IE";

    // Create a truststore
    KeyStore keystore = KeyStore.getInstance(KeyStore.getDefaultType());
    keystore.load(null, "security".toCharArray());

    serviceKeystorePath = KafkaTestUtils.createAndStoreKey(serviceDN, serviceDN,
            BigInteger.valueOf(30), "sspass", "myservicekey", "skpass", keystore);
    clientKeystorePath = KafkaTestUtils.createAndStoreKey(clientDN, clientDN,
            BigInteger.valueOf(31), "cspass", "myclientkey", "ckpass", keystore);

    File truststoreFile = File.createTempFile("kafkatruststore", ".jks");
    try (OutputStream output = new FileOutputStream(truststoreFile)) {
        keystore.store(output, "security".toCharArray());
    }
    truststorePath = truststoreFile.getPath();

    zkServer = new TestingServer();

    // Get a random port
    ServerSocket serverSocket = new ServerSocket(0);
    port = serverSocket.getLocalPort();
    serverSocket.close();

    final Properties props = new Properties();
    props.put("broker.id", 1);
    props.put("host.name", "localhost");
    props.put("port", port);
    props.put("log.dir", "/tmp/kafka");
    props.put("zookeeper.connect", zkServer.getConnectString());
    props.put("replica.socket.timeout.ms", "1500");
    props.put("controlled.shutdown.enable", Boolean.TRUE.toString());

    // Enable SASL_SSL
    props.put("listeners", "SASL_SSL://localhost:" + port);
    props.put("security.inter.broker.protocol", "SASL_SSL");
    props.put("sasl.enabled.mechanisms", "PLAIN");
    props.put("sasl.mechanism.inter.broker.protocol", "PLAIN");
    props.put("offsets.topic.replication.factor", (short) 1);
    props.put("offsets.topic.num.partitions", 1);

    props.put("ssl.keystore.location", serviceKeystorePath);
    props.put("ssl.keystore.password", "sspass");
    props.put("ssl.key.password", "skpass");
    props.put("ssl.truststore.location", truststorePath);
    props.put("ssl.truststore.password", "security");

    // Plug in Apache Ranger authorizer
    props.put("authorizer.class.name", "org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer");

    // Create users for testing
    UserGroupInformation.createUserForTesting("alice", new String[] {"IT"});

    KafkaConfig config = new KafkaConfig(props);
    kafkaServer = new KafkaServerStartable(config);
    kafkaServer.startup();

    // Create some topics
    KafkaTestUtils.createSomeTopics(zkServer.getConnectString());
}
Example 18
Source File: TestMiniMRWithDFSWithDistinctUsers.java From big-c with Apache License 2.0
static UserGroupInformation createUGI(String name, boolean issuper) {
    String group = issuper ? "supergroup" : name;
    return UserGroupInformation.createUserForTesting(name, new String[]{group});
}
Example 19
Source File: TestFsShellPermission.java From big-c with Apache License 2.0
static UserGroupInformation createUGI(String ownername, String groupName) {
    return UserGroupInformation.createUserForTesting(ownername,
            new String[]{groupName});
}
Example 20
Source File: TestSecureOzoneContainer.java From hadoop-ozone with Apache License 2.0
@Test
public void testCreateOzoneContainer() throws Exception {
    LOG.info("Test case: requireBlockToken: {} hasBlockToken: {} " +
            "blockTokenExpired: {}.", requireBlockToken, hasBlockToken,
            blockTokeExpired);
    conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED, requireBlockToken);

    long containerID = ContainerTestHelper.getTestContainerID();
    OzoneContainer container = null;
    System.out.println(System.getProperties().getProperty("java.library.path"));
    try {
        Pipeline pipeline = MockPipeline.createSingleNodePipeline();
        conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath());
        conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline
                .getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE)
                .getValue());
        conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);

        DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
        container = new OzoneContainer(dn, conf, getContext(dn), caClient);
        //Set scmId and manually start ozone container.
        container.start(UUID.randomUUID().toString());

        UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
                "user1", new String[] {"usergroup"});
        long expiryDate = (blockTokeExpired) ?
                Time.now() - 60 * 60 * 2 : Time.now() + 60 * 60 * 24;

        OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier(
                "testUser", "cid:lud:bcsid",
                EnumSet.allOf(AccessModeProto.class),
                expiryDate, "1234", 128L);

        int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
        if (port == 0) {
            port = secConfig.getConfiguration().getInt(OzoneConfigKeys
                    .DFS_CONTAINER_IPC_PORT, DFS_CONTAINER_IPC_PORT_DEFAULT);
        }
        secretManager.start(caClient);
        Token<OzoneBlockTokenIdentifier> token = secretManager.generateToken(
                "123", EnumSet.allOf(AccessModeProto.class), RandomUtils.nextLong());
        if (hasBlockToken) {
            ugi.addToken(token);
        }

        ugi.doAs((PrivilegedAction<Void>) () -> {
            try {
                XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf);
                client.connect(token.encodeToUrlString());
                if (hasBlockToken) {
                    createContainerForTesting(client, containerID, token);
                } else {
                    createContainerForTesting(client, containerID, null);
                }
            } catch (Exception e) {
                if (requireBlockToken && hasBlockToken && !blockTokeExpired) {
                    LOG.error("Unexpected error. ", e);
                    fail("Client with BlockToken should succeed when block token is" +
                            " required.");
                }
                if (requireBlockToken && hasBlockToken && blockTokeExpired) {
                    assertTrue("Receive expected exception",
                            e instanceof SCMSecurityException);
                }
                if (requireBlockToken && !hasBlockToken) {
                    assertTrue("Receive expected exception",
                            e instanceof IOException);
                }
            }
            return null;
        });
    } finally {
        if (container != null) {
            container.stop();
        }
    }
}