Java Code Examples for org.apache.hadoop.hbase.security.User#isHBaseSecurityEnabled()
The following examples show how to use org.apache.hadoop.hbase.security.User#isHBaseSecurityEnabled(). Each example is taken from an open source project; the source file and license are noted above the code.
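As a primer, here is a minimal, self-contained sketch (not taken from any of the projects below; class name and output strings are illustrative) of the gate-then-act pattern most of these examples share: check whether Kerberos-based security is configured, then branch accordingly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.User;

public class SecurityCheckExample {
    public static void main(String[] args) {
        // Loads hbase-default.xml and hbase-site.xml from the classpath
        Configuration conf = HBaseConfiguration.create();
        if (User.isHBaseSecurityEnabled(conf)) {
            // Secure cluster: obtain tokens, register secure endpoints, etc.
            System.out.println("HBase security is enabled");
        } else {
            System.out.println("HBase is using simple authentication");
        }
    }
}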
Example 1
Source File: DstClusterUtil.java from kylin (Apache License 2.0)
public void deployCoprocessor(HTableDescriptor tableDesc, String localCoprocessorJar) throws IOException {
    List<String> existingCoprocessors = tableDesc.getCoprocessors();
    for (String existingCoprocessor : existingCoprocessors) {
        tableDesc.removeCoprocessor(existingCoprocessor);
    }

    Path hdfsCoprocessorJar = DeployCoprocessorCLI.uploadCoprocessorJar(localCoprocessorJar, hbaseFS,
            hdfsWorkingDirectory, null);
    if (User.isHBaseSecurityEnabled(hbaseConf)) {
        // add coprocessor for bulk load
        tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
    }
    DeployCoprocessorCLI.addCoprocessorOnHTable(tableDesc, hdfsCoprocessorJar);

    logger.info("deployed hbase table {} with coprocessor.", tableDesc.getTableName());
}
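For reference, the check used throughout these examples reads a single property: User.isHBaseSecurityEnabled(conf) returns true when hbase.security.authentication is set to kerberos. A sketch of toggling it in code (illustrative only; real deployments configure this in hbase-site.xml):

Configuration conf = HBaseConfiguration.create();
conf.set("hadoop.security.authentication", "kerberos"); // Hadoop-level counterpart
conf.set("hbase.security.authentication", "kerberos");  // the key this method reads
System.out.println(User.isHBaseSecurityEnabled(conf));  // prints "true"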
Example 2
Source File: HBaseTestingUtility.java from hbase (Apache License 2.0)
/**
 * This method clones the passed <code>c</code> configuration, setting a new
 * user into the clone. Use it when getting new instances of FileSystem. Only
 * works for DistributedFileSystem w/o Kerberos.
 * @param c Initial configuration
 * @param differentiatingSuffix Suffix to differentiate this user from others.
 * @return A new configuration instance with a different user set into it.
 * @throws IOException
 */
public static User getDifferentUser(final Configuration c, final String differentiatingSuffix)
        throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
        return User.getCurrent();
    }
    // Else distributed filesystem. Make a new instance per daemon. Below
    // code is taken from the AppendTestUtil over in hdfs.
    String username = User.getCurrent().getName() + differentiatingSuffix;
    User user = User.createUserForTesting(c, username, new String[]{"supergroup"});
    return user;
}
Example 3
Source File: IntegrationTestIngestWithACL.java from hbase (Apache License 2.0)
@Override
protected String[] getArgsForLoadTestTool(String mode, String modeSpecificArg, long startKey,
        long numKeys) {
    String[] args = super.getArgsForLoadTestTool(mode, modeSpecificArg, startKey, numKeys);
    List<String> tmp = new ArrayList<>(Arrays.asList(args));
    tmp.add(HYPHEN + LoadTestTool.OPT_GENERATOR);
    StringBuilder sb = new StringBuilder(LoadTestDataGeneratorWithACL.class.getName());
    sb.append(COLON);
    if (User.isHBaseSecurityEnabled(getConf())) {
        sb.append(authnFileName);
        sb.append(COLON);
    }
    sb.append(superUser);
    sb.append(COLON);
    sb.append(userNames);
    sb.append(COLON);
    sb.append(Integer.toString(SPECIAL_PERM_CELL_INSERTION_FACTOR));
    tmp.add(sb.toString());
    return tmp.toArray(new String[tmp.size()]);
}
Example 4
Source File: IntegrationTestIngestWithACL.java from hbase (Apache License 2.0)
@Override
protected void processOptions(CommandLine cmd) {
    super.processOptions(cmd);
    if (cmd.hasOption(OPT_SUPERUSER)) {
        superUser = cmd.getOptionValue(OPT_SUPERUSER);
    }
    if (cmd.hasOption(OPT_USERS)) {
        userNames = cmd.getOptionValue(OPT_USERS);
    }
    if (User.isHBaseSecurityEnabled(getConf())) {
        boolean authFileNotFound = false;
        if (cmd.hasOption(OPT_AUTHN)) {
            authnFileName = cmd.getOptionValue(OPT_AUTHN);
            if (StringUtils.isEmpty(authnFileName)) {
                authFileNotFound = true;
            }
        } else {
            authFileNotFound = true;
        }
        if (authFileNotFound) {
            super.printUsage();
            System.exit(EXIT_FAILURE);
        }
    }
}
Example 5
Source File: HBaseTap.java from SpyGlass (Apache License 2.0)
private void obtainToken(JobConf conf) {
    if (User.isHBaseSecurityEnabled(conf)) {
        String user = conf.getUser();
        LOG.info("obtaining HBase token for: {}", user);
        try {
            UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
            user = currentUser.getUserName();
            Credentials credentials = conf.getCredentials();
            for (Token t : currentUser.getTokens()) {
                LOG.debug("Token {} is available", t);
                if ("HBASE_AUTH_TOKEN".equalsIgnoreCase(t.getKind().toString())) {
                    credentials.addToken(t.getKind(), t);
                }
            }
        } catch (IOException e) {
            throw new TapException("Unable to obtain HBase auth token for " + user, e);
        }
    }
}
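A usage note: the literal "HBASE_AUTH_TOKEN" matched here is the token kind HBase uses for its delegation tokens (AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE). Examples 9 and 13 below show the complementary producer side, where TokenUtil obtains and caches such a token for the current user.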
Example 6
Source File: MasterRpcServices.java from hbase (Apache License 2.0)
/**
 * Returns the security capabilities in effect on the cluster
 */
@Override
public SecurityCapabilitiesResponse getSecurityCapabilities(RpcController controller,
        SecurityCapabilitiesRequest request) throws ServiceException {
    SecurityCapabilitiesResponse.Builder response = SecurityCapabilitiesResponse.newBuilder();
    try {
        master.checkInitialized();
        Set<SecurityCapabilitiesResponse.Capability> capabilities = new HashSet<>();
        // Authentication
        if (User.isHBaseSecurityEnabled(master.getConfiguration())) {
            capabilities.add(SecurityCapabilitiesResponse.Capability.SECURE_AUTHENTICATION);
        } else {
            capabilities.add(SecurityCapabilitiesResponse.Capability.SIMPLE_AUTHENTICATION);
        }
        // A coprocessor that implements AccessControlService can provide AUTHORIZATION and
        // CELL_AUTHORIZATION
        if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) {
            if (AccessChecker.isAuthorizationSupported(master.getConfiguration())) {
                capabilities.add(SecurityCapabilitiesResponse.Capability.AUTHORIZATION);
            }
            if (AccessController.isCellAuthorizationSupported(master.getConfiguration())) {
                capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_AUTHORIZATION);
            }
        }
        // A coprocessor that implements VisibilityLabelsService can provide CELL_VISIBILITY.
        if (master.cpHost != null && hasVisibilityLabelsServiceCoprocessor(master.cpHost)) {
            if (VisibilityController.isCellAuthorizationSupported(master.getConfiguration())) {
                capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_VISIBILITY);
            }
        }
        response.addAllCapabilities(capabilities);
    } catch (IOException e) {
        throw new ServiceException(e);
    }
    return response.build();
}
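On the client side, this RPC backs Admin#getSecurityCapabilities(). A minimal sketch of querying a cluster's capabilities (connection settings assumed to come from the classpath configuration; class name is illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.security.SecurityCapability;

public class SecurityCapabilitiesExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // SECURE_AUTHENTICATION vs. SIMPLE_AUTHENTICATION mirrors the
            // User.isHBaseSecurityEnabled() branch in the server code above
            for (SecurityCapability cap : admin.getSecurityCapabilities()) {
                System.out.println("capability: " + cap);
            }
        }
    }
}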
Example 7
Source File: BuiltInProviderSelector.java from hbase (Apache License 2.0)
@Override
public Pair<SaslClientAuthenticationProvider, Token<? extends TokenIdentifier>> selectProvider(
        String clusterId, User user) {
    requireNonNull(clusterId, "Null clusterId was given");
    requireNonNull(user, "Null user was given");

    // Superfluous: we don't do SIMPLE auth over SASL, but we should to simplify.
    if (!User.isHBaseSecurityEnabled(conf)) {
        return new Pair<>(simpleAuth, null);
    }

    final Text clusterIdAsText = new Text(clusterId);

    // Must be digest auth, look for a token.
    // TestGenerateDelegationToken is written expecting DT is used when DT and Krb are both present
    // (for whatever that's worth).
    for (Token<? extends TokenIdentifier> token : user.getTokens()) {
        // We need to check for two things:
        //   1. This token is for the HBase cluster we want to talk to
        //   2. We have a supporting client implementation to handle the token (the "kind" of token)
        if (clusterIdAsText.equals(token.getService()) && digestAuthTokenKind.equals(token.getKind())) {
            return new Pair<>(digestAuth, token);
        }
    }

    // Unwrap PROXY auth'n method if that's what we have coming in.
    final UserGroupInformation currentUser = user.getUGI();
    // May be null if Hadoop AuthenticationMethod is PROXY
    final UserGroupInformation realUser = currentUser.getRealUser();
    if (currentUser.hasKerberosCredentials()
            || (realUser != null && realUser.hasKerberosCredentials())) {
        return new Pair<>(krbAuth, null);
    }

    // This indicates that a client is requesting some authentication mechanism which the servers
    // don't know how to process (e.g. there is no provider which can support it). This may be
    // a bug or simply a misconfiguration of client *or* server.
    LOG.warn("No matching SASL authentication provider and supporting token found from providers"
            + " for user: {}", user);
    return null;
}
Example 8
Source File: CubeHTableUtil.java from kylin-on-parquet-v2 (Apache License 2.0)
public static void createHTable(CubeSegment cubeSegment, byte[][] splitKeys) throws IOException {
    String tableName = cubeSegment.getStorageLocationIdentifier();
    CubeInstance cubeInstance = cubeSegment.getCubeInstance();
    CubeDesc cubeDesc = cubeInstance.getDescriptor();
    KylinConfig kylinConfig = cubeDesc.getConfig();

    HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(cubeSegment.getStorageLocationIdentifier()));
    tableDesc.setValue(HTableDescriptor.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName());
    tableDesc.setValue(IRealizationConstants.HTableTag, kylinConfig.getMetadataUrlPrefix());
    tableDesc.setValue(IRealizationConstants.HTableCreationTime, String.valueOf(System.currentTimeMillis()));

    if (!StringUtils.isEmpty(kylinConfig.getKylinOwner())) {
        // HTableOwner is the team that provides kylin service
        tableDesc.setValue(IRealizationConstants.HTableOwner, kylinConfig.getKylinOwner());
    }

    String commitInfo = KylinVersion.getGitCommitInfo();
    if (!StringUtils.isEmpty(commitInfo)) {
        tableDesc.setValue(IRealizationConstants.HTableGitTag, commitInfo);
    }

    // HTableUser is the cube owner, which will be the "user"
    tableDesc.setValue(IRealizationConstants.HTableUser, cubeInstance.getOwner());

    tableDesc.setValue(IRealizationConstants.HTableSegmentTag, cubeSegment.toString());

    Configuration conf = HBaseConnection.getCurrentHBaseConfiguration();
    Connection conn = HBaseConnection.get(kylinConfig.getStorageUrl());
    Admin admin = conn.getAdmin();

    try {
        if (User.isHBaseSecurityEnabled(conf)) {
            // add coprocessor for bulk load
            tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
        }

        for (HBaseColumnFamilyDesc cfDesc : cubeDesc.getHbaseMapping().getColumnFamily()) {
            HColumnDescriptor cf = createColumnFamily(kylinConfig, cfDesc.getName(), cfDesc.isMemoryHungry());
            tableDesc.addFamily(cf);
        }

        if (admin.tableExists(TableName.valueOf(tableName))) {
            // admin.disableTable(tableName);
            // admin.deleteTable(tableName);
            throw new RuntimeException("HBase table " + tableName + " exists!");
        }

        DeployCoprocessorCLI.deployCoprocessor(tableDesc);

        admin.createTable(tableDesc, splitKeys);
        Preconditions.checkArgument(admin.isTableAvailable(TableName.valueOf(tableName)),
                "table " + tableName + " created, but is not available due to some reasons");
        logger.info("create hbase table " + tableName + " done.");
    } finally {
        IOUtils.closeQuietly(admin);
    }
}
Example 9
Source File: PingHBaseCLI.java from kylin-on-parquet-v2 (Apache License 2.0)
public static void main(String[] args) throws IOException {
    String hbaseTable = args[0];

    System.out.println("Hello friend.");

    Configuration hconf = HBaseConnection.getCurrentHBaseConfiguration();

    if (User.isHBaseSecurityEnabled(hconf)) {
        try {
            System.out.println("--------------Getting kerberos credential for user "
                    + UserGroupInformation.getCurrentUser().getUserName());
            TokenUtil.obtainAndCacheToken(hconf, UserGroupInformation.getCurrentUser());
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            System.out.println("--------------Error while getting kerberos credential for user "
                    + UserGroupInformation.getCurrentUser().getUserName());
        }
    }

    Scan scan = new Scan();
    int limit = 20;

    Connection conn = null;
    Table table = null;
    ResultScanner scanner = null;
    try {
        conn = ConnectionFactory.createConnection(hconf);
        table = conn.getTable(TableName.valueOf(hbaseTable));
        scanner = table.getScanner(scan);
        int count = 0;
        for (Result r : scanner) {
            byte[] rowkey = r.getRow();
            System.out.println(Bytes.toStringBinary(rowkey));
            count++;
            if (count == limit)
                break;
        }
    } finally {
        IOUtils.closeQuietly(scanner);
        IOUtils.closeQuietly(table);
        IOUtils.closeQuietly(conn);
    }
}
Example 10
Source File: PingHBaseCLI.java from kylin (Apache License 2.0)
This version is byte-for-byte identical to Example 9 above; only the hosting project differs, so the code is not repeated here.
Example 11
Source File: CreateHTableJob.java from Kylin (Apache License 2.0)
@Override
public int run(String[] args) throws Exception {
    Options options = new Options();
    options.addOption(OPTION_CUBE_NAME);
    options.addOption(OPTION_PARTITION_FILE_PATH);
    options.addOption(OPTION_HTABLE_NAME);
    parseOptions(options, args);

    Path partitionFilePath = new Path(getOptionValue(OPTION_PARTITION_FILE_PATH));

    String cubeName = getOptionValue(OPTION_CUBE_NAME).toUpperCase();
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    CubeManager cubeMgr = CubeManager.getInstance(config);
    CubeInstance cube = cubeMgr.getCube(cubeName);
    CubeDesc cubeDesc = cube.getDescriptor();

    String tableName = getOptionValue(OPTION_HTABLE_NAME).toUpperCase();
    HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
    // https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.html
    tableDesc.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
    tableDesc.setValue(IRealizationConstants.HTableTag, config.getMetadataUrlPrefix());

    Configuration conf = HBaseConfiguration.create(getConf());
    HBaseAdmin admin = new HBaseAdmin(conf);

    try {
        if (User.isHBaseSecurityEnabled(conf)) {
            // add coprocessor for bulk load
            tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
        }

        for (HBaseColumnFamilyDesc cfDesc : cubeDesc.getHBaseMapping().getColumnFamily()) {
            HColumnDescriptor cf = new HColumnDescriptor(cfDesc.getName());
            cf.setMaxVersions(1);

            if (LZOSupportnessChecker.getSupportness()) {
                logger.info("hbase will use lzo to compress data");
                cf.setCompressionType(Algorithm.LZO);
            } else {
                logger.info("hbase will not use lzo to compress data");
            }

            cf.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
            cf.setInMemory(false);
            cf.setBlocksize(4 * 1024 * 1024); // set to 4MB
            tableDesc.addFamily(cf);
        }

        byte[][] splitKeys = getSplits(conf, partitionFilePath);

        if (admin.tableExists(tableName)) {
            // admin.disableTable(tableName);
            // admin.deleteTable(tableName);
            throw new RuntimeException("HBase table " + tableName + " exists!");
        }

        DeployCoprocessorCLI.deployCoprocessor(tableDesc);

        admin.createTable(tableDesc, splitKeys);
        logger.info("create hbase table " + tableName + " done.");

        return 0;
    } catch (Exception e) {
        printUsage(options);
        e.printStackTrace(System.err);
        logger.error(e.getLocalizedMessage(), e);
        return 2;
    } finally {
        admin.close();
    }
}
Example 12
Source File: IICreateHTableJob.java from Kylin (Apache License 2.0)
@Override
public int run(String[] args) throws Exception {
    Options options = new Options();

    try {
        options.addOption(OPTION_II_NAME);
        options.addOption(OPTION_HTABLE_NAME);
        parseOptions(options, args);

        String tableName = getOptionValue(OPTION_HTABLE_NAME);
        String iiName = getOptionValue(OPTION_II_NAME);

        KylinConfig config = KylinConfig.getInstanceFromEnv();
        IIManager iiManager = IIManager.getInstance(config);
        IIInstance ii = iiManager.getII(iiName);
        int sharding = ii.getDescriptor().getSharding();

        HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
        HColumnDescriptor cf = new HColumnDescriptor(IIDesc.HBASE_FAMILY);
        cf.setMaxVersions(1);
        //cf.setCompressionType(Algorithm.LZO);
        cf.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
        tableDesc.addFamily(cf);
        tableDesc.setValue(IRealizationConstants.HTableTag, config.getMetadataUrlPrefix());

        Configuration conf = HBaseConfiguration.create(getConf());
        if (User.isHBaseSecurityEnabled(conf)) {
            // add coprocessor for bulk load
            tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
        }

        DeployCoprocessorCLI.deployCoprocessor(tableDesc);

        // drop the table first
        HBaseAdmin admin = new HBaseAdmin(conf);
        if (admin.tableExists(tableName)) {
            admin.disableTable(tableName);
            admin.deleteTable(tableName);
        }

        // create table
        byte[][] splitKeys = getSplits(sharding);
        if (splitKeys.length == 0)
            splitKeys = null;

        admin.createTable(tableDesc, splitKeys);
        if (splitKeys != null) {
            for (int i = 0; i < splitKeys.length; i++) {
                System.out.println("split key " + i + ": " + BytesUtil.toHex(splitKeys[i]));
            }
        }
        System.out.println("create hbase table " + tableName + " done.");
        admin.close();

        return 0;
    } catch (Exception e) {
        printUsage(options);
        throw e;
    }
}
Example 13
Source File: PingHBaseCLI.java from Kylin (Apache License 2.0)
public static void main(String[] args) throws IOException {
    String metadataUrl = args[0];
    String hbaseTable = args[1];

    System.out.println("Hello friend.");

    Configuration hconf = HadoopUtil.newHBaseConfiguration(metadataUrl);

    if (User.isHBaseSecurityEnabled(hconf)) {
        try {
            System.out.println("--------------Getting kerberos credential for user "
                    + UserGroupInformation.getCurrentUser().getUserName());
            TokenUtil.obtainAndCacheToken(hconf, UserGroupInformation.getCurrentUser());
        } catch (InterruptedException e) {
            System.out.println("--------------Error while getting kerberos credential for user "
                    + UserGroupInformation.getCurrentUser().getUserName());
        }
    }

    Scan scan = new Scan();
    int limit = 20;

    HConnection conn = null;
    HTableInterface table = null;
    ResultScanner scanner = null;
    try {
        conn = HConnectionManager.createConnection(hconf);
        table = conn.getTable(hbaseTable);
        scanner = table.getScanner(scan);
        int count = 0;
        for (Result r : scanner) {
            byte[] rowkey = r.getRow();
            System.out.println(Bytes.toStringBinary(rowkey));
            count++;
            if (count == limit)
                break;
        }
    } finally {
        if (scanner != null) {
            scanner.close();
        }
        if (table != null) {
            table.close();
        }
        if (conn != null) {
            conn.close();
        }
    }
}
Example 14
Source File: HBaseSinkSecurityManager.java from mt-flume (Apache License 2.0)
/**
 * Checks if security is enabled for the HBase cluster.
 *
 * @return - true if security is enabled on the HBase cluster and
 * the underlying HDFS cluster.
 */
public static boolean isSecurityEnabled(Configuration conf) {
    return User.isSecurityEnabled() && User.isHBaseSecurityEnabled(conf);
}
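Note that this helper requires Kerberos at both layers: User.isSecurityEnabled() delegates to Hadoop's UserGroupInformation.isSecurityEnabled(), which reflects hadoop.security.authentication, while User.isHBaseSecurityEnabled(conf) reflects hbase.security.authentication. A fully secured deployment sets both to kerberos, so the two checks agree.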