Java Code Examples for org.apache.hadoop.hbase.security.User#runAsLoginUser()
The following examples show how to use org.apache.hadoop.hbase.security.User#runAsLoginUser(). This method runs the supplied PrivilegedExceptionAction as the process's login user (for example, the HBase service principal) rather than as the effective user of the current RPC, which is why coprocessor hooks use it for privileged work such as reading or updating system tables. The originating project, source file, and license are noted above each example.
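Before the individual examples, here is a minimal sketch of the pattern they all share. It assumes a Connection obtained from a coprocessor environment; the helper name and the hbase:acl table are illustrative placeholders, not code from any of the projects below.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.User;

public class RunAsLoginUserSketch {

  // Hypothetical helper: in a real coprocessor the Connection would come
  // from the environment, e.g. env.getConnection().
  static void writeAsLoginUser(final Connection connection) throws IOException {
    User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // Everything inside run() executes under the login user's
        // security context rather than the RPC caller's.
        try (Table acl = connection.getTable(TableName.valueOf("hbase:acl"))) {
          // ... privileged reads or writes against the system table ...
        }
        return null;
      }
    });
  }
}

The anonymous PrivilegedExceptionAction<Void> returning null is the idiom used throughout the examples; when the privileged block produces a value, the type parameter carries it back (see Example 7).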
Example 1
Source File: AccessController.java From hbase with Apache License 2.0
@Override
public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
    final TableName tableName) throws IOException {
  final Configuration conf = c.getEnvironment().getConfiguration();
  User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Table table =
          c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
        PermissionStorage.removeTablePermissions(conf, tableName, table);
      }
      return null;
    }
  });
  zkPermissionWatcher.deleteTableACLNode(tableName);
}
Example 2
Source File: AccessController.java From hbase with Apache License 2.0
@Override
public void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
    final TableName tableName) throws IOException {
  requirePermission(c, "truncateTable", tableName, null, null, Action.ADMIN, Action.CREATE);
  final Configuration conf = c.getEnvironment().getConfiguration();
  User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      List<UserPermission> acls =
          PermissionStorage.getUserTablePermissions(conf, tableName, null, null, null, false);
      if (acls != null) {
        tableAcls.put(tableName, acls);
      }
      return null;
    }
  });
}
Example 3
Source File: AccessController.java From hbase with Apache License 2.0
@Override
public void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
    final TableName tableName) throws IOException {
  final Configuration conf = ctx.getEnvironment().getConfiguration();
  User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      List<UserPermission> perms = tableAcls.get(tableName);
      if (perms != null) {
        for (UserPermission perm : perms) {
          try (Table table =
              ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
            PermissionStorage.addUserPermission(conf, perm, table);
          }
        }
      }
      tableAcls.remove(tableName);
      return null;
    }
  });
}
Example 4
Source File: AccessController.java From hbase with Apache License 2.0
@Override
public void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
    TableDescriptor oldDesc, TableDescriptor currentDesc) throws IOException {
  final Configuration conf = c.getEnvironment().getConfiguration();
  // default the table owner to current user, if not specified.
  final String owner = (currentDesc.getOwnerString() != null) ? currentDesc.getOwnerString()
      : getActiveUser(c).getShortName();
  User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      UserPermission userperm = new UserPermission(owner,
          Permission.newBuilder(currentDesc.getTableName()).withActions(Action.values()).build());
      try (Table table =
          c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
        PermissionStorage.addUserPermission(conf, userperm, table);
      }
      return null;
    }
  });
}
Example 5
Source File: AccessController.java From hbase with Apache License 2.0
@Override
public void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
    final String namespace) throws IOException {
  final Configuration conf = ctx.getEnvironment().getConfiguration();
  User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Table table =
          ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
        PermissionStorage.removeNamespacePermissions(conf, namespace, table);
      }
      return null;
    }
  });
  zkPermissionWatcher.deleteNamespaceACLNode(namespace);
  LOG.info(namespace + " entry deleted in " + PermissionStorage.ACL_TABLE_NAME + " table.");
}
Example 6
Source File: StatisticsWriter.java From phoenix with Apache License 2.0
public void commitStats(final List<Mutation> mutations, final StatisticsCollector statsCollector)
    throws IOException {
  User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      commitLastStatsUpdatedTime(statsCollector);
      if (mutations.size() > 0) {
        byte[] row = mutations.get(0).getRow();
        MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
        for (Mutation m : mutations) {
          mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(getMutationType(m), m));
        }
        MutateRowsRequest mrm = mrmBuilder.build();
        CoprocessorRpcChannel channel = statsWriterTable.coprocessorService(row);
        MultiRowMutationService.BlockingInterface service =
            MultiRowMutationService.newBlockingStub(channel);
        try {
          service.mutateRows(null, mrm);
        } catch (ServiceException ex) {
          // Surface the RPC failure as an IOException
          throw ProtobufUtil.toIOException(ex);
        }
      }
      return null;
    }
  });
}
Example 7
Source File: SnapshotDescriptionUtils.java From hbase with Apache License 2.0
private static SnapshotDescription writeAclToSnapshotDescription(SnapshotDescription snapshot,
    Configuration conf) throws IOException {
  ListMultimap<String, UserPermission> perms =
      User.runAsLoginUser(new PrivilegedExceptionAction<ListMultimap<String, UserPermission>>() {
        @Override
        public ListMultimap<String, UserPermission> run() throws Exception {
          return PermissionStorage.getTablePermissions(conf,
              TableName.valueOf(snapshot.getTable()));
        }
      });
  return snapshot.toBuilder()
      .setUsersAndPermissions(ShadedAccessControlUtil.toUserTablePermissions(perms)).build();
}
Example 8
Source File: PhoenixAccessController.java From phoenix with Apache License 2.0
private void grantPermissions(final String toUser, final byte[] table, final Action... actions)
    throws IOException {
  User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(env.getConfiguration())) {
        AccessControlClient.grant(conn, TableName.valueOf(table), toUser, null, null, actions);
      } catch (Throwable e) {
        // Propagate the failure rather than discarding it
        throw new DoNotRetryIOException(e);
      }
      return null;
    }
  });
}
Example 9
Source File: PruneUpperBoundWriter.java From phoenix-tephra with Apache License 2.0
private void startFlushThread() {
  flushThread = new Thread("tephra-prune-upper-bound-writer") {
    @Override
    public void run() {
      while ((!isInterrupted()) && (!stopped)) {
        long now = System.currentTimeMillis();
        if (now > (lastChecked + pruneFlushInterval)) {
          // should flush data
          try {
            User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
              @Override
              public Void run() throws Exception {
                // Record prune upper bound
                while (!pruneEntries.isEmpty()) {
                  Map.Entry<byte[], Long> firstEntry = pruneEntries.firstEntry();
                  dataJanitorState.savePruneUpperBoundForRegion(firstEntry.getKey(),
                      firstEntry.getValue());
                  // Only remove the entry if the key and value still match what we wrote,
                  // since a new pruneUpperBound for the same key may have been added meanwhile
                  pruneEntries.remove(firstEntry.getKey(), firstEntry.getValue());
                }
                // Record empty regions
                while (!emptyRegions.isEmpty()) {
                  Map.Entry<byte[], Long> firstEntry = emptyRegions.firstEntry();
                  dataJanitorState.saveEmptyRegionForTime(firstEntry.getValue(),
                      firstEntry.getKey());
                  // Only remove the entry if the key and value still match what we wrote,
                  // since a new value for the same key may have been added meanwhile
                  emptyRegions.remove(firstEntry.getKey(), firstEntry.getValue());
                }
                return null;
              }
            });
          } catch (IOException ex) {
            LOG.warn("Cannot record prune upper bound for a region to table "
                + tableName.getNameWithNamespaceInclAsString(), ex);
          }
          lastChecked = now;
        }
        try {
          TimeUnit.SECONDS.sleep(1);
        } catch (InterruptedException ex) {
          interrupt();
          break;
        }
      }
      LOG.info("PruneUpperBound Writer thread terminated.");
    }
  };
  flushThread.setDaemon(true);
  flushThread.start();
}
Example 10
Source File: RangerAuthorizationCoprocessor.java From ranger with Apache License 2.0
@Override
public void getUserPermissions(RpcController controller,
    AccessControlProtos.GetUserPermissionsRequest request,
    RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done) {
  AccessControlProtos.GetUserPermissionsResponse response = null;
  try {
    String operation = "userPermissions";
    final RangerAccessResourceImpl resource = new RangerAccessResourceImpl();
    User user = getActiveUser(null);
    Set<String> groups = _userUtils.getUserGroups(user);
    if (groups.isEmpty() && user.getUGI() != null) {
      String[] groupArray = user.getUGI().getGroupNames();
      if (groupArray != null) {
        groups = Sets.newHashSet(groupArray);
      }
    }
    RangerAccessRequestImpl rangerAccessrequest = new RangerAccessRequestImpl(resource, null,
        _userUtils.getUserAsString(user), groups, null);
    rangerAccessrequest.setAction(operation);
    rangerAccessrequest.setClientIPAddress(getRemoteAddress());
    rangerAccessrequest.setResourceMatchingScope(RangerAccessRequest.ResourceMatchingScope.SELF);
    List<UserPermission> perms = null;
    if (request.getType() == AccessControlProtos.Permission.Type.Table) {
      final TableName table =
          request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null;
      requirePermission(null, operation, table.getName(), Action.ADMIN);
      resource.setValue(RangerHBaseResource.KEY_TABLE, table.getNameAsString());
      perms = User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() {
        @Override
        public List<UserPermission> run() throws Exception {
          return getUserPermissions(hbasePlugin.getResourceACLs(rangerAccessrequest),
              table.getNameAsString(), false);
        }
      });
    } else if (request.getType() == AccessControlProtos.Permission.Type.Namespace) {
      final String namespace = request.getNamespaceName().toStringUtf8();
      requireGlobalPermission(null, "getUserPermissionForNamespace", namespace, Action.ADMIN);
      resource.setValue(RangerHBaseResource.KEY_TABLE,
          namespace + RangerHBaseResource.NAMESPACE_SEPARATOR);
      rangerAccessrequest.setRequestData(namespace);
      perms = User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() {
        @Override
        public List<UserPermission> run() throws Exception {
          return getUserPermissions(hbasePlugin.getResourceACLs(rangerAccessrequest), namespace,
              true);
        }
      });
    } else {
      requirePermission(null, "userPermissions", Action.ADMIN);
      perms = User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() {
        @Override
        public List<UserPermission> run() throws Exception {
          return getUserPermissions(hbasePlugin.getResourceACLs(rangerAccessrequest), null,
              false);
        }
      });
      if (_userUtils.isSuperUser(user)) {
        perms.add(new UserPermission(Bytes.toBytes(_userUtils.getUserAsString(user)),
            AccessControlLists.ACL_TABLE_NAME, null, Action.values()));
      }
    }
    response = AccessControlUtil.buildGetUserPermissionsResponse(perms);
  } catch (IOException ioe) {
    // pass exception back up
    ResponseConverter.setControllerException(controller, ioe);
  }
  done.run(response);
}
Example 11
Source File: AccessController.java From hbase with Apache License 2.0
@Override
public void postCompletedCreateTableAction(
    final ObserverContext<MasterCoprocessorEnvironment> c, final TableDescriptor desc,
    final RegionInfo[] regions) throws IOException {
  // When AC is used, it should be configured as the 1st CP.
  // In Master, the table operations like create, are handled by a Thread pool but the max size
  // for this pool is 1. So if multiple CPs create tables on startup, these creations will happen
  // sequentially only.
  // Related code in HMaster#startServiceThreads
  // {code}
  //   // We depend on there being only one instance of this executor running
  //   // at a time. To do concurrency, would need fencing of enable/disable of
  //   // tables.
  //   this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
  // {code}
  // In future if we change this pool to have more threads, then there is a chance for thread,
  // creating acl table, getting delayed and by that time another table creation got over and
  // this hook is getting called. In such a case, we will need a wait logic here which will
  // wait till the acl table is created.
  if (PermissionStorage.isAclTable(desc)) {
    this.aclTabAvailable = true;
  } else {
    if (!aclTabAvailable) {
      LOG.warn("Not adding owner permission for table " + desc.getTableName() + ". "
          + PermissionStorage.ACL_TABLE_NAME + " is not yet created. "
          + getClass().getSimpleName() + " should be configured as the first Coprocessor");
    } else {
      String owner = desc.getOwnerString();
      // default the table owner to current user, if not specified.
      if (owner == null) {
        owner = getActiveUser(c).getShortName();
      }
      final UserPermission userPermission = new UserPermission(owner,
          Permission.newBuilder(desc.getTableName()).withActions(Action.values()).build());
      // switch to the real hbase master user for doing the RPC on the ACL table
      User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try (Table table =
              c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
            PermissionStorage.addUserPermission(c.getEnvironment().getConfiguration(),
                userPermission, table);
          }
          return null;
        }
      });
    }
  }
}
Example 12
Source File: UngroupedAggregateRegionObserver.java From phoenix with Apache License 2.0
@Override
public InternalScanner preCompact(
    org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, InternalScanner scanner, ScanType scanType,
    org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
  if (scanType.equals(ScanType.COMPACT_DROP_DELETES)) {
    final TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
    // Compaction and split upcalls run with the effective user context of the requesting user.
    // This will lead to failure of cross cluster RPC if the effective user is not
    // the login user. Switch to the login user context to ensure we have the expected
    // security context.
    return User.runAsLoginUser(new PrivilegedExceptionAction<InternalScanner>() {
      @Override
      public InternalScanner run() throws Exception {
        InternalScanner internalScanner = scanner;
        try {
          long clientTimeStamp = EnvironmentEdgeManager.currentTimeMillis();
          DelegateRegionCoprocessorEnvironment compactionConfEnv =
              new DelegateRegionCoprocessorEnvironment(c.getEnvironment(),
                  ConnectionType.COMPACTION_CONNECTION);
          StatisticsCollector statisticsCollector =
              StatisticsCollectorFactory.createStatisticsCollector(compactionConfEnv,
                  table.getNameAsString(), clientTimeStamp,
                  store.getColumnFamilyDescriptor().getName());
          statisticsCollector.init();
          internalScanner =
              statisticsCollector.createCompactionScanner(compactionConfEnv, store, scanner);
        } catch (Exception e) {
          // If we can't reach the stats table, don't interrupt the normal
          // compaction operation, just log a warning.
          if (LOGGER.isWarnEnabled()) {
            LOGGER.warn("Unable to collect stats for " + table, e);
          }
        }
        return internalScanner;
      }
    });
  }
  return scanner;
}
Example 13
Source File: UngroupedAggregateRegionObserver.java From phoenix with Apache License 2.0
@Override
public void preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Store store, ScanType scanType, ScanOptions options,
    CompactionLifeCycleTracker tracker, final CompactionRequest request) throws IOException {
  // Compaction and split upcalls run with the effective user context of the requesting user.
  // This will lead to failure of cross cluster RPC if the effective user is not
  // the login user. Switch to the login user context to ensure we have the expected
  // security context.
  final String fullTableName =
      c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
  // since we will make a call to syscat, do nothing if we are compacting syscat itself
  if (request.isMajor() && !PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME.equals(fullTableName)) {
    User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // If the index is disabled, keep the deleted cells so the rebuild doesn't corrupt the
        // index
        try (PhoenixConnection conn =
            QueryUtil.getConnectionOnServer(compactionConfig).unwrap(PhoenixConnection.class)) {
          PTable table = PhoenixRuntime.getTableNoCache(conn, fullTableName);
          List<PTable> indexes = PTableType.INDEX.equals(table.getType())
              ? Lists.newArrayList(table) : table.getIndexes();
          // FIXME need to handle views and indexes on views as well
          for (PTable index : indexes) {
            if (index.getIndexDisableTimestamp() != 0) {
              LOGGER.info("Modifying major compaction scanner to retain deleted cells for a table"
                  + " with disabled index: " + fullTableName);
              options.setKeepDeletedCells(KeepDeletedCells.TRUE);
              options.readAllVersions();
              options.setTTL(Long.MAX_VALUE);
            }
          }
        } catch (Exception e) {
          if (e instanceof TableNotFoundException) {
            LOGGER.debug("Ignoring HBase table that is not a Phoenix table: " + fullTableName);
            // non-Phoenix HBase tables won't be found, do nothing
          } else {
            LOGGER.error("Unable to modify compaction scanner to retain deleted cells for a table"
                + " with disabled Index; " + fullTableName, e);
          }
        }
        return null;
      }
    });
  }
}