Java Code Examples for org.apache.hadoop.hbase.CellUtil#cloneQualifier()
The following examples show how to use org.apache.hadoop.hbase.CellUtil#cloneQualifier().
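For orientation before the project examples, here is a minimal sketch of the common pattern: CellUtil.cloneQualifier(cell) copies a cell's column qualifier bytes into a fresh byte[], detached from the cell's backing buffer, so they can be compared or stored independently of the scanner or result that produced the cell. The table name "t1" and row key "row1" are placeholder values used only for this sketch.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CloneQualifierExample {
  public static void main(String[] args) throws Exception {
    // "t1" and "row1" are illustrative placeholders, not names from the projects below.
    try (Connection connection = ConnectionFactory.createConnection();
         Table table = connection.getTable(TableName.valueOf("t1"))) {
      Result result = table.get(new Get(Bytes.toBytes("row1")));
      if (result.listCells() != null) {
        for (Cell cell : result.listCells()) {
          // cloneQualifier copies the qualifier bytes into a new byte[],
          // so they remain valid after the cell's backing buffer is reused.
          byte[] qualifier = CellUtil.cloneQualifier(cell);
          byte[] value = CellUtil.cloneValue(cell);
          System.out.println(Bytes.toString(qualifier) + " = " + Bytes.toString(value));
        }
      }
    }
  }
}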
Example 1
Source File: TupleProjector.java From phoenix with Apache License 2.0
/**
 * Iterate over the list of cells and populate dynamic columns
 * @param result list of cells
 * @param dynCols Populated list of PColumns corresponding to dynamic columns
 * @param dynColCellQualifiers Populated set of <column family, column qualifier> pairs
 *     for the cells in the list, which correspond to dynamic columns
 * @throws InvalidProtocolBufferException Thrown if there is an error parsing byte[] to protobuf
 */
private static void populateDynColsFromResult(List<Cell> result, List<PColumn> dynCols,
    Set<Pair<ByteBuffer, ByteBuffer>> dynColCellQualifiers) throws InvalidProtocolBufferException {
  for (Cell c : result) {
    byte[] qual = CellUtil.cloneQualifier(c);
    byte[] fam = CellUtil.cloneFamily(c);
    int index = Bytes.indexOf(qual, DYN_COLS_METADATA_CELL_QUALIFIER);
    // Contains dynamic column metadata, so add it to the list of dynamic columns
    if (index != -1) {
      byte[] dynColMetaDataProto = CellUtil.cloneValue(c);
      dynCols.add(PColumnImpl.createFromProto(
          PTableProtos.PColumn.parseFrom(dynColMetaDataProto)));
      // Add the <fam, qualifier> pair for the actual dynamic column. The column qualifier
      // of the dynamic column is got by parsing out the known bytes from the shadow cell
      // containing the metadata for that column i.e.
      // DYN_COLS_METADATA_CELL_QUALIFIER<actual column qualifier>
      byte[] dynColQual = Arrays.copyOfRange(qual,
          index + DYN_COLS_METADATA_CELL_QUALIFIER.length, qual.length);
      dynColCellQualifiers.add(
          new Pair<>(ByteBuffer.wrap(fam), ByteBuffer.wrap(dynColQual)));
    }
  }
}
Example 2
Source File: HBaseCLI.java From cloud-bigtable-examples with Apache License 2.0
public void run(Connection connection, List<String> args) throws InvalidArgsException, IOException {
  if (args.size() != 2) {
    throw new InvalidArgsException(args);
  }
  String tableName = args.get(0);
  String rowId = args.get(1);

  Table table = connection.getTable(TableName.valueOf(tableName));

  // Create a new Get request and specify the rowId passed by the user.
  Result result = table.get(new Get(rowId.getBytes()));

  // Iterate over the results. Each Cell is a value for a column,
  // so multiple Cells will be processed for each row.
  for (Cell cell : result.listCells()) {
    // We use the CellUtil class to clone values from the returned cells.
    String row = new String(CellUtil.cloneRow(cell));
    String family = new String(CellUtil.cloneFamily(cell));
    String column = new String(CellUtil.cloneQualifier(cell));
    String value = new String(CellUtil.cloneValue(cell));
    long timestamp = cell.getTimestamp();
    System.out.printf("%-20s column=%s:%s, timestamp=%s, value=%s\n",
        row, family, column, timestamp, value);
  }
}
Example 3
Source File: TransactionVisibilityFilter.java From phoenix-tephra with Apache License 2.0
@Override
public Cell transformCell(Cell cell) throws IOException {
  // Convert Tephra deletes back into HBase deletes
  if (tx.getVisibilityLevel() == Transaction.VisibilityLevel.SNAPSHOT_ALL) {
    if (DeleteTracker.isFamilyDelete(cell)) {
      return new KeyValue(CellUtil.cloneRow(cell), CellUtil.cloneFamily(cell), null,
          cell.getTimestamp(), KeyValue.Type.DeleteFamily);
    } else if (isColumnDelete(cell)) {
      // Note: in some cases KeyValue.Type.Delete is used in Delete object,
      // and in some other cases KeyValue.Type.DeleteColumn is used.
      // Since Tephra cannot distinguish between the two, we return KeyValue.Type.DeleteColumn.
      // KeyValue.Type.DeleteColumn makes both CellUtil.isDelete and CellUtil.isDeleteColumns
      // return true, and will work in both cases.
      return new KeyValue(CellUtil.cloneRow(cell), CellUtil.cloneFamily(cell),
          CellUtil.cloneQualifier(cell), cell.getTimestamp(), KeyValue.Type.DeleteColumn);
    }
  }
  return cell;
}
Example 4
Source File: MultipleColumnPrefixFilter.java From hbase with Apache License 2.0
public ReturnCode filterColumn(Cell cell) {
  byte[] qualifier = CellUtil.cloneQualifier(cell);
  TreeSet<byte[]> lesserOrEqualPrefixes =
      (TreeSet<byte[]>) sortedPrefixes.headSet(qualifier, true);

  if (lesserOrEqualPrefixes.size() != 0) {
    byte[] largestPrefixSmallerThanQualifier = lesserOrEqualPrefixes.last();
    if (Bytes.startsWith(qualifier, largestPrefixSmallerThanQualifier)) {
      return ReturnCode.INCLUDE;
    }

    if (lesserOrEqualPrefixes.size() == sortedPrefixes.size()) {
      return ReturnCode.NEXT_ROW;
    } else {
      hint = sortedPrefixes.higher(largestPrefixSmallerThanQualifier);
      return ReturnCode.SEEK_NEXT_USING_HINT;
    }
  } else {
    hint = sortedPrefixes.first();
    return ReturnCode.SEEK_NEXT_USING_HINT;
  }
}
Example 5
Source File: Result.java From hbase with Apache License 2.0
/**
 * Map of families to all versions of its qualifiers and values.
 * <p>
 * Returns a three level Map of the form:
 * <code>Map<family, Map<qualifier, Map<timestamp, value>>></code>
 * <p>
 * Note: All other map returning methods make use of this map internally.
 * @return map from families to qualifiers to versions
 */
public NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> getMap() {
  if (this.familyMap != null) {
    return this.familyMap;
  }
  if (isEmpty()) {
    return null;
  }
  this.familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (Cell kv : this.cells) {
    byte[] family = CellUtil.cloneFamily(kv);
    NavigableMap<byte[], NavigableMap<Long, byte[]>> columnMap = familyMap.get(family);
    if (columnMap == null) {
      columnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
      familyMap.put(family, columnMap);
    }
    byte[] qualifier = CellUtil.cloneQualifier(kv);
    NavigableMap<Long, byte[]> versionMap = columnMap.get(qualifier);
    if (versionMap == null) {
      versionMap = new TreeMap<>(new Comparator<Long>() {
        @Override
        public int compare(Long l1, Long l2) {
          return l2.compareTo(l1);
        }
      });
      columnMap.put(qualifier, versionMap);
    }
    Long timestamp = kv.getTimestamp();
    byte[] value = CellUtil.cloneValue(kv);
    versionMap.put(timestamp, value);
  }
  return this.familyMap;
}
Example 6
Source File: WriteHeavyIncrementObserver.java From hbase with Apache License 2.0
@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get,
    List<Cell> result) throws IOException {
  Scan scan =
      new Scan().withStartRow(get.getRow()).withStopRow(get.getRow(), true).readAllVersions();
  NavigableMap<byte[], NavigableMap<byte[], MutableLong>> sums =
      new TreeMap<>(Bytes.BYTES_COMPARATOR);
  get.getFamilyMap().forEach((cf, cqs) -> {
    NavigableMap<byte[], MutableLong> ss = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    sums.put(cf, ss);
    cqs.forEach(cq -> {
      ss.put(cq, new MutableLong(0));
      scan.addColumn(cf, cq);
    });
  });
  List<Cell> cells = new ArrayList<>();
  try (RegionScanner scanner = c.getEnvironment().getRegion().getScanner(scan)) {
    boolean moreRows;
    do {
      moreRows = scanner.next(cells);
      for (Cell cell : cells) {
        byte[] family = CellUtil.cloneFamily(cell);
        byte[] qualifier = CellUtil.cloneQualifier(cell);
        long value = Bytes.toLong(cell.getValueArray(), cell.getValueOffset());
        sums.get(family).get(qualifier).add(value);
      }
      cells.clear();
    } while (moreRows);
  }
  sums.forEach((cf, m) -> m.forEach((cq, s) -> result
      .add(createCell(get.getRow(), cf, cq, HConstants.LATEST_TIMESTAMP, s.longValue()))));
  c.bypass();
}
Example 7
Source File: ApiMetaDataMapper.java From pinpoint with Apache License 2.0
private byte[] getValue(Cell cell) {
  if (Bytes.equals(API_METADATA_CF_API_QUALI_SIGNATURE, 0,
      API_METADATA_CF_API_QUALI_SIGNATURE.length,
      cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())) {
    return CellUtil.cloneValue(cell);
  } else {
    // backward compatibility
    return CellUtil.cloneQualifier(cell);
  }
}
Example 8
Source File: ExtendCubeToHybridCLI.java From kylin with Apache License 2.0
private void copyAcl(String origCubeId, String newCubeId, String projectName) throws Exception {
  String projectResPath = ProjectInstance.concatResourcePath(projectName);
  Serializer<ProjectInstance> projectSerializer =
      new JsonSerializer<ProjectInstance>(ProjectInstance.class);
  ProjectInstance project = store.getResource(projectResPath, projectSerializer);
  String projUUID = project.getUuid();
  Table aclHtable = null;
  try {
    aclHtable = HBaseConnection.get(kylinConfig.getStorageUrl())
        .getTable(TableName.valueOf(kylinConfig.getMetadataUrlPrefix() + "_acl"));

    // cube acl
    Result result = aclHtable.get(new Get(Bytes.toBytes(origCubeId)));
    if (result.listCells() != null) {
      for (Cell cell : result.listCells()) {
        byte[] family = CellUtil.cloneFamily(cell);
        byte[] column = CellUtil.cloneQualifier(cell);
        byte[] value = CellUtil.cloneValue(cell);

        // use the target project uuid as the parent
        if (Bytes.toString(family).equals(ACL_INFO_FAMILY)
            && Bytes.toString(column).equals(ACL_INFO_FAMILY_PARENT_COLUMN)) {
          String valueString = "{\"id\":\"" + projUUID
              + "\",\"type\":\"org.apache.kylin.metadata.project.ProjectInstance\"}";
          value = Bytes.toBytes(valueString);
        }
        Put put = new Put(Bytes.toBytes(newCubeId));
        put.add(family, column, value);
        aclHtable.put(put);
      }
    }
  } finally {
    IOUtils.closeQuietly(aclHtable);
  }
}
Example 9
Source File: HBaseMergingFilter.java From geowave with Apache License 2.0
/** Handle the entire row at one time */
@Override
public void filterRowCells(final List<Cell> rowCells) throws IOException {
  if (!rowCells.isEmpty()) {
    if (rowCells.size() > 1) {
      try {
        final Cell firstCell = rowCells.get(0);
        final byte[] singleRow = CellUtil.cloneRow(firstCell);
        final byte[] singleFam = CellUtil.cloneFamily(firstCell);
        final byte[] singleQual = CellUtil.cloneQualifier(firstCell);

        Mergeable mergedValue = null;
        for (final Cell cell : rowCells) {
          final byte[] byteValue = CellUtil.cloneValue(cell);
          final Mergeable value = (Mergeable) URLClassloaderUtils.fromBinary(byteValue);
          if (mergedValue != null) {
            mergedValue.merge(value);
          } else {
            mergedValue = value;
          }
        }

        final Cell singleCell = CellUtil.createCell(
            singleRow,
            singleFam,
            singleQual,
            System.currentTimeMillis(),
            KeyValue.Type.Put.getCode(),
            URLClassloaderUtils.toBinary(mergedValue));

        rowCells.clear();
        rowCells.add(singleCell);
      } catch (final Exception e) {
        throw new IOException("Exception in filter", e);
      }
    }
  }
}
Example 10
Source File: IndexRebuildRegionScanner.java From phoenix with Apache License 2.0
private boolean isColumnIncluded(Cell cell) {
  byte[] family = CellUtil.cloneFamily(cell);
  if (!familyMap.containsKey(family)) {
    return false;
  }
  NavigableSet<byte[]> set = familyMap.get(family);
  if (set == null || set.isEmpty()) {
    return true;
  }
  byte[] qualifier = CellUtil.cloneQualifier(cell);
  return set.contains(qualifier);
}
Example 11
Source File: FromClientSideBase.java From hbase with Apache License 2.0
protected void assertNResult(Result result, byte[] row, byte[][] families, byte[][] qualifiers,
    byte[][] values, int[][] idxs) {
  assertTrue("Expected row [" + Bytes.toString(row) + "] " +
      "Got row [" + Bytes.toString(result.getRow()) + "]",
      equals(row, result.getRow()));
  assertEquals("Expected " + idxs.length + " keys but result contains " + result.size(),
      result.size(), idxs.length);

  Cell[] keys = result.rawCells();

  for (int i = 0; i < keys.length; i++) {
    byte[] family = families[idxs[i][0]];
    byte[] qualifier = qualifiers[idxs[i][1]];
    byte[] value = values[idxs[i][2]];
    Cell key = keys[i];

    byte[] famb = CellUtil.cloneFamily(key);
    byte[] qualb = CellUtil.cloneQualifier(key);
    byte[] valb = CellUtil.cloneValue(key);

    assertTrue("(" + i + ") Expected family [" + Bytes.toString(family) + "] " +
        "Got family [" + Bytes.toString(famb) + "]",
        equals(family, famb));
    assertTrue("(" + i + ") Expected qualifier [" + Bytes.toString(qualifier) + "] " +
        "Got qualifier [" + Bytes.toString(qualb) + "]",
        equals(qualifier, qualb));
    assertTrue("(" + i + ") Expected value [" + Bytes.toString(value) + "] " +
        "Got value [" + Bytes.toString(valb) + "]",
        equals(value, valb));
  }
}
Example 12
Source File: TestReplicationWithTags.java From hbase with Apache License 2.0
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
    final WALEdit edit, final Durability durability) throws IOException {
  byte[] attribute = put.getAttribute("visibility");
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<>();
  if (attribute != null) {
    for (List<? extends Cell> edits : put.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = CellUtil.cloneFamily(kv);
        }
        Tag tag = new ArrayBackedTag(TAG_TYPE, attribute);
        List<Tag> tagList = new ArrayList<>(1);
        tagList.add(tag);

        KeyValue newKV = new KeyValue(CellUtil.cloneRow(kv), 0, kv.getRowLength(),
            CellUtil.cloneFamily(kv), 0, kv.getFamilyLength(),
            CellUtil.cloneQualifier(kv), 0, kv.getQualifierLength(),
            kv.getTimestamp(), KeyValue.Type.codeToType(kv.getTypeByte()),
            CellUtil.cloneValue(kv), 0, kv.getValueLength(), tagList);
        ((List<Cell>) updatedCells).add(newKV);
      }
    }
    put.getFamilyCellMap().remove(cf);
    // Update the family map
    put.getFamilyCellMap().put(cf, updatedCells);
  }
}
Example 13
Source File: TTable.java From phoenix-omid with Apache License 2.0
private Put putInternal(Transaction tx, Put put, boolean addShadowCell) throws IOException {
  throwExceptionIfOpSetsTimerange(put);

  HBaseTransaction transaction = enforceHBaseTransactionAsParam(tx);

  final long writeTimestamp = transaction.getWriteTimestamp();

  // create put with correct ts
  final Put tsput = new Put(put.getRow(), writeTimestamp);
  propagateAttributes(put, tsput);
  Map<byte[], List<Cell>> kvs = put.getFamilyCellMap();
  for (List<Cell> kvl : kvs.values()) {
    for (Cell c : kvl) {
      CellUtils.validateCell(c, writeTimestamp);
      // Reach into keyvalue to update timestamp.
      // It's not nice to reach into keyvalue internals,
      // but we want to avoid having to copy the whole thing
      KeyValue kv = KeyValueUtil.ensureKeyValue(c);
      Bytes.putLong(kv.getValueArray(), kv.getTimestampOffset(), writeTimestamp);
      tsput.add(kv);

      if (addShadowCell) {
        tsput.addColumn(CellUtil.cloneFamily(kv),
            CellUtils.addShadowCellSuffixPrefix(CellUtil.cloneQualifier(kv), 0,
                CellUtil.cloneQualifier(kv).length),
            kv.getTimestamp(),
            Bytes.toBytes(kv.getTimestamp()));
      } else {
        HBaseCellId cellId = new HBaseCellId(this,
            CellUtil.cloneRow(kv),
            CellUtil.cloneFamily(kv),
            CellUtil.cloneQualifier(kv),
            kv.getTimestamp());

        addWriteSetElement(transaction, cellId);
      }
    }
  }
  return tsput;
}
Example 14
Source File: WriteHeavyIncrementObserver.java From hbase with Apache License 2.0
private InternalScanner wrap(byte[] family, InternalScanner scanner) {
  return new InternalScanner() {

    private List<Cell> srcResult = new ArrayList<>();

    private byte[] row;

    private byte[] qualifier;

    private long timestamp;

    private long sum;

    @Override
    public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
      boolean moreRows = scanner.next(srcResult, scannerContext);
      if (srcResult.isEmpty()) {
        if (!moreRows && row != null) {
          result.add(createCell(row, family, qualifier, timestamp, sum));
        }
        return moreRows;
      }
      Cell firstCell = srcResult.get(0);
      // Check if there is a row change first. All the cells will come from the same row so just
      // check the first one once is enough.
      if (row == null) {
        row = CellUtil.cloneRow(firstCell);
        qualifier = CellUtil.cloneQualifier(firstCell);
      } else if (!CellUtil.matchingRows(firstCell, row)) {
        result.add(createCell(row, family, qualifier, timestamp, sum));
        row = CellUtil.cloneRow(firstCell);
        qualifier = CellUtil.cloneQualifier(firstCell);
        sum = 0;
      }
      srcResult.forEach(c -> {
        if (CellUtil.matchingQualifier(c, qualifier)) {
          sum += Bytes.toLong(c.getValueArray(), c.getValueOffset());
        } else {
          result.add(createCell(row, family, qualifier, timestamp, sum));
          qualifier = CellUtil.cloneQualifier(c);
          sum = Bytes.toLong(c.getValueArray(), c.getValueOffset());
        }
        timestamp = c.getTimestamp();
      });
      if (!moreRows) {
        result.add(createCell(row, family, qualifier, timestamp, sum));
      }
      srcResult.clear();
      return moreRows;
    }

    @Override
    public void close() throws IOException {
      scanner.close();
    }
  };
}
Example 15
Source File: PermissionStorage.java From hbase with Apache License 2.0
private static Pair<String, Permission> parsePermissionRecord(byte[] entryName, Cell kv,
    byte[] cf, byte[] cq, boolean filterPerms, String filterUser) {
  // return X given a set of permissions encoded in the permissionRecord kv.
  byte[] family = CellUtil.cloneFamily(kv);
  if (!Bytes.equals(family, ACL_LIST_FAMILY)) {
    return null;
  }

  byte[] key = CellUtil.cloneQualifier(kv);
  byte[] value = CellUtil.cloneValue(kv);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Read acl: entry[" + Bytes.toStringBinary(entryName) + "], kv ["
        + Bytes.toStringBinary(key) + ": " + Bytes.toStringBinary(value) + "]");
  }

  // check for a column family appended to the key
  // TODO: avoid the string conversion to make this more efficient
  String username = Bytes.toString(key);

  // Retrieve group list for the filterUser if cell key is a group.
  // Group list is not required when filterUser itself a group
  List<String> filterUserGroups = null;
  if (filterPerms) {
    if (username.charAt(0) == '@' && !StringUtils.isEmpty(filterUser)
        && filterUser.charAt(0) != '@') {
      filterUserGroups = AccessChecker.getUserGroups(filterUser);
    }
  }

  // Handle namespace entry
  if (isNamespaceEntry(entryName)) {
    // Filter the permissions cell record if client query
    if (filterPerms && !validateFilterUser(username, filterUser, filterUserGroups)) {
      return null;
    }
    return new Pair<>(username,
        Permission.newBuilder(Bytes.toString(fromNamespaceEntry(entryName)))
            .withActionCodes(value).build());
  }

  // Handle global entry
  if (isGlobalEntry(entryName)) {
    // Filter the permissions cell record if client query
    if (filterPerms && !validateFilterUser(username, filterUser, filterUserGroups)) {
      return null;
    }
    return new Pair<>(username, Permission.newBuilder().withActionCodes(value).build());
  }

  // Handle table entry
  int idx = username.indexOf(ACL_KEY_DELIMITER);
  byte[] permFamily = null;
  byte[] permQualifier = null;
  if (idx > 0 && idx < username.length() - 1) {
    String remainder = username.substring(idx + 1);
    username = username.substring(0, idx);
    idx = remainder.indexOf(ACL_KEY_DELIMITER);
    if (idx > 0 && idx < remainder.length() - 1) {
      permFamily = Bytes.toBytes(remainder.substring(0, idx));
      permQualifier = Bytes.toBytes(remainder.substring(idx + 1));
    } else {
      permFamily = Bytes.toBytes(remainder);
    }
  }

  // Filter the permissions cell record if client query
  if (filterPerms) {
    // ACL table contain 3 types of cell key entries; hbase:Acl, namespace and table. So to filter
    // the permission cell records additional validations are required at CF, CQ and username.
    // Here we can proceed based on client input whether it contain filterUser.
    // Validate the filterUser when specified
    if (filterUser != null && !validateFilterUser(username, filterUser, filterUserGroups)) {
      return null;
    }
    if (!validateCFAndCQ(permFamily, cf, permQualifier, cq)) {
      return null;
    }
  }

  return new Pair<>(username, Permission.newBuilder(TableName.valueOf(entryName))
      .withFamily(permFamily).withQualifier(permQualifier).withActionCodes(value).build());
}
Example 16
Source File: HCell.java From spliceengine with GNU Affero General Public License v3.0
@Override
public byte[] qualifier() {
  if (delegate == null) {
    return null;
  }
  return CellUtil.cloneQualifier(delegate);
}
Example 17
Source File: TestRowProcessorEndpoint.java From hbase with Apache License 2.0
@Override
public void process(long now, HRegion region, List<Mutation> mutations, WALEdit walEdit)
    throws IOException {
  // Override the time to avoid race-condition in the unit test caused by
  // inaccurate timer on some machines
  now = myTimer.getAndIncrement();

  // Scan both rows
  List<Cell> kvs1 = new ArrayList<>();
  List<Cell> kvs2 = new ArrayList<>();
  doScan(region, new Scan().withStartRow(row1).withStopRow(row1), kvs1);
  doScan(region, new Scan().withStartRow(row2).withStopRow(row2), kvs2);

  // Assert swapped
  if (swapped) {
    assertEquals(rowSize, kvs2.size());
    assertEquals(row2Size, kvs1.size());
  } else {
    assertEquals(rowSize, kvs1.size());
    assertEquals(row2Size, kvs2.size());
  }
  swapped = !swapped;

  // Add and delete keyvalues
  List<List<Cell>> kvs = new ArrayList<>(2);
  kvs.add(kvs1);
  kvs.add(kvs2);
  byte[][] rows = new byte[][] { row1, row2 };
  for (int i = 0; i < kvs.size(); ++i) {
    for (Cell kv : kvs.get(i)) {
      // Delete from the current row and add to the other row
      Delete d = new Delete(rows[i]);
      KeyValue kvDelete = new KeyValue(rows[i], CellUtil.cloneFamily(kv),
          CellUtil.cloneQualifier(kv), kv.getTimestamp(), KeyValue.Type.Delete);
      d.add(kvDelete);

      Put p = new Put(rows[1 - i]);
      KeyValue kvAdd = new KeyValue(rows[1 - i], CellUtil.cloneFamily(kv),
          CellUtil.cloneQualifier(kv), now, CellUtil.cloneValue(kv));
      p.add(kvAdd);

      mutations.add(d);
      walEdit.add(kvDelete);
      mutations.add(p);
      walEdit.add(kvAdd);
    }
  }
}
Example 18
Source File: HBaseCLI.java From cloud-bigtable-examples with Apache License 2.0
public void run(Connection connection, List<String> args)
    throws InvalidArgsException, IOException {
  if (args.size() < 1 || args.size() > 2) {
    throw new InvalidArgsException(args);
  }
  String tableName = args.get(0);
  String filterVal = null;
  if (args.size() > 1) {
    filterVal = args.get(1);
  }

  Table table = connection.getTable(TableName.valueOf(tableName));

  // Create a new Scan instance.
  Scan scan = new Scan();

  // This command supports using a columnvalue filter.
  // The filter takes the form of <columnfamily>:<column><operator><value>
  // An example would be cf:col>=10
  if (filterVal != null) {
    String splitVal = "=";
    CompareFilter.CompareOp op = CompareFilter.CompareOp.EQUAL;

    if (filterVal.contains(">=")) {
      op = CompareFilter.CompareOp.GREATER_OR_EQUAL;
      splitVal = ">=";
    } else if (filterVal.contains("<=")) {
      op = CompareFilter.CompareOp.LESS_OR_EQUAL;
      splitVal = "<=";
    } else if (filterVal.contains(">")) {
      op = CompareFilter.CompareOp.GREATER;
      splitVal = ">";
    } else if (filterVal.contains("<")) {
      op = CompareFilter.CompareOp.LESS;
      splitVal = "<";
    }
    String[] filter = filterVal.split(splitVal);
    String[] filterCol = filter[0].split(":");
    scan.setFilter(new SingleColumnValueFilter(filterCol[0].getBytes(), filterCol[1].getBytes(),
        op, filter[1].getBytes()));
  }

  ResultScanner resultScanner = table.getScanner(scan);
  for (Result result : resultScanner) {
    for (Cell cell : result.listCells()) {
      String row = new String(CellUtil.cloneRow(cell));
      String family = new String(CellUtil.cloneFamily(cell));
      String column = new String(CellUtil.cloneQualifier(cell));
      String value = new String(CellUtil.cloneValue(cell));
      long timestamp = cell.getTimestamp();
      System.out.printf("%-20s column=%s:%s, timestamp=%s, value=%s\n",
          row, family, column, timestamp, value);
    }
  }
}
Example 19
Source File: RangerAuthorizationFilter.java From ranger with Apache License 2.0
@Override
public ReturnCode filterKeyValue(Cell kv) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("==> filterKeyValue");
  }

  String family = null;
  byte[] familyBytes = CellUtil.cloneFamily(kv);
  if (familyBytes != null && familyBytes.length > 0) {
    family = Bytes.toString(familyBytes);
    if (LOG.isDebugEnabled()) {
      LOG.debug("filterKeyValue: evaluating family[" + family + "].");
    }
  }
  String column = null;
  byte[] qualifier = CellUtil.cloneQualifier(kv);
  if (qualifier != null && qualifier.length > 0) {
    column = Bytes.toString(qualifier);
    if (LOG.isDebugEnabled()) {
      LOG.debug("filterKeyValue: evaluating column[" + column + "].");
    }
  } else {
    LOG.warn("filterKeyValue: empty/null column set! Unexpected!");
  }

  ReturnCode result = ReturnCode.NEXT_COL;
  boolean authCheckNeeded = false;

  if (family == null) {
    LOG.warn("filterKeyValue: Unexpected - null/empty family! Access denied!");
  } else if (_familiesAccessDenied.contains(family)) {
    LOG.debug("filterKeyValue: family found in access denied families cache. Access denied.");
  } else if (_columnsAccessAllowed.containsKey(family)) {
    LOG.debug("filterKeyValue: family found in column level access results cache.");
    if (_columnsAccessAllowed.get(family).contains(column)) {
      LOG.debug("filterKeyValue: family/column found in column level access results cache. Access allowed.");
      result = ReturnCode.INCLUDE;
    } else {
      LOG.debug("filterKeyValue: family/column not in column level access results cache. Access denied.");
    }
  } else if (_familiesAccessAllowed.contains(family)) {
    LOG.debug("filterKeyValue: family found in access allowed families cache. Must re-authorize for correct audit generation.");
    authCheckNeeded = true;
  } else if (_familiesAccessIndeterminate.contains(family)) {
    LOG.debug("filterKeyValue: family found in indeterminate families cache. Evaluating access...");
    authCheckNeeded = true;
  } else {
    LOG.warn("filterKeyValue: Unexpected - alien family encountered that wasn't seen by pre-hook! Access Denied.!");
  }

  if (authCheckNeeded) {
    LOG.debug("filterKeyValue: Checking authorization...");
    _session.columnFamily(family)
        .column(column)
        .buildRequest()
        .authorize();
    // must always purge the captured audit event out of the audit handler to avoid messing up the next check
    AuthzAuditEvent auditEvent = _auditHandler.getAndDiscardMostRecentEvent();
    if (_session.isAuthorized()) {
      LOG.debug("filterKeyValue: Access granted.");
      result = ReturnCode.INCLUDE;
      if (auditEvent != null) {
        LOG.debug("filterKeyValue: access is audited.");
        _auditHandler.logAuthzAudits(Collections.singletonList(auditEvent));
      } else {
        LOG.debug("filterKeyValue: no audit event returned. Access not audited.");
      }
    } else {
      LOG.debug("filterKeyValue: Access denied. Denial not audited.");
    }
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("filterKeyValue: " + result);
  }
  return result;
}