Java Code Examples for org.apache.hadoop.hbase.client.Put#setAttribute()
The following examples show how to use
org.apache.hadoop.hbase.client.Put#setAttribute().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: TransactionAwareHTable.java From phoenix-tephra with Apache License 2.0 | 6 votes |
/**
 * Wraps a client {@link Put} in a transactional one: every cell is re-written
 * with the transaction's write pointer as its timestamp, the caller's
 * attributes and durability are carried over, and each written column is
 * recorded in the change set so it can be rolled back on abort.
 *
 * @param put the original client put
 * @return a new Put keyed to the current transaction's write pointer
 * @throws IOException if re-adding a cell to the new Put fails
 */
private Put transactionalizeAction(Put put) throws IOException {
  Put txPut = new Put(put.getRow(), tx.getWritePointer());
  // Re-add every cell at the transaction's write pointer.
  for (Map.Entry<byte[], List<Cell>> family : put.getFamilyCellMap().entrySet()) {
    for (Cell cell : family.getValue()) {
      txPut.add(cell.getFamily(), cell.getQualifier(), tx.getWritePointer(), cell.getValue());
      // Track the write so a failed transaction can undo it.
      addToChangeSet(txPut.getRow(), cell.getFamily(), cell.getQualifier());
    }
  }
  // Preserve any operation attributes the caller attached.
  for (Map.Entry<String, byte[]> attr : put.getAttributesMap().entrySet()) {
    txPut.setAttribute(attr.getKey(), attr.getValue());
  }
  txPut.setDurability(put.getDurability());
  addToOperation(txPut, tx);
  return txPut;
}
Example 2
Source File: TransactionAwareHTable.java From phoenix-tephra with Apache License 2.0 | 6 votes |
/**
 * Wraps a client {@link Put} in a transactional one: every cell is re-written
 * with the transaction's write pointer as its timestamp, the caller's
 * attributes and durability are carried over, and each written column is
 * recorded in the change set so it can be rolled back on abort.
 *
 * @param put the original client put
 * @return a new Put keyed to the current transaction's write pointer
 * @throws IOException if re-adding a cell to the new Put fails
 */
private Put transactionalizeAction(Put put) throws IOException {
  Put txPut = new Put(put.getRow(), tx.getWritePointer());
  // Re-add every cell at the transaction's write pointer.
  for (Map.Entry<byte[], List<Cell>> family : put.getFamilyCellMap().entrySet()) {
    for (Cell cell : family.getValue()) {
      txPut.add(cell.getFamily(), cell.getQualifier(), tx.getWritePointer(), cell.getValue());
      // Track the write so a failed transaction can undo it.
      addToChangeSet(txPut.getRow(), cell.getFamily(), cell.getQualifier());
    }
  }
  // Preserve any operation attributes the caller attached.
  for (Map.Entry<String, byte[]> attr : put.getAttributesMap().entrySet()) {
    txPut.setAttribute(attr.getKey(), attr.getValue());
  }
  txPut.setDurability(put.getDurability());
  addToOperation(txPut, tx);
  return txPut;
}
Example 3
Source File: TransactionAwareHTable.java From phoenix-tephra with Apache License 2.0 | 6 votes |
/**
 * Wraps a client {@link Put} in a transactional one: every cell is cloned and
 * re-added with the transaction's write pointer as its timestamp, the caller's
 * attributes and durability are carried over, and each written column is
 * recorded in the change set so it can be rolled back on abort.
 *
 * @param put the original client put
 * @return a new Put keyed to the current transaction's write pointer
 * @throws IOException if re-adding a cell to the new Put fails
 */
private Put transactionalizeAction(Put put) throws IOException {
  Put txPut = new Put(put.getRow(), tx.getWritePointer());
  // Clone and re-add every cell at the transaction's write pointer.
  for (Map.Entry<byte[], List<Cell>> family : put.getFamilyCellMap().entrySet()) {
    for (Cell cell : family.getValue()) {
      txPut.addColumn(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
          tx.getWritePointer(), CellUtil.cloneValue(cell));
      // Track the write so a failed transaction can undo it.
      addToChangeSet(txPut.getRow(), CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell));
    }
  }
  // Preserve any operation attributes the caller attached.
  for (Map.Entry<String, byte[]> attr : put.getAttributesMap().entrySet()) {
    txPut.setAttribute(attr.getKey(), attr.getValue());
  }
  txPut.setDurability(put.getDurability());
  addToOperation(txPut, tx);
  return txPut;
}
Example 4
Source File: TransactionAwareHTable.java From phoenix-tephra with Apache License 2.0 | 6 votes |
/**
 * Wraps a client {@link Put} in a transactional one: every cell is re-written
 * with the transaction's write pointer as its timestamp, the caller's
 * attributes and durability are carried over, and each written column is
 * recorded in the change set so it can be rolled back on abort.
 *
 * @param put the original client put
 * @return a new Put keyed to the current transaction's write pointer
 * @throws IOException if re-adding a cell to the new Put fails
 */
private Put transactionalizeAction(Put put) throws IOException {
  Put txPut = new Put(put.getRow(), tx.getWritePointer());
  // Re-add every cell at the transaction's write pointer.
  for (Map.Entry<byte[], List<Cell>> family : put.getFamilyCellMap().entrySet()) {
    for (Cell cell : family.getValue()) {
      txPut.add(cell.getFamily(), cell.getQualifier(), tx.getWritePointer(), cell.getValue());
      // Track the write so a failed transaction can undo it.
      addToChangeSet(txPut.getRow(), cell.getFamily(), cell.getQualifier());
    }
  }
  // Preserve any operation attributes the caller attached.
  for (Map.Entry<String, byte[]> attr : put.getAttributesMap().entrySet()) {
    txPut.setAttribute(attr.getKey(), attr.getValue());
  }
  txPut.setDurability(put.getDurability());
  addToOperation(txPut, tx);
  return txPut;
}
Example 5
Source File: TransactionAwareHTable.java From phoenix-tephra with Apache License 2.0 | 6 votes |
/**
 * Wraps a client {@link Put} in a transactional one: every cell is re-written
 * with the transaction's write pointer as its timestamp, the caller's
 * attributes and durability are carried over, and each written column is
 * recorded in the change set so it can be rolled back on abort.
 *
 * @param put the original client put
 * @return a new Put keyed to the current transaction's write pointer
 * @throws IOException if re-adding a cell to the new Put fails
 */
private Put transactionalizeAction(Put put) throws IOException {
  Put txPut = new Put(put.getRow(), tx.getWritePointer());
  // Re-add every cell at the transaction's write pointer.
  for (Map.Entry<byte[], List<Cell>> family : put.getFamilyCellMap().entrySet()) {
    for (Cell cell : family.getValue()) {
      txPut.add(cell.getFamily(), cell.getQualifier(), tx.getWritePointer(), cell.getValue());
      // Track the write so a failed transaction can undo it.
      addToChangeSet(txPut.getRow(), cell.getFamily(), cell.getQualifier());
    }
  }
  // Preserve any operation attributes the caller attached.
  for (Map.Entry<String, byte[]> attr : put.getAttributesMap().entrySet()) {
    txPut.setAttribute(attr.getKey(), attr.getValue());
  }
  txPut.setDurability(put.getDurability());
  addToOperation(txPut, tx);
  return txPut;
}
Example 6
Source File: TransactionProcessor.java From phoenix-tephra with Apache License 2.0 | 5 votes |
@Override public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit, Durability durability) throws IOException { // Translate deletes into our own delete tombstones // Since HBase deletes cannot be undone, we need to translate deletes into special puts, which allows // us to rollback the changes (by a real delete) if the transaction fails // Deletes that are part of a transaction rollback do not need special handling. // They will never be rolled back, so are performed as normal HBase deletes. if (isRollbackOperation(delete)) { return; } Transaction tx = getFromOperation(delete); ensureValidTxLifetime(e.getEnvironment(), delete, tx); // Other deletes are client-initiated and need to be translated into our own tombstones // TODO: this should delegate to the DeleteStrategy implementation. Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp()); for (byte[] family : delete.getFamilyCellMap().keySet()) { List<Cell> familyCells = delete.getFamilyCellMap().get(family); if (isFamilyDelete(familyCells)) { deleteMarkers.add(family, TxConstants.FAMILY_DELETE_QUALIFIER, familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY); } else { for (Cell cell : familyCells) { deleteMarkers.add(family, CellUtil.cloneQualifier(cell), cell.getTimestamp(), HConstants.EMPTY_BYTE_ARRAY); } } } for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) { deleteMarkers.setAttribute(entry.getKey(), entry.getValue()); } e.getEnvironment().getRegion().put(deleteMarkers); // skip normal delete handling e.bypass(); }
Example 7
Source File: AccessController.java From hbase with Apache License 2.0 | 5 votes |
@Override public boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c, final byte [] row, final byte [] family, final byte [] qualifier, final CompareOperator op, final ByteArrayComparable comparator, final Put put, final boolean result) throws IOException { User user = getActiveUser(c); checkForReservedTagPresence(user, put); // Require READ and WRITE permissions on the table, CF, and KV to update RegionCoprocessorEnvironment env = c.getEnvironment(); Map<byte[],? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier); AuthResult authResult = permissionGranted(OpType.CHECK_AND_PUT, user, env, families, Action.READ, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { put.setAttribute(CHECK_COVERING_PERM, TRUE); } else if (authorizationEnabled) { throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } byte[] bytes = put.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL); if (bytes != null) { if (cellFeaturesEnabled) { addCellPermissions(bytes, put.getFamilyCellMap()); } else { throw new DoNotRetryIOException("Cell ACLs cannot be persisted"); } } return result; }
Example 8
Source File: AccessController.java From hbase with Apache License 2.0 | 5 votes |
@Override public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c, final Put put, final WALEdit edit, final Durability durability) throws IOException { User user = getActiveUser(c); checkForReservedTagPresence(user, put); // Require WRITE permission to the table, CF, or top visible value, if any. // NOTE: We don't need to check the permissions for any earlier Puts // because we treat the ACLs in each Put as timestamped like any other // HBase value. A new ACL in a new Put applies to that Put. It doesn't // change the ACL of any previous Put. This allows simple evolution of // security policy over time without requiring expensive updates. RegionCoprocessorEnvironment env = c.getEnvironment(); Map<byte[],? extends Collection<Cell>> families = put.getFamilyCellMap(); AuthResult authResult = permissionGranted(OpType.PUT, user, env, families, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { put.setAttribute(CHECK_COVERING_PERM, TRUE); } else if (authorizationEnabled) { throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } // Add cell ACLs from the operation to the cells themselves byte[] bytes = put.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL); if (bytes != null) { if (cellFeaturesEnabled) { addCellPermissions(bytes, put.getFamilyCellMap()); } else { throw new DoNotRetryIOException("Cell ACLs cannot be persisted"); } } }
Example 9
Source File: EdgeIndexWriter.java From hgraphdb with Apache License 2.0 | 5 votes |
/**
 * Builds the Put for one edge-index row. The row key is serialized from the
 * edge, direction, index key, and uniqueness flag; the created-at timestamp is
 * always stored, and unique indices additionally store the opposite vertex id
 * and the edge id. The IS_UNIQUE attribute flags the put for downstream
 * mutators.
 *
 * @param direction edge direction the index entry covers
 * @param entry index property name mapped to its uniqueness flag
 * @return the populated index Put
 */
private Put constructPut(Direction direction, Map.Entry<String, Boolean> entry) {
  long timestamp = ts != null ? ts : HConstants.LATEST_TIMESTAMP;
  boolean unique = entry.getValue();
  byte[] indexRow =
      graph.getEdgeIndexModel().serializeForWrite(edge, direction, unique, entry.getKey());
  Put indexPut = new Put(indexRow);
  indexPut.addColumn(Constants.DEFAULT_FAMILY_BYTES, Constants.CREATED_AT_BYTES, timestamp,
      ValueUtils.serialize(((HBaseEdge) edge).createdAt()));
  if (unique) {
    // A unique index row also records the opposite endpoint and the edge id.
    Object vertexId = direction == Direction.IN ? edge.outVertex().id() : edge.inVertex().id();
    indexPut.addColumn(Constants.DEFAULT_FAMILY_BYTES, Constants.VERTEX_ID_BYTES, timestamp,
        ValueUtils.serialize(vertexId));
    indexPut.addColumn(Constants.DEFAULT_FAMILY_BYTES, Constants.EDGE_ID_BYTES, timestamp,
        ValueUtils.serialize(edge.id()));
  }
  indexPut.setAttribute(Mutators.IS_UNIQUE, Bytes.toBytes(unique));
  return indexPut;
}
Example 10
Source File: VertexIndexWriter.java From hgraphdb with Apache License 2.0 | 5 votes |
/**
 * Builds the Put for one vertex-index row. The row key is serialized from the
 * vertex, index key, and uniqueness flag; the created-at timestamp is always
 * stored, and unique indices additionally store the vertex id. The IS_UNIQUE
 * attribute flags the put for downstream mutators.
 *
 * @param entry index property name mapped to its uniqueness flag
 * @return the populated index Put
 */
private Put constructPut(Map.Entry<String, Boolean> entry) {
  long timestamp = ts != null ? ts : HConstants.LATEST_TIMESTAMP;
  boolean unique = entry.getValue();
  byte[] indexRow = graph.getVertexIndexModel().serializeForWrite(vertex, unique, entry.getKey());
  Put indexPut = new Put(indexRow);
  indexPut.addColumn(Constants.DEFAULT_FAMILY_BYTES, Constants.CREATED_AT_BYTES, timestamp,
      ValueUtils.serialize(((HBaseVertex) vertex).createdAt()));
  if (unique) {
    // A unique index row also records the vertex id.
    indexPut.addColumn(Constants.DEFAULT_FAMILY_BYTES, Constants.VERTEX_ID_BYTES, timestamp,
        ValueUtils.serialize(vertex.id()));
  }
  indexPut.setAttribute(Mutators.IS_UNIQUE, Bytes.toBytes(unique));
  return indexPut;
}
Example 11
Source File: TransactionProcessor.java From phoenix-tephra with Apache License 2.0 | 5 votes |
@Override public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit, Durability durability) throws IOException { // Translate deletes into our own delete tombstones // Since HBase deletes cannot be undone, we need to translate deletes into special puts, which allows // us to rollback the changes (by a real delete) if the transaction fails // Deletes that are part of a transaction rollback do not need special handling. // They will never be rolled back, so are performed as normal HBase deletes. if (isRollbackOperation(delete)) { return; } Transaction tx = getFromOperation(delete); ensureValidTxLifetime(e.getEnvironment(), delete, tx); // Other deletes are client-initiated and need to be translated into our own tombstones // TODO: this should delegate to the DeleteStrategy implementation. Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp()); for (byte[] family : delete.getFamilyCellMap().keySet()) { List<Cell> familyCells = delete.getFamilyCellMap().get(family); if (isFamilyDelete(familyCells)) { deleteMarkers.add(family, TxConstants.FAMILY_DELETE_QUALIFIER, familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY); } else { for (Cell cell : familyCells) { deleteMarkers.add(family, CellUtil.cloneQualifier(cell), cell.getTimestamp(), HConstants.EMPTY_BYTE_ARRAY); } } } for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) { deleteMarkers.setAttribute(entry.getKey(), entry.getValue()); } e.getEnvironment().getRegion().put(deleteMarkers); // skip normal delete handling e.bypass(); }
Example 12
Source File: TransactionProcessor.java From phoenix-tephra with Apache License 2.0 | 5 votes |
@Override public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit, Durability durability) throws IOException { // Translate deletes into our own delete tombstones // Since HBase deletes cannot be undone, we need to translate deletes into special puts, which allows // us to rollback the changes (by a real delete) if the transaction fails // Deletes that are part of a transaction rollback do not need special handling. // They will never be rolled back, so are performed as normal HBase deletes. if (isRollbackOperation(delete)) { return; } Transaction tx = getFromOperation(delete); ensureValidTxLifetime(e.getEnvironment(), delete, tx); // Other deletes are client-initiated and need to be translated into our own tombstones // TODO: this should delegate to the DeleteStrategy implementation. Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp()); for (byte[] family : delete.getFamilyCellMap().keySet()) { List<Cell> familyCells = delete.getFamilyCellMap().get(family); if (isFamilyDelete(familyCells)) { deleteMarkers.add(family, TxConstants.FAMILY_DELETE_QUALIFIER, familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY); } else { for (Cell cell : familyCells) { deleteMarkers.add(family, CellUtil.cloneQualifier(cell), cell.getTimestamp(), HConstants.EMPTY_BYTE_ARRAY); } } } for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) { deleteMarkers.setAttribute(entry.getKey(), entry.getValue()); } e.getEnvironment().getRegion().put(deleteMarkers); // skip normal delete handling e.bypass(); }
Example 13
Source File: TestReplicationWithTags.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Writes a put tagged via the "visibility" attribute on cluster 1 and polls
 * cluster 2 until the row — and the tag observed by the sink coprocessor —
 * has replicated, failing after NB_RETRIES attempts.
 */
@Test
public void testReplicationWithCellTags() throws Exception {
  LOG.info("testSimplePutDelete");
  Put put = new Put(ROW);
  put.setAttribute("visibility", Bytes.toBytes("myTag3"));
  put.addColumn(FAMILY, ROW, ROW);
  htable1 = utility1.getConnection().getTable(TABLE_NAME);
  htable1.put(put);
  Get get = new Get(ROW);
  try {
    for (int attempt = 0; attempt < NB_RETRIES; attempt++) {
      if (attempt == NB_RETRIES - 1) {
        fail("Waited too much time for put replication");
      }
      Result res = htable2.get(get);
      if (res.isEmpty()) {
        // Not replicated yet; back off and retry.
        LOG.info("Row not available");
        Thread.sleep(SLEEP_TIME);
        continue;
      }
      assertArrayEquals(ROW, res.value());
      assertEquals(1, TestCoprocessorForTagsAtSink.TAGS.size());
      Tag tag = TestCoprocessorForTagsAtSink.TAGS.get(0);
      assertEquals(TAG_TYPE, tag.getType());
      break;
    }
  } finally {
    // Reset shared state for subsequent tests.
    TestCoprocessorForTagsAtSink.TAGS = null;
  }
}
Example 14
Source File: TransactionProcessor.java From phoenix-tephra with Apache License 2.0 | 5 votes |
@Override public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit, Durability durability) throws IOException { // Translate deletes into our own delete tombstones // Since HBase deletes cannot be undone, we need to translate deletes into special puts, which allows // us to rollback the changes (by a real delete) if the transaction fails // Deletes that are part of a transaction rollback do not need special handling. // They will never be rolled back, so are performed as normal HBase deletes. if (isRollbackOperation(delete)) { return; } Transaction tx = getFromOperation(delete); ensureValidTxLifetime(e.getEnvironment(), delete, tx); // Other deletes are client-initiated and need to be translated into our own tombstones // TODO: this should delegate to the DeleteStrategy implementation. Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp()); for (byte[] family : delete.getFamilyCellMap().keySet()) { List<Cell> familyCells = delete.getFamilyCellMap().get(family); if (isFamilyDelete(familyCells)) { deleteMarkers.add(family, TxConstants.FAMILY_DELETE_QUALIFIER, familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY); } else { for (Cell cell : familyCells) { deleteMarkers.add(family, CellUtil.cloneQualifier(cell), cell.getTimestamp(), HConstants.EMPTY_BYTE_ARRAY); } } } for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) { deleteMarkers.setAttribute(entry.getKey(), entry.getValue()); } e.getEnvironment().getRegion().put(deleteMarkers); // skip normal delete handling e.bypass(); }
Example 15
Source File: TransactionProcessor.java From phoenix-tephra with Apache License 2.0 | 5 votes |
@Override public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit, Durability durability) throws IOException { // Translate deletes into our own delete tombstones // Since HBase deletes cannot be undone, we need to translate deletes into special puts, which allows // us to rollback the changes (by a real delete) if the transaction fails // Deletes that are part of a transaction rollback do not need special handling. // They will never be rolled back, so are performed as normal HBase deletes. if (isRollbackOperation(delete)) { return; } Transaction tx = getFromOperation(delete); ensureValidTxLifetime(e.getEnvironment(), delete, tx); // Other deletes are client-initiated and need to be translated into our own tombstones // TODO: this should delegate to the DeleteStrategy implementation. Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp()); for (byte[] family : delete.getFamilyCellMap().keySet()) { List<Cell> familyCells = delete.getFamilyCellMap().get(family); if (isFamilyDelete(familyCells)) { deleteMarkers.add(family, TxConstants.FAMILY_DELETE_QUALIFIER, familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY); } else { for (Cell cell : familyCells) { deleteMarkers.add(family, CellUtil.cloneQualifier(cell), cell.getTimestamp(), HConstants.EMPTY_BYTE_ARRAY); } } } for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) { deleteMarkers.setAttribute(entry.getKey(), entry.getValue()); } e.getEnvironment().getRegion().put(deleteMarkers); // skip normal delete handling e.bypass(); }
Example 16
Source File: TransactionProcessor.java From phoenix-tephra with Apache License 2.0 | 5 votes |
@Override public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit, Durability durability) throws IOException { // Translate deletes into our own delete tombstones // Since HBase deletes cannot be undone, we need to translate deletes into special puts, // which allows // us to rollback the changes (by a real delete) if the transaction fails // Deletes that are part of a transaction rollback do not need special handling. // They will never be rolled back, so are performed as normal HBase deletes. if (isRollbackOperation(delete)) { return; } Transaction tx = getFromOperation(delete); ensureValidTxLifetime(e.getEnvironment(), delete, tx); // Other deletes are client-initiated and need to be translated into our own tombstones // TODO: this should delegate to the DeleteStrategy implementation. Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp()); for (byte[] family : delete.getFamilyCellMap().keySet()) { List<Cell> familyCells = delete.getFamilyCellMap().get(family); if (isFamilyDelete(familyCells)) { deleteMarkers.addColumn(family, TxConstants.FAMILY_DELETE_QUALIFIER, familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY); } else { for (Cell cell : familyCells) { deleteMarkers.addColumn(family, CellUtil.cloneQualifier(cell), cell.getTimestamp(), HConstants.EMPTY_BYTE_ARRAY); } } } for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) { deleteMarkers.setAttribute(entry.getKey(), entry.getValue()); } e.getEnvironment().getRegion().put(deleteMarkers); // skip normal delete handling e.bypass(); }
Example 17
Source File: TestRegionServerAbort.java From hbase with Apache License 2.0 | 5 votes |
/** * Test that a regionserver is able to abort properly, even when a coprocessor * throws an exception in preStopRegionServer(). */ @Test public void testAbortFromRPC() throws Exception { TableName tableName = TableName.valueOf("testAbortFromRPC"); // create a test table Table table = testUtil.createTable(tableName, FAMILY_BYTES); // write some edits testUtil.loadTable(table, FAMILY_BYTES); LOG.info("Wrote data"); // force a flush cluster.flushcache(tableName); LOG.info("Flushed table"); // Send a poisoned put to trigger the abort Put put = new Put(new byte[]{0, 0, 0, 0}); put.addColumn(FAMILY_BYTES, Bytes.toBytes("c"), new byte[]{}); put.setAttribute(StopBlockingRegionObserver.DO_ABORT, new byte[]{1}); List<HRegion> regions = cluster.findRegionsForTable(tableName); HRegion firstRegion = cluster.findRegionsForTable(tableName).get(0); table.put(put); // Verify that the regionserver is stopped assertNotNull(firstRegion); assertNotNull(firstRegion.getRegionServerServices()); LOG.info("isAborted = " + firstRegion.getRegionServerServices().isAborted()); assertTrue(firstRegion.getRegionServerServices().isAborted()); LOG.info("isStopped = " + firstRegion.getRegionServerServices().isStopped()); assertTrue(firstRegion.getRegionServerServices().isStopped()); }
Example 18
Source File: TransactionProcessor.java From phoenix-tephra with Apache License 2.0 | 5 votes |
@Override public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit, Durability durability) throws IOException { // Translate deletes into our own delete tombstones // Since HBase deletes cannot be undone, we need to translate deletes into special puts, which allows // us to rollback the changes (by a real delete) if the transaction fails // Deletes that are part of a transaction rollback do not need special handling. // They will never be rolled back, so are performed as normal HBase deletes. if (isRollbackOperation(delete)) { return; } Transaction tx = getFromOperation(delete); ensureValidTxLifetime(e.getEnvironment(), delete, tx); // Other deletes are client-initiated and need to be translated into our own tombstones // TODO: this should delegate to the DeleteStrategy implementation. Put deleteMarkers = new Put(delete.getRow(), delete.getTimeStamp()); for (byte[] family : delete.getFamilyCellMap().keySet()) { List<Cell> familyCells = delete.getFamilyCellMap().get(family); if (isFamilyDelete(familyCells)) { deleteMarkers.add(family, TxConstants.FAMILY_DELETE_QUALIFIER, familyCells.get(0).getTimestamp(), HConstants.EMPTY_BYTE_ARRAY); } else { for (Cell cell : familyCells) { deleteMarkers.add(family, CellUtil.cloneQualifier(cell), cell.getTimestamp(), HConstants.EMPTY_BYTE_ARRAY); } } } for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) { deleteMarkers.setAttribute(entry.getKey(), entry.getValue()); } e.getEnvironment().getRegion().put(deleteMarkers); // skip normal delete handling e.bypass(); }
Example 19
Source File: TestVisibilityLabelsReplication.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Writes one row per label expression ("row1", "row2", ...) with a cell
 * visibility and a non-visibility attribute attached to each put.
 *
 * NOTE(review): the {@code tableName} parameter is ignored — the method always
 * writes to the TABLE_NAME constant; confirm this is intentional.
 *
 * @param tableName unused (writes go to TABLE_NAME)
 * @param labelExps one visibility label expression per row to write
 * @return the table handle the rows were written to
 */
static Table writeData(TableName tableName, String... labelExps) throws Exception {
  Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME);
  List<Put> puts = new ArrayList<>(labelExps.length);
  int rowIndex = 1;
  for (String labelExp : labelExps) {
    Put put = new Put(Bytes.toBytes("row" + rowIndex));
    put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
    put.setCellVisibility(new CellVisibility(labelExp));
    put.setAttribute(NON_VISIBILITY, Bytes.toBytes(TEMP));
    puts.add(put);
    rowIndex++;
  }
  table.put(puts);
  return table;
}
Example 20
Source File: TestTags.java From hbase with Apache License 2.0 | 4 votes |
@Test public void testTags() throws Exception { Table table = null; try { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); byte[] fam = Bytes.toBytes("info"); byte[] row = Bytes.toBytes("rowa"); // column names byte[] qual = Bytes.toBytes("qual"); byte[] row1 = Bytes.toBytes("rowb"); byte[] row2 = Bytes.toBytes("rowc"); TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor = new TableDescriptorBuilder.ModifyableTableDescriptor(tableName); ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor = new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(fam); familyDescriptor.setBlockCacheEnabled(true); familyDescriptor.setDataBlockEncoding(DataBlockEncoding.NONE); tableDescriptor.setColumnFamily(familyDescriptor); Admin admin = TEST_UTIL.getAdmin(); admin.createTable(tableDescriptor); byte[] value = Bytes.toBytes("value"); table = TEST_UTIL.getConnection().getTable(tableName); Put put = new Put(row); put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value); put.setAttribute("visibility", Bytes.toBytes("myTag")); table.put(put); admin.flush(tableName); // We are lacking an API for confirming flush request compaction. // Just sleep for a short time. We won't be able to confirm flush // completion but the test won't hang now or in the future if // default compaction policy causes compaction between flush and // when we go to confirm it. 
Thread.sleep(1000); Put put1 = new Put(row1); byte[] value1 = Bytes.toBytes("1000dfsdf"); put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1); // put1.setAttribute("visibility", Bytes.toBytes("myTag3")); table.put(put1); admin.flush(tableName); Thread.sleep(1000); Put put2 = new Put(row2); byte[] value2 = Bytes.toBytes("1000dfsdf"); put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2); put2.setAttribute("visibility", Bytes.toBytes("myTag3")); table.put(put2); admin.flush(tableName); Thread.sleep(1000); result(fam, row, qual, row2, table, value, value2, row1, value1); admin.compact(tableName); while (admin.getCompactionState(tableName) != CompactionState.NONE) { Thread.sleep(10); } result(fam, row, qual, row2, table, value, value2, row1, value1); } finally { if (table != null) { table.close(); } } }