org.apache.hadoop.hbase.client.Increment Java Examples
The following examples show how to use
org.apache.hadoop.hbase.client.Increment.
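All of the examples below follow the same basic pattern: build an Increment for a row key, add one or more (family, qualifier, amount) columns, and submit it through a table handle. As orientation before the project examples, here is a minimal, self-contained sketch of that pattern against the Connection/Table client API. The table name "t", row "r", family "cf", and qualifier "q" are placeholder values chosen for illustration, not taken from any example below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("t"))) {
      // Atomically add 1 to the counter stored at row "r", column cf:q.
      Increment increment = new Increment(Bytes.toBytes("r"));
      increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), 1L);
      Result result = table.increment(increment);
      // The returned Result carries the post-increment value of each updated cell.
      long newValue = Bytes.toLong(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q")));
      System.out.println("counter is now " + newValue);
    }
  }
}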
Example #1
Source File: BulkIncrementerTest.java From pinpoint with Apache License 2.0
@Override
public Map<TableName, List<Increment>> call() {
  Map<TableName, List<Increment>> resultMap = new HashMap<>();
  try {
    do {
      flushToMap(resultMap);
    } while (!awaitLatch.await(10L, TimeUnit.MILLISECONDS));
    flushToMap(resultMap);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    return Collections.emptyMap();
  } finally {
    completeLatch.countDown();
  }
  return resultMap;
}
Example #2
Source File: Sequence.java From phoenix with BSD 3-Clause "New" or "Revised" License
public Increment newIncrement(long timestamp) {
  Increment inc = new Increment(SchemaUtil.getSequenceKey(key.getTenantId(),
      key.getSchemaName(), key.getSequenceName()));
  // It doesn't matter what we set the amount to - we always use the values we get
  // from the Get we do to prevent any race conditions. All columns that get added
  // are returned with their current value
  try {
    inc.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, timestamp);
  } catch (IOException e) {
    throw new RuntimeException(e); // Impossible
  }
  for (KeyValue kv : SEQUENCE_KV_COLUMNS) {
    // We don't care about the amount, as we'll add what gets looked up on the server-side
    inc.addColumn(kv.getFamily(), kv.getQualifier(), AMOUNT);
  }
  return inc;
}
Example #3
Source File: BasicFraudHBaseService.java From hadoop-arch-book with Apache License 2.0
public void updateProfileCountsForSaleInHBase(Long buyerId, Long sellerId, ItemSaleEvent event)
    throws IOException, InterruptedException {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  ArrayList<Row> actions = new ArrayList<Row>();

  Increment buyerValueIncrement = new Increment(generateProfileRowKey(buyerId));
  buyerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
      DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL, event.getItemValue());
  buyerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
      DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, event.getItemValue());
  actions.add(buyerValueIncrement);

  Increment sellerValueIncrement = new Increment(generateProfileRowKey(sellerId));
  sellerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
      DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL, event.getItemValue());
  sellerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
      DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, event.getItemValue());
  actions.add(sellerValueIncrement);

  profileTable.batch(actions);
}
Example #4
Source File: Sequence.java From phoenix with Apache License 2.0
@SuppressWarnings("deprecation")
public Increment newIncrement(long timestamp, Sequence.ValueOp action) {
  Increment inc = new Increment(key.getKey());
  // It doesn't matter what we set the amount to - we always use the values we get
  // from the Get we do to prevent any race conditions. All columns that get added
  // are returned with their current value
  try {
    inc.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, timestamp);
  } catch (IOException e) {
    throw new RuntimeException(e); // Impossible
  }
  for (KeyValue kv : SEQUENCE_KV_COLUMNS) {
    // We don't care about the amount, as we'll add what gets looked up on the server-side
    inc.addColumn(kv.getFamily(), kv.getQualifier(), action.ordinal());
  }
  return inc;
}
Example #5
Source File: BasicFraudHBaseService.java From hadoop-arch-book with Apache License 2.0
public void logInProfileInHBase(long userId, String ipAddress) throws IOException, Exception {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  ArrayList<Row> actions = new ArrayList<Row>();

  byte[] profileRowKey = generateProfileRowKey(userId);

  Delete delete = new Delete(profileRowKey);
  delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
      DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL);
  delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
      DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL);
  actions.add(delete);

  Increment increment = new Increment(profileRowKey);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
      DataModelConsts.LOG_IN_COUNT_COL, 1);
  actions.add(increment);

  Put put = new Put(profileRowKey);
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL,
      Bytes.toBytes(System.currentTimeMillis()));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES,
      Bytes.toBytes(ipAddress));
  actions.add(put);

  profileTable.batch(actions);
}
Example #6
Source File: BasicFraudHBaseService.java From hadoop-arch-book with Apache License 2.0
@Override
public void createProfile(long userId, ProfilePojo pojo, String ipAddress) throws Exception {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  ArrayList<Row> actions = new ArrayList<Row>();

  byte[] rowKey = generateProfileRowKey(userId);

  Put put = new Put(rowKey);
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.FIXED_INFO_COL,
      Bytes.toBytes(pojo.getUsername() + "|" + pojo.getAge() + "|" + System.currentTimeMillis()));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES,
      Bytes.toBytes(ipAddress));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL,
      Bytes.toBytes(System.currentTimeMillis()));
  actions.add(put);

  Increment increment = new Increment(rowKey);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_COUNT_COL, 1);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_SELLS_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_PURCHASES_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_PURCHASES_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL, 0);
  actions.add(increment);

  profileTable.batch(actions);
}
Example #7
Source File: HBaseStorage.java From cantor with Apache License 2.0
/**
 * @return the value after the increment (HBase returns the post-increment cell value)
 */
@Override
public Optional<Long> incrementAndGet(long category, long ts, long range) {
  String tbl = String.format(TABLE_FMT, category % TABLE_COUNT);
  Table table = tableConnections.get(tbl);
  try {
    Increment increment = new Increment(Bytes.toBytes(String.format(ROW_KEY_FMT, ts)));
    increment.setTTL(ttl);
    byte[] col = Bytes.toBytes(String.valueOf(category));
    increment.addColumn(SERVICE_FAMILY, col, range);
    Result result = table.increment(increment);
    Long afterInc = Bytes.toLong(result.getValue(SERVICE_FAMILY, col));
    return Optional.of(afterInc);
  } catch (Exception e) {
    if (log.isErrorEnabled())
      log.error("increment range value failed for [ category: {} ] [ timestamp {} ] [ range {} ]",
          category, ts, range, e);
    return Optional.empty();
  }
}
Example #8
Source File: TestWALMonotonicallyIncreasingSeqId.java From hbase with Apache License 2.0
@Override
public void run() {
  try {
    for (int i = 0; i < 100; i++) {
      byte[] row = Bytes.toBytes("incrementRow" + i);
      Increment inc = new Increment(row);
      inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes(0), 1);
      // inc.setDurability(Durability.ASYNC_WAL);
      region.increment(inc);
      latch.countDown();
      Thread.sleep(10);
    }
  } catch (Throwable t) {
    LOG.warn("Error happened when Put: ", t);
  }
}
Example #9
Source File: ThriftUtilities.java From hbase with Apache License 2.0
public static Increment incrementFromThrift(TIncrement in) throws IOException {
  Increment out = new Increment(in.getRow());
  for (TColumnIncrement column : in.getColumns()) {
    out.addColumn(column.getFamily(), column.getQualifier(), column.getAmount());
  }
  if (in.isSetAttributes()) {
    addAttributes(out, in.getAttributes());
  }
  if (in.isSetDurability()) {
    out.setDurability(durabilityFromThrift(in.getDurability()));
  }
  if (in.getCellVisibility() != null) {
    out.setCellVisibility(new CellVisibility(in.getCellVisibility().getExpression()));
  }
  if (in.isSetReturnResults()) {
    out.setReturnResults(in.isReturnResults());
  }
  return out;
}
Example #10
Source File: WriteHeavyIncrementObserver.java From hbase with Apache License 2.0
@Override
public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> c, Increment increment)
    throws IOException {
  byte[] row = increment.getRow();
  Put put = new Put(row);
  long ts = getUniqueTimestamp(row);
  for (Map.Entry<byte[], List<Cell>> entry : increment.getFamilyCellMap().entrySet()) {
    for (Cell cell : entry.getValue()) {
      put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row)
          .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())
          .setQualifier(cell.getQualifierArray(), cell.getQualifierOffset(),
              cell.getQualifierLength())
          .setValue(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())
          .setType(Cell.Type.Put).setTimestamp(ts).build());
    }
  }
  c.getEnvironment().getRegion().put(put);
  c.bypass();
  return Result.EMPTY_RESULT;
}
Example #11
Source File: RowKeyMerge.java From pinpoint with Apache License 2.0
public Map<TableName, List<Increment>> createBulkIncrement(Map<RowInfo, Long> data,
    RowKeyDistributorByHashPrefix rowKeyDistributorByHashPrefix) {
  if (data.isEmpty()) {
    return Collections.emptyMap();
  }
  final Map<TableName, List<Increment>> tableIncrementMap = new HashMap<>();
  final Map<TableName, Map<RowKey, List<ColumnName>>> tableRowKeyMap = mergeRowKeys(data);

  for (Map.Entry<TableName, Map<RowKey, List<ColumnName>>> tableRowKeys : tableRowKeyMap.entrySet()) {
    final TableName tableName = tableRowKeys.getKey();
    final List<Increment> incrementList = new ArrayList<>();
    for (Map.Entry<RowKey, List<ColumnName>> rowKeyEntry : tableRowKeys.getValue().entrySet()) {
      Increment increment = createIncrement(rowKeyEntry, rowKeyDistributorByHashPrefix);
      incrementList.add(increment);
    }
    tableIncrementMap.put(tableName, incrementList);
  }
  return tableIncrementMap;
}
Example #12
Source File: ThriftHBaseServiceHandler.java From hbase with Apache License 2.0
@Override
public void increment(TIncrement tincrement) throws IOError, TException {
  if (tincrement.getRow().length == 0 || tincrement.getTable().length == 0) {
    throw new TException("Must supply a table and a row key; can't increment");
  }

  if (conf.getBoolean(COALESCE_INC_KEY, false)) {
    this.coalescer.queueIncrement(tincrement);
    return;
  }

  Table table = null;
  try {
    table = getTable(tincrement.getTable());
    Increment inc = ThriftUtilities.incrementFromThrift(tincrement);
    table.increment(inc);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw getIOError(e);
  } finally {
    closeTable(table);
  }
}
Example #13
Source File: TestCellACLWithMultipleVersions.java From hbase with Apache License 2.0
private void verifyUserDeniedForIncrementMultipleVersions(final User user, final byte[] row,
    final byte[] q1) throws IOException, InterruptedException {
  user.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        try (Table t = connection.getTable(testTable.getTableName())) {
          Increment inc = new Increment(row);
          inc.setTimeRange(0, 127);
          inc.addColumn(TEST_FAMILY1, q1, 2L);
          t.increment(inc);
          fail(user.getShortName() + " cannot do the increment.");
        } catch (Exception e) {
          // expected: the increment should be denied for this user
        }
      }
      return null;
    }
  });
}
Example #14
Source File: TestVisibilityLabels.java From hbase with Apache License 2.0
@Test
public void testLabelsWithIncrement() throws Throwable {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = TEST_UTIL.createTable(tableName, fam)) {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] val = Bytes.toBytes(1L);
    Put put = new Put(row1);
    put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, val);
    put.setCellVisibility(new CellVisibility(SECRET + " & " + CONFIDENTIAL));
    table.put(put);
    Get get = new Get(row1);
    get.setAuthorizations(new Authorizations(SECRET));
    Result result = table.get(get);
    assertTrue(result.isEmpty());
    table.incrementColumnValue(row1, fam, qual, 2L);
    result = table.get(get);
    assertTrue(result.isEmpty());
    Increment increment = new Increment(row1);
    increment.addColumn(fam, qual, 2L);
    increment.setCellVisibility(new CellVisibility(SECRET));
    table.increment(increment);
    result = table.get(get);
    assertTrue(!result.isEmpty());
  }
}
Example #15
Source File: RegionCoprocessorHost.java From hbase with Apache License 2.0
/**
 * Supports Coprocessor 'bypass'.
 * @param increment increment object
 * @return result to return to client if default operation should be bypassed, null otherwise
 * @throws IOException if an error occurred on the coprocessor
 */
public Result preIncrementAfterRowLock(final Increment increment) throws IOException {
  boolean bypassable = true;
  Result defaultResult = null;
  if (coprocEnvironments.isEmpty()) {
    return defaultResult;
  }
  return execOperationWithResult(
      new ObserverOperationWithResult<RegionObserver, Result>(regionObserverGetter,
          defaultResult, bypassable) {
        @Override
        public Result call(RegionObserver observer) throws IOException {
          return observer.preIncrementAfterRowLock(this, increment);
        }
      });
}
Example #16
Source File: RegionCoprocessorHost.java From hbase with Apache License 2.0
/**
 * Supports Coprocessor 'bypass'.
 * @param increment increment object
 * @return result to return to client if default operation should be bypassed, null otherwise
 * @throws IOException if an error occurred on the coprocessor
 */
public Result preIncrement(final Increment increment) throws IOException {
  boolean bypassable = true;
  Result defaultResult = null;
  if (coprocEnvironments.isEmpty()) {
    return defaultResult;
  }
  return execOperationWithResult(
      new ObserverOperationWithResult<RegionObserver, Result>(regionObserverGetter,
          defaultResult, bypassable) {
        @Override
        public Result call(RegionObserver observer) throws IOException {
          return observer.preIncrement(this, increment);
        }
      });
}
Example #17
Source File: RSRpcServices.java From hbase with Apache License 2.0
private static Get toGet(final Mutation mutation) throws IOException {
  if (!(mutation instanceof Increment) && !(mutation instanceof Append)) {
    throw new AssertionError("mutation must be an instance of Increment or Append");
  }
  Get get = new Get(mutation.getRow());
  CellScanner cellScanner = mutation.cellScanner();
  // Add every column of the mutation to the Get; advance() returns true while cells remain.
  while (cellScanner.advance()) {
    Cell cell = cellScanner.current();
    get.addColumn(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell));
  }
  if (mutation instanceof Increment) {
    // Increment
    Increment increment = (Increment) mutation;
    get.setTimeRange(increment.getTimeRange().getMin(), increment.getTimeRange().getMax());
  } else {
    // Append
    Append append = (Append) mutation;
    get.setTimeRange(append.getTimeRange().getMin(), append.getTimeRange().getMax());
  }
  for (Entry<String, byte[]> entry : mutation.getAttributesMap().entrySet()) {
    get.setAttribute(entry.getKey(), entry.getValue());
  }
  return get;
}
Example #18
Source File: MultiThreadedUpdaterWithACL.java From hbase with Apache License 2.0
@Override
public Object run() throws Exception {
  try {
    if (table == null) {
      table = connection.getTable(tableName);
    }
    if (m instanceof Increment) {
      table.increment((Increment) m);
    } else if (m instanceof Append) {
      table.append((Append) m);
    } else if (m instanceof Put) {
      table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenPut((Put) m);
    } else if (m instanceof Delete) {
      table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenDelete((Delete) m);
    } else {
      throw new IllegalArgumentException("unsupported mutation " + m.getClass().getSimpleName());
    }
    totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
  } catch (IOException e) {
    recordFailure(m, keyBase, start, e);
  }
  return null;
}
Example #19
Source File: BaseEntityMapper.java From kite with Apache License 2.0
@Override
public Increment mapToIncrement(PartitionKey key, String fieldName, long amount) {
  FieldMapping fieldMapping = entitySchema.getColumnMappingDescriptor()
      .getFieldMapping(fieldName);
  if (fieldMapping == null) {
    throw new DatasetException("Unknown field in the schema: " + fieldName);
  }
  if (fieldMapping.getMappingType() != MappingType.COUNTER) {
    throw new DatasetException("Field is not a counter type: " + fieldName);
  }

  byte[] keyBytes;
  if (keySerDe == null) {
    keyBytes = new byte[] { (byte) 0 };
  } else {
    keyBytes = keySerDe.serialize(key);
  }
  Increment increment = new Increment(keyBytes);
  increment.addColumn(fieldMapping.getFamily(), fieldMapping.getQualifier(), amount);
  return increment;
}
Example #20
Source File: TestPostIncrementAndAppendBeforeWAL.java From hbase with Apache License 2.0
@Test
public void testChangeCellWithDifferntColumnFamily() throws Exception {
  TableName tableName = TableName.valueOf(name.getMethodName());
  createTableWithCoprocessor(tableName,
      ChangeCellWithDifferntColumnFamilyObserver.class.getName());

  try (Table table = connection.getTable(tableName)) {
    Increment increment = new Increment(ROW).addColumn(CF1_BYTES, CQ1, 1);
    table.increment(increment);
    Get get = new Get(ROW).addColumn(CF2_BYTES, CQ1);
    Result result = table.get(get);
    assertEquals(1, result.size());
    assertEquals(1, Bytes.toLong(result.getValue(CF2_BYTES, CQ1)));

    Append append = new Append(ROW).addColumn(CF1_BYTES, CQ2, VALUE);
    table.append(append);
    get = new Get(ROW).addColumn(CF2_BYTES, CQ2);
    result = table.get(get);
    assertEquals(1, result.size());
    assertTrue(Bytes.equals(VALUE, result.getValue(CF2_BYTES, CQ2)));
  }
}
Example #21
Source File: Indexer.java From phoenix with Apache License 2.0
/**
 * We use an Increment to serialize the ON DUPLICATE KEY clause so that the HBase plumbing
 * sets up the necessary locks and mvcc to allow an atomic update. The Increment is not a
 * real increment, though, it's really more of a Put. We translate the Increment into a
 * list of mutations, at most a single Put and Delete that are the changes upon executing
 * the list of ON DUPLICATE KEY clauses for this row.
 */
@Override
public Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> e,
    final Increment inc) throws IOException {
  long start = EnvironmentEdgeManager.currentTimeMillis();
  try {
    List<Mutation> mutations = this.builder.executeAtomicOp(inc);
    if (mutations == null) {
      return null;
    }
    // Causes the Increment to be ignored as we're committing the mutations
    // ourselves below.
    e.bypass();
    // ON DUPLICATE KEY IGNORE will return empty list if row already exists
    // as no action is required in that case.
    if (!mutations.isEmpty()) {
      Region region = e.getEnvironment().getRegion();
      // Otherwise, submit the mutations directly here
      region.batchMutate(mutations.toArray(new Mutation[0]));
    }
    return Result.EMPTY_RESULT;
  } catch (Throwable t) {
    throw ServerUtil.createIOException(
        "Unable to process ON DUPLICATE IGNORE for "
            + e.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString()
            + "(" + Bytes.toStringBinary(inc.getRow()) + ")", t);
  } finally {
    long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
    if (duration >= slowIndexPrepareThreshold) {
      if (LOGGER.isDebugEnabled()) {
        LOGGER.debug(getCallTooSlowMessage("preIncrementAfterRowLock", duration,
            slowPreIncrementThreshold));
      }
      metricSource.incrementSlowDuplicateKeyCheckCalls();
    }
    metricSource.updateDuplicateKeyCheckTime(duration);
  }
}
Example #22
Source File: BulkIncrementerTest.java From pinpoint with Apache License 2.0
@Test
public void singleTableConcurrent() throws Exception {
  // Given
  TableName tableA = TableName.valueOf("A");
  TestDataSet testDataSetA_0_0 = new TestDataSet(tableA, 0, 0, 1000000);
  TestDataSet testDataSetA_0_1 = new TestDataSet(tableA, 0, 1, 1000001);
  List<TestData> testDatas = new ArrayList<>();
  testDatas.addAll(testDataSetA_0_0.getTestDatas());
  testDatas.addAll(testDataSetA_0_1.getTestDatas());
  Collections.shuffle(testDatas);

  // When
  final int numIncrementers = 16;
  List<List<TestData>> testDataPartitions =
      Lists.partition(testDatas, testDatas.size() / (numIncrementers - 1));
  final CountDownLatch completeLatch = new CountDownLatch(testDataPartitions.size());
  final CountDownLatch flusherLatch = new CountDownLatch(1);
  FutureTask<Map<TableName, List<Increment>>> flushTask =
      new FutureTask<>(new Flusher(completeLatch, flusherLatch));
  new Thread(flushTask, "Flusher").start();
  int counter = 0;
  for (List<TestData> testDataPartition : testDataPartitions) {
    Incrementer incrementer = new Incrementer(completeLatch, testDataPartition);
    new Thread(incrementer, "Incrementer-" + counter++).start();
  }
  flusherLatch.await(30L, TimeUnit.SECONDS);

  // Then
  Map<TableName, List<Increment>> incrementMap = flushTask.get(5L, TimeUnit.SECONDS);
  TestVerifier verifier = new TestVerifier(incrementMap);
  verifier.verify(testDataSetA_0_0);
  verifier.verify(testDataSetA_0_1);
}
Example #23
Source File: TestIncrementAndAppendWithNullResult.java From hbase with Apache License 2.0
private void testAppend(Increment inc) throws Exception {
  checkResult(table.increment(inc));
  List<Row> actions = Arrays.asList(inc, inc);
  Object[] results = new Object[actions.size()];
  table.batch(actions, results);
  checkResult(results);
}
Example #24
Source File: RowKeyMerge.java From pinpoint with Apache License 2.0
private Increment createIncrement(Map.Entry<RowKey, List<ColumnName>> rowKeyEntry,
    RowKeyDistributorByHashPrefix rowKeyDistributorByHashPrefix) {
  RowKey rowKey = rowKeyEntry.getKey();
  byte[] key = getRowKey(rowKey, rowKeyDistributorByHashPrefix);
  final Increment increment = new Increment(key);
  for (ColumnName columnName : rowKeyEntry.getValue()) {
    increment.addColumn(family, columnName.getColumnName(), columnName.getCallCount());
  }
  logger.trace("create increment row:{}, column:{}", rowKey, rowKeyEntry.getValue());
  return increment;
}
Example #25
Source File: TestRegionIncrement.java From hbase with Apache License 2.0
/**
 * Have each thread update its own Cell. Avoid contention with another thread.
 */
@Test
public void testUnContendedSingleCellIncrement() throws IOException, InterruptedException {
  final HRegion region = getRegion(TEST_UTIL.getConfiguration(),
      TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName()));
  long startTime = System.currentTimeMillis();
  try {
    SingleCellIncrementer[] threads = new SingleCellIncrementer[THREAD_COUNT];
    for (int i = 0; i < threads.length; i++) {
      byte[] rowBytes = Bytes.toBytes(i);
      Increment increment = new Increment(rowBytes);
      increment.addColumn(INCREMENT_BYTES, INCREMENT_BYTES, 1);
      threads[i] = new SingleCellIncrementer(i, INCREMENT_COUNT, region, increment);
    }
    for (int i = 0; i < threads.length; i++) {
      threads[i].start();
    }
    for (int i = 0; i < threads.length; i++) {
      threads[i].join();
    }
    RegionScanner regionScanner = region.getScanner(new Scan());
    List<Cell> cells = new ArrayList<>(THREAD_COUNT);
    while (regionScanner.next(cells)) continue;
    assertEquals(THREAD_COUNT, cells.size());
    long total = 0;
    for (Cell cell : cells) {
      total += Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
    }
    assertEquals(INCREMENT_COUNT * THREAD_COUNT, total);
  } finally {
    closeRegion(region);
    LOG.info(this.name.getMethodName() + " " + (System.currentTimeMillis() - startTime) + "ms");
  }
}
Example #26
Source File: TestAtomicOperation.java From hbase with Apache License 2.0
@Override
public void run() {
  for (int i = 0; i < numIncrements; i++) {
    try {
      Increment inc = new Increment(row);
      inc.addColumn(fam1, qual1, amount);
      inc.addColumn(fam1, qual2, amount * 2);
      inc.addColumn(fam2, qual3, amount * 3);
      inc.setDurability(Durability.ASYNC_WAL);
      Result result = region.increment(inc);
      if (result != null) {
        assertEquals(Bytes.toLong(result.getValue(fam1, qual1)) * 2,
            Bytes.toLong(result.getValue(fam1, qual2)));
        assertTrue(result.getValue(fam2, qual3) != null);
        assertEquals(Bytes.toLong(result.getValue(fam1, qual1)) * 3,
            Bytes.toLong(result.getValue(fam2, qual3)));
        assertEquals(Bytes.toLong(result.getValue(fam1, qual1)) * 2,
            Bytes.toLong(result.getValue(fam1, qual2)));
        long fam1Increment = Bytes.toLong(result.getValue(fam1, qual1)) * 3;
        long fam2Increment = Bytes.toLong(result.getValue(fam2, qual3));
        assertEquals("fam1=" + fam1Increment + ", fam2=" + fam2Increment,
            fam1Increment, fam2Increment);
      }
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
}
Example #27
Source File: ProtobufUtil.java From hbase with Apache License 2.0
public static MutationProto toMutation(final MutationType type, final Mutation mutation,
    MutationProto.Builder builder, long nonce) throws IOException {
  builder = getMutationBuilderAndSetCommonFields(type, mutation, builder);
  if (nonce != HConstants.NO_NONCE) {
    builder.setNonce(nonce);
  }
  if (type == MutationType.INCREMENT) {
    builder.setTimeRange(ProtobufUtil.toTimeRange(((Increment) mutation).getTimeRange()));
  }
  if (type == MutationType.APPEND) {
    builder.setTimeRange(ProtobufUtil.toTimeRange(((Append) mutation).getTimeRange()));
  }
  ColumnValue.Builder columnBuilder = ColumnValue.newBuilder();
  QualifierValue.Builder valueBuilder = QualifierValue.newBuilder();
  for (Map.Entry<byte[], List<Cell>> family : mutation.getFamilyCellMap().entrySet()) {
    columnBuilder.clear();
    columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey()));
    for (Cell cell : family.getValue()) {
      valueBuilder.clear();
      valueBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(
          cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()));
      valueBuilder.setValue(UnsafeByteOperations.unsafeWrap(
          cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
      valueBuilder.setTimestamp(cell.getTimestamp());
      if (type == MutationType.DELETE || (type == MutationType.PUT && CellUtil.isDelete(cell))) {
        KeyValue.Type keyValueType = KeyValue.Type.codeToType(cell.getTypeByte());
        valueBuilder.setDeleteType(toDeleteType(keyValueType));
      }
      columnBuilder.addQualifierValue(valueBuilder.build());
    }
    builder.addColumnValue(columnBuilder.build());
  }
  return builder.build();
}
Example #28
Source File: HBaseSink.java From mt-flume with Apache License 2.0
@Override
public Status process() throws EventDeliveryException {
  Status status = Status.READY;
  Channel channel = getChannel();
  Transaction txn = channel.getTransaction();
  List<Row> actions = new LinkedList<Row>();
  List<Increment> incs = new LinkedList<Increment>();
  txn.begin();
  long i = 0;
  for (; i < batchSize; i++) {
    Event event = channel.take();
    if (event == null) {
      status = Status.BACKOFF;
      if (i == 0) {
        sinkCounter.incrementBatchEmptyCount();
      } else {
        sinkCounter.incrementBatchUnderflowCount();
      }
      break;
    } else {
      serializer.initialize(event, columnFamily);
      actions.addAll(serializer.getActions());
      incs.addAll(serializer.getIncrements());
    }
  }
  if (i == batchSize) {
    sinkCounter.incrementBatchCompleteCount();
  }
  sinkCounter.addToEventDrainAttemptCount(i);
  putEventsAndCommit(actions, incs, txn);
  return status;
}
Example #29
Source File: IndexRegionObserver.java From phoenix with Apache License 2.0
/**
 * We use an Increment to serialize the ON DUPLICATE KEY clause so that the HBase plumbing
 * sets up the necessary locks and mvcc to allow an atomic update. The Increment is not a
 * real increment, though, it's really more of a Put. We translate the Increment into a
 * list of mutations, at most a single Put and Delete that are the changes upon executing
 * the list of ON DUPLICATE KEY clauses for this row.
 */
@Override
public Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> e,
    final Increment inc) throws IOException {
  long start = EnvironmentEdgeManager.currentTimeMillis();
  try {
    List<Mutation> mutations = this.builder.executeAtomicOp(inc);
    if (mutations == null) {
      return null;
    }
    // Causes the Increment to be ignored as we're committing the mutations
    // ourselves below.
    e.bypass();
    // ON DUPLICATE KEY IGNORE will return empty list if row already exists
    // as no action is required in that case.
    if (!mutations.isEmpty()) {
      Region region = e.getEnvironment().getRegion();
      // Otherwise, submit the mutations directly here
      region.batchMutate(mutations.toArray(new Mutation[0]));
    }
    return Result.EMPTY_RESULT;
  } catch (Throwable t) {
    throw ServerUtil.createIOException(
        "Unable to process ON DUPLICATE IGNORE for "
            + e.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString()
            + "(" + Bytes.toStringBinary(inc.getRow()) + ")", t);
  } finally {
    long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
    if (duration >= slowIndexPrepareThreshold) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(getCallTooSlowMessage("preIncrementAfterRowLock", duration,
            slowPreIncrementThreshold));
      }
      metricSource.incrementSlowDuplicateKeyCheckCalls();
    }
    metricSource.updateDuplicateKeyCheckTime(duration);
  }
}
Example #30
Source File: PhoenixIndexBuilder.java From phoenix with Apache License 2.0
private static List<Mutation> convertIncrementToPutInSingletonList(Increment inc) {
  byte[] rowKey = inc.getRow();
  Put put = new Put(rowKey);
  transferCells(inc, put);
  transferAttributes(inc, put);
  return Collections.<Mutation>singletonList(put);
}
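The transferCells and transferAttributes helpers referenced in Example #30 are not included in the excerpt. As a rough sketch of what such helpers could look like, using only standard HBase client calls; this is an assumption for illustration, not the actual Phoenix implementation:

// Hypothetical sketches, not the Phoenix source.
// Copy every cell of the Increment into the Put, preserving family, qualifier, and value.
private static void transferCells(Increment inc, Put put) throws IOException {
  for (Map.Entry<byte[], List<Cell>> entry : inc.getFamilyCellMap().entrySet()) {
    for (Cell cell : entry.getValue()) {
      put.addColumn(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
          CellUtil.cloneValue(cell));
    }
  }
}

// Copy the operation attributes of the Increment onto the Put.
private static void transferAttributes(Increment inc, Put put) {
  for (Map.Entry<String, byte[]> entry : inc.getAttributesMap().entrySet()) {
    put.setAttribute(entry.getKey(), entry.getValue());
  }
}

This mirrors what the javadoc in Examples #21 and #29 describes: the Increment here is not a real increment but a carrier for an atomic row update that HBase executes under the row lock.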