Java Code Examples for org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress#getOperation()
The following examples show how to use
org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress#getOperation().
Each example is taken from an open source project; the source file, project, and license are noted above the snippet.
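Every example below follows the same basic pattern: a region coprocessor hook receives the MiniBatchOperationInProgress, walks it with size(), and reads individual client mutations with getOperation(int). The following is a minimal orientation sketch of that pattern, assuming the HBase 2.x coprocessor API; the class name BatchInspectingObserver and its empty branches are illustrative placeholders, not taken from any of the projects below.

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;

// Hypothetical observer used only to illustrate the iteration pattern.
public class BatchInspectingObserver implements RegionCoprocessor, RegionObserver {

    @Override
    public Optional<RegionObserver> getRegionObserver() {
        return Optional.of(this);
    }

    @Override
    public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
            MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
        // Walk every operation currently in the mini-batch; getOperation(i) returns
        // the i-th client mutation (a Put, Delete, etc.).
        for (int i = 0; i < miniBatchOp.size(); i++) {
            Mutation m = miniBatchOp.getOperation(i);
            if (m instanceof Put) {
                // inspect or adjust the Put here
            } else if (m instanceof Delete) {
                // inspect or adjust the Delete here
            }
        }
    }
}

The real-world examples that follow build on this pattern to rewrite, group, veto, or index the batched mutations.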
Example 1
Source File: PhoenixTransactionalIndexer.java From phoenix with Apache License 2.0
private static Iterator<Mutation> getMutationIterator(final MiniBatchOperationInProgress<Mutation> miniBatchOp) {
    return new Iterator<Mutation>() {
        private int i = 0;

        @Override
        public boolean hasNext() {
            return i < miniBatchOp.size();
        }

        @Override
        public Mutation next() {
            return miniBatchOp.getOperation(i++);
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
Example 2
Source File: TestRegionObserverForAddingMutationsFromCoprocessors.java From hbase with Apache License 2.0
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    Mutation mut = miniBatchOp.getOperation(0);

    if (mut instanceof Delete) {
        List<Cell> cells = mut.getFamilyCellMap().get(test);
        Delete[] deletes = new Delete[] {
            // delete only 2 rows
            new Delete(row1).addColumns(test, dummy, cells.get(0).getTimestamp()),
            new Delete(row2).addColumns(test, dummy, cells.get(0).getTimestamp()),
        };
        LOG.info("Deleting:" + Arrays.toString(deletes));
        miniBatchOp.addOperationsFromCP(0, deletes);
    }
}
Example 3
Source File: TestRegionObserverForAddingMutationsFromCoprocessors.java From hbase with Apache License 2.0
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    Mutation mut = miniBatchOp.getOperation(0);

    if (mut instanceof Delete) {
        List<Cell> cells = mut.getFamilyCellMap().get(test);
        Delete[] deletes = new Delete[] {
            // delete only 2 rows
            new Delete(row1).addFamily(test, cells.get(0).getTimestamp()),
            new Delete(row2).addFamily(test, cells.get(0).getTimestamp()),
        };
        LOG.info("Deleting:" + Arrays.toString(deletes));
        miniBatchOp.addOperationsFromCP(0, deletes);
    }
}
Example 4
Source File: TestRegionObserverForAddingMutationsFromCoprocessors.java From hbase with Apache License 2.0
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    Mutation mut = miniBatchOp.getOperation(0);

    if (mut instanceof Delete) {
        List<Cell> cells = mut.getFamilyCellMap().get(test);
        Delete[] deletes = new Delete[] {
            // delete only 2 rows
            new Delete(row1, cells.get(0).getTimestamp()),
            new Delete(row2, cells.get(0).getTimestamp()),
        };
        LOG.info("Deleting:" + Arrays.toString(deletes));
        miniBatchOp.addOperationsFromCP(0, deletes);
    }
}
Example 5
Source File: IndexRegionObserver.java From phoenix with Apache License 2.0
private Collection<? extends Mutation> groupMutations(MiniBatchOperationInProgress<Mutation> miniBatchOp,
        BatchMutateContext context) throws IOException {
    context.multiMutationMap = new HashMap<>();
    for (int i = 0; i < miniBatchOp.size(); i++) {
        Mutation m = miniBatchOp.getOperation(i);
        // skip this mutation if we aren't enabling indexing
        // unfortunately, we really should ask if the raw mutation (rather than the combined mutation)
        // should be indexed, which means we need to expose another method on the builder. Such is the
        // way optimization go though.
        if (miniBatchOp.getOperationStatus(i) != IGNORE && this.builder.isEnabled(m)) {
            ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
            MultiMutation stored = context.multiMutationMap.get(row);
            if (stored == null) {
                // we haven't seen this row before, so add it
                stored = new MultiMutation(row);
                context.multiMutationMap.put(row, stored);
            }
            stored.addAll(m);
        }
    }
    return context.multiMutationMap.values();
}
Example 6
Source File: IndexRegionObserver.java From phoenix with Apache License 2.0
private void ignoreAtomicOperations(MiniBatchOperationInProgress<Mutation> miniBatchOp) {
    for (int i = 0; i < miniBatchOp.size(); i++) {
        Mutation m = miniBatchOp.getOperation(i);
        if (this.builder.isAtomicOp(m)) {
            miniBatchOp.setOperationStatus(i, IGNORE);
            continue;
        }
    }
}
Example 7
Source File: MutableIndexFailureIT.java From phoenix with Apache License 2.0
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    boolean throwException = false;
    if (FAIL_NEXT_WRITE) {
        throwException = true;
        FAIL_NEXT_WRITE = false;
    } else if (c.getEnvironment().getRegionInfo().getTable().getNameAsString()
            .endsWith("A_" + FAIL_INDEX_NAME) && FAIL_WRITE) {
        throwException = true;
        if (TOGGLE_FAIL_WRITE_FOR_RETRY) {
            FAIL_WRITE = !FAIL_WRITE;
        }
    } else {
        // When local index updates are atomic with data updates, testing a write failure to a local
        // index won't make sense.
        Mutation operation = miniBatchOp.getOperation(0);
        if (FAIL_WRITE) {
            Map<byte[], List<Cell>> cellMap = operation.getFamilyCellMap();
            for (Map.Entry<byte[], List<Cell>> entry : cellMap.entrySet()) {
                byte[] family = entry.getKey();
                if (Bytes.toString(family).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
                    int regionStartKeyLen = c.getEnvironment().getRegionInfo().getStartKey().length;
                    Cell firstCell = entry.getValue().get(0);
                    long indexId = MetaDataUtil.getViewIndexIdDataType().getCodec().decodeLong(
                            firstCell.getRowArray(), firstCell.getRowOffset() + regionStartKeyLen,
                            SortOrder.getDefault());
                    // Only throw for first local index as the test may have multiple local indexes
                    if (indexId == Short.MIN_VALUE) {
                        throwException = true;
                        break;
                    }
                }
            }
        }
    }
    if (throwException) {
        if (!TOGGLE_FAIL_WRITE_FOR_RETRY) {
            dropIndex(c);
        }
        throw new DoNotRetryIOException();
    }
}
Example 8
Source File: WALRecoveryRegionPostOpenIT.java From phoenix with Apache License 2.0
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> observerContext,
        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    if (observerContext.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString()
            .contains(INDEX_TABLE_NAME) && failIndexTableWrite) {
        throw new DoNotRetryIOException();
    }

    Mutation operation = miniBatchOp.getOperation(0);
    Set<byte[]> keySet = operation.getFamilyCellMap().keySet();
    for (byte[] family : keySet) {
        if (Bytes.toString(family).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)
                && failIndexTableWrite) {
            throw new DoNotRetryIOException();
        }
    }
    super.preBatchMutate(observerContext, miniBatchOp);
}
Example 9
Source File: IndexRegionObserver.java From phoenix with Apache License 2.0
/**
 * This method applies the pending put mutations on the next row states.
 * Before this method is called, the next row states is set to current row states.
 */
private void applyPendingPutMutations(MiniBatchOperationInProgress<Mutation> miniBatchOp,
        BatchMutateContext context, long now) throws IOException {
    for (Integer i = 0; i < miniBatchOp.size(); i++) {
        if (miniBatchOp.getOperationStatus(i) == IGNORE) {
            continue;
        }
        Mutation m = miniBatchOp.getOperation(i);
        // skip this mutation if we aren't enabling indexing
        if (!this.builder.isEnabled(m)) {
            continue;
        }
        // Unless we're replaying edits to rebuild the index, we update the time stamp
        // of the data table to prevent overlapping time stamps (which prevents index
        // inconsistencies as this case isn't handled correctly currently).
        setTimestamp(m, now);
        if (m instanceof Put) {
            ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr(m.getRow());
            Pair<Put, Put> dataRowState = context.dataRowStates.get(rowKeyPtr);
            if (dataRowState == null) {
                dataRowState = new Pair<Put, Put>(null, null);
                context.dataRowStates.put(rowKeyPtr, dataRowState);
            }
            Put nextDataRowState = dataRowState.getSecond();
            dataRowState.setSecond((nextDataRowState != null)
                    ? applyNew((Put) m, nextDataRowState)
                    : new Put((Put) m));
        }
    }
}
Example 10
Source File: IndexRegionObserver.java From phoenix with Apache License 2.0
private void populateRowsToLock(MiniBatchOperationInProgress<Mutation> miniBatchOp,
        BatchMutateContext context) {
    for (int i = 0; i < miniBatchOp.size(); i++) {
        if (miniBatchOp.getOperationStatus(i) == IGNORE) {
            continue;
        }
        Mutation m = miniBatchOp.getOperation(i);
        if (this.builder.isEnabled(m)) {
            ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
            if (!context.rowsToLock.contains(row)) {
                context.rowsToLock.add(row);
            }
        }
    }
}
Example 11
Source File: Indexer.java From phoenix with Apache License 2.0
@Override
public void postBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    if (this.disabled) {
        super.postBatchMutate(c, miniBatchOp);
        return;
    }
    this.builder.batchCompleted(miniBatchOp);

    // each batch operation, only the first one will have anything useful, so we can just grab that
    Mutation mutation = miniBatchOp.getOperation(0);
    WALEdit edit = miniBatchOp.getWalEdit(0);
    doPost(edit, mutation, mutation.getDurability());
}
Example 12
Source File: TestRegionObserverForAddingMutationsFromCoprocessors.java From hbase with Apache License 2.0
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    Mutation mut = miniBatchOp.getOperation(0);
    List<Cell> cells = mut.getFamilyCellMap().get(test);
    Put[] puts = new Put[] {
        new Put(row1).addColumn(test, dummy, cells.get(0).getTimestamp(), Bytes.toBytes("cpdummy")),
        new Put(row2).addColumn(test, dummy, cells.get(0).getTimestamp(), dummy),
        new Put(row3).addColumn(test, dummy, cells.get(0).getTimestamp(), dummy),
    };
    LOG.info("Putting:" + Arrays.toString(puts));
    miniBatchOp.addOperationsFromCP(0, puts);
}
Example 13
Source File: TestRegionObserverForAddingMutationsFromCoprocessors.java From hbase with Apache License 2.0
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    Mutation mut = miniBatchOp.getOperation(0);
    List<Cell> cells = mut.getFamilyCellMap().get(test);
    Put[] puts = new Put[] {
        new Put(Bytes.toBytes("cpPut"))
            .addColumn(test, dummy, cells.get(0).getTimestamp(), Bytes.toBytes("cpdummy"))
            .setTTL(mut.getTTL())
    };
    LOG.info("Putting:" + Arrays.toString(puts));
    miniBatchOp.addOperationsFromCP(0, puts);
}
Example 14
Source File: AccessController.java From hbase with Apache License 2.0
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
        TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
        User user = getActiveUser(c);
        for (int i = 0; i < miniBatchOp.size(); i++) {
            Mutation m = miniBatchOp.getOperation(i);
            if (m.getAttribute(CHECK_COVERING_PERM) != null) {
                // We have a failure with table, cf and q perm checks and now giving a chance for cell
                // perm check
                OpType opType;
                if (m instanceof Put) {
                    checkForReservedTagPresence(user, m);
                    opType = OpType.PUT;
                } else {
                    opType = OpType.DELETE;
                }
                AuthResult authResult = null;
                if (checkCoveringPermission(user, opType, c.getEnvironment(), m.getRow(),
                        m.getFamilyCellMap(), m.getTimestamp(), Action.WRITE)) {
                    authResult = AuthResult.allow(opType.toString(), "Covering cell set",
                            user, Action.WRITE, table, m.getFamilyCellMap());
                } else {
                    authResult = AuthResult.deny(opType.toString(), "Covering cell set",
                            user, Action.WRITE, table, m.getFamilyCellMap());
                }
                AccessChecker.logResult(authResult);
                if (authorizationEnabled && !authResult.isAllowed()) {
                    throw new AccessDeniedException("Insufficient permissions "
                            + authResult.toContextString());
                }
            }
        }
    }
}
Example 15
Source File: IndexRegionObserver.java From phoenix with Apache License 2.0
/**
 * This method applies pending delete mutations on the next row states
 */
private void applyPendingDeleteMutations(MiniBatchOperationInProgress<Mutation> miniBatchOp,
        BatchMutateContext context) throws IOException {
    for (int i = 0; i < miniBatchOp.size(); i++) {
        if (miniBatchOp.getOperationStatus(i) == IGNORE) {
            continue;
        }
        Mutation m = miniBatchOp.getOperation(i);
        if (!this.builder.isEnabled(m)) {
            continue;
        }
        if (!(m instanceof Delete)) {
            continue;
        }
        ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr(m.getRow());
        Pair<Put, Put> dataRowState = context.dataRowStates.get(rowKeyPtr);
        if (dataRowState == null) {
            dataRowState = new Pair<Put, Put>(null, null);
            context.dataRowStates.put(rowKeyPtr, dataRowState);
        }
        Put nextDataRowState = dataRowState.getSecond();
        if (nextDataRowState == null) {
            if (dataRowState.getFirst() == null) {
                // This is a delete row mutation on a non-existing row. There is no need to apply
                // this mutation on the data table
                miniBatchOp.setOperationStatus(i, NOWRITE);
            }
            continue;
        }
        for (List<Cell> cells : m.getFamilyCellMap().values()) {
            for (Cell cell : cells) {
                switch (KeyValue.Type.codeToType(cell.getTypeByte())) {
                    case DeleteFamily:
                    case DeleteFamilyVersion:
                        nextDataRowState.getFamilyCellMap().remove(CellUtil.cloneFamily(cell));
                        break;
                    case DeleteColumn:
                    case Delete:
                        removeColumn(nextDataRowState, cell);
                }
            }
        }
        if (nextDataRowState != null && nextDataRowState.getFamilyCellMap().size() == 0) {
            dataRowState.setSecond(null);
        }
    }
}
Example 16
Source File: PhoenixTransactionalIndexer.java From phoenix with Apache License 2.0
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    Mutation m = miniBatchOp.getOperation(0);
    if (!codec.isEnabled(m)) {
        return;
    }

    PhoenixIndexMetaData indexMetaData =
            new PhoenixIndexMetaDataBuilder(c.getEnvironment()).getIndexMetaData(miniBatchOp);
    if (indexMetaData.getClientVersion() >= MetaDataProtocol.MIN_TX_CLIENT_SIDE_MAINTENANCE
            && !indexMetaData.hasLocalIndexes()) { // Still generate index updates server side for local indexes
        return;
    }
    BatchMutateContext context = new BatchMutateContext(indexMetaData.getClientVersion());
    setBatchMutateContext(c, context);

    Collection<Pair<Mutation, byte[]>> indexUpdates = null;
    // get the current span, or just use a null-span to avoid a bunch of if statements
    try (TraceScope scope = Trace.startSpan("Starting to build index updates")) {
        Span current = scope.getSpan();
        if (current == null) {
            current = NullSpan.INSTANCE;
        }

        RegionCoprocessorEnvironment env = c.getEnvironment();
        PhoenixTransactionContext txnContext = indexMetaData.getTransactionContext();
        if (txnContext == null) {
            throw new NullPointerException("Expected to find transaction in metadata for "
                    + env.getRegionInfo().getTable().getNameAsString());
        }
        PhoenixTxIndexMutationGenerator generator = new PhoenixTxIndexMutationGenerator(
                env.getConfiguration(), indexMetaData, env.getRegionInfo().getTable().getName(),
                env.getRegionInfo().getStartKey(), env.getRegionInfo().getEndKey());

        try (Table htable = env.getConnection().getTable(env.getRegionInfo().getTable())) {
            // get the index updates for all elements in this batch
            indexUpdates = generator.getIndexUpdates(htable, getMutationIterator(miniBatchOp));
        }

        byte[] tableName = c.getEnvironment().getRegionInfo().getTable().getName();
        Iterator<Pair<Mutation, byte[]>> indexUpdatesItr = indexUpdates.iterator();
        List<Mutation> localUpdates = new ArrayList<Mutation>(indexUpdates.size());
        while (indexUpdatesItr.hasNext()) {
            Pair<Mutation, byte[]> next = indexUpdatesItr.next();
            if (Bytes.compareTo(next.getSecond(), tableName) == 0) {
                // These mutations will not go through the preDelete hooks, so we
                // must manually convert them here.
                Mutation mutation = TransactionUtil.convertIfDelete(next.getFirst());
                localUpdates.add(mutation);
                indexUpdatesItr.remove();
            }
        }
        if (!localUpdates.isEmpty()) {
            miniBatchOp.addOperationsFromCP(0, localUpdates.toArray(new Mutation[localUpdates.size()]));
        }
        if (!indexUpdates.isEmpty()) {
            context.indexUpdates = indexUpdates;
        }

        current.addTimelineAnnotation("Built index updates, doing preStep");
        TracingUtils.addAnnotation(current, "index update count", context.indexUpdates.size());
    } catch (Throwable t) {
        String msg = "Failed to update index with entries:" + indexUpdates;
        LOGGER.error(msg, t);
        ServerUtil.throwIOException(msg, t);
    }
}
Example 17
Source File: PhoenixIndexBuilder.java From phoenix with Apache License 2.0
@Override
public void batchStarted(MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    // The entire purpose of this method impl is to get the existing rows for the
    // table rows being indexed into the block cache, as the index maintenance code
    // does a point scan per row
    List<KeyRange> keys = Lists.newArrayListWithExpectedSize(miniBatchOp.size());
    Map<ImmutableBytesWritable, IndexMaintainer> maintainers =
            new HashMap<ImmutableBytesWritable, IndexMaintainer>();
    ImmutableBytesWritable indexTableName = new ImmutableBytesWritable();
    for (int i = 0; i < miniBatchOp.size(); i++) {
        Mutation m = miniBatchOp.getOperation(i);
        keys.add(PVarbinary.INSTANCE.getKeyRange(m.getRow()));
        List<IndexMaintainer> indexMaintainers = getCodec().getIndexMaintainers(m.getAttributesMap());

        for (IndexMaintainer indexMaintainer : indexMaintainers) {
            if (indexMaintainer.isImmutableRows() && indexMaintainer.isLocalIndex()) continue;
            indexTableName.set(indexMaintainer.getIndexTableName());
            if (maintainers.get(indexTableName) != null) continue;
            maintainers.put(indexTableName, indexMaintainer);
        }
    }
    if (maintainers.isEmpty()) return;
    Scan scan = IndexManagementUtil.newLocalStateScan(new ArrayList<IndexMaintainer>(maintainers.values()));
    ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA,
            Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN);
    scanRanges.initializeScan(scan);
    scan.setFilter(scanRanges.getSkipScanFilter());
    HRegion region = this.env.getRegion();
    RegionScanner scanner = region.getScanner(scan);
    // Run through the scanner using internal nextRaw method
    region.startRegionOperation();
    try {
        boolean hasMore;
        do {
            List<Cell> results = Lists.newArrayList();
            // Results are potentially returned even when the return value of s.next is false
            // since this is an indication of whether or not there are more values after the
            // ones returned
            hasMore = scanner.nextRaw(results);
        } while (hasMore);
    } finally {
        try {
            scanner.close();
        } finally {
            region.closeRegionOperation();
        }
    }
}