Java Code Examples for org.apache.hadoop.hbase.client.HTable#flushCommits()
The following examples show how to use org.apache.hadoop.hbase.client.HTable#flushCommits().
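For orientation: flushCommits() sends whatever mutations are sitting in the client-side write buffer to the region servers. With auto-flush enabled (the default) every put is shipped immediately, so the call is effectively a no-op; it matters once setAutoFlush(false) is used to batch writes, which is why several examples below disable auto-flush first. In HBase 1.0 and later this API was deprecated in favor of BufferedMutator. The following is a minimal sketch of the common pattern against the pre-1.0 client API used by these examples; the table, family, and qualifier names are hypothetical placeholders.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.HTable;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.util.Bytes;

  Configuration conf = HBaseConfiguration.create();
  HTable table = new HTable(conf, "example_table"); // hypothetical table name
  table.setAutoFlush(false);   // buffer puts client-side instead of sending each one
  Put put = new Put(Bytes.toBytes("row1"));
  put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
  table.put(put);              // queued in the write buffer, not yet on the server
  table.flushCommits();        // push all buffered mutations to the region servers
  table.close();               // close() also flushes any remaining buffered writes

Note that close() flushes the buffer as well, so an explicit flushCommits() is mainly useful while the table stays open and the writes need to be visible immediately, as in the tests below that read an index table right after flushing.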
Example 1
Source File: IndexHandlerIT.java From phoenix with Apache License 2.0
@Test
public void testClientWritesWithPriority() throws Exception {
  Configuration conf = new Configuration(UTIL.getConfiguration());
  // add the keys for our rpc factory
  conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
    CountingIndexClientRpcFactory.class.getName());
  // and set the index table as the current table
  conf.setStrings(IndexQosRpcControllerFactory.INDEX_TABLE_NAMES_KEY,
    TestTable.getTableNameString());
  HTable table = new HTable(conf, TestTable.getTableName());

  // do a write to the table
  Put p = new Put(row);
  p.add(family, qual, new byte[] { 1, 0, 1, 0 });
  table.put(p);
  table.flushCommits();

  // check the counts on the rpc controller
  assertEquals("Didn't get the expected number of index priority writes!", 1,
    (int) CountingIndexClientRpcController.priorityCounts
      .get(QueryServicesOptions.DEFAULT_INDEX_MIN_PRIORITY));

  table.close();
}
Example 2
Source File: TestEndToEndCoveredIndexing.java From phoenix with BSD 3-Clause "New" or "Revised" License
@Test
public void testSimpleDeletes() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a simple Put
  long ts = 10;
  Put p = new Put(row1);
  p.add(FAM, indexed_qualifer, ts, value1);
  p.add(FAM, regular_qualifer, ts, value2);
  primary.put(p);
  primary.flushCommits();

  Delete d = new Delete(row1);
  primary.delete(d);

  HTable index = new HTable(UTIL.getConfiguration(), fam1.getTable());
  List<KeyValue> expected = Collections.<KeyValue>emptyList();
  // scan over all time should cause the delete to be covered
  IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, 0, Long.MAX_VALUE, value1,
    HConstants.EMPTY_END_ROW);

  // scan at the older timestamp should still show the older value
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts, value1);

  // cleanup
  closeAndCleanupTables(index, primary);
}
Example 3
Source File: HbaseLocalClusterIntegrationTest.java From hadoop-mini-clusters with Apache License 2.0
private static void putRow(String tableName, String colFamName, String rowKey,
    String colQualifier, String value, Configuration configuration) throws Exception {
  HTable table = new HTable(configuration, tableName);
  Put put = new Put(Bytes.toBytes(rowKey));
  put.add(Bytes.toBytes(colFamName), Bytes.toBytes(colQualifier), Bytes.toBytes(value));
  table.put(put);
  table.flushCommits();
  table.close();
}
Example 4
Source File: KnoxLocalClusterIntegrationTest.java From hadoop-mini-clusters with Apache License 2.0
private static void putRow(String tableName, String colFamName, String rowKey,
    String colQualifier, String value, Configuration configuration) throws Exception {
  HTable table = new HTable(configuration, tableName);
  Put put = new Put(Bytes.toBytes(rowKey));
  put.add(Bytes.toBytes(colFamName), Bytes.toBytes(colQualifier), Bytes.toBytes(value));
  table.put(put);
  table.flushCommits();
  table.close();
}
Example 5
Source File: EndToEndCoveredIndexingIT.java From phoenix with Apache License 2.0
/**
 * Test that a bunch of puts with a single timestamp across all the puts builds and inserts index
 * entries as expected
 * @throws Exception on failure
 */
@Test
public void testSimpleTimestampedUpdates() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts = 10;
  p.add(FAM, indexed_qualifer, ts, value1);
  p.add(FAM, regular_qualifer, ts, value2);
  primary.put(p);
  primary.flushCommits();

  // read the index for the expected values
  HTable index1 = new HTable(UTIL.getConfiguration(), getIndexTableName());

  // build the expected kvs
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);

  // verify that the index matches
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);

  // cleanup
  closeAndCleanupTables(primary, index1);
}
Example 6
Source File: EndToEndCoveredIndexingIT.java From phoenix with Apache License 2.0
/**
 * Test that the multiple timestamps in a single put build the correct index updates.
 * @throws Exception on failure
 */
@Test
public void testMultipleTimestampsInSinglePut() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts1 = 10;
  long ts2 = 11;
  p.add(FAM, indexed_qualifer, ts1, value1);
  p.add(FAM, regular_qualifer, ts1, value2);
  // our group indexes all columns in this family, so any qualifier here is ok
  p.add(FAM2, regular_qualifer, ts2, value3);
  primary.put(p);
  primary.flushCommits();

  // read the index for the expected values
  HTable index1 = new HTable(UTIL.getConfiguration(), getIndexTableName());

  // build the expected kvs
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));

  // check the first entry at ts1
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);

  // check the second entry at ts2
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // cleanup
  closeAndCleanupTables(primary, index1);
}
Example 7
Source File: TestEndToEndCoveredIndexing.java From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Test that we make updates to multiple {@link ColumnGroup}s across a single put/delete
 * @throws Exception on failure
 */
@Test
public void testMultipleConcurrentGroupsUpdated() throws Exception {
  HTable primary = createSetupTables(fam1, fam2);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts = 10;
  p.add(FAM, indexed_qualifer, ts, value1);
  p.add(FAM, regular_qualifer, ts, value2);
  p.add(FAM2, indexed_qualifer, ts, value3);
  primary.put(p);
  primary.flushCommits();

  // read the index for the expected values
  HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
  HTable index2 = new HTable(UTIL.getConfiguration(), fam2.getTable());

  // build the expected kvs
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);

  // and check the second index as well
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col3));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index2, expected, ts, value3);

  // cleanup
  closeAndCleanupTables(primary, index1, index2);
}
Example 8
Source File: EndToEndCoveredIndexingIT.java From phoenix with Apache License 2.0
@Test
public void testSimpleDeletes() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a simple Put
  long ts = 10;
  Put p = new Put(row1);
  p.add(FAM, indexed_qualifer, ts, value1);
  p.add(FAM, regular_qualifer, ts, value2);
  primary.put(p);
  primary.flushCommits();

  Delete d = new Delete(row1);
  primary.delete(d);

  HTable index = new HTable(UTIL.getConfiguration(), fam1.getTable());
  List<KeyValue> expected = Collections.<KeyValue>emptyList();
  // scan over all time should cause the delete to be covered
  IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, 0, Long.MAX_VALUE, value1,
    HConstants.EMPTY_END_ROW);

  // scan at the older timestamp should still show the older value
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts, value1);

  // cleanup
  closeAndCleanupTables(index, primary);
}
Example 9
Source File: HbaseUtil.java From DataLink with Apache License 2.0
@SuppressWarnings("rawtypes")
public static void startWrite(RecordReceiver lineReceiver, HTable table,
    Configuration configuration) {
  List<Map> columns = configuration.getList(Key.COLUMN, Map.class);
  Integer batchSize = configuration.getInt(Key.BATCH_SIZE, 100);
  boolean writeToWAL = configuration.getBool(Key.WRITE_TO_WAL, true);
  List<HbaseColumnCell> hbaseColumnCells = parseColumns(columns);
  try {
    Record record = null;
    List<Put> puts = new ArrayList<Put>();
    while ((record = lineReceiver.getFromReader()) != null) {
      puts.add(getPut(hbaseColumnCells, record, writeToWAL));
      // flush a full batch, then start collecting the next one
      if (puts.size() % batchSize == 0) {
        table.put(puts);
        table.flushCommits();
        puts.clear();
      }
    }
    // flush the final, partially filled batch
    if (!puts.isEmpty()) {
      table.put(puts);
      table.flushCommits();
    }
    table.close();
  } catch (Exception e) {
    String message = String.format(
      "IO exception while writing to hbase[%s]; please check that your network is working!",
      table.getName());
    LOG.error(message, e);
    ErrorRecord.addError(message + "->" + e.getMessage());
    throw DataXException.asDataXException(HBaseWriter98ErrorCode.WRITE_HBASE_IO_ERROR, e);
  }
}
Example 10
Source File: TestEndToEndCoveredIndexing.java From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Test that the multiple timestamps in a single put build the correct index updates.
 * @throws Exception on failure
 */
@Test
public void testMultipleTimestampsInSinglePut() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts1 = 10;
  long ts2 = 11;
  p.add(FAM, indexed_qualifer, ts1, value1);
  p.add(FAM, regular_qualifer, ts1, value2);
  // our group indexes all columns in this family, so any qualifier here is ok
  p.add(FAM2, regular_qualifer, ts2, value3);
  primary.put(p);
  primary.flushCommits();

  // read the index for the expected values
  HTable index1 = new HTable(UTIL.getConfiguration(), getIndexTableName());

  // build the expected kvs
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));

  // check the first entry at ts1
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);

  // check the second entry at ts2
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // cleanup
  closeAndCleanupTables(primary, index1);
}
Example 11
Source File: TestEndToEndCoveredIndexing.java From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Test that a bunch of puts with a single timestamp across all the puts builds and inserts index
 * entries as expected
 * @throws Exception on failure
 */
@Test
public void testSimpleTimestampedUpdates() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts = 10;
  p.add(FAM, indexed_qualifer, ts, value1);
  p.add(FAM, regular_qualifer, ts, value2);
  primary.put(p);
  primary.flushCommits();

  // read the index for the expected values
  HTable index1 = new HTable(UTIL.getConfiguration(), getIndexTableName());

  // build the expected kvs
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);

  // verify that the index matches
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);

  // cleanup
  closeAndCleanupTables(primary, index1);
}
Example 12
Source File: EndToEndCoveredIndexingIT.java From phoenix with Apache License 2.0
/**
 * Similar to {@link #testMultipleTimestampsInSinglePut()}, this checks the same behavior with
 * deletes.
 * @throws Exception on failure
 */
@Test
public void testMultipleTimestampsInSingleDelete() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts1 = 10, ts2 = 11, ts3 = 12;
  p.add(FAM, indexed_qualifer, ts1, value1);
  // our group indexes all columns in this family, so any qualifier here is ok
  p.add(FAM2, regular_qualifer, ts2, value3);
  primary.put(p);
  primary.flushCommits();

  // check to make sure everything we expect is there
  HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());

  // ts1, we just have v1
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);

  // at ts2, don't have the above anymore
  pairs.clear();
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts2 + 1, value1, value1);

  // but we do have the new entry at ts2
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // now build up a delete with a couple different timestamps
  Delete d = new Delete(row1);
  // these deletes have to match the exact ts since we are doing an exact match (deleteColumn).
  d.deleteColumn(FAM, indexed_qualifer, ts1);
  // since this doesn't match exactly, we actually shouldn't see a change in table state
  d.deleteColumn(FAM2, regular_qualifer, ts3);
  primary.delete(d);

  // at ts1, we should have the put covered exactly by the delete and into the entire future
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, Long.MAX_VALUE, value1,
    value1);

  // at ts2, we should just see value3
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // the later delete is a point delete, so we shouldn't see any change at ts3
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts3, value1,
    HConstants.EMPTY_END_ROW);

  // cleanup
  closeAndCleanupTables(primary, index1);
}
Example 13
Source File: TestEndToEndCoveredColumnsIndexBuilder.java From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Similar to {@link #testExpectedResultsInTableStateForSinglePut()}, but against batches of puts.
 * Previous implementations managed batches by playing current state against each element in the
 * batch, rather than combining all the per-row updates into a single mutation for the batch. This
 * test ensures that we see the correct expected state.
 * @throws Exception on failure
 */
@Test
public void testExpectedResultsInTableStateForBatchPuts() throws Exception {
  long ts = state.ts;
  // build up a list of puts to make, all on the same row
  Put p1 = new Put(row, ts);
  p1.add(family, qual, Bytes.toBytes("v1"));
  Put p2 = new Put(row, ts + 1);
  p2.add(family, qual, Bytes.toBytes("v2"));

  // setup all the verifiers we need. This is just the same as above, but will be called twice
  // since we need to iterate the batch.

  // get all the underlying kvs for the put
  final List<KeyValue> allKvs = new ArrayList<KeyValue>(2);
  allKvs.addAll(p2.getFamilyMap().get(family));
  allKvs.addAll(p1.getFamilyMap().get(family));

  // setup the verifier for the data we expect to write
  // both puts should be put into a single batch
  final ColumnReference familyRef =
    new ColumnReference(TestEndToEndCoveredColumnsIndexBuilder.family,
      ColumnReference.ALL_QUALIFIERS);
  VerifyingIndexCodec codec = state.codec;
  // no previous state in the table
  codec.verifiers.add(new ListMatchingVerifier("cleanup state 1",
    Collections.<KeyValue>emptyList(), familyRef));
  codec.verifiers.add(new ListMatchingVerifier("put state 1", p1.getFamilyMap().get(family),
    familyRef));
  codec.verifiers.add(new ListMatchingVerifier("cleanup state 2", p1.getFamilyMap().get(family),
    familyRef));
  // kvs from both puts should be in the table now
  codec.verifiers.add(new ListMatchingVerifier("put state 2", allKvs, familyRef));

  // do the actual put (no indexing will actually be done)
  HTable primary = state.table;
  primary.setAutoFlush(false);
  primary.put(Arrays.asList(p1, p2));
  primary.flushCommits();

  // cleanup after ourselves
  cleanup(state);
}
Example 14
Source File: HBaseWriter.java From hiped2 with Apache License 2.0
/**
 * The driver - reads stocks from a CSV file and writes them into HBase.
 *
 * @param args the command-line arguments
 * @return the process exit code
 * @throws Exception if something goes wrong
 */
public int run(final String[] args) throws Exception {
  Cli cli = Cli.builder().setArgs(args).addOptions(CliCommonOpts.InputFileOption.values()).build();
  int result = cli.runCmd();
  if (result != 0) {
    return result;
  }

  File inputFile = new File(cli.getArgValueAsString(CliCommonOpts.InputFileOption.INPUT));

  Configuration conf = HBaseConfiguration.create();
  createTableAndColumn(conf, STOCKS_TABLE_NAME, STOCK_DETAILS_COLUMN_FAMILY_AS_BYTES);

  HTable htable = new HTable(conf, STOCKS_TABLE_NAME);
  htable.setAutoFlush(false);
  htable.setWriteBufferSize(1024 * 1024 * 12);

  SpecificDatumWriter<Stock> writer = new SpecificDatumWriter<Stock>();
  writer.setSchema(Stock.SCHEMA$);

  ByteArrayOutputStream bao = new ByteArrayOutputStream();
  BinaryEncoder encoder = EncoderFactory.get().directBinaryEncoder(bao, null);

  for (Stock stock : AvroStockUtils.fromCsvFile(inputFile)) {
    writer.write(stock, encoder);
    encoder.flush();

    byte[] rowkey = Bytes.add(
      Bytes.toBytes(stock.getSymbol().toString()),
      Bytes.toBytes(stock.getDate().toString()));
    byte[] stockAsAvroBytes = bao.toByteArray();

    Put put = new Put(rowkey);
    put.add(STOCK_DETAILS_COLUMN_FAMILY_AS_BYTES, STOCK_COLUMN_QUALIFIER_AS_BYTES,
      stockAsAvroBytes);
    htable.put(put);
    bao.reset();
  }

  htable.flushCommits();
  htable.close();

  System.out.println("done");
  return 0;
}
Example 15
Source File: EndToEndCoveredColumnsIndexBuilderIT.java From phoenix with Apache License 2.0
/**
 * Similar to {@link #testExpectedResultsInTableStateForSinglePut()}, but against batches of puts.
 * Previous implementations managed batches by playing current state against each element in the
 * batch, rather than combining all the per-row updates into a single mutation for the batch. This
 * test ensures that we see the correct expected state.
 * @throws Exception on failure
 */
@SuppressWarnings("deprecation")
@Test
public void testExpectedResultsInTableStateForBatchPuts() throws Exception {
  long ts = state.ts;
  // build up a list of puts to make, all on the same row
  Put p1 = new Put(row, ts);
  p1.add(family, qual, Bytes.toBytes("v1"));
  Put p2 = new Put(row, ts + 1);
  p2.add(family, qual, Bytes.toBytes("v2"));

  // setup all the verifiers we need. This is just the same as above, but will be called twice
  // since we need to iterate the batch.

  // get all the underlying kvs for the put
  final List<Cell> allKvs = new ArrayList<Cell>(2);
  allKvs.addAll(p2.getFamilyCellMap().get(family));
  allKvs.addAll(p1.getFamilyCellMap().get(family));

  // setup the verifier for the data we expect to write
  // both puts should be put into a single batch
  final ColumnReference familyRef =
    new ColumnReference(EndToEndCoveredColumnsIndexBuilderIT.family,
      ColumnReference.ALL_QUALIFIERS);
  VerifyingIndexCodec codec = state.codec;
  // no previous state in the table
  codec.verifiers.add(new ListMatchingVerifier("cleanup state 1",
    Collections.<Cell>emptyList(), familyRef));
  codec.verifiers.add(new ListMatchingVerifier("put state 1", p1.getFamilyCellMap().get(family),
    familyRef));
  codec.verifiers.add(new ListMatchingVerifier("cleanup state 2", p1.getFamilyCellMap().get(family),
    familyRef));
  // kvs from both puts should be in the table now
  codec.verifiers.add(new ListMatchingVerifier("put state 2", allKvs, familyRef));

  // do the actual put (no indexing will actually be done)
  HTable primary = state.table;
  primary.setAutoFlush(false);
  primary.put(Arrays.asList(p1, p2));
  primary.flushCommits();

  // cleanup after ourselves
  cleanup(state);
}
Example 16
Source File: FailWithoutRetriesIT.java From phoenix with Apache License 2.0
/**
 * If this test times out, then we didn't fail quickly enough. {@link Indexer} maybe isn't
 * rethrowing the exception correctly?
 * <p>
 * We use a custom codec to enforce the thrown exception.
 * @throws Exception
 */
@Test(timeout = 300000)
public void testQuickFailure() throws Exception {
  // incorrectly setup indexing for the primary table - target index table doesn't exist, which
  // should quickly return to the client
  byte[] family = Bytes.toBytes("family");
  ColumnGroup fam1 = new ColumnGroup(getIndexTableName());
  // values are [col1]
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  // add the index family
  builder.addIndexGroup(fam1);
  // usually, we would create the index table here, but we don't for the sake of the test.

  // setup the primary table
  String primaryTable = Bytes.toString(table.getTableName());
  @SuppressWarnings("deprecation")
  HTableDescriptor pTable = new HTableDescriptor(primaryTable);
  pTable.addFamily(new HColumnDescriptor(family));
  // override the codec so we can use our test one
  builder.build(pTable, FailingTestCodec.class);

  // create the primary table
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.createTable(pTable);
  Configuration conf = new Configuration(UTIL.getConfiguration());
  // up the number of retries/wait time to make it obvious that we are failing with retries here
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 20);
  conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 1000);
  HTable primary = new HTable(conf, primaryTable);
  primary.setAutoFlush(false, true);

  // do a simple put that should be indexed
  Put p = new Put(Bytes.toBytes("row"));
  p.add(family, null, Bytes.toBytes("value"));
  primary.put(p);
  try {
    primary.flushCommits();
    fail("Shouldn't have gotten a successful write to the primary table");
  } catch (RetriesExhaustedWithDetailsException e) {
    LOG.info("Correctly got a failure of the put!");
  }
  primary.close();
}
Example 17
Source File: EndToEndCoveredIndexingIT.java From phoenix with Apache License 2.0
/**
 * Covering deletes (via {@link Delete#deleteColumns}) cover everything back in time from the
 * given time. If it's modifying the latest state, we don't need to do anything but add deletes.
 * If it's modifying back-in-time state, we need to just fix up the surrounding elements, as
 * anything else ahead of it will be fixed up by later updates.
 * <p>
 * Similar to {@link #testMultipleTimestampsInSingleDelete()}, but with covering deletes.
 * @throws Exception on failure
 */
@Test
public void testDeleteColumnsInThePast() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts1 = 10, ts2 = 11, ts3 = 12;
  p.add(FAM, indexed_qualifer, ts1, value1);
  p.add(FAM2, regular_qualifer, ts2, value3);
  primary.put(p);
  primary.flushCommits();

  // now build up a delete with a couple different timestamps
  Delete d = new Delete(row1);
  // these deletes don't need to match the exact ts because they cover everything earlier
  d.deleteColumns(FAM, indexed_qualifer, ts2);
  d.deleteColumns(FAM2, regular_qualifer, ts3);
  primary.delete(d);

  // read the index for the expected values
  HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());

  // build the expected kvs
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));

  // check the first entry at ts1
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);

  // delete at ts2 changes what the put would insert
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // final delete clears out everything
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value1);

  // cleanup
  closeAndCleanupTables(primary, index1);
}
Example 18
Source File: TestFailWithoutRetries.java From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * If this test times out, then we didn't fail quickly enough. {@link Indexer} maybe isn't
 * rethrowing the exception correctly?
 * <p>
 * We use a custom codec to enforce the thrown exception.
 * @throws Exception
 */
@Test(timeout = 300000)
public void testQuickFailure() throws Exception {
  // incorrectly setup indexing for the primary table - target index table doesn't exist, which
  // should quickly return to the client
  byte[] family = Bytes.toBytes("family");
  ColumnGroup fam1 = new ColumnGroup(getIndexTableName());
  // values are [col1]
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  // add the index family
  builder.addIndexGroup(fam1);
  // usually, we would create the index table here, but we don't for the sake of the test.

  // setup the primary table
  String primaryTable = Bytes.toString(table.getTableName());
  HTableDescriptor pTable = new HTableDescriptor(primaryTable);
  pTable.addFamily(new HColumnDescriptor(family));
  // override the codec so we can use our test one
  builder.build(pTable, FailingTestCodec.class);

  // create the primary table
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.createTable(pTable);
  Configuration conf = new Configuration(UTIL.getConfiguration());
  // up the number of retries/wait time to make it obvious that we are failing with retries here
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 20);
  conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 1000);
  HTable primary = new HTable(conf, primaryTable);
  primary.setAutoFlush(false, true);

  // do a simple put that should be indexed
  Put p = new Put(Bytes.toBytes("row"));
  p.add(family, null, Bytes.toBytes("value"));
  primary.put(p);
  try {
    primary.flushCommits();
    fail("Shouldn't have gotten a successful write to the primary table");
  } catch (RetriesExhaustedWithDetailsException e) {
    LOG.info("Correctly got a failure of the put!");
  }
  primary.close();
}
Example 19
Source File: HBaseStore.java From attic-apex-malhar with Apache License 2.0
protected void flushTable(HTable table) throws InterruptedIOException,
    RetriesExhaustedWithDetailsException {
  table.flushCommits();
}
Example 20
Source File: TestEndToEndCoveredIndexing.java From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Covering deletes (via {@link Delete#deleteColumns}) cover everything back in time from the
 * given time. If it's modifying the latest state, we don't need to do anything but add deletes.
 * If it's modifying back-in-time state, we need to just fix up the surrounding elements, as
 * anything else ahead of it will be fixed up by later updates.
 * <p>
 * Similar to {@link #testMultipleTimestampsInSingleDelete()}, but with covering deletes.
 * @throws Exception on failure
 */
@Test
public void testDeleteColumnsInThePast() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts1 = 10, ts2 = 11, ts3 = 12;
  p.add(FAM, indexed_qualifer, ts1, value1);
  p.add(FAM2, regular_qualifer, ts2, value3);
  primary.put(p);
  primary.flushCommits();

  // now build up a delete with a couple different timestamps
  Delete d = new Delete(row1);
  // these deletes don't need to match the exact ts because they cover everything earlier
  d.deleteColumns(FAM, indexed_qualifer, ts2);
  d.deleteColumns(FAM2, regular_qualifer, ts3);
  primary.delete(d);

  // read the index for the expected values
  HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());

  // build the expected kvs
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));

  // check the first entry at ts1
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);

  // delete at ts2 changes what the put would insert
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // final delete clears out everything
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value1);

  // cleanup
  closeAndCleanupTables(primary, index1);
}