Java Code Examples for org.apache.hadoop.hbase.client.HTable#put()
The following examples show how to use org.apache.hadoop.hbase.client.HTable#put().
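Most of the examples below follow the same pattern: build an HTable for a table name, create a Put keyed by a row, add one or more family/qualifier/value cells, and pass the Put (or a List<Put>) to HTable#put(). The following is a minimal sketch of that pattern, not taken from any of the projects below; the quorum host, table name "demo", and column family "cf" are placeholder assumptions, and it uses the older pre-1.0 client API (HTable constructor, Put#add) that these examples target — newer clients obtain a Table from a Connection and call Put#addColumn instead.

// Minimal sketch of the common HTable#put() pattern (older HBase client API).
// The quorum host, table name, and column family below are placeholders.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class HTablePutSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "zk-host");      // placeholder quorum host

    HTable table = new HTable(conf, "demo");            // assumes a table 'demo' with family 'cf'
    try {
      Put put = new Put(Bytes.toBytes("row1"));         // row key
      put.add(Bytes.toBytes("cf"), Bytes.toBytes("qual"), Bytes.toBytes("value"));
      table.put(put);                                   // a List<Put> can be passed to batch writes
      table.flushCommits();                             // force any client-buffered writes out
    } finally {
      table.close();
    }
  }
}

Many of the examples call flushCommits() explicitly because the old client buffers writes when auto-flush is disabled; with auto-flush on (the default), a single put() is sent immediately.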
Example 1
Source File: TestHbaseClient.java From kylin with Apache License 2.0
public static void main(String[] args) throws IOException {
  foo(6, 5);
  foo(5, 2);
  foo(3, 0);

  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.zookeeper.quorum", "hbase_host");
  conf.set("zookeeper.znode.parent", "/hbase-unsecure");

  HTable table = new HTable(conf, "test1");
  Put put = new Put(Bytes.toBytes("row1"));
  put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
  put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("val2"));
  table.put(put);
  table.close();
}
Example 2
Source File: IndexHandlerIT.java From phoenix with Apache License 2.0
@Test
public void testClientWritesWithPriority() throws Exception {
  Configuration conf = new Configuration(UTIL.getConfiguration());
  // add the keys for our rpc factory
  conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
      CountingIndexClientRpcFactory.class.getName());
  // and set the index table as the current table
  conf.setStrings(IndexQosRpcControllerFactory.INDEX_TABLE_NAMES_KEY,
      TestTable.getTableNameString());
  HTable table = new HTable(conf, TestTable.getTableName());

  // do a write to the table
  Put p = new Put(row);
  p.add(family, qual, new byte[] { 1, 0, 1, 0 });
  table.put(p);
  table.flushCommits();

  // check the counts on the rpc controller
  assertEquals("Didn't get the expected number of index priority writes!", 1,
      (int) CountingIndexClientRpcController.priorityCounts
          .get(QueryServicesOptions.DEFAULT_INDEX_MIN_PRIORITY));

  table.close();
}
Example 3
Source File: LobUtil.java From Transwarp-Sample-Code with MIT License
/**
 * Upload an object to a LOB
 * @param tableName Hyperbase table name
 * @param row rowkey (converted to bytes)
 * @param filename file name
 * @param fileData file contents
 */
public void putLob(String tableName, String row, String filename, byte[] fileData) {
  byte[] rowkey = Bytes.toBytes(row);
  try {
    HTable htable = new HTable(conf, tableName);
    Put put = new Put(rowkey);
    put.add(Bytes.toBytes(family1), Bytes.toBytes(f1_q1), Bytes.toBytes(filename));
    put.add(Bytes.toBytes(family2), Bytes.toBytes(f2_q1), fileData);
    htable.put(put);
    htable.flushCommits();
    htable.close();
  } catch (IOException e1) {
    e1.printStackTrace();
  }
}
Example 4
Source File: TestEndToEndCoveredIndexing.java From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Test that a bunch of puts with a single timestamp across all the puts builds and inserts index
 * entries as expected
 * @throws Exception on failure
 */
@Test
public void testSimpleTimestampedUpdates() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts = 10;
  p.add(FAM, indexed_qualifer, ts, value1);
  p.add(FAM, regular_qualifer, ts, value2);
  primary.put(p);
  primary.flushCommits();

  // read the index for the expected values
  HTable index1 = new HTable(UTIL.getConfiguration(), getIndexTableName());

  // build the expected kvs
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);

  // verify that the index matches
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);

  // cleanup
  closeAndCleanupTables(primary, index1);
}
Example 5
Source File: HBaseConnectorITCase.java From Flink-CEPplus with Apache License 2.0
private static void prepareTable() throws IOException {
  // create a table
  TableName tableName = TableName.valueOf(TEST_TABLE);
  // column families
  byte[][] families = new byte[][]{
      Bytes.toBytes(FAMILY1),
      Bytes.toBytes(FAMILY2),
      Bytes.toBytes(FAMILY3)
  };
  // split keys
  byte[][] splitKeys = new byte[][]{ Bytes.toBytes(4) };
  createTable(tableName, families, splitKeys);

  // get the HTable instance
  HTable table = openTable(tableName);
  List<Put> puts = new ArrayList<>();
  // add some data
  puts.add(putRow(1, 10, "Hello-1", 100L, 1.01, false, "Welt-1"));
  puts.add(putRow(2, 20, "Hello-2", 200L, 2.02, true, "Welt-2"));
  puts.add(putRow(3, 30, "Hello-3", 300L, 3.03, false, "Welt-3"));
  puts.add(putRow(4, 40, null, 400L, 4.04, true, "Welt-4"));
  puts.add(putRow(5, 50, "Hello-5", 500L, 5.05, false, "Welt-5"));
  puts.add(putRow(6, 60, "Hello-6", 600L, 6.06, true, "Welt-6"));
  puts.add(putRow(7, 70, "Hello-7", 700L, 7.07, false, "Welt-7"));
  puts.add(putRow(8, 80, null, 800L, 8.08, true, "Welt-8"));
  // append rows to table
  table.put(puts);
  table.close();
}
Example 6
Source File: EndToEndCoveredIndexingIT.java From phoenix with Apache License 2.0
@Test
public void testSimpleDeletes() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a simple Put
  long ts = 10;
  Put p = new Put(row1);
  p.add(FAM, indexed_qualifer, ts, value1);
  p.add(FAM, regular_qualifer, ts, value2);
  primary.put(p);
  primary.flushCommits();

  Delete d = new Delete(row1);
  primary.delete(d);

  HTable index = new HTable(UTIL.getConfiguration(), fam1.getTable());
  List<KeyValue> expected = Collections.<KeyValue> emptyList();
  // scan over all time should cause the delete to be covered
  IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, 0, Long.MAX_VALUE, value1,
      HConstants.EMPTY_END_ROW);

  // scan at the older timestamp should still show the older value
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index, expected, ts, value1);

  // cleanup
  closeAndCleanupTables(index, primary);
}
Example 7
Source File: KnoxLocalClusterIntegrationTest.java From hadoop-mini-clusters with Apache License 2.0
private static void putRow(String tableName, String colFamName, String rowKey, String colQualifier,
    String value, Configuration configuration) throws Exception {
  HTable table = new HTable(configuration, tableName);
  Put put = new Put(Bytes.toBytes(rowKey));
  put.add(Bytes.toBytes(colFamName), Bytes.toBytes(colQualifier), Bytes.toBytes(value));
  table.put(put);
  table.flushCommits();
  table.close();
}
Example 8
Source File: EndToEndCoveredIndexingIT.java From phoenix with Apache License 2.0
/**
 * Test that we make updates to multiple {@link ColumnGroup}s across a single put/delete
 * @throws Exception on failure
 */
@Test
public void testMultipleConcurrentGroupsUpdated() throws Exception {
  HTable primary = createSetupTables(fam1, fam2);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts = 10;
  p.add(FAM, indexed_qualifer, ts, value1);
  p.add(FAM, regular_qualifer, ts, value2);
  p.add(FAM2, indexed_qualifer, ts, value3);
  primary.put(p);
  primary.flushCommits();

  // read the index for the expected values
  HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());
  HTable index2 = new HTable(UTIL.getConfiguration(), fam2.getTable());

  // build the expected kvs
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);

  // and check the second index as well
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col3));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index2, expected, ts, value3);

  // cleanup
  closeAndCleanupTables(primary, index1, index2);
}
Example 9
Source File: HbaseTestCase.java From wifi with Apache License 2.0
public static void put(String tablename, String row, String columnFamily, String column,
    String data) throws Exception {
  HTable table = new HTable(cfg, tablename);
  Put p1 = new Put(Bytes.toBytes(row));
  p1.add(Bytes.toBytes(columnFamily), Bytes.toBytes(column), Bytes.toBytes(data));
  table.put(p1);
  System.out.println("put '" + row + "','" + columnFamily + ":" + column + "','" + data + "'");
}
Example 10
Source File: HbaseUtil.java From DataLink with Apache License 2.0
@SuppressWarnings("rawtypes")
public static void startWrite(RecordReceiver lineReceiver, HTable table, Configuration configuration) {
  List<Map> columns = configuration.getList(Key.COLUMN, Map.class);
  Integer batchSize = configuration.getInt(Key.BATCH_SIZE, 100);
  boolean writeToWAL = configuration.getBool(Key.WRITE_TO_WAL, true);
  List<HbaseColumnCell> hbaseColumnCells = parseColumns(columns);
  try {
    Record record = null;
    List<Put> puts = new ArrayList<Put>();
    while ((record = lineReceiver.getFromReader()) != null) {
      puts.add(getPut(hbaseColumnCells, record, writeToWAL));
      if (puts.size() % batchSize == 0) {
        table.put(puts);
        table.flushCommits();
        puts.clear();
      }
    }
    if (!puts.isEmpty()) {
      table.put(puts);
      table.flushCommits();
    }
    table.close();
  } catch (Exception e) {
    // original message is in Chinese: "IO exception while writing to hbase[%s]; please check that the network is healthy!"
    String message = String.format(
        "IO exception while writing to hbase[%s]; please check that the network is healthy!",
        table.getName());
    LOG.error(message, e);
    ErrorRecord.addError(message + "->" + e.getMessage());
    throw DataXException.asDataXException(HBaseWriter98ErrorCode.WRITE_HBASE_IO_ERROR, e);
  }
}
Example 11
Source File: EndToEndCoveredIndexingIT.java From phoenix with Apache License 2.0
/**
 * Test that the multiple timestamps in a single put build the correct index updates.
 * @throws Exception on failure
 */
@Test
public void testMultipleTimestampsInSinglePut() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts1 = 10;
  long ts2 = 11;
  p.add(FAM, indexed_qualifer, ts1, value1);
  p.add(FAM, regular_qualifer, ts1, value2);
  // our group indexes all columns in this family, so any qualifier here is ok
  p.add(FAM2, regular_qualifer, ts2, value3);
  primary.put(p);
  primary.flushCommits();

  // read the index for the expected values
  HTable index1 = new HTable(UTIL.getConfiguration(), getIndexTableName());

  // build the expected kvs
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));

  // check the first entry at ts1
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);

  // check the second entry at ts2
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // cleanup
  closeAndCleanupTables(primary, index1);
}
Example 12
Source File: InvalidListPruneTest.java From phoenix-tephra with Apache License 2.0
@Test
public void testPruneEmptyTable() throws Exception {
  // Make sure that empty tables do not block the progress of pruning

  // Create an empty table
  TableName txEmptyTable = TableName.valueOf("emptyPruneTestTable");
  HTable emptyHTable = createTable(txEmptyTable.getName(), new byte[][]{family}, false,
      Collections.singletonList(TestTransactionProcessor.class.getName()));

  TransactionPruningPlugin transactionPruningPlugin = new TestTransactionPruningPlugin();
  transactionPruningPlugin.initialize(conf);

  try {
    long now1 = System.currentTimeMillis();
    long inactiveTxTimeNow1 = (now1 - 150) * TxConstants.MAX_TX_PER_MS;
    long noPruneUpperBound = -1;
    long expectedPruneUpperBound1 = (now1 - 200) * TxConstants.MAX_TX_PER_MS;
    InMemoryTransactionStateCache.setTransactionSnapshot(
        new TransactionSnapshot(expectedPruneUpperBound1, expectedPruneUpperBound1, expectedPruneUpperBound1,
            ImmutableSet.of(expectedPruneUpperBound1),
            ImmutableSortedMap.<Long, TransactionManager.InProgressTx>of()));
    testUtil.compact(txEmptyTable, true);
    testUtil.compact(txDataTable1, true);
    // Since the write to prune table happens async, we need to sleep a bit before checking the state of the table
    TimeUnit.SECONDS.sleep(2);

    // fetch prune upper bound, there should be no prune upper bound since txEmptyTable cannot be compacted
    long pruneUpperBound1 = transactionPruningPlugin.fetchPruneUpperBound(now1, inactiveTxTimeNow1);
    Assert.assertEquals(noPruneUpperBound, pruneUpperBound1);
    transactionPruningPlugin.pruneComplete(now1, noPruneUpperBound);

    // Now flush the empty table, this will record the table region as empty, and then pruning will continue
    testUtil.flush(txEmptyTable);
    // Since the write to prune table happens async, we need to sleep a bit before checking the state of the table
    TimeUnit.SECONDS.sleep(2);

    // fetch prune upper bound, again, this time it should work
    pruneUpperBound1 = transactionPruningPlugin.fetchPruneUpperBound(now1, inactiveTxTimeNow1);
    Assert.assertEquals(expectedPruneUpperBound1, pruneUpperBound1);
    transactionPruningPlugin.pruneComplete(now1, expectedPruneUpperBound1);

    // Now add some data to the empty table
    // (adding data non-transactionally is okay too, we just need some data for the compaction to run)
    emptyHTable.put(new Put(Bytes.toBytes(1)).add(family, qualifier, Bytes.toBytes(1)));
    emptyHTable.close();

    // Now run another compaction on txDataTable1 with an updated tx snapshot
    long now2 = System.currentTimeMillis();
    long inactiveTxTimeNow2 = (now2 - 150) * TxConstants.MAX_TX_PER_MS;
    long expectedPruneUpperBound2 = (now2 - 200) * TxConstants.MAX_TX_PER_MS;
    InMemoryTransactionStateCache.setTransactionSnapshot(
        new TransactionSnapshot(expectedPruneUpperBound2, expectedPruneUpperBound2, expectedPruneUpperBound2,
            ImmutableSet.of(expectedPruneUpperBound2),
            ImmutableSortedMap.<Long, TransactionManager.InProgressTx>of()));
    testUtil.flush(txEmptyTable);
    testUtil.compact(txDataTable1, true);
    // Since the write to prune table happens async, we need to sleep a bit before checking the state of the table
    TimeUnit.SECONDS.sleep(2);

    // Running a prune now should still return min(inactiveTxTimeNow1, expectedPruneUpperBound1) since
    // txEmptyTable is no longer empty. This information is returned since the txEmptyTable was recorded as being
    // empty in the previous run with inactiveTxTimeNow1
    long pruneUpperBound2 = transactionPruningPlugin.fetchPruneUpperBound(now2, inactiveTxTimeNow2);
    Assert.assertEquals(inactiveTxTimeNow1, pruneUpperBound2);
    transactionPruningPlugin.pruneComplete(now2, expectedPruneUpperBound1);

    // However, after compacting txEmptyTable we should get the latest upper bound
    testUtil.flush(txEmptyTable);
    testUtil.compact(txEmptyTable, true);
    // Since the write to prune table happens async, we need to sleep a bit before checking the state of the table
    TimeUnit.SECONDS.sleep(2);
    pruneUpperBound2 = transactionPruningPlugin.fetchPruneUpperBound(now2, inactiveTxTimeNow2);
    Assert.assertEquals(expectedPruneUpperBound2, pruneUpperBound2);
    transactionPruningPlugin.pruneComplete(now2, expectedPruneUpperBound2);
  } finally {
    transactionPruningPlugin.destroy();
    hBaseAdmin.disableTable(txEmptyTable);
    hBaseAdmin.deleteTable(txEmptyTable);
  }
}
Example 13
Source File: InvalidListPruneTest.java From phoenix-tephra with Apache License 2.0
@Test
public void testPruneEmptyTable() throws Exception {
  // Make sure that empty tables do not block the progress of pruning

  // Create an empty table
  TableName txEmptyTable = TableName.valueOf("emptyPruneTestTable");
  HTable emptyHTable = createTable(txEmptyTable.getName(), new byte[][]{family}, false,
      Collections.singletonList(TestTransactionProcessor.class.getName()));

  TransactionPruningPlugin transactionPruningPlugin = new TestTransactionPruningPlugin();
  transactionPruningPlugin.initialize(conf);

  try {
    long now1 = System.currentTimeMillis();
    long inactiveTxTimeNow1 = (now1 - 150) * TxConstants.MAX_TX_PER_MS;
    long noPruneUpperBound = -1;
    long expectedPruneUpperBound1 = (now1 - 200) * TxConstants.MAX_TX_PER_MS;
    InMemoryTransactionStateCache.setTransactionSnapshot(
        new TransactionSnapshot(expectedPruneUpperBound1, expectedPruneUpperBound1, expectedPruneUpperBound1,
            ImmutableSet.of(expectedPruneUpperBound1),
            ImmutableSortedMap.<Long, TransactionManager.InProgressTx>of()));
    testUtil.compact(txEmptyTable, true);
    testUtil.compact(txDataTable1, true);
    // Since the write to prune table happens async, we need to sleep a bit before checking the state of the table
    TimeUnit.SECONDS.sleep(2);

    // fetch prune upper bound, there should be no prune upper bound since txEmptyTable cannot be compacted
    long pruneUpperBound1 = transactionPruningPlugin.fetchPruneUpperBound(now1, inactiveTxTimeNow1);
    Assert.assertEquals(noPruneUpperBound, pruneUpperBound1);
    transactionPruningPlugin.pruneComplete(now1, noPruneUpperBound);

    // Now flush the empty table, this will record the table region as empty, and then pruning will continue
    hBaseAdmin.flush(txEmptyTable);
    // Since the write to prune table happens async, we need to sleep a bit before checking the state of the table
    TimeUnit.SECONDS.sleep(2);

    // fetch prune upper bound, again, this time it should work
    pruneUpperBound1 = transactionPruningPlugin.fetchPruneUpperBound(now1, inactiveTxTimeNow1);
    Assert.assertEquals(expectedPruneUpperBound1, pruneUpperBound1);
    transactionPruningPlugin.pruneComplete(now1, expectedPruneUpperBound1);

    // Now add some data to the empty table
    // (adding data non-transactionally is okay too, we just need some data for the compaction to run)
    emptyHTable.put(new Put(Bytes.toBytes(1)).addColumn(family, qualifier, Bytes.toBytes(1)));
    emptyHTable.close();

    // Now run another compaction on txDataTable1 with an updated tx snapshot
    long now2 = System.currentTimeMillis();
    long inactiveTxTimeNow2 = (now2 - 150) * TxConstants.MAX_TX_PER_MS;
    long expectedPruneUpperBound2 = (now2 - 200) * TxConstants.MAX_TX_PER_MS;
    InMemoryTransactionStateCache.setTransactionSnapshot(
        new TransactionSnapshot(expectedPruneUpperBound2, expectedPruneUpperBound2, expectedPruneUpperBound2,
            ImmutableSet.of(expectedPruneUpperBound2),
            ImmutableSortedMap.<Long, TransactionManager.InProgressTx>of()));
    testUtil.flush(txEmptyTable);
    testUtil.compact(txDataTable1, true);
    // Since the write to prune table happens async, we need to sleep a bit before checking the state of the table
    TimeUnit.SECONDS.sleep(2);

    // Running a prune now should still return min(inactiveTxTimeNow1, expectedPruneUpperBound1) since
    // txEmptyTable is no longer empty. This information is returned since the txEmptyTable was recorded as being
    // empty in the previous run with inactiveTxTimeNow1
    long pruneUpperBound2 = transactionPruningPlugin.fetchPruneUpperBound(now2, inactiveTxTimeNow2);
    Assert.assertEquals(inactiveTxTimeNow1, pruneUpperBound2);
    transactionPruningPlugin.pruneComplete(now2, expectedPruneUpperBound1);

    // However, after compacting txEmptyTable we should get the latest upper bound
    testUtil.flush(txEmptyTable);
    testUtil.compact(txEmptyTable, true);
    // Since the write to prune table happens async, we need to sleep a bit before checking the state of the table
    TimeUnit.SECONDS.sleep(2);
    pruneUpperBound2 = transactionPruningPlugin.fetchPruneUpperBound(now2, inactiveTxTimeNow2);
    Assert.assertEquals(expectedPruneUpperBound2, pruneUpperBound2);
    transactionPruningPlugin.pruneComplete(now2, expectedPruneUpperBound2);
  } finally {
    transactionPruningPlugin.destroy();
    hBaseAdmin.disableTable(txEmptyTable);
    hBaseAdmin.deleteTable(txEmptyTable);
  }
}
Example 14
Source File: HBaseWriter.java From hiped2 with Apache License 2.0
/**
 * The MapReduce driver - setup and launch the job.
 *
 * @param args the command-line arguments
 * @return the process exit code
 * @throws Exception if something goes wrong
 */
public int run(final String[] args) throws Exception {
  Cli cli = Cli.builder().setArgs(args).addOptions(CliCommonOpts.InputFileOption.values()).build();
  int result = cli.runCmd();
  if (result != 0) {
    return result;
  }

  File inputFile = new File(cli.getArgValueAsString(CliCommonOpts.InputFileOption.INPUT));

  Configuration conf = HBaseConfiguration.create();
  createTableAndColumn(conf, STOCKS_TABLE_NAME, STOCK_DETAILS_COLUMN_FAMILY_AS_BYTES);

  HTable htable = new HTable(conf, STOCKS_TABLE_NAME);
  htable.setAutoFlush(false);
  htable.setWriteBufferSize(1024 * 1024 * 12);

  SpecificDatumWriter<Stock> writer = new SpecificDatumWriter<Stock>();
  writer.setSchema(Stock.SCHEMA$);

  ByteArrayOutputStream bao = new ByteArrayOutputStream();
  BinaryEncoder encoder = EncoderFactory.get().directBinaryEncoder(bao, null);

  for (Stock stock : AvroStockUtils.fromCsvFile(inputFile)) {
    writer.write(stock, encoder);
    encoder.flush();

    byte[] rowkey = Bytes.add(
        Bytes.toBytes(stock.getSymbol().toString()),
        Bytes.toBytes(stock.getDate().toString()));
    byte[] stockAsAvroBytes = bao.toByteArray();

    Put put = new Put(rowkey);
    put.add(STOCK_DETAILS_COLUMN_FAMILY_AS_BYTES, STOCK_COLUMN_QUALIFIER_AS_BYTES,
        stockAsAvroBytes);
    htable.put(put);

    bao.reset();
  }

  htable.flushCommits();
  htable.close();

  System.out.println("done");
  return 0;
}
Example 15
Source File: EndToEndCoveredIndexingIT.java From phoenix with Apache License 2.0
/**
 * Covering deletes (via {@link Delete#deleteColumns}) cover everything back in time from the
 * given time. If it's modifying the latest state, we don't need to do anything but add deletes.
 * If it's modifying back-in-time state, we just need to fix up the surrounding elements, as
 * anything else ahead of it will be fixed up by later updates.
 * <p>
 * Similar to {@link #testMultipleTimestampsInSingleDelete()}, but with covering deletes.
 * @throws Exception on failure
 */
@Test
public void testDeleteColumnsInThePast() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts1 = 10, ts2 = 11, ts3 = 12;
  p.add(FAM, indexed_qualifer, ts1, value1);
  p.add(FAM2, regular_qualifer, ts2, value3);
  primary.put(p);
  primary.flushCommits();

  // now build up a delete with a couple different timestamps
  Delete d = new Delete(row1);
  // these deletes don't need to match the exact ts because they cover everything earlier
  d.deleteColumns(FAM, indexed_qualifer, ts2);
  d.deleteColumns(FAM2, regular_qualifer, ts3);
  primary.delete(d);

  // read the index for the expected values
  HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());

  // build the expected kvs
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));

  // check the first entry at ts1
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);

  // delete at ts2 changes what the put would insert
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // final delete clears out everything
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value1);

  // cleanup
  closeAndCleanupTables(primary, index1);
}
Example 16
Source File: EndToEndCoveredIndexingIT.java From phoenix with Apache License 2.0
/**
 * Similar to {@link #testMultipleTimestampsInSinglePut()}, this checks the same behavior with deletes
 * @throws Exception on failure
 */
@Test
public void testMultipleTimestampsInSingleDelete() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts1 = 10, ts2 = 11, ts3 = 12;
  p.add(FAM, indexed_qualifer, ts1, value1);
  // our group indexes all columns in this family, so any qualifier here is ok
  p.add(FAM2, regular_qualifer, ts2, value3);
  primary.put(p);
  primary.flushCommits();

  // check to make sure everything we expect is there
  HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());

  // ts1, we just have v1
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1);

  // at ts2, don't have the above anymore
  pairs.clear();
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts2 + 1, value1, value1);

  // but we do have the new entry at ts2
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // now build up a delete with a couple different timestamps
  Delete d = new Delete(row1);
  // these deletes have to match the exact ts since we are doing an exact match (deleteColumn).
  d.deleteColumn(FAM, indexed_qualifer, ts1);
  // since this doesn't match exactly, we actually shouldn't see a change in table state
  d.deleteColumn(FAM2, regular_qualifer, ts3);
  primary.delete(d);

  // at ts1, we should have the put covered exactly by the delete and into the entire future
  expected = Collections.emptyList();
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, Long.MAX_VALUE, value1,
      value1);

  // at ts2, we should just see value3
  pairs.clear();
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(value3, col2));
  expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value1);

  // the later delete is a point delete, so we shouldn't see any change at ts3
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, ts3, value1,
      HConstants.EMPTY_END_ROW);

  // cleanup
  closeAndCleanupTables(primary, index1);
}
Example 17
Source File: EndToEndCoveredColumnsIndexBuilderIT.java From phoenix with Apache License 2.0
/**
 * Similar to {@link #testExpectedResultsInTableStateForSinglePut()}, but against batches of puts.
 * Previous implementations managed batches by playing current state against each element in the
 * batch, rather than combining all the per-row updates into a single mutation for the batch. This
 * test ensures that we see the correct expected state.
 * @throws Exception on failure
 */
@SuppressWarnings("deprecation")
@Test
public void testExpectedResultsInTableStateForBatchPuts() throws Exception {
  long ts = state.ts;
  // build up a list of puts to make, all on the same row
  Put p1 = new Put(row, ts);
  p1.add(family, qual, Bytes.toBytes("v1"));
  Put p2 = new Put(row, ts + 1);
  p2.add(family, qual, Bytes.toBytes("v2"));

  // setup all the verifiers we need. This is just the same as above, but will be called twice
  // since we need to iterate the batch.

  // get all the underlying kvs for the put
  final List<Cell> allKvs = new ArrayList<Cell>(2);
  allKvs.addAll(p2.getFamilyCellMap().get(family));
  allKvs.addAll(p1.getFamilyCellMap().get(family));

  // setup the verifier for the data we expect to write
  // both puts should be put into a single batch
  final ColumnReference familyRef =
      new ColumnReference(EndToEndCoveredColumnsIndexBuilderIT.family, ColumnReference.ALL_QUALIFIERS);
  VerifyingIndexCodec codec = state.codec;
  // no previous state in the table
  codec.verifiers.add(new ListMatchingVerifier("cleanup state 1", Collections.<Cell> emptyList(),
      familyRef));
  codec.verifiers.add(new ListMatchingVerifier("put state 1", p1.getFamilyCellMap().get(family),
      familyRef));
  codec.verifiers.add(new ListMatchingVerifier("cleanup state 2", p1.getFamilyCellMap().get(family),
      familyRef));
  // kvs from both puts should be in the table now
  codec.verifiers.add(new ListMatchingVerifier("put state 2", allKvs, familyRef));

  // do the actual put (no indexing will actually be done)
  HTable primary = state.table;
  primary.setAutoFlush(false);
  primary.put(Arrays.asList(p1, p2));
  primary.flushCommits();

  // cleanup after ourselves
  cleanup(state);
}
Example 18
Source File: IndexedTableAdmin.java From hbase-secondary-index with GNU General Public License v3.0
private void flushBatch(final List<Put> batch, final HTable indexTable) throws IOException {
  if (!batch.isEmpty()) {
    indexTable.put(batch);
    batch.clear();
  }
}