Java Code Examples for org.apache.hadoop.hbase.HBaseTestingUtility#getHBaseAdmin()
The following examples show how to use
org.apache.hadoop.hbase.HBaseTestingUtility#getHBaseAdmin().
You can vote up the examples you like or vote down the ones you don't like, and you can go to the original project or source file by following the links above each example.
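Most of the examples below follow the same basic pattern: spin up an in-process mini cluster with HBaseTestingUtility, fetch an HBaseAdmin via getHBaseAdmin(), run admin operations against it, and shut the cluster down afterwards. The minimal sketch below shows that pattern in isolation; the class name GetHBaseAdminSketch and the table and column-family names are illustrative only and do not come from any of the projects listed here. In newer HBase releases, HBaseTestingUtility#getAdmin() generally supersedes getHBaseAdmin(), but the examples on this page target the older API.

// Minimal sketch of the common pattern, assuming an HBase 1.x-era test classpath;
// the class name and the table/family names are illustrative, not taken from the examples below.
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class GetHBaseAdminSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility testUtil = new HBaseTestingUtility();
    testUtil.startMiniCluster();                          // start an in-process HBase cluster
    try (HBaseAdmin admin = testUtil.getHBaseAdmin()) {   // admin handle backed by the mini cluster
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("example_table"));
      desc.addFamily(new HColumnDescriptor("cf"));
      admin.createTable(desc);                            // any admin DDL would go here
    } finally {
      testUtil.shutdownMiniCluster();                     // always tear the cluster down
    }
  }
}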
Example 1
Source File: HbaseTestUtil.java From kafka-connect-hbase with Apache License 2.0 | 6 votes |
/**
 * Creates a table with the given column families.
 *
 * @param tableName      name of the table to create
 * @param columnFamilies column families to add to the table
 */
public static void createTable(String tableName, String... columnFamilies) {
  HBaseTestingUtility testingUtility = getUtility();
  if (!status.get()) {
    throw new RuntimeException("The mini cluster hasn't started yet. "
        + " Call HBaseTestUtil#startMiniCluster() before creating a table");
  }
  final TableName name = TableName.valueOf(tableName);
  try (HBaseAdmin hBaseAdmin = testingUtility.getHBaseAdmin()) {
    final HTableDescriptor hTableDescriptor = new HTableDescriptor(name);
    for (String family : columnFamilies) {
      final HColumnDescriptor hColumnDescriptor = new HColumnDescriptor(Bytes.toBytes(family));
      hTableDescriptor.addFamily(hColumnDescriptor);
    }
    hBaseAdmin.createTable(hTableDescriptor);
    testingUtility.waitUntilAllRegionsAssigned(name);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
Example 2
Source File: HBaseIOTest.java From beam with Apache License 2.0 | 6 votes |
@BeforeClass
public static void beforeClass() throws Exception {
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
  // Try to bind the hostname to localhost to solve an issue when it is not configured or
  // no DNS resolution is available.
  conf.setStrings("hbase.master.hostname", "localhost");
  conf.setStrings("hbase.regionserver.hostname", "localhost");
  htu = new HBaseTestingUtility(conf);

  // We don't use the full htu.startMiniCluster() to avoid starting unneeded HDFS/MR daemons
  htu.startMiniZKCluster();
  MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 4);
  hbm.waitForActiveAndReadyMaster();

  admin = htu.getHBaseAdmin();
}
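The Beam setup above starts the ZooKeeper and HBase mini clusters separately instead of calling startMiniCluster(). The excerpt does not include the matching teardown; the sketch below is an assumed companion @AfterClass method (it reuses the static admin and htu fields from the example and is not code from the Beam project), shutting the pieces down in reverse order.

// Hypothetical teardown for the setup above; shutdownMiniHBaseCluster() and
// shutdownMiniZKCluster() are real HBaseTestingUtility methods, but this method
// itself is a sketch and does not appear in the excerpt.
@AfterClass
public static void afterClass() throws Exception {
  if (admin != null) {
    admin.close();                   // release the admin handle
  }
  if (htu != null) {
    htu.shutdownMiniHBaseCluster();  // stop the master and regionservers
    htu.shutdownMiniZKCluster();     // then stop the ZooKeeper mini cluster
  }
}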
Example 3
Source File: TestOmidTableManager.java From phoenix-omid with Apache License 2.0 | 5 votes |
@BeforeClass
public void setUpClass() throws Exception {
  // HBase setup
  hbaseConf = HBaseConfiguration.create();
  hbaseConf.setBoolean("hbase.localcluster.assign.random.ports", true);
  hBaseTestUtil = new HBaseTestingUtility(hbaseConf);
  hBaseTestUtil.startMiniCluster(1);

  hBaseAdmin = hBaseTestUtil.getHBaseAdmin();
}
Example 4
Source File: TransactionAwareHTableTest.java From phoenix-tephra with Apache License 2.0 | 5 votes |
@BeforeClass
public static void setupBeforeClass() throws Exception {
  testUtil = new HBaseTestingUtility();
  conf = testUtil.getConfiguration();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.newFolder().getAbsolutePath());
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  conf.unset(TxConstants.Manager.CFG_TX_HDFS_USER);
  conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, tmpFolder.newFolder().getAbsolutePath());
  conf.setLong(TxConstants.Manager.CFG_TX_SNAPSHOT_INTERVAL, 5);
  // Tune down the connection thread pool size
  conf.setInt("hbase.hconnection.threads.core", 5);
  conf.setInt("hbase.hconnection.threads.max", 10);
  // Tune down handler threads in regionserver
  conf.setInt("hbase.regionserver.handler.count", 10);

  // Set to random port
  conf.setInt("hbase.master.port", 0);
  conf.setInt("hbase.master.info.port", 0);
  conf.setInt("hbase.regionserver.port", 0);
  conf.setInt("hbase.regionserver.info.port", 0);

  testUtil.startMiniCluster();
  hBaseAdmin = testUtil.getHBaseAdmin();

  txStateStorage = new HDFSTransactionStateStorage(conf,
                                                   new SnapshotCodecProvider(conf),
                                                   new TxMetricsCollector());
  txManager = new TransactionManager(conf, txStateStorage, new TxMetricsCollector());
  txManager.startAndWait();
}
Example 5
Source File: TransactionAwareHTableTest.java From phoenix-tephra with Apache License 2.0 | 5 votes |
@BeforeClass
public static void setupBeforeClass() throws Exception {
  testUtil = new HBaseTestingUtility();
  conf = testUtil.getConfiguration();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.newFolder().getAbsolutePath());
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  conf.unset(TxConstants.Manager.CFG_TX_HDFS_USER);
  conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, tmpFolder.newFolder().getAbsolutePath());
  conf.setLong(TxConstants.Manager.CFG_TX_SNAPSHOT_INTERVAL, 5);
  // Tune down the connection thread pool size
  conf.setInt("hbase.hconnection.threads.core", 5);
  conf.setInt("hbase.hconnection.threads.max", 10);
  // Tune down handler threads in regionserver
  conf.setInt("hbase.regionserver.handler.count", 10);

  // Set to random port
  conf.setInt("hbase.master.port", 0);
  conf.setInt("hbase.master.info.port", 0);
  conf.setInt("hbase.regionserver.port", 0);
  conf.setInt("hbase.regionserver.info.port", 0);

  testUtil.startMiniCluster();
  hBaseAdmin = testUtil.getHBaseAdmin();
  conn = testUtil.getConnection();

  txStateStorage = new HDFSTransactionStateStorage(conf,
                                                   new SnapshotCodecProvider(conf),
                                                   new TxMetricsCollector());
  txManager = new TransactionManager(conf, txStateStorage, new TxMetricsCollector());
  txManager.startAndWait();
}
Example 6
Source File: HBaseClientTest.java From metron with Apache License 2.0 | 5 votes |
@BeforeAll
public static void startHBase() throws Exception {
  Configuration config = HBaseConfiguration.create();
  config.set("hbase.master.hostname", "localhost");
  config.set("hbase.regionserver.hostname", "localhost");
  util = new HBaseTestingUtility(config);
  util.startMiniCluster();
  admin = util.getHBaseAdmin();

  // create the table
  table = util.createTable(Bytes.toBytes(tableName), cf);
  util.waitTableEnabled(table.getName());

  // setup the client
  client = new HBaseClient((c, t) -> table, table.getConfiguration(), tableName);
}
Example 7
Source File: CompactionSplitIT.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
@Test
public void testSplitRegion() throws Throwable {
  String tableName = "D";
  String columnName = "A";
  String schema = SCHEMA;
  String query = String.format("select * from %s order by %s", tableName, columnName);
  ResultSet rs = classWatcher.executeQuery(query);
  String actualResult = TestUtils.FormattedResult.ResultFactory.toStringUnsorted(rs);

  SConfiguration config = HConfiguration.getConfiguration();
  HBaseTestingUtility testingUtility = new HBaseTestingUtility((Configuration) config.getConfigSource().unwrapDelegate());
  HBaseAdmin admin = testingUtility.getHBaseAdmin();
  TableName tn = TableName.valueOf(config.getNamespace(),
      Long.toString(TestUtils.baseTableConglomerateId(classWatcher.getOrCreateConnection(), SCHEMA, "D")));

  for (HRegionInfo info : admin.getTableRegions(tn)) {
    System.out.println(info.getRegionNameAsString() + " - " + info.getRegionName() + " - " + info.getEncodedName());
    CallableStatement callableStatement = classWatcher.getOrCreateConnection()
        .prepareCall("call SYSCS_UTIL.SYSCS_SPLIT_REGION_AT_POINTS(?,?)");
    callableStatement.setString(1, info.getEncodedName());
    callableStatement.setString(2, "");  // empty splitpoints will be turned into null arg (hbase will decide)

    assertTrue(HBaseTestUtils.setBlockPostSplit(true));
    helpTestProc(callableStatement, 10, classWatcher, query, actualResult);

    assertTrue(HBaseTestUtils.setBlockPostSplit(false));
    helpTestProc(callableStatement, 10, classWatcher, query, actualResult);
  }
}
Example 8
Source File: SpliceRegionAdminIT.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
@Test
public void testTable() throws Exception {
  Connection connection = methodWatcher.getOrCreateConnection();

  SConfiguration config = HConfiguration.getConfiguration();
  HBaseTestingUtility testingUtility = new HBaseTestingUtility((Configuration) config.getConfigSource().unwrapDelegate());
  HBaseAdmin admin = testingUtility.getHBaseAdmin();

  long conglomerateId = TableSplit.getConglomerateId(connection, SCHEMA_NAME, LINEITEM, null);
  TableName tn = TableName.valueOf(config.getNamespace(), Long.toString(conglomerateId));
  List<HRegionInfo> partitions = admin.getTableRegions(tn);
  for (HRegionInfo partition : partitions) {
    String startKey = Bytes.toStringBinary(partition.getStartKey());
    int index = Collections.binarySearch(hbaseTableSplitKeys, startKey);
    String encodedRegionName = partition.getEncodedName();

    PreparedStatement ps = methodWatcher.prepareStatement("CALL SYSCS_UTIL.GET_START_KEY(?,?,null,?)");
    ps.setString(1, SCHEMA_NAME);
    ps.setString(2, LINEITEM);
    ps.setString(3, encodedRegionName);
    ResultSet rs = ps.executeQuery();
    rs.next();
    String result = rs.getString(1);
    Assert.assertEquals(result, spliceTableSplitKeys.get(index));
  }
}
Example 9
Source File: SpliceRegionAdminIT.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
@Test
public void testIndex() throws Exception {
  Connection connection = methodWatcher.getOrCreateConnection();

  SConfiguration config = HConfiguration.getConfiguration();
  HBaseTestingUtility testingUtility = new HBaseTestingUtility((Configuration) config.getConfigSource().unwrapDelegate());
  HBaseAdmin admin = testingUtility.getHBaseAdmin();

  long conglomerateId = TableSplit.getConglomerateId(connection, SCHEMA_NAME, ORDERS, CUST_IDX);
  TableName tn = TableName.valueOf(config.getNamespace(), Long.toString(conglomerateId));
  List<HRegionInfo> partitions = admin.getTableRegions(tn);
  for (HRegionInfo partition : partitions) {
    String startKey = Bytes.toStringBinary(partition.getStartKey());
    int index = Collections.binarySearch(hbaseIndexSplitKeys, startKey);
    String encodedRegionName = partition.getEncodedName();

    PreparedStatement ps = methodWatcher.prepareStatement("CALL SYSCS_UTIL.GET_START_KEY(?,?,?,?)");
    ps.setString(1, SCHEMA_NAME);
    ps.setString(2, ORDERS);
    ps.setString(3, CUST_IDX);
    ps.setString(4, encodedRegionName);
    ResultSet rs = ps.executeQuery();
    rs.next();
    String result = rs.getString(1);
    Assert.assertEquals(result, spliceIndexSplitKeys.get(index));
  }
}
Example 10
Source File: FailForUnsupportedHBaseVersionsIT.java From phoenix with Apache License 2.0 | 4 votes |
/**
 * Test that we correctly abort a RegionServer when we run tests with an unsupported HBase
 * version. The 'completeness' of this test requires that we run it against a version of HBase
 * that wouldn't be supported with WAL compression. Currently, this is the default version
 * (0.94.4), so just running 'mvn test' will run the full test. However, this test will not fail
 * when running against a version of HBase with WAL compression enabled. Therefore, to fully test
 * this functionality, we need to run the test against both a supported and an unsupported version
 * of HBase (as long as we want to support a version of HBase that doesn't support custom WAL
 * codecs).
 * @throws Exception on failure
 */
@Test(timeout = 300000 /* 5 mins */)
public void testDoesNotStartRegionServerForUnsupportedCompressionAndVersion() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  setUpConfigForMiniCluster(conf);
  IndexTestingUtils.setupConfig(conf);
  // enable WAL Compression
  conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);

  // check the version to see if it isn't supported
  String version = VersionInfo.getVersion();
  boolean supported = false;
  if (Indexer.validateVersion(version, conf) == null) {
    supported = true;
  }

  // start the minicluster
  HBaseTestingUtility util = new HBaseTestingUtility(conf);

  // set replication required parameter
  ConfigUtil.setReplicationConfigIfAbsent(conf);

  try {
    util.startMiniCluster();

    // setup the primary table
    @SuppressWarnings("deprecation")
    HTableDescriptor desc = new HTableDescriptor(
        "testDoesNotStartRegionServerForUnsupportedCompressionAndVersion");
    byte[] family = Bytes.toBytes("f");
    desc.addFamily(new HColumnDescriptor(family));

    // enable indexing to a non-existent index table
    String indexTableName = "INDEX_TABLE";
    ColumnGroup fam1 = new ColumnGroup(indexTableName);
    fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
    CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
    builder.addIndexGroup(fam1);
    builder.build(desc);

    // get a reference to the regionserver, so we can ensure it aborts
    HRegionServer server = util.getMiniHBaseCluster().getRegionServer(0);

    // create the primary table
    HBaseAdmin admin = util.getHBaseAdmin();
    if (supported) {
      admin.createTable(desc);
      assertFalse("Hosting region server failed, even though the HBase version (" + version
          + ") supports WAL compression.", server.isAborted());
    } else {
      admin.createTableAsync(desc, null);
      // wait for the regionserver to abort - if this doesn't occur in the timeout, assume it's
      // broken.
      while (!server.isAborted()) {
        LOG.debug("Waiting on regionserver to abort..");
      }
    }
  } finally {
    // cleanup
    util.shutdownMiniCluster();
  }
}
Example 11
Source File: TestFailForUnsupportedHBaseVersions.java From phoenix with BSD 3-Clause "New" or "Revised" License | 4 votes |
/**
 * Test that we correctly abort a RegionServer when we run tests with an unsupported HBase
 * version. The 'completeness' of this test requires that we run it against a version of HBase
 * that wouldn't be supported with WAL compression. Currently, this is the default version
 * (0.94.4), so just running 'mvn test' will run the full test. However, this test will not fail
 * when running against a version of HBase with WAL compression enabled. Therefore, to fully test
 * this functionality, we need to run the test against both a supported and an unsupported version
 * of HBase (as long as we want to support a version of HBase that doesn't support custom WAL
 * codecs).
 * @throws Exception on failure
 */
@Test(timeout = 300000 /* 5 mins */)
public void testDoesNotStartRegionServerForUnsupportedCompressionAndVersion() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IndexTestingUtils.setupConfig(conf);
  // enable WAL Compression
  conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);

  // check the version to see if it isn't supported
  String version = VersionInfo.getVersion();
  boolean supported = false;
  if (Indexer.validateVersion(version, conf) == null) {
    supported = true;
  }

  // start the minicluster
  HBaseTestingUtility util = new HBaseTestingUtility(conf);
  util.startMiniCluster();

  // setup the primary table
  HTableDescriptor desc = new HTableDescriptor(
      "testDoesNotStartRegionServerForUnsupportedCompressionAndVersion");
  byte[] family = Bytes.toBytes("f");
  desc.addFamily(new HColumnDescriptor(family));

  // enable indexing to a non-existent index table
  String indexTableName = "INDEX_TABLE";
  ColumnGroup fam1 = new ColumnGroup(indexTableName);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(desc);

  // get a reference to the regionserver, so we can ensure it aborts
  HRegionServer server = util.getMiniHBaseCluster().getRegionServer(0);

  // create the primary table
  HBaseAdmin admin = util.getHBaseAdmin();
  if (supported) {
    admin.createTable(desc);
    assertFalse("Hosting region server failed, even though the HBase version (" + version
        + ") supports WAL compression.", server.isAborted());
  } else {
    admin.createTableAsync(desc, null);
    // wait for the regionserver to abort - if this doesn't occur in the timeout, assume it's
    // broken.
    while (!server.isAborted()) {
      LOG.debug("Waiting on regionserver to abort..");
    }
  }

  // cleanup
  util.shutdownMiniCluster();
}
Example 12
Source File: CostEstimationIT.java From spliceengine with GNU Affero General Public License v3.0 | 4 votes |
@Test
public void testCardinalityAfterTableSplit() throws Exception {
  SConfiguration config = HConfiguration.getConfiguration();
  HBaseTestingUtility testingUtility = new HBaseTestingUtility((Configuration) config.getConfigSource().unwrapDelegate());
  HBaseAdmin admin = testingUtility.getHBaseAdmin();
  TableName tableName = TableName.valueOf(config.getNamespace(),
      Long.toString(TestUtils.baseTableConglomerateId(spliceClassWatcher.getOrCreateConnection(),
          spliceSchemaWatcher.toString(), "T2")));

  List<HRegionInfo> regions = admin.getTableRegions(tableName);
  int size1 = regions.size();

  if (size1 >= 2) {
    // expect number of partitions to be at least 2 if table split happens
    String sqlText = "explain select * from --splice-properties joinOrder=fixed \n" +
        "t1, t2 --splice-properties joinStrategy=NESTEDLOOP \n" +
        "where c1=c2";

    double outputRows = parseOutputRows(getExplainMessage(4, sqlText, methodWatcher));
    Assert.assertTrue(format("OutputRows is expected to be greater than 1, actual is %s", outputRows), outputRows > 1);

    /* split the table at value 30 */
    methodWatcher.executeUpdate(format("CALL SYSCS_UTIL.SYSCS_SPLIT_TABLE_OR_INDEX_AT_POINTS('%s', '%s', null, '%s')",
        spliceSchemaWatcher.toString(), "T2", "\\x9E"));

    regions = admin.getTableRegions(tableName);
    int size2 = regions.size();

    if (size2 >= 3) {
      // expect number of partitions to be at least 3 if table split happens
      /* The two newly split partitions do not have stats. Ideally, we should re-collect stats,
       * but if we haven't, explain should reflect the stats from the remaining partitions.
       * For the current test case, t2 has some partition stats missing; without the fix for SPLICE-1452,
       * its cardinality estimation assumes unique values for all non-null rows, which is too conservative,
       * so we end up estimating 1 output row from t2 for each outer table row from t1.
       * With SPLICE-1452's fix, we should see a higher number for the output rows from t2.
       */
      outputRows = parseOutputRows(getExplainMessage(4, sqlText, methodWatcher));
      Assert.assertTrue(format("OutputRows is expected to be greater than 1, actual is %s", outputRows), outputRows > 1);
    }
  }
}