Java Code Examples for org.apache.hadoop.hbase.HBaseTestingUtility#startMiniHBaseCluster()
The following examples show how to use org.apache.hadoop.hbase.HBaseTestingUtility#startMiniHBaseCluster().
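Before the examples, here is the shape most of them share: start a mini ZooKeeper cluster first, then start only the HBase mini cluster, skipping the HDFS and MapReduce daemons that a full startMiniCluster() would bring up. The harness below is a minimal sketch of that pattern, not taken from any project on this page; the class and field names are invented for illustration, and it assumes JUnit 4 plus an HBase release that still ships the two-argument startMiniHBaseCluster(int, int) overload (deprecated since HBase 2.2 in favor of StartMiniClusterOption, as Example 2 shows).

  // Minimal sketch; names are illustrative, not from any example below.
  import org.apache.hadoop.hbase.HBaseTestingUtility;
  import org.junit.AfterClass;
  import org.junit.BeforeClass;

  public class MiniHBaseClusterSketch {
    private static HBaseTestingUtility util;

    @BeforeClass
    public static void setUp() throws Exception {
      util = new HBaseTestingUtility();
      util.startMiniZKCluster();         // ZooKeeper only; no HDFS/MR daemons
      util.startMiniHBaseCluster(1, 1);  // 1 master, 1 regionserver (deprecated overload)
    }

    @AfterClass
    public static void tearDown() throws Exception {
      util.shutdownMiniHBaseCluster();   // stop HBase before ZooKeeper
      util.shutdownMiniZKCluster();
    }
  }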
Example 1
Source File: HBaseIOTest.java From beam with Apache License 2.0
@BeforeClass
public static void beforeClass() throws Exception {
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
  // Try to bind the hostname to localhost to solve an issue when it is not
  // configured or no DNS resolution is available.
  conf.setStrings("hbase.master.hostname", "localhost");
  conf.setStrings("hbase.regionserver.hostname", "localhost");
  htu = new HBaseTestingUtility(conf);
  // We don't use the full htu.startMiniCluster() to avoid starting unneeded HDFS/MR daemons
  htu.startMiniZKCluster();
  MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 4);
  hbm.waitForActiveAndReadyMaster();
  admin = htu.getHBaseAdmin();
}
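A side note on Example 1: htu.getHBaseAdmin() is deprecated in HBase 2.x. If the test targets a current release, the equivalent call is getAdmin(), which returns the org.apache.hadoop.hbase.client.Admin interface; a one-line sketch:

  // Sketch: modern replacement for htu.getHBaseAdmin() on HBase 2.x.
  Admin admin = htu.getAdmin();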
Example 2
Source File: TestRegionServerAbort.java From hbase with Apache License 2.0
@Before
public void setup() throws Exception {
  testUtil = new HBaseTestingUtility();
  conf = testUtil.getConfiguration();
  conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY,
      StopBlockingRegionObserver.class.getName());
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      StopBlockingRegionObserver.class.getName());
  // make sure we have multiple blocks so that the client does not prefetch all block locations
  conf.set("dfs.blocksize", Long.toString(100 * 1024));
  // prefetch the first block
  conf.set(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, Long.toString(100 * 1024));
  conf.set(HConstants.REGION_IMPL, ErrorThrowingHRegion.class.getName());
  testUtil.startMiniZKCluster();
  dfsCluster = testUtil.startMiniDFSCluster(2);
  StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(2).build();
  cluster = testUtil.startMiniHBaseCluster(option);
}
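Example 2 is the only one on this page that uses the StartMiniClusterOption builder, which HBase 2.2+ recommends over the deprecated integer-argument overloads seen in the other examples. As a rough sketch (assuming HBase 2.2 or later), the startMiniHBaseCluster(1, 1) calls elsewhere on this page translate to:

  // Sketch: builder-based equivalent of the deprecated startMiniHBaseCluster(1, 1).
  StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(1)           // first int argument of the old overload
      .numRegionServers(1)     // second int argument of the old overload
      .build();
  MiniHBaseCluster cluster = testUtil.startMiniHBaseCluster(option);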
Example 3
Source File: UserDefinedFunctionsIT.java From phoenix with Apache License 2.0
@BeforeClass
public static synchronized void doSetup() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  setUpConfigForMiniCluster(conf);
  util = new HBaseTestingUtility(conf);
  util.startMiniDFSCluster(1);
  util.startMiniZKCluster(1);
  String string = util.getConfiguration().get("fs.defaultFS");
  // PHOENIX-4675 setting the trailing slash implicitly tests that we're doing some path normalization
  conf.set(DYNAMIC_JARS_DIR_KEY, string + "/hbase/tmpjars/");
  util.startMiniHBaseCluster(1, 1);
  UDFExpression.setConfig(conf);
  String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
  url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR
      + clientPort + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
  Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
  props.put(QueryServices.ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB, "true");
  props.put(QueryServices.DYNAMIC_JARS_DIR_KEY, string + "/hbase/tmpjars/");
  driver = initAndRegisterTestDriver(url, new ReadOnlyProps(props.entrySet().iterator()));
}
Example 4
Source File: TestHBaseOutput.java From envelope with Apache License 2.0
@BeforeClass
public static void beforeClass() throws Exception {
  utility = new HBaseTestingUtility();
  utility.startMiniZKCluster();
  utility.startMiniHBaseCluster(1, 1);
  connection = utility.getConnection();
}
Example 5
Source File: TestHBaseStorage.java From spork with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  // This is needed by Pig
  conf = HBaseConfiguration.create(new Configuration());
  util = new HBaseTestingUtility(conf);
  util.startMiniZKCluster();
  util.startMiniHBaseCluster(1, 1);
}
Example 6
Source File: HBaseSITestEnv.java From spliceengine with GNU Affero General Public License v3.0
private void startCluster(Configuration conf) throws Exception {
  int basePort = getNextBasePort();
  // -> MapR work-around
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
  conf.set("fs.default.name", "file:///");
  conf.set("fs.hdfs.client", "org.apache.hadoop.hdfs.DistributedFileSystem");
  System.setProperty("zookeeper.sasl.client", "false");
  System.setProperty("zookeeper.sasl.serverconfig", "fake");
  // <- MapR work-around
  conf.setInt("hbase.master.port", basePort);
  conf.setInt("hbase.master.info.port", basePort + 1);
  conf.setInt("hbase.regionserver.port", basePort + 2);
  conf.setInt("hbase.regionserver.info.port", basePort + 3);
  testUtility = new HBaseTestingUtility(conf);
  Configuration configuration = testUtility.getConfiguration();
  // -> MapR work-around
  configuration.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
  configuration.set("fs.default.name", "file:///");
  configuration.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
  configuration.set("fs.hdfs.client", "org.apache.hadoop.hdfs.DistributedFileSystem");
  System.setProperty("zookeeper.sasl.client", "false");
  System.setProperty("zookeeper.sasl.serverconfig", "fake");
  // <- MapR work-around
  configuration.setInt("hbase.master.port", basePort);
  configuration.setInt("hbase.master.info.port", basePort + 1);
  configuration.setInt("hbase.regionserver.port", basePort + 2);
  configuration.setInt("hbase.regionserver.info.port", basePort + 3);
  if (FileSystem.class.getProtectionDomain().getCodeSource().getLocation().getPath().contains("mapr")) {
    testUtility.startMiniCluster(1);
  } else {
    testUtility.startMiniZKCluster();
    testUtility.startMiniHBaseCluster(1, 1);
  }
  ZkUtils.getZkManager().initialize(HConfiguration.getConfiguration());
  ZkUtils.initializeZookeeper();
}
Example 7
Source File: TestJobSubmission.java From spork with Apache License 2.0
@Test
public void testReducerNumEstimation() throws Exception {
  // Skip the test for Tez. Tez uses a different mechanism.
  // Equivalent test is in TestTezAutoParallelism
  Assume.assumeTrue("Skip this test for TEZ", Util.isMapredExecType(cluster.getExecType()));
  // use the estimation
  Configuration conf = HBaseConfiguration.create(new Configuration());
  HBaseTestingUtility util = new HBaseTestingUtility(conf);
  int clientPort = util.startMiniZKCluster().getClientPort();
  util.startMiniHBaseCluster(1, 1);

  String query = "a = load '/passwd';" +
      "b = group a by $0;" +
      "store b into 'output';";
  PigServer ps = new PigServer(cluster.getExecType(), cluster.getProperties());
  PhysicalPlan pp = Util.buildPp(ps, query);
  MROperPlan mrPlan = Util.buildMRPlan(pp, pc);

  pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
  pc.getConf().setProperty("pig.exec.reducers.max", "10");
  pc.getConf().setProperty(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort));
  ConfigurationValidator.validatePigProperties(pc.getProperties());
  conf = ConfigurationUtil.toConfiguration(pc.getProperties());
  JobControlCompiler jcc = new JobControlCompiler(pc, conf);
  JobControl jc = jcc.compile(mrPlan, "Test");
  Job job = jc.getWaitingJobs().get(0);
  long reducer = Math.min((long) Math.ceil(new File("test/org/apache/pig/test/data/passwd").length() / 100.0), 10);
  Util.assertParallelValues(-1, -1, reducer, reducer, job.getJobConf());

  // use the PARALLEL keyword; it will override the estimated reducer number
  query = "a = load '/passwd';" +
      "b = group a by $0 PARALLEL 2;" +
      "store b into 'output';";
  pp = Util.buildPp(ps, query);
  mrPlan = Util.buildMRPlan(pp, pc);

  pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
  pc.getConf().setProperty("pig.exec.reducers.max", "10");
  ConfigurationValidator.validatePigProperties(pc.getProperties());
  conf = ConfigurationUtil.toConfiguration(pc.getProperties());
  jcc = new JobControlCompiler(pc, conf);
  jc = jcc.compile(mrPlan, "Test");
  job = jc.getWaitingJobs().get(0);
  Util.assertParallelValues(-1, 2, -1, 2, job.getJobConf());

  final byte[] COLUMNFAMILY = Bytes.toBytes("pig");
  util.createTable(Bytes.toBytesBinary("test_table"), COLUMNFAMILY);

  // the estimation won't take effect when it applies to non-dfs sources or to files that don't exist, such as hbase
  query = "a = load 'hbase://test_table' using org.apache.pig.backend.hadoop.hbase.HBaseStorage('c:f1 c:f2');" +
      "b = group a by $0 ;" +
      "store b into 'output';";
  pp = Util.buildPp(ps, query);
  mrPlan = Util.buildMRPlan(pp, pc);

  pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
  pc.getConf().setProperty("pig.exec.reducers.max", "10");
  ConfigurationValidator.validatePigProperties(pc.getProperties());
  conf = ConfigurationUtil.toConfiguration(pc.getProperties());
  jcc = new JobControlCompiler(pc, conf);
  jc = jcc.compile(mrPlan, "Test");
  job = jc.getWaitingJobs().get(0);
  Util.assertParallelValues(-1, -1, -1, 1, job.getJobConf());

  util.deleteTable(Bytes.toBytesBinary("test_table"));
  // In HBase 0.90.1 and above we can use util.shutdownMiniHBaseCluster() here instead.
  MiniHBaseCluster hbc = util.getHBaseCluster();
  if (hbc != null) {
    hbc.shutdown();
    hbc.join();
  }
  util.shutdownMiniZKCluster();
}
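The manual shutdown block at the end of Example 7 exists only for pre-0.90.1 compatibility, as its own comment notes. On any HBase release covered by the other examples here, the same teardown can be written directly (a sketch reusing Example 7's util variable):

  // Sketch: modern equivalent of Example 7's manual shutdown block.
  util.shutdownMiniHBaseCluster();  // stops the master and all regionservers
  util.shutdownMiniZKCluster();     // then stop the mini ZooKeeper cluster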
Example 8
Source File: RunLocalTest.java From hadoop-arch-book with Apache License 2.0
public static void main(String[] args) throws Exception {
  HBaseTestingUtility htu = HBaseTestingUtility.createLocalHTU();
  Configuration config = htu.getConfiguration();
  htu.cleanupTestDir();
  htu.startMiniZKCluster();
  htu.startMiniHBaseCluster(1, 1);

  RemoveTables.executeDeleteTables(config);
  CreateTables.executeCreateTables(config);

  // Start up servers
  Server flumeTestServer = startTestFlumeServer(4243);
  List<String> flumePorts = new ArrayList<String>();
  flumePorts.add("127.0.0.1:4243");
  EventReviewServer server = new EventReviewServer(4242, config, flumePorts, false);
  server.startServer();
  EventClient client = new EventClient("127.0.0.1", 4242);
  client.startClient();
  HConnection connection = HConnectionManager.createConnection(config);

  // populate initial data
  populateUserProfileData(connection);
  populateValidationRules(connection);

  // populate user events
  UserEvent userEvent = new UserEvent("101", System.currentTimeMillis(), "127.0.0.1",
      "1", "55555", "42", 100.0, "101", true);
  client.submitUserEvent(userEvent);

  // shut down servers
  client.closeClient();
  server.closeServer();
  stopTestFlumeServer(flumeTestServer);
  htu.shutdownMiniCluster();
}