Java Code Examples for org.apache.hadoop.hbase.HBaseTestingUtility#startMiniDFSCluster()
The following examples show how to use org.apache.hadoop.hbase.HBaseTestingUtility#startMiniDFSCluster().
Each example names the original project and source file it was taken from.
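As context before the individual examples, here is a minimal, self-contained sketch of the typical lifecycle: start the mini DFS cluster in a setup method, work against its FileSystem, and shut it down afterwards. The class and method names are illustrative only, assuming JUnit 4 and the HBase test artifacts on the classpath; this sketch is not taken from any of the projects below.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniDfsClusterLifecycleSketch {
  private final HBaseTestingUtility util = new HBaseTestingUtility();
  private FileSystem fs;

  @Before
  public void setUp() throws Exception {
    // Start a single-DataNode HDFS mini cluster. The utility also updates
    // fs.defaultFS in its configuration to point at the new cluster, which
    // the Phoenix example below relies on.
    MiniDFSCluster cluster = util.startMiniDFSCluster(1);
    fs = cluster.getFileSystem();
  }

  @Test
  public void testSomethingOnHdfs() throws Exception {
    // Work against the mini cluster like any other HDFS file system.
    Path dir = util.getDataTestDirOnTestFS("sketch");
    fs.mkdirs(dir);
  }

  @After
  public void tearDown() throws Exception {
    util.shutdownMiniDFSCluster();
  }
}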
Example 1
Source File: CustomSaslAuthenticationProviderTestBase.java From hbase with Apache License 2.0
private static void createBaseCluster(HBaseTestingUtility util, File keytabFile, MiniKdc kdc)
    throws Exception {
  String servicePrincipal = "hbase/localhost";
  String spnegoPrincipal = "HTTP/localhost";
  kdc.createPrincipal(keytabFile, servicePrincipal);
  util.startMiniZKCluster();

  HBaseKerberosUtils.setSecuredConfiguration(util.getConfiguration(),
      servicePrincipal + "@" + kdc.getRealm(), spnegoPrincipal + "@" + kdc.getRealm());
  HBaseKerberosUtils.setSSLConfiguration(util, SecureTestCluster.class);

  util.getConfiguration().setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      TokenProvider.class.getName());
  util.startMiniDFSCluster(1);
  Path rootdir = util.getDataTestDirOnTestFS("TestCustomSaslAuthenticationProvider");
  CommonFSUtils.setRootDir(util.getConfiguration(), rootdir);
}
Example 2
Source File: TestRegionServerAbort.java From hbase with Apache License 2.0
@Before
public void setup() throws Exception {
  testUtil = new HBaseTestingUtility();
  conf = testUtil.getConfiguration();
  conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY,
      StopBlockingRegionObserver.class.getName());
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      StopBlockingRegionObserver.class.getName());
  // make sure we have multiple blocks so that the client does not prefetch all block locations
  conf.set("dfs.blocksize", Long.toString(100 * 1024));
  // prefetch the first block
  conf.set(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, Long.toString(100 * 1024));
  conf.set(HConstants.REGION_IMPL, ErrorThrowingHRegion.class.getName());

  testUtil.startMiniZKCluster();
  dfsCluster = testUtil.startMiniDFSCluster(2);
  StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(2).build();
  cluster = testUtil.startMiniHBaseCluster(option);
}
Example 3
Source File: TestFileLink.java From hbase with Apache License 2.0
/**
 * Test, on HDFS, that the FileLink is still readable
 * even when the current file gets renamed.
 */
@Test
public void testHDFSLinkReadDuringRename() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    testLinkReadDuringRename(fs, testUtil.getDefaultRootDirPath());
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
Example 4
Source File: UserDefinedFunctionsIT.java From phoenix with Apache License 2.0
@BeforeClass
public static synchronized void doSetup() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  setUpConfigForMiniCluster(conf);
  util = new HBaseTestingUtility(conf);
  util.startMiniDFSCluster(1);
  util.startMiniZKCluster(1);
  String string = util.getConfiguration().get("fs.defaultFS");
  // PHOENIX-4675 setting the trailing slash implicitly tests that we're doing some path normalization
  conf.set(DYNAMIC_JARS_DIR_KEY, string + "/hbase/tmpjars/");
  util.startMiniHBaseCluster(1, 1);
  UDFExpression.setConfig(conf);

  String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
  url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR
      + clientPort + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
  Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
  props.put(QueryServices.ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB, "true");
  props.put(QueryServices.DYNAMIC_JARS_DIR_KEY, string + "/hbase/tmpjars/");
  driver = initAndRegisterTestDriver(url, new ReadOnlyProps(props.entrySet().iterator()));
}
Example 5
Source File: TestWALEntryStream.java From hbase with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  CONF = TEST_UTIL.getConfiguration();
  TEST_UTIL.startMiniDFSCluster(3);

  cluster = TEST_UTIL.getDFSCluster();
  fs = cluster.getFileSystem();
}
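A @BeforeClass setup like this leaves the cluster running for every test in the class, so it is typically paired with an @AfterClass hook that releases it. A minimal sketch of that counterpart (the method name is illustrative, not taken from the original test):

@AfterClass
public static void tearDownAfterClass() throws Exception {
  // Release the HDFS mini cluster started in setUpBeforeClass().
  TEST_UTIL.shutdownMiniDFSCluster();
}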
Example 6
Source File: TestBlockReorderBlockLocation.java From hbase with Apache License 2.0
@Before
public void setUp() throws Exception {
  htu = new HBaseTestingUtility();
  htu.getConfiguration().setInt("dfs.blocksize", 1024); // For the test with multiple blocks
  htu.getConfiguration().setInt("dfs.replication", 3);
  htu.startMiniDFSCluster(3,
      new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});

  conf = htu.getConfiguration();
  cluster = htu.getDFSCluster();
  dfs = (DistributedFileSystem) FileSystem.get(conf);
}
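Here the three-argument overload pins each of the three DataNodes to an explicit rack ("/r1" through "/r3") and hostname, which gives the block-reorder tests in this and the next two examples a fixed, predictable block-placement topology.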
Example 7
Source File: TestBlockReorder.java From hbase with Apache License 2.0
@Before
public void setUp() throws Exception {
  htu = new HBaseTestingUtility();
  htu.getConfiguration().setInt("dfs.blocksize", 1024); // For the test with multiple blocks
  htu.getConfiguration().setInt("dfs.replication", 3);
  htu.startMiniDFSCluster(3,
      new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});

  conf = htu.getConfiguration();
  cluster = htu.getDFSCluster();
  dfs = (DistributedFileSystem) FileSystem.get(conf);
}
Example 8
Source File: TestBlockReorderMultiBlocks.java From hbase with Apache License 2.0
@Before
public void setUp() throws Exception {
  htu = new HBaseTestingUtility();
  htu.getConfiguration().setInt("dfs.blocksize", 1024); // For the test with multiple blocks
  htu.getConfiguration().setInt("dfs.replication", 3);
  htu.startMiniDFSCluster(3,
      new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});

  conf = htu.getConfiguration();
  cluster = htu.getDFSCluster();
  dfs = (DistributedFileSystem) FileSystem.get(conf);
}
Example 9
Source File: TestCompactionArchiveIOException.java From hbase with Apache License 2.0
@Before
public void setup() throws Exception {
  testUtil = new HBaseTestingUtility();
  testUtil.startMiniDFSCluster(1);
  testDir = testUtil.getDataTestDirOnTestFS();
  CommonFSUtils.setRootDir(testUtil.getConfiguration(), testDir);
}
Example 10
Source File: TestRegionServerReportForDuty.java From hbase with Apache License 2.0
@Before
public void setUp() throws Exception {
  testUtil = new HBaseTestingUtility();
  testUtil.startMiniDFSCluster(1);
  testUtil.startMiniZKCluster(1);
  testUtil.createRootDir();
  cluster = new LocalHBaseCluster(testUtil.getConfiguration(), 0, 0);
}
Example 11
Source File: TestShadeSaslAuthenticationProvider.java From hbase with Apache License 2.0
static LocalHBaseCluster createCluster(HBaseTestingUtility util, File keytabFile,
    MiniKdc kdc, Map<String, char[]> userDatabase) throws Exception {
  String servicePrincipal = "hbase/localhost";
  String spnegoPrincipal = "HTTP/localhost";
  kdc.createPrincipal(keytabFile, servicePrincipal);
  util.startMiniZKCluster();

  HBaseKerberosUtils.setSecuredConfiguration(util.getConfiguration(),
      servicePrincipal + "@" + kdc.getRealm(), spnegoPrincipal + "@" + kdc.getRealm());
  HBaseKerberosUtils.setSSLConfiguration(util, TestShadeSaslAuthenticationProvider.class);

  util.getConfiguration().setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      TokenProvider.class.getName());
  util.startMiniDFSCluster(1);
  Path testDir = util.getDataTestDirOnTestFS("TestShadeSaslAuthenticationProvider");

  USER_DATABASE_FILE = new Path(testDir, "user-db.txt");
  createUserDBFile(USER_DATABASE_FILE.getFileSystem(CONF), USER_DATABASE_FILE, userDatabase);
  CONF.set(ShadeSaslServerAuthenticationProvider.PASSWORD_FILE_KEY,
      USER_DATABASE_FILE.toString());

  Path rootdir = new Path(testDir, "hbase-root");
  CommonFSUtils.setRootDir(CONF, rootdir);
  LocalHBaseCluster cluster = new LocalHBaseCluster(CONF, 1);
  return cluster;
}
Example 12
Source File: TestBackupBase.java From hbase with Apache License 2.0
/**
 * @throws Exception if starting the mini cluster or setting up the tables fails
 */
@Before
public void setUp() throws Exception {
  if (setupIsDone) {
    return;
  }
  if (secure) {
    // set the always on security provider
    UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(),
        HadoopSecurityEnabledUserProviderForTesting.class);
    // setup configuration
    SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());
  }
  conf1.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
  BackupManager.decorateMasterConfiguration(conf1);
  BackupManager.decorateRegionServerConfiguration(conf1);
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  // Set TTL for old WALs to 1 sec to enforce fast cleaning of archived WAL files
  conf1.setLong(TimeToLiveLogCleaner.TTL_CONF_KEY, 1000);
  conf1.setLong(LogCleaner.OLD_WALS_CLEANER_THREAD_TIMEOUT_MSEC, 1000);
  // Set MultiWAL (with 2 default WAL files per RS)
  conf1.set(WALFactory.WAL_PROVIDER, provider);
  TEST_UTIL.startMiniCluster();

  if (useSecondCluster) {
    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    TEST_UTIL2 = new HBaseTestingUtility(conf2);
    TEST_UTIL2.setZkCluster(TEST_UTIL.getZkCluster());
    TEST_UTIL2.startMiniDFSCluster(3);
    String root2 = TEST_UTIL2.getConfiguration().get("fs.defaultFS");
    Path p = new Path(new Path(root2), "/tmp/wal");
    CommonFSUtils.setWALRootDir(TEST_UTIL2.getConfiguration(), p);
    TEST_UTIL2.startMiniCluster();
  }
  conf1 = TEST_UTIL.getConfiguration();

  TEST_UTIL.startMiniMapReduceCluster();
  BACKUP_ROOT_DIR =
      new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")), BACKUP_ROOT_DIR)
          .toString();
  LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
  if (useSecondCluster) {
    BACKUP_REMOTE_ROOT_DIR =
        new Path(new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS"))
            + BACKUP_REMOTE_ROOT_DIR).toString();
    LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
  }
  createTables();
  populateFromMasterConfig(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), conf1);
  setupIsDone = true;
}
Example 13
Source File: TestRSKilledWhenInitializing.java From hbase with Apache License 2.0
/**
 * Test verifies whether a region server is removed from the online servers list in the master
 * if it went down after registering with the master. The test will TIMEOUT if there is an error.
 * @throws Exception
 */
@Test
public void testRSTerminationAfterRegisteringToMasterBeforeCreatingEphemeralNode()
    throws Exception {
  // Create config to use for this cluster
  Configuration conf = HBaseConfiguration.create();
  conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);

  // Start the cluster
  final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
  TEST_UTIL.startMiniDFSCluster(3);
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.createRootDir();
  final LocalHBaseCluster cluster = new LocalHBaseCluster(conf, NUM_MASTERS, NUM_RS,
      HMaster.class, RegisterAndDieRegionServer.class);
  final MasterThread master = startMaster(cluster.getMasters().get(0));
  try {
    // Master is up waiting on RegionServers to check in. Now start RegionServers.
    for (int i = 0; i < NUM_RS; i++) {
      cluster.getRegionServers().get(i).start();
    }
    // Expected total regionservers depends on whether Master can host regions or not.
    int expectedTotalRegionServers = NUM_RS + (LoadBalancer.isTablesOnMaster(conf) ? 1 : 0);
    List<ServerName> onlineServersList = null;
    do {
      onlineServersList = master.getMaster().getServerManager().getOnlineServersList();
    } while (onlineServersList.size() < expectedTotalRegionServers);
    // Wait until killedRS is set. Means RegionServer is starting to go down.
    while (killedRS.get() == null) {
      Threads.sleep(1);
    }
    // Wait on the RegionServer to fully die.
    while (cluster.getLiveRegionServers().size() >= expectedTotalRegionServers) {
      Threads.sleep(1);
    }
    // Make sure Master is fully up before progressing. Could take a while if regions
    // being reassigned.
    while (!master.getMaster().isInitialized()) {
      Threads.sleep(1);
    }

    // Now in steady state. How many regions open? Master should have too many regionservers
    // showing still. The downed RegionServer should still be showing as registered.
    assertTrue(master.getMaster().getServerManager().isServerOnline(killedRS.get()));
    // Find non-meta region (namespace?) and assign to the killed server. That'll trigger cleanup.
    Map<RegionInfo, ServerName> assignments = null;
    do {
      assignments =
          master.getMaster().getAssignmentManager().getRegionStates().getRegionAssignments();
    } while (assignments == null || assignments.size() < 2);
    RegionInfo hri = null;
    for (Map.Entry<RegionInfo, ServerName> e : assignments.entrySet()) {
      if (e.getKey().isMetaRegion()) continue;
      hri = e.getKey();
      break;
    }
    // Try moving region to the killed server. It will fail. As by-product, we will
    // remove the RS from Master online list because no corresponding znode.
    assertEquals(expectedTotalRegionServers,
        master.getMaster().getServerManager().getOnlineServersList().size());
    LOG.info("Move " + hri.getEncodedName() + " to " + killedRS.get());
    master.getMaster().move(hri.getEncodedNameAsBytes(),
        Bytes.toBytes(killedRS.get().toString()));
    // TODO: This test could do more to verify fix. It could create a table
    // and do round-robin assign. It should fail if zombie RS. HBASE-19515.
    // Wait until the RS no longer shows as registered in Master.
    while (onlineServersList.size() > (NUM_RS + 1)) {
      Thread.sleep(100);
      onlineServersList = master.getMaster().getServerManager().getOnlineServersList();
    }
  } finally {
    // Shutdown is messy with complaints about fs being closed. Why? TODO.
    cluster.shutdown();
    cluster.join();
    TEST_UTIL.shutdownMiniDFSCluster();
    TEST_UTIL.shutdownMiniZKCluster();
    TEST_UTIL.cleanupTestDir();
  }
}
Example 14
Source File: TestHRegionReplayEvents.java From hbase with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniDFSCluster(1);
}
Example 15
Source File: TestFileLink.java From hbase with Apache License 2.0
/**
 * Test that the link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked'.
 * The inode, which contains the file's data, is not deleted until all
 * processes have finished with it.
 * In HDFS, when the request exceeds the cached block locations,
 * a query to the namenode is performed, using the filename,
 * and the deleted file doesn't exist anymore (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    List<Path> files = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
      Path path = new Path(String.format("test-data-%d", i));
      writeSomeData(fs, path, 1 << 20, (byte) i);
      files.add(path);
    }

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      int n;

      // Switch to file 1
      n = in.read(data);
      dataVerify(data, n, (byte) 0);
      fs.delete(files.get(0), true);
      skipBuffer(in, (byte) 0);

      // Switch to file 2
      n = in.read(data);
      dataVerify(data, n, (byte) 1);
      fs.delete(files.get(1), true);
      skipBuffer(in, (byte) 1);

      // Switch to file 3
      n = in.read(data);
      dataVerify(data, n, (byte) 2);
      fs.delete(files.get(2), true);
      skipBuffer(in, (byte) 2);

      // No more files available
      try {
        n = in.read(data);
        assert (n <= 0);
      } catch (FileNotFoundException e) {
        assertTrue(true);
      }
    } finally {
      in.close();
    }
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
Example 16
Source File: TestHFileOutputFormat2.java From hbase with Apache License 2.0
@Test
public void testBlockStoragePolicy() throws Exception {
  util = new HBaseTestingUtility();
  Configuration conf = util.getConfiguration();
  conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY, "ALL_SSD");
  conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX +
      Bytes.toString(HFileOutputFormat2.combineTableNameSuffix(
          TABLE_NAMES[0].getName(), FAMILIES[0])), "ONE_SSD");
  Path cf1Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[0]));
  Path cf2Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[1]));

  util.startMiniDFSCluster(3);
  FileSystem fs = util.getDFSCluster().getFileSystem();
  try {
    fs.mkdirs(cf1Dir);
    fs.mkdirs(cf2Dir);

    // the original block storage policy would be HOT
    String spA = getStoragePolicyName(fs, cf1Dir);
    String spB = getStoragePolicyName(fs, cf2Dir);
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertEquals("HOT", spA);
    assertEquals("HOT", spB);

    // alter table cf schema to change storage policies
    HFileOutputFormat2.configureStoragePolicy(conf, fs,
        HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]),
        cf1Dir);
    HFileOutputFormat2.configureStoragePolicy(conf, fs,
        HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]),
        cf2Dir);
    spA = getStoragePolicyName(fs, cf1Dir);
    spB = getStoragePolicyName(fs, cf2Dir);
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertNotNull(spA);
    assertEquals("ONE_SSD", spA);
    assertNotNull(spB);
    assertEquals("ALL_SSD", spB);
  } finally {
    fs.delete(cf1Dir, true);
    fs.delete(cf2Dir, true);
    util.shutdownMiniDFSCluster();
  }
}