org.apache.hadoop.hbase.HBaseConfiguration Java Examples
The following examples show how to use org.apache.hadoop.hbase.HBaseConfiguration. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
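Before the project-specific examples, here is a minimal sketch of the pattern they all share: HBaseConfiguration.create() layers hbase-default.xml and hbase-site.xml from the classpath onto a Hadoop Configuration, which client code can further customize before opening a Connection. This is illustrative only; the class name, quorum value, and table name below are placeholder assumptions, not values from any of the projects shown.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class HBaseConfigurationSketch {
  public static void main(String[] args) throws IOException {
    // create() loads hbase-default.xml and hbase-site.xml (if present on the
    // classpath) on top of the Hadoop defaults.
    Configuration conf = HBaseConfiguration.create();
    // Placeholder quorum; in practice this usually comes from hbase-site.xml.
    conf.set("hbase.zookeeper.quorum", "localhost");
    // try-with-resources closes the table and connection cleanly.
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf("example_table"))) {
      System.out.println("Opened table: " + table.getName());
    }
  }
}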
Example #1
Source File: HfileBulkImporter.java From super-cloudops with Apache License 2.0
/**
 * e.g.<br/>
 *
 * <pre>
 * yarn jar super-devops-tool-hbase-migrator-master.jar \
 * com.wl4g.devops.tool.hbase.migrator.HfileBulkImporter \
 * -z emr-header-1:2181 \
 * -t safeclound.tb_elec_power \
 * -p /tmp-devops/safeclound.tb_elec_power
 * </pre>
 *
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
  HbaseMigrateUtils.showBanner();
  CommandLine line = new Builder().option("z", "zkaddr", null, "Zookeeper address.")
      .option("t", "tabname", null, "Hbase table name.")
      .option("p", "path", null, "Data HDFS path to be imported. e.g. hdfs://localhost:9000/bak/safeclound.tb_air")
      .build(args);
  Configuration cfg = HBaseConfiguration.create();
  cfg.set("hbase.zookeeper.quorum", line.getOptionValue("z"));
  Connection conn = ConnectionFactory.createConnection(cfg);
  Admin admin = conn.getAdmin();
  Table table = conn.getTable(TableName.valueOf(line.getOptionValue("t")));
  LoadIncrementalHFiles load = new LoadIncrementalHFiles(cfg);
  load.doBulkLoad(new Path(line.getOptionValue("p")), admin, table,
      conn.getRegionLocator(TableName.valueOf(line.getOptionValue("t"))));
}
Example #2
Source File: Main.java From flink-learning with Apache License 2.0
private static void writeEventToHbase(String string, ParameterTool parameterTool) throws IOException {
  Configuration configuration = HBaseConfiguration.create();
  configuration.set(HBASE_ZOOKEEPER_QUORUM, parameterTool.get(HBASE_ZOOKEEPER_QUORUM));
  configuration.set(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT, parameterTool.get(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT));
  configuration.set(HBASE_RPC_TIMEOUT, parameterTool.get(HBASE_RPC_TIMEOUT));
  configuration.set(HBASE_CLIENT_OPERATION_TIMEOUT, parameterTool.get(HBASE_CLIENT_OPERATION_TIMEOUT));
  configuration.set(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, parameterTool.get(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));

  Connection connect = ConnectionFactory.createConnection(configuration);
  Admin admin = connect.getAdmin();
  if (!admin.tableExists(HBASE_TABLE_NAME)) { // check whether the table exists; create it if not
    admin.createTable(new HTableDescriptor(HBASE_TABLE_NAME).addFamily(new HColumnDescriptor(INFO_STREAM)));
  }
  Table table = connect.getTable(HBASE_TABLE_NAME);

  TimeStamp ts = new TimeStamp(new Date());
  Date date = ts.getDate();

  Put put = new Put(Bytes.toBytes(date.getTime()));
  put.addColumn(Bytes.toBytes(INFO_STREAM), Bytes.toBytes("test"), Bytes.toBytes(string));
  table.put(put);
  table.close();
  connect.close();
}
Example #3
Source File: TestHBaseBase.java From eagle with Apache License 2.0
@BeforeClass
public static void setUpHBase() {
  Configuration configuration = HBaseConfiguration.create();
  configuration.set("zookeeper.znode.parent", getZkZnodeParent());
  configuration.setInt("hbase.master.info.port", -1); // avoid port clobbering
  configuration.setInt("hbase.regionserver.info.port", -1); // avoid port clobbering
  hbase = new HBaseTestingUtility(configuration);
  try {
    hbase.startMiniCluster();
  } catch (Exception e) {
    LOGGER.error("Failed to start hbase mini cluster: " + e.getMessage(), e);
    throw new IllegalStateException(e);
  }
  System.setProperty("storage.hbase.autoCreateTable", "false");
  System.setProperty("storage.hbase.zookeeperZnodeParent", getZkZnodeParent());
  System.setProperty("storage.hbase.zookeeperPropertyClientPort", String.valueOf(hbase.getZkCluster().getClientPort()));
}
Example #4
Source File: TestTableInputFormat.java From hbase with Apache License 2.0
@Override
protected void initialize(JobContext job) throws IOException {
  Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job.getConfiguration()));
  TableName tableName = TableName.valueOf("exampleTable");
  // mandatory
  initializeTable(connection, tableName);
  byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") };
  // optional
  Scan scan = new Scan();
  for (byte[] family : inputColumns) {
    scan.addFamily(family);
  }
  Filter exampleFilter = new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*"));
  scan.setFilter(exampleFilter);
  setScan(scan);
}
Example #5
Source File: HBaseIndex.java From hudi with Apache License 2.0
private Connection getHBaseConnection() {
  Configuration hbaseConfig = HBaseConfiguration.create();
  String quorum = config.getHbaseZkQuorum();
  hbaseConfig.set("hbase.zookeeper.quorum", quorum);
  String zkZnodeParent = config.getHBaseZkZnodeParent();
  if (zkZnodeParent != null) {
    hbaseConfig.set("zookeeper.znode.parent", zkZnodeParent);
  }
  String port = String.valueOf(config.getHbaseZkPort());
  hbaseConfig.set("hbase.zookeeper.property.clientPort", port);
  try {
    return ConnectionFactory.createConnection(hbaseConfig);
  } catch (IOException e) {
    throw new HoodieDependentSystemUnavailableException(HoodieDependentSystemUnavailableException.HBASE,
        quorum + ":" + port);
  }
}
Example #6
Source File: BackupSystemTable.java From hbase with Apache License 2.0
/**
 * Get backup system table descriptor
 * @return table's descriptor
 */
public static TableDescriptor getSystemTableForBulkLoadedDataDescriptor(Configuration conf) {
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(getTableNameForBulkLoadedData(conf));

  ColumnFamilyDescriptorBuilder colBuilder = ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);
  colBuilder.setMaxVersions(1);
  Configuration config = HBaseConfiguration.create();
  int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
      BackupRestoreConstants.BACKUP_SYSTEM_TTL_DEFAULT);
  colBuilder.setTimeToLive(ttl);
  ColumnFamilyDescriptor colSessionsDesc = colBuilder.build();
  builder.setColumnFamily(colSessionsDesc);
  colBuilder = ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY);
  colBuilder.setTimeToLive(ttl);
  builder.setColumnFamily(colBuilder.build());
  return builder.build();
}
Example #7
Source File: DropIndexDuringUpsertIT.java From phoenix with Apache License 2.0
@Before
public void doSetup() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  setUpConfigForMiniCluster(conf);
  conf.setInt("hbase.client.retries.number", 2);
  conf.setInt("hbase.client.pause", 5000);
  conf.setInt("hbase.balancer.period", Integer.MAX_VALUE);
  conf.setLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB, 0);
  util = new HBaseTestingUtility(conf);
  util.startMiniCluster(NUM_SLAVES);
  String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
  url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
      + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;

  Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
  // Must update config before starting server
  props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
  driver = initAndRegisterTestDriver(url, new ReadOnlyProps(props.entrySet().iterator()));
}
Example #8
Source File: HBaseStreamPartitioner.java From opensoc-streaming with Apache License 2.0
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
  System.out.println("preparing HBaseStreamPartitioner for streamId " + stream.get_streamId());
  this.targetTasks = targetTasks;
  this.targetTasksSize = this.targetTasks.size();

  Configuration conf = HBaseConfiguration.create();
  try {
    hTable = new HTable(conf, tableName);
    refreshRegionInfo(tableName);
    System.out.println("regionStartKeyRegionNameMap: " + regionStartKeyRegionNameMap);
  } catch (IOException e) {
    e.printStackTrace();
  }
}
Example #9
Source File: TestThriftConnection.java From hbase with Apache License 2.0
private static Connection createConnection(int port, boolean useHttp) throws IOException {
  Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, ThriftConnection.class.getName());
  if (useHttp) {
    conf.set(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,
        ThriftConnection.HTTPThriftClientBuilder.class.getName());
  }
  String host = HConstants.LOCALHOST;
  if (useHttp) {
    host = "http://" + host;
  }
  conf.set(Constants.HBASE_THRIFT_SERVER_NAME, host);
  conf.setInt(Constants.HBASE_THRIFT_SERVER_PORT, port);
  return ConnectionFactory.createConnection(conf);
}
Example #10
Source File: IndexUpgradeTool.java From phoenix with Apache License 2.0
@VisibleForTesting
public int executeTool() {
  Configuration conf = HBaseConfiguration.addHbaseResources(getConf());

  try (Connection conn = getConnection(conf)) {
    ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class).getQueryServices();

    boolean status = extractTablesAndIndexes(conn.unwrap(PhoenixConnection.class));

    if (status) {
      return executeTool(conn, queryServices, conf);
    }
  } catch (SQLException e) {
    LOGGER.severe("Something went wrong in executing tool " + e);
  }
  return -1;
}
Example #11
Source File: IndexScrutinyTool.java From phoenix with Apache License 2.0
private Job configureSubmittableJob(Job job, Path outputPath, Class<IndexScrutinyMapperForTest> mapperClass) throws Exception {
  Configuration conf = job.getConfiguration();
  conf.setBoolean("mapreduce.job.user.classpath.first", true);
  HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
  job.setJarByClass(IndexScrutinyTool.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  if (outputInvalidRows && OutputFormat.FILE.equals(outputFormat)) {
    job.setOutputFormatClass(TextOutputFormat.class);
    FileOutputFormat.setOutputPath(job, outputPath);
  }
  job.setMapperClass((mapperClass == null ? IndexScrutinyMapper.class : mapperClass));
  job.setNumReduceTasks(0);
  // Set the Output classes
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  TableMapReduceUtil.addDependencyJars(job);
  return job;
}
Example #12
Source File: HBaseRangerAuthorizationTest.java From ranger with Apache License 2.0
@Test
public void testReadRowFromColFam2AsGroupIT() throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.zookeeper.quorum", "localhost");
  conf.set("hbase.zookeeper.property.clientPort", "" + port);
  conf.set("zookeeper.znode.parent", "/hbase-unsecure");

  String user = "public";
  UserGroupInformation ugi = UserGroupInformation.createUserForTesting(user, new String[] { "IT" });
  ugi.doAs(new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      Connection conn = ConnectionFactory.createConnection(conf);
      Table table = conn.getTable(TableName.valueOf("temp"));

      // Read a row
      Get get = new Get(Bytes.toBytes("row1"));
      Result result = table.get(get);
      byte[] valResult = result.getValue(Bytes.toBytes("colfam2"), Bytes.toBytes("col1"));
      Assert.assertNull(valResult);

      conn.close();
      return null;
    }
  });
}
Example #13
Source File: HBaseDao.java From metron with Apache License 2.0
@Override
public synchronized void init(AccessConfig config) {
  if (this.tableInterface == null) {
    this.config = config;
    Map<String, Object> globalConfig = config.getGlobalConfigSupplier().get();
    if (globalConfig == null) {
      throw new IllegalStateException("Cannot find the global config.");
    }
    String table = (String) globalConfig.get(HBASE_TABLE);
    String cf = (String) config.getGlobalConfigSupplier().get().get(HBASE_CF);
    if (table == null || cf == null) {
      throw new IllegalStateException("You must configure " + HBASE_TABLE + " and " + HBASE_CF
          + " in the global config.");
    }
    try {
      tableInterface = config.getTableProvider().getTable(HBaseConfiguration.create(), table);
      this.cf = cf.getBytes(StandardCharsets.UTF_8);
    } catch (IOException e) {
      throw new IllegalStateException("Unable to initialize HBaseDao: " + e.getMessage(), e);
    }
  }
}
Example #14
Source File: TestThriftConnection.java From hbase with Apache License 2.0
private static ThriftServer startThriftServer(int port, boolean useHttp) {
  Configuration thriftServerConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  thriftServerConf.setInt(Constants.PORT_CONF_KEY, port);
  if (useHttp) {
    thriftServerConf.setBoolean(Constants.USE_HTTP_CONF_KEY, true);
  }
  ThriftServer server = new ThriftServer(thriftServerConf);
  Thread thriftServerThread = new Thread(() -> {
    try {
      server.run();
    } catch (Exception t) {
      LOG.error("Thrift Server failed", t);
    }
  });
  thriftServerThread.setDaemon(true);
  thriftServerThread.start();
  if (useHttp) {
    TEST_UTIL.waitFor(10000, () -> server.getHttpServer() != null);
  } else {
    TEST_UTIL.waitFor(10000, () -> server.getTserver() != null);
  }
  return server;
}
Example #15
Source File: TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java From hbase with Apache License 2.0
@BeforeClass
public static void beforeAllTests() throws Exception {
  groups = new String[] { RSGroupInfo.DEFAULT_GROUP };
  servers = generateServers(3);
  groupMap = constructGroupInfo(servers, groups);
  tableDescs = constructTableDesc(false);
  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.regions.slop", "0");
  conf.setFloat("hbase.master.balancer.stochastic.readRequestCost", 10000f);
  conf.set("hbase.rsgroup.grouploadbalancer.class", StochasticLoadBalancer.class.getCanonicalName());
  loadBalancer = new RSGroupBasedLoadBalancer();
  loadBalancer.setRsGroupInfoManager(getMockedGroupInfoManager());
  loadBalancer.setMasterServices(getMockedMaster());
  loadBalancer.setConf(conf);
  loadBalancer.initialize();
}
Example #16
Source File: TestHbaseClient.java From Kylin with Apache License 2.0
public static void main(String[] args) throws IOException {
  foo(6, 5);
  foo(5, 2);
  foo(3, 0);

  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.zookeeper.quorum", "hbase_host");
  conf.set("zookeeper.znode.parent", "/hbase-unsecure");

  HTable table = new HTable(conf, "test1");
  Put put = new Put(Bytes.toBytes("row1"));
  put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
  put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("val2"));
  table.put(put);
  table.close();
}
Example #17
Source File: ReplicationSink.java From hbase with Apache License 2.0
/**
 * Create a sink for replication
 * @param conf conf object
 * @param stopper boolean to tell this thread to stop
 * @throws IOException thrown when HDFS goes bad or bad file name
 */
public ReplicationSink(Configuration conf, Stoppable stopper) throws IOException {
  this.conf = HBaseConfiguration.create(conf);
  decorateConf();
  this.metrics = new MetricsSink();
  this.walEntrySinkFilter = setupWALEntrySinkFilter();
  String className = conf.get("hbase.replication.source.fs.conf.provider",
      DefaultSourceFSConfigurationProvider.class.getCanonicalName());
  try {
    Class<? extends SourceFSConfigurationProvider> c =
        Class.forName(className).asSubclass(SourceFSConfigurationProvider.class);
    this.provider = c.getDeclaredConstructor().newInstance();
  } catch (Exception e) {
    throw new IllegalArgumentException(
        "Configured source fs configuration provider class " + className + " throws error.", e);
  }
}
Example #18
Source File: TestCommonsAES.java From hbase with Apache License 2.0
@Test
public void testAESAlgorithm() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  Cipher aes = Encryption.getCipher(conf, "AES");
  assertEquals(CommonsCryptoAES.KEY_LENGTH, aes.getKeyLength());
  assertEquals(CommonsCryptoAES.IV_LENGTH, aes.getIvLength());
  Encryptor e = aes.getEncryptor();
  e.setKey(new SecretKeySpec(Bytes.fromHex("2b7e151628aed2a6abf7158809cf4f3c"), "AES"));
  e.setIv(Bytes.fromHex("f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff"));

  ByteArrayOutputStream out = new ByteArrayOutputStream();
  OutputStream cout = e.createEncryptionStream(out);
  cout.write(Bytes.fromHex("6bc1bee22e409f96e93d7e117393172a"));
  cout.write(Bytes.fromHex("ae2d8a571e03ac9c9eb76fac45af8e51"));
  cout.write(Bytes.fromHex("30c81c46a35ce411e5fbc1191a0a52ef"));
  cout.write(Bytes.fromHex("f69f2445df4f9b17ad2b417be66c3710"));
  cout.close();

  ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
  byte[] b = new byte[16];
  IOUtils.readFully(in, b);
  assertTrue("Failed #1", Bytes.equals(b, Bytes.fromHex("874d6191b620e3261bef6864990db6ce")));
  IOUtils.readFully(in, b);
  assertTrue("Failed #2", Bytes.equals(b, Bytes.fromHex("9806f66b7970fdff8617187bb9fffdff")));
  IOUtils.readFully(in, b);
  assertTrue("Failed #3", Bytes.equals(b, Bytes.fromHex("5ae4df3edbd5d35e5b4f09020db03eab")));
  IOUtils.readFully(in, b);
  assertTrue("Failed #4", Bytes.equals(b, Bytes.fromHex("1e031dda2fbe03d1792170a0f3009cee")));
}
Example #19
Source File: ThriftServer.java From hbase with Apache License 2.0
/**
 * Start up the Thrift2 server.
 */
public static void main(String[] args) throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  // for now, only time we return is on an argument error.
  final int status = ToolRunner.run(conf, new ThriftServer(conf), args);
  System.exit(status);
}
Example #20
Source File: RemoteDictionaryStore.java From kylin with Apache License 2.0
static Connection getConnection() {
  Configuration conf = HBaseConfiguration.create(HadoopUtil.getCurrentConfiguration());
  try {
    return ConnectionFactory.createConnection(conf);
  } catch (IOException ioe) {
    throw new IllegalStateException("Cannot connect to HBase.", ioe);
  }
}
Example #21
Source File: ExpressionFilterTest.java From learning-hadoop with Apache License 2.0
public static void main(String[] args) throws Exception {
  if (args.length < 2) {
    throw new Exception("Table name and start key must be specified.");
  }
  Configuration conf = HBaseConfiguration.create();
  HTable table = new HTable(conf, args[0]);
  String startKey = args[1];

  Expression exp = ExpressionFactory.eq(
      ExpressionFactory.toLong(ExpressionFactory.toString(ExpressionFactory.columnValue("family", "longStr2"))),
      ExpressionFactory.constant(Long.parseLong("99")));
  ExpressionFilter expressionFilter = new ExpressionFilter(exp);
  Scan scan = new Scan(Bytes.toBytes(startKey), expressionFilter);
  int count = 0;
  ResultScanner scanner = table.getScanner(scan);
  Result r = scanner.next();
  while (r != null) {
    count++;
    r = scanner.next();
  }
  System.out.println("++ Scanning finished with count : " + count + " ++");
  scanner.close();
}
Example #22
Source File: TestHeapMemoryManager.java From hbase with Apache License 2.0
@Test
public void testWhenClusterIsWriteHeavyWithOffheapMemstore() throws Exception {
  BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4));
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.75f);
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.10f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.05f);
  conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000);
  conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
  RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub(conf);
  MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4));
  // Empty block cache but nearly filled memstore
  blockCache.setTestBlockSize(0);
  regionServerAccounting.setTestMemstoreSize((long) (maxHeapSize * 0.4 * 0.8));
  // Let the system start with default values for memstore heap and block cache size.
  HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher,
      new RegionServerStub(conf), regionServerAccounting);
  long oldMemstoreHeapSize = memStoreFlusher.memstoreSize;
  long oldBlockCacheSize = blockCache.maxSize;
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
  heapMemoryManager.start(choreService);
  // this should not change anything with onheap memstore
  memStoreFlusher.flushType = FlushType.ABOVE_OFFHEAP_HIGHER_MARK;
  memStoreFlusher.requestFlush(null, FlushLifeCycleTracker.DUMMY);
  memStoreFlusher.requestFlush(null, FlushLifeCycleTracker.DUMMY);
  memStoreFlusher.requestFlush(null, FlushLifeCycleTracker.DUMMY);
  memStoreFlusher.requestFlush(null, FlushLifeCycleTracker.DUMMY);
  // Allow the tuner to run once and make any necessary memory adjustments
  Thread.sleep(1500);
  // No changes should be made by tuner as we already have a lot of empty space
  assertEquals(oldMemstoreHeapSize, memStoreFlusher.memstoreSize);
  assertEquals(oldBlockCacheSize, blockCache.maxSize);
}
Example #23
Source File: PhoenixConfigurationUtil.java From phoenix with Apache License 2.0
public static void loadHBaseConfiguration(Job job) throws IOException {
  // load hbase-site.xml
  Configuration hbaseConf = HBaseConfiguration.create();
  for (Map.Entry<String, String> entry : hbaseConf) {
    if (job.getConfiguration().get(entry.getKey()) == null) {
      job.getConfiguration().set(entry.getKey(), entry.getValue());
    }
  }
  // In order to have Phoenix working on a secured cluster
  TableMapReduceUtil.initCredentials(job);
}
Example #24
Source File: ThreatIntelAdapter.java From metron with Apache License 2.0
@Override
public boolean initializeAdapter(Map<String, Object> configuration) {
  PersistentAccessTracker accessTracker;
  String hbaseTable = config.getHBaseTable();
  int expectedInsertions = config.getExpectedInsertions();
  double falsePositives = config.getFalsePositiveRate();
  String trackerHBaseTable = config.getTrackerHBaseTable();
  String trackerHBaseCF = config.getTrackerHBaseCF();
  long millisecondsBetweenPersist = config.getMillisecondsBetweenPersists();
  BloomAccessTracker bat = new BloomAccessTracker(hbaseTable, expectedInsertions, falsePositives);
  Configuration hbaseConfig = HBaseConfiguration.create();

  try {
    accessTracker = new PersistentAccessTracker(hbaseTable,
        UUID.randomUUID().toString(),
        config.getProvider().getTable(hbaseConfig, trackerHBaseTable),
        trackerHBaseCF,
        bat,
        millisecondsBetweenPersist);
    lookup = new EnrichmentLookup(config.getProvider().getTable(hbaseConfig, hbaseTable),
        config.getHBaseCF(), accessTracker);
  } catch (IOException e) {
    LOG.error("Unable to initialize ThreatIntelAdapter", e);
    return false;
  }
  return true;
}
Example #25
Source File: InvalidListPruneTest.java From phoenix-tephra with Apache License 2.0
@BeforeClass
public static void startMiniCluster() throws Exception {
  // Setup the configuration to start HBase cluster with the invalid list pruning enabled
  conf = HBaseConfiguration.create();
  conf.setBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE, true);
  // Flush prune data to table quickly, so that tests don't have to wait long to see updates
  conf.setLong(TxConstants.TransactionPruning.PRUNE_FLUSH_INTERVAL, 0L);
  AbstractHBaseTableTest.startMiniCluster();

  TransactionStateStorage txStateStorage = new InMemoryTransactionStateStorage();
  TransactionManager txManager = new TransactionManager(conf, txStateStorage, new TxMetricsCollector());
  txManager.startAndWait();

  // Do some transactional data operations
  txDataTable1 = TableName.valueOf("invalidListPruneTestTable1");
  HTable hTable = createTable(txDataTable1.getName(), new byte[][] { family }, false,
      Collections.singletonList(TestTransactionProcessor.class.getName()));
  try (TransactionAwareHTable txTable = new TransactionAwareHTable(hTable, TxConstants.ConflictDetection.ROW)) {
    TransactionContext txContext = new TransactionContext(new InMemoryTxSystemClient(txManager), txTable);
    txContext.start();
    for (int i = 0; i < MAX_ROWS; ++i) {
      txTable.put(new Put(Bytes.toBytes(i)).addColumn(family, qualifier, Bytes.toBytes(i)));
    }
    txContext.finish();
  }

  testUtil.flush(txDataTable1);
  txManager.stopAndWait();

  pruneStateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
      TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  dataJanitorState = new DataJanitorState(new DataJanitorState.TableSupplier() {
    @Override
    public Table get() throws IOException {
      return testUtil.getConnection().getTable(pruneStateTable);
    }
  });
}
Example #26
Source File: Compressor.java From hbase with Apache License 2.0
private static void transformFile(Path input, Path output) throws IOException {
  Configuration conf = HBaseConfiguration.create();

  FileSystem inFS = input.getFileSystem(conf);
  FileSystem outFS = output.getFileSystem(conf);

  WAL.Reader in = WALFactory.createReaderIgnoreCustomClass(inFS, input, conf);
  WALProvider.Writer out = null;

  try {
    if (!(in instanceof ReaderBase)) {
      System.err.println("Cannot proceed, invalid reader type: " + in.getClass().getName());
      return;
    }
    boolean compress = ((ReaderBase) in).hasCompression();
    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, !compress);
    out = WALFactory.createWALWriter(outFS, output, conf);

    WAL.Entry e = null;
    while ((e = in.next()) != null) {
      out.append(e);
    }
  } finally {
    in.close();
    if (out != null) {
      out.close();
      out = null;
    }
  }
}
Example #27
Source File: TestStripeCompactionPolicy.java From hbase with Apache License 2.0
@Test
public void testSingleStripeDropDeletes() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // Test depends on this not being set to pass. Default breaks test. TODO: Revisit.
  conf.unset("hbase.hstore.compaction.min.size");
  StripeCompactionPolicy policy = createPolicy(conf);
  // Verify the deletes can be dropped if there are no L0 files.
  Long[][] stripes = new Long[][] { new Long[] { 3L, 2L, 2L, 2L }, new Long[] { 6L } };
  StripeInformationProvider si = createStripesWithSizes(0, 0, stripes);
  verifySingleStripeCompaction(policy, si, 0, true);
  // But cannot be dropped if there are.
  si = createStripesWithSizes(2, 2, stripes);
  verifySingleStripeCompaction(policy, si, 0, false);
  // Unless there are enough to cause L0 compaction.
  si = createStripesWithSizes(6, 2, stripes);
  ConcatenatedLists<HStoreFile> sfs = new ConcatenatedLists<>();
  sfs.addSublist(si.getLevel0Files());
  sfs.addSublist(si.getStripes().get(0));
  verifyCompaction(policy, si, sfs, si.getStartRow(0), si.getEndRow(0), si.getStripeBoundaries());
  // If we cannot actually compact all files in some stripe, L0 is chosen.
  si = createStripesWithSizes(6, 2,
      new Long[][] { new Long[] { 10L, 1L, 1L, 1L, 1L }, new Long[] { 12L } });
  verifyCompaction(policy, si, si.getLevel0Files(), null, null, si.getStripeBoundaries());
  // Even if L0 has no files: if not all files of a stripe are selected, deletes must not be dropped.
  stripes = new Long[][] { new Long[] { 100L, 3L, 2L, 2L, 2L }, new Long[] { 6L } };
  si = createStripesWithSizes(0, 0, stripes);
  List<HStoreFile> compactFile = new ArrayList<>();
  Iterator<HStoreFile> iter = si.getStripes().get(0).listIterator(1);
  while (iter.hasNext()) {
    compactFile.add(iter.next());
  }
  verifyCompaction(policy, si, compactFile, false, 1, null, si.getStartRow(0), si.getEndRow(0), true);
}
Example #28
Source File: DefaultSourceFSConfigurationProvider.java From hbase with Apache License 2.0
@Override
public Configuration getConf(Configuration sinkConf, String replicationClusterId) throws IOException {
  if (sourceClustersConfs.get(replicationClusterId) == null) {
    synchronized (this.sourceClustersConfs) {
      if (sourceClustersConfs.get(replicationClusterId) == null) {
        LOG.info("Loading source cluster FS client conf for cluster " + replicationClusterId);
        // Load only user provided client configurations.
        Configuration sourceClusterConf = new Configuration(false);

        String replicationConfDir = sinkConf.get(HConstants.REPLICATION_CONF_DIR);
        if (replicationConfDir == null) {
          LOG.debug(HConstants.REPLICATION_CONF_DIR + " is not configured.");
          URL resource = HBaseConfiguration.class.getClassLoader().getResource("hbase-site.xml");
          if (resource != null) {
            String path = resource.getPath();
            replicationConfDir = path.substring(0, path.lastIndexOf("/"));
          } else {
            replicationConfDir = System.getenv("HBASE_CONF_DIR");
          }
        }

        File confDir = new File(replicationConfDir, replicationClusterId);
        LOG.info("Loading source cluster " + replicationClusterId
            + " file system configurations from xml files under directory " + confDir);
        String[] listofConfFiles = FileUtil.list(confDir);
        for (String confFile : listofConfFiles) {
          if (new File(confDir, confFile).isFile() && confFile.endsWith(XML)) {
            // Add all the user provided client conf files
            sourceClusterConf.addResource(new Path(confDir.getPath(), confFile));
          }
        }
        this.sourceClustersConfs.put(replicationClusterId, sourceClusterConf);
      }
    }
  }
  return this.sourceClustersConfs.get(replicationClusterId);
}
Example #29
Source File: HBaseConfigModule.java From phoenix-omid with Apache License 2.0
@Provides
public Configuration provideHBaseConfig() throws IOException {
  Configuration configuration = HBaseConfiguration.create();
  SecureHBaseConfig secureHBaseConfig = new SecureHBaseConfig();
  secureHBaseConfig.setKeytab(keytab);
  secureHBaseConfig.setPrincipal(principal);
  HBaseLogin.loginIfNeeded(secureHBaseConfig);
  return configuration;
}
Example #30
Source File: TestSplitLogWorker.java From hbase with Apache License 2.0
@Test
public void testAcquireMultiTasks() throws Exception {
  LOG.info("testAcquireMultiTasks");
  SplitLogCounters.resetCounters();
  final String TATAS = "tatas";
  final ServerName RS = ServerName.valueOf("rs,1,1");
  final int maxTasks = 3;
  Configuration testConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  testConf.setInt(HBASE_SPLIT_WAL_MAX_SPLITTER, maxTasks);
  RegionServerServices mockedRS = getRegionServer(RS);
  for (int i = 0; i < maxTasks; i++) {
    zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS + i),
        new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1")).toByteArray(),
        Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
  }

  SplitLogWorker slw = new SplitLogWorker(ds, testConf, mockedRS, neverEndingTask);
  slw.start();
  try {
    waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, maxTasks, WAIT_TIME);
    for (int i = 0; i < maxTasks; i++) {
      byte[] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS + i));
      SplitLogTask slt = SplitLogTask.parseFrom(bytes);
      assertTrue(slt.isOwned(RS));
    }
  } finally {
    stopSplitLogWorker(slw);
  }
}