org.apache.hadoop.hbase.client.replication.ReplicationAdmin Java Examples
The following examples show how to use
org.apache.hadoop.hbase.client.replication.ReplicationAdmin.
The examples are taken from open-source projects; the source file and project for each one are noted above its code.
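Before the examples, here is a minimal, self-contained sketch of the basic ReplicationAdmin workflow (construct from a Configuration, list peers, add a peer, remove it, close). It assumes an HBase 1.x client on the classpath; the peer id "peer1" and the ZooKeeper cluster key "remote-zk:2181:/hbase" are placeholder values, not taken from the examples below.

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class ReplicationAdminSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ReplicationAdmin is Closeable, so try-with-resources releases its connection.
        try (ReplicationAdmin admin = new ReplicationAdmin(conf)) {
            // Print the peers that are currently configured.
            for (Map.Entry<String, ReplicationPeerConfig> entry : admin.listPeerConfigs().entrySet()) {
                System.out.println(entry.getKey() + " -> " + entry.getValue().getClusterKey());
            }

            // Add a peer pointing at another cluster (placeholder cluster key).
            // A null table-CF map means "replicate every replication-scoped table".
            ReplicationPeerConfig peerConfig = new ReplicationPeerConfig()
                    .setClusterKey("remote-zk:2181:/hbase");
            admin.addPeer("peer1", peerConfig, null);

            // Remove the peer again.
            admin.removePeer("peer1");
        }
    }
}

Note that later HBase releases deprecate ReplicationAdmin in favor of the equivalent replication methods on org.apache.hadoop.hbase.client.Admin; the examples below target the older API.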
Example #1
Source File: BaseTest.java From hbase-connect-kafka with Apache License 2.0
/**
 * @param configuration
 * @param peerName
 * @param tableCFs
 * @throws ReplicationException
 * @throws IOException
 */
protected void addPeer(final Configuration configuration, String peerName,
        Map<TableName, List<String>> tableCFs) throws ReplicationException, IOException {
    try (ReplicationAdmin replicationAdmin = new ReplicationAdmin(configuration)) {
        ReplicationPeerConfig peerConfig = new ReplicationPeerConfig()
                .setClusterKey(ZKConfig.getZooKeeperClusterKey(configuration))
                .setReplicationEndpointImpl(HbaseEndpoint.class.getName());

        replicationAdmin.addPeer(peerName, peerConfig, tableCFs);
    }
}
Example #2
Source File: IndexerIT.java From hbase-indexer with Apache License 2.0
@Before
public void setUpBeforeTest() throws Exception {
    if (!firstTest) {
        // Delete /ngdata from zookeeper
        System.out.println(">>> Deleting /ngdata node from ZooKeeper");
        cleanZooKeeper("localhost:" + hbaseTestUtil.getZkCluster().getClientPort(), "/ngdata");

        // Delete all hbase tables
        System.out.println(">>> Deleting all HBase tables");
        Admin admin = connection.getAdmin();
        for (HTableDescriptor table : admin.listTables()) {
            admin.disableTable(table.getTableName());
            admin.deleteTable(table.getTableName());
        }
        admin.close();

        // Delete all replication peers
        System.out.println(">>> Deleting all replication peers from HBase");
        ReplicationAdmin replAdmin = new ReplicationAdmin(conf);
        for (String peerId : replAdmin.listPeerConfigs().keySet()) {
            replAdmin.removePeer(peerId);
        }
        replAdmin.close();
        SepTestUtil.waitOnAllReplicationPeersStopped();

        // Clear Solr indexes
        System.out.println(">>> Clearing Solr indexes");
        collection1.deleteByQuery("*:*");
        collection1.commit();
        collection2.deleteByQuery("*:*");
        collection2.commit();
    } else {
        firstTest = false;
    }

    main = new Main();
    main.startServices(conf);
}
Example #3
Source File: SepModelImpl.java From hbase-indexer with Apache License 2.0
@Override
public boolean hasSubscription(String name) throws IOException {
    ReplicationAdmin replicationAdmin = new ReplicationAdmin(hbaseConf);
    try {
        String internalName = toInternalSubscriptionName(name);
        return replicationAdmin.listPeerConfigs().containsKey(internalName);
    } finally {
        Closer.close(replicationAdmin);
    }
}
Example #4
Source File: TestKafkaReplication.java From hbase-connect-kafka with Apache License 2.0
/**
 * Removes the peer
 * @throws IOException
 * @throws ReplicationException
 */
private void removePeer() throws IOException, ReplicationException {
    try (ReplicationAdmin replicationAdmin = new ReplicationAdmin(utility.getConfiguration())) {
        replicationAdmin.removePeer(PEER_NAME);
    }
}
Example #5
Source File: MutableIndexReplicationIT.java From phoenix with Apache License 2.0
private static void setupConfigsAndStartCluster() throws Exception {
    // cluster-1 lives at regular HBase home, so we don't need to change how phoenix handles
    // lookups
    // conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");

    // smaller log roll size to trigger more events
    setUpConfigForMiniCluster(conf1);
    conf1.setFloat("hbase.regionserver.logroll.multiplier", 0.0003f);
    conf1.setInt("replication.source.size.capacity", 10240);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setInt("zookeeper.recovery.retry", 1);
    conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
    conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    conf1.setBoolean("dfs.support.append", true);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setInt("replication.stats.thread.period.seconds", 5);
    conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);

    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    // Have to reset conf1 in case zk cluster location different
    // than default
    conf1 = utility1.getConfiguration();
    zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
    admin = new ReplicationAdmin(conf1);
    LOG.info("Setup first Zk");

    // Base conf2 on conf1 so it gets the right zk cluster, and general cluster configs
    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
    conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    conf2.setBoolean("dfs.support.append", true);
    conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);

    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);

    // replicate from cluster 1 -> cluster 2, but not back again
    admin.addPeer("1", utility2.getClusterKey());
    LOG.info("Setup second Zk");

    utility1.startMiniCluster(2);
    utility2.startMiniCluster(2);
}