Java Code Examples for org.apache.hadoop.hbase.HTableDescriptor#setRegionReplication()
The following examples show how to use org.apache.hadoop.hbase.HTableDescriptor#setRegionReplication(). All of them are taken from the Apache HBase project's test sources; the source file for each example is noted above it.
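Every example below follows the same basic pattern: build a table descriptor, call setRegionReplication() with the desired total number of region replicas (the primary plus its read replicas), and create the table. As a minimal sketch of that pattern outside a test harness, assuming an illustrative table name "demo" and column family "cf" (and with the usual org.apache.hadoop.hbase and org.apache.hadoop.hbase.client imports omitted, matching the snippets below):

// Minimal sketch (illustrative names): create a table with three region replicas.
Configuration conf = HBaseConfiguration.create();
try (Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin()) {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
  htd.addFamily(new HColumnDescriptor("cf"));  // one column family
  htd.setRegionReplication(3);                 // one primary region plus two read replicas
  admin.createTable(htd);
}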
Example 1
Source File: TestRegionReplicasWithModifyTable.java (from the hbase project, Apache License 2.0)

private static void enableReplicationByModification(final TableName tableName, boolean withReplica,
    int initialReplicaCount, int enableReplicaCount, int splitCount)
    throws IOException, InterruptedException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  if (withReplica) {
    htd.setRegionReplication(initialReplicaCount);
  }
  if (splitCount > 0) {
    byte[][] splits = getSplits(splitCount);
    table = HTU.createTable(htd, new byte[][] { f }, splits,
      new Configuration(HTU.getConfiguration()));
  } else {
    table = HTU.createTable(htd, new byte[][] { f }, (byte[][]) null,
      new Configuration(HTU.getConfiguration()));
  }
  HBaseTestingUtility.setReplicas(HTU.getAdmin(), table.getName(), enableReplicaCount);
}
Example 2
Source File: TestRegionReplicaReplicationEndpoint.java (from the hbase project, Apache License 2.0)

public void testRegionReplicaReplication(int regionReplication) throws Exception {
  // test region replica replication. Create a table with single region, write some data
  // ensure that data is replicated to the secondary region
  TableName tableName = TableName.valueOf("testRegionReplicaReplicationWithReplicas_"
      + regionReplication);
  HTableDescriptor htd = HTU.createTableDescriptor(TableName.valueOf(tableName.toString()),
    HColumnDescriptor.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
    HColumnDescriptor.DEFAULT_KEEP_DELETED);
  htd.setRegionReplication(regionReplication);
  HTU.getAdmin().createTable(htd);
  TableName tableNameNoReplicas =
      TableName.valueOf("testRegionReplicaReplicationWithReplicas_NO_REPLICAS");
  HTU.deleteTableIfAny(tableNameNoReplicas);
  HTU.createTable(tableNameNoReplicas, HBaseTestingUtility.fam1);

  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  Table tableNoReplicas = connection.getTable(tableNameNoReplicas);
  try {
    // load some data to the non-replicated table
    HTU.loadNumericRows(tableNoReplicas, HBaseTestingUtility.fam1, 6000, 7000);

    // load the data to the table
    HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);

    verifyReplication(tableName, regionReplication, 0, 1000);
  } finally {
    table.close();
    tableNoReplicas.close();
    HTU.deleteTableIfAny(tableNameNoReplicas);
    connection.close();
  }
}
Example 3
Source File: TestRegionReplicaReplicationEndpoint.java (from the hbase project, Apache License 2.0)

@Test
public void testRegionReplicaWithoutMemstoreReplication() throws Exception {
  int regionReplication = 3;
  final TableName tableName = TableName.valueOf(name.getMethodName());
  HTableDescriptor htd = HTU.createTableDescriptor(tableName);
  htd.setRegionReplication(regionReplication);
  htd.setRegionMemstoreReplication(false);
  HTU.getAdmin().createTable(htd);

  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  try {
    // write data to the primary. The replicas should not receive the data
    final int STEP = 100;
    for (int i = 0; i < 3; ++i) {
      final int startRow = i * STEP;
      final int endRow = (i + 1) * STEP;
      LOG.info("Writing data from " + startRow + " to " + endRow);
      HTU.loadNumericRows(table, HBaseTestingUtility.fam1, startRow, endRow);
      verifyReplication(tableName, regionReplication, startRow, endRow, false);

      // Flush the table, now the data should show up in the replicas
      LOG.info("flushing table");
      HTU.flush(tableName);
      verifyReplication(tableName, regionReplication, 0, endRow, true);
    }
  } finally {
    table.close();
    connection.close();
  }
}
Example 4
Source File: TestRegionReplicaReplicationEndpoint.java (from the hbase project, Apache License 2.0)

@Test
public void testRegionReplicaReplicationForFlushAndCompaction() throws Exception {
  // Tests a table with region replication 3. Writes some data, and causes flushes and
  // compactions. Verifies that the data is readable from the replicas. Note that this
  // does not test whether the replicas actually pick up flushed files and apply compaction
  // to their stores
  int regionReplication = 3;
  final TableName tableName = TableName.valueOf(name.getMethodName());
  HTableDescriptor htd = HTU.createTableDescriptor(tableName);
  htd.setRegionReplication(regionReplication);
  HTU.getAdmin().createTable(htd);

  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  try {
    // load the data to the table
    for (int i = 0; i < 6000; i += 1000) {
      LOG.info("Writing data from " + i + " to " + (i + 1000));
      HTU.loadNumericRows(table, HBaseTestingUtility.fam1, i, i + 1000);
      LOG.info("flushing table");
      HTU.flush(tableName);
      LOG.info("compacting table");
      HTU.compact(tableName, false);
    }

    verifyReplication(tableName, regionReplication, 0, 1000);
  } finally {
    table.close();
    connection.close();
  }
}
Example 5
Source File: TestReplicaWithCluster.java (from the hbase project, Apache License 2.0)

@Test
public void testCreateDeleteTable() throws IOException {
  // Create table then get the single region for our new table.
  HTableDescriptor hdt = HTU.createTableDescriptor(TableName.valueOf("testCreateDeleteTable"),
    HColumnDescriptor.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
    HColumnDescriptor.DEFAULT_KEEP_DELETED);
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  Table table = HTU.createTable(hdt, new byte[][] { f }, null);

  Put p = new Put(row);
  p.addColumn(f, row, row);
  table.put(p);

  Get g = new Get(row);
  Result r = table.get(g);
  Assert.assertFalse(r.isStale());

  try {
    // But if we ask for stale we will get it
    SlowMeCopro.cdl.set(new CountDownLatch(1));
    g = new Get(row);
    g.setConsistency(Consistency.TIMELINE);
    r = table.get(g);
    Assert.assertTrue(r.isStale());
    SlowMeCopro.cdl.get().countDown();
  } finally {
    SlowMeCopro.cdl.get().countDown();
    SlowMeCopro.sleepTime.set(0);
  }

  HTU.getAdmin().disableTable(hdt.getTableName());
  HTU.deleteTable(hdt.getTableName());
}
Example 6
Source File: TestReplicaWithCluster.java (from the hbase project, Apache License 2.0)

@Test
public void testReplicaGetWithPrimaryDown() throws IOException {
  // Create table then get the single region for our new table.
  HTableDescriptor hdt = HTU.createTableDescriptor(TableName.valueOf("testCreateDeleteTable"),
    HColumnDescriptor.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
    HColumnDescriptor.DEFAULT_KEEP_DELETED);
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(RegionServerStoppedCopro.class.getName());
  try {
    Table table = HTU.createTable(hdt, new byte[][] { f }, null);

    Put p = new Put(row);
    p.addColumn(f, row, row);
    table.put(p);

    // Flush so it can be picked by the replica refresher thread
    HTU.flush(table.getName());

    // Sleep for some time until data is picked up by replicas
    try {
      Thread.sleep(2 * REFRESH_PERIOD);
    } catch (InterruptedException e1) {
      LOG.error(e1.toString(), e1);
    }

    // But if we ask for stale we will get it
    Get g = new Get(row);
    g.setConsistency(Consistency.TIMELINE);
    Result r = table.get(g);
    Assert.assertTrue(r.isStale());
  } finally {
    HTU.getAdmin().disableTable(hdt.getTableName());
    HTU.deleteTable(hdt.getTableName());
  }
}
Example 7
Source File: TestRegionReplicasAreDistributed.java (from the hbase project, Apache License 2.0)

private static void createTableDirectlyFromHTD(final TableName tableName) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.setRegionReplication(3); // create a table with 3 replication
  table = HTU.createTable(htd, new byte[][] { f }, getSplits(20),
    new Configuration(HTU.getConfiguration()));
}
Example 8
Source File: TestRegionReplicaReplicationEndpoint.java (from the hbase project, Apache License 2.0)

private void testRegionReplicaReplicationIgnores(boolean dropTable, boolean disableReplication)
    throws Exception {
  // tests having edits from a disabled or dropped table is handled correctly by skipping those
  // entries and further edits after the edits from dropped/disabled table can be replicated
  // without problems.
  final TableName tableName = TableName.valueOf(
    name.getMethodName() + "_drop_" + dropTable + "_disabledReplication_" + disableReplication);
  HTableDescriptor htd = HTU.createTableDescriptor(tableName);
  int regionReplication = 3;
  htd.setRegionReplication(regionReplication);
  HTU.deleteTableIfAny(tableName);

  HTU.getAdmin().createTable(htd);
  TableName toBeDisabledTable = TableName.valueOf(
    dropTable ? "droppedTable" : (disableReplication ? "disableReplication" : "disabledTable"));
  HTU.deleteTableIfAny(toBeDisabledTable);
  htd = HTU.createTableDescriptor(TableName.valueOf(toBeDisabledTable.toString()),
    HColumnDescriptor.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
    HColumnDescriptor.DEFAULT_KEEP_DELETED);
  htd.setRegionReplication(regionReplication);
  HTU.getAdmin().createTable(htd);

  // both tables are created, now pause replication
  HTU.getAdmin().disableReplicationPeer(ServerRegionReplicaUtil.getReplicationPeerId());

  // now that the replication is disabled, write to the table to be dropped, then drop the table.
  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  Table tableToBeDisabled = connection.getTable(toBeDisabledTable);

  HTU.loadNumericRows(tableToBeDisabled, HBaseTestingUtility.fam1, 6000, 7000);

  RegionLocator rl = connection.getRegionLocator(toBeDisabledTable);
  HRegionLocation hrl = rl.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY);
  byte[] encodedRegionName = hrl.getRegion().getEncodedNameAsBytes();

  Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(Bytes.toBytes("A"))
    .setFamily(HTU.fam1).setValue(Bytes.toBytes("VAL")).setType(Type.Put).build();
  Entry entry = new Entry(
    new WALKeyImpl(encodedRegionName, toBeDisabledTable, 1),
    new WALEdit().add(cell));

  HTU.getAdmin().disableTable(toBeDisabledTable); // disable the table
  if (dropTable) {
    HTU.getAdmin().deleteTable(toBeDisabledTable);
  } else if (disableReplication) {
    htd.setRegionReplication(regionReplication - 2);
    HTU.getAdmin().modifyTable(htd);
    HTU.getAdmin().enableTable(toBeDisabledTable);
  }

  HRegionServer rs = HTU.getMiniHBaseCluster().getRegionServer(0);
  MetricsSource metrics = mock(MetricsSource.class);
  ReplicationEndpoint.Context ctx =
    new ReplicationEndpoint.Context(rs, HTU.getConfiguration(), HTU.getConfiguration(),
      HTU.getTestFileSystem(), ServerRegionReplicaUtil.getReplicationPeerId(),
      UUID.fromString(rs.getClusterId()), rs.getReplicationSourceService().getReplicationPeers()
        .getPeer(ServerRegionReplicaUtil.getReplicationPeerId()),
      metrics, rs.getTableDescriptors(), rs);
  RegionReplicaReplicationEndpoint rrpe = new RegionReplicaReplicationEndpoint();
  rrpe.init(ctx);
  rrpe.start();
  ReplicationEndpoint.ReplicateContext repCtx = new ReplicationEndpoint.ReplicateContext();
  repCtx.setEntries(Lists.newArrayList(entry, entry));
  assertTrue(rrpe.replicate(repCtx));
  verify(metrics, times(1)).incrLogEditsFiltered(eq(2L));
  rrpe.stop();

  if (disableReplication) {
    // enable replication again so that we can verify replication
    HTU.getAdmin().disableTable(toBeDisabledTable); // disable the table
    htd.setRegionReplication(regionReplication);
    HTU.getAdmin().modifyTable(htd);
    HTU.getAdmin().enableTable(toBeDisabledTable);
  }

  try {
    // load some data to the to-be-dropped table
    // load the data to the table
    HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);

    // now enable the replication
    HTU.getAdmin().enableReplicationPeer(ServerRegionReplicaUtil.getReplicationPeerId());

    verifyReplication(tableName, regionReplication, 0, 1000);
  } finally {
    table.close();
    rl.close();
    tableToBeDisabled.close();
    HTU.deleteTableIfAny(toBeDisabledTable);
    connection.close();
  }
}
Example 9
Source File: TestReplicaWithCluster.java (from the hbase project, Apache License 2.0)

@Test
public void testReplicaScanWithPrimaryDown() throws IOException {
  // Create table then get the single region for our new table.
  HTableDescriptor hdt = HTU.createTableDescriptor(TableName.valueOf("testCreateDeleteTable"),
    HColumnDescriptor.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
    HColumnDescriptor.DEFAULT_KEEP_DELETED);
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(RegionServerStoppedCopro.class.getName());
  try {
    Table table = HTU.createTable(hdt, new byte[][] { f }, null);

    Put p = new Put(row);
    p.addColumn(f, row, row);
    table.put(p);

    // Flush so it can be picked by the replica refresher thread
    HTU.flush(table.getName());

    // Sleep for some time until data is picked up by replicas
    try {
      Thread.sleep(2 * REFRESH_PERIOD);
    } catch (InterruptedException e1) {
      LOG.error(e1.toString(), e1);
    }

    // But if we ask for stale we will get it
    // Instantiating the Scan class
    Scan scan = new Scan();
    // Scanning the required columns
    scan.addFamily(f);
    scan.setConsistency(Consistency.TIMELINE);
    // Getting the scan result
    ResultScanner scanner = table.getScanner(scan);
    Result r = scanner.next();
    Assert.assertTrue(r.isStale());
  } finally {
    HTU.getAdmin().disableTable(hdt.getTableName());
    HTU.deleteTable(hdt.getTableName());
  }
}
Example 10
Source File: TestReplicaWithCluster.java (from the hbase project, Apache License 2.0)

@Test
public void testReplicaGetWithAsyncRpcClientImpl() throws IOException {
  HTU.getConfiguration().setBoolean("hbase.ipc.client.specificThreadForWriting", true);
  HTU.getConfiguration().set(
    "hbase.rpc.client.impl", "org.apache.hadoop.hbase.ipc.AsyncRpcClient");
  // Create table then get the single region for our new table.
  HTableDescriptor hdt = HTU.createTableDescriptor(
    TableName.valueOf("testReplicaGetWithAsyncRpcClientImpl"),
    HColumnDescriptor.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
    HColumnDescriptor.DEFAULT_KEEP_DELETED);
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(SlowMeCopro.class.getName());

  try {
    Table table = HTU.createTable(hdt, new byte[][] { f }, null);

    Put p = new Put(row);
    p.addColumn(f, row, row);
    table.put(p);

    // Flush so it can be picked by the replica refresher thread
    HTU.flush(table.getName());

    // Sleep for some time until data is picked up by replicas
    try {
      Thread.sleep(2 * REFRESH_PERIOD);
    } catch (InterruptedException e1) {
      LOG.error(e1.toString(), e1);
    }

    try {
      // Create the new connection so new config can kick in
      Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Table t = connection.getTable(hdt.getTableName());

      // But if we ask for stale we will get it
      SlowMeCopro.cdl.set(new CountDownLatch(1));
      Get g = new Get(row);
      g.setConsistency(Consistency.TIMELINE);
      Result r = t.get(g);
      Assert.assertTrue(r.isStale());
      SlowMeCopro.cdl.get().countDown();
    } finally {
      SlowMeCopro.cdl.get().countDown();
      SlowMeCopro.sleepTime.set(0);
    }
  } finally {
    HTU.getConfiguration().unset("hbase.ipc.client.specificThreadForWriting");
    HTU.getConfiguration().unset("hbase.rpc.client.impl");
    HTU.getAdmin().disableTable(hdt.getTableName());
    HTU.deleteTable(hdt.getTableName());
  }
}
Example 11
Source File: TestSnapshotTemporaryDirectory.java (from the hbase project, Apache License 2.0)

@Before
public void setup() throws Exception {
  HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
  htd.setRegionReplication(getNumReplicas());
  UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration());
}
Example 12
Source File: TestSnapshotFromClient.java (from the hbase project, Apache License 2.0)

protected void createTable() throws Exception {
  HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
  htd.setRegionReplication(getNumReplicas());
  UTIL.createTable(htd, new byte[][] { TEST_FAM }, null);
}
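Note that HTableDescriptor is deprecated in current HBase 2.x releases; the same region replication setting is available on the builder-based descriptor API. The sketch below is illustrative only: the table name, column family, and admin variable are assumptions, not taken from the examples above.

// Minimal sketch with the builder-based API (HBase 2.x); names are illustrative.
TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
  .setRegionReplication(3) // same semantics as HTableDescriptor#setRegionReplication
  .build();
admin.createTable(td); // admin is an org.apache.hadoop.hbase.client.Admin instance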