Java Code Examples for org.apache.hadoop.hbase.HTableDescriptor#getColumnFamilies()
The following examples show how to use org.apache.hadoop.hbase.HTableDescriptor#getColumnFamilies().
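Before the project-specific examples, here is a minimal, self-contained sketch of the call itself. It is not taken from any of the projects below; it assumes an HBase 1.x client on the classpath, a reachable cluster, and a placeholder table name ("my_table").

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListColumnFamilies {
    public static void main(String[] args) throws IOException {
        // "my_table" is a placeholder; point this at an existing table on your cluster.
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
            HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf("my_table"));
            // getColumnFamilies() returns one HColumnDescriptor per column family.
            for (HColumnDescriptor family : tableDesc.getColumnFamilies()) {
                System.out.println(family.getNameAsString() + " TTL=" + family.getTimeToLive());
            }
        }
    }
}

The returned HColumnDescriptor array is what the examples below inspect for compression, TTL, replication scope, versions, and in-memory settings.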
Example 1
Source File: Helper.java From antsdb with GNU Lesser General Public License v3.0
public static void truncateTable(Connection connection, String namespace, String tableName) {
    try {
        TableName table = TableName.valueOf(namespace, tableName);

        // get compression type
        Table htable = connection.getTable(table);
        HTableDescriptor tableDesc = htable.getTableDescriptor();
        HColumnDescriptor[] families = tableDesc.getColumnFamilies();
        Algorithm compressionType = families[0].getCompression();

        // drop table
        dropTable(connection, namespace, tableName);

        // create table
        createTable(connection, namespace, tableName, compressionType);
    }
    catch (Exception ex) {
        throw new OrcaHBaseException("Failed to truncate table - " + tableName, ex);
    }
}
Example 2
Source File: AlterTableIT.java From phoenix with Apache License 2.0
@Test
public void testSetHTableAndHColumnProperties() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    String ddl = "CREATE TABLE T3 (\n"
            + "ID1 VARCHAR(15) NOT NULL,\n"
            + "ID2 VARCHAR(15) NOT NULL,\n"
            + "CREATED_DATE DATE,\n"
            + "CREATION_TIME BIGINT,\n"
            + "LAST_USED DATE,\n"
            + "CONSTRAINT PK PRIMARY KEY (ID1, ID2)) SALT_BUCKETS = 8";
    Connection conn1 = DriverManager.getConnection(getUrl(), props);
    conn1.createStatement().execute(ddl);
    ddl = "ALTER TABLE T3 SET COMPACTION_ENABLED = FALSE, REPLICATION_SCOPE = 1";
    conn1.createStatement().execute(ddl);
    try (HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
        HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes("T3"));
        HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
        assertEquals(1, columnFamilies.length);
        assertEquals("0", columnFamilies[0].getNameAsString());
        assertEquals(1, columnFamilies[0].getScope());
        assertEquals(false, tableDesc.isCompactionEnabled());
    }
}
Example 3
Source File: AlterTableIT.java From phoenix with Apache License 2.0
@Test
public void testNewColumnFamilyInheritsTTLOfEmptyCF() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    String ddl = "CREATE TABLE NEWCFTTLTEST (\n"
            + "ID1 VARCHAR(15) NOT NULL,\n"
            + "ID2 VARCHAR(15) NOT NULL,\n"
            + "CREATED_DATE DATE,\n"
            + "CREATION_TIME BIGINT,\n"
            + "LAST_USED DATE,\n"
            + "CONSTRAINT PK PRIMARY KEY (ID1, ID2)) SALT_BUCKETS = 8, TTL = 1000";
    Connection conn1 = DriverManager.getConnection(getUrl(), props);
    conn1.createStatement().execute(ddl);
    ddl = "ALTER TABLE NEWCFTTLTEST ADD CF.STRING VARCHAR";
    conn1.createStatement().execute(ddl);
    try (HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
        HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes("NEWCFTTLTEST"));
        HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
        assertEquals(2, columnFamilies.length);
        assertEquals("0", columnFamilies[0].getNameAsString());
        assertEquals(1000, columnFamilies[0].getTimeToLive());
        assertEquals("CF", columnFamilies[1].getNameAsString());
        assertEquals(1000, columnFamilies[1].getTimeToLive());
    }
}
Example 4
Source File: ModifyTableCommand.java From pinpoint with Apache License 2.0
@Override
public boolean execute(HbaseAdminOperation hbaseAdminOperation) {
    HTableDescriptor htd = getHtd();
    HColumnDescriptor[] hcds = htd.getColumnFamilies();
    if (ArrayUtils.isEmpty(hcds)) {
        return false;
    }

    TableName tableName = htd.getTableName();
    HTableDescriptor currentHtd = hbaseAdminOperation.getTableDescriptor(tableName);

    // Filter existing column families as column family modification is not supported.
    // We could use modifyTable(HTableDescriptor) to add column families, but this deletes existing column families
    // if they are not specified in HTableDescriptor and this may be dangerous.
    // Instead, use addColumn.
    boolean changesMade = false;
    for (HColumnDescriptor hcd : hcds) {
        if (!currentHtd.hasFamily(hcd.getName())) {
            logger.info("Adding {} to {} table.", hcd, tableName);
            hbaseAdminOperation.addColumn(tableName, hcd);
            changesMade = true;
        }
    }
    return changesMade;
}
Example 5
Source File: HBaseUtils.java From bigdata-tutorial with Apache License 2.0
/**
 * print table info
 *
 * @param table
 */
public static void printTableInfo(HTableInterface table) {
    try {
        HTableDescriptor desc = table.getTableDescriptor();
        LOGGER.info(">>>> Print Table {} Desc", new String(table.getTableName()));
        for (HColumnDescriptor colDesc : desc.getColumnFamilies()) {
            LOGGER.info(">>>> family column: {}", colDesc.getNameAsString());
        }
    } catch (Exception ex) {
        LOGGER.error("printTable info Error:", ex);
    }
}
Example 6
Source File: HBaseFactoryTest.java From bigdata-tutorial with Apache License 2.0
public void printTableDesc(String tableName) {
    try {
        HTableInterface table = getTable(tableName);
        HTableDescriptor desc = table.getTableDescriptor();
        LOGGER.info(">>>> Print Table {} Desc", tableName);
        for (HColumnDescriptor colDesc : desc.getColumnFamilies()) {
            LOGGER.info(">>>> family column: {}", colDesc.getNameAsString());
        }
    } catch (Exception ex) {
        LOGGER.error(">>>> Print table desc error:", ex);
    }
}
Example 7
Source File: AlterTableIT.java From phoenix with Apache License 2.0
@Test
public void testSettingPropertiesWhenTableHasDefaultColFamilySpecified() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    String ddl = "CREATE TABLE T11 (\n"
            + "ID1 VARCHAR(15) NOT NULL,\n"
            + "ID2 VARCHAR(15) NOT NULL,\n"
            + "CREATED_DATE DATE,\n"
            + "CREATION_TIME BIGINT,\n"
            + "CF.LAST_USED DATE,\n"
            + "CONSTRAINT PK PRIMARY KEY (ID1, ID2)) IMMUTABLE_ROWS=true, DEFAULT_COLUMN_FAMILY = 'XYZ'";
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.createStatement().execute(ddl);
    assertImmutableRows(conn, "T11", true);
    ddl = "ALTER TABLE T11 SET COMPACTION_ENABLED = FALSE, CF.REPLICATION_SCOPE=1, IMMUTABLE_ROWS = TRUE, TTL=1000";
    conn.createStatement().execute(ddl);
    assertImmutableRows(conn, "T11", true);
    try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
        HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes("T11"));
        HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
        assertEquals(2, columnFamilies.length);
        assertEquals("CF", columnFamilies[0].getNameAsString());
        assertEquals(1, columnFamilies[0].getScope());
        assertEquals(1000, columnFamilies[0].getTimeToLive());
        assertEquals("XYZ", columnFamilies[1].getNameAsString());
        assertEquals(DEFAULT_REPLICATION_SCOPE, columnFamilies[1].getScope());
        assertEquals(1000, columnFamilies[1].getTimeToLive());
        assertEquals(Boolean.toString(false), tableDesc.getValue(HTableDescriptor.COMPACTION_ENABLED));
    }
}
Example 8
Source File: AlterTableIT.java From phoenix with Apache License 2.0
@Test
public void testAddNewColumnFamilyProperties() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);

    try {
        conn.createStatement()
                .execute(
                        "CREATE TABLE mixed_add_table "
                                + " (a_string varchar not null, col1 integer, cf1.col2 integer, col3 integer , cf2.col4 integer "
                                + " CONSTRAINT pk PRIMARY KEY (a_string)) immutable_rows=true , SALT_BUCKETS=3 ");

        String ddl = "Alter table mixed_add_table add cf3.col5 integer, cf4.col6 integer in_memory=true";
        conn.createStatement().execute(ddl);

        try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes("MIXED_ADD_TABLE"));
            assertTrue(tableDesc.isCompactionEnabled());
            HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
            assertEquals(5, columnFamilies.length);
            assertEquals("0", columnFamilies[0].getNameAsString());
            assertFalse(columnFamilies[0].isInMemory());
            assertEquals("CF1", columnFamilies[1].getNameAsString());
            assertFalse(columnFamilies[1].isInMemory());
            assertEquals("CF2", columnFamilies[2].getNameAsString());
            assertFalse(columnFamilies[2].isInMemory());
            assertEquals("CF3", columnFamilies[3].getNameAsString());
            assertTrue(columnFamilies[3].isInMemory());
            assertEquals("CF4", columnFamilies[4].getNameAsString());
            assertTrue(columnFamilies[4].isInMemory());
        }
    } finally {
        conn.close();
    }
}
Example 9
Source File: AlterTableIT.java From phoenix with Apache License 2.0
@Test
public void testAddProperyToExistingColumnFamily() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);

    try {
        conn.createStatement()
                .execute(
                        "CREATE TABLE exist_table "
                                + " (a_string varchar not null, col1 integer, cf1.col2 integer, col3 integer , cf2.col4 integer "
                                + " CONSTRAINT pk PRIMARY KEY (a_string)) immutable_rows=true , SALT_BUCKETS=3 ");

        String ddl = "Alter table exist_table add cf1.col5 integer in_memory=true";
        conn.createStatement().execute(ddl);

        try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes("EXIST_TABLE"));
            assertTrue(tableDesc.isCompactionEnabled());
            HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
            assertEquals(3, columnFamilies.length);
            assertEquals("0", columnFamilies[0].getNameAsString());
            assertFalse(columnFamilies[0].isInMemory());
            assertEquals("CF1", columnFamilies[1].getNameAsString());
            assertTrue(columnFamilies[1].isInMemory());
            assertEquals("CF2", columnFamilies[2].getNameAsString());
            assertFalse(columnFamilies[2].isInMemory());
        }
    } finally {
        conn.close();
    }
}
Example 10
Source File: AlterTableIT.java From phoenix with Apache License 2.0
@Test
public void testSetHColumnPropertyAndAddColumnForDefaultCFForTableWithOnlyPKCols() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);
    try {
        String ddl = "create table IF NOT EXISTS SETHCPROPADDCOLPKONLY ("
                + " id char(1) NOT NULL,"
                + " col1 integer NOT NULL,"
                + " col2 bigint NOT NULL,"
                + " CONSTRAINT NAME_PK PRIMARY KEY (id, col1, col2)"
                + " ) TTL=86400, SALT_BUCKETS = 4, DEFAULT_COLUMN_FAMILY='XYZ'";
        conn.createStatement().execute(ddl);
        ddl = "ALTER TABLE SETHCPROPADDCOLPKONLY ADD COL3 INTEGER IN_MEMORY=true";
        conn.createStatement().execute(ddl);
        conn.commit();
        try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes("SETHCPROPADDCOLPKONLY"));
            HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
            assertEquals(1, columnFamilies.length);
            assertEquals(true, columnFamilies[0].isInMemory());
            assertEquals("XYZ", columnFamilies[0].getNameAsString());
        }
    } finally {
        conn.close();
    }
}
Example 11
Source File: AlterTableIT.java From phoenix with Apache License 2.0
@Test
public void testSetHColumnPropertyAndAddColumnForNewCFForTableWithOnlyPKCols() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);
    try {
        String ddl = "create table IF NOT EXISTS SETHCPROPADDNEWCFCOLPKONLY ("
                + " id char(1) NOT NULL,"
                + " col1 integer NOT NULL,"
                + " col2 bigint NOT NULL,"
                + " CONSTRAINT NAME_PK PRIMARY KEY (id, col1, col2)"
                + " ) TTL=86400, SALT_BUCKETS = 4, DEFAULT_COLUMN_FAMILY='XYZ'";
        conn.createStatement().execute(ddl);
        ddl = "ALTER TABLE SETHCPROPADDNEWCFCOLPKONLY ADD NEWCF.COL3 INTEGER IN_MEMORY=true";
        conn.createStatement().execute(ddl);
        conn.commit();
        try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes("SETHCPROPADDNEWCFCOLPKONLY"));
            HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
            assertEquals(2, columnFamilies.length);
            assertEquals("NEWCF", columnFamilies[0].getNameAsString());
            assertEquals(true, columnFamilies[0].isInMemory());
            assertEquals("XYZ", columnFamilies[1].getNameAsString());
            assertEquals(false, columnFamilies[1].isInMemory());
        }
    } finally {
        conn.close();
    }
}
Example 12
Source File: HBaseCLI.java From cloud-bigtable-examples with Apache License 2.0
public void run(Connection connection, List<String> args) throws InvalidArgsException, IOException {
    String pattern = null;
    if (args.size() == 1) {
        pattern = args.get(0);
    } else if (args.size() != 0) {
        throw new InvalidArgsException(args);
    }

    Admin admin = connection.getAdmin();
    HTableDescriptor[] tables;
    // We use the listTables() method on the Admin instance
    // to get a list of HTableDescriptor objects.
    if (pattern != null) {
        tables = admin.listTables(pattern);
    } else {
        tables = admin.listTables();
    }

    // For each of the tables we get the table name and column families
    // registered with the table, and print them out.
    for (HTableDescriptor table : tables) {
        HColumnDescriptor[] columnFamilies = table.getColumnFamilies();
        String columnFamilyNames = "";
        for (HColumnDescriptor columnFamily : columnFamilies) {
            columnFamilyNames += columnFamily.getNameAsString() + ",";
        }
        if (columnFamilyNames.length() > 0) {
            // Trim the trailing comma before wrapping the list in angle brackets.
            columnFamilyNames = " <" + columnFamilyNames.substring(0, columnFamilyNames.length() - 1) + ">";
        }

        System.out.println(table.getTableName() + columnFamilyNames);
    }
}
Example 13
Source File: SchemaTool.java From kite with Apache License 2.0
/**
 * add the column families which are not already present to the given table
 */
private void modifyTable(String tableName, HTableDescriptor newDescriptor) {
    LOG.info("Modifying table " + tableName);
    HColumnDescriptor[] newFamilies = newDescriptor.getColumnFamilies();
    try {
        List<HColumnDescriptor> columnsToAdd = Lists.newArrayList();
        HTableDescriptor currentFamilies = hbaseAdmin
                .getTableDescriptor(Bytes.toBytes(tableName));
        for (HColumnDescriptor newFamily : newFamilies) {
            if (!currentFamilies.hasFamily(newFamily.getName())) {
                columnsToAdd.add(new HColumnDescriptor(newFamily.getName()));
            }
        }
        // Add all the necessary column families
        if (!columnsToAdd.isEmpty()) {
            hbaseAdmin.disableTable(tableName);
            try {
                for (HColumnDescriptor columnToAdd : columnsToAdd) {
                    hbaseAdmin.addColumn(tableName, columnToAdd);
                }
            } finally {
                hbaseAdmin.enableTable(tableName);
            }
        }
    } catch (IOException e) {
        throw new DatasetException(e);
    }
}
Example 14
Source File: PhoenixMetadata.java From presto with Apache License 2.0
private Map<String, Object> getTableProperties(ConnectorSession session, JdbcTableHandle handle) {
    ImmutableMap.Builder<String, Object> properties = ImmutableMap.builder();

    try (PhoenixConnection connection = phoenixClient.getConnection(JdbcIdentity.from(session));
            HBaseAdmin admin = connection.getQueryServices().getAdmin()) {
        String schemaName = toPhoenixSchemaName(Optional.ofNullable(handle.getSchemaName())).orElse(null);
        PTable table = getTable(connection, SchemaUtil.getTableName(schemaName, handle.getTableName()));

        boolean salted = table.getBucketNum() != null;
        StringJoiner joiner = new StringJoiner(",");
        List<PColumn> pkColumns = table.getPKColumns();
        for (PColumn pkColumn : pkColumns.subList(salted ? 1 : 0, pkColumns.size())) {
            joiner.add(pkColumn.getName().getString());
        }
        properties.put(PhoenixTableProperties.ROWKEYS, joiner.toString());

        if (table.getBucketNum() != null) {
            properties.put(PhoenixTableProperties.SALT_BUCKETS, table.getBucketNum());
        }
        if (table.isWALDisabled()) {
            properties.put(PhoenixTableProperties.DISABLE_WAL, table.isWALDisabled());
        }
        if (table.isImmutableRows()) {
            properties.put(PhoenixTableProperties.IMMUTABLE_ROWS, table.isImmutableRows());
        }

        String defaultFamilyName = QueryConstants.DEFAULT_COLUMN_FAMILY;
        if (table.getDefaultFamilyName() != null) {
            defaultFamilyName = table.getDefaultFamilyName().getString();
            properties.put(PhoenixTableProperties.DEFAULT_COLUMN_FAMILY, defaultFamilyName);
        }

        HTableDescriptor tableDesc = admin.getTableDescriptor(table.getPhysicalName().getBytes());

        HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
        for (HColumnDescriptor columnFamily : columnFamilies) {
            if (columnFamily.getNameAsString().equals(defaultFamilyName)) {
                if (!"NONE".equals(columnFamily.getBloomFilterType().toString())) {
                    properties.put(PhoenixTableProperties.BLOOMFILTER, columnFamily.getBloomFilterType().toString());
                }
                if (columnFamily.getMaxVersions() != 1) {
                    properties.put(PhoenixTableProperties.VERSIONS, columnFamily.getMaxVersions());
                }
                if (columnFamily.getMinVersions() > 0) {
                    properties.put(PhoenixTableProperties.MIN_VERSIONS, columnFamily.getMinVersions());
                }
                if (!columnFamily.getCompression().toString().equals("NONE")) {
                    properties.put(PhoenixTableProperties.COMPRESSION, columnFamily.getCompression().toString());
                }
                if (columnFamily.getTimeToLive() < FOREVER) {
                    properties.put(PhoenixTableProperties.TTL, columnFamily.getTimeToLive());
                }
                break;
            }
        }
    }
    catch (IOException | SQLException e) {
        throw new PrestoException(PHOENIX_METADATA_ERROR, "Couldn't get Phoenix table properties", e);
    }
    return properties.build();
}
Example 15
Source File: AlterTableIT.java From phoenix with Apache License 2.0
@Test
public void testSetHTableHColumnAndPhoenixTableProperties() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    String ddl = "CREATE TABLE T3 (\n"
            + "ID1 VARCHAR(15) NOT NULL,\n"
            + "ID2 VARCHAR(15) NOT NULL,\n"
            + "CREATED_DATE DATE,\n"
            + "CF1.CREATION_TIME BIGINT,\n"
            + "CF2.LAST_USED DATE,\n"
            + "CONSTRAINT PK PRIMARY KEY (ID1, ID2)) IMMUTABLE_ROWS=true";
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.createStatement().execute(ddl);
    assertImmutableRows(conn, "T3", true);
    ddl = "ALTER TABLE T3 SET COMPACTION_ENABLED = FALSE, VERSIONS = 10";
    conn.createStatement().execute(ddl);
    ddl = "ALTER TABLE T3 SET COMPACTION_ENABLED = FALSE, CF1.MIN_VERSIONS = 1, CF2.MIN_VERSIONS = 3, MIN_VERSIONS = 8, IMMUTABLE_ROWS=false, CF1.KEEP_DELETED_CELLS = true, KEEP_DELETED_CELLS = false";
    conn.createStatement().execute(ddl);
    assertImmutableRows(conn, "T3", false);
    try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
        HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes("T3"));
        HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
        assertEquals(3, columnFamilies.length);

        assertEquals("0", columnFamilies[0].getNameAsString());
        assertEquals(8, columnFamilies[0].getMinVersions());
        assertEquals(10, columnFamilies[0].getMaxVersions());
        assertEquals(KeepDeletedCells.FALSE, columnFamilies[0].getKeepDeletedCellsAsEnum());

        assertEquals("CF1", columnFamilies[1].getNameAsString());
        assertEquals(1, columnFamilies[1].getMinVersions());
        assertEquals(10, columnFamilies[1].getMaxVersions());
        assertEquals(KeepDeletedCells.TRUE, columnFamilies[1].getKeepDeletedCellsAsEnum());

        assertEquals("CF2", columnFamilies[2].getNameAsString());
        assertEquals(3, columnFamilies[2].getMinVersions());
        assertEquals(10, columnFamilies[2].getMaxVersions());
        assertEquals(KeepDeletedCells.FALSE, columnFamilies[2].getKeepDeletedCellsAsEnum());

        assertEquals(Boolean.toString(false), tableDesc.getValue(HTableDescriptor.COMPACTION_ENABLED));
    }
}
Example 16
Source File: MutableIndexReplicationIT.java From phoenix with Apache License 2.0
@Test
public void testReplicationWithMutableIndexes() throws Exception {
    Connection conn = getConnection();

    // create the primary and index tables
    conn.createStatement().execute(
            "CREATE TABLE " + DATA_TABLE_FULL_NAME
                    + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
    conn.createStatement().execute(
            "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1)");

    // make sure that the tables are empty, but reachable
    String query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
    ResultSet rs = conn.createStatement().executeQuery(query);
    assertFalse(rs.next());

    // make sure there is no data in the table
    query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
    rs = conn.createStatement().executeQuery(query);
    assertFalse(rs.next());

    // make sure the data tables are created on the remote cluster
    HBaseAdmin admin = utility1.getHBaseAdmin();
    HBaseAdmin admin2 = utility2.getHBaseAdmin();

    List<String> dataTables = new ArrayList<String>();
    dataTables.add(DATA_TABLE_FULL_NAME);
    dataTables.add(INDEX_TABLE_FULL_NAME);
    for (String tableName : dataTables) {
        HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf(tableName));

        // create it as-is on the remote cluster
        admin2.createTable(desc);

        LOG.info("Enabling replication on source table: " + tableName);
        HColumnDescriptor[] cols = desc.getColumnFamilies();
        assertEquals(1, cols.length);
        // add the replication scope to the column
        HColumnDescriptor col = desc.removeFamily(cols[0].getName());
        col.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
        desc.addFamily(col);
        // disable/modify/enable table so it has replication enabled
        admin.disableTable(desc.getTableName());
        admin.modifyTable(tableName, desc);
        admin.enableTable(desc.getTableName());
        LOG.info("Replication enabled on source table: " + tableName);
    }

    // load some data into the source cluster table
    PreparedStatement stmt =
            conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
    stmt.setString(1, "a"); // k
    stmt.setString(2, "x"); // v1 <- has index
    stmt.setString(3, "1"); // v2
    stmt.execute();
    conn.commit();

    // make sure the index is working as expected
    query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
    rs = conn.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals("x", rs.getString(1));
    assertFalse(rs.next());
    conn.close();

    /* Validate that we have replicated the rows to the remote cluster */

    // other table can't be reached through Phoenix right now - would need to change how we
    // lookup tables. For right now, we just go through an HTable
    LOG.info("Looking up tables in replication target");
    TableName[] tables = admin2.listTableNames();
    HTable remoteTable = new HTable(utility2.getConfiguration(), tables[0]);
    for (int i = 0; i < REPLICATION_RETRIES; i++) {
        if (i >= REPLICATION_RETRIES - 1) {
            fail("Waited too much time for put replication on table " + remoteTable
                    .getTableDescriptor().getNameAsString());
        }
        if (ensureAnyRows(remoteTable)) {
            break;
        }
        LOG.info("Sleeping for " + REPLICATION_WAIT_TIME_MILLIS + " for edits to get replicated");
        Thread.sleep(REPLICATION_WAIT_TIME_MILLIS);
    }
    remoteTable.close();
}