Java Code Examples for org.apache.phoenix.query.ConnectionQueryServices#getAdmin()
The following examples show how to use
org.apache.phoenix.query.ConnectionQueryServices#getAdmin().
You can go to the original project or source file by following the links above each example.
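As a quick orientation, here is a minimal sketch of the pattern all of the examples share: unwrap the JDBC connection to a PhoenixConnection, fetch its ConnectionQueryServices, and obtain an HBase Admin from it. The JDBC URL and table name below are illustrative placeholders, not part of the Phoenix API:

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;

public class GetAdminSketch {
    public static void main(String[] args) throws Exception {
        // "jdbc:phoenix:localhost" is a placeholder URL for a local cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            ConnectionQueryServices services =
                    conn.unwrap(PhoenixConnection.class).getQueryServices();
            // getAdmin() returns an HBase Admin; close it when done, as the
            // examples below do via try-with-resources or finally blocks.
            try (Admin admin = services.getAdmin()) {
                System.out.println(admin.tableExists(TableName.valueOf("MY_TABLE")));
            }
        }
    }
}

Note that in older Phoenix releases (as in Examples 1, 13, 14, and 15) getAdmin() returns the concrete HBaseAdmin type rather than the Admin interface.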
Example 1
Source File: StatsCollectorAbstractIT.java From phoenix with Apache License 2.0 | 6 votes |
protected void splitTable(Connection conn, byte[] splitPoint, byte[] tabName)
        throws IOException, InterruptedException, SQLException {
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    int nRegionsNow = services.getAllTableRegions(tabName).size();
    HBaseAdmin admin = services.getAdmin();
    try {
        admin.split(tabName, splitPoint);
        int nTries = 0;
        int nRegions;
        do {
            Thread.sleep(2000);
            services.clearTableRegionCache(tabName);
            nRegions = services.getAllTableRegions(tabName).size();
            nTries++;
        } while (nRegions == nRegionsNow && nTries < 10);
        if (nRegions == nRegionsNow) {
            fail();
        }
        // FIXME: I see the commit of the stats finishing before this with a lower timestamp
        // than the scan timestamp, yet without this sleep, the query finds the old data.
        // Seems like an HBase bug and a potentially serious one.
        Thread.sleep(8000);
    } finally {
        admin.close();
    }
}
Example 2
Source File: IndexTool.java From phoenix with Apache License 2.0 | 6 votes |
private void deleteBeforeRebuild(Connection conn) throws SQLException, IOException {
    if (MetaDataUtil.isViewIndex(pIndexTable.getPhysicalName().getString())) {
        throw new IllegalArgumentException(String.format(
            "%s is a view index. delete-all-and-rebuild is not supported for view indexes",
            indexTable));
    }
    if (isLocalIndexBuild) {
        throw new IllegalArgumentException(String.format(
            "%s is a local index. delete-all-and-rebuild is not supported for local indexes",
            indexTable));
    } else {
        ConnectionQueryServices queryServices =
                conn.unwrap(PhoenixConnection.class).getQueryServices();
        try (Admin admin = queryServices.getAdmin()) {
            TableName tableName = TableName.valueOf(qIndexTable);
            admin.disableTable(tableName);
            admin.truncateTable(tableName, true);
        }
    }
}
Example 3
Source File: IndexUpgradeTool.java From phoenix with Apache License 2.0 | 6 votes |
private long executeToolForImmutableTables(ConnectionQueryServices queryServices,
        List<String> immutableList) {
    if (immutableList.isEmpty()) {
        return 0;
    }
    LOGGER.info("Started " + operation + " for immutable tables");
    List<String> failedTables = new ArrayList<String>();
    for (String dataTableFullName : immutableList) {
        try (Admin admin = queryServices.getAdmin()) {
            HashSet<String> indexes = tablesAndIndexes.get(dataTableFullName);
            LOGGER.info("Executing " + operation + " of " + dataTableFullName + " (immutable)");
            disableTable(admin, dataTableFullName, indexes);
            modifyTable(admin, dataTableFullName, indexes);
        } catch (Throwable e) {
            LOGGER.severe("Something went wrong while disabling "
                    + "or modifying immutable table " + e);
            handleFailure(queryServices, dataTableFullName, immutableList, failedTables);
        }
    }
    immutableList.removeAll(failedTables);
    long startWaitTime = EnvironmentEdgeManager.currentTimeMillis();
    return startWaitTime;
}
Example 4
Source File: RoundRobinResultIteratorIT.java From phoenix with Apache License 2.0 | 5 votes |
@Test
public void testRoundRobinAfterTableSplit() throws Exception {
    String tableName = generateUniqueName();
    byte[] tableNameBytes = Bytes.toBytes(tableName);
    int numRows = setupTableForSplit(tableName);
    Connection conn = getConnection();
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    int nRegions = services.getAllTableRegions(tableNameBytes).size();
    int nRegionsBeforeSplit = nRegions;
    Admin admin = services.getAdmin();
    try {
        // Split is an async operation, so we hope 10 seconds is long enough.
        // If the test tends to flap, increase the wait time.
        admin.split(TableName.valueOf(tableName));
        CountDownLatch latch = new CountDownLatch(1);
        int nTries = 0;
        long waitTimeMillis = 2000;
        while (nRegions == nRegionsBeforeSplit && nTries < 10) {
            latch.await(waitTimeMillis, TimeUnit.MILLISECONDS);
            nRegions = services.getAllTableRegions(tableNameBytes).size();
            nTries++;
        }
        String query = "SELECT * FROM " + tableName;
        Statement stmt = conn.createStatement();
        stmt.setFetchSize(10); // causes scanner caches to be replenished in parallel
        ResultSet rs = stmt.executeQuery(query);
        int numRowsRead = 0;
        while (rs.next()) {
            numRowsRead++;
        }
        nRegions = services.getAllTableRegions(tableNameBytes).size();
        // Region cache has been updated, as there are more regions now
        assertNotEquals(nRegions, nRegionsBeforeSplit);
        assertEquals(numRows, numRowsRead);
    } finally {
        admin.close();
    }
}
Example 5
Source File: TestUtil.java From phoenix with Apache License 2.0 | 5 votes |
public static void addCoprocessor(Connection conn, String tableName, Class coprocessorClass)
        throws Exception {
    int priority = QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY + 100;
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    TableDescriptor descriptor = services.getTableDescriptor(Bytes.toBytes(tableName));
    if (descriptor.getCoprocessors().contains(coprocessorClass.getName())) {
        return; // already installed
    }
    TableDescriptorBuilder descriptorBuilder = TableDescriptorBuilder.newBuilder(descriptor);
    descriptorBuilder.addCoprocessor(coprocessorClass.getName(), null, priority, null);
    final int retries = 10;
    int numTries = retries;
    descriptor = descriptorBuilder.build();
    try (Admin admin = services.getAdmin()) {
        admin.modifyTable(descriptor);
        while (!admin.getDescriptor(TableName.valueOf(tableName)).equals(descriptor)
                && numTries > 0) {
            numTries--;
            if (numTries == 0) {
                throw new Exception("Failed to add " + coprocessorClass.getName()
                        + " after " + retries + " retries.");
            }
            Thread.sleep(1000);
        }
    }
}
Example 6
Source File: ParameterizedIndexUpgradeToolIT.java From phoenix with Apache License 2.0 | 5 votes |
@Before
public void setup() throws Exception {
    MockitoAnnotations.initMocks(this);
    optionsBuilder = new StringBuilder();
    setClusterProperties();
    setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
            new ReadOnlyProps(clientProps.entrySet().iterator()));
    conn = DriverManager.getConnection(getUrl(), new Properties());
    conn.setAutoCommit(true);
    String tenantId = generateUniqueName();
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
    connTenant = DriverManager.getConnection(getUrl(), props);
    ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class)
            .getQueryServices();
    admin = queryServices.getAdmin();
    if (!mutable) {
        optionsBuilder.append(" IMMUTABLE_ROWS=true");
    }
    tableDDLOptions = optionsBuilder.toString();
}
Example 7
Source File: IndexScrutinyToolForTenantIT.java From phoenix with Apache License 2.0 | 5 votes |
private void testWithOutput(OutputFormat outputFormat) throws Exception {
    connTenant.createStatement()
            .execute(String.format(upsertQueryStr, tenantViewName, tenantId, 1, "x"));
    connTenant.createStatement()
            .execute(String.format(upsertQueryStr, tenantViewName, tenantId, 2, "x2"));
    connTenant.createStatement()
            .execute(String.format(upsertQueryStr, tenantViewName, tenantId, 3, "x3"));
    connTenant.createStatement().execute(String.format(
            "UPSERT INTO %s (\":ID\", \"0:NAME\") values (%d, '%s')",
            indexNameTenant, 5555, "wrongName"));
    connTenant.commit();
    ConnectionQueryServices queryServices =
            connGlobal.unwrap(PhoenixConnection.class).getQueryServices();
    Admin admin = queryServices.getAdmin();
    TableName tableName = TableName.valueOf(viewIndexTableName);
    admin.disableTable(tableName);
    admin.truncateTable(tableName, false);
    String[] argValues = getArgValues("", tenantViewName, indexNameTenant, 10L,
            SourceTable.DATA_TABLE_SOURCE, true, outputFormat, null, tenantId,
            EnvironmentEdgeManager.currentTimeMillis());
    List<Job> completedJobs = runScrutiny(argValues);
    assertEquals(1, completedJobs.size());
    for (Job job : completedJobs) {
        assertTrue(job.isSuccessful());
        Counters counters = job.getCounters();
        assertEquals(0, getCounterValue(counters, VALID_ROW_COUNT));
        assertEquals(3, getCounterValue(counters, INVALID_ROW_COUNT));
    }
}
Example 8
Source File: SkipScanAfterManualSplitIT.java From phoenix with Apache License 2.0 | 5 votes |
private static void initTable(String tableName) throws Exception {
    Connection conn = getConnection();
    conn.createStatement().execute("CREATE TABLE " + tableName + "("
            + "a VARCHAR PRIMARY KEY, b VARCHAR) "
            + TableDescriptorBuilder.MAX_FILESIZE + "=" + MAX_FILESIZE + ","
            + " SALT_BUCKETS = 4");
    PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?)");
    int rowCount = 0;
    for (int c1 = MIN_CHAR; c1 <= MAX_CHAR; c1++) {
        for (int c2 = MIN_CHAR; c2 <= MAX_CHAR; c2++) {
            String pk = Character.toString((char) c1) + Character.toString((char) c2);
            stmt.setString(1, pk);
            stmt.setString(2, PAYLOAD);
            stmt.execute();
            rowCount++;
            if (rowCount % BATCH_SIZE == 0) {
                conn.commit();
            }
        }
    }
    conn.commit();
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    Admin admin = services.getAdmin();
    try {
        admin.flush(TableName.valueOf(tableName));
    } finally {
        admin.close();
    }
    conn.close();
}
Example 9
Source File: IndexUpgradeTool.java From phoenix with Apache License 2.0 | 5 votes |
private void executeToolForMutableTables(Connection conn, ConnectionQueryServices queryServices,
        Configuration conf, ArrayList<String> mutableTables) {
    if (mutableTables.isEmpty()) {
        return;
    }
    LOGGER.info("Started " + operation + " for mutable tables");
    List<String> failedTables = new ArrayList<>();
    for (String dataTableFullName : mutableTables) {
        try (Admin admin = queryServices.getAdmin()) {
            HashSet<String> indexes = tablesAndIndexes.get(dataTableFullName);
            LOGGER.info("Executing " + operation + " of " + dataTableFullName);
            disableTable(admin, dataTableFullName, indexes);
            modifyTable(admin, dataTableFullName, indexes);
            enableTable(admin, dataTableFullName, indexes);
            LOGGER.info("Completed " + operation + " of " + dataTableFullName);
        } catch (Throwable e) {
            LOGGER.severe("Something went wrong while executing " + operation + " steps for "
                    + dataTableFullName + " " + e);
            handleFailure(queryServices, dataTableFullName, mutableTables, failedTables);
        }
    }
    mutableTables.removeAll(failedTables);
    // Opportunistically kick off index rebuilds after the upgrade operation
    rebuildIndexes(conn, conf, mutableTables);
}
Example 10
Source File: IndexVerificationResultRepository.java From phoenix with Apache License 2.0 | 5 votes |
public void createResultTable(Connection connection) throws IOException, SQLException {
    ConnectionQueryServices queryServices =
            connection.unwrap(PhoenixConnection.class).getQueryServices();
    Admin admin = queryServices.getAdmin();
    TableName resultTableName = TableName.valueOf(RESULT_TABLE_NAME);
    if (!admin.tableExists(resultTableName)) {
        ColumnFamilyDescriptor columnDescriptor =
                ColumnFamilyDescriptorBuilder.newBuilder(RESULT_TABLE_COLUMN_FAMILY)
                        .setTimeToLive(MetaDataProtocol.DEFAULT_LOG_TTL)
                        .build();
        TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(resultTableName)
                .setColumnFamily(columnDescriptor)
                .build();
        admin.createTable(tableDescriptor);
        resultHTable = admin.getConnection().getTable(resultTableName);
    }
}
Example 11
Source File: IndexVerificationOutputRepository.java From phoenix with Apache License 2.0 | 5 votes |
public void createOutputTable(Connection connection) throws IOException, SQLException {
    ConnectionQueryServices queryServices =
            connection.unwrap(PhoenixConnection.class).getQueryServices();
    Admin admin = queryServices.getAdmin();
    TableName outputTableName = TableName.valueOf(OUTPUT_TABLE_NAME);
    if (!admin.tableExists(outputTableName)) {
        ColumnFamilyDescriptor columnDescriptor =
                ColumnFamilyDescriptorBuilder.newBuilder(OUTPUT_TABLE_COLUMN_FAMILY)
                        .setTimeToLive(MetaDataProtocol.DEFAULT_LOG_TTL)
                        .build();
        TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(outputTableName)
                .setColumnFamily(columnDescriptor)
                .build();
        admin.createTable(tableDescriptor);
        outputTable = admin.getConnection().getTable(outputTableName);
    }
}
Example 12
Source File: TestUtil.java From phoenix with Apache License 2.0 | 5 votes |
public static void removeCoprocessor(Connection conn, String tableName, Class coprocessorClass)
        throws Exception {
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    TableDescriptor descriptor = services.getTableDescriptor(Bytes.toBytes(tableName));
    if (!descriptor.getCoprocessors().contains(coprocessorClass.getName())) {
        return; // nothing to remove
    }
    TableDescriptorBuilder descriptorBuilder = TableDescriptorBuilder.newBuilder(descriptor);
    descriptorBuilder.removeCoprocessor(coprocessorClass.getName());
    final int retries = 10;
    int numTries = retries;
    descriptor = descriptorBuilder.build();
    try (Admin admin = services.getAdmin()) {
        admin.modifyTable(descriptor);
        while (!admin.getDescriptor(TableName.valueOf(tableName)).equals(descriptor)
                && numTries > 0) {
            numTries--;
            if (numTries == 0) {
                throw new Exception("Failed to remove " + coprocessorClass.getName()
                        + " after " + retries + " retries.");
            }
            Thread.sleep(1000);
        }
    }
}
Example 13
Source File: ProductMetricsIT.java From phoenix with Apache License 2.0 | 5 votes |
private static void destroyTable() throws Exception {
    // Physically delete the HBase table so that splits occur as expected for each test
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    ConnectionQueryServices services = DriverManager.getConnection(getUrl(), props)
            .unwrap(PhoenixConnection.class).getQueryServices();
    HBaseAdmin admin = services.getAdmin();
    try {
        try {
            admin.disableTable(PRODUCT_METRICS_NAME);
            admin.deleteTable(PRODUCT_METRICS_NAME);
        } catch (TableNotFoundException e) {
            // ignore: nothing to delete if the table does not exist yet
        }
    } finally {
        admin.close();
    }
}
Example 14
Source File: StatsCollectorIT.java From phoenix with Apache License 2.0 | 5 votes |
private void compactTable(Connection conn, String tableName)
        throws IOException, InterruptedException, SQLException {
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    HBaseAdmin admin = services.getAdmin();
    try {
        admin.flush(tableName);
        admin.majorCompact(tableName);
        Thread.sleep(10000); // FIXME: how do we know when compaction is done?
    } finally {
        admin.close();
    }
}
Example 15
Source File: SkipScanAfterManualSplitIT.java From phoenix with Apache License 2.0 | 5 votes |
private static void initTable() throws Exception {
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute("CREATE TABLE " + TABLE_NAME + "("
            + "a VARCHAR PRIMARY KEY, b VARCHAR) "
            + HTableDescriptor.MAX_FILESIZE + "=" + MAX_FILESIZE + ","
            + " SALT_BUCKETS = 4");
    PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + TABLE_NAME + " VALUES(?,?)");
    int rowCount = 0;
    for (int c1 = MIN_CHAR; c1 <= MAX_CHAR; c1++) {
        for (int c2 = MIN_CHAR; c2 <= MAX_CHAR; c2++) {
            String pk = Character.toString((char) c1) + Character.toString((char) c2);
            stmt.setString(1, pk);
            stmt.setString(2, PAYLOAD);
            stmt.execute();
            rowCount++;
            if (rowCount % BATCH_SIZE == 0) {
                conn.commit();
            }
        }
    }
    conn.commit();
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    HBaseAdmin admin = services.getAdmin();
    try {
        admin.flush(TABLE_NAME);
    } finally {
        admin.close();
    }
    conn.close();
}
Example 16
Source File: RoundRobinResultIteratorIT.java From phoenix with Apache License 2.0 | 4 votes |
private static int setupTableForSplit(String tableName) throws Exception {
    int batchSize = 25;
    int maxFileSize = 1024 * 10;
    int payLoadSize = 1024;
    StringBuilder buf = new StringBuilder();
    for (int i = 0; i < payLoadSize; i++) {
        buf.append('a');
    }
    String payload = buf.toString();
    int MIN_CHAR = 'a';
    int MAX_CHAR = 'z';
    Connection conn = getConnection();
    conn.createStatement().execute("CREATE TABLE " + tableName + "("
            + "a VARCHAR PRIMARY KEY, b VARCHAR) "
            + TableDescriptorBuilder.MAX_FILESIZE + "=" + maxFileSize + ","
            + " SALT_BUCKETS = " + NUM_SALT_BUCKETS);
    PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?)");
    int rowCount = 0;
    for (int c1 = MIN_CHAR; c1 <= MAX_CHAR; c1++) {
        for (int c2 = MIN_CHAR; c2 <= MAX_CHAR; c2++) {
            String pk = Character.toString((char) c1) + Character.toString((char) c2);
            stmt.setString(1, pk);
            stmt.setString(2, payload);
            stmt.execute();
            rowCount++;
            if (rowCount % batchSize == 0) {
                conn.commit();
            }
        }
    }
    conn.commit();
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    Admin admin = services.getAdmin();
    try {
        admin.flush(TableName.valueOf(tableName));
    } finally {
        admin.close();
    }
    conn.close();
    return rowCount;
}
Example 17
Source File: TestUtil.java From phoenix with Apache License 2.0 | 4 votes |
/**
 * Runs a major compaction, and then waits until the compaction is complete before returning.
 *
 * @param tableName name of the table to be compacted
 */
public static void doMajorCompaction(Connection conn, String tableName) throws Exception {
    tableName = SchemaUtil.normalizeIdentifier(tableName);
    // We simply write a marker row, request a major compaction, and then wait until the
    // marker row is gone
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), tableName));
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    MutationState mutationState = pconn.getMutationState();
    if (table.isTransactional()) {
        mutationState.startTransaction(table.getTransactionProvider());
    }
    try (Table htable = mutationState.getHTable(table)) {
        byte[] markerRowKey = Bytes.toBytes("TO_DELETE");
        Put put = new Put(markerRowKey);
        put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
                QueryConstants.EMPTY_COLUMN_VALUE_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        htable.put(put);
        Delete delete = new Delete(markerRowKey);
        delete.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
                QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        htable.delete(delete);
        htable.close();
        if (table.isTransactional()) {
            mutationState.commit();
        }
        Admin hbaseAdmin = services.getAdmin();
        hbaseAdmin.flush(TableName.valueOf(tableName));
        hbaseAdmin.majorCompact(TableName.valueOf(tableName));
        hbaseAdmin.close();
        boolean compactionDone = false;
        while (!compactionDone) {
            Thread.sleep(6000L);
            Scan scan = new Scan();
            scan.setStartRow(markerRowKey);
            scan.setStopRow(Bytes.add(markerRowKey, new byte[] { 0 }));
            scan.setRaw(true);
            try (Table htableForRawScan = services.getTable(Bytes.toBytes(tableName))) {
                ResultScanner scanner = htableForRawScan.getScanner(scan);
                List<Result> results = Lists.newArrayList(scanner);
                LOGGER.info("Results: " + results);
                compactionDone = results.isEmpty();
                scanner.close();
            }
            LOGGER.info("Compaction done: " + compactionDone);
            // Need to run compaction after the next txn snapshot has been written so that
            // compaction can remove deleted rows
            if (!compactionDone && table.isTransactional()) {
                hbaseAdmin = services.getAdmin();
                hbaseAdmin.flush(TableName.valueOf(tableName));
                hbaseAdmin.majorCompact(TableName.valueOf(tableName));
                hbaseAdmin.close();
            }
        }
    }
}
Example 18
Source File: DynamicColumnIT.java From phoenix with Apache License 2.0 | 4 votes |
@Before
public void initTable() throws Exception {
    tableName = generateUniqueName();
    try (PhoenixConnection pconn =
            DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        ConnectionQueryServices services = pconn.getQueryServices();
        try (Admin admin = services.getAdmin()) {
            TableDescriptorBuilder builder =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName));
            builder.addColumnFamily(
                    ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME_A));
            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME_B));
            admin.createTable(builder.build());
        }
        try (Table hTable = services.getTable(Bytes.toBytes(tableName))) {
            // Insert rows using standard HBase mechanism with standard HBase "types"
            List<Row> mutations = new ArrayList<Row>();
            byte[] dv = Bytes.toBytes("DV");
            byte[] first = Bytes.toBytes("F");
            byte[] f1v1 = Bytes.toBytes("F1V1");
            byte[] f1v2 = Bytes.toBytes("F1V2");
            byte[] f2v1 = Bytes.toBytes("F2V1");
            byte[] f2v2 = Bytes.toBytes("F2V2");
            byte[] key = Bytes.toBytes("entry1");
            Put put = new Put(key);
            put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
            put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
            put.addColumn(FAMILY_NAME_A, f1v1, Bytes.toBytes("f1value1"));
            put.addColumn(FAMILY_NAME_A, f1v2, Bytes.toBytes("f1value2"));
            put.addColumn(FAMILY_NAME_B, f2v1, Bytes.toBytes("f2value1"));
            put.addColumn(FAMILY_NAME_B, f2v2, Bytes.toBytes("f2value2"));
            mutations.add(put);
            hTable.batch(mutations, null);
            // Create the Phoenix table after the HBase table was created through the native APIs.
            // The timestamp of the table creation must be later than the timestamp of the data.
            pconn.createStatement().execute("create table " + tableName
                    + " (entry varchar not null,"
                    + " F varchar,"
                    + " A.F1v1 varchar,"
                    + " A.F1v2 varchar,"
                    + " B.F2v1 varchar"
                    + " CONSTRAINT pk PRIMARY KEY (entry)) COLUMN_ENCODED_BYTES=NONE");
        }
    }
}
Example 19
Source File: IndexRebuildTaskIT.java From phoenix with Apache License 2.0 | 4 votes |
@Test
public void testIndexRebuildTask() throws Throwable {
    String baseTable = generateUniqueName();
    String viewName = generateUniqueName();
    Connection conn = null;
    Connection tenantConn = null;
    try {
        conn = DriverManager.getConnection(getUrl());
        conn.setAutoCommit(false);
        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, TENANT1);
        tenantConn = DriverManager.getConnection(getUrl(), props);
        String ddlFormat = "CREATE TABLE IF NOT EXISTS " + baseTable + " ("
                + " %s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
                + " CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)"
                + " ) %s";
        conn.createStatement().execute(generateDDL(ddlFormat));
        conn.commit();
        // Create a view
        String viewDDL = "CREATE VIEW " + viewName + " AS SELECT * FROM " + baseTable;
        tenantConn.createStatement().execute(viewDDL);
        // Create an index
        String indexName = generateUniqueName();
        String idxSDDL = String.format("CREATE INDEX %s ON %s (V1)", indexName, viewName);
        tenantConn.createStatement().execute(idxSDDL);
        // Insert rows
        int numOfValues = 1000;
        for (int i = 0; i < numOfValues; i++) {
            tenantConn.createStatement().execute(String.format(
                    "UPSERT INTO %s VALUES('%s', '%s', '%s')",
                    viewName, String.valueOf(i), "y", "z"));
        }
        tenantConn.commit();
        waitForIndexRebuild(conn, indexName, PIndexState.ACTIVE);
        String viewIndexTableName = MetaDataUtil.getViewIndexPhysicalName(baseTable);
        ConnectionQueryServices queryServices =
                conn.unwrap(PhoenixConnection.class).getQueryServices();
        Table indexHTable = queryServices.getTable(Bytes.toBytes(viewIndexTableName));
        int count = getUtility().countRows(indexHTable);
        assertEquals(numOfValues, count);
        // ALTER to UNUSABLE makes the index status inactive. If we ALTER to DISABLE instead,
        // the index tool fails while setting the state to active, due to an invalid transition.
        tenantConn.createStatement().execute(
                String.format("ALTER INDEX %s ON %s UNUSABLE", indexName, viewName));
        tenantConn.commit();
        // Remove index contents and try again
        Admin admin = queryServices.getAdmin();
        TableName tableName = TableName.valueOf(viewIndexTableName);
        admin.disableTable(tableName);
        admin.truncateTable(tableName, false);
        count = getUtility().countRows(indexHTable);
        assertEquals(0, count);
        String data = "{\"IndexName\":\"" + indexName + "\"}";
        // Run IndexRebuildTask
        TaskRegionObserver.SelfHealingTask task = new TaskRegionObserver.SelfHealingTask(
                TaskRegionEnvironment, QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS);
        Timestamp startTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis());
        Task.addTask(conn.unwrap(PhoenixConnection.class), PTable.TaskType.INDEX_REBUILD,
                TENANT1, null, viewName, PTable.TaskStatus.CREATED.toString(), data, null,
                startTs, null, true);
        task.run();
        // Check the task status and other column values
        waitForTaskState(conn, PTable.TaskType.INDEX_REBUILD, viewName,
                PTable.TaskStatus.COMPLETED);
        // See that the index is rebuilt and confirm it has rows
        count = getUtility().countRows(indexHTable);
        assertEquals(numOfValues, count);
    } finally {
        if (conn != null) {
            conn.createStatement().execute("DELETE FROM "
                    + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME
                    + " WHERE TABLE_NAME ='" + viewName + "'");
            conn.commit();
            conn.close();
        }
        if (tenantConn != null) {
            tenantConn.close();
        }
    }
}
Example 20
Source File: IndexToolForNonTxGlobalIndexIT.java From phoenix with Apache License 2.0 | 4 votes |
@Test
public void testSecondaryGlobalIndexFailure() throws Exception {
    String schemaName = generateUniqueName();
    String dataTableName = generateUniqueName();
    String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
    String indexTableName = generateUniqueName();
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
        String stmString1 = "CREATE TABLE " + dataTableFullName
                + " (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, ZIP INTEGER) "
                + tableDDLOptions;
        conn.createStatement().execute(stmString1);
        String upsertQuery = String.format("UPSERT INTO %s VALUES(?, ?, ?)", dataTableFullName);
        PreparedStatement stmt1 = conn.prepareStatement(upsertQuery);
        // Insert two rows
        IndexToolIT.upsertRow(stmt1, 1);
        IndexToolIT.upsertRow(stmt1, 2);
        conn.commit();
        String stmtString2 = String.format(
                "CREATE INDEX %s ON %s (LPAD(UPPER(NAME, 'en_US'),8,'x')||'_xyz') ASYNC ",
                indexTableName, dataTableFullName);
        conn.createStatement().execute(stmtString2);
        // Run the index MR job.
        IndexToolIT.runIndexTool(directApi, useSnapshot, schemaName, dataTableName,
                indexTableName);
        String qIndexTableName = SchemaUtil.getQualifiedTableName(schemaName, indexTableName);
        // Verify that the index table is in the ACTIVE state
        assertEquals(PIndexState.ACTIVE, TestUtil.getIndexState(conn, qIndexTableName));
        ConnectionQueryServices queryServices =
                conn.unwrap(PhoenixConnection.class).getQueryServices();
        Admin admin = queryServices.getAdmin();
        TableName tableName = TableName.valueOf(qIndexTableName);
        admin.disableTable(tableName);
        // Run the index MR job again; it should fail (return -1)
        IndexToolIT.runIndexTool(directApi, useSnapshot, schemaName, dataTableName,
                indexTableName, null, -1, new String[0]);
        // Verify that the index table is still in the ACTIVE state
        assertEquals(PIndexState.ACTIVE, TestUtil.getIndexState(conn, qIndexTableName));
    }
}