org.apache.phoenix.jdbc.PhoenixResultSet Java Examples
The following examples show how to use
org.apache.phoenix.jdbc.PhoenixResultSet.
Each example comes from the Apache Phoenix project; its original source file is noted above it.
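All of the examples share one basic move: PhoenixResultSet is Phoenix's implementation of java.sql.ResultSet, and code reaches it by unwrapping a plain JDBC ResultSet. As a minimal, self-contained sketch of that pattern (the URL jdbc:phoenix:localhost and the table MY_TABLE are illustrative placeholders, not taken from any example below):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

import org.apache.phoenix.jdbc.PhoenixResultSet;

public class PhoenixResultSetUnwrapSketch {
    public static void main(String[] args) throws Exception {
        // "jdbc:phoenix:localhost" and MY_TABLE are placeholder values
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT * FROM MY_TABLE")) {
            // unwrap() exposes Phoenix-specific APIs such as getContext()
            // and getCurrentRow(), which the examples below rely on
            PhoenixResultSet prs = rs.unwrap(PhoenixResultSet.class);
            while (prs.next()) {
                // process each row
            }
        }
    }
}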
Example #1
Source File: PhoenixMetricsIT.java From phoenix with Apache License 2.0
@Test
public void testReadMetricsForSelect() throws Exception {
    String tableName = generateUniqueName();
    long numSaltBuckets = 6;
    String ddl = "CREATE TABLE " + tableName
            + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)"
            + " SALT_BUCKETS = " + numSaltBuckets;
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute(ddl);
    long numRows = 1000;
    long numExpectedTasks = numSaltBuckets;
    insertRowsInTable(tableName, numRows);
    String query = "SELECT * FROM " + tableName;
    Statement stmt = conn.createStatement();
    ResultSet rs = stmt.executeQuery(query);
    PhoenixResultSet resultSetBeingTested = rs.unwrap(PhoenixResultSet.class);
    changeInternalStateForTesting(resultSetBeingTested);
    while (resultSetBeingTested.next()) {}
    resultSetBeingTested.close();
    Set<String> expectedTableNames = Sets.newHashSet(tableName);
    assertReadMetricValuesForSelectSql(Lists.newArrayList(numRows),
            Lists.newArrayList(numExpectedTasks), resultSetBeingTested, expectedTableNames);
}
Example #2
Source File: PhoenixIndexDBWritable.java From phoenix with Apache License 2.0
@Override
public void readFields(ResultSet resultSet) throws SQLException {
    // we do this once per mapper.
    if (columnCount == -1) {
        this.columnCount = resultSet.getMetaData().getColumnCount();
    }
    if (columnCount > 0) {
        this.rowTs = resultSet.unwrap(PhoenixResultSet.class)
                .getCurrentRow().getValue(0).getTimestamp();
    }
    values = Lists.newArrayListWithCapacity(columnCount);
    for (int i = 0; i < columnCount; i++) {
        Object value = resultSet.getObject(i + 1);
        values.add(value);
    }
}
Example #3
Source File: RoundRobinResultIteratorIT.java From phoenix with Apache License 2.0
@Test
public void testIteratorsPickedInRoundRobinFashionForSaltedTable() throws Exception {
    try (Connection conn = getConnection()) {
        String testTable = "testIteratorsPickedInRoundRobinFashionForSaltedTable".toUpperCase();
        Statement stmt = conn.createStatement();
        stmt.execute("CREATE TABLE " + testTable + "(K VARCHAR PRIMARY KEY) SALT_BUCKETS = 8");
        PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
        MockParallelIteratorFactory parallelIteratorFactory = new MockParallelIteratorFactory();
        phxConn.setIteratorFactory(parallelIteratorFactory);
        ResultSet rs = stmt.executeQuery("SELECT * FROM " + testTable);
        StatementContext ctx = rs.unwrap(PhoenixResultSet.class).getContext();
        PTable table = ctx.getResolver().getTables().get(0).getTable();
        parallelIteratorFactory.setTable(table);
        PhoenixStatement pstmt = stmt.unwrap(PhoenixStatement.class);
        int numIterators = pstmt.getQueryPlan().getSplits().size();
        assertEquals(8, numIterators);
        int numFetches = 2 * numIterators;
        List<String> iteratorOrder = new ArrayList<>(numFetches);
        for (int i = 1; i <= numFetches; i++) {
            rs.next();
            iteratorOrder.add(rs.getString(1));
        }
        /*
         * Because TableResultIterators are created in parallel in multiple threads, their
         * relative order is not deterministic. However, once the iterators are assigned to a
         * RoundRobinResultIterator, the order in which the next iterator is picked is
         * deterministic - i1, i2, .. i7, i8, i1, i2, .. i7, i8, i1, i2, ..
         */
        for (int i = 0; i < numIterators; i++) {
            assertEquals(iteratorOrder.get(i), iteratorOrder.get(i + numIterators));
        }
    }
}
Example #4
Source File: QueryLoggerIT.java From phoenix with Apache License 2.0
@Test
public void testWithLoggingOFF() throws Exception {
    String tableName = generateUniqueName();
    createTableAndInsertValues(tableName, true);
    Properties props = new Properties();
    props.setProperty(QueryServices.LOG_LEVEL, LogLevel.OFF.name());
    Connection conn = DriverManager.getConnection(getUrl(), props);
    assertEquals(conn.unwrap(PhoenixConnection.class).getLogLevel(), LogLevel.OFF);
    // delete old data
    conn.createStatement().executeUpdate(
            "delete from " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_LOG_TABLE + "\"");
    conn.commit();
    String query = "SELECT * FROM " + tableName;
    ResultSet rs = conn.createStatement().executeQuery(query);
    StatementContext context = ((PhoenixResultSet) rs).getContext();
    assertEquals(context.getQueryLogger(), QueryLogger.NO_OP_INSTANCE);
    while (rs.next()) {
        rs.getString(1);
        rs.getString(2);
    }
    String logQuery = "SELECT count(*) FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_LOG_TABLE + "\"";
    int delay = 5000;
    // sleep for some time to let the query log be committed
    Thread.sleep(delay);
    rs = conn.createStatement().executeQuery(logQuery);
    assertTrue(rs.next());
    assertEquals(rs.getInt(1), 0);
    assertFalse(rs.next());
    conn.close();
}
Example #5
Source File: LocalIndexIT.java From phoenix with Apache License 2.0
// See https://issues.apache.org/jira/browse/PHOENIX-4289
@Test
public void testEstimatesWithLocalIndexes() throws Exception {
    String tableName = generateUniqueName();
    String indexName = "IDX_" + generateUniqueName();
    try (Connection conn = DriverManager.getConnection(getUrl())) {
        int guidePostWidth = 20;
        conn.createStatement().execute(
                "CREATE TABLE " + tableName + " (k INTEGER PRIMARY KEY, a bigint, b bigint)"
                        + " GUIDE_POSTS_WIDTH=" + guidePostWidth);
        conn.createStatement().execute("upsert into " + tableName + " values (100,1,3)");
        conn.createStatement().execute("upsert into " + tableName + " values (101,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (102,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (103,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (104,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (105,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (106,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (107,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (108,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (109,2,4)");
        conn.commit();
        conn.createStatement().execute(
                "CREATE LOCAL INDEX " + indexName + " ON " + tableName + " (a) INCLUDE (b) ");
        String ddl = "ALTER TABLE " + tableName + " SET USE_STATS_FOR_PARALLELIZATION = false";
        conn.createStatement().execute(ddl);
        conn.createStatement().execute("UPDATE STATISTICS " + tableName);
    }
    List<Object> binds = Lists.newArrayList();
    try (Connection conn = DriverManager.getConnection(getUrl())) {
        String sql = "SELECT COUNT(*) " + " FROM " + tableName;
        ResultSet rs = conn.createStatement().executeQuery(sql);
        assertTrue("Index " + indexName + " should have been used",
                rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan().getTableRef()
                        .getTable().getName().getString().equals(indexName));
        Estimate info = getByteRowEstimates(conn, sql, binds);
        assertEquals((Long) 10L, info.getEstimatedRows());
        assertTrue(info.getEstimateInfoTs() > 0);
    }
}
Example #6
Source File: PhoenixMetricsIT.java From phoenix with Apache License 2.0
private void assertReadMetricValuesForSelectSql(ArrayList<Long> numRows,
        ArrayList<Long> numExpectedTasks, PhoenixResultSet resultSetBeingTested,
        Set<String> expectedTableNames) throws SQLException {
    Map<String, Map<MetricType, Long>> metrics =
            PhoenixRuntime.getRequestReadMetricInfo(resultSetBeingTested);
    int counter = 0;
    for (Entry<String, Map<MetricType, Long>> entry : metrics.entrySet()) {
        String tableName = entry.getKey();
        expectedTableNames.remove(tableName);
        Map<MetricType, Long> metricValues = entry.getValue();
        boolean taskCounterMetricsPresent = false;
        boolean taskExecutionTimeMetricsPresent = false;
        boolean memoryMetricsPresent = false;
        for (Entry<MetricType, Long> pair : metricValues.entrySet()) {
            MetricType metricType = pair.getKey();
            long metricValue = pair.getValue();
            long numTask = numExpectedTasks.get(counter);
            if (metricType.equals(TASK_EXECUTED_COUNTER)) {
                assertEquals(numTask, metricValue);
                taskCounterMetricsPresent = true;
            } else if (metricType.equals(TASK_EXECUTION_TIME)) {
                assertEquals(numTask * TASK_EXECUTION_TIME_DELTA, metricValue);
                taskExecutionTimeMetricsPresent = true;
            } else if (metricType.equals(MEMORY_CHUNK_BYTES)) {
                assertEquals(numTask * MEMORY_CHUNK_BYTES_DELTA, metricValue);
                memoryMetricsPresent = true;
            }
        }
        counter++;
        assertTrue(taskCounterMetricsPresent);
        assertTrue(taskExecutionTimeMetricsPresent);
        assertTrue(memoryMetricsPresent);
    }
    PhoenixRuntime.resetMetrics(resultSetBeingTested);
    assertTrue("Metrics not found for tables " + Joiner.on(",").join(expectedTableNames),
            expectedTableNames.size() == 0);
}
Example #7
Source File: PhoenixMetricsIT.java From phoenix with Apache License 2.0
private void changeInternalStateForTesting(PhoenixResultSet rs) {
    // get and set the internal state for testing purposes.
    ReadMetricQueue testMetricsQueue = new TestReadMetricsQueue(LogLevel.OFF, true);
    StatementContext ctx = (StatementContext) Whitebox.getInternalState(rs, "context");
    Whitebox.setInternalState(ctx, "readMetricsQueue", testMetricsQueue);
    Whitebox.setInternalState(rs, "readMetricsQueue", testMetricsQueue);
}
Example #8
Source File: IndexTool.java From phoenix with Apache License 2.0
private ValueGetter getIndexValueGetter(final PhoenixResultSet rs, List<String> dataColNames) {
    // map from data col name to index in ResultSet
    final Map<String, Integer> rsIndex = new HashMap<>(dataColNames.size());
    int i = 1;
    for (String dataCol : dataColNames) {
        rsIndex.put(SchemaUtil.getEscapedFullColumnName(dataCol), i++);
    }
    return new ValueGetter() {
        final ImmutableBytesWritable valuePtr = new ImmutableBytesWritable();
        final ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable();

        @Override
        public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) throws IOException {
            try {
                String fullColumnName = SchemaUtil.getEscapedFullColumnName(
                        SchemaUtil.getColumnDisplayName(ref.getFamily(), ref.getQualifier()));
                byte[] colVal = rs.getBytes(rsIndex.get(fullColumnName));
                valuePtr.set(colVal);
            } catch (SQLException e) {
                throw new IOException(e);
            }
            return valuePtr;
        }

        @Override
        public byte[] getRowKey() {
            rs.getCurrentRow().getKey(rowKeyPtr);
            return ByteUtil.copyKeyBytesIfNecessary(rowKeyPtr);
        }
    };
}
Example #9
Source File: QueryOptimizerTest.java From phoenix with Apache License 2.0
private static void assertQualifierRanges(ResultSet rs, int minQualifier, int maxQualifier)
        throws SQLException {
    Scan scan = rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
            .getContext().getScan();
    assertNotNull(scan.getAttribute(MIN_QUALIFIER));
    assertNotNull(scan.getAttribute(MAX_QUALIFIER));
    assertEquals(minQualifier, Bytes.toInt(scan.getAttribute(MIN_QUALIFIER)));
    assertEquals(maxQualifier, Bytes.toInt(scan.getAttribute(MAX_QUALIFIER)));
}
Example #10
Source File: IndexScrutinyMapper.java From phoenix with Apache License 2.0
protected void queryTargetTable(Context context, PreparedStatement targetStatement,
        Map<String, Pair<Long, List<Object>>> targetPkToSourceValues)
        throws SQLException, IOException, InterruptedException {
    ResultSet targetResultSet = targetStatement.executeQuery();
    while (targetResultSet.next()) {
        indxWritable.readFields(targetResultSet);
        List<Object> targetValues = indxWritable.getValues();
        // first grab the PK and try to join against the source input;
        // the query is such that the first numTargetPkCols of the resultSet are the PK
        List<Object> pkObjects = new ArrayList<>(numTargetPkCols);
        for (int i = 0; i < numTargetPkCols; i++) {
            Object pkPart = targetResultSet.getObject(i + 1);
            pkObjects.add(pkPart);
        }
        Long targetTS = targetResultSet.unwrap(PhoenixResultSet.class)
                .getCurrentRow().getValue(0).getTimestamp();
        String targetPk = getPkHash(pkObjects);
        // use the pk to fetch the source table column values
        Pair<Long, List<Object>> sourceTsValues = targetPkToSourceValues.get(targetPk);
        Long sourceTS = sourceTsValues.getFirst();
        List<Object> sourceValues = sourceTsValues.getSecond();
        // compare values starting after the PK (i.e. covered columns)
        boolean isIndexedCorrectly =
                compareValues(numTargetPkCols, targetValues, sourceValues, context);
        if (isIndexedCorrectly) {
            context.getCounter(PhoenixScrutinyJobCounters.VALID_ROW_COUNT).increment(1);
        } else {
            context.getCounter(PhoenixScrutinyJobCounters.INVALID_ROW_COUNT).increment(1);
            if (outputInvalidRows) {
                outputInvalidRow(context, sourceValues, targetValues, sourceTS, targetTS);
            }
        }
        targetPkToSourceValues.remove(targetPk);
    }
}
Example #11
Source File: PhoenixServerBuildIndexDBWritable.java From phoenix with Apache License 2.0
@Override
public void readFields(ResultSet resultSet) throws SQLException {
    Tuple row = resultSet.unwrap(PhoenixResultSet.class).getCurrentRow();
    Cell kv = row.getValue(0);
    ImmutableBytesWritable tmpPtr =
            new ImmutableBytesWritable(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
    // A single Cell will be returned with the count(*) - we decode that here
    rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault());
}
Example #12
Source File: RoundRobinResultIteratorWithStatsIT.java From phoenix with Apache License 2.0
@Test
public void testRoundRobinBehavior() throws Exception {
    int nRows = 30000;
    try (Connection conn = DriverManager.getConnection(getUrl())) {
        conn.createStatement().execute("CREATE TABLE " + tableName + "(K VARCHAR PRIMARY KEY)");
        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?)");
        for (int i = 1; i <= nRows; i++) {
            stmt.setString(1, i + "");
            stmt.executeUpdate();
            if ((i % 2000) == 0) {
                conn.commit();
            }
        }
        conn.commit();
        conn.createStatement().execute("UPDATE STATISTICS " + tableName);
        PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
        MockParallelIteratorFactory parallelIteratorFactory = new MockParallelIteratorFactory();
        phxConn.setIteratorFactory(parallelIteratorFactory);
        ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName);
        StatementContext ctx = rs.unwrap(PhoenixResultSet.class).getContext();
        PTable table = ctx.getResolver().getTables().get(0).getTable();
        parallelIteratorFactory.setTable(table);
        PhoenixStatement pstmt = stmt.unwrap(PhoenixStatement.class);
        int numIterators = pstmt.getQueryPlan().getSplits().size();
        assertTrue(numIterators > 1);
        int numFetches = 2 * numIterators;
        List<String> iteratorOrder = new ArrayList<>(numFetches);
        for (int i = 1; i <= numFetches; i++) {
            rs.next();
            iteratorOrder.add(rs.getString(1));
        }
        /*
         * Because TableResultIterators are created in parallel in multiple threads, their
         * relative order is not deterministic. However, once the iterators are assigned to a
         * RoundRobinResultIterator, the order in which the next iterator is picked is
         * deterministic - i1, i2, .. i7, i8, i1, i2, .. i7, i8, i1, i2, ..
         */
        for (int i = 0; i < numIterators; i++) {
            assertEquals(iteratorOrder.get(i), iteratorOrder.get(i + numIterators));
        }
    }
}
Example #13
Source File: UpsertSelectIT.java From phoenix with Apache License 2.0
// See https://issues.apache.org/jira/browse/PHOENIX-4265
@Test
public void testLongCodecUsedForRowTimestamp() throws Exception {
    String tableName = generateUniqueName();
    String indexName = generateUniqueName();
    Properties props = new Properties();
    props.setProperty(QueryServices.ENABLE_SERVER_SIDE_UPSERT_MUTATIONS, allowServerSideMutations);
    try (Connection conn = DriverManager.getConnection(getUrl(), props);
         Statement statement = conn.createStatement()) {
        statement.execute("CREATE IMMUTABLE TABLE " + tableName
                + " (k1 TIMESTAMP not null, k2 bigint not null, v bigint, constraint pk "
                + "primary key (k1 row_timestamp, k2)) SALT_BUCKETS = 9");
        statement.execute("CREATE INDEX " + indexName + " ON " + tableName + " (v) INCLUDE (k2)");
        try (PreparedStatement stmt =
                conn.prepareStatement("UPSERT INTO " + tableName + " VALUES (?, ?, ?) ")) {
            stmt.setTimestamp(1, new Timestamp(1000));
            stmt.setLong(2, 2000);
            stmt.setLong(3, 1000);
            stmt.executeUpdate();
            stmt.setTimestamp(1, new Timestamp(2000));
            stmt.setLong(2, 5000);
            stmt.setLong(3, 5);
            stmt.executeUpdate();
            stmt.setTimestamp(1, new Timestamp(3000));
            stmt.setLong(2, 5000);
            stmt.setLong(3, 5);
            stmt.executeUpdate();
            stmt.setTimestamp(1, new Timestamp(4000));
            stmt.setLong(2, 5000);
            stmt.setLong(3, 5);
            stmt.executeUpdate();
            stmt.setTimestamp(1, new Timestamp(5000));
            stmt.setLong(2, 2000);
            stmt.setLong(3, 10);
            stmt.executeUpdate();
            stmt.setTimestamp(1, new Timestamp(6000));
            stmt.setLong(2, 2000);
            stmt.setLong(3, 20);
            stmt.executeUpdate();
        }
        conn.commit();
        ResultSet rs = statement.executeQuery(
                "SELECT " + " K2 FROM " + tableName + " WHERE V = 5");
        assertTrue("Index " + indexName + " should have been used",
                rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan().getTableRef()
                        .getTable().getName().getString().equals(indexName));
        assertTrue(rs.next());
        assertEquals(5000, rs.getLong("k2"));
        assertTrue(rs.next());
        assertEquals(5000, rs.getLong("k2"));
        assertTrue(rs.next());
        assertEquals(5000, rs.getLong("k2"));
        assertFalse(rs.next());
        rs = statement.executeQuery("SELECT /*+ INDEX(" + tableName + " " + indexName + ") */ "
                + " K2 FROM " + tableName + " WHERE V = 5");
        assertTrue("Index " + indexName + " should have been used",
                rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan().getTableRef()
                        .getTable().getName().getString().equals(indexName));
        assertTrue(rs.next());
        assertEquals(5000, rs.getLong("k2"));
        assertTrue(rs.next());
        assertEquals(5000, rs.getLong("k2"));
        assertTrue(rs.next());
        assertEquals(5000, rs.getLong("k2"));
        assertFalse(rs.next());
    }
}
Example #14
Source File: RoundRobinResultIteratorIT.java From phoenix with Apache License 2.0
private static ResultIterator getResultIterator(ResultSet rs) throws SQLException {
    return rs.unwrap(PhoenixResultSet.class).getUnderlyingIterator();
}
Example #15
Source File: ExplainPlanWithStatsEnabledIT.java From phoenix with Apache License 2.0
// See https://issues.apache.org/jira/browse/PHOENIX-4287
@Test
public void testEstimatesForAggregateQueries() throws Exception {
    String tableName = generateUniqueName();
    try (Connection conn = DriverManager.getConnection(getUrl())) {
        int guidePostWidth = 20;
        String ddl = "CREATE TABLE " + tableName + " (k INTEGER PRIMARY KEY, a bigint, b bigint)"
                + " GUIDE_POSTS_WIDTH=" + guidePostWidth + " SPLIT ON (102, 105, 108)";
        conn.createStatement().execute(ddl);
        conn.createStatement().execute("upsert into " + tableName + " values (100,1,3)");
        conn.createStatement().execute("upsert into " + tableName + " values (101,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (102,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (103,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (104,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (105,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (106,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (107,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (108,2,4)");
        conn.createStatement().execute("upsert into " + tableName + " values (109,2,4)");
        conn.commit();
        conn.createStatement().execute("UPDATE STATISTICS " + tableName);
    }
    List<Object> binds = Lists.newArrayList();
    try (Connection conn = DriverManager.getConnection(getUrl())) {
        String sql = "SELECT COUNT(*) " + " FROM " + tableName;
        // We don't have the use stats for parallelization property
        // set on the table. In this case, we end up defaulting to the
        // value set in config, which is true.
        ResultSet rs = conn.createStatement().executeQuery(sql);
        // stats are being used for parallelization, so the number of scans is higher
        assertEquals(11, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
                .getScans().get(0).size());
        assertTrue(rs.next());
        assertEquals(10, rs.getInt(1));
        Estimate info = getByteRowEstimates(conn, sql, binds);
        assertEquals((Long) 10L, info.getEstimatedRows());
        assertTrue(info.getEstimateInfoTs() > 0);
        // Now, let's disable USE_STATS_FOR_PARALLELIZATION on the table
        conn.createStatement().execute(
                "ALTER TABLE " + tableName + " SET USE_STATS_FOR_PARALLELIZATION = " + false);
        rs = conn.createStatement().executeQuery(sql);
        // stats are not being used for parallelization, so the number of scans is lower
        assertEquals(4, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
                .getScans().get(0).size());
        assertTrue(rs.next());
        assertEquals(10, rs.getInt(1));
        info = getByteRowEstimates(conn, sql, binds);
        assertEquals((Long) 10L, info.getEstimatedRows());
        assertTrue(info.getEstimateInfoTs() > 0);
        // assert that the aggregate query on the view also works correctly
        String viewName = "V_" + generateUniqueName();
        conn.createStatement().execute("CREATE VIEW " + viewName + " AS SELECT * FROM "
                + tableName + " USE_STATS_FOR_PARALLELIZATION = false");
        sql = "SELECT COUNT(*) FROM " + viewName;
        rs = conn.createStatement().executeQuery(sql);
        // stats are not being used for parallelization, so the number of scans is lower
        assertEquals(4, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
                .getScans().get(0).size());
        assertTrue(rs.next());
        assertEquals(10, rs.getInt(1));
        info = getByteRowEstimates(conn, sql, binds);
        assertEquals((Long) 10L, info.getEstimatedRows());
        assertTrue(info.getEstimateInfoTs() > 0);
        // Now let's make sure that when using stats for parallelization, our estimates
        // and query results stay the same for the view and the base table
        conn.createStatement().execute(
                "ALTER TABLE " + tableName + " SET USE_STATS_FOR_PARALLELIZATION=true");
        sql = "SELECT COUNT(*) FROM " + tableName;
        // query the table
        rs = conn.createStatement().executeQuery(sql);
        // stats are being used for parallelization, so the number of scans is higher
        assertEquals(11, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
                .getScans().get(0).size());
        assertTrue(rs.next());
        assertEquals(10, rs.getInt(1));
        info = getByteRowEstimates(conn, sql, binds);
        assertEquals((Long) 10L, info.getEstimatedRows());
        assertTrue(info.getEstimateInfoTs() > 0);
        conn.createStatement().execute(
                "ALTER VIEW " + viewName + " SET USE_STATS_FOR_PARALLELIZATION=true");
        sql = "SELECT COUNT(*) FROM " + viewName;
        // query the view
        rs = conn.createStatement().executeQuery(sql);
        // stats are being used for parallelization, so the number of scans is higher
        assertEquals(11, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
                .getScans().get(0).size());
        assertTrue(rs.next());
        assertEquals(10, rs.getInt(1));
        info = getByteRowEstimates(conn, sql, binds);
        assertEquals((Long) 10L, info.getEstimatedRows());
        assertTrue(info.getEstimateInfoTs() > 0);
    }
}
Example #16
Source File: QueryLoggerIT.java From phoenix with Apache License 2.0
private void testPreparedStatement(LogLevel loglevel) throws Exception {
    String tableName = generateUniqueName();
    createTableAndInsertValues(tableName, true);
    Properties props = new Properties();
    props.setProperty(QueryServices.LOG_LEVEL, loglevel.name());
    Connection conn = DriverManager.getConnection(getUrl(), props);
    assertEquals(conn.unwrap(PhoenixConnection.class).getLogLevel(), loglevel);
    final MyClock clock = new MyClock(100);
    EnvironmentEdgeManager.injectEdge(clock);
    try {
        String query = "SELECT * FROM " + tableName + " where V = ?";
        StatementContext context;
        PreparedStatement pstmt = conn.prepareStatement(query);
        pstmt.setString(1, "value5");
        try (ResultSet rs = pstmt.executeQuery()) {
            context = ((PhoenixResultSet) rs).getContext();
            while (rs.next()) {
                rs.getString(1);
                rs.getString(2);
            }
        }
        String queryId = context.getQueryLogger().getQueryId();
        String logQuery = "SELECT * FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_LOG_TABLE + "\"";
        int delay = 5000;
        // sleep for some time to let the query log be committed
        Thread.sleep(delay);
        String explainQuery = "Explain " + "SELECT * FROM " + tableName + " where V = 'value5'";
        try (ResultSet explainRS = conn.createStatement().executeQuery(explainQuery);
             ResultSet rs = conn.createStatement().executeQuery(logQuery)) {
            boolean foundQueryLog = false;
            while (rs.next()) {
                if (rs.getString(QUERY_ID).equals(queryId)) {
                    foundQueryLog = true;
                    assertEquals(rs.getString(BIND_PARAMETERS),
                            loglevel == LogLevel.TRACE ? "value5" : null);
                    assertEquals(rs.getString(USER), System.getProperty("user.name"));
                    assertEquals(rs.getString(CLIENT_IP), InetAddress.getLocalHost().getHostAddress());
                    assertEquals(rs.getString(EXPLAIN_PLAN), QueryUtil.getExplainPlan(explainRS));
                    assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), context.getScan().toJSON());
                    assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 1);
                    assertEquals(rs.getString(QUERY), query);
                    assertEquals(rs.getString(QUERY_STATUS), QueryStatus.COMPLETED.toString());
                    assertTrue(LogLevel.TRACE == loglevel
                            ? rs.getString(SCAN_METRICS_JSON).contains("scanMetrics")
                            : rs.getString(SCAN_METRICS_JSON) == null);
                    assertEquals(rs.getTimestamp(START_TIME).getTime(), 100);
                    assertEquals(rs.getString(TENANT_ID), null);
                }
            }
            assertTrue(foundQueryLog);
            conn.close();
        }
    } finally {
        EnvironmentEdgeManager.injectEdge(null);
    }
}
Example #17
Source File: QueryOptimizerTest.java From phoenix with Apache License 2.0
private static void assertQualifierRangesNotPresent(ResultSet rs) throws SQLException {
    Scan scan = rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
            .getContext().getScan();
    assertNull(scan.getAttribute(MIN_QUALIFIER));
    assertNull(scan.getAttribute(MAX_QUALIFIER));
}
Example #18
Source File: QueryLoggerIT.java From phoenix with Apache License 2.0
@Test
public void testInfoLogs() throws Exception {
    String tableName = generateUniqueName();
    createTableAndInsertValues(tableName, true);
    Properties props = new Properties();
    props.setProperty(QueryServices.LOG_LEVEL, LogLevel.INFO.name());
    Connection conn = DriverManager.getConnection(getUrl(), props);
    assertEquals(conn.unwrap(PhoenixConnection.class).getLogLevel(), LogLevel.INFO);
    String query = "SELECT * FROM " + tableName;
    StatementContext context;
    try (ResultSet rs = conn.createStatement().executeQuery(query)) {
        context = ((PhoenixResultSet) rs).getContext();
        while (rs.next()) {
            rs.getString(1);
            rs.getString(2);
        }
    }
    String queryId = context.getQueryLogger().getQueryId();
    String logQuery = "SELECT * FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_LOG_TABLE + "\"";
    int delay = 5000;
    // sleep for some time to let the query log be committed
    Thread.sleep(delay);
    try (ResultSet rs = conn.createStatement().executeQuery(logQuery)) {
        boolean foundQueryLog = false;
        while (rs.next()) {
            if (rs.getString(QUERY_ID).equals(queryId)) {
                foundQueryLog = true;
                assertEquals(rs.getString(USER), System.getProperty("user.name"));
                assertEquals(rs.getString(CLIENT_IP), InetAddress.getLocalHost().getHostAddress());
                assertEquals(rs.getString(EXPLAIN_PLAN), null);
                assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), null);
                assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 10);
                assertEquals(rs.getString(QUERY), query);
                assertEquals(rs.getString(QUERY_STATUS), QueryStatus.COMPLETED.toString());
                assertEquals(rs.getString(TENANT_ID), null);
            }
        }
        assertTrue(foundQueryLog);
        conn.close();
    }
}
Example #19
Source File: QueryLoggerIT.java From phoenix with Apache License 2.0
@Test
public void testDebugLogs() throws Exception {
    String tableName = generateUniqueName();
    createTableAndInsertValues(tableName, true);
    Properties props = new Properties();
    props.setProperty(QueryServices.LOG_LEVEL, LogLevel.DEBUG.name());
    Connection conn = DriverManager.getConnection(getUrl(), props);
    assertEquals(conn.unwrap(PhoenixConnection.class).getLogLevel(), LogLevel.DEBUG);
    String query = "SELECT * FROM " + tableName;
    StatementContext context;
    try (ResultSet rs = conn.createStatement().executeQuery(query)) {
        context = ((PhoenixResultSet) rs).getContext();
        while (rs.next()) {
            rs.getString(1);
            rs.getString(2);
        }
    }
    String queryId = context.getQueryLogger().getQueryId();
    String logQuery = "SELECT * FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_LOG_TABLE + "\"";
    int delay = 5000;
    // sleep for some time to let the query log be committed
    Thread.sleep(delay);
    try (ResultSet explainRS = conn.createStatement().executeQuery("Explain " + query);
         ResultSet rs = conn.createStatement().executeQuery(logQuery)) {
        boolean foundQueryLog = false;
        while (rs.next()) {
            if (rs.getString(QUERY_ID).equals(queryId)) {
                foundQueryLog = true;
                assertEquals(rs.getString(BIND_PARAMETERS), null);
                assertEquals(rs.getString(USER), System.getProperty("user.name"));
                assertEquals(rs.getString(CLIENT_IP), InetAddress.getLocalHost().getHostAddress());
                assertEquals(rs.getString(EXPLAIN_PLAN), QueryUtil.getExplainPlan(explainRS));
                assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), context.getScan().toJSON());
                assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 10);
                assertEquals(rs.getString(QUERY), query);
                assertEquals(rs.getString(QUERY_STATUS), QueryStatus.COMPLETED.toString());
                assertEquals(rs.getString(TENANT_ID), null);
                assertTrue(rs.getString(SCAN_METRICS_JSON) == null);
                assertEquals(rs.getString(EXCEPTION_TRACE), null);
            } else {
                // confirm we are not logging system queries
                assertFalse(rs.getString(QUERY).contains(SYSTEM_CATALOG_SCHEMA));
            }
        }
        assertTrue(foundQueryLog);
        conn.close();
    }
}
Example #20
Source File: BaseIndexIT.java From phoenix with Apache License 2.0
@Test
public void testReturnedTimestamp() throws Exception {
    String tenantId = getOrganizationId();
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    try {
        String indexName = generateUniqueName();
        String tableName = initATableValues(generateUniqueName(), tenantId,
                getDefaultSplits(tenantId), new Date(System.currentTimeMillis()), null,
                getUrl(), tableDDLOptions);
        String ddl = "CREATE " + (localIndex ? "LOCAL " : "") + " INDEX " + indexName + " on "
                + tableName + "(A_STRING) INCLUDE (B_STRING)";
        conn.createStatement().executeUpdate(ddl);
        String query = "SELECT ENTITY_ID,A_STRING,B_STRING FROM " + tableName
                + " WHERE organization_id=? and entity_id=?";
        PreparedStatement statement = conn.prepareStatement(query);
        statement.setString(1, tenantId);
        long currentTime = EnvironmentEdgeManager.currentTimeMillis();
        String entityId = mutable ? ROW5 : Integer.toString(Math.abs(RAND.nextInt() % 1000000000));
        PreparedStatement ddlStatement = conn.prepareStatement("UPSERT INTO " + tableName
                + "(ORGANIZATION_ID, ENTITY_ID,A_STRING) VALUES('" + tenantId + "',?,?)");
        ddlStatement.setString(1, entityId);
        ddlStatement.setString(2, Integer.toString(Math.abs(RAND.nextInt() % 1000000000)));
        ddlStatement.executeUpdate();
        conn.commit();
        statement.setString(2, entityId);
        ResultSet rs = statement.executeQuery();
        assertTrue(rs.next());
        assertTrue(rs.unwrap(PhoenixResultSet.class).getCurrentRow().getValue(0).getTimestamp() >= currentTime);
        assertEquals(rs.getString(1).trim(), entityId);
        assertFalse(rs.next());
        currentTime = EnvironmentEdgeManager.currentTimeMillis();
        entityId = mutable ? ROW5 : Integer.toString(Math.abs(RAND.nextInt() % 1000000000));
        ddlStatement = conn.prepareStatement("UPSERT INTO " + tableName
                + "(ORGANIZATION_ID, ENTITY_ID,B_STRING) VALUES('" + tenantId + "',?,?)");
        ddlStatement.setString(1, entityId);
        ddlStatement.setString(2, Integer.toString(Math.abs(RAND.nextInt() % 1000000000)));
        ddlStatement.executeUpdate();
        conn.commit();
        statement.setString(2, entityId);
        rs = statement.executeQuery();
        assertTrue(rs.next());
        assertTrue(rs.unwrap(PhoenixResultSet.class).getCurrentRow().getValue(0).getTimestamp() >= currentTime);
        assertEquals(rs.getString(1).trim(), entityId);
        assertFalse(rs.next());
    } finally {
        conn.close();
    }
}
Example #21
Source File: UpsertCompiler.java From phoenix with Apache License 2.0
private static MutationState upsertSelect(PhoenixStatement statement, TableRef tableRef,
        RowProjector projector, ResultIterator iterator, int[] columnIndexes,
        int[] pkSlotIndexes) throws SQLException {
    try {
        PhoenixConnection connection = statement.getConnection();
        ConnectionQueryServices services = connection.getQueryServices();
        int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,
                QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
        int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
        boolean isAutoCommit = connection.getAutoCommit();
        byte[][] values = new byte[columnIndexes.length][];
        int rowCount = 0;
        Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation =
                Maps.newHashMapWithExpectedSize(batchSize);
        PTable table = tableRef.getTable();
        ResultSet rs = new PhoenixResultSet(iterator, projector, statement);
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        while (rs.next()) {
            for (int i = 0; i < values.length; i++) {
                PColumn column = table.getColumns().get(columnIndexes[i]);
                byte[] bytes = rs.getBytes(i + 1);
                ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes);
                Object value = rs.getObject(i + 1);
                int rsPrecision = rs.getMetaData().getPrecision(i + 1);
                Integer precision = rsPrecision == 0 ? null : rsPrecision;
                int rsScale = rs.getMetaData().getScale(i + 1);
                Integer scale = rsScale == 0 ? null : rsScale;
                // We are guaranteed that the two columns will have compatible types,
                // as we checked that before.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(),
                        precision, scale, column.getMaxLength(), column.getScale())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
                            .setColumnName(column.getName().getString())
                            .setMessage("value=" + column.getDataType().toStringLiteral(ptr, null))
                            .build().buildException();
                }
                column.getDataType().coerceBytes(ptr, value, column.getDataType(), precision,
                        scale, SortOrder.getDefault(), column.getMaxLength(), column.getScale(),
                        column.getSortOrder());
                values[i] = ByteUtil.copyKeyBytesIfNecessary(ptr);
            }
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation);
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(tableRef, mutation, 0, maxSize, connection);
                connection.getMutationState().join(state);
                connection.commit();
                mutation.clear();
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        return new MutationState(tableRef, mutation, rowCount / batchSize * batchSize, maxSize, connection);
    } finally {
        iterator.close();
    }
}
Example #22
Source File: IndexTool.java From phoenix with Apache License 2.0
private void splitIndexTable(PhoenixConnection pConnection, boolean autosplit,
        int autosplitNumRegions, double samplingRate)
        throws SQLException, IOException, IllegalArgumentException {
    int numRegions;
    try (org.apache.hadoop.hbase.client.Connection tempHConn = getTemporaryHConnection(pConnection);
         RegionLocator regionLocator = tempHConn.getRegionLocator(TableName.valueOf(qDataTable))) {
        numRegions = regionLocator.getStartKeys().length;
        if (autosplit && (numRegions <= autosplitNumRegions)) {
            LOGGER.info(String.format(
                    "Will not split index %s because the data table only has %s regions, autoSplitNumRegions=%s",
                    pIndexTable.getPhysicalName(), numRegions, autosplitNumRegions));
            return; // do nothing if # of regions is too low
        }
    }
    // build a tablesample query to fetch index column values from the data table
    DataSourceColNames colNames = new DataSourceColNames(pDataTable, pIndexTable);
    String qTableSample = String.format("%s TABLESAMPLE(%.2f)", qDataTable, samplingRate);
    List<String> dataColNames = colNames.getDataColNames();
    final String dataSampleQuery =
            QueryUtil.constructSelectStatement(qTableSample, dataColNames, null, Hint.NO_INDEX, true);
    IndexMaintainer maintainer = IndexMaintainer.create(pDataTable, pIndexTable, pConnection);
    ImmutableBytesWritable dataRowKeyPtr = new ImmutableBytesWritable();
    try (final PhoenixResultSet rs = pConnection.createStatement()
            .executeQuery(dataSampleQuery).unwrap(PhoenixResultSet.class);
         Admin admin = pConnection.getQueryServices().getAdmin()) {
        EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(numRegions);
        ValueGetter getter = getIndexValueGetter(rs, dataColNames);
        // loop over data table rows - build the index rowkey, put it in the histogram
        while (rs.next()) {
            rs.getCurrentRow().getKey(dataRowKeyPtr);
            // regionStart/EndKey only needed for local indexes, so we pass null
            byte[] indexRowKey = maintainer.buildRowKey(getter, dataRowKeyPtr, null, null,
                    HConstants.LATEST_TIMESTAMP);
            histo.addValue(indexRowKey);
        }
        List<Bucket> buckets = histo.computeBuckets();
        // do the split
        // to get the splits, we just need the right bound of every histogram bucket, excluding the last
        byte[][] splitPoints = new byte[buckets.size() - 1][];
        int splitIdx = 0;
        for (Bucket b : buckets.subList(0, buckets.size() - 1)) {
            splitPoints[splitIdx++] = b.getRightBoundExclusive();
        }
        // drop table and recreate with appropriate splits
        TableName indexTN = TableName.valueOf(pIndexTable.getPhysicalName().getBytes());
        HTableDescriptor descriptor = admin.getTableDescriptor(indexTN);
        admin.disableTable(indexTN);
        admin.deleteTable(indexTN);
        admin.createTable(descriptor, splitPoints);
    }
}
Example #23
Source File: UpsertCompiler.java From phoenix with Apache License 2.0
public static MutationState upsertSelect(StatementContext childContext, TableRef tableRef,
        RowProjector projector, ResultIterator iterator, int[] columnIndexes,
        int[] pkSlotIndexes, boolean useServerTimestamp, boolean prefixSysColValues)
        throws SQLException {
    PhoenixStatement statement = childContext.getStatement();
    PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,
            QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,
            QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    // we automatically flush the mutations when either auto commit is enabled, or
    // the target table is transactional (in that case changes are not visible until we commit)
    final boolean autoFlush = connection.getAutoCommit() || tableRef.getTable().isTransactional();
    int sizeOffset = 0;
    int numSplColumns = (tableRef.getTable().isMultiTenant() ? 1 : 0)
            + (tableRef.getTable().getViewIndexId() != null ? 1 : 0);
    byte[][] values = new byte[columnIndexes.length + numSplColumns][];
    if (prefixSysColValues) {
        int i = 0;
        if (tableRef.getTable().isMultiTenant()) {
            values[i++] = connection.getTenantId().getBytes();
        }
        if (tableRef.getTable().getViewIndexId() != null) {
            values[i++] = PSmallint.INSTANCE.toBytes(tableRef.getTable().getViewIndexId());
        }
    }
    int rowCount = 0;
    MultiRowMutationState mutation = new MultiRowMutationState(batchSize);
    PTable table = tableRef.getTable();
    IndexMaintainer indexMaintainer = null;
    byte[][] viewConstants = null;
    if (table.getIndexType() == IndexType.LOCAL) {
        PTable parentTable = statement.getConnection().getMetaDataCache()
                .getTableRef(new PTableKey(statement.getConnection().getTenantId(),
                        table.getParentName().getString())).getTable();
        indexMaintainer = table.getIndexMaintainer(parentTable, connection);
        viewConstants = IndexUtil.getViewConstants(parentTable);
    }
    try (ResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        while (rs.next()) {
            for (int i = 0, j = numSplColumns; j < values.length; j++, i++) {
                PColumn column = table.getColumns().get(columnIndexes[i]);
                byte[] bytes = rs.getBytes(i + 1);
                ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes);
                Object value = rs.getObject(i + 1);
                int rsPrecision = rs.getMetaData().getPrecision(i + 1);
                Integer precision = rsPrecision == 0 ? null : rsPrecision;
                int rsScale = rs.getMetaData().getScale(i + 1);
                Integer scale = rsScale == 0 ? null : rsScale;
                // We are guaranteed that the two columns will have compatible types,
                // as we checked that before.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(),
                        SortOrder.getDefault(), precision, scale, column.getMaxLength(),
                        column.getScale())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
                            .setColumnName(column.getName().getString())
                            .setMessage("value=" + column.getDataType().toStringLiteral(ptr, null))
                            .build().buildException();
                }
                column.getDataType().coerceBytes(ptr, value, column.getDataType(), precision,
                        scale, SortOrder.getDefault(), column.getMaxLength(), column.getScale(),
                        column.getSortOrder(), table.rowKeyOrderOptimizable());
                values[j] = ByteUtil.copyKeyBytesIfNecessary(ptr);
            }
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement,
                    useServerTimestamp, indexMaintainer, viewConstants, null, numSplColumns);
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (autoFlush && rowCount % batchSize == 0) {
                MutationState state =
                        new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection);
                connection.getMutationState().join(state);
                connection.getMutationState().send();
                mutation.clear();
            }
        }
        if (autoFlush) {
            // If auto commit is true, this last batch will be committed upon return
            sizeOffset = rowCount / batchSize * batchSize;
        }
        return new MutationState(tableRef, mutation, sizeOffset, maxSize, maxSizeBytes, connection);
    }
}
Example #24
Source File: PhoenixRuntime.java From phoenix with Apache License 2.0
/**
 * Reset the read metrics collected in the result set.
 *
 * @param rs
 * @throws SQLException
 * @see #getRequestReadMetrics(ResultSet)
 * @see #getOverAllReadRequestMetrics(ResultSet)
 */
public static void resetMetrics(ResultSet rs) throws SQLException {
    PhoenixResultSet prs = rs.unwrap(PhoenixResultSet.class);
    prs.resetMetrics();
}
Example #25
Source File: PhoenixRuntime.java From phoenix with Apache License 2.0
/**
 * Method to expose the overall metrics associated with executing a query via Phoenix. A typical
 * pattern for accessing request-level read metrics and overall read query metrics is:
 *
 * <pre>
 * {@code
 * Map<String, Map<MetricType, Long>> overAllQueryMetrics = null;
 * Map<String, Map<MetricType, Long>> requestReadMetrics = null;
 * try (ResultSet rs = stmt.executeQuery()) {
 *     while (rs.next()) {
 *         .....
 *     }
 *     overAllQueryMetrics = PhoenixRuntime.getOverAllReadRequestMetrics(rs);
 *     requestReadMetrics = PhoenixRuntime.getRequestReadMetrics(rs);
 *     PhoenixRuntime.resetMetrics(rs);
 * }
 * }
 * </pre>
 *
 * @param rs result set to get the metrics for
 * @return a map of metric name -> metric value
 * @throws SQLException
 */
public static Map<MetricType, Long> getOverAllReadRequestMetricInfo(ResultSet rs)
        throws SQLException {
    PhoenixResultSet resultSet = rs.unwrap(PhoenixResultSet.class);
    return resultSet.getOverAllRequestReadMetrics();
}
Example #26
Source File: PhoenixRuntime.java From phoenix with Apache License 2.0
/**
 * Method to expose the metrics associated with performing reads using the passed result set. A
 * typical pattern is:
 *
 * <pre>
 * {@code
 * Map<String, Map<MetricType, Long>> overAllQueryMetrics = null;
 * Map<String, Map<MetricType, Long>> requestReadMetrics = null;
 * try (ResultSet rs = stmt.executeQuery()) {
 *     while (rs.next()) {
 *         .....
 *     }
 *     overAllQueryMetrics = PhoenixRuntime.getOverAllReadRequestMetrics(rs);
 *     requestReadMetrics = PhoenixRuntime.getRequestReadMetrics(rs);
 *     PhoenixRuntime.resetMetrics(rs);
 * }
 * }
 * </pre>
 *
 * @param rs result set to get the metrics for
 * @return a map of (table name) -> (map of (metric name) -> (metric value))
 * @throws SQLException
 */
public static Map<String, Map<MetricType, Long>> getRequestReadMetricInfo(ResultSet rs)
        throws SQLException {
    PhoenixResultSet resultSet = rs.unwrap(PhoenixResultSet.class);
    return resultSet.getReadMetrics();
}
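Taken together, the three PhoenixRuntime methods above (Examples #24-#26) support a simple collect-then-reset workflow. The following is a minimal sketch of that pattern; the prepared statement stmt and the println calls are illustrative placeholders, not part of the Phoenix API:

// A minimal sketch of the collect-then-reset metrics pattern described
// in the javadoc above. Assumes stmt is an already-prepared Phoenix
// PreparedStatement; the println calls are placeholders for real handling.
Map<MetricType, Long> overallReadMetrics;
Map<String, Map<MetricType, Long>> requestReadMetrics;
try (ResultSet rs = stmt.executeQuery()) {
    while (rs.next()) {
        // consume the rows as usual
    }
    // overall metrics for the whole query
    overallReadMetrics = PhoenixRuntime.getOverAllReadRequestMetricInfo(rs);
    // per-table read metrics
    requestReadMetrics = PhoenixRuntime.getRequestReadMetricInfo(rs);
    // clear the collected metrics so the next query starts from a clean slate
    PhoenixRuntime.resetMetrics(rs);
}
System.out.println(overallReadMetrics);
System.out.println(requestReadMetrics);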