Java Code Examples for org.apache.phoenix.jdbc.PhoenixConnection#prepareStatement()
The following examples show how to use org.apache.phoenix.jdbc.PhoenixConnection#prepareStatement(). They are taken from the Apache Phoenix project; the source file for each example is noted above it.
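Before the project examples, here is a minimal, self-contained sketch of the typical prepareStatement() pattern on a PhoenixConnection. It is not taken from the Phoenix sources; the JDBC URL "jdbc:phoenix:localhost" and the table MY_TABLE are placeholders for illustration only.

// Minimal sketch (assumed setup, not from the Phoenix sources): unwrap a JDBC
// connection to PhoenixConnection, prepare a parameterized UPSERT and a SELECT,
// and commit. The URL and table name are placeholders.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

import org.apache.phoenix.jdbc.PhoenixConnection;

public class PrepareStatementSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection jdbcConn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            PhoenixConnection conn = jdbcConn.unwrap(PhoenixConnection.class);

            // Parameterized UPSERT; Phoenix connections do not auto-commit by default,
            // so commit explicitly after executing.
            try (PreparedStatement upsert =
                    conn.prepareStatement("UPSERT INTO MY_TABLE (ID, NAME) VALUES (?, ?)")) {
                upsert.setLong(1, 1L);
                upsert.setString(2, "example");
                upsert.execute();
            }
            conn.commit();

            // Parameterized SELECT using the same prepareStatement() call.
            try (PreparedStatement select =
                    conn.prepareStatement("SELECT ID, NAME FROM MY_TABLE WHERE ID = ?")) {
                select.setLong(1, 1L);
                try (ResultSet rs = select.executeQuery()) {
                    while (rs.next()) {
                        System.out.println(rs.getLong(1) + " " + rs.getString(2));
                    }
                }
            }
        }
    }
}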
Example 1
Source File: KeyRangeClipTest.java From phoenix with Apache License 2.0
private static byte[] getRange(PhoenixConnection pconn, List<Object> startValues) throws SQLException {
    byte[] lowerRange;
    if (startValues == null) {
        lowerRange = KeyRange.UNBOUND;
    } else {
        String upsertValues = StringUtils.repeat("?,", startValues.size()).substring(0, startValues.size() * 2 - 1);
        String upsertStmt = "UPSERT INTO T VALUES(" + upsertValues + ")";
        PreparedStatement stmt = pconn.prepareStatement(upsertStmt);
        for (int i = 0; i < startValues.size(); i++) {
            stmt.setObject(i + 1, startValues.get(i));
        }
        stmt.execute();
        Cell startCell = PhoenixRuntime.getUncommittedDataIterator(pconn).next().getSecond().get(0);
        lowerRange = CellUtil.cloneRow(startCell);
        pconn.rollback();
    }
    return lowerRange;
}
Example 2
Source File: ExplainPlanWithStatsEnabledIT.java From phoenix with Apache License 2.0
private static void assertUseStatsForQueryFlag(String tableName, PhoenixConnection conn,
        Boolean expected) throws TableNotFoundException, SQLException {
    assertEquals(expected, conn.unwrap(PhoenixConnection.class).getMetaDataCache()
            .getTableRef(new PTableKey(null, tableName)).getTable()
            .useStatsForParallelization());
    String query =
            "SELECT USE_STATS_FOR_PARALLELIZATION FROM SYSTEM.CATALOG WHERE TABLE_NAME = ? AND COLUMN_NAME IS NULL AND COLUMN_FAMILY IS NULL AND TENANT_ID IS NULL";
    PreparedStatement stmt = conn.prepareStatement(query);
    stmt.setString(1, tableName);
    ResultSet rs = stmt.executeQuery();
    rs.next();
    boolean b = rs.getBoolean(1);
    if (expected == null) {
        assertTrue(rs.wasNull());
    } else {
        assertEquals(expected, b);
    }
}
Example 3
Source File: QueryUtil.java From phoenix with Apache License 2.0
public static PreparedStatement getCatalogsStmt(PhoenixConnection connection) throws SQLException {
    List<String> parameterValues = new ArrayList<String>(4);
    StringBuilder buf = new StringBuilder("select \n" +
            " DISTINCT " + TENANT_ID + " " + TABLE_CAT +
            " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
            " where " + COLUMN_NAME + " is null" +
            " and " + COLUMN_FAMILY + " is null" +
            " and " + TENANT_ID + " is not null");
    addTenantIdFilter(connection, buf, null, parameterValues);
    buf.append(" order by " + TENANT_ID);
    PreparedStatement stmt = connection.prepareStatement(buf.toString());
    for (int i = 0; i < parameterValues.size(); i++) {
        stmt.setString(i + 1, parameterValues.get(i));
    }
    return stmt;
}
Example 4
Source File: Task.java From phoenix with Apache License 2.0
public static void deleteTask(PhoenixConnection conn, PTable.TaskType taskType, Timestamp ts, String tenantId,
        String schemaName, String tableName, boolean accessCheckEnabled) throws IOException {
    PreparedStatement stmt = null;
    try {
        stmt = conn.prepareStatement("DELETE FROM " +
                PhoenixDatabaseMetaData.SYSTEM_TASK_NAME +
                " WHERE " + PhoenixDatabaseMetaData.TASK_TYPE + " = ? AND " +
                PhoenixDatabaseMetaData.TASK_TS + " = ? AND " +
                PhoenixDatabaseMetaData.TENANT_ID + (tenantId == null ? " IS NULL " : " = '" + tenantId + "'") + " AND " +
                PhoenixDatabaseMetaData.TABLE_SCHEM + (schemaName == null ? " IS NULL " : " = '" + schemaName + "'") + " AND " +
                PhoenixDatabaseMetaData.TABLE_NAME + " = ?");
        stmt.setByte(1, taskType.getSerializedValue());
        stmt.setTimestamp(2, ts);
        stmt.setString(3, tableName);
    } catch (SQLException e) {
        throw new IOException(e);
    }
    mutateSystemTaskTable(conn, stmt, accessCheckEnabled);
}
Example 5
Source File: Task.java From phoenix with Apache License 2.0
public static void addTask(PhoenixConnection conn, PTable.TaskType taskType, String tenantId, String schemaName,
        String tableName, String taskStatus, String data, Integer priority, Timestamp startTs, Timestamp endTs,
        boolean accessCheckEnabled) throws IOException {
    PreparedStatement stmt;
    try {
        stmt = conn.prepareStatement("UPSERT INTO " +
                PhoenixDatabaseMetaData.SYSTEM_TASK_NAME + " ( " +
                PhoenixDatabaseMetaData.TASK_TYPE + ", " +
                PhoenixDatabaseMetaData.TENANT_ID + ", " +
                PhoenixDatabaseMetaData.TABLE_SCHEM + ", " +
                PhoenixDatabaseMetaData.TABLE_NAME + ", " +
                PhoenixDatabaseMetaData.TASK_STATUS + ", " +
                PhoenixDatabaseMetaData.TASK_PRIORITY + ", " +
                PhoenixDatabaseMetaData.TASK_TS + ", " +
                PhoenixDatabaseMetaData.TASK_END_TS + ", " +
                PhoenixDatabaseMetaData.TASK_DATA +
                " ) VALUES(?,?,?,?,?,?,?,?,?)");
        stmt = setValuesToAddTaskPS(stmt, taskType, tenantId, schemaName, tableName, taskStatus, data, priority, startTs, endTs);
    } catch (SQLException e) {
        throw new IOException(e);
    }
    mutateSystemTaskTable(conn, stmt, accessCheckEnabled);
}
Example 6
Source File: CSVCommonsLoaderIT.java From phoenix with Apache License 2.0
@Test
public void testCSVCommonsUpsert_WithArray() throws Exception {
    CSVParser parser = null;
    PhoenixConnection conn = null;
    try {
        // Create table
        String statements = "CREATE TABLE IF NOT EXISTS ARRAY_TABLE "
                + "(ID BIGINT NOT NULL PRIMARY KEY, VALARRAY INTEGER ARRAY);";
        conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
        PhoenixRuntime.executeStatements(conn, new StringReader(statements), null);

        // Upsert CSV file
        CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, "ARRAY_TABLE", null,
                true, ',', '"', null, "!");
        csvUtil.upsert(new StringReader("ID,VALARRAY\n" + "1,2!3!4\n"));

        // Compare Phoenix ResultSet with CSV file content
        PreparedStatement statement = conn.prepareStatement("SELECT ID, VALARRAY FROM ARRAY_TABLE");
        ResultSet phoenixResultSet = statement.executeQuery();
        assertTrue(phoenixResultSet.next());
        assertEquals(1L, phoenixResultSet.getLong(1));
        assertEquals(
                PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, new Integer[]{2, 3, 4}),
                phoenixResultSet.getArray(2));
        assertFalse(phoenixResultSet.next());
    } finally {
        if (parser != null) parser.close();
        if (conn != null) conn.close();
    }
}
Example 7
Source File: MutationStateIT.java From phoenix with Apache License 2.0
private void upsertRows(PhoenixConnection conn, String fullTableName) throws SQLException {
    PreparedStatement stmt = conn.prepareStatement("upsert into " + fullTableName
            + " (organization_id, entity_id, score) values (?,?,?)");
    for (int i = 0; i < 10000; i++) {
        stmt.setString(1, "AAAA" + i);
        stmt.setString(2, "BBBB" + i);
        stmt.setInt(3, 1);
        stmt.execute();
    }
}
Example 8
Source File: QueryMoreIT.java From phoenix with Apache License 2.0
private void upsertRows(PhoenixConnection conn, String fullTableName) throws SQLException {
    PreparedStatement stmt = conn.prepareStatement("upsert into " + fullTableName
            + " (organization_id, entity_id, score) values (?,?,?)");
    for (int i = 0; i < 4; i++) {
        stmt.setString(1, "AAAA" + i);
        stmt.setString(2, "BBBB" + i);
        stmt.setInt(3, 1);
        stmt.execute();
    }
}
Example 9
Source File: CSVCommonsLoaderIT.java From phoenix with Apache License 2.0
@Test
public void testCSVCommonsUpsert_WithTimestamp() throws Exception {
    CSVParser parser = null;
    PhoenixConnection conn = null;
    try {
        // Create table
        String statements = "CREATE TABLE IF NOT EXISTS TS_TABLE "
                + "(ID BIGINT NOT NULL PRIMARY KEY, TS TIMESTAMP);";
        conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
        PhoenixRuntime.executeStatements(conn, new StringReader(statements), null);

        // Upsert CSV file
        CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, "TS_TABLE",
                ImmutableList.<String>of(), true, ',', '"', null, "!");
        csvUtil.upsert(new StringReader("ID,TS\n"
                + "1,1970-01-01 00:00:10\n"
                + "2,1970-01-01 00:00:10.123\n"));

        // Compare Phoenix ResultSet with CSV file content
        PreparedStatement statement = conn.prepareStatement("SELECT ID, TS FROM TS_TABLE ORDER BY ID");
        ResultSet phoenixResultSet = statement.executeQuery();
        assertTrue(phoenixResultSet.next());
        assertEquals(1L, phoenixResultSet.getLong(1));
        assertEquals(10000L, phoenixResultSet.getTimestamp(2).getTime());
        assertTrue(phoenixResultSet.next());
        assertEquals(2L, phoenixResultSet.getLong(1));
        assertEquals(10123L, phoenixResultSet.getTimestamp(2).getTime());
        assertFalse(phoenixResultSet.next());
    } finally {
        if (parser != null) parser.close();
        if (conn != null) conn.close();
    }
}
Example 10
Source File: CSVCommonsLoaderIT.java From phoenix with Apache License 2.0
@Test
public void testCSVCommonsUpsert_WithArray() throws Exception {
    CSVParser parser = null;
    PhoenixConnection conn = null;
    try {
        // Create table
        String statements = "CREATE TABLE IF NOT EXISTS ARRAY_TABLE "
                + "(ID BIGINT NOT NULL PRIMARY KEY, VALARRAY INTEGER ARRAY);";
        conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
        PhoenixRuntime.executeStatements(conn, new StringReader(statements), null);

        // Upsert CSV file
        CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, "ARRAY_TABLE",
                ImmutableList.<String>of(), true, ',', '"', null, "!");
        csvUtil.upsert(new StringReader("ID,VALARRAY\n" + "1,2!3!4\n"));

        // Compare Phoenix ResultSet with CSV file content
        PreparedStatement statement = conn.prepareStatement("SELECT ID, VALARRAY FROM ARRAY_TABLE");
        ResultSet phoenixResultSet = statement.executeQuery();
        assertTrue(phoenixResultSet.next());
        assertEquals(1L, phoenixResultSet.getLong(1));
        assertEquals(
                PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, new Integer[]{2, 3, 4}),
                phoenixResultSet.getArray(2));
        assertFalse(phoenixResultSet.next());
    } finally {
        if (parser != null) parser.close();
        if (conn != null) conn.close();
    }
}
Example 11
Source File: QueryUtil.java From phoenix with Apache License 2.0
public static PreparedStatement getSuperTablesStmt(PhoenixConnection connection, String catalog,
        String schemaPattern, String tableNamePattern) throws SQLException {
    List<String> parameterValues = new ArrayList<String>(4);
    StringBuilder buf = new StringBuilder("select \n" +
            TENANT_ID + " " + TABLE_CAT + "," + // Use tenantId for catalog
            TABLE_SCHEM + "," +
            TABLE_NAME + "," +
            COLUMN_FAMILY + " " + SUPERTABLE_NAME +
            " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
            " where " + COLUMN_NAME + " is null" +
            " and " + LINK_TYPE + " = " + PTable.LinkType.PHYSICAL_TABLE.getSerializedValue());
    addTenantIdFilter(connection, buf, catalog, parameterValues);
    if (schemaPattern != null) {
        buf.append(" and " + TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like ?"));
        if (schemaPattern.length() > 0) {
            parameterValues.add(schemaPattern);
        }
    }
    if (tableNamePattern != null) {
        buf.append(" and " + TABLE_NAME + " like ?");
        parameterValues.add(tableNamePattern);
    }
    buf.append(" order by " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + SUPERTABLE_NAME);
    PreparedStatement stmt = connection.prepareStatement(buf.toString());
    for (int i = 0; i < parameterValues.size(); i++) {
        stmt.setString(i + 1, parameterValues.get(i));
    }
    return stmt;
}
Example 12
Source File: QueryUtil.java From phoenix with Apache License 2.0
/**
 * Util that generates a PreparedStatement against syscat to fetch schema listings.
 */
public static PreparedStatement getSchemasStmt(PhoenixConnection connection, String catalog,
        String schemaPattern) throws SQLException {
    List<String> parameterValues = new ArrayList<String>(4);
    StringBuilder buf = new StringBuilder("select distinct \n" +
            TABLE_SCHEM + "," +
            TENANT_ID + " " + TABLE_CATALOG +
            " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
            " where " + COLUMN_NAME + " is null");
    addTenantIdFilter(connection, buf, catalog, parameterValues);
    if (schemaPattern != null) {
        buf.append(" and " + TABLE_SCHEM + " like ?");
        parameterValues.add(schemaPattern);
    }
    if (SchemaUtil.isNamespaceMappingEnabled(null, connection.getQueryServices().getProps())) {
        buf.append(" and " + TABLE_NAME + " = '" + MetaDataClient.EMPTY_TABLE + "'");
    }
    // TODO: we should union this with SYSTEM.SEQUENCE too, but we only have support for
    // UNION ALL and we really need UNION so that it dedups.
    PreparedStatement stmt = connection.prepareStatement(buf.toString());
    for (int i = 0; i < parameterValues.size(); i++) {
        stmt.setString(i + 1, parameterValues.get(i));
    }
    return stmt;
}
Example 13
Source File: UpgradeUtil.java From phoenix with Apache License 2.0
private static void updateLink(PhoenixConnection conn, String srcTableName, String destTableName,
        PName schemaName, PName tableName) throws SQLException {
    String updateLinkSql = String.format(UPDATE_LINK, destTableName);
    boolean hasTenantId = conn.getTenantId() != null && conn.getTenantId().getBytes().length != 0;
    if (hasTenantId) {
        updateLinkSql += " AND TENANT_ID = ? ";
    }
    PreparedStatement updateLinkStatement = conn.prepareStatement(updateLinkSql);
    updateLinkStatement.setString(1, schemaName.getString());
    updateLinkStatement.setString(2, schemaName.getString());
    updateLinkStatement.setString(3, tableName.getString());
    updateLinkStatement.setString(4, srcTableName);
    if (hasTenantId) {
        updateLinkStatement.setString(5, conn.getTenantId().getString());
    }
    updateLinkStatement.execute();

    String deleteLinkSql = DELETE_LINK;
    if (hasTenantId) {
        deleteLinkSql += " AND TENANT_ID = ? ";
    }
    PreparedStatement deleteLinkStatement = conn.prepareStatement(deleteLinkSql);
    deleteLinkStatement.setString(1, schemaName.getString());
    deleteLinkStatement.setString(2, schemaName.getString());
    deleteLinkStatement.setString(3, tableName.getString());
    deleteLinkStatement.setString(4, srcTableName);
    if (hasTenantId) {
        deleteLinkStatement.setString(5, conn.getTenantId().getString());
    }
    deleteLinkStatement.execute();
}
Example 14
Source File: UpgradeUtil.java From phoenix with Apache License 2.0
private static void upsertBaseColumnCountInHeaderRow(PhoenixConnection metaConnection,
        String tenantId, String schemaName, String viewOrTableName, int baseColumnCount)
        throws SQLException {
    try (PreparedStatement stmt =
            metaConnection.prepareStatement(UPSERT_BASE_COLUMN_COUNT_IN_HEADER_ROW)) {
        stmt.setString(1, tenantId);
        stmt.setString(2, schemaName);
        stmt.setString(3, viewOrTableName);
        stmt.setString(4, null);
        stmt.setString(5, null);
        stmt.setInt(6, baseColumnCount);
        stmt.executeUpdate();
    }
}
Example 15
Source File: CSVCommonsLoaderIT.java From phoenix with Apache License 2.0
@Test
public void testCSVCommonsUpsert_WithTimestamp() throws Exception {
    CSVParser parser = null;
    PhoenixConnection conn = null;
    try {
        // Create table
        String statements = "CREATE TABLE IF NOT EXISTS TS_TABLE "
                + "(ID BIGINT NOT NULL PRIMARY KEY, TS TIMESTAMP);";
        conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
        PhoenixRuntime.executeStatements(conn, new StringReader(statements), null);

        // Upsert CSV file
        CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, "TS_TABLE", null,
                true, ',', '"', null, "!");
        csvUtil.upsert(new StringReader("ID,TS\n"
                + "1,1970-01-01 00:00:10\n"
                + "2,1970-01-01 00:00:10.123\n"));

        // Compare Phoenix ResultSet with CSV file content
        PreparedStatement statement = conn.prepareStatement("SELECT ID, TS FROM TS_TABLE ORDER BY ID");
        ResultSet phoenixResultSet = statement.executeQuery();
        assertTrue(phoenixResultSet.next());
        assertEquals(1L, phoenixResultSet.getLong(1));
        assertEquals(10000L, phoenixResultSet.getTimestamp(2).getTime());
        assertTrue(phoenixResultSet.next());
        assertEquals(2L, phoenixResultSet.getLong(1));
        assertEquals(10123L, phoenixResultSet.getTimestamp(2).getTime());
        assertFalse(phoenixResultSet.next());
    } finally {
        if (parser != null) parser.close();
        if (conn != null) conn.close();
    }
}
Example 16
Source File: CsvUpsertExecutor.java From phoenix with Apache License 2.0
/**
 * Static constructor method for creating a CsvUpsertExecutor.
 *
 * @param conn Phoenix connection upon which upserts are to be performed
 * @param tableName name of the table in which upserts are to be performed
 * @param columnInfoList description of the columns to be upserted to, in the same order as in the CSV input
 * @param upsertListener listener that will be notified of upserts, can be null
 * @param arrayElementSeparator separator string to delimit string representations of arrays
 * @return the created CsvUpsertExecutor
 */
public static CsvUpsertExecutor create(PhoenixConnection conn, String tableName,
        List<ColumnInfo> columnInfoList, UpsertListener upsertListener, String arrayElementSeparator) {
    PreparedStatement preparedStatement = null;
    try {
        String upsertSql = QueryUtil.constructUpsertStatement(tableName, columnInfoList);
        LOG.info("Upserting SQL data with {}", upsertSql);
        preparedStatement = conn.prepareStatement(upsertSql);
    } catch (SQLException e) {
        throw new RuntimeException(e);
    }
    return new CsvUpsertExecutor(conn, columnInfoList, preparedStatement, upsertListener, arrayElementSeparator);
}
Example 17
Source File: QueryUtil.java From phoenix with Apache License 2.0
public static PreparedStatement getIndexInfoStmt(PhoenixConnection connection, String catalog,
        String schema, String table, boolean unique, boolean approximate) throws SQLException {
    if (unique) { // No unique indexes
        return null;
    }
    List<String> parameterValues = new ArrayList<String>(4);
    StringBuilder buf = new StringBuilder("select \n" +
            TENANT_ID + " " + TABLE_CAT + ",\n" + // use this column for column family name
            TABLE_SCHEM + ",\n" +
            DATA_TABLE_NAME + " " + TABLE_NAME + ",\n" +
            "true NON_UNIQUE,\n" +
            "null INDEX_QUALIFIER,\n" +
            TABLE_NAME + " INDEX_NAME,\n" +
            DatabaseMetaData.tableIndexOther + " TYPE,\n" +
            ORDINAL_POSITION + ",\n" +
            COLUMN_NAME + ",\n" +
            "CASE WHEN " + COLUMN_FAMILY + " IS NOT NULL THEN null WHEN " + SORT_ORDER + " = " +
            (SortOrder.DESC.getSystemValue()) + " THEN 'D' ELSE 'A' END ASC_OR_DESC,\n" +
            "null CARDINALITY,\n" +
            "null PAGES,\n" +
            "null FILTER_CONDITION,\n" +
            // Include data type info, though not in spec
            ExternalSqlTypeIdFunction.NAME + "(" + DATA_TYPE + ") AS " + DATA_TYPE + ",\n" +
            SqlTypeNameFunction.NAME + "(" + DATA_TYPE + ") AS " + TYPE_NAME + ",\n" +
            DATA_TYPE + " " + TYPE_ID + ",\n" +
            COLUMN_FAMILY + ",\n" +
            COLUMN_SIZE + ",\n" +
            ARRAY_SIZE +
            "\nfrom " + SYSTEM_CATALOG +
            "\nwhere ");
    buf.append(TABLE_SCHEM + (schema == null || schema.length() == 0 ? " is null" : " = ?"));
    if (schema != null && schema.length() > 0) {
        parameterValues.add(schema);
    }
    buf.append("\nand " + DATA_TABLE_NAME + " = ?");
    parameterValues.add(table);
    buf.append("\nand " + COLUMN_NAME + " is not null");
    addTenantIdFilter(connection, buf, catalog, parameterValues);
    buf.append("\norder by INDEX_NAME," + ORDINAL_POSITION);
    PreparedStatement stmt = connection.prepareStatement(buf.toString());
    for (int i = 0; i < parameterValues.size(); i++) {
        stmt.setString(i + 1, parameterValues.get(i));
    }
    return stmt;
}
Example 18
Source File: QueryDatabaseMetaDataIT.java From phoenix with Apache License 2.0
@Test
public void testCreateOnExistingTable() throws Exception {
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(),
            PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    String tableName = MDTEST_NAME;
    String schemaName = MDTEST_SCHEMA_NAME;
    byte[] cfA = Bytes.toBytes(SchemaUtil.normalizeIdentifier("a"));
    byte[] cfB = Bytes.toBytes(SchemaUtil.normalizeIdentifier("b"));
    byte[] cfC = Bytes.toBytes("c");
    byte[][] familyNames = new byte[][] { cfB, cfC };
    byte[] htableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
    HBaseAdmin admin = pconn.getQueryServices().getAdmin();
    try {
        admin.disableTable(htableName);
        admin.deleteTable(htableName);
        admin.enableTable(htableName);
    } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
    }

    @SuppressWarnings("deprecation")
    HTableDescriptor descriptor = new HTableDescriptor(htableName);
    for (byte[] familyName : familyNames) {
        HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);
        descriptor.addFamily(columnDescriptor);
    }
    admin.createTable(descriptor);

    long ts = nextTimestamp();
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 5));
    PhoenixConnection conn1 = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class);

    ensureTableCreated(getUrl(), tableName, null, ts);

    descriptor = admin.getTableDescriptor(htableName);
    assertEquals(3, descriptor.getColumnFamilies().length);
    HColumnDescriptor cdA = descriptor.getFamily(cfA);
    assertNotEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdA.getKeepDeletedCellsAsEnum());
    assertEquals(DataBlockEncoding.NONE, cdA.getDataBlockEncoding()); // Overridden using WITH
    assertEquals(1, cdA.getMaxVersions()); // Overridden using WITH
    HColumnDescriptor cdB = descriptor.getFamily(cfB);
    // Allow KEEP_DELETED_CELLS to be false for VIEW
    assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdB.getKeepDeletedCellsAsEnum());
    assertEquals(DataBlockEncoding.NONE, cdB.getDataBlockEncoding()); // Should keep the original value.
    // CF c should stay the same since it's not a Phoenix cf.
    HColumnDescriptor cdC = descriptor.getFamily(cfC);
    assertNotNull("Column family not found", cdC);
    assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdC.getKeepDeletedCellsAsEnum());
    assertFalse(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING == cdC.getDataBlockEncoding());
    assertTrue(descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName()));
    assertTrue(descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName()));
    assertTrue(descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName()));
    admin.close();

    int rowCount = 5;
    String upsert = "UPSERT INTO " + tableName + "(id,col1,col2) VALUES(?,?,?)";
    PreparedStatement ps = conn1.prepareStatement(upsert);
    for (int i = 0; i < rowCount; i++) {
        ps.setString(1, Integer.toString(i));
        ps.setInt(2, i + 1);
        ps.setInt(3, i + 2);
        ps.execute();
    }
    conn1.commit();
    conn1.close();

    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 6));
    Connection conn2 = DriverManager.getConnection(getUrl(), props);
    String query = "SELECT count(1) FROM " + tableName;
    ResultSet rs = conn2.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals(rowCount, rs.getLong(1));
    query = "SELECT id, col1,col2 FROM " + tableName;
    rs = conn2.createStatement().executeQuery(query);
    for (int i = 0; i < rowCount; i++) {
        assertTrue(rs.next());
        assertEquals(Integer.toString(i), rs.getString(1));
        assertEquals(i + 1, rs.getInt(2));
        assertEquals(i + 2, rs.getInt(3));
    }
    assertFalse(rs.next());
    conn2.close();
}