Java Code Examples for java.sql.Statement#clearBatch()
The following examples show how to use java.sql.Statement#clearBatch(). Each example is taken from an open source project; the source file, project, and license are noted above the code.
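Before the project-specific examples, here is a minimal sketch of the life cycle that clearBatch() belongs to: commands are queued with addBatch(), run with executeBatch(), and discarded with clearBatch() so the same Statement can be reused. The JDBC URL ("jdbc:h2:mem:demo") and table name (t1) are placeholders for illustration only and do not come from any of the projects below; an in-memory H2 driver on the classpath is assumed.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class ClearBatchSketch {
    public static void main(String[] args) throws SQLException {
        // Placeholder in-memory database; any JDBC driver would do.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
             Statement stmt = conn.createStatement()) {
            stmt.execute("create table t1(c1 int)");

            stmt.addBatch("insert into t1 values(1)");
            stmt.addBatch("insert into t1 values(2)");
            stmt.clearBatch();                        // discard the queued commands
            int[] none = stmt.executeBatch();         // empty batch -> zero-length array
            System.out.println(none.length);          // 0

            stmt.addBatch("insert into t1 values(3)");
            int[] counts = stmt.executeBatch();       // executes only the re-added command
            stmt.clearBatch();                        // reset before reusing the statement
            System.out.println(counts.length);        // 1
        }
    }
}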
Example 1
Source File: SWStatementTest.java From skywalking with Apache License 2.0
@Test
public void testBatch() throws SQLException, MalformedURLException {
    Statement statement = multiHostConnection.createStatement();
    statement.addBatch("UPDATE test SET a = 1 WHERE b = 2");
    int[] resultSet = statement.executeBatch();
    statement.clearBatch();
    verify(mysqlStatement).executeBatch();
    verify(mysqlStatement).addBatch(anyString());
    verify(mysqlStatement).clearBatch();

    TraceSegment traceSegment = segmentStorage.getTraceSegments().get(0);
    List<AbstractTracingSpan> spans = SegmentHelper.getSpans(traceSegment);
    assertThat(spans.size(), is(1));
    assertDBSpan(spans.get(0), "Mysql/JDBI/Statement/executeBatch", "");
}
Example 2
Source File: BatchUpdateTest.java From spliceengine with GNU Affero General Public License v3.0
public void testEmptyStatementBatch() throws SQLException {
    Statement stmt = createStatement();

    // try executing a batch which nothing in it. Should work.
    println("Positive Statement: clear the batch and run the empty batch");
    stmt.clearBatch();
    assertBatchUpdateCounts(new int[0], stmt.executeBatch());

    stmt.close();
    commit();
}
Example 3
Source File: IotdbBasic.java From iotdb-benchmark with Apache License 2.0
public void insertOneBatch(List<String> sqls, ThreadLocal<Long> totalTime, ThreadLocal<Long> errorCount) {
    Statement statement;
    int[] result;
    long errorNum = 0;
    try {
        statement = connection.createStatement();
        for (String sql : sqls) {
            statement.addBatch(sql);
        }
        long startTime = System.currentTimeMillis();
        result = statement.executeBatch();
        statement.clearBatch();
        statement.close();
        long endTime = System.currentTimeMillis();
        long costTime = endTime - startTime;
        for (int i = 0; i < result.length; i++) {
            if (result[i] == -1) {
                errorNum++;
            }
        }
        if (errorNum > 0) {
            LOGGER.info("Batch insert failed, the failed number is {}! ", errorNum);
        } else {
            totalTime.set(totalTime.get() + costTime);
        }
        errorCount.set(errorCount.get() + errorNum);
    } catch (SQLException e) {
        e.printStackTrace();
    }
}
Example 4
Source File: BatchUpdateTest.java From gemfirexd-oss with Apache License 2.0
public void testEmptyStatementBatch() throws SQLException {
    Statement stmt = createStatement();

    // try executing a batch which nothing in it. Should work.
    println("Positive Statement: clear the batch and run the empty batch");
    stmt.clearBatch();
    assertBatchUpdateCounts(new int[0], stmt.executeBatch());

    stmt.close();
    commit();
}
Example 5
Source File: BatchUpdateTest.java From gemfirexd-oss with Apache License 2.0
public void testCombinationsOfClearBatch() throws SQLException {
    Statement stmt = createStatement();

    println("Positive Statement: add 3 statements, clear and execute batch");
    stmt.addBatch("insert into t1 values(2)");
    stmt.addBatch("insert into t1 values(2)");
    stmt.addBatch("insert into t1 values(2)");
    stmt.clearBatch();
    // Batch should be cleared, there should be no update count
    assertBatchUpdateCounts(new int[0], stmt.executeBatch());
    assertTableRowCount("T1", 0);

    println("Positive Statement: add 3 statements, clear batch, " +
            "add 3 more statements and execute batch");
    stmt.addBatch("insert into t1 values(2)");
    stmt.addBatch("insert into t1 values(2)");
    stmt.addBatch("insert into t1 values(2)");
    stmt.clearBatch();
    stmt.addBatch("insert into t1 values(2)");
    stmt.addBatch("insert into t1 values(2)");
    stmt.addBatch("insert into t1 values(2)");
    assertBatchUpdateCounts(new int[] {1,1,1}, stmt.executeBatch());
    assertTableRowCount("T1", 3);

    stmt.close();
    commit();
}
Example 6
Source File: DB.java From fastquery with Apache License 2.0
static int[] executeBatch(String sqlFile, String[] quotes, BiConsumer<Statement, String> consumer) {
    Connection conn = QueryContext.getConn();
    Statement stat = null;
    Stream<String> stream = parserSQLFile(sqlFile);
    try {
        QueryContext.disableAutoCommit();
        final Statement st = conn.createStatement();
        stat = st;
        stream.forEach(s -> {
            if (quotes != null) {
                int len = quotes.length;
                for (int i = 0; i < len; i++) {
                    String val = quotes[i];
                    if (val == null) {
                        val = "";
                    }
                    s = s.replaceAll("\\$\\[" + i + "]", val);
                }
            }
            consumer.accept(st, s);
        });
        int[] ints = stat.executeBatch();
        stat.clearBatch();
        QueryContext.commit();
        return ints;
    } catch (Exception e) {
        try {
            QueryContext.rollback();
        } catch (SQLException e1) {
            throw new RepositoryException(e1.getMessage(), e1);
        }
        throw new RepositoryException(e);
    } finally {
        close(null, stat);
    }
}
Example 7
Source File: BatchUpdateTest.java From spliceengine with GNU Affero General Public License v3.0
public void testCombinationsOfClearBatch() throws SQLException {
    Statement stmt = createStatement();

    println("Positive Statement: add 3 statements, clear and execute batch");
    stmt.addBatch("insert into t1 values(2)");
    stmt.addBatch("insert into t1 values(2)");
    stmt.addBatch("insert into t1 values(2)");
    stmt.clearBatch();
    // Batch should be cleared, there should be no update count
    assertBatchUpdateCounts(new int[0], stmt.executeBatch());
    assertTableRowCount("T1", 0);

    println("Positive Statement: add 3 statements, clear batch, " +
            "add 3 more statements and execute batch");
    stmt.addBatch("insert into t1 values(2)");
    stmt.addBatch("insert into t1 values(2)");
    stmt.addBatch("insert into t1 values(2)");
    stmt.clearBatch();
    stmt.addBatch("insert into t1 values(2)");
    stmt.addBatch("insert into t1 values(2)");
    stmt.addBatch("insert into t1 values(2)");
    assertBatchUpdateCounts(new int[] {1,1,1}, stmt.executeBatch());
    assertTableRowCount("T1", 3);

    stmt.close();
    commit();
}
Example 8
Source File: StatementIT.java From snowflake-jdbc with Apache License 2.0
@Test
public void testExecuteBatch() throws Exception {
    Connection connection = getConnection();
    Statement statement = connection.createStatement();
    connection.setAutoCommit(false);
    // mixed of ddl/dml in batch
    statement.addBatch("create or replace table test_batch(a string, b integer)");
    statement.addBatch("insert into test_batch values('str1', 1), ('str2', 2)");
    statement.addBatch("update test_batch set test_batch.b = src.b + 5 from " +
                       "(select 'str1' as a, 2 as b) src where test_batch.a = src.a");

    int[] rowCounts = statement.executeBatch();
    connection.commit();
    assertThat(rowCounts.length, is(3));
    assertThat(rowCounts[0], is(0));
    assertThat(rowCounts[1], is(2));
    assertThat(rowCounts[2], is(1));
    List<String> batchQueryIDs = statement.unwrap(SnowflakeStatement.class).getBatchQueryIDs();
    assertEquals(3, batchQueryIDs.size());
    assertEquals(statement.unwrap(SnowflakeStatement.class).getQueryID(), batchQueryIDs.get(2));

    ResultSet resultSet = statement.executeQuery("select * from test_batch order by b asc");
    resultSet.next();
    assertThat(resultSet.getInt("B"), is(2));
    resultSet.next();
    assertThat(resultSet.getInt("B"), is(7));
    statement.clearBatch();

    // one of the batch is query instead of ddl/dml
    // it should continuing processing
    try {
        statement.addBatch("insert into test_batch values('str3', 3)");
        statement.addBatch("select * from test_batch");
        statement.addBatch("select * from test_batch_not_exist");
        statement.addBatch("insert into test_batch values('str4', 4)");
        statement.executeBatch();
        fail();
    } catch (BatchUpdateException e) {
        rowCounts = e.getUpdateCounts();
        assertThat(e.getErrorCode(), is(ERROR_CODE_DOMAIN_OBJECT_DOES_NOT_EXIST));
        assertThat(rowCounts[0], is(1));
        assertThat(rowCounts[1], is(Statement.SUCCESS_NO_INFO));
        assertThat(rowCounts[2], is(Statement.EXECUTE_FAILED));
        assertThat(rowCounts[3], is(1));
        connection.rollback();
    }
    statement.clearBatch();

    statement.addBatch("put file://" + getFullPathFileInResource(TEST_DATA_FILE) +
                       " @%test_batch auto_compress=false");
    File tempFolder = tmpFolder.newFolder("test_downloads_folder");
    statement.addBatch("get @%test_batch file://" + tempFolder);

    rowCounts = statement.executeBatch();
    assertThat(rowCounts.length, is(2));
    assertThat(rowCounts[0], is(Statement.SUCCESS_NO_INFO));
    assertThat(rowCounts[0], is(Statement.SUCCESS_NO_INFO));
    statement.clearBatch();

    statement.execute("drop table if exists test_batch");
    statement.close();
    connection.close();
}
Example 9
Source File: BatchStatementTest.java From high-performance-java-persistence with Apache License 2.0
@Override
protected void onEnd(Statement statement) throws SQLException {
    int[] updateCount = statement.executeBatch();
    statement.clearBatch();
}
Example 10
Source File: BatchUpdateTest.java From spliceengine with GNU Affero General Public License v3.0
public void testTransactionErrorBatch() throws SQLException {

    // conn is just default connection
    Connection conn = getConnection();
    Connection conn2 = openDefaultConnection();
    conn.setAutoCommit(false);
    conn2.setAutoCommit(false);
    Statement stmt = conn.createStatement();
    Statement stmt2 = conn2.createStatement();

    int[] updateCount = null;

    println("Negative Statement: statement testing time out" +
            " while getting the lock in the batch");

    stmt.execute("insert into t1 values(1)");
    stmt2.execute("insert into t1 values(2)");

    stmt.addBatch("update t1 set c1=3 where c1=2");
    stmt2.addBatch("update t1 set c1=4 where c1=1");

    try {
        stmt.executeBatch();
        fail("Batch is expected to fail");
        updateCount = stmt2.executeBatch();
    } catch (BatchUpdateException bue) {
        /* Ensure the exception is time out while getting lock */
        if (usingEmbedded())
            assertSQLState("40XL1", bue);
        else if (usingDerbyNetClient())
            assertSQLState("XJ208", bue);

        updateCount = ((BatchUpdateException) bue).getUpdateCounts();

        if (updateCount != null) {
            if (usingEmbedded())
                assertEquals("first statement in the batch caused time out" +
                        " while getting the lock, there should be no update count",
                        0, updateCount.length);
            else if (usingDerbyNetClient())
                /* first statement in the batch caused time out while getting
                 * the lock, there should be 1 update count of -3 */
                assertBatchUpdateCounts(new int[] {-3}, updateCount);
        }
    }

    conn.rollback();
    conn2.rollback();

    stmt.clearBatch();
    stmt2.clearBatch();
    stmt.close();
    stmt2.close();
    commit();
    conn2.close();
}
Example 11
Source File: BatchUpdateOperator.java From ymate-platform-v2 with Apache License 2.0
@Override
protected int __doExecute() throws Exception {
    Statement _statement = null;
    AccessorEventContext _context = null;
    boolean _hasEx = false;
    try {
        IAccessor _accessor = new BaseAccessor(this.getAccessorConfig());
        if (StringUtils.isNotBlank(this.getSQL())) {
            _statement = _accessor.getPreparedStatement(this.getConnectionHolder().getConnection(), this.getSQL());
            //
            for (SQLBatchParameter _batchParam : this.__batchParameters) {
                for (int i = 0; i < _batchParam.getParameters().size(); i++) {
                    SQLParameter _param = _batchParam.getParameters().get(i);
                    if (_param.getValue() == null) {
                        ((PreparedStatement) _statement).setNull(i + 1, 0);
                    } else {
                        ((PreparedStatement) _statement).setObject(i + 1, _param.getValue());
                    }
                }
                ((PreparedStatement) _statement).addBatch();
            }
        } else {
            _statement = _accessor.getStatement(this.getConnectionHolder().getConnection());
        }
        //
        for (String _batchSQL : this.__batchSQL) {
            _statement.addBatch(_batchSQL);
        }
        //
        if (this.getAccessorConfig() != null) {
            this.getAccessorConfig().beforeStatementExecution(_context = new AccessorEventContext(_statement, Type.OPT.BATCH_UPDATE));
        }
        effectCounts = _statement.executeBatch();
        // accumulate the total number of affected records
        for (int _c : effectCounts) {
            effectCountsTotal += _c;
        }
        return effectCountsTotal;
    } catch (Exception ex) {
        _hasEx = true;
        throw ex;
    } finally {
        if (!_hasEx && this.getAccessorConfig() != null && _context != null) {
            this.getAccessorConfig().afterStatementExecution(_context);
        }
        if (_statement != null) {
            _statement.clearBatch();
            _statement.close();
        }
    }
}
Example 12
Source File: UnsupportedOperationStatementTest.java From shardingsphere with Apache License 2.0
@Test(expected = SQLFeatureNotSupportedException.class)
public void assertClearBatch() throws SQLException {
    for (Statement each : statements) {
        each.clearBatch();
    }
}
Example 13
Source File: StatementIT.java From snowflake-jdbc with Apache License 2.0
@Test
public void testExecuteLargeBatch() throws SQLException {
    Connection connection = getConnection();
    Statement statement = connection.createStatement();
    /**
     * Generate a table with several rows and 1 column named test_large_batch
     * Note: to truly test that executeLargeBatch works with a number of rows greater than MAX_INT, replace rowcount
     * => 15 in next code line with rowcount => 2147483648, or some other number larger than MAX_INT. Test will take
     * about 15 minutes to run.
     */
    statement.execute("create or replace table test_large_batch (a number) as (select * from (select 5 from table" +
                      "(generator(rowcount => 15)) v));");
    // update values in table so that all rows are updated
    statement.addBatch("update test_large_batch set a = 7 where a = 5;");
    long[] rowsUpdated = statement.executeLargeBatch();
    assertThat(rowsUpdated.length, is(1));
    long testVal = 15L;
    assertThat(rowsUpdated[0], is(testVal));
    statement.clearBatch();
    /**
     * To test SQLException for integer overflow when using executeBatch() for row updates of larger than MAX_INT,
     * uncomment the following lines of code. Test will take about 15 minutes to run.
     *
     * statement.execute("create or replace table test_large_batch (a number) as (select * from (select 5 from table" +
     * "(generator(rowcount => 2147483648)) v));");
     * statement.addBatch("update test_large_batch set a = 7 where a = 5;");
     * try
     * {
     *   int[] rowsUpdated = statement.executeBatch();
     *   fail();
     * }
     * catch (SnowflakeSQLException e)
     * {
     *   assertEquals((int) ErrorCode.EXECUTE_BATCH_INTEGER_OVERFLOW.getMessageCode(), e.getErrorCode());
     *   assertEquals(ErrorCode.EXECUTE_BATCH_INTEGER_OVERFLOW.getSqlState(), e.getSQLState());
     * }
     * statement.clearBatch();
     */
    statement.execute("drop table if exists test_large_batch");
    statement.close();
    connection.close();
}
Example 14
Source File: UnsupportedOperationStatementTest.java From sharding-jdbc-1.5.1 with Apache License 2.0
@Test(expected = SQLFeatureNotSupportedException.class)
public void assertClearBatch() throws SQLException {
    for (Statement each : statements) {
        each.clearBatch();
    }
}
Example 15
Source File: BatchITest.java From sql4es with Apache License 2.0
@Test
public void testAddingRemoving() throws SQLException {
    Statement st = DriverManager.getConnection("jdbc:sql4es://localhost:9300/" + index + "?test").createStatement();
    st.addBatch("CREATE TABLE monkey (_id \"type:keyword\")");
    st.addBatch("INSERT INTO monkey (myInt) VALUES (1)");
    st.addBatch("INSERT INTO monkey (myInt) VALUES (2), (3), (4)");
    st.clearBatch();
    int[] res = st.executeBatch();
    assertEquals(0, res.length);

    //st.executeUpdate("CREATE TABLE monkey (_id \"type:keyword\")");
    st.addBatch("INSERT INTO monkey (myInt) VALUES (1)");
    st.addBatch("INSERT INTO monkey (myInt) VALUES (2), (3), (4)");
    res = st.executeBatch();
    assertEquals(2, res.length);
    flush();
    refresh();
    ResultSet rs = st.executeQuery("SELECT myInt FROM monkey");
    int count = 0;
    while (rs.next()) {
        count++;
        assert (rs.getInt(1) >= 1 && rs.getInt(1) <= 4);
    }
    assertEquals(4, count);

    st.addBatch("INSERT INTO monkey (myInt) VALUES (5), (6)");
    st.addBatch("INSERT INTO monkey (myInt) VALUES (7), (8)");
    st.addBatch("DELETE FROM monkey WHERE myInt <= 2");
    // not possible to test myInt >= 5 because a flush is required
    res = st.executeBatch();
    assertEquals(3, res.length);
    flush();
    refresh();
    rs = st.executeQuery("SELECT myInt FROM monkey");
    count = 0;
    while (rs.next()) {
        count++;
        assert (rs.getInt(1) > 2);
    }
    assertEquals(6, count);

    st.addBatch("CREATE TABLE monkey2 (_id \"type:keyword\")");
    st.addBatch("INSERT INTO monkey2 (myInt) VALUES (1)");
    st.addBatch("INSERT INTO monkey2 (myInt) VALUES (2), (3), (4)");
    st.addBatch("DROP TABLE " + index);
    res = st.executeBatch();
    assertEquals(4, res.length);
    flush();
    refresh();
    try {
        rs = st.executeQuery("SELECT myInt FROM monkey2");
        assert (false);
    } catch (Exception e) {
    }
    st.close();
}
Example 16
Source File: BatchUpdateTest.java From gemfirexd-oss with Apache License 2.0
public void testTransactionErrorBatch() throws SQLException {

    // conn is just default connection
    Connection conn = getConnection();
    Connection conn2 = openDefaultConnection();
    conn.setAutoCommit(false);
    conn2.setAutoCommit(false);
    // GemStone changes BEGIN
    conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
    conn2.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
    // GemStone changes END
    Statement stmt = conn.createStatement();
    Statement stmt2 = conn2.createStatement();

    int[] updateCount = null;

    println("Negative Statement: statement testing time out" +
            " while getting the lock in the batch");

    stmt.execute("insert into t1 values(1)");
    stmt2.execute("insert into t1 values(2)");

    stmt.addBatch("update t1 set c1=3 where c1=2");
    stmt2.addBatch("update t1 set c1=4 where c1=1");

    try {
        stmt.executeBatch();
        fail("Batch is expected to fail");
        updateCount = stmt2.executeBatch();
    } catch (BatchUpdateException bue) {
        /* Ensure the exception is time out while getting lock */
        if (usingEmbedded())
            assertSQLState("40XL1", bue);
        else if (usingDerbyNetClient())
            assertSQLState("XJ208", bue);

        updateCount = ((BatchUpdateException) bue).getUpdateCounts();

        if (updateCount != null) {
            if (usingEmbedded())
                assertEquals("first statement in the batch caused time out" +
                        " while getting the lock, there should be no update count",
                        0, updateCount.length);
            else if (usingDerbyNetClient())
                /* first statement in the batch caused time out while getting
                 * the lock, there should be 1 update count of -3 */
                assertBatchUpdateCounts(new int[] {-3}, updateCount);
        }
    }

    conn.rollback();
    conn2.rollback();

    stmt.clearBatch();
    stmt2.clearBatch();
    stmt.close();
    stmt2.close();
    commit();
    conn2.close();
}
Example 17
Source File: SqlUtil.java From sagacity-sqltoy with Apache License 2.0
/**
 * @todo <b>Automatically apply a SQL file to the database</b>
 * @param conn
 * @param sqlContent
 * @param batchSize
 * @param autoCommit
 * @throws Exception
 */
public static void executeBatchSql(Connection conn, String sqlContent, Integer batchSize,
        Boolean autoCommit) throws Exception {
    String splitSign = DataSourceUtils.getDatabaseSqlSplitSign(conn);
    // strip comments from the sql
    sqlContent = SqlUtil.clearMark(sqlContent);
    if (splitSign.indexOf("go") != -1) {
        sqlContent = StringUtil.clearMistyChars(sqlContent, " ");
    }
    // sqlserver and sybase use "go" as the split sign; split the sql file into statements
    String[] statments = StringUtil.splitExcludeSymMark(sqlContent, splitSign, sqlCommentfilters);
    boolean hasSetAutoCommit = false;
    // whether to auto commit
    if (autoCommit != null && autoCommit.booleanValue() != conn.getAutoCommit()) {
        conn.setAutoCommit(autoCommit.booleanValue());
        hasSetAutoCommit = true;
    }
    Statement stat = null;
    try {
        stat = conn.createStatement();
        int meter = 0;
        int realBatch = (batchSize == null || batchSize.intValue() > 1) ? batchSize.intValue() : 100;
        int totalRows = statments.length;
        int i = 0;
        for (String sql : statments) {
            if (StringUtil.isNotBlank(sql)) {
                meter++;
                logger.debug("Executing batch sql: {}", sql);
                stat.addBatch(sql);
            }
            if ((meter % realBatch) == 0 || i + 1 == totalRows) {
                stat.executeBatch();
                stat.clearBatch();
            }
            i++;
        }
    } catch (SQLException e) {
        e.printStackTrace();
        throw e;
    } finally {
        if (stat != null) {
            stat.close();
            stat = null;
        }
    }
    // restore conn's original autoCommit value
    if (hasSetAutoCommit) {
        conn.setAutoCommit(!autoCommit);
    }
}
Example 18
Source File: IoTDBAuthorizationIT.java From incubator-iotdb with Apache License 2.0
@Test
@Ignore
public void authPerformanceTest() throws ClassNotFoundException, SQLException {
    Class.forName(Config.JDBC_DRIVER_NAME);
    Connection adminCon = DriverManager
            .getConnection(Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root");
    Statement adminStmt = adminCon.createStatement();

    adminStmt.execute("CREATE USER tempuser 'temppw'");
    adminStmt.execute("SET STORAGE GROUP TO root.a");
    int privilegeCnt = 500;
    for (int i = 0; i < privilegeCnt; i++) {
        adminStmt.execute("CREATE TIMESERIES root.a.b" + i + " WITH DATATYPE=INT32,ENCODING=PLAIN");
        adminStmt.execute("GRANT USER tempuser PRIVILEGES 'INSERT_TIMESERIES' ON root.a.b" + i);
    }

    Connection userCon = DriverManager
            .getConnection(Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "tempuser", "temppw");
    Statement userStmt = userCon.createStatement();

    int insertCnt = 20000;
    int batchSize = 500;
    long time;

    time = System.currentTimeMillis();
    for (int i = 0; i < insertCnt; ) {
        for (int j = 0; j < batchSize; j++) {
            userStmt.addBatch(
                    "INSERT INTO root.a(timestamp, b" + (privilegeCnt - 1) + ") VALUES (" + (i++ + 1) + ", 100)");
        }
        userStmt.executeBatch();
        userStmt.clearBatch();
    }
    if (logger.isInfoEnabled()) {
        logger.info("User inserted {} data points used {} ms with {} privileges.", insertCnt,
                System.currentTimeMillis() - time, privilegeCnt);
    }

    time = System.currentTimeMillis();
    for (int i = 0; i < insertCnt; ) {
        for (int j = 0; j < batchSize; j++) {
            adminStmt.addBatch(
                    "INSERT INTO root.a(timestamp, b0) VALUES (" + (i++ + 1 + insertCnt) + ", 100)");
        }
        adminStmt.executeBatch();
        adminStmt.clearBatch();
    }
    if (logger.isInfoEnabled()) {
        logger.info("User inserted {} data points used {} ms with {} privileges.", insertCnt,
                System.currentTimeMillis() - time, privilegeCnt);
    }

    adminCon.close();
    userCon.close();
}
Example 19
Source File: BatchTest.java From incubator-iotdb with Apache License 2.0
@SuppressWarnings("serial")
@Test
public void testExecuteBatchSQL1() throws SQLException, TException {
    Statement statement = connection.createStatement();
    statement.addBatch("sql1");
    resp = new TSStatus();
    resp = RpcUtils.getStatus(Collections.singletonList(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS)));
    when(client.executeBatchStatement(any(TSExecuteBatchStatementReq.class))).thenReturn(resp);
    int[] result = statement.executeBatch();
    assertEquals(1, result.length);

    List<TSStatus> resExpected = new ArrayList<TSStatus>() {
        {
            add(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS));
            add(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS));
            add(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS));
            add(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS));
            add(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS));
            add(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS));
            add(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS));
            add(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS));
            add(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS));
        }
    };
    resp.setSubStatus(resExpected);

    statement.clearBatch();
    statement.addBatch("SET STORAGE GROUP TO root.ln.wf01.wt01");
    statement.addBatch("CREATE TIMESERIES root.ln.wf01.wt01.status WITH DATATYPE=BOOLEAN, ENCODING=PLAIN");
    statement.addBatch("CREATE TIMESERIES root.ln.wf01.wt01.temperature WITH DATATYPE=FLOAT, ENCODING=RLE");
    statement.addBatch("insert into root.ln.wf01.wt01(timestamp,status) values(1509465600000,true)");
    statement.addBatch("insert into root.ln.wf01.wt01(timestamp,status) values(1509465660000,true)");
    statement.addBatch("insert into root.ln.wf01.wt01(timestamp,status) vvvvvv(1509465720000,false)");
    statement.addBatch("insert into root.ln.wf01.wt01(timestamp,temperature) values(1509465600000,25.957603)");
    statement.addBatch("insert into root.ln.wf01.wt01(timestamp,temperature) values(1509465660000,24.359503)");
    statement.addBatch("insert into root.ln.wf01.wt01(timestamp,temperature) vvvvvv(1509465720000,20.092794)");
    result = statement.executeBatch();
    assertEquals(resp.getSubStatus().size(), result.length);
    for (int i = 0; i < resp.getSubStatus().size(); i++) {
        assertEquals(resExpected.get(i).code, result[i]);
    }
    statement.clearBatch();
}