Java Code Examples for com.datastax.driver.core.ResultSet#all()
The following examples show how to use com.datastax.driver.core.ResultSet#all(). Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
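Before looking at the project examples, here is a minimal usage sketch. It assumes an already-connected Session named session; the keyspace, table, and column names are purely illustrative.

// Minimal sketch (hypothetical keyspace/table/columns; assumes a connected Session named "session").
ResultSet rs = session.execute("SELECT user_id, user_name FROM demo_ks.users");
// all() pages through and materializes every remaining row in memory.
List<Row> rows = rs.all();
for (Row row : rows) {
  System.out.println(row.getString("user_id") + " -> " + row.getString("user_name"));
}

Because all() loads the entire result into a single list, it is convenient for tests and small result sets; for large results, iterating the ResultSet directly (which pages lazily) is usually preferable.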
Example 1
Source File: CqlMetaDaoImpl.java From staash with Apache License 2.0
private void LoadDbToTableMap() {
  ResultSet rs = session.execute("select column1 from " + MetaConstants.META_KEY_SPACE + "."
      + MetaConstants.META_COLUMN_FAMILY + " where key='com.test.entity.type.paas.table';");
  List<Row> rows = rs.all();
  for (Row row : rows) {
    String key = row.getString(0).split("\\.")[0];
    String table = row.getString(0).split("\\.")[1];
    List<String> currval = dbToTableMap.get(key);
    if (currval == null) {
      currval = new ArrayList<String>();
    }
    currval.add(table);
    dbToTableMap.put(key, currval);
  }
}
Example 2
Source File: AbstractUpsertOutputOperatorCodecsTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void testForSingleRowInsertWithOverridingConsistency() throws Exception {
  User aUser = new User();
  aUser.setUserid("userWithConsistency" + System.currentTimeMillis());
  FullName fullName = new FullName("first" + System.currentTimeMillis(), "last" + System.currentTimeMillis());
  aUser.setUsername(fullName);
  Address address = new Address("city21", "Street31", 12, null);
  aUser.setCurrentaddress(address);
  UpsertExecutionContext<User> anUpdate = new UpsertExecutionContext<>();
  anUpdate.setOverridingConsistencyLevel(ConsistencyLevel.LOCAL_SERIAL);
  anUpdate.setPayload(aUser);
  userUpsertOperator.beginWindow(8);
  userUpsertOperator.input.process(anUpdate);
  userUpsertOperator.endWindow();
  ResultSet results = userUpsertOperator.session.execute(
      "SELECT * FROM unittests.users WHERE userid = '" + aUser.getUserid() + "'");
  List<Row> rows = results.all();
  assertEquals(rows.size(), 1);
  assertTrue(results.isExhausted());
}
Example 3
Source File: CassandraMetadataDAO.java From conductor with Apache License 2.0
@SuppressWarnings("unchecked") @Override public List<WorkflowDef> getAllWorkflowDefs() { try { ResultSet resultSet = session.execute(selectAllWorkflowDefsStatement.bind(WORKFLOW_DEF_INDEX_KEY)); List<Row> rows = resultSet.all(); if (rows.size() == 0) { LOGGER.info("No workflow definitions were found."); return Collections.EMPTY_LIST; } return rows.stream() .map(row -> { String defNameVersion = row.getString(WORKFLOW_DEF_NAME_VERSION_KEY); String[] tokens = defNameVersion.split(INDEX_DELIMITER); return getWorkflowDef(tokens[0], Integer.parseInt(tokens[1])).orElse(null); }) .filter(Objects::nonNull) .collect(Collectors.toList()); } catch (Exception e) { Monitors.error(CLASS_NAME, "getAllWorkflowDefs"); String errorMsg = "Error retrieving all workflow defs"; LOGGER.error(errorMsg, e); throw new ApplicationException(Code.BACKEND_ERROR, errorMsg, e); } }
Example 4
Source File: CassandraConnectorITCase.java From flink with Apache License 2.0
@Test
public void testCassandraBatchRowFormat() throws Exception {
  OutputFormat<Row> sink = new CassandraRowOutputFormat(injectTableName(INSERT_DATA_QUERY), builder);
  try {
    sink.configure(new Configuration());
    sink.open(0, 1);
    for (Row value : rowCollection) {
      sink.writeRecord(value);
    }
  } finally {
    sink.close();
  }
  ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY));
  List<com.datastax.driver.core.Row> rows = rs.all();
  Assert.assertEquals(rowCollection.size(), rows.size());
}
Example 5
Source File: CassandraConnectorITCase.java From flink with Apache License 2.0
@Test
public void testCassandraScalaTupleAtLeastSink() throws Exception {
  CassandraScalaProductSink<scala.Tuple3<String, Integer, Integer>> sink =
      new CassandraScalaProductSink<>(injectTableName(INSERT_DATA_QUERY), builder);
  List<scala.Tuple3<String, Integer, Integer>> scalaTupleCollection = new ArrayList<>(20);
  for (int i = 0; i < 20; i++) {
    scalaTupleCollection.add(new scala.Tuple3<>(UUID.randomUUID().toString(), i, 0));
  }
  try {
    sink.open(new Configuration());
    for (scala.Tuple3<String, Integer, Integer> value : scalaTupleCollection) {
      sink.invoke(value, SinkContextUtil.forTimestamp(0));
    }
  } finally {
    sink.close();
  }
  ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY));
  List<com.datastax.driver.core.Row> rows = rs.all();
  Assert.assertEquals(scalaTupleCollection.size(), rows.size());
  for (com.datastax.driver.core.Row row : rows) {
    scalaTupleCollection.remove(new scala.Tuple3<>(row.getString("id"), row.getInt("counter"), row.getInt("batch_id")));
  }
  Assert.assertEquals(0, scalaTupleCollection.size());
}
Example 6
Source File: HttpPrimeQueryIntegrationTest.java From simulacron with Apache License 2.0
@Test
public void testQueryPrimeSimple() throws Exception {
  RequestPrime prime = HttpTestUtil.createSimplePrimedQuery("Select * FROM TABLE2");
  HttpTestResponse response = server.prime(prime);
  assertNotNull(response);
  RequestPrime responseQuery = om.readValue(response.body, RequestPrime.class);
  assertThat(responseQuery).isEqualTo(prime);
  String contactPoint = HttpTestUtil.getContactPointString(server.getCluster(), 0);
  ResultSet set = HttpTestUtil.makeNativeQuery("Select * FROM TABLE2", contactPoint);
  List<Row> results = set.all();
  assertThat(1).isEqualTo(results.size());
  Row row1 = results.get(0);
  String column1 = row1.getString("column1");
  assertThat(column1).isEqualTo("column1");
  Long column2 = row1.getLong("column2");
  assertThat(column2).isEqualTo(new Long(2));
}
Example 7
Source File: CassandraFactory.java From database-transform-tool with Apache License 2.0
/**
 * Description: query the database table names [not implemented].
 * Date: 2017-11-15 11:29:59 AM
 * @author yi.zhang
 * @return the list of tables
 */
@Deprecated
public List<String> queryTables() {
  try {
    List<String> tables = new ArrayList<String>();
    String useQuery = "describe tables";
    ResultSet rs = session.execute(useQuery);
    for (Row row : rs.all()) {
      String table = row.getString(1);
      tables.add(table);
    }
    return tables;
  } catch (Exception e) {
    // TODO Auto-generated catch block
    e.printStackTrace();
  }
  return null;
}
Example 8
Source File: CassandraConnectorITCase.java From flink with Apache License 2.0
@Test
public void testCassandraTableSink() throws Exception {
  StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
  env.setParallelism(4);
  StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
  DataStreamSource<Row> source = env.fromCollection(rowCollection);
  tEnv.registerDataStream("testFlinkTable", source);
  tEnv.registerTableSink(
      "cassandraTable",
      new CassandraAppendTableSink(builder, injectTableName(INSERT_DATA_QUERY)).configure(
          new String[]{"f0", "f1", "f2"},
          new TypeInformation[]{Types.STRING, Types.INT, Types.INT}
      ));
  tEnv.sqlQuery("select * from testFlinkTable").insertInto("cassandraTable");
  env.execute();
  ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY));
  // validate that all input was correctly written to Cassandra
  List<Row> input = new ArrayList<>(rowCollection);
  List<com.datastax.driver.core.Row> output = rs.all();
  for (com.datastax.driver.core.Row o : output) {
    Row cmp = new Row(3);
    cmp.setField(0, o.getString(0));
    cmp.setField(1, o.getInt(2));
    cmp.setField(2, o.getInt(1));
    Assert.assertTrue("Row " + cmp + " was written to Cassandra but not in input.", input.remove(cmp));
  }
  Assert.assertTrue("The input data was not completely written to Cassandra", input.isEmpty());
}
Example 9
Source File: CassandraConnectorITCase.java From flink with Apache License 2.0
@Test
public void testCassandraScalaTuplePartialColumnUpdate() throws Exception {
  CassandraSinkBaseConfig config = CassandraSinkBaseConfig.newBuilder().setIgnoreNullFields(true).build();
  CassandraScalaProductSink<scala.Tuple3<String, Integer, Integer>> sink =
      new CassandraScalaProductSink<>(injectTableName(INSERT_DATA_QUERY), builder, config);
  String id = UUID.randomUUID().toString();
  Integer counter = 1;
  Integer batchId = 0;
  // Send partial records across multiple requests
  scala.Tuple3<String, Integer, Integer> scalaTupleRecordFirst = new scala.Tuple3<>(id, counter, null);
  scala.Tuple3<String, Integer, Integer> scalaTupleRecordSecond = new scala.Tuple3<>(id, null, batchId);
  try {
    sink.open(new Configuration());
    sink.invoke(scalaTupleRecordFirst, SinkContextUtil.forTimestamp(0));
    sink.invoke(scalaTupleRecordSecond, SinkContextUtil.forTimestamp(0));
  } finally {
    sink.close();
  }
  ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY));
  List<com.datastax.driver.core.Row> rows = rs.all();
  Assert.assertEquals(1, rows.size());
  // Since nulls are ignored, we should be reading one complete record
  for (com.datastax.driver.core.Row row : rows) {
    Assert.assertEquals(new scala.Tuple3<>(id, counter, batchId),
        new scala.Tuple3<>(row.getString("id"), row.getInt("counter"), row.getInt("batch_id")));
  }
}
Example 10
Source File: HttpContainerIntegrationTest.java From simulacron with Apache License 2.0
private void testVerifyQueryParticularCluster(Function<ClusterSpec, String> f) {
  HttpClient client = vertx.createHttpClient();
  ClusterSpec clusterQueried = this.createMultiNodeCluster(client, "3,3");
  ClusterSpec clusterUnused = this.createMultiNodeCluster(client, "3,3");
  String query = "Select * FROM TABLE2_" + clusterQueried.getName();
  RequestPrime prime = createSimplePrimedQuery(query);
  HttpTestResponse response = this.primeSimpleRequest(client, prime, "/prime" + "/" + f.apply(clusterQueried));
  Iterator<NodeSpec> nodeIteratorQueried = clusterQueried.getNodes().iterator();
  Iterator<NodeSpec> nodeIteratorUnused = clusterUnused.getNodes().iterator();
  while (nodeIteratorQueried.hasNext()) {
    NodeSpec node = nodeIteratorQueried.next();
    String contactPoint = HttpTestUtil.getContactPointStringByNodeID(node);
    ResultSet set = HttpTestUtil.makeNativeQuery(query, contactPoint);
    List<Row> results = set.all();
    assertThat(1).isEqualTo(results.size());
  }
  while (nodeIteratorUnused.hasNext()) {
    String contactPointUnused = HttpTestUtil.getContactPointStringByNodeID(nodeIteratorUnused.next());
    ResultSet setUnused = HttpTestUtil.makeNativeQuery(query, contactPointUnused);
    List<Row> resultsUnused = setUnused.all();
    assertThat(0).isEqualTo(resultsUnused.size());
  }
}
Example 11
Source File: CassandraConnectorITCase.java From flink with Apache License 2.0
@Test
public void testCassandraTableSink() throws Exception {
  StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
  env.setParallelism(4);
  StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
  DataStreamSource<Row> source = env.fromCollection(rowCollection);
  tEnv.createTemporaryView("testFlinkTable", source);
  ((TableEnvironmentInternal) tEnv).registerTableSinkInternal(
      "cassandraTable",
      new CassandraAppendTableSink(builder, injectTableName(INSERT_DATA_QUERY)).configure(
          new String[]{"f0", "f1", "f2"},
          new TypeInformation[]{Types.STRING, Types.INT, Types.INT}
      ));
  TableResult tableResult = tEnv.sqlQuery("select * from testFlinkTable").executeInsert("cassandraTable");
  tableResult.getJobClient().get().getJobExecutionResult(Thread.currentThread().getContextClassLoader()).get();
  ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY));
  // validate that all input was correctly written to Cassandra
  List<Row> input = new ArrayList<>(rowCollection);
  List<com.datastax.driver.core.Row> output = rs.all();
  for (com.datastax.driver.core.Row o : output) {
    Row cmp = new Row(3);
    cmp.setField(0, o.getString(0));
    cmp.setField(1, o.getInt(2));
    cmp.setField(2, o.getInt(1));
    Assert.assertTrue("Row " + cmp + " was written to Cassandra but not in input.", input.remove(cmp));
  }
  Assert.assertTrue("The input data was not completely written to Cassandra", input.isEmpty());
}
Example 12
Source File: TestCassandraTarget.java From datacollector with Apache License 2.0
@Test
public void testWriteSingleRecord() throws InterruptedException, StageException {
  final String tableName = "test.trips";
  List<CassandraFieldMappingConfig> fieldMappings = ImmutableList.of(
      new CassandraFieldMappingConfig("[0]", "driver_id"),
      new CassandraFieldMappingConfig("[1]", "trip_id"),
      new CassandraFieldMappingConfig("[2]", "time"),
      new CassandraFieldMappingConfig("[3]", "x"),
      new CassandraFieldMappingConfig("[4]", "y"),
      new CassandraFieldMappingConfig("[5]", "dt"),
      new CassandraFieldMappingConfig("[6]", "ts"),
      new CassandraFieldMappingConfig("[7]", "time_id"),
      new CassandraFieldMappingConfig("[8]", "unique_id")
  );
  CassandraTargetConfig conf = new CassandraTargetConfig();
  conf.contactPoints.add(cassandra.getContainerIpAddress());
  conf.port = cassandra.getMappedPort(CASSANDRA_NATIVE_PORT);
  conf.protocolVersion = ProtocolVersion.V4;
  conf.authProviderOption = AuthProviderOption.NONE;
  conf.compression = CassandraCompressionCodec.NONE;
  conf.columnNames = fieldMappings;
  conf.qualifiedTableName = tableName;
  Target target = new CassandraTarget(conf);
  TargetRunner targetRunner = new TargetRunner.Builder(CassandraDTarget.class, target).build();
  long now = System.currentTimeMillis();
  LocalDate dt = LocalDate.fromMillisSinceEpoch(now);
  Date ts = new Date();
  Record record = RecordCreator.create();
  List<Field> fields = new ArrayList<>();
  fields.add(Field.create(1));
  fields.add(Field.create(2));
  fields.add(Field.create(3));
  fields.add(Field.create(4.0));
  fields.add(Field.create(5.0));
  fields.add(Field.create(Field.Type.DATE, new Date(dt.getMillisSinceEpoch())));
  fields.add(Field.create(Field.Type.DATETIME, ts));
  fields.add(Field.create(SAMPLE_TIMEUUID));
  fields.add(Field.create(SAMPLE_UUID));
  record.set(Field.create(fields));
  List<Record> singleRecord = ImmutableList.of(record);
  targetRunner.runInit();
  targetRunner.runWrite(singleRecord);
  // Should not be any error records.
  Assert.assertTrue(targetRunner.getErrorRecords().isEmpty());
  Assert.assertTrue(targetRunner.getErrors().isEmpty());
  targetRunner.runDestroy();
  ResultSet resultSet = session.execute("SELECT * FROM test.trips");
  List<Row> allRows = resultSet.all();
  Assert.assertEquals(1, allRows.size());
  Row row = allRows.get(0);
  Assert.assertEquals(1, row.getInt("driver_id"));
  Assert.assertEquals(2, row.getInt("trip_id"));
  Assert.assertEquals(3, row.getInt("time"));
  Assert.assertEquals(4.0, row.getDouble("x"), EPSILON);
  Assert.assertEquals(5.0, row.getDouble("y"), EPSILON);
  Assert.assertEquals(dt, row.getDate("dt"));
  Assert.assertEquals(ts, row.getTimestamp("ts"));
  Assert.assertEquals(SAMPLE_TIMEUUID, row.getUUID("time_id").toString());
  Assert.assertEquals(SAMPLE_UUID, row.getUUID("unique_id").toString());
}
Example 13
Source File: CassandraIOTest.java From beam with Apache License 2.0
private List<Row> getRows(String table) {
  ResultSet result =
      session.execute(
          String.format("select person_id,person_name from %s.%s", CASSANDRA_KEYSPACE, table));
  return result.all();
}
Example 14
Source File: CassandraTimeseries.java From yb-sample-apps with Apache License 2.0
@Override
public long doRead() {
  // Pick a random data source.
  DataSource dataSource = dataSources.get(random.nextInt(dataSources.size()));
  // Make sure it has emitted data, otherwise there is nothing to read.
  if (!dataSource.getHasEmittedData()) {
    return 0;
  }
  long startTs = dataSource.getStartTs();
  long endTs = dataSource.getEndTs();
  // Bind the select statement.
  BoundStatement select = getPreparedSelect().bind()
      .setString("userId", dataSource.getUserId())
      .setString("nodeId", dataSource.getNodeId())
      .setString("metricId", dataSource.getRandomMetricId())
      .setTimestamp("startTs", new Date(startTs))
      .setTimestamp("endTs", new Date(endTs));
  // Make the query.
  ResultSet rs = getCassandraClient().execute(select);
  List<Row> rows = rs.all();
  // TODO: there is still a verification bug that needs to be tracked down.
  // If the load tester is not able to keep up, data verification will be turned off.
  // int expectedNumDataPoints = dataSource.getExpectedNumDataPoints(startTs, endTs);
  // if (expectedNumDataPoints == -1 && !verificationDisabled.get()) {
  //   verificationDisabled.set(true);
  //   long writeLag = dataSource.getWriteLag();
  //   if (maxWriteLag.get() < writeLag) {
  //     maxWriteLag.set(writeLag);
  //   }
  // }
  // // If the load tester is able to keep up, we may end up inserting the latest data point a
  // // little after the timestamp it denotes. This causes that data point to expire a little later
  // // than the timestamp it denotes, causing some unpredictability on when the last data point
  // // will expire. To get over this, we allow for a fuzzy match on the number of results
  // // returned.
  // if (expectedNumDataPoints > -1 && Math.abs(rows.size() - expectedNumDataPoints) > 1) {
  //   StringBuilder sb = new StringBuilder();
  //   for (Row row : rows) {
  //     sb.append(row.toString() + " | ");
  //   }
  //   LOG.warn("Read " + rows.size() + " data points from DB, expected " +
  //       expectedNumDataPoints + " data points, query [" + select_stmt + "], " +
  //       "results from DB: { " + sb.toString() + " }, " +
  //       "debug info: " + dataSource.printDebugInfo(startTs, endTs));
  // }
  return 1;
}
Example 15
Source File: AbstractUpsertOutputOperatorCodecsTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void testForCollectionRemovalAndIfExists() throws Exception {
  User aUser = new User();
  String userId = "user" + System.currentTimeMillis();
  aUser.setUserid(userId);
  FullName fullName = new FullName("first12" + System.currentTimeMillis(), "last12" + System.currentTimeMillis());
  aUser.setUsername(fullName);
  Set<String> emails = new HashSet<>();
  emails.add(new String("1"));
  emails.add(new String("2"));
  aUser.setEmails(emails);
  UpsertExecutionContext<User> originalEntry = new UpsertExecutionContext<>();
  originalEntry.setPayload(aUser);
  UpsertExecutionContext<User> subsequentUpdateForEmails = new UpsertExecutionContext<>();
  subsequentUpdateForEmails.setCollectionMutationStyle(
      UpsertExecutionContext.CollectionMutationStyle.REMOVE_FROM_EXISTING_COLLECTION);
  subsequentUpdateForEmails.setNullHandlingMutationStyle(
      UpsertExecutionContext.NullHandlingMutationStyle.IGNORE_NULL_COLUMNS);
  subsequentUpdateForEmails.setUpdateOnlyIfPrimaryKeyExists(true);
  User oldUser = new User();
  oldUser.setUserid(userId + System.currentTimeMillis()); // overriding with a non-existent user id
  Set<String> updatedEmails = new HashSet<>();
  updatedEmails.add(new String("1"));
  oldUser.setEmails(updatedEmails);
  subsequentUpdateForEmails.setPayload(oldUser);
  userUpsertOperator.beginWindow(4);
  userUpsertOperator.input.process(originalEntry);
  userUpsertOperator.input.process(subsequentUpdateForEmails);
  userUpsertOperator.endWindow();
  ResultSet results = userUpsertOperator.session.execute(
      "SELECT * FROM unittests.users WHERE userid = '" + userId + "'");
  List<Row> rows = results.all();
  Row userRow = rows.get(0);
  Set<String> existingEmailsEntry = userRow.getSet("emails", String.class);
  assertEquals(2, existingEmailsEntry.size());
  assertEquals("" + 1, "" + existingEmailsEntry.iterator().next());
}
Example 16
Source File: AbstractUpsertOutputOperatorCodecsTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void testForListAppendAndIfExists() throws Exception {
  User aUser = new User();
  String userId = "user" + System.currentTimeMillis();
  aUser.setUserid(userId);
  FullName fullName = new FullName("first" + System.currentTimeMillis(), "last" + System.currentTimeMillis());
  aUser.setUsername(fullName);
  Address address = new Address("street1", "city1", 13, null);
  aUser.setCurrentaddress(address);
  Set<String> emails = new HashSet<>();
  emails.add(new String("1"));
  emails.add(new String("2"));
  aUser.setEmails(emails);
  List<Integer> topScores = new ArrayList<>();
  topScores.add(1);
  topScores.add(2);
  aUser.setTopScores(topScores);
  UpsertExecutionContext<User> originalEntry = new UpsertExecutionContext<>();
  originalEntry.setPayload(aUser);
  UpsertExecutionContext<User> subsequentUpdateForTopScores = new UpsertExecutionContext<>();
  subsequentUpdateForTopScores.setListPlacementStyle(
      UpsertExecutionContext.ListPlacementStyle.APPEND_TO_EXISTING_LIST);
  subsequentUpdateForTopScores.setCollectionMutationStyle(
      UpsertExecutionContext.CollectionMutationStyle.ADD_TO_EXISTING_COLLECTION);
  subsequentUpdateForTopScores.setNullHandlingMutationStyle(
      UpsertExecutionContext.NullHandlingMutationStyle.IGNORE_NULL_COLUMNS);
  subsequentUpdateForTopScores.setUpdateOnlyIfPrimaryKeyExists(true);
  User oldUser = new User();
  oldUser.setUserid(userId + System.currentTimeMillis());
  List<Integer> topScoresAppended = new ArrayList<>();
  topScoresAppended.add(3);
  oldUser.setTopScores(topScoresAppended);
  subsequentUpdateForTopScores.setPayload(oldUser);
  userUpsertOperator.beginWindow(5);
  userUpsertOperator.input.process(originalEntry);
  userUpsertOperator.input.process(subsequentUpdateForTopScores);
  userUpsertOperator.endWindow();
  ResultSet results = userUpsertOperator.session.execute(
      "SELECT * FROM unittests.users WHERE userid = '" + userId + "'");
  List<Row> rows = results.all();
  Row userRow = rows.get(0);
  List<Integer> topScoresEntry = userRow.getList("top_scores", Integer.class);
  assertEquals(2, topScoresEntry.size());
  assertEquals("" + 2, "" + topScoresEntry.get(1));
}
Example 17
Source File: HttpPrimeQueryIntegrationTest.java From simulacron with Apache License 2.0
@Test
public void testQueryPrimeNamedParamSimple() throws Exception {
  HashMap<String, String> paramTypes = new HashMap<>();
  paramTypes.put("id", "bigint");
  paramTypes.put("id2", "bigint");
  HashMap<String, Object> params = new HashMap<>();
  params.put("id", new Long(1));
  params.put("id2", new Long(2));
  RequestPrime prime = HttpTestUtil.createSimpleParameterizedQuery(
      "SELECT * FROM users WHERE id = :id and id2 = :id2", params, paramTypes);
  HttpTestResponse response = server.prime(prime);
  assertNotNull(response);
  RequestPrime responseQuery = om.readValue(response.body, RequestPrime.class);
  assertThat(responseQuery).isEqualTo(prime);
  Map<String, Object> values = ImmutableMap.<String, Object>of("id", new Long(1), "id2", new Long(2));
  String contactPoint = HttpTestUtil.getContactPointString(server.getCluster(), 0);
  ResultSet set = HttpTestUtil.makeNativeQueryWithNameParams(
      "SELECT * FROM users WHERE id = :id and id2 = :id2", contactPoint, values);
  List<Row> results = set.all();
  assertThat(1).isEqualTo(results.size());
  Row row1 = results.get(0);
  String column1 = row1.getString("column1");
  assertThat(column1).isEqualTo("column1");
  Long column2 = row1.getLong("column2");
  assertThat(column2).isEqualTo(new Long(2));
  // Try with the wrong values
  values = ImmutableMap.<String, Object>of("id", new Long(2), "id2", new Long(2));
  set = HttpTestUtil.makeNativeQueryWithNameParams(
      "SELECT * FROM users WHERE id = :id and id2 = :id2", contactPoint, values);
  assertThat(set.all().size()).isEqualTo(0);
  // Try with the wrong number of values
  values = ImmutableMap.<String, Object>of("id", new Long(2));
  set = HttpTestUtil.makeNativeQueryWithNameParams(
      "SELECT * FROM users WHERE id = :id and id2 = :id2", contactPoint, values);
  assertThat(set.all().size()).isEqualTo(0);
  // No values
  values = ImmutableMap.<String, Object>of();
  set = HttpTestUtil.makeNativeQueryWithNameParams(
      "SELECT * FROM users WHERE id = :id and id2 = :id2", contactPoint, values);
  assertThat(set.all().size()).isEqualTo(0);
}
Example 18
Source File: TestCassandraTarget.java From datacollector with Apache License 2.0
@Test
public void testWriteRecordsOnErrorDiscard() throws Exception {
  final String tableName = "test.trips";
  List<CassandraFieldMappingConfig> fieldMappings = ImmutableList.of(
      new CassandraFieldMappingConfig("[0]", "driver_id"),
      new CassandraFieldMappingConfig("[1]", "trip_id"),
      new CassandraFieldMappingConfig("[2]", "time"),
      new CassandraFieldMappingConfig("[3]", "x"),
      new CassandraFieldMappingConfig("[4]", "y"),
      new CassandraFieldMappingConfig("[5]", "time_id"),
      new CassandraFieldMappingConfig("[6]", "unique_id")
  );
  CassandraTargetConfig conf = new CassandraTargetConfig();
  conf.contactPoints.add(cassandra.getContainerIpAddress());
  conf.port = cassandra.getMappedPort(CASSANDRA_NATIVE_PORT);
  conf.protocolVersion = ProtocolVersion.V4;
  conf.authProviderOption = AuthProviderOption.NONE;
  conf.compression = CassandraCompressionCodec.NONE;
  conf.columnNames = fieldMappings;
  conf.qualifiedTableName = tableName;
  Target target = new CassandraTarget(conf);
  TargetRunner targetRunner = new TargetRunner.Builder(CassandraDTarget.class, target)
      .setOnRecordError(OnRecordError.DISCARD)
      .build();
  Record record = RecordCreator.create();
  List<Field> fields = new ArrayList<>();
  fields.add(Field.create(1.3));
  fields.add(Field.create(2));
  fields.add(Field.create(3));
  fields.add(Field.create(4.0));
  fields.add(Field.create(5.0));
  fields.add(Field.create(SAMPLE_TIMEUUID));
  fields.add(Field.create(SAMPLE_UUID));
  record.set(Field.create(fields));
  List<Record> singleRecord = ImmutableList.of(record);
  targetRunner.runInit();
  targetRunner.runWrite(singleRecord);
  // Should not be any error records if we are discarding.
  Assert.assertTrue(targetRunner.getErrorRecords().isEmpty());
  Assert.assertTrue(targetRunner.getErrors().isEmpty());
  targetRunner.runDestroy();
  ResultSet resultSet = session.execute("SELECT * FROM test.trips");
  List<Row> allRows = resultSet.all();
  Assert.assertEquals(0, allRows.size());
}
Example 19
Source File: TestCassandraTarget.java From datacollector with Apache License 2.0
@Test
public void testInternalSubBatching() throws Exception {
  final String tableName = "test.trips";
  List<CassandraFieldMappingConfig> fieldMappings = ImmutableList.of(
      new CassandraFieldMappingConfig("[0]", "driver_id"),
      new CassandraFieldMappingConfig("[1]", "trip_id"),
      new CassandraFieldMappingConfig("[2]", "time"),
      new CassandraFieldMappingConfig("[3]", "x"),
      new CassandraFieldMappingConfig("[4]", "y"),
      new CassandraFieldMappingConfig("[5]", "time_id"),
      new CassandraFieldMappingConfig("[6]", "unique_id")
  );
  CassandraTargetConfig conf = new CassandraTargetConfig();
  conf.contactPoints.add(cassandra.getContainerIpAddress());
  conf.port = cassandra.getMappedPort(CASSANDRA_NATIVE_PORT);
  conf.protocolVersion = ProtocolVersion.V4;
  conf.authProviderOption = AuthProviderOption.NONE;
  conf.compression = CassandraCompressionCodec.NONE;
  conf.columnNames = fieldMappings;
  conf.qualifiedTableName = tableName;
  conf.maxBatchSize = 35;
  Target target = new CassandraTarget(conf);
  TargetRunner targetRunner = new TargetRunner.Builder(CassandraDTarget.class, target).build();
  List<Record> records = new ArrayList<>();
  for (int i = 0; i < 1000; i++) {
    Record record = RecordCreator.create();
    List<Field> fields = new ArrayList<>();
    fields.add(Field.create(i));
    fields.add(Field.create(2));
    fields.add(Field.create(3));
    fields.add(Field.create(4.0));
    fields.add(Field.create(5.0));
    fields.add(Field.create(SAMPLE_TIMEUUID));
    fields.add(Field.create(SAMPLE_UUID));
    record.set(Field.create(fields));
    records.add(record);
  }
  targetRunner.runInit();
  targetRunner.runWrite(records);
  // Should not be any error records.
  Assert.assertTrue(targetRunner.getErrorRecords().isEmpty());
  Assert.assertTrue(targetRunner.getErrors().isEmpty());
  targetRunner.runDestroy();
  // simple verification that there are as many records as expected
  ResultSet resultSet = session.execute("SELECT * FROM test.trips");
  List<Row> allRows = resultSet.all();
  Assert.assertEquals(1000, allRows.size());
}