Java Code Examples for com.datastax.driver.core.ResultSetFuture#getUninterruptibly()
The following examples show how to use
com.datastax.driver.core.ResultSetFuture#getUninterruptibly().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: CassandraStorage.java From cassandra-reaper with Apache License 2.0 | 6 votes |
/**
 * Fetches the repair runs with the given ids and returns only those currently
 * in the requested state.
 *
 * <p>One async query is issued per run id, then the futures are drained
 * sequentially. Filtering happens while iterating the rows, so no intermediate
 * full collection plus stream pass is needed (the original built every run and
 * filtered afterwards).
 *
 * @param clusterRepairRunsId ids of the repair runs belonging to the cluster
 * @param runState the state the returned runs must be in
 * @return the matching repair runs, deduplicated in a set
 */
private Collection<? extends RepairRun> getRepairRunsWithStateForCluster(
    Collection<UUID> clusterRepairRunsId,
    RunState runState) {

  List<ResultSetFuture> futures = Lists.newArrayList();
  for (UUID repairRunId : clusterRepairRunsId) {
    futures.add(session.executeAsync(getRepairRunPrepStmt.bind(repairRunId)));
  }

  Collection<RepairRun> repairRuns = Sets.newHashSet();
  for (ResultSetFuture future : futures) {
    ResultSet repairRunResult = future.getUninterruptibly();
    for (Row row : repairRunResult) {
      RepairRun repairRun = buildRepairRunFromRow(row, row.getUUID("id"));
      // Filter as we go instead of materializing every run and streaming afterwards.
      if (repairRun.getRunState() == runState) {
        repairRuns.add(repairRun);
      }
    }
  }
  return repairRuns;
}
Example 2
Source File: CassandraTarget.java From datacollector with Apache License 2.0 | 6 votes |
/**
 * Blocks on every pending async Cassandra write and routes any write that
 * exceeds the configured timeout to the error record handler.
 *
 * <p>Iterates {@code entrySet()} directly instead of the original
 * {@code keySet()} + per-key {@code get()} lookup. The task map is cleared
 * once all futures have been drained.
 *
 * @param tasks pending write futures mapped to the record each one carries
 */
private void getTaskResult(Map<ResultSetFuture, Record> tasks) {
  for (Map.Entry<ResultSetFuture, Record> entry : tasks.entrySet()) {
    try {
      // Bounded wait: a write that outlives conf.writeTimeout is treated as an error record.
      entry.getKey().getUninterruptibly(conf.writeTimeout, TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
      LOG.debug(Errors.CASSANDRA_11.getMessage(), conf.writeTimeout, e);
      errorRecordHandler.onError(
          new OnRecordErrorException(
              entry.getValue(),
              Errors.CASSANDRA_11,
              conf.writeTimeout,
              e.toString(),
              e
          )
      );
    }
  }
  tasks.clear();
}
Example 3
Source File: CassandraStatement.java From cassandra-jdbc-wrapper with Apache License 2.0 | 6 votes |
/**
 * Executes every queued CQL statement asynchronously, then waits for each
 * future to complete before returning.
 *
 * <p>Each statement is sent with the connection's default consistency level.
 * Since the Cassandra driver does not report per-statement update counts,
 * every entry in the returned array is set to 1 once its statement completes.
 *
 * @return one count (always 1) per batched statement
 * @throws SQLException declared for JDBC compatibility
 */
public int[] executeBatch() throws SQLException {
    int batchSize = batchQueries.size();
    int[] updateCounts = new int[batchSize];
    List<ResultSetFuture> pending = new ArrayList<ResultSetFuture>(batchSize);

    if (logger.isTraceEnabled() || this.connection.debugMode) {
        logger.debug("CQL statements: " + batchSize);
    }

    // Fire off every statement without waiting, collecting the futures.
    for (String cql : batchQueries) {
        if (logger.isTraceEnabled() || this.connection.debugMode) {
            logger.debug("CQL: " + cql);
        }
        SimpleStatement statement = new SimpleStatement(cql);
        statement.setConsistencyLevel(this.connection.defaultConsistencyLevel);
        pending.add(this.connection.getSession().executeAsync(statement));
    }

    // Drain the futures in order; a failure propagates out of getUninterruptibly().
    for (int idx = 0; idx < pending.size(); idx++) {
        pending.get(idx).getUninterruptibly();
        updateCounts[idx] = 1;
    }
    return updateCounts;
}
Example 4
Source File: CaseController.java From skywalking with Apache License 2.0 | 5 votes |
/**
 * Runs the full async CRUD round trip: create keyspace and table, insert a
 * row, select it back, delete it, then drop the table and keyspace.
 *
 * <p>Every statement is submitted with {@code executeAsync} and awaited with
 * {@code getUninterruptibly()}; the repeated await-then-log sequence is
 * factored into {@link #awaitAndLog(String, ResultSetFuture)}. Log output is
 * identical to the previous inline version.
 *
 * @param session an open Cassandra session
 */
private void executeAsync(Session session) {
    logger.info("execute in async");

    awaitAndLog("CREATE KEYSPACE", session.executeAsync(CREATE_KEYSPACE_SQL));
    awaitAndLog("CREATE TABLE", session.executeAsync(CREATE_TABLE_SQL));

    PreparedStatement insertDataPreparedStatement = session.prepare(INSERT_DATA_SQL);
    awaitAndLog("INSERT", session.executeAsync(insertDataPreparedStatement.bind("101", "foobar")));

    // SELECT is the one step that inspects the returned row, so it stays inline.
    PreparedStatement selectDataPreparedStatement = session.prepare(SELECT_DATA_SQL);
    ResultSetFuture resultSetFuture = session.executeAsync(selectDataPreparedStatement.bind("101"));
    ResultSet resultSet = resultSetFuture.getUninterruptibly();
    Row row = resultSet.one();
    logger.info("SELECT result: id: {}, value: {}", row.getString("id"), row.getString("value"));

    PreparedStatement deleteDataPreparedStatement = session.prepare(DELETE_DATA_SQL);
    awaitAndLog("DELETE", session.executeAsync(deleteDataPreparedStatement.bind("101")));

    awaitAndLog("DROP TABLE", session.executeAsync(DROP_TABLE_SQL));
    awaitAndLog("DROP KEYSPACE", session.executeAsync(DROP_KEYSPACE));
}

/** Blocks on the future and logs its result under the given statement label. */
private void awaitAndLog(String label, ResultSetFuture future) {
    ResultSet result = future.getUninterruptibly();
    logger.info(label + " result: " + result.toString());
}
Example 5
Source File: CassandraStorage.java From cassandra-reaper with Apache License 2.0 | 4 votes |
/**
 * Retrieves generic metrics for a cluster, optionally restricted to a single
 * host, from {@code since} (epoch millis) up to now.
 *
 * <p>Metrics appear to be partitioned by hourly time bucket: the method
 * computes every hourly bucket string between {@code since} and now, issues
 * one async query per bucket (host-scoped or cluster-wide), then drains the
 * futures and builds {@link GenericMetric} objects from the rows.
 *
 * @param clusterName  cluster whose metrics are requested
 * @param host         if present, restrict results to this host
 * @param metricDomain metric domain bound into the prepared statement
 * @param metricType   metric type bound into the prepared statement
 * @param since        inclusive lower bound timestamp, epoch milliseconds
 * @return the matching metrics, in future-completion order
 */
@Override
public List<GenericMetric> getMetrics(
    String clusterName,
    Optional<String> host,
    String metricDomain,
    String metricType,
    long since) {
  List<GenericMetric> metrics = Lists.newArrayList();
  List<ResultSetFuture> futures = Lists.newArrayList();
  List<String> timeBuckets = Lists.newArrayList();
  long now = DateTime.now().getMillis();
  long startTime = since;

  // Compute the hourly buckets since the requested lower bound timestamp
  // (3600000 ms = 1 hour; bucket strings are rendered with HOURLY_FORMATTER).
  while (startTime < now) {
    timeBuckets.add(DateTime.now().withMillis(startTime).toString(HOURLY_FORMATTER));
    startTime += 3600000;
  }

  // One async query per bucket; the statement used depends on whether a host filter was given.
  for (String timeBucket:timeBuckets) {
    if (host.isPresent()) {
      //metric = ? and cluster = ? and time_bucket = ? and host = ? and ts >= ? and ts <= ?
      // NOTE(review): the comment above lists ts bounds but only 5 values are bound —
      // verify against the prepared statement's actual CQL.
      futures.add(
          session.executeAsync(
              getMetricsForHostPrepStmt.bind(
                  metricDomain, metricType, clusterName, timeBucket, host.get())));
    } else {
      futures.add(
          session.executeAsync(
              getMetricsForClusterPrepStmt.bind(
                  metricDomain, metricType, clusterName, timeBucket)));
    }
  }

  for (ResultSetFuture future : futures) {
    for (Row row : future.getUninterruptibly()) {
      // Filtering on the timestamp lower bound since it's not filtered in cluster wide metrics requests
      if (row.getTimestamp("ts").getTime() >= since) {
        metrics.add(
            GenericMetric.builder()
                .withClusterName(row.getString("cluster"))
                .withHost(row.getString("host"))
                .withMetricType(row.getString("metric_type"))
                .withMetricScope(row.getString("metric_scope"))
                .withMetricName(row.getString("metric_name"))
                .withMetricAttribute(row.getString("metric_attribute"))
                .withTs(new DateTime(row.getTimestamp("ts")))
                .withValue(row.getDouble("value"))
                .build());
      }
    }
  }
  return metrics;
}
Example 6
Source File: CassandraIndexer.java From newts with Apache License 2.0 | 4 votes |
/**
 * Indexes a batch of samples: collects the statements needed to index each
 * sample's resource, attributes and metric name, executes them asynchronously,
 * then merges the queued metadata into the cache.
 *
 * <p>Ordering invariant: the cache merge happens only after every Cassandra
 * write has completed successfully (an exception from getUninterruptibly()
 * skips the merge). {@code statementsInFlight} deduplicates statements being
 * written concurrently by other callers; entries added here are removed again
 * in the finally block whether or not the writes succeeded.
 *
 * @param samples the samples whose index entries should be (re)written
 */
@Override
public void update(Collection<Sample> samples) {
  Timer.Context ctx = m_updateTimer.time();  // timing stopped in the finally block

  Set<StatementGenerator> generators = Sets.newHashSet();
  Map<Context, Map<Resource, ResourceMetadata>> cacheQueue = Maps.newHashMap();

  // Gather the generators and pending cache updates for every sample.
  for (Sample sample : samples) {
    maybeIndexResource(cacheQueue, generators, sample.getContext(), sample.getResource());
    maybeIndexResourceAttributes(cacheQueue, generators, sample.getContext(), sample.getResource());
    maybeAddMetricName(cacheQueue, generators, sample.getContext(), sample.getResource(), sample.getName());
  }

  try {
    if (!generators.isEmpty()) {
      // Drop generators another thread is already writing, and claim the rest.
      synchronized(statementsInFlight) {
        generators.removeAll(statementsInFlight);
        statementsInFlight.addAll(generators);
      }
      m_inserts.mark(generators.size());

      // Asynchronously execute the statements
      List<ResultSetFuture> futures = Lists.newArrayList();
      for (Statement statementToExecute : toStatements(generators)) {
        futures.add(m_session.executeAsync(statementToExecute));
      }
      // Wait for every write; any failure propagates and skips the cache merge below.
      for (ResultSetFuture future : futures) {
        future.getUninterruptibly();
      }
    }

    // Order matters here; We want the cache updated only after a successful Cassandra write.
    for (Context context : cacheQueue.keySet()) {
      for (Map.Entry<Resource, ResourceMetadata> entry : cacheQueue.get(context).entrySet()) {
        m_cache.merge(context, entry.getKey(), entry.getValue());
      }
    }
  } finally {
    // Release our in-flight claims even on failure so retries are not blocked.
    synchronized(statementsInFlight) {
      statementsInFlight.removeAll(generators);
    }
    ctx.stop();
  }
}