com.datastax.driver.core.querybuilder.Batch Java Examples
The following examples show how to use
com.datastax.driver.core.querybuilder.Batch.
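Before diving into the examples, here is a minimal sketch of the basic pattern they all build on: create a Batch with QueryBuilder, add built statements, and execute it on a Session. The contact point, keyspace, and table names below are placeholders, not taken from any of the examples.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Batch;
import com.datastax.driver.core.querybuilder.QueryBuilder;

public class BatchSketch {
    public static void main(String[] args) {
        // Placeholder contact point and schema; adjust for a real cluster.
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
             Session session = cluster.connect("my_keyspace")) {
            Batch batch = QueryBuilder.batch();
            batch.add(QueryBuilder.insertInto("users").value("id", 1).value("name", "alice"));
            batch.add(QueryBuilder.insertInto("users").value("id", 2).value("name", "bob"));
            session.execute(batch); // both inserts run as a single logged batch
        }
    }
}

QueryBuilder.batch() produces a logged batch; QueryBuilder.unloggedBatch() skips the batch log, as Examples #2 and #3 below illustrate.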
Example #1
Source File: CassandraSessionImpl.java From ignite with Apache License 2.0
/**
 * Tunes CQL statement execution options (consistency level, fetch size, etc.).
 *
 * @param statement Statement.
 * @return Modified statement.
 */
private Statement tuneStatementExecutionOptions(Statement statement) {
    String qry = "";

    if (statement instanceof BoundStatement)
        qry = ((BoundStatement)statement).preparedStatement().getQueryString().trim().toLowerCase();
    else if (statement instanceof PreparedStatement)
        qry = ((PreparedStatement)statement).getQueryString().trim().toLowerCase();

    boolean readStatement = qry.startsWith("select");
    boolean writeStatement = statement instanceof Batch || statement instanceof BatchStatement ||
        qry.startsWith("insert") || qry.startsWith("delete") || qry.startsWith("update");

    if (readStatement && readConsistency != null)
        statement.setConsistencyLevel(readConsistency);

    if (writeStatement && writeConsistency != null)
        statement.setConsistencyLevel(writeConsistency);

    if (fetchSize != null)
        statement.setFetchSize(fetchSize);

    return statement;
}
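Since Batch extends Statement, the execution options the Ignite helper above sets can also be applied directly to a batch. A hedged sketch; the QUORUM level and table name are illustrative, not taken from the Ignite source:

import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Batch;
import com.datastax.driver.core.querybuilder.QueryBuilder;

class TuningSketch {
    static void writeWithQuorum(Session session) {
        // Illustrative table and values; Batch inherits the setters from Statement.
        Batch batch = QueryBuilder.batch(
                QueryBuilder.insertInto("events").value("id", 42).value("payload", "x"));
        batch.setConsistencyLevel(ConsistencyLevel.QUORUM); // write-side consistency
        session.execute(batch);
    }
}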
Example #2
Source File: MetricCassandraCollector.java From realtime-analytics with GNU General Public License v2.0
private void runBatchInsert(List<Insert> insertRequest) {
    try {
        Batch batch;
        if (config.getLoggedBatch()) {
            batch = QueryBuilder.batch(insertRequest
                    .toArray(new RegularStatement[insertRequest.size()]));
        } else {
            batch = QueryBuilder.unloggedBatch(insertRequest
                    .toArray(new RegularStatement[insertRequest.size()]));
        }

        totalCassandraInsertRequest.addAndGet(insertRequest.size());
        ResultSetFuture future = cassandraSession.executeAsync(batch);
        CallBackListener listener = new CallBackListener(future, null);
        future.addListener(listener, pool);
        incrementBatchInsertCounter();
        pendingRequestCounter.incrementAndGet();
    } catch (Throwable ex) {
        LOGGER.error("Error publishing metrics in MetricCassandraCollector:" + ex.getMessage());
        cassandraErrorCount.increment();
        registerError(ex);
    } finally {
        insertRequest.clear();
    }
}
Example #3
Source File: MetricCassandraCollector.java From realtime-analytics with GNU General Public License v2.0
private void runBatchUpdate(List<Update> updateRequest) {
    try {
        Batch batch;
        if (config.getLoggedBatch()) {
            batch = QueryBuilder.batch(updateRequest
                    .toArray(new RegularStatement[updateRequest.size()]));
        } else {
            batch = QueryBuilder.unloggedBatch(updateRequest
                    .toArray(new RegularStatement[updateRequest.size()]));
        }

        totalCassandraUpdateRequest.addAndGet(updateRequest.size());
        ResultSetFuture future = cassandraSession.executeAsync(batch);
        CallBackListener listener = new CallBackListener(future, null);
        future.addListener(listener, pool);
        incrementBatchUpdateCounter();
        pendingRequestCounter.incrementAndGet();
    } catch (Throwable ex) {
        LOGGER.error("Error publishing metrics in MetricCassandraCollector:" + ex.getMessage());
        cassandraErrorCount.increment();
        registerError(ex);
    } finally {
        updateRequest.clear();
    }
}
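Examples #2 and #3 differ only in the statement type they batch; the logged/unlogged decision could be factored into a helper like the sketch below. A logged batch goes through Cassandra's batch log for atomicity, while an unlogged batch skips that overhead. The method name and signature here are hypothetical:

import java.util.List;
import com.datastax.driver.core.RegularStatement;
import com.datastax.driver.core.querybuilder.Batch;
import com.datastax.driver.core.querybuilder.QueryBuilder;

final class BatchFactory {
    // Hypothetical helper wrapping the logged/unlogged choice seen in Examples #2 and #3.
    static Batch toBatch(List<? extends RegularStatement> statements, boolean logged) {
        RegularStatement[] array = statements.toArray(new RegularStatement[0]);
        return logged ? QueryBuilder.batch(array) : QueryBuilder.unloggedBatch(array);
    }
}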
Example #4
Source File: CassandraUtils.java From deep-spark with Apache License 2.0
public static <W> void doCql3SaveToCassandra(RDD<W> rdd, ICassandraDeepJobConfig<W> writeConfig,
        Function1<W, Tuple2<Cells, Cells>> transformer) {
    if (!writeConfig.getIsWriteConfig()) {
        throw new IllegalArgumentException("Provided configuration object is not suitable for writing");
    }

    Tuple2<Map<String, ByteBuffer>, Map<String, ByteBuffer>> tuple = new Tuple2<>(null, null);

    RDD<Tuple2<Cells, Cells>> mappedRDD = rdd.map(transformer,
            ClassTag$.MODULE$.<Tuple2<Cells, Cells>>apply(tuple.getClass()));

    ((CassandraDeepJobConfig) writeConfig).createOutputTableIfNeeded(mappedRDD.first());

    final int pageSize = writeConfig.getBatchSize();
    int offset = 0;

    List<Tuple2<Cells, Cells>> elements = Arrays.asList((Tuple2<Cells, Cells>[]) mappedRDD.collect());
    List<Tuple2<Cells, Cells>> split;

    do {
        split = elements.subList(pageSize * (offset++), Math.min(pageSize * offset, elements.size()));

        Batch batch = QueryBuilder.batch();

        for (Tuple2<Cells, Cells> t : split) {
            Tuple2<String[], Object[]> bindVars = Utils.prepareTuple4CqlDriver(t);

            Insert insert = QueryBuilder
                    .insertInto(quote(writeConfig.getKeyspace()), quote(writeConfig.getTable()))
                    .values(bindVars._1(), bindVars._2());

            batch.add(insert);
        }

        writeConfig.getSession().execute(batch);
    } while (!split.isEmpty() && split.size() == pageSize);
}
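The manual subList paging above can also be written with Guava's Lists.partition (Guava ships as a driver dependency). A sketch under the assumption that the Insert statements have already been built:

import java.util.List;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Batch;
import com.datastax.driver.core.querybuilder.Insert;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.google.common.collect.Lists;

class PagedBatchSketch {
    static void writeInPages(Session session, List<Insert> inserts, int pageSize) {
        // Lists.partition yields consecutive sublists of at most pageSize elements.
        for (List<Insert> page : Lists.partition(inserts, pageSize)) {
            Batch batch = QueryBuilder.batch();
            for (Insert insert : page) {
                batch.add(insert);
            }
            session.execute(batch); // one batch per page
        }
    }
}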
Example #5
Source File: SettableDataMapperTest.java From SimpleFlatMapper with MIT License
@Test
public void testInsertDbObjectsBatch() throws Exception {
    testInSession(new Callback() {
        @Override
        public void call(Session session) throws Exception {
            Batch bs = QueryBuilder.batch();
            bs.add(new SimpleStatement(QUERY));
            bs.add(new SimpleStatement(QUERY));

            PreparedStatement preparedStatement = session.prepare(bs);

            DatastaxBinder<List<DbObject>> datastaxBinder = DatastaxMapperFactory.newInstance()
                    .useAsm(false).mapFrom(new TypeReference<List<DbObject>>() {});

            Statement statement = datastaxBinder.mapTo(dbObjects, preparedStatement);
            statement.enableTracing();

            session.execute(statement);

            checkObjectInserted(session, 0);
            checkObjectInserted(session, 1);
        }
    });
}
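Example #5 shows that a Batch can itself be prepared and then bound, here via SimpleFlatMapper's DatastaxBinder. For comparison, a hand-rolled sketch of preparing a batch with bind markers; the table and columns are hypothetical, and positional binding is assumed to follow marker order across the whole batch:

import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Batch;
import com.datastax.driver.core.querybuilder.QueryBuilder;

class PreparedBatchSketch {
    static void insertTwo(Session session) {
        // Hypothetical table/columns; bindMarker() leaves a placeholder per value.
        Batch batch = QueryBuilder.batch();
        batch.add(QueryBuilder.insertInto("db_object")
                .value("id", QueryBuilder.bindMarker())
                .value("name", QueryBuilder.bindMarker()));
        batch.add(QueryBuilder.insertInto("db_object")
                .value("id", QueryBuilder.bindMarker())
                .value("name", QueryBuilder.bindMarker()));

        PreparedStatement ps = session.prepare(batch);
        // Assumed: values bind positionally in marker order across both inserts.
        BoundStatement bound = ps.bind(1L, "first", 2L, "second");
        session.execute(bound);
    }
}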