com.datastax.driver.core.BatchStatement Java Examples
The following examples show how to use com.datastax.driver.core.BatchStatement.
Each example is taken from an open-source project; the source file, project, and license are noted above it.
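Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: prepare a statement, bind values, add the bound statements to a BatchStatement, and execute it. The contact point and the test.users table below are hypothetical placeholders, and the choice of an UNLOGGED batch is illustrative (the no-arg new BatchStatement() creates a LOGGED batch).

import com.datastax.driver.core.BatchStatement;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;

public class BatchWriteSketch {
    public static void main(String[] args) {
        // Hypothetical setup: a Cassandra node on localhost and a pre-created
        // test.users (id int PRIMARY KEY, fname text, lname text) table.
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
             Session session = cluster.connect()) {
            PreparedStatement insert =
                    session.prepare("INSERT INTO test.users (id, fname, lname) VALUES (?, ?, ?)");

            // UNLOGGED skips the coordinator's batch log; the no-arg constructor
            // would create a LOGGED batch with atomicity guarantees instead.
            BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);
            for (int i = 0; i < 3; i++) {
                batch.add(insert.bind(i, "f" + i, "l" + i));
            }
            session.execute(batch);
        }
    }
}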
Example #1
Source File: PreferencesDAOImpl.java From arcusplatform with Apache License 2.0
@Override
public void merge(UUID personId, UUID placeId, Map<String, Object> prefs) {
    Map<String, String> encodedPrefs = encodeAttributesToJson(prefs);

    BatchStatement batchStatement = new BatchStatement();
    for (Map.Entry<String, String> encodedPref : encodedPrefs.entrySet()) {
        BoundStatement boundStatement = new BoundStatement(mergeStatement)
            .setString(0, encodedPref.getKey())
            .setString(1, encodedPref.getValue())
            .setUUID(Cols.PERSON_ID, personId)
            .setUUID(Cols.PLACE_ID, placeId);
        batchStatement.add(boundStatement);
    }

    try (Context context = mergeTimer.time()) {
        session.execute(batchStatement);
    }
}
Example #2
Source File: CassandraJobStore.java From titus-control-plane with Apache License 2.0
private BatchStatement getArchiveJobBatchStatement(Job job) {
    String jobId = job.getId();
    int bucket = activeJobIdsBucketManager.getItemBucket(jobId);
    String jobJsonString = writeJobToString(job);

    Statement deleteJobStatement = deleteActiveJobStatement.bind(jobId);
    Statement deleteJobIdStatement = deleteActiveJobIdStatement.bind(bucket, jobId);
    Statement insertJobStatement = insertArchivedJobStatement.bind(jobId, jobJsonString);

    BatchStatement statement = new BatchStatement();
    statement.add(deleteJobStatement);
    statement.add(deleteJobIdStatement);
    statement.add(insertJobStatement);
    return statement;
}
Example #3
Source File: CassandraJobStore.java From titus-control-plane with Apache License 2.0
/**
 * Moving task between jobs requires the following Cassandra updates:
 * <ul>
 * <li>Update the active_jobs table with the new jobFrom record</li>
 * <li>Update the active_jobs table with the new jobTo record</li>
 * <li>Update task record in the active_tasks table (to include the new job id)</li>
 * <li>Remove a record from the active_task_ids table for the jobFrom/taskId pair</li>
 * <li>Add a new record in the active_task_ids for the jobTo/taskId pair</li>
 * </ul>
 */
@Override
public Completable moveTask(Job jobFrom, Job jobTo, Task taskAfter) {
    return Observable.fromCallable((Callable<Statement>) () -> {
        checkIfJobIsActive(jobFrom.getId());
        checkIfJobIsActive(jobTo.getId());

        String taskJsonString = ObjectMappers.writeValueAsString(mapper, taskAfter);
        transactionLogger().logBeforeUpdate(insertActiveTaskStatement, "moveTask", taskJsonString);

        BatchStatement batchStatement = new BatchStatement();
        batchStatement.add(insertActiveJobStatement.bind(jobFrom.getId(), ObjectMappers.writeValueAsString(mapper, jobFrom)));
        batchStatement.add(insertActiveJobStatement.bind(jobTo.getId(), ObjectMappers.writeValueAsString(mapper, jobTo)));
        batchStatement.add(insertActiveTaskStatement.bind(taskAfter.getId(), taskJsonString));
        batchStatement.add(deleteActiveTaskIdStatement.bind(jobFrom.getId(), taskAfter.getId()));
        batchStatement.add(insertActiveTaskIdStatement.bind(jobTo.getId(), taskAfter.getId()));
        return batchStatement;
    }).flatMap(this::execute)
      .toCompletable()
      .doOnCompleted(() -> transactionLogger().logAfterUpdate(insertActiveTaskStatement, "moveTask", taskAfter));
}
Example #4
Source File: SetAccountOwner.java From arcusplatform with Apache License 2.0
@Override
public void execute(ExecutionContext context, boolean autoRollback) throws CommandExecutionException {
    PreparedStatement stmt = context.getSession().prepare(update);
    List<Row> rows = context.getSession().execute("SELECT * FROM authorization_grant").all();
    BatchStatement batch = new BatchStatement();
    rows.forEach((r) -> {
        if(r.getBool("accountowner")) {
            batch.add(new BoundStatement(stmt)
                .setUUID("owner", r.getUUID("entityId"))
                .setUUID("id", r.getUUID("accountid")));
        }
    });
    context.getSession().execute(batch);
}
Example #5
Source File: CassandraManager.java From logging-log4j2 with Apache License 2.0
private FactoryData(final SocketAddress[] contactPoints, final ColumnMapping[] columns, final boolean useTls,
                    final String clusterName, final String keyspace, final String table, final String username,
                    final String password, final boolean useClockForTimestampGenerator, final int bufferSize,
                    final boolean batched, final BatchStatement.Type batchType) {
    super(bufferSize, null);
    this.contactPoints = convertAndAddDefaultPorts(contactPoints);
    this.columns = columns;
    this.useTls = useTls;
    this.clusterName = clusterName;
    this.keyspace = keyspace;
    this.table = table;
    this.username = username;
    this.password = password;
    this.useClockForTimestampGenerator = useClockForTimestampGenerator;
    this.batched = batched;
    this.batchType = batchType;
}
Example #6
Source File: CassandraJobStore.java From titus-control-plane with Apache License 2.0
@Override
public Completable replaceTask(Task oldTask, Task newTask) {
    return Observable.fromCallable((Callable<Statement>) () -> {
        String jobId = newTask.getJobId();
        checkIfJobIsActive(jobId);

        String taskId = newTask.getId();
        String taskJsonString = ObjectMappers.writeValueAsString(mapper, newTask);

        BatchStatement batchStatement = getArchiveTaskBatchStatement(oldTask);
        Statement insertTaskStatement = insertActiveTaskStatement.bind(taskId, taskJsonString);
        Statement insertTaskIdStatement = insertActiveTaskIdStatement.bind(jobId, taskId);
        batchStatement.add(insertTaskStatement);
        batchStatement.add(insertTaskIdStatement);
        return batchStatement;
    }).flatMap(this::execute).toCompletable();
}
Example #7
Source File: CassandraJobStore.java From titus-control-plane with Apache License 2.0
@Override
public Completable storeTask(Task task) {
    return Observable.fromCallable((Callable<Statement>) () -> {
        String jobId = task.getJobId();
        String taskId = task.getId();
        checkIfJobIsActive(jobId);

        String taskJsonString = ObjectMappers.writeValueAsString(mapper, task);
        Statement taskStatement = insertActiveTaskStatement.bind(taskId, taskJsonString);
        Statement taskIdStatement = insertActiveTaskIdStatement.bind(jobId, taskId);

        BatchStatement batchStatement = new BatchStatement();
        batchStatement.add(taskStatement);
        batchStatement.add(taskIdStatement);

        transactionLogger().logBeforeCreate(insertActiveTaskStatement, "storeTask", task);
        return batchStatement;
    }).flatMap(statement -> execute(statement)
            .doOnNext(rs -> transactionLogger().logAfterCreate(insertActiveTaskStatement, "storeTask", task))
    ).toCompletable();
}
Example #8
Source File: CassandraSyncIT.java From glowroot with Apache License 2.0
@Override
public void transactionMarker() throws Exception {
    BatchStatement batchStatement = new BatchStatement();
    batchStatement.add(new SimpleStatement(
            "INSERT INTO test.users (id, fname, lname) VALUES (100, 'f100', 'l100')"));
    batchStatement.add(new SimpleStatement(
            "INSERT INTO test.users (id, fname, lname) VALUES (101, 'f101', 'l101')"));
    PreparedStatement preparedStatement =
            session.prepare("INSERT INTO test.users (id, fname, lname) VALUES (?, ?, ?)");
    for (int i = 200; i < 210; i++) {
        BoundStatement boundStatement = new BoundStatement(preparedStatement);
        boundStatement.bind(i, "f" + i, "l" + i);
        batchStatement.add(boundStatement);
    }
    batchStatement.add(new SimpleStatement(
            "INSERT INTO test.users (id, fname, lname) VALUES (300, 'f300', 'l300')"));
    session.execute(batchStatement);
}
Example #9
Source File: AuthorizationGrantDAOImpl.java From arcusplatform with Apache License 2.0
@Override
public void save(AuthorizationGrant grant) {
    Preconditions.checkNotNull(grant, "grant must not be null");
    Preconditions.checkNotNull(grant.getEntityId(), "entity id must not be null");
    Preconditions.checkNotNull(grant.getAccountId(), "account id must not be null");
    Preconditions.checkNotNull(grant.getPlaceId(), "place id must not be null");

    // uses upsert semantics where an insert statement will update the existing row if it already exists
    BatchStatement batch = new BatchStatement();
    batch.add(bindUpsert(upsert, grant));
    batch.add(bindUpsert(upsertByPlace, grant));

    try(Context ctxt = upsertTimer.time()) {
        this.session.execute(batch);
    }
}
Example #10
Source File: Cassandra2xDefaultMapDAO.java From cumulusrdf with Apache License 2.0
@Override
public void setAll(final Map<K, V> pairs) {
    if (pairs.size() == 0) {
        return;
    }

    BatchStatement batchStatement = new BatchStatement();
    for (Map.Entry<K, V> entry : pairs.entrySet()) {
        batchStatement.add(getInsertStatement(entry.getKey(), entry.getValue()));
    }

    try {
        _session.execute(batchStatement);
    } catch (Exception e) {
        _log.error("failed to insert batch of " + pairs.size() + " dictionary entries", e);
    }
}
Example #11
Source File: CassandraEventData.java From yb-sample-apps with Apache License 2.0
@Override
public long doWrite(int threadIdx) {
    // Pick a random data source.
    DataSource dataSource = dataSources.get(random.nextInt(dataSources.size()));
    long numKeysWritten = 0;

    BatchStatement batch = new BatchStatement();
    // Enter a batch of data points.
    long ts = dataSource.getDataEmitTs();
    for (int i = 0; i < appConfig.batchSize; i++) {
        batch.add(getPreparedInsert().bind()
                .setString("device_id", dataSource.getDeviceId())
                .setLong("ts", ts)
                .setString("event_type", dataSource.getEventType())
                .setBytesUnsafe("value", getValue(dataSource.getDeviceId())));
        numKeysWritten++;
        ts++;
    }
    dataSource.setLastEmittedTs(ts);
    getCassandraClient().execute(batch);
    return numKeysWritten;
}
Example #12
Source File: InvitationDAOImpl.java From arcusplatform with Apache License 2.0
@Override
public void cancel(Invitation invitation) {
    Preconditions.checkNotNull(invitation, "invitation is required");

    try(Context timer = cancelTimer.time()) {
        BatchStatement stmt = new BatchStatement();

        BoundStatement tblDel = new BoundStatement(delete);
        tblDel.setString(Column.code.name(), invitation.getCode());
        stmt.add(tblDel);

        BoundStatement placeIdxDel = new BoundStatement(deletePlaceIdx);
        placeIdxDel.setString(Column.code.name(), invitation.getCode());
        placeIdxDel.setUUID(Column.placeId.name(), UUID.fromString(invitation.getPlaceId()));
        stmt.add(placeIdxDel);

        if(invitation.getInviteeId() != null) {
            BoundStatement personIdxDel = new BoundStatement(deletePersonIdx);
            personIdxDel.setString(Column.code.name(), invitation.getCode());
            personIdxDel.setUUID(Column.inviteeId.name(), UUID.fromString(invitation.getInviteeId()));
            stmt.add(personIdxDel);
        }

        session.execute(stmt);
    }
}
Example #13
Source File: BatchHandlerTest.java From scalardb with Apache License 2.0
@Test
public void handle_CorrectHandlerAndAtLeastOneConditionalPutGiven_ShouldSetConsistencyProperly() {
    // Arrange
    configureBehavior();
    mutations = prepareNonConditionalPuts();
    mutations.get(1).withCondition(new PutIfNotExists());
    when(session.execute(any(Statement.class))).thenReturn(results);
    when(results.wasApplied()).thenReturn(true);
    spy = prepareSpiedBatchHandler();

    // Act Assert
    assertThatCode(
            () -> {
                spy.handle(mutations);
            })
        .doesNotThrowAnyException();

    // Assert
    verify(spy).setConsistencyForConditionalMutation(any(BatchStatement.class));
}
Example #14
Source File: DeepCqlRecordWriter.java From deep-spark with Apache License 2.0
/**
 * Executes cql batch statements in Cassandra
 */
@Override
public void run() {
    LOG.debug("[" + this + "] Executing batch write to cassandra");
    try {
        final PreparedStatement preparedStatement = sessionWithHost.prepare(cql);
        final BatchStatement batchStatement = new BatchStatement(BatchStatement.Type.UNLOGGED);
        for (final List<Object> record : records) {
            batchStatement.add(preparedStatement.bind(record.toArray(new Object[record.size()])));
        }
        sessionWithHost.execute(batchStatement);
    } catch (Exception e) {
        LOG.error("[" + this + "] Exception occurred while trying to execute batch in cassandra: " + e.getMessage());
    }
}
Example #15
Source File: CassandraVideoV2Dao.java From arcusplatform with Apache License 2.0
@Override
public ListenableFuture<?> delete(UUID placeId, UUID recordingId, boolean isFavorite, Date purgeTime, int purgePartitionId) {
    BatchStatement stmt = new BatchStatement(Type.UNLOGGED);
    addDeleteStatements(stmt, placeId, recordingId, isFavorite, purgeTime, purgePartitionId);

    // Add to Purge table if it's favorite
    if(isFavorite) {
        VideoMetadata metadata = findByPlaceAndId(placeId, recordingId);
        metadata.setDeletionTime(purgeTime);
        metadata.setDeletionPartition(purgePartitionId);
        addStatementsForRemoveFromFavoriteTables(stmt, metadata);
    }

    long startTime = System.nanoTime();
    ResultSetFuture result = session.executeAsync(stmt);
    result.addListener(() -> DeleteTimer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS), MoreExecutors.directExecutor());
    return result;
}
Example #16
Source File: CassandraVideoV2Dao.java From arcusplatform with Apache License 2.0
@Override
public void update(UUID placeId, UUID recordingId, long ttlInSeconds, Map<String, Object> attributes) {
    if(attributes == null || attributes.isEmpty()) {
        return;
    }

    long expiration = VideoV2Util.createExpirationFromTTL(recordingId, ttlInSeconds);
    long actualTtlInSeconds = VideoV2Util.createActualTTL(recordingId, expiration);
    BatchStatement stmt = new BatchStatement();

    for (Map.Entry<String,Object> entry : attributes.entrySet()) {
        switch (entry.getKey()) {
        case RecordingCapability.ATTR_NAME:
            stmt.add(recordingMetadataTable.insertField(recordingId, expiration, actualTtlInSeconds, MetadataAttribute.NAME, (String) entry.getValue()));
            break;
        default:
            VIDEO_ATTR_READONLY.inc();
            throw new ErrorEventException(Errors.invalidRequest("attribute is not writable: " + entry.getKey()));
        }
    }

    long startTime = System.nanoTime();
    if(!session.execute(stmt).wasApplied()) {
        throw new NotFoundException(Address.platformService(recordingId, "recording"));
    }
    UpdateVideoTimer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
}
Example #17
Source File: CassandraVideoV2Dao.java From arcusplatform with Apache License 2.0
private void addStatementsForRemoveFromFavoriteTables(BatchStatement stmt, VideoMetadata metadata) {
    UUID placeId = metadata.getPlaceId();
    UUID recordingId = metadata.getRecordingId();

    // Delete placeRecordingIndexFavorite
    for(String tag: metadata.getTags()) {
        stmt.add(placeRecordingIndexFavorite.deleteTag(placeId, recordingId, tag));
    }
    stmt.add(placeRecordingIndexFavorite.deleteVideo(placeId, recordingId, PlaceRecordingIndexV2Table.Type.RECORDING));
    stmt.add(placeRecordingIndexFavorite.deleteCamera(placeId, recordingId, metadata.getCameraId()));
    if(metadata.isDeleted()) {
        stmt.add(placeRecordingIndexFavorite.deleteDeleted(placeId, recordingId));
    }

    // Delete metadata
    stmt.add(recordingMetadataFavoriteTable.deleteRecording(recordingId));

    // Delete recording
    stmt.add(recordingFavoriteTable.deleteRecording(recordingId));

    // Add to purge table
    Date purgeAt = null;
    if(metadata.getDeletionTime() != null && metadata.getDeletionTime().before(new Date())) {
        // already expired
        purgeAt = VideoUtil.getPurgeTimestamp(config.getPurgeDelay(), TimeUnit.MILLISECONDS);
    } else {
        purgeAt = metadata.getDeletionTime();
    }
    addPurgeStatements(stmt, placeId, recordingId, purgeAt, metadata.getDeletionPartition(), metadata.getLoc(), true);
}
Example #18
Source File: CassandraOperationImpl.java From sunbird-lms-service with MIT License
@Override
public Response batchInsert(String keyspaceName, String tableName, List<Map<String, Object>> records) {
  long startTime = System.currentTimeMillis();
  ProjectLogger.log("Cassandra Service batchInsert method started at ==" + startTime, LoggerEnum.INFO);
  Session session = connectionManager.getSession(keyspaceName);
  Response response = new Response();
  BatchStatement batchStatement = new BatchStatement();
  ResultSet resultSet = null;
  try {
    for (Map<String, Object> map : records) {
      Insert insert = QueryBuilder.insertInto(keyspaceName, tableName);
      map.entrySet()
          .stream()
          .forEach(
              x -> {
                insert.value(x.getKey(), x.getValue());
              });
      batchStatement.add(insert);
    }
    resultSet = session.execute(batchStatement);
    response.put(Constants.RESPONSE, Constants.SUCCESS);
  } catch (QueryExecutionException
      | QueryValidationException
      | NoHostAvailableException
      | IllegalStateException e) {
    ProjectLogger.log("Cassandra Batch Insert Failed." + e.getMessage(), e);
    throw new ProjectCommonException(
        ResponseCode.SERVER_ERROR.getErrorCode(),
        ResponseCode.SERVER_ERROR.getErrorMessage(),
        ResponseCode.SERVER_ERROR.getResponseCode());
  }
  logQueryElapseTime("batchInsert", startTime);
  return response;
}
Example #19
Source File: CassandraBasedAuditRepository.java From atlas with Apache License 2.0
@Override
public void putEventsV2(List<EntityAuditEventV2> events) throws AtlasBaseException {
  BatchStatement batch = new BatchStatement();
  // Bind a fresh BoundStatement per event so each batch entry keeps its own values;
  // re-binding and re-adding one shared BoundStatement (as the original did) would
  // leave every entry in the batch holding the last event's values.
  events.forEach(event -> batch.add(insertStatement.bind(
      event.getEntityId(),
      event.getTimestamp(),
      event.getAction().toString(),
      event.getUser(),
      event.getDetails(),
      (persistEntityDefinition ? event.getEntityDefinitionString() : null))));
  cassSession.execute(batch);
}
Example #20
Source File: CassandraOperationImpl.java From sunbird-lms-service with MIT License
/**
 * This method updates all the records in a batch
 *
 * @param keyspaceName
 * @param tableName
 * @param records
 * @return
 */
// @Override
public Response batchUpdateById(String keyspaceName, String tableName, List<Map<String, Object>> records) {
  long startTime = System.currentTimeMillis();
  ProjectLogger.log("Cassandra Service batchUpdateById method started at ==" + startTime, LoggerEnum.INFO);
  Session session = connectionManager.getSession(keyspaceName);
  Response response = new Response();
  BatchStatement batchStatement = new BatchStatement();
  ResultSet resultSet = null;
  try {
    for (Map<String, Object> map : records) {
      Update update = createUpdateStatement(keyspaceName, tableName, map);
      batchStatement.add(update);
    }
    resultSet = session.execute(batchStatement);
    response.put(Constants.RESPONSE, Constants.SUCCESS);
  } catch (QueryExecutionException
      | QueryValidationException
      | NoHostAvailableException
      | IllegalStateException e) {
    ProjectLogger.log("Cassandra Batch Update Failed." + e.getMessage(), e);
    throw new ProjectCommonException(
        ResponseCode.SERVER_ERROR.getErrorCode(),
        ResponseCode.SERVER_ERROR.getErrorMessage(),
        ResponseCode.SERVER_ERROR.getResponseCode());
  }
  logQueryElapseTime("batchUpdateById", startTime);
  return response;
}
Example #21
Source File: DBasicMetricsRW.java From blueflood with Apache License 2.0
/**
 * Inserts a collection of metrics in a batch using an unlogged
 * {@link BatchStatement}
 *
 * @param metrics
 */
private void insertMetricsInBatch(Collection<IMetric> metrics) throws IOException {
    BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);

    for (IMetric metric : metrics) {
        BoundStatement bound = simpleNumberIO.getBoundStatementForMetric(metric);
        batch.add(bound);
        Instrumentation.markFullResMetricWritten();

        Locator locator = metric.getLocator();
        if( !LocatorCache.getInstance().isLocatorCurrentInBatchLayer(locator) ) {
            LocatorCache.getInstance().setLocatorCurrentInBatchLayer(locator);
            batch.add(locatorIO.getBoundStatementForLocator( locator ));
        }

        // if we are recording delayed metrics, we may need to do an
        // extra insert
        if ( isRecordingDelayedMetrics ) {
            BoundStatement bs = getBoundStatementForMetricIfDelayed(metric);
            if ( bs != null ) {
                batch.add(bs);
            }
        }
    }
    LOG.trace(String.format("insert batch statement size=%d", batch.size()));
    try {
        DatastaxIO.getSession().execute(batch);
    } catch ( Exception ex ) {
        Instrumentation.markWriteError();
        LOG.error(String.format("error writing batch of %d metrics", batch.size()), ex );
    }
}
Example #22
Source File: SaveToCassandraActionTest.java From Decision with Apache License 2.0
@Before
public void before() throws Exception {
    cassandraServer = new CassandraServer();
    cassandraServer.start();
    saveToCassandraActionExecutionFunction = new SaveToCassandraActionExecutionFunction("localhost", 9042, 50,
            BatchStatement.Type.UNLOGGED);
}
Example #23
Source File: DAbstractMetricsRW.java From blueflood with Apache License 2.0
/**
 * This method inserts a collection of {@link com.rackspacecloud.blueflood.service.SingleRollupWriteContext} objects
 * to the appropriate Cassandra column family.
 *
 * It performs the inserts by executing an UNLOGGED BATCH statement.
 *
 * @param writeContexts
 */
@Override
public void insertRollups(List<SingleRollupWriteContext> writeContexts) {
    if (writeContexts.size() == 0) {
        return;
    }

    Timer.Context ctx = Instrumentation.getWriteTimerContext( writeContexts.get( 0 ).getDestinationCF().getName() );
    try {
        BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);
        for (SingleRollupWriteContext writeContext : writeContexts) {
            Rollup rollup = writeContext.getRollup();
            Locator locator = writeContext.getLocator();
            Granularity granularity = writeContext.getGranularity();
            int ttl = getTtl(locator, rollup.getRollupType(), granularity);

            // lookup the right writer
            RollupType rollupType = writeContext.getRollup().getRollupType();
            DAbstractMetricIO io = getIO(rollupType.name().toLowerCase(), granularity);

            Statement statement = io.createStatement(locator, writeContext.getTimestamp(), rollup, writeContext.getGranularity(), ttl);
            batch.add(statement);
        }
        Session session = DatastaxIO.getSession();
        session.execute(batch);
    } catch (Exception ex) {
        Instrumentation.markWriteError();
        LOG.error(String.format("error writing locator batch of size %s, granularity %s",
                writeContexts.size(), writeContexts.get(0).getGranularity()), ex);
    } finally {
        ctx.stop();
    }
}
Example #24
Source File: CassandraTransactionalStore.java From attic-apex-malhar with Apache License 2.0
public CassandraTransactionalStore() {
    super();
    metaTable = DEFAULT_META_TABLE;
    metaTableAppIdColumn = DEFAULT_APP_ID_COL;
    metaTableOperatorIdColumn = DEFAULT_OPERATOR_ID_COL;
    metaTableWindowColumn = DEFAULT_WINDOW_COL;
    batchCommand = new BatchStatement();
    inTransaction = false;
}
Example #25
Source File: CassandraStatementExecuteQueryInterceptor.java From pinpoint with Apache License 2.0
private String retrieveSql(Object args0) {
    if (args0 instanceof BoundStatement) {
        return ((BoundStatement) args0).preparedStatement().getQueryString();
    } else if (args0 instanceof RegularStatement) {
        return ((RegularStatement) args0).getQueryString();
    } else if (args0 instanceof WrappedStatementGetter) {
        return retrieveWrappedStatement((WrappedStatementGetter) args0);
    } else if (args0 instanceof BatchStatement) {
        // we could unroll all the batched statements and append ; between them if need be but it could be too long.
        return null;
    } else if (args0 instanceof String) {
        return (String) args0;
    }
    return null;
}
Example #26
Source File: CassandraVideoV2Dao.java From arcusplatform with Apache License 2.0
@Override
public void completeAndDelete(UUID placeId, UUID recordingId, double duration, long size, Date purgeTime, int purgePartitionId, long ttlInSeconds) {
    BatchStatement stmt = new BatchStatement(Type.UNLOGGED);
    // recording will be atomic, and place_recording will be atomic, but they will be independently atomic to save performance
    stmt.setRetryPolicy(DowngradingConsistencyRetryPolicy.INSTANCE);
    addDurationAndSizeStatements(stmt, placeId, recordingId, duration, size, ttlInSeconds);
    addDeleteStatements(stmt, placeId, recordingId, false, purgeTime, purgePartitionId);
    executeAndUpdateTimer(session, stmt, CompleteTimer);
}
Example #27
Source File: CQLStashTableDAO.java From emodb with Apache License 2.0
public void addTokenRangesForTable(String stashId, AstyanaxStorage readStorage, TableJson tableJson) {
    String placement = readStorage.getPlacementName();
    ensureStashTokenRangeTableExists();

    String tableInfo = JsonHelper.asJson(tableJson.getRawJson());
    Session session = _placementCache.get(_systemTablePlacement).getKeyspace().getCqlSession();

    // Add two records for each shard for the table: one which identifies the start token for the shard, and
    // one that identifies (exclusively) the end token for the shard. This will allow for efficient range queries
    // later on.
    Iterator<ByteBufferRange> tableTokenRanges = readStorage.scanIterator(null);

    // To prevent sending over-large batches split into groups of 8 ranges which results in 16 statements per batch
    Iterators.partition(tableTokenRanges, 8).forEachRemaining(ranges -> {
        BatchStatement batchStatement = new BatchStatement();
        for (ByteBufferRange range : ranges) {
            batchStatement.add(QueryBuilder.insertInto(STASH_TOKEN_RANGE_TABLE)
                    .value(STASH_ID_COLUMN, stashId)
                    .value(DATA_CENTER_COLUMN, _dataCenters.getSelf().getName())
                    .value(PLACEMENT_COLUMN, placement)
                    .value(RANGE_TOKEN_COLUMN, range.getStart())
                    .value(IS_START_TOKEN_COLUMN, true)
                    .value(TABLE_JSON_COLUMN, tableInfo));

            batchStatement.add(QueryBuilder.insertInto(STASH_TOKEN_RANGE_TABLE)
                    .value(STASH_ID_COLUMN, stashId)
                    .value(DATA_CENTER_COLUMN, _dataCenters.getSelf().getName())
                    .value(PLACEMENT_COLUMN, placement)
                    .value(RANGE_TOKEN_COLUMN, range.getEnd())
                    .value(IS_START_TOKEN_COLUMN, false)
                    .value(TABLE_JSON_COLUMN, tableInfo));
        }
        session.execute(batchStatement.setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM));
    });
}
Example #28
Source File: CassandraVideoV2Dao.java From arcusplatform with Apache License 2.0
private void complete(UUID placeId, UUID recordingId, long expiration, long actualTtlInSeconds, double duration, long size) {
    BatchStatement stmt = new BatchStatement(Type.UNLOGGED);
    // recording will be atomic, and place_recording will be atomic, but they will be independently atomic to save performance
    stmt.setRetryPolicy(DowngradingConsistencyRetryPolicy.INSTANCE);
    addDurationAndSizeStatements(stmt, placeId, recordingId, duration, size, expiration, actualTtlInSeconds);

    // Recording Metadata Index Mutations
    stmt.add(placeRecordingIndex.insertRecording(placeId, recordingId, size, expiration, actualTtlInSeconds));

    executeAndUpdateTimer(session, stmt, CompleteTimer);
}
Example #29
Source File: UniqueValueSerializationStrategyProxyImpl.java From usergrid with Apache License 2.0
@Override
public BatchStatement writeCQL(final ApplicationScope applicationScope, final UniqueValue uniqueValue,
                               final int timeToLive) {
    final MigrationRelationship<UniqueValueSerializationStrategy> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        migration.from.writeCQL( applicationScope, uniqueValue, timeToLive );
        migration.to.writeCQL( applicationScope, uniqueValue, timeToLive );
    }

    return migration.to.writeCQL( applicationScope, uniqueValue, timeToLive );
}
Example #30
Source File: CassandraPersonalization.java From yb-sample-apps with Apache License 2.0
@Override
public long doWrite(int threadIdx) {
    BatchStatement batch = new BatchStatement();
    PreparedStatement insert = getPreparedInsert();
    Key key = getSimpleLoadGenerator().getKeyToWrite();
    try {
        int totalCouponCount = 0;
        for (int i = 0; i < appConfig.numStores; i++) {
            String customerId = key.asString();
            String storeId = Integer.toString(i);
            int couponCount = appConfig.numNewCouponsPerCustomer / appConfig.numStores;
            for (int j = 0; j < couponCount; j++) {
                Coupon coupon = coupons.elementAt(j);
                batch.add(insert.bind(customerId, storeId, coupon.code, coupon.beginDate, coupon.endDate,
                        Double.valueOf(generateRandomRelevanceScore())));
            }
            totalCouponCount += couponCount;
        }
        ResultSet resultSet = getCassandraClient().execute(batch);
        LOG.debug("Wrote coupon count: " + totalCouponCount + ", return code: " + resultSet.toString());
        getSimpleLoadGenerator().recordWriteSuccess(key);
        return 1;
    } catch (Exception e) {
        getSimpleLoadGenerator().recordWriteFailure(key);
        throw e;
    }
}