Java Code Examples for com.netflix.astyanax.MutationBatch#withRow()
The following examples show how to use com.netflix.astyanax.MutationBatch#withRow().
They are taken from open-source projects; the original source file, project, and license are noted above each example.
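Before the project-specific examples, here is a minimal sketch of the pattern they all share: prepare a MutationBatch from a Keyspace, obtain a ColumnListMutation for one row via withRow(), queue column changes on it, and execute the batch. The keyspace argument, the sample_cf column family, and the row and column names below are hypothetical placeholders for illustration only; they do not come from any of the projects listed.

import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.serializers.StringSerializer;

public class WithRowSketch {
    // Hypothetical column family with String row keys and String column names.
    private static final ColumnFamily<String, String> CF_SAMPLE =
            ColumnFamily.newColumnFamily("sample_cf", StringSerializer.get(), StringSerializer.get());

    public static void upsertAndDelete(Keyspace keyspace) throws ConnectionException {
        MutationBatch batch = keyspace.prepareMutationBatch();

        // withRow() returns a ColumnListMutation bound to one row of one column family.
        ColumnListMutation<String> row = batch.withRow(CF_SAMPLE, "row-key-1");
        row.putColumn("name", "value", null);   // null TTL: the column does not expire
        row.deleteColumn("obsolete-column");

        // A single batch may touch several rows (and column families) before executing.
        batch.withRow(CF_SAMPLE, "row-key-2").putColumn("name", "other-value", 60); // 60-second TTL

        batch.execute(); // all queued mutations are sent together
    }
}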
Example 1
Source File: AstyanaxStorageProvider.java From emodb with Apache License 2.0
private static void deleteDataColumns(AstyanaxTable table, String blobId, ColumnList<Composite> columns, ConsistencyLevel consistency, Long timestamp) {
    for (AstyanaxStorage storage : table.getWriteStorage()) {
        BlobPlacement placement = (BlobPlacement) storage.getPlacement();

        // Any columns with a timestamp older than the one we expect must be from an old version
        // of the blob. This should be rare, but if it happens clean up and delete the old data.
        MutationBatch mutation = placement.getKeyspace().prepareMutationBatch(consistency);
        ColumnListMutation<Composite> row = mutation.withRow(
                placement.getBlobColumnFamily(), storage.getRowKey(blobId));
        boolean found = false;
        for (Column<Composite> column : columns) {
            if (null != timestamp && column.getTimestamp() < timestamp) {
                if (ColumnGroup.B.name().equals(column.getName().get(0, AsciiSerializer.get()))) {
                    int chunkId = column.getName().get(1, IntegerSerializer.get());
                    row.deleteColumn(getColumn(ColumnGroup.B, chunkId))
                            .deleteColumn(getColumn(ColumnGroup.Z, chunkId));
                    found = true;
                }
            }
        }
        if (found) {
            execute(mutation);
        }
    }
}
Example 2
Source File: InstanceDataDAOCassandra.java From Raigad with Apache License 2.0
public void createInstanceEntry(RaigadInstance instance) throws Exception {
    logger.info("Creating new instance entry");

    String key = getRowKey(instance);
    // If the key exists throw exception
    if (getInstance(instance.getApp(), instance.getDC(), instance.getId()) != null) {
        logger.info(String.format("Key already exists: %s", key));
        return;
    }

    // Grab the lock
    getLock(instance);

    MutationBatch mutationBatch = bootKeyspace.prepareMutationBatch();
    ColumnListMutation<String> columnListMutation = mutationBatch.withRow(CF_INSTANCES, key);
    columnListMutation.putColumn(CN_CLUSTER, instance.getApp(), null);
    columnListMutation.putColumn(CN_AZ, instance.getAvailabilityZone(), null);
    columnListMutation.putColumn(CN_INSTANCEID, instance.getInstanceId(), null);
    columnListMutation.putColumn(CN_HOSTNAME, instance.getHostName(), null);
    columnListMutation.putColumn(CN_IP, instance.getHostIP(), null);
    columnListMutation.putColumn(CN_LOCATION, instance.getDC(), null);
    columnListMutation.putColumn(CN_ASGNAME, instance.getAsg(), null);
    columnListMutation.putColumn(CN_UPDATETIME, TimeUUIDUtils.getUniqueTimeUUIDinMicros(), null);
    mutationBatch.execute();
}
Example 3
Source File: SimpleReverseIndexer.java From staash with Apache License 2.0
@Override
public void tagId(String id, Map<String, String> tags) throws IndexerException {
    MutationBatch mb = keyspace.prepareMutationBatch();
    ColumnListMutation<String> idRow = mb.withRow(dataCf, id);

    UUID uuid = TimeUUIDUtils.getUniqueTimeUUIDinMicros();
    for (Map.Entry<String, String> tag : tags.entrySet()) {
        String rowkey = tag.getKey() + "=" + tag.getValue();
        System.out.println("Rowkey: " + rowkey);
        mb.withRow(indexCf, tag.getKey() + "=" + tag.getValue())
                .putEmptyColumn(new IndexEntry(id, uuid));
        // idRow.putColumn(tag.getKey(), tag.getValue());
    }

    try {
        mb.execute();
    } catch (ConnectionException e) {
        throw new IndexerException("Failed to store tags : " + tags + " for id " + id, e);
    }
}
Example 4
Source File: AstyanaxThriftDataTableResource.java From staash with Apache License 2.0
public void updateRow(String key, RowData rowData) throws PaasException {
    LOG.info("Update row: " + rowData.toString());
    invariant();

    MutationBatch mb = keyspace.prepareMutationBatch();

    if (rowData.hasSchemalessRows()) {
        ColumnListMutation<ByteBuffer> mbRow = mb.withRow(this.columnFamily, serializers.keyAsByteBuffer(key));
        for (Entry<String, Map<String, String>> row : rowData.getSrows().getRows().entrySet()) {
            for (Entry<String, String> column : row.getValue().entrySet()) {
                mbRow.putColumn(serializers.columnAsByteBuffer(column.getKey()),
                        serializers.valueAsByteBuffer(column.getKey(), column.getValue()));
            }
        }
    }

    try {
        mb.execute();
    } catch (ConnectionException e) {
        throw new PaasException(
                String.format("Failed to update row '%s' in column family '%s.%s'",
                        key, this.keyspace.getKeyspaceName(), this.columnFamily.getName()),
                e);
    }
}
Example 5
Source File: AstyanaxThriftDataTableResource.java From staash with Apache License 2.0
@Override
public void updateColumn(String key, String column, String value) throws NotFoundException, PaasException {
    LOG.info("Update row");
    invariant();

    MutationBatch mb = keyspace.prepareMutationBatch();
    ColumnListMutation<ByteBuffer> mbRow = mb.withRow(this.columnFamily, serializers.keyAsByteBuffer(key));
    mbRow.putColumn(serializers.columnAsByteBuffer(column), serializers.valueAsByteBuffer(column, value));

    try {
        mb.execute();
    } catch (ConnectionException e) {
        throw new PaasException(
                String.format("Failed to update row '%s' in column family '%s.%s'",
                        key, this.keyspace.getKeyspaceName(), this.columnFamily.getName()),
                e);
    }
}
Example 6
Source File: AstyanaxThriftDataTableResource.java From staash with Apache License 2.0
@Override
public void deleteColumn(String key, String column) throws PaasException {
    LOG.info("Update row");
    invariant();

    MutationBatch mb = keyspace.prepareMutationBatch();
    ColumnListMutation<ByteBuffer> mbRow = mb.withRow(this.columnFamily, serializers.keyAsByteBuffer(key));
    mbRow.deleteColumn(serializers.columnAsByteBuffer(column));

    try {
        mb.execute();
    } catch (ConnectionException e) {
        throw new PaasException(
                String.format("Failed to update row '%s' in column family '%s.%s'",
                        key, this.keyspace.getKeyspaceName(), this.columnFamily.getName()),
                e);
    }
}
Example 7
Source File: AstyanaxWriter.java From blueflood with Apache License 2.0
public void writeMetadata(Table<Locator, String, String> metaTable) throws ConnectionException {
    ColumnFamily cf = CassandraModel.CF_METRICS_METADATA;
    Timer.Context ctx = Instrumentation.getBatchWriteTimerContext(CassandraModel.CF_METRICS_METADATA_NAME);
    MutationBatch batch = keyspace.prepareMutationBatch();

    try {
        for (Locator locator : metaTable.rowKeySet()) {
            Map<String, String> metaRow = metaTable.row(locator);
            ColumnListMutation<String> mutation = batch.withRow(cf, locator);

            for (Map.Entry<String, String> meta : metaRow.entrySet()) {
                mutation.putColumn(meta.getKey(), meta.getValue(), StringMetadataSerializer.get(), null);
            }
        }
        try {
            batch.execute();
        } catch (ConnectionException e) {
            Instrumentation.markWriteError(e);
            log.error("Connection exception persisting metadata", e);
            throw e;
        }
    } finally {
        ctx.stop();
    }
}
Example 8
Source File: AstyanaxDataWriterDAO.java From emodb with Apache License 2.0
@Timed(name = "bv.emodb.sorAstyanaxDataWriterDAO.storeCompactedDeltas", absolute = true)
@Override
public void storeCompactedDeltas(Table tbl, String key, List<History> histories, WriteConsistency consistency) {
    checkNotNull(tbl, "table");
    checkNotNull(key, "key");
    checkNotNull(histories, "histories");
    checkNotNull(consistency, "consistency");

    AstyanaxTable table = (AstyanaxTable) tbl;
    for (AstyanaxStorage storage : table.getWriteStorage()) {
        DeltaPlacement placement = (DeltaPlacement) storage.getPlacement();
        CassandraKeyspace keyspace = placement.getKeyspace();

        ByteBuffer rowKey = storage.getRowKey(key);
        MutationBatch mutation = keyspace.prepareMutationBatch(SorConsistencies.toAstyanax(consistency));
        ColumnListMutation<UUID> rowMutation = mutation.withRow(placement.getDeltaHistoryColumnFamily(), rowKey);
        for (History history : histories) {
            rowMutation.putColumn(history.getChangeId(),
                    _changeEncoder.encodeHistory(history),
                    Ttls.toSeconds(_historyStore.getHistoryTtl(), 1, null));
        }
        execute(mutation, "store %d compacted deltas for placement %s, table %s, key %s",
                histories.size(), placement.getName(), table.getName(), key);
    }
}
Example 9
Source File: InstanceDataDAOCassandra.java From Raigad with Apache License 2.0
private void getLock(RaigadInstance instance) throws Exception {
    String choosingkey = getChoosingKey(instance);
    MutationBatch m = bootKeyspace.prepareMutationBatch();
    ColumnListMutation<String> clm = m.withRow(CF_LOCKS, choosingkey);

    // Expire in 6 sec
    clm.putColumn(instance.getInstanceId(), instance.getInstanceId(), new Integer(6));
    m.execute();
    int count = bootKeyspace.prepareQuery(CF_LOCKS).getKey(choosingkey).getCount().execute().getResult();
    if (count > 1) {
        // Need to delete my entry
        m.withRow(CF_LOCKS, choosingkey).deleteColumn(instance.getInstanceId());
        m.execute();
        throw new Exception(String.format("More than 1 contender for lock %s %d", choosingkey, count));
    }

    String lockKey = getLockingKey(instance);
    OperationResult<ColumnList<String>> result = bootKeyspace.prepareQuery(CF_LOCKS).getKey(lockKey).execute();
    if (result.getResult().size() > 0
            && !result.getResult().getColumnByIndex(0).getName().equals(instance.getInstanceId())) {
        throw new Exception(String.format("Lock already taken %s", lockKey));
    }

    clm = m.withRow(CF_LOCKS, lockKey);
    clm.putColumn(instance.getInstanceId(), instance.getInstanceId(), new Integer(600));
    m.execute();
    Thread.sleep(100);

    result = bootKeyspace.prepareQuery(CF_LOCKS).getKey(lockKey).execute();
    if (result.getResult().size() == 1
            && result.getResult().getColumnByIndex(0).getName().equals(instance.getInstanceId())) {
        logger.info("Got lock " + lockKey);
        return;
    } else {
        throw new Exception(String.format("Cannot insert lock %s", lockKey));
    }
}
Example 10
Source File: AShardStateIO.java From blueflood with Apache License 2.0
@Override
public void putShardState(int shard, Map<Granularity, Map<Integer, UpdateStamp>> slotTimes) throws IOException {
    AstyanaxIO astyanaxIO = AstyanaxIO.singleton();
    Timer.Context ctx = Instrumentation.getWriteTimerContext(CassandraModel.CF_METRICS_STATE_NAME);
    try {
        MutationBatch mutationBatch = astyanaxIO.getKeyspace().prepareMutationBatch();
        ColumnListMutation<SlotState> mutation = mutationBatch.withRow(CassandraModel.CF_METRICS_STATE, (long) shard);
        for (Map.Entry<Granularity, Map<Integer, UpdateStamp>> granEntry : slotTimes.entrySet()) {
            Granularity g = granEntry.getKey();
            for (Map.Entry<Integer, UpdateStamp> entry : granEntry.getValue().entrySet()) {
                // granularity,slot,state
                SlotState slotState = new SlotState(g, entry.getKey(), entry.getValue().getState());
                mutation.putColumn(slotState, entry.getValue().getTimestamp());
                /*
                  Note: this method used to set the timestamp of the Cassandra column to
                  entry.getValue().getTimestamp() * 1000, i.e. the collection time. That implementation
                  was changed because it could cause delayed metrics not to rollup.

                  Consider you are getting out of order metrics M1 and M2, with collection times T1 and T2
                  with T2 > T1, belonging to the same slot.

                  Assume M2 arrives first. The slot gets marked active and rolled up and the state is set
                  as Rolled. Now, assume M1 arrives. We update the slot state to active, set the slot
                  timestamp to T1, and while persisting we set it, we set the column timestamp to be
                  T1*1000, but because the T1 < T2, Cassandra wasn't updating it.
                 */
            }
        }
        if (!mutationBatch.isEmpty())
            try {
                mutationBatch.execute();
            } catch (ConnectionException e) {
                Instrumentation.markWriteError(e);
                LOG.error("Error persisting shard state", e);
                throw new IOException(e);
            }
    } finally {
        ctx.stop();
    }
}
Example 11
Source File: HystrixCassandraPut.java From Nicobar with Apache License 2.0
@Override
protected Void run() throws Exception {
    MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(ConsistencyLevel.CL_QUORUM);
    // Setting columns in a standard column
    ColumnListMutation<String> cm = m.withRow(columnFamily, rowKey);
    for (String key : attributes.keySet()) {
        Object o = attributes.get(key);
        if (o != null) {
            // unfortunately the 'putColumn' method does not nicely figure
            // out what type the Object is so we need to do it manually
            if (o instanceof String) {
                cm.putColumn(key, (String) o, ttlSeconds);
            } else if (o instanceof Boolean) {
                cm.putColumn(key, (Boolean) o, ttlSeconds);
            } else if (o instanceof Integer) {
                cm.putColumn(key, (Integer) o, ttlSeconds);
            } else if (o instanceof Long) {
                cm.putColumn(key, (Long) o, ttlSeconds);
            } else if (o instanceof Double) {
                cm.putColumn(key, (Double) o, ttlSeconds);
            } else if (o instanceof Date) {
                cm.putColumn(key, (Date) o, ttlSeconds);
            } else if (o instanceof byte[]) {
                cm.putColumn(key, (byte[]) o, ttlSeconds);
            } else if (o instanceof ByteBuffer) {
                cm.putColumn(key, (ByteBuffer) o, ttlSeconds);
            } else {
                throw new IllegalArgumentException("Unsupported object instance type: " + o.getClass().getSimpleName());
            }
        }
    }
    m.execute();
    return null;
}
Example 12
Source File: HystrixCassandraDeleteColumns.java From Nicobar with Apache License 2.0
@Override
protected Void run() throws Exception {
    MutationBatch m = keyspace.prepareMutationBatch();
    ColumnListMutation<String> mutation = m.withRow(columnFamily, rowKey);
    for (String column : columnNames) {
        mutation = mutation.deleteColumn(column);
    }
    m.execute();
    return null;
}
Example 13
Source File: AstyanaxWriter.java From blueflood with Apache License 2.0
public void insertMetrics(Collection<IMetric> metrics, ColumnFamily cf,
                          boolean isRecordingDelayedMetrics, Clock clock) throws ConnectionException {

    Timer.Context ctx = Instrumentation.getWriteTimerContext(cf.getName());
    Multimap<Locator, IMetric> map = asMultimap(metrics);
    MutationBatch batch = keyspace.prepareMutationBatch();
    try {
        for (Locator locator : map.keySet()) {
            ColumnListMutation<Long> mutation = batch.withRow(cf, locator);

            for (IMetric metric : map.get(locator)) {
                mutation.putColumn(metric.getCollectionTime(),
                        metric.getMetricValue(),
                        (AbstractSerializer) (Serializers.serializerFor(metric.getMetricValue().getClass())),
                        metric.getTtlInSeconds());

                if (cf.getName().equals(CassandraModel.CF_METRICS_PREAGGREGATED_FULL_NAME)) {
                    Instrumentation.markFullResPreaggregatedMetricWritten();
                }

                if (isRecordingDelayedMetrics) {
                    // retaining the same conditional logic that was used to perform insertLocator(locator, batch).
                    insertLocatorIfDelayed(metric, batch, clock);
                }
            }

            if (!LocatorCache.getInstance().isLocatorCurrentInBatchLayer(locator)) {
                insertLocator(locator, batch);
                LocatorCache.getInstance().setLocatorCurrentInBatchLayer(locator);
            }
        }
        try {
            batch.execute();
        } catch (ConnectionException e) {
            Instrumentation.markWriteError(e);
            log.error("Connection exception persisting data", e);
            throw e;
        }
    } finally {
        ctx.stop();
    }
}