org.apache.cassandra.thrift.Mutation Java Examples
The following examples show how to use org.apache.cassandra.thrift.Mutation.
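Most of the examples below follow the same basic pattern: wrap a Column in a ColumnOrSuperColumn, wrap that in a Mutation, and nest the mutations into the row key -> column family -> mutation list map that Cassandra.Client#batch_mutate expects. Here is a minimal, self-contained sketch of that pattern; the class and method names are illustrative, not taken from any of the projects below.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.Mutation;

public class MutationSketch {

    // Builds the nested map expected by Cassandra.Client#batch_mutate:
    // row key -> column family name -> list of mutations.
    public static Map<ByteBuffer, Map<String, List<Mutation>>> singleColumnBatch(
            ByteBuffer rowKey, String columnFamily, String name, String value) {
        Column column = new Column();
        column.setName(ByteBuffer.wrap(name.getBytes(StandardCharsets.UTF_8)));
        column.setValue(ByteBuffer.wrap(value.getBytes(StandardCharsets.UTF_8)));
        column.setTimestamp(System.currentTimeMillis());

        // A Mutation is a union: either a ColumnOrSuperColumn (insert/update)
        // or a Deletion (see Example #7 below).
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        cosc.setColumn(column);
        Mutation mutation = new Mutation();
        mutation.setColumn_or_supercolumn(cosc);

        Map<String, List<Mutation>> byColumnFamily =
                Collections.singletonMap(columnFamily, Collections.singletonList(mutation));
        return Collections.singletonMap(rowKey, byColumnFamily);
    }
}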
Example #1
Source File: ThriftTarget.java from hdfs2cass (Apache License 2.0)
@Override
public boolean accept(final OutputHandler handler, final PType<?> pType) {
    if (pType instanceof PTableType) {
        PTableType pTableType = (PTableType) pType;
        PType<?> keyType = pTableType.getKeyType();
        PType<?> valueType = pTableType.getValueType();
        List<PType> subTypes = valueType.getSubTypes();
        if (ByteBuffer.class.equals(keyType.getTypeClass())
                && Collection.class.equals(valueType.getTypeClass())
                && subTypes.size() == 1
                && Mutation.class.equals(subTypes.get(0).getTypeClass())) {
            handler.configure(this, pType);
            return true;
        }
    }
    return false;
}
Example #2
Source File: CassandraTransaction.java from Doradus (Apache License 2.0)
private static Mutation createMutation(byte[] colName, byte[] colValue, long timestamp) {
    if (colValue == null) {
        colValue = EMPTY_BYTES;
    }
    Column col = new Column();
    col.setName(colName);
    col.setValue(colValue);
    col.setTimestamp(timestamp);
    ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
    cosc.setColumn(col);
    Mutation mutation = new Mutation();
    mutation.setColumn_or_supercolumn(cosc);
    return mutation;
}
Example #3
Source File: ThriftByFieldNamesFn.java from hdfs2cass (Apache License 2.0)
private List<Mutation> getMutations(final T input) {
    List<Mutation> mutations = Lists.newArrayList();
    long timestamp = getTimestamp(input);
    Optional<Integer> ttl = getTtl(input);
    for (Schema.Field field : input.getSchema().getFields()) {
        int fieldPos = field.pos();
        if (fieldPos == rowKeyIndex || fieldPos == ttlIndex || fieldPos == timestampIndex) {
            continue;
        }
        Object fieldValue = input.get(fieldPos);
        Column column = new Column();
        column.setName(ByteBufferUtil.bytes(field.name()));
        column.setTimestamp(timestamp);
        if (ttl.isPresent()) {
            column.setTtl(ttl.get());
        }
        column.setValue(CassandraRecordUtils.toByteBuffer(fieldValue));
        Mutation mutation = new Mutation();
        mutation.column_or_supercolumn = new ColumnOrSuperColumn();
        mutation.column_or_supercolumn.column = column;
        mutations.add(mutation);
    }
    return mutations;
}
Example #4
Source File: CassandraRecordUtils.java from hdfs2cass (Apache License 2.0)
public static Mutation createMutation(Object name, Object value, long timestamp, int ttl) {
    Column column = new Column();
    column.setName(toByteBuffer(name));
    column.setValue(toByteBuffer(value));
    column.setTimestamp(timestamp);
    if (ttl > 0) {
        column.setTtl(ttl);
    }
    Mutation mutation = new Mutation();
    mutation.column_or_supercolumn = new ColumnOrSuperColumn();
    mutation.column_or_supercolumn.column = column;
    return mutation;
}
Example #5
Source File: LegacyHdfsToThrift.java from hdfs2cass (Apache License 2.0)
/**
 * Thrift-based import requires us to provide {@link org.apache.cassandra.thrift.Mutation}.
 * Therefore we convert each input line into one.
 *
 * @param inputRow byte representation of the input row as it was read from the Avro file
 * @return the record wrapped in something that blends nicely into Crunch
 */
@Override
public ThriftRecord map(ByteBuffer inputRow) {
    LegacyInputFormat row = LegacyInputFormat.parse(inputRow);
    ByteBuffer key = CassandraRecordUtils.toByteBuffer(row.getRowkey());
    long ts = Objects.firstNonNull(row.getTimestamp(), DateTimeUtils.currentTimeMillis());
    int ttl = Objects.firstNonNull(row.getTtl(), 0L).intValue();
    Mutation mutation = CassandraRecordUtils.createMutation(
        row.getColname(), row.getColval(), ts, ttl);
    return ThriftRecord.of(key, mutation);
}
Example #6
Source File: CrunchBulkRecordWriter.java from hdfs2cass (Apache License 2.0)
private void setTypes(Mutation mutation) {
    if (cfType == null) {
        if (mutation.getColumn_or_supercolumn().isSetSuper_column()
                || mutation.getColumn_or_supercolumn().isSetCounter_super_column())
            cfType = CFType.SUPER;
        else
            cfType = CFType.NORMAL;
        if (mutation.getColumn_or_supercolumn().isSetCounter_column()
                || mutation.getColumn_or_supercolumn().isSetCounter_super_column())
            colType = ColType.COUNTER;
        else
            colType = ColType.NORMAL;
    }
}
Example #7
Source File: CassandraTransaction.java from Doradus (Apache License 2.0)
private static Mutation createDeleteColumnMutation(byte[] colName, long timestamp) {
    SlicePredicate slicePred = new SlicePredicate();
    slicePred.addToColumn_names(ByteBuffer.wrap(colName));
    Deletion deletion = new Deletion();
    deletion.setPredicate(slicePred);
    deletion.setTimestamp(timestamp);
    Mutation mutation = new Mutation();
    mutation.setDeletion(deletion);
    return mutation;
}
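The deletion path mirrors the insert path: instead of a ColumnOrSuperColumn, the Mutation carries a Deletion whose SlicePredicate names the columns to remove. For comparison, here is a hedged sketch of a whole-row delete; to the best of my recollection of the Thrift API, a Deletion with a timestamp but no predicate (and no super column) targets the entire row rather than named columns.

import org.apache.cassandra.thrift.Deletion;
import org.apache.cassandra.thrift.Mutation;

public class RowDeleteSketch {

    // Assumption: omitting the SlicePredicate (and super column) makes the
    // Deletion apply to the whole row instead of specific columns.
    public static Mutation rowDeleteMutation(long timestamp) {
        Deletion deletion = new Deletion();
        deletion.setTimestamp(timestamp);
        Mutation mutation = new Mutation();
        mutation.setDeletion(deletion);
        return mutation;
    }
}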
Example #8
Source File: ThriftInserter.java from stratio-cassandra (Apache License 2.0)
public void run(final ThriftClient client) throws IOException {
    final ByteBuffer key = getKey();
    final List<Column> columns = getColumns();

    List<Mutation> mutations = new ArrayList<>(columns.size());
    for (Column c : columns) {
        ColumnOrSuperColumn column = new ColumnOrSuperColumn().setColumn(c);
        mutations.add(new Mutation().setColumn_or_supercolumn(column));
    }
    Map<String, List<Mutation>> row = Collections.singletonMap(type.table, mutations);
    final Map<ByteBuffer, Map<String, List<Mutation>>> record = Collections.singletonMap(key, row);

    timeWithRetry(new RunOp() {
        @Override
        public boolean run() throws Exception {
            client.batch_mutate(record, settings.command.consistencyLevel);
            return true;
        }

        @Override
        public int partitionCount() {
            return 1;
        }

        @Override
        public int rowCount() {
            return 1;
        }
    });
}
Example #9
Source File: ThriftCounterAdder.java from stratio-cassandra (Apache License 2.0)
public void run(final ThriftClient client) throws IOException {
    List<CounterColumn> columns = new ArrayList<>();
    for (ByteBuffer name : select().select(settings.columns.names))
        columns.add(new CounterColumn(name, counteradd.next()));

    List<Mutation> mutations = new ArrayList<>(columns.size());
    for (CounterColumn c : columns) {
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn().setCounter_column(c);
        mutations.add(new Mutation().setColumn_or_supercolumn(cosc));
    }
    Map<String, List<Mutation>> row = Collections.singletonMap(type.table, mutations);

    final ByteBuffer key = getKey();
    final Map<ByteBuffer, Map<String, List<Mutation>>> record = Collections.singletonMap(key, row);

    timeWithRetry(new RunOp() {
        @Override
        public boolean run() throws Exception {
            client.batch_mutate(record, settings.command.consistencyLevel);
            return true;
        }

        @Override
        public int partitionCount() {
            return 1;
        }

        @Override
        public int rowCount() {
            return 1;
        }
    });
}
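The counter-specific part of this example is just the union setter: a CounterColumn goes into the ColumnOrSuperColumn via setCounter_column rather than setColumn. Isolated into a minimal helper (illustrative names, not from stratio-cassandra):

import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.CounterColumn;
import org.apache.cassandra.thrift.Mutation;
import org.apache.cassandra.utils.ByteBufferUtil;

public class CounterSketch {

    // Wraps a counter increment ("add delta to this counter column") in a Mutation.
    // Counter columns carry no timestamp; the server resolves concurrent adds.
    public static Mutation incrementMutation(String counterName, long delta) {
        CounterColumn counter = new CounterColumn(ByteBufferUtil.bytes(counterName), delta);
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn().setCounter_column(counter);
        return new Mutation().setColumn_or_supercolumn(cosc);
    }
}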
Example #10
Source File: BulkRecordWriter.java from stratio-cassandra (Apache License 2.0)
private void setTypes(Mutation mutation) {
    if (cfType == null) {
        if (mutation.getColumn_or_supercolumn().isSetSuper_column()
                || mutation.getColumn_or_supercolumn().isSetCounter_super_column())
            cfType = CFType.SUPER;
        else
            cfType = CFType.NORMAL;
        if (mutation.getColumn_or_supercolumn().isSetCounter_column()
                || mutation.getColumn_or_supercolumn().isSetCounter_super_column())
            colType = ColType.COUNTER;
        else
            colType = ColType.NORMAL;
    }
}
Example #11
Source File: ThriftByFieldNamesFn.java from hdfs2cass (Apache License 2.0)
@Override
public ThriftRecord map(T input) {
    ByteBuffer key = getRowKey(input);
    List<Mutation> values = getMutations(input);
    return ThriftRecord.of(key, values);
}
Example #12
Source File: ThriftConverter.java from hdfs2cass (Apache License 2.0)
@Override
public Class<Collection<Mutation>> getValueClass() {
    return (Class<Collection<Mutation>>) (Class<?>) Collection.class;
}
Example #13
Source File: CassandraOutputData.java from learning-hadoop (Apache License 2.0)
/**
 * Adds a Kettle row to a thrift-based batch (builds the map of keys to mutations).
 *
 * @param thriftBatch the map of keys to mutations
 * @param colFamilyName the name of the column family (table) to insert into
 * @param inputMeta Kettle input row meta data
 * @param keyIndex the index of the incoming field to use as the key for inserting
 * @param row the Kettle row
 * @param cassandraMeta meta data on the columns in the Cassandra column family (table)
 * @param insertFieldsNotInMetaData true if any Kettle fields that are not in the
 *          Cassandra column family (table) meta data are to be inserted. This is
 *          irrelevant if the user has opted to have the step initially update the
 *          Cassandra meta data for incoming fields that are not known about.
 * @param log the log channel to report problems to
 * @param isAsIndexColumn true if the column value should be written as the
 *          placeholder "-" rather than the field's actual value
 * @return true if the row was added to the batch
 * @throws KettleException if a problem occurs
 */
public static boolean addRowToThriftBatch(
        Map<ByteBuffer, Map<String, List<Mutation>>> thriftBatch,
        String colFamilyName, RowMetaInterface inputMeta, int keyIndex,
        Object[] row, CassandraColumnMetaData cassandraMeta,
        boolean insertFieldsNotInMetaData, LogChannelInterface log,
        boolean isAsIndexColumn) throws KettleException {

    if (!preAddChecks(inputMeta, keyIndex, row, log)) {
        return false;
    }
    ValueMetaInterface keyMeta = inputMeta.getValueMeta(keyIndex);
    ByteBuffer keyBuff = cassandraMeta.kettleValueToByteBuffer(keyMeta, row[keyIndex], true);

    Map<String, List<Mutation>> mapCF = thriftBatch.get(keyBuff);
    List<Mutation> mutList = null;

    // check to see if we have already got some mutations for this key in the batch
    if (mapCF != null) {
        mutList = mapCF.get(colFamilyName);
    } else {
        mapCF = new HashMap<String, List<Mutation>>(1);
        mutList = new ArrayList<Mutation>();
    }

    for (int i = 0; i < inputMeta.size(); i++) {
        if (i != keyIndex) {
            ValueMetaInterface colMeta = inputMeta.getValueMeta(i);
            String colName = colMeta.getName();
            if (!cassandraMeta.columnExistsInSchema(colName) && !insertFieldsNotInMetaData) {
                continue;
            }
            // don't insert if null!
            if (colMeta.isNull(row[i])) {
                continue;
            }

            Column col = new Column(cassandraMeta.columnNameToByteBuffer(colName));
            if (isAsIndexColumn) {
                col = col.setValue(cassandraMeta.kettleValueToByteBuffer(colMeta, "-", false));
            } else {
                col = col.setValue(cassandraMeta.kettleValueToByteBuffer(colMeta, row[i], false));
            }
            col = col.setTimestamp(System.currentTimeMillis());
            ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
            cosc.setColumn(col);
            Mutation mut = new Mutation();
            mut.setColumn_or_supercolumn(cosc);
            mutList.add(mut);
        }
    }

    // column family name -> mutations
    mapCF.put(colFamilyName, mutList);
    // row key -> column family -> mutations
    thriftBatch.put(keyBuff, mapCF);
    return true;
}
Example #14
Source File: ThriftConverter.java from hdfs2cass (Apache License 2.0)
@Override
public Collection<Mutation> outputValue(final Pair<ByteBuffer, Collection<Mutation>> value) {
    return value.second();
}
Example #15
Source File: ThriftConverter.java from hdfs2cass (Apache License 2.0)
@Override
public ByteBuffer outputKey(final Pair<ByteBuffer, Collection<Mutation>> value) {
    return value.first();
}
Example #16
Source File: ThriftConverter.java from hdfs2cass (Apache License 2.0)
@Override
public Pair<ByteBuffer, Iterable<Collection<Mutation>>> convertIterableInput(
        final ByteBuffer key, final Iterable<Collection<Mutation>> value) {
    return Pair.of(key, value);
}
Example #17
Source File: ThriftConverter.java from hdfs2cass (Apache License 2.0)
@Override
public Pair<ByteBuffer, Collection<Mutation>> convertInput(
        final ByteBuffer key, final Collection<Mutation> value) {
    return Pair.of(key, value);
}
Example #18
Source File: ThriftRecord.java from hdfs2cass (Apache License 2.0)
@Override
public Pair<ByteBuffer, Collection<Mutation>> map(final ThriftRecord input) {
    return input.asPair();
}
Example #19
Source File: ThriftRecord.java from hdfs2cass (Apache License 2.0)
public List<Mutation> getValues() {
    return values;
}
Example #20
Source File: ThriftRecord.java from hdfs2cass (Apache License 2.0)
public Pair<ByteBuffer, Collection<Mutation>> asPair() {
    Collection<Mutation> collection = values;
    return Pair.of(key, collection);
}
Example #21
Source File: ThriftRecord.java from hdfs2cass (Apache License 2.0)
public static ThriftRecord of(final ByteBuffer key, final Mutation... values) {
    return of(key, Lists.newArrayList(values));
}
Example #22
Source File: CassandraRecordUtils.java from hdfs2cass (Apache License 2.0)
public static Mutation createMutation(final Object name, final Object value) {
    return createMutation(name, value, DateTimeUtils.currentTimeMillis(), 0);
}
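This two-argument overload defaults the timestamp to the current time and disables TTL. Combined with ThriftRecord.of from Example #21, a complete record for one row might look like the following hypothetical usage sketch (not taken from hdfs2cass):

import java.nio.ByteBuffer;

import org.apache.cassandra.thrift.Mutation;
import org.apache.cassandra.utils.ByteBufferUtil;
// CassandraRecordUtils and ThriftRecord are the hdfs2cass classes shown in
// Examples #21/#22; their package imports are omitted here.

public class ThriftRecordSketch {

    public static ThriftRecord exampleRecord() {
        // One row with two column insertions, timestamped "now", with no TTL.
        ByteBuffer rowKey = ByteBufferUtil.bytes("user-42");
        Mutation name = CassandraRecordUtils.createMutation("name", "Ada");
        Mutation email = CassandraRecordUtils.createMutation("email", "ada@example.com");
        return ThriftRecord.of(rowKey, name, email);
    }
}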
Example #23
Source File: CassandraSuperPut.java from Hive-Cassandra (Apache License 2.0)
@Override
public void write(String keySpace, CassandraProxyClient client, JobConf jc) throws IOException {
    ConsistencyLevel flevel = getConsistencyLevel(jc);
    int batchMutation = getBatchMutationSize(jc);
    Map<ByteBuffer, Map<String, List<Mutation>>> mutation_map =
            new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
    Map<String, List<Mutation>> maps = new HashMap<String, List<Mutation>>();

    int count = 0;
    for (CassandraPut c : subColumns) {
        List<Column> columns = new ArrayList<Column>();
        for (CassandraColumn col : c.getColumns()) {
            Column cassCol = new Column();
            cassCol.setValue(col.getValue());
            cassCol.setTimestamp(col.getTimeStamp());
            cassCol.setName(col.getColumn());
            columns.add(cassCol);

            ColumnOrSuperColumn thisSuperCol = new ColumnOrSuperColumn();
            thisSuperCol.setSuper_column(new SuperColumn(c.getKey(), columns));

            Mutation mutation = new Mutation();
            mutation.setColumn_or_supercolumn(thisSuperCol);

            List<Mutation> mutList = maps.get(col.getColumnFamily());
            if (mutList == null) {
                mutList = new ArrayList<Mutation>();
                maps.put(col.getColumnFamily(), mutList);
            }
            mutList.add(mutation);
            count++;

            if (count == batchMutation) {
                mutation_map.put(key, maps);
                commitChanges(keySpace, client, flevel, mutation_map);
                // reset mutation map, maps and count
                mutation_map = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
                maps = new HashMap<String, List<Mutation>>();
                count = 0;
            }
        }
    }
    if (count > 0) {
        mutation_map.put(key, maps);
        commitChanges(keySpace, client, flevel, mutation_map);
    }
}
Example #24
Source File: CassandraPut.java from Hive-Cassandra (Apache License 2.0)
@Override
public void write(String keySpace, CassandraProxyClient client, JobConf jc) throws IOException {
    ConsistencyLevel flevel = getConsistencyLevel(jc);
    int batchMutation = getBatchMutationSize(jc);
    Map<ByteBuffer, Map<String, List<Mutation>>> mutation_map =
            new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
    Map<String, List<Mutation>> maps = new HashMap<String, List<Mutation>>();

    int count = 0;
    // TODO check for counter
    for (CassandraColumn col : columns) {
        Column cassCol = new Column();
        cassCol.setValue(col.getValue());
        cassCol.setTimestamp(col.getTimeStamp());
        cassCol.setName(col.getColumn());

        ColumnOrSuperColumn thisCol = new ColumnOrSuperColumn();
        thisCol.setColumn(cassCol);

        Mutation mutation = new Mutation();
        mutation.setColumn_or_supercolumn(thisCol);

        List<Mutation> mutList = maps.get(col.getColumnFamily());
        if (mutList == null) {
            mutList = new ArrayList<Mutation>();
            maps.put(col.getColumnFamily(), mutList);
        }
        mutList.add(mutation);
        count++;

        if (count == batchMutation) {
            mutation_map.put(key, maps);
            commitChanges(keySpace, client, flevel, mutation_map);
            // reset mutation map, maps and count
            mutation_map = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
            maps = new HashMap<String, List<Mutation>>();
            count = 0;
        }
    }
    if (count > 0) {
        mutation_map.put(key, maps);
        commitChanges(keySpace, client, flevel, mutation_map);
    }
}
Example #25
Source File: CassandraOutputData.java from learning-hadoop (Apache License 2.0)
public static Map<ByteBuffer, Map<String, List<Mutation>>> newThriftBatch(int numRows) {
    return new HashMap<ByteBuffer, Map<String, List<Mutation>>>(numRows);
}
Example #26
Source File: ThriftRecord.java from hdfs2cass (Apache License 2.0)
/**
 * @param key Cassandra row (i.e. partition) key
 * @param values list of columns belonging to this row
 * @return a new ThriftRecord wrapping the key and its mutations
 */
public static ThriftRecord of(final ByteBuffer key, final List<Mutation> values) {
    return new ThriftRecord(key, values);
}
Example #27
Source File: ThriftRecord.java from hdfs2cass (Apache License 2.0)
/**
 * A ThriftRecord consists of a Cassandra row key and a collection of
 * {@link org.apache.cassandra.thrift.Mutation}.
 * Mutations are passed to {@link org.apache.cassandra.hadoop.BulkOutputFormat}
 * and correspond to column insertions.
 * Mutations can be in any order. One row can be split into multiple ThriftRecords;
 * Cassandra will eventually merge them.
 * Placing 5,000+ mutations in one record causes a lot of memory pressure and
 * should be avoided.
 *
 * @param key Cassandra row (i.e. partition) key
 * @param values list of columns belonging to this row
 */
public ThriftRecord(final ByteBuffer key, final List<Mutation> values) {
    this.key = key;
    this.values = values;
}
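Given the memory-pressure warning in that javadoc, a caller building very wide rows may want to split one logical row into several ThriftRecords for the same key, which the constructor's documentation says Cassandra will merge. A hypothetical chunking sketch (helper name and threshold are illustrative):

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

import org.apache.cassandra.thrift.Mutation;

public class ChunkingSketch {

    // Splits one logical row into several ThriftRecords of at most
    // maxPerRecord mutations each, relying on Cassandra to merge them.
    public static List<ThriftRecord> chunk(ByteBuffer key, List<Mutation> mutations,
                                           int maxPerRecord) {
        List<ThriftRecord> records = new ArrayList<>();
        for (int i = 0; i < mutations.size(); i += maxPerRecord) {
            int end = Math.min(i + maxPerRecord, mutations.size());
            records.add(ThriftRecord.of(key, new ArrayList<>(mutations.subList(i, end))));
        }
        return records;
    }
}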