org.apache.cassandra.thrift.KeyRange Java Examples
The following examples show how to use org.apache.cassandra.thrift.KeyRange, the Thrift struct that describes a contiguous range of rows, either by key or by token, for range queries such as get_range_slices. Each example is taken from an open-source project, named above the code.
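All of the examples below revolve around the same Thrift call: build a KeyRange (by key or by token), pair it with a SlicePredicate, and pass both to Cassandra.Client.get_range_slices. Here is a minimal sketch of that pattern, assuming an already connected client with the keyspace set; the method name firstPage is illustrative, not part of any project below.

import java.nio.ByteBuffer;
import java.util.List;

import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.KeyRange;
import org.apache.cassandra.thrift.KeySlice;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;

// Fetch the first page of rows from a column family. Transport setup,
// login, and set_keyspace are assumed to have happened elsewhere.
List<KeySlice> firstPage(Cassandra.Client client, String columnFamily) throws Exception {
    // Empty start and end keys select the whole ring; count caps the page size.
    KeyRange keyRange = new KeyRange();
    keyRange.setStart_key(new byte[0]);
    keyRange.setEnd_key(new byte[0]);
    keyRange.setCount(100);

    // An empty, unbounded slice range returns every column of each row.
    SlicePredicate predicate = new SlicePredicate();
    predicate.setSlice_range(new SliceRange(
            ByteBuffer.wrap(new byte[0]), ByteBuffer.wrap(new byte[0]), false, 100));

    return client.get_range_slices(
            new ColumnParent(columnFamily), predicate, keyRange, ConsistencyLevel.ONE);
}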
Example #1
Source File: DBConn.java From Doradus with Apache License 2.0
// Renders a KeyRange as a readable string for log messages.
private static String toString(KeyRange keyRange) {
    ByteBuffer startKey = keyRange.start_key;
    String startKeyStr = "<null>";
    if (startKey != null) {
        startKeyStr = Utils.toString(startKey.array(), startKey.arrayOffset(), startKey.limit());
    }
    if (startKeyStr.length() == 0) {
        startKeyStr = "<first>";
    }
    ByteBuffer endKey = keyRange.end_key;
    String endKeyStr = "<null>";
    if (endKey != null) {
        endKeyStr = Utils.toString(endKey.array(), endKey.arrayOffset(), endKey.limit());
    }
    if (endKeyStr.length() == 0) {
        endKeyStr = "<last>";
    }
    StringBuilder buffer = new StringBuilder();
    if (startKeyStr.equals("<first>") && endKeyStr.equals("<last>")) {
        buffer.append("Keys(<all>)");
    } else if (startKeyStr.equals(endKeyStr)) {
        buffer.append("Key('");
        buffer.append(startKeyStr);
        buffer.append("')");
    } else {
        buffer.append("Keys('");
        buffer.append(startKeyStr);
        buffer.append("' to '");
        buffer.append(endKeyStr);
        buffer.append("')");
    }
    return buffer.toString();
}
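This helper is purely for logging: a range with empty start and end keys renders as Keys(<all>), a range whose start and end match renders as Key('k'), and anything else as Keys('a' to 'b'). Note that it reads each ByteBuffer via array()/arrayOffset()/limit(), which assumes array-backed buffers.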
Example #2
Source File: CassandraDefs.java From Doradus with Apache License 2.0
/**
 * Create a KeyRange that begins at the given row key.
 *
 * @param startRowKey Starting row key as a byte[].
 * @return            KeyRange that starts at the given row, open-ended.
 */
static KeyRange keyRangeStartRow(byte[] startRowKey) {
    KeyRange keyRange = new KeyRange();
    keyRange.setStart_key(startRowKey);
    keyRange.setEnd_key(EMPTY_BYTE_BUFFER);
    keyRange.setCount(MAX_ROWS_BATCH_SIZE);
    return keyRange;
}
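The empty end key leaves the range open-ended, which lets the same helper restart a scan at the last key seen; a paging sketch built on the count overload follows Example #3.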
Example #3
Source File: CassandraDefs.java From Doradus with Apache License 2.0
// Overload that caps the batch at the given count; a null start key scans from the beginning.
static KeyRange keyRangeStartRow(byte[] startRowKey, int count) {
    KeyRange keyRange = new KeyRange();
    keyRange.setStart_key(startRowKey == null ? EMPTY_BYTES : startRowKey);
    keyRange.setEnd_key(EMPTY_BYTES);
    keyRange.setCount(count);
    return keyRange;
}
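Built on the overload above, here is a hedged sketch of the scan loop this helper is designed for: each batch restarts the open-ended range at the last key returned, the duplicated restart row is skipped, and a short batch ends the scan. The connected Cassandra.Client, ColumnParent, SlicePredicate, and the processRow callback are assumptions, not part of the original source.

static void scanAllRows(Cassandra.Client client, ColumnParent parent,
        SlicePredicate predicate, int batchSize) throws Exception {
    byte[] lastKey = null;
    while (true) {
        // Restart the open-ended range at the last key seen (null scans from the start).
        KeyRange range = keyRangeStartRow(lastKey, batchSize);
        List<KeySlice> batch = client.get_range_slices(parent, predicate, range, ConsistencyLevel.ONE);
        for (KeySlice row : batch) {
            // The restart key comes back again in the next batch; skip the duplicate.
            if (lastKey != null && Arrays.equals(row.getKey(), lastKey)) {
                continue;
            }
            processRow(row); // hypothetical per-row callback
        }
        if (batch.size() < batchSize) {
            return; // a short batch means the ring has been fully scanned
        }
        lastKey = batch.get(batch.size() - 1).getKey();
    }
}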
Example #4
Source File: CassandraDefs.java From Doradus with Apache License 2.0
/**
 * Create a KeyRange that selects a single row with the given key.
 *
 * @param rowKey Row key as a byte[].
 * @return       KeyRange that starts and ends with the given key.
 */
static KeyRange keyRangeSingleRow(byte[] rowKey) {
    KeyRange keyRange = new KeyRange();
    keyRange.setStart_key(rowKey);
    keyRange.setEnd_key(rowKey);
    keyRange.setCount(1);
    return keyRange;
}
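Paired with get_range_slices, the single-row range behaves like a point lookup. A brief usage sketch, assuming the connected client, parent, and predicate from the earlier sketches; the row key "user-42" is purely illustrative.

KeyRange one = CassandraDefs.keyRangeSingleRow("user-42".getBytes(StandardCharsets.UTF_8));
List<KeySlice> result = client.get_range_slices(parent, predicate, one, ConsistencyLevel.ONE);
// result holds at most one KeySlice; an empty list means the row does not exist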
Example #5
Source File: InputFormatGrakn.java From grakn with GNU Affero General Public License v3.0
public List<org.apache.hadoop.mapreduce.InputSplit> getSplits(JobContext context) throws IOException {
    Configuration conf = HadoopCompat.getConfiguration(context);
    validateConfiguration(conf);

    keyspace = ConfigHelper.getInputKeyspace(conf);
    cfName = ConfigHelper.getInputColumnFamily(conf);
    partitioner = ConfigHelper.getInputPartitioner(conf);
    LOG.trace("partitioner is {}", partitioner);

    // canonical ranges, split into pieces, fetching the splits in parallel
    ExecutorService executor = new ThreadPoolExecutor(0, 128, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
    List<org.apache.hadoop.mapreduce.InputSplit> splits = new ArrayList<>();

    try (CqlSession session = getInputSession(ConfigHelper.getInputInitialAddress(conf).split(","), conf)) {
        List<Future<List<org.apache.hadoop.mapreduce.InputSplit>>> splitfutures = new ArrayList<>();
        KeyRange jobKeyRange = ConfigHelper.getInputKeyRange(conf);
        Range<Token> jobRange = null;
        if (jobKeyRange != null) {
            if (jobKeyRange.start_key != null) {
                if (!partitioner.preservesOrder()) {
                    throw new UnsupportedOperationException("KeyRange based on keys can only be used with a order preserving partitioner");
                }
                if (jobKeyRange.start_token != null) {
                    throw new IllegalArgumentException("only start_key supported");
                }
                if (jobKeyRange.end_token != null) {
                    throw new IllegalArgumentException("only start_key supported");
                }
                jobRange = new Range<>(partitioner.getToken(jobKeyRange.start_key), partitioner.getToken(jobKeyRange.end_key));
            } else if (jobKeyRange.start_token != null) {
                jobRange = new Range<>(partitioner.getTokenFactory().fromString(jobKeyRange.start_token), partitioner.getTokenFactory().fromString(jobKeyRange.end_token));
            } else {
                LOG.warn("ignoring jobKeyRange specified without start_key or start_token");
            }
        }

        Metadata metadata = session.getMetadata();

        // canonical ranges and nodes holding replicas
        Map<TokenRange, Set<Node>> masterRangeNodes = getRangeMap(keyspace, metadata);
        for (TokenRange range : masterRangeNodes.keySet()) {
            if (jobRange == null) {
                // for each tokenRange, pick a live owner and ask it to compute bite-sized splits
                splitfutures.add(executor.submit(new SplitCallable(range, masterRangeNodes.get(range), conf, session)));
            } else {
                TokenRange jobTokenRange = rangeToTokenRange(metadata, jobRange);
                if (range.intersects(jobTokenRange)) {
                    for (TokenRange intersection : range.intersectWith(jobTokenRange)) {
                        // for each tokenRange, pick a live owner and ask it to compute bite-sized splits
                        splitfutures.add(executor.submit(new SplitCallable(intersection, masterRangeNodes.get(range), conf, session)));
                    }
                }
            }
        }

        // wait until we have all the results back
        for (Future<List<org.apache.hadoop.mapreduce.InputSplit>> futureInputSplits : splitfutures) {
            try {
                splits.addAll(futureInputSplits.get());
            } catch (Exception e) {
                throw new IOException("Could not get input splits", e);
            }
        }
    } finally {
        executor.shutdownNow();
    }

    Collections.shuffle(splits, new Random(System.nanoTime()));
    return splits;
}
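Two constraints in this example are worth noting: a key-based jobKeyRange only works with an order-preserving partitioner (key order must match token order for the range to be contiguous), and mixing keys with start_token/end_token is rejected outright. When no job range is configured, every canonical token range in the cluster is split in parallel, and the resulting splits are shuffled, presumably to spread concurrent mappers across replicas.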
Example #6
Source File: CassandraInputData.java From learning-hadoop with Apache License 2.0
public void sliceModeInit(CassandraColumnMetaData meta, List<String> colNames, int maxRows,
        int maxCols, int rowBatchSize, int colBatchSize) throws KettleException {
    m_newSliceQuery = true;
    m_requestedCols = colNames;
    m_sliceRowsMax = maxRows;
    m_sliceColsMax = maxCols;
    m_sliceRowsBatchSize = rowBatchSize;
    m_sliceColsBatchSize = colBatchSize;
    m_rowIndex = 0;
    m_colIndex = 0;

    if (m_sliceColsBatchSize <= 0) {
        m_sliceColsBatchSize = Integer.MAX_VALUE;
    }
    if (m_sliceRowsBatchSize <= 0) {
        m_sliceRowsBatchSize = Integer.MAX_VALUE;
    }

    List<ByteBuffer> specificCols = null;
    if (m_requestedCols != null && m_requestedCols.size() > 0) {
        specificCols = new ArrayList<ByteBuffer>();
        // encode the textual column names
        for (String colName : m_requestedCols) {
            ByteBuffer encoded = meta.columnNameToByteBuffer(colName);
            specificCols.add(encoded);
        }
    }

    m_slicePredicate = new SlicePredicate();
    if (specificCols == null) {
        m_sliceRange = new SliceRange(ByteBuffer.wrap(new byte[0]), ByteBuffer.wrap(new byte[0]),
                false, m_sliceColsBatchSize);
        m_slicePredicate.setSlice_range(m_sliceRange);
    } else {
        m_slicePredicate.setColumn_names(specificCols);
    }

    m_keyRange = new KeyRange(m_sliceRowsBatchSize);
    m_keyRange.setStart_key(new byte[0]);
    m_keyRange.setEnd_key(new byte[0]);

    m_colParent = new ColumnParent(meta.getColumnFamilyName());
    m_converted = new ArrayList<Object[]>();
}
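Here row paging and column paging are configured together: a SliceRange with empty start and end buffers selects all columns up to the column batch size, an explicit column-name list selects just those columns, and the KeyRange(count) constructor with empty start and end keys pages over the entire ring. Batch sizes of zero or less are treated as unlimited.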
Example #7
Source File: ColumnFamilyWideRowRecordReader.java From Hive-Cassandra with Apache License 2.0
private void maybeInit() {
    // check if we need another row
    if (rows != null && columnsRead < rowPageSize) {
        columnsRead = 0;
        startToken = partitioner.getTokenFactory().toString(partitioner.getToken(rows.get(0).key));
        predicate.getSlice_range().setStart(startSlicePredicate);
        rows = null;
        prevStartSlice = null;
        totalRead++;
    }

    if (startToken == null) {
        startToken = split.getStartToken();
    } else if (startToken.equals(split.getEndToken()) && rows == null) {
        // reached end of the split
        return;
    }

    KeyRange keyRange = new KeyRange(batchRowCount)
            .setStart_token(startToken)
            .setEnd_token(split.getEndToken());
    try {
        rows = client.get_range_slices(new ColumnParent(cfName), predicate, keyRange, consistencyLevel);

        // nothing new? reached the end
        if (rows.isEmpty()) {
            rows = null;
            return;
        }

        // detect infinite loop
        if (prevStartSlice != null
                && ByteBufferUtil.compareUnsigned(prevStartSlice, predicate.slice_range.start) == 0) {
            rows = null;
            return;
        }

        // prepare for the next slice to be read
        KeySlice row = rows.get(0);
        if (row.getColumnsSize() > 0) {
            ColumnOrSuperColumn cosc = row.getColumns().get(row.getColumnsSize() - 1);
            prevStartSlice = predicate.slice_range.start;

            // prepare next slice
            if (cosc.column != null) {
                predicate.slice_range.start = cosc.column.name;
            }
            if (cosc.super_column != null) {
                predicate.slice_range.start = cosc.super_column.name;
            }
            if (cosc.counter_column != null) {
                predicate.slice_range.start = cosc.counter_column.name;
            }
            if (cosc.counter_super_column != null) {
                predicate.slice_range.start = cosc.counter_super_column.name;
            }
            columnsRead = row.getColumnsSize();

            // If we've hit the max columns, remove the last column
            // so we know where to start the next slice without overlap
            if (columnsRead == rowPageSize) {
                row.getColumns().remove(columnsRead - 1);
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
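This reader pages through a single wide row column-slice by column-slice: after each fetch, the slice start is advanced to the name of the last column returned (whichever of the four ColumnOrSuperColumn variants is set), and when a full page comes back, that last column is dropped from the results so it is not emitted twice when the next slice restarts at it. The prevStartSlice comparison guards against an infinite loop when the slice start stops advancing.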