org.apache.cassandra.dht.IPartitioner Java Examples
The following examples show how to use org.apache.cassandra.dht.IPartitioner.
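As a quick orientation before the examples, the sketch below shows the core IPartitioner operations they rely on: decorating a raw key into a DecoratedKey, hashing a key to a Token, obtaining the ring's minimum token, and getting the token factory. It is a minimal illustration assembled from calls that appear in the examples; the partitioner choice and key value are arbitrary.

import java.nio.ByteBuffer;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.RandomPartitioner;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.utils.ByteBufferUtil;

// inside any method:
IPartitioner p = new RandomPartitioner();            // any concrete partitioner works here
ByteBuffer rawKey = ByteBufferUtil.bytes("key1");    // an arbitrary partition key
DecoratedKey key = p.decorateKey(rawKey);            // the key paired with its token, as stored on disk
Token token = p.getToken(rawKey);                    // the token alone
Token min = p.getMinimumToken();                     // lower bound of the ring, used to build full ranges
Token.TokenFactory factory = p.getTokenFactory();    // converts tokens to/from bytes and strings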
Example #1
Source File: RangeUtils.java From deep-spark with Apache License 2.0
/**
 * Gets the list of tokens for each cluster machine.<br/>
 * The concrete class of the token depends on the partitioner used.<br/>
 *
 * @param query the query to execute against the given session to obtain the list of tokens.
 * @param sessionWithHost the pair object containing both the session and the name of the machine to which we're connected.
 * @param partitioner the partitioner used in the cluster.
 * @return a map containing, for each cluster machine, the list of tokens. Tokens are not returned in any particular
 * order.
 */
static Map<String, Iterable<Comparable>> fetchTokens(String query, final Pair<Session, String> sessionWithHost,
        IPartitioner partitioner) {
    ResultSet rSet = sessionWithHost.left.execute(query);

    final AbstractType tkValidator = partitioner.getTokenValidator();
    final Map<String, Iterable<Comparable>> tokens = Maps.newHashMap();

    Iterable<Pair<String, Iterable<Comparable>>> pairs =
            transform(rSet.all(), new FetchTokensRowPairFunction(sessionWithHost, tkValidator));

    for (Pair<String, Iterable<Comparable>> pair : pairs) {
        tokens.put(pair.left, pair.right);
    }
    return tokens;
}
Example #2
Source File: SSTableReader.java From stratio-cassandra with Apache License 2.0
/**
 * Open a RowIndexedReader which already has its state initialized (by SSTableWriter).
 */
static SSTableReader internalOpen(Descriptor desc,
                                  Set<Component> components,
                                  CFMetaData metadata,
                                  IPartitioner partitioner,
                                  SegmentedFile ifile,
                                  SegmentedFile dfile,
                                  IndexSummary isummary,
                                  IFilter bf,
                                  long maxDataAge,
                                  StatsMetadata sstableMetadata,
                                  OpenReason openReason)
{
    assert desc != null && partitioner != null && ifile != null && dfile != null && isummary != null && bf != null && sstableMetadata != null;
    return new SSTableReader(desc, components, metadata, partitioner, ifile, dfile, isummary, bf, maxDataAge, sstableMetadata, openReason);
}
Example #3
Source File: ThriftRangeUtilsTest.java From deep-spark with Apache License 2.0
private static <K extends Comparable, T extends Token<K>> void testDeepTokenRanges(IPartitioner<T> partitioner,
        K startToken, K endToken, List<String> endpoints, List<DeepTokenRange> expectedRanges) {
    ThriftRangeUtils utils = new ThriftRangeUtils(partitioner, "", 0, "", "", 0);

    Token.TokenFactory tokenFactory = partitioner.getTokenFactory();
    AbstractType tokenType = partitioner.getTokenValidator();
    String start = tokenFactory.toString(tokenFactory.fromByteArray(tokenType.decompose(startToken)));
    String end = tokenFactory.toString(tokenFactory.fromByteArray(tokenType.decompose(endToken)));

    CfSplit thriftSplit = new CfSplit(start, end, 0);
    List<DeepTokenRange> actualRanges = utils.deepTokenRanges(Arrays.asList(thriftSplit), endpoints);
    assertEquals(actualRanges, expectedRanges);
}
Example #4
Source File: ThriftRangeUtils.java From deep-spark with Apache License 2.0
/**
 * Builds a new {@link ThriftRangeUtils}.
 *
 * @param partitioner  the partitioner.
 * @param host         the host address.
 * @param rpcPort      the host RPC port.
 * @param keyspace     the keyspace name.
 * @param columnFamily the column family name.
 * @param splitSize    the number of rows per split.
 */
public ThriftRangeUtils(IPartitioner partitioner, String host, int rpcPort, String keyspace, String columnFamily,
        int splitSize) {
    this.host = host;
    this.rpcPort = rpcPort;
    this.splitSize = splitSize;
    this.keyspace = keyspace;
    this.columnFamily = columnFamily;
    tokenType = partitioner.getTokenValidator();
    tokenFactory = partitioner.getTokenFactory();
    minToken = (Comparable) partitioner.getMinimumToken().token;
}
Example #5
Source File: DeepRecordReader.java From deep-spark with Apache License 2.0
/**
 * Initializes this object.
 * <p>
 * Creates a new client and row iterator.
 * </p>
 */
private void initialize() {
    cfName = config.getTable();

    if (!ArrayUtils.isEmpty(config.getInputColumns())) {
        columns = StringUtils.join(config.getInputColumns(), ",");
    }

    partitioner = Utils.newTypeInstance(config.getPartitionerClassName(), IPartitioner.class);

    try {
        session = createConnection();
        retrieveKeys();
    } catch (Exception e) {
        throw new DeepIOException(e);
    }

    rowIterator = new RowIterator();
}
Example #6
Source File: SSTableReader.java From stratio-cassandra with Apache License 2.0
private SSTableReader(Descriptor desc,
                      Set<Component> components,
                      CFMetaData metadata,
                      IPartitioner partitioner,
                      SegmentedFile ifile,
                      SegmentedFile dfile,
                      IndexSummary indexSummary,
                      IFilter bloomFilter,
                      long maxDataAge,
                      StatsMetadata sstableMetadata,
                      OpenReason openReason)
{
    this(desc, components, metadata, partitioner, maxDataAge, sstableMetadata, openReason);

    this.ifile = ifile;
    this.dfile = dfile;
    this.indexSummary = indexSummary;
    this.bf = bloomFilter;
    this.setup(false);
}
Example #7
Source File: IndexSummaryTest.java From stratio-cassandra with Apache License 2.0
@Test
public void testAddEmptyKey() throws Exception
{
    IPartitioner p = new RandomPartitioner();
    try (IndexSummaryBuilder builder = new IndexSummaryBuilder(1, 1, BASE_SAMPLING_LEVEL))
    {
        builder.maybeAddEntry(p.decorateKey(ByteBufferUtil.EMPTY_BYTE_BUFFER), 0);
        IndexSummary summary = builder.build(p);
        assertEquals(1, summary.size());
        assertEquals(0, summary.getPosition(0));
        assertArrayEquals(new byte[0], summary.getKey(0));

        DataOutputBuffer dos = new DataOutputBuffer();
        IndexSummary.serializer.serialize(summary, dos, false);
        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(dos.toByteArray()));
        IndexSummary loaded = IndexSummary.serializer.deserialize(dis, p, false, 1, 1);

        assertEquals(1, loaded.size());
        assertEquals(summary.getPosition(0), loaded.getPosition(0));
        assertArrayEquals(summary.getKey(0), loaded.getKey(0)); // compare against the round-tripped copy, not the summary to itself
        summary.close();
        loaded.close();
    }
}
Example #8
Source File: RangeUtils.java From deep-spark with Apache License 2.0
private static List<DeepTokenRange> splitRanges(final List<DeepTokenRange> ranges, final IPartitioner p,
        final int bisectFactor) {
    if (bisectFactor == 1) {
        return ranges;
    }

    Iterable<DeepTokenRange> bisectedRanges =
            concat(transform(ranges, new Function<DeepTokenRange, List<DeepTokenRange>>() {
                @Nullable
                @Override
                public List<DeepTokenRange> apply(@Nullable DeepTokenRange input) {
                    final List<DeepTokenRange> splittedRanges = new ArrayList<>();
                    bisectTokeRange(input, p, bisectFactor, splittedRanges);
                    return splittedRanges;
                }
            }));

    return Lists.newArrayList(bisectedRanges);
}
Example #9
Source File: RangeUtils.java From deep-spark with Apache License 2.0
/**
 * Returns the token ranges that will be mapped to Spark partitions.
 *
 * @param config the Deep configuration object.
 * @return the list of computed token ranges.
 */
public static List<DeepTokenRange> getSplits(CassandraDeepJobConfig config) {
    Map<String, Iterable<Comparable>> tokens = new HashMap<>();
    IPartitioner p = getPartitioner(config);

    Pair<Session, String> sessionWithHost = CassandraClientProvider.getSession(config.getHost(), config, false);

    String queryLocal = "select tokens from system.local";
    tokens.putAll(fetchTokens(queryLocal, sessionWithHost, p));

    String queryPeers = "select peer, tokens from system.peers";
    tokens.putAll(fetchTokens(queryPeers, sessionWithHost, p));

    List<DeepTokenRange> merged = mergeTokenRanges(tokens, sessionWithHost.left, p);
    return splitRanges(merged, p, config.getBisectFactor());
}
Example #10
Source File: RangeUtils.java From deep-spark with Apache License 2.0
/**
 * Given a token, fetches the list of replica machines holding that token.
 *
 * @param token       the token whose replicas we want to fetch.
 * @param session     the connection to the cluster.
 * @param partitioner the partitioner used in the cluster.
 * @return the list of replica machines holding that token.
 */
private static List<String> initReplicas(final Comparable token, final Session session,
        final IPartitioner partitioner) {
    final AbstractType tkValidator = partitioner.getTokenValidator();
    final Metadata metadata = session.getCluster().getMetadata();

    @SuppressWarnings("unchecked")
    Set<Host> replicas = metadata.getReplicas(quote(session.getLoggedKeyspace()),
            ByteBuffer.wrap(token.toString().getBytes()));

    return Lists.newArrayList(Iterables.transform(replicas, new Function<Host, String>() {
        @Nullable
        @Override
        public String apply(@Nullable Host input) {
            assert input != null;
            return input.getAddress().getHostName();
        }
    }));
}
Example #11
Source File: RangeUtils.java From deep-spark with Apache License 2.0
/**
 * Merges the list of tokens for each cluster machine to a single list of token ranges.
 *
 * @param tokens  the map of tokens for each cluster machine.
 * @param session the connection to the cluster.
 * @param p       the partitioner used in the cluster.
 * @return the merged lists of tokens transformed to DeepTokenRange(s). The returned collection is deduplicated
 * and sorted.
 */
static List<DeepTokenRange> mergeTokenRanges(Map<String, Iterable<Comparable>> tokens, final Session session,
        final IPartitioner p) {
    final Iterable<Comparable> allRanges = Ordering.natural().sortedCopy(concat(tokens.values()));

    final Comparable maxValue = Ordering.natural().max(allRanges);
    final Comparable minValue = (Comparable) p.minValue(maxValue.getClass()).getToken().token;

    Function<Comparable, Set<DeepTokenRange>> map =
            new MergeTokenRangesFunction(maxValue, minValue, session, p, allRanges);

    Iterable<DeepTokenRange> concatenated = concat(transform(allRanges, map));

    Set<DeepTokenRange> dedup = Sets.newHashSet(concatenated);

    return Ordering.natural().sortedCopy(dedup);
}
Example #12
Source File: OperationTest.java From sasi with Apache License 2.0
@Test
public void testSatisfiedByWithMultipleTerms()
{
    final ByteBuffer comment = UTF8Type.instance.decompose("comment");
    final ColumnFamilyStore store = Keyspace.open("sasecondaryindex").getColumnFamilyStore("saindexed1");
    final IPartitioner<?> partitioner = StorageService.getPartitioner();

    ColumnFamily cf = ArrayBackedSortedColumns.factory.create(store.metadata);
    cf.addColumn(new Column(comment, UTF8Type.instance.decompose("software engineer is working on a project"), System.currentTimeMillis()));

    Operation.Builder builder = new Operation.Builder(OperationType.AND, UTF8Type.instance, controller,
                                        new IndexExpression(comment, IndexOperator.EQ, UTF8Type.instance.decompose("eng is a work")));
    Operation op = builder.complete();

    Assert.assertTrue(op.satisfiedBy(new Row(partitioner.decorateKey(UTF8Type.instance.decompose("key1")), cf), null, false));

    builder = new Operation.Builder(OperationType.AND, UTF8Type.instance, controller,
                                        new IndexExpression(comment, IndexOperator.EQ, UTF8Type.instance.decompose("soft works fine")));
    op = builder.complete();

    Assert.assertTrue(op.satisfiedBy(new Row(partitioner.decorateKey(UTF8Type.instance.decompose("key1")), cf), null, false));
}
Example #13
Source File: MerkleTree.java From stratio-cassandra with Apache License 2.0
public MerkleTree deserialize(DataInput in, int version) throws IOException
{
    byte hashdepth = in.readByte();
    long maxsize = in.readLong();
    long size = in.readLong();
    IPartitioner partitioner;
    try
    {
        partitioner = FBUtilities.newPartitioner(in.readUTF());
    }
    catch (ConfigurationException e)
    {
        throw new IOException(e);
    }

    // full range
    Token left = Token.serializer.deserialize(in);
    Token right = Token.serializer.deserialize(in);
    Range<Token> fullRange = new Range<>(left, right, partitioner);

    MerkleTree mt = new MerkleTree(partitioner, fullRange, hashdepth, maxsize);
    mt.size = size;
    mt.root = Hashable.serializer.deserialize(in, version);
    return mt;
}
Example #14
Source File: SerializationsTest.java From stratio-cassandra with Apache License 2.0
private void testValidationCompleteWrite() throws IOException
{
    IPartitioner p = new RandomPartitioner();
    // empty validation
    MerkleTree mt = new MerkleTree(p, FULL_RANGE, MerkleTree.RECOMMENDED_DEPTH, (int) Math.pow(2, 15));
    Validator v0 = new Validator(DESC, FBUtilities.getBroadcastAddress(), -1);
    ValidationComplete c0 = new ValidationComplete(DESC, mt);

    // validation with a tree
    mt = new MerkleTree(p, FULL_RANGE, MerkleTree.RECOMMENDED_DEPTH, Integer.MAX_VALUE);
    for (int i = 0; i < 10; i++)
        mt.split(p.getRandomToken());
    Validator v1 = new Validator(DESC, FBUtilities.getBroadcastAddress(), -1);
    ValidationComplete c1 = new ValidationComplete(DESC, mt);

    // validation failed
    ValidationComplete c3 = new ValidationComplete(DESC);

    testRepairMessageWrite("service.ValidationComplete.bin", c0, c1, c3);
}
Example #15
Source File: HintedHandOffManager.java From stratio-cassandra with Apache License 2.0
/**
 * Attempt delivery to any node for which we have hints. Necessary since we can generate hints even for
 * nodes which are never officially down/failed.
 */
private void scheduleAllDeliveries()
{
    logger.debug("Started scheduleAllDeliveries");

    // Force a major compaction to get rid of the tombstones and expired hints. Do it once, before we schedule any
    // individual replay, to avoid N - 1 redundant individual compactions (when N is the number of nodes with hints
    // to deliver to).
    compact();

    IPartitioner p = StorageService.getPartitioner();
    RowPosition minPos = p.getMinimumToken().minKeyBound();
    Range<RowPosition> range = new Range<>(minPos, minPos, p);
    IDiskAtomFilter filter = new NamesQueryFilter(ImmutableSortedSet.<CellName>of());
    List<Row> rows = hintStore.getRangeSlice(range, null, filter, Integer.MAX_VALUE, System.currentTimeMillis());
    for (Row row : rows)
    {
        UUID hostId = UUIDGen.getUUID(row.key.getKey());
        InetAddress target = StorageService.instance.getTokenMetadata().getEndpointForHostId(hostId);
        // token may have since been removed (in which case we have just read back a tombstone)
        if (target != null)
            scheduleHintDelivery(target, false);
    }

    logger.debug("Finished scheduleAllDeliveries");
}
Example #16
Source File: CassandraUtils.java From sstable-tools with Apache License 2.0
public static Cluster loadTablesFromRemote(String host, int port, String cfidOverrides) throws IOException {
    Map<String, UUID> cfs = parseOverrides(cfidOverrides);
    Cluster.Builder builder = Cluster.builder().addContactPoints(host).withPort(port);
    Cluster cluster = builder.build();
    Metadata metadata = cluster.getMetadata();
    IPartitioner partitioner = FBUtilities.newPartitioner(metadata.getPartitioner());
    if (DatabaseDescriptor.getPartitioner() == null)
        DatabaseDescriptor.setPartitionerUnsafe(partitioner);
    for (com.datastax.driver.core.KeyspaceMetadata ksm : metadata.getKeyspaces()) {
        if (!ksm.getName().equals("system")) {
            for (TableMetadata tm : ksm.getTables()) {
                String name = ksm.getName() + "." + tm.getName();
                try {
                    CassandraUtils.tableFromCQL(
                            new ByteArrayInputStream(tm.asCQLQuery().getBytes()),
                            cfs.get(name) != null ? cfs.get(name) : tm.getId());
                } catch (SyntaxException e) {
                    // ignore tables that we can't parse (probably DSE)
                    logger.debug("Ignoring table " + name + " due to syntax exception " + e.getMessage());
                }
            }
        }
    }
    return cluster;
}
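A hedged usage sketch of the method above. The address and port are placeholders, and passing null for the override string assumes parseOverrides tolerates it, which is not shown here.

Cluster cluster = CassandraUtils.loadTablesFromRemote("127.0.0.1", 9042, null);
try {
    // schemas for all non-system tables are now registered locally,
    // and DatabaseDescriptor holds the cluster's partitioner
} finally {
    cluster.close();
}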
Example #17
Source File: SSTable.java From stratio-cassandra with Apache License 2.0
protected SSTable(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner)
{
    // In almost all cases metadata shouldn't be null, but allowing null here makes it possible to create a mostly
    // functional SSTable without a full schema definition. SSTableLoader uses that ability.
    assert descriptor != null;
    assert components != null;
    assert partitioner != null;

    this.descriptor = descriptor;
    Set<Component> dataComponents = new HashSet<>(components);
    this.compression = dataComponents.contains(Component.COMPRESSION_INFO);
    this.components = new CopyOnWriteArraySet<>(dataComponents);
    this.metadata = metadata;
    this.partitioner = partitioner;
}
Example #18
Source File: StreamingTransferTest.java From stratio-cassandra with Apache License 2.0
private void transferRanges(ColumnFamilyStore cfs) throws Exception
{
    IPartitioner p = StorageService.getPartitioner();
    List<Range<Token>> ranges = new ArrayList<>();
    // wrapped range
    ranges.add(new Range<Token>(p.getToken(ByteBufferUtil.bytes("key1")), p.getToken(ByteBufferUtil.bytes("key0"))));
    new StreamPlan("StreamingTransferTest").transferRanges(LOCAL, cfs.keyspace.getName(), ranges, cfs.getColumnFamilyName()).execute().get();
}
Example #19
Source File: StreamingTransferTest.java From stratio-cassandra with Apache License 2.0
private void transferSSTables(SSTableReader sstable) throws Exception
{
    IPartitioner p = StorageService.getPartitioner();
    List<Range<Token>> ranges = new ArrayList<>();
    ranges.add(new Range<>(p.getMinimumToken(), p.getToken(ByteBufferUtil.bytes("key1"))));
    ranges.add(new Range<>(p.getToken(ByteBufferUtil.bytes("key2")), p.getMinimumToken()));
    transfer(sstable, ranges);
}
Example #20
Source File: StreamingTransferTest.java From stratio-cassandra with Apache License 2.0
@Test
public void testRandomSSTableTransfer() throws Exception
{
    final Keyspace keyspace = Keyspace.open("Keyspace1");
    final ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard1");
    Mutator mutator = new Mutator()
    {
        public void mutate(String key, String colName, long timestamp) throws Exception
        {
            ColumnFamily cf = ArrayBackedSortedColumns.factory.create(keyspace.getName(), cfs.name);
            cf.addColumn(column(colName, "value", timestamp));
            cf.addColumn(new BufferCell(cellname("birthdate"), ByteBufferUtil.bytes(new Date(timestamp).toString()), timestamp));
            Mutation rm = new Mutation("Keyspace1", ByteBufferUtil.bytes(key), cf);
            logger.debug("Applying row to transfer " + rm);
            rm.apply();
        }
    };
    // write a lot more data so the data is spread in more than 1 chunk.
    for (int i = 1; i <= 6000; i++)
        mutator.mutate("key" + i, "col" + i, System.currentTimeMillis());
    cfs.forceBlockingFlush();
    Util.compactAll(cfs, Integer.MAX_VALUE).get();
    SSTableReader sstable = cfs.getSSTables().iterator().next();
    cfs.clearUnsafe();

    IPartitioner p = StorageService.getPartitioner();
    List<Range<Token>> ranges = new ArrayList<>();
    ranges.add(new Range<>(p.getToken(ByteBufferUtil.bytes("key1")), p.getToken(ByteBufferUtil.bytes("key1000"))));
    ranges.add(new Range<>(p.getToken(ByteBufferUtil.bytes("key5")), p.getToken(ByteBufferUtil.bytes("key500"))));
    ranges.add(new Range<>(p.getToken(ByteBufferUtil.bytes("key9")), p.getToken(ByteBufferUtil.bytes("key900"))));
    transfer(sstable, ranges);
    assertEquals(1, cfs.getSSTables().size());
    assertEquals(7, Util.getRangeSlice(cfs).size());
}
Example #21
Source File: SerializationsTest.java From stratio-cassandra with Apache License 2.0
private void testRangeSliceCommandWrite() throws IOException
{
    IPartitioner part = StorageService.getPartitioner();
    AbstractBounds<RowPosition> bounds = new Range<Token>(part.getRandomToken(), part.getRandomToken()).toRowBounds();

    RangeSliceCommand namesCmd = new RangeSliceCommand(statics.KS, "Standard1", statics.readTs, namesPred, bounds, 100);
    MessageOut<RangeSliceCommand> namesCmdMsg = namesCmd.createMessage();
    RangeSliceCommand emptyRangeCmd = new RangeSliceCommand(statics.KS, "Standard1", statics.readTs, emptyRangePred, bounds, 100);
    MessageOut<RangeSliceCommand> emptyRangeCmdMsg = emptyRangeCmd.createMessage();
    RangeSliceCommand regRangeCmd = new RangeSliceCommand(statics.KS, "Standard1", statics.readTs, nonEmptyRangePred, bounds, 100);
    MessageOut<RangeSliceCommand> regRangeCmdMsg = regRangeCmd.createMessage();
    RangeSliceCommand namesCmdSup = new RangeSliceCommand(statics.KS, "Super1", statics.readTs, namesSCPred, bounds, 100);
    MessageOut<RangeSliceCommand> namesCmdSupMsg = namesCmdSup.createMessage();
    RangeSliceCommand emptyRangeCmdSup = new RangeSliceCommand(statics.KS, "Super1", statics.readTs, emptyRangePred, bounds, 100);
    MessageOut<RangeSliceCommand> emptyRangeCmdSupMsg = emptyRangeCmdSup.createMessage();
    RangeSliceCommand regRangeCmdSup = new RangeSliceCommand(statics.KS, "Super1", statics.readTs, nonEmptyRangeSCPred, bounds, 100);
    MessageOut<RangeSliceCommand> regRangeCmdSupMsg = regRangeCmdSup.createMessage();

    DataOutputStreamAndChannel out = getOutput("db.RangeSliceCommand.bin");
    namesCmdMsg.serialize(out, getVersion());
    emptyRangeCmdMsg.serialize(out, getVersion());
    regRangeCmdMsg.serialize(out, getVersion());
    namesCmdSupMsg.serialize(out, getVersion());
    emptyRangeCmdSupMsg.serialize(out, getVersion());
    regRangeCmdSupMsg.serialize(out, getVersion());
    out.close();

    // test serializedSize
    testSerializedSize(namesCmd, RangeSliceCommand.serializer);
    testSerializedSize(emptyRangeCmd, RangeSliceCommand.serializer);
    testSerializedSize(regRangeCmd, RangeSliceCommand.serializer);
    testSerializedSize(namesCmdSup, RangeSliceCommand.serializer);
    testSerializedSize(emptyRangeCmdSup, RangeSliceCommand.serializer);
    testSerializedSize(regRangeCmdSup, RangeSliceCommand.serializer);
}
Example #22
Source File: TokenMapper.java From stratio-cassandra with Apache License 2.0
/**
 * Returns a new {@link TokenMapper} instance for the current partitioner using the specified column family
 * metadata.
 *
 * @param metadata The column family metadata.
 * @return A new {@link TokenMapper} instance for the current partitioner.
 */
public static TokenMapper instance(CFMetaData metadata)
{
    IPartitioner partitioner = DatabaseDescriptor.getPartitioner();
    if (partitioner instanceof Murmur3Partitioner)
    {
        return new TokenMapperMurmur(metadata);
    }
    else
    {
        return new TokenMapperGeneric(metadata);
    }
}
Example #23
Source File: IndexSummaryBuilder.java From stratio-cassandra with Apache License 2.0
public IndexSummary build(IPartitioner partitioner)
{
    // this method should only be called when we've finished appending records, so we truncate the
    // memory we're using to the exact amount required to represent it before building our summary
    entries.setCapacity(entries.length());
    offsets.setCapacity(offsets.length());
    return build(partitioner, null);
}
Example #24
Source File: SSTableSimpleWriter.java From stratio-cassandra with Apache License 2.0
/**
 * Create a new writer.
 * @param directory the directory where the sstable will be written
 * @param partitioner the partitioner
 * @param keyspace the keyspace name
 * @param columnFamily the column family name
 * @param comparator the column family comparator
 * @param subComparator the column family subComparator, or null if this is not a Super column family.
 */
public SSTableSimpleWriter(File directory,
                           IPartitioner partitioner,
                           String keyspace,
                           String columnFamily,
                           AbstractType<?> comparator,
                           AbstractType<?> subComparator)
{
    this(directory, CFMetaData.denseCFMetaData(keyspace, columnFamily, comparator, subComparator), partitioner);
}
Example #25
Source File: SSTableWriter.java From stratio-cassandra with Apache License 2.0
public SSTableWriter(String filename,
                     long keyCount,
                     long repairedAt,
                     CFMetaData metadata,
                     IPartitioner partitioner,
                     MetadataCollector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename), components(metadata), metadata, partitioner);
    this.repairedAt = repairedAt;

    if (compression)
    {
        dataFile = SequentialWriter.open(getFilename(),
                                         descriptor.filenameFor(Component.COMPRESSION_INFO),
                                         metadata.compressionParameters(),
                                         sstableMetadataCollector);
        dbuilder = SegmentedFile.getCompressedBuilder((CompressedSequentialWriter) dataFile);
    }
    else
    {
        dataFile = SequentialWriter.open(new File(getFilename()), new File(descriptor.filenameFor(Component.CRC)));
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    }
    iwriter = new IndexWriter(keyCount, dataFile);

    this.sstableMetadataCollector = sstableMetadataCollector;
}
Example #26
Source File: SSTableSimpleUnsortedWriter.java From stratio-cassandra with Apache License 2.0
public SSTableSimpleUnsortedWriter(File directory,
                                   IPartitioner partitioner,
                                   String keyspace,
                                   String columnFamily,
                                   AbstractType<?> comparator,
                                   AbstractType<?> subComparator,
                                   int bufferSizeInMB)
{
    this(directory, partitioner, keyspace, columnFamily, comparator, subComparator, bufferSizeInMB,
         new CompressionParameters(null));
}
Example #27
Source File: SSTableImport.java From stratio-cassandra with Apache License 2.0
/**
 * Convert a JSON formatted file to an SSTable.
 *
 * @param jsonFile the file containing JSON formatted data
 * @param keyspace keyspace the data belongs to
 * @param cf column family the data belongs to
 * @param ssTablePath file to write the SSTable to
 *
 * @throws IOException for errors reading/writing input/output
 */
public int importJson(String jsonFile, String keyspace, String cf, String ssTablePath) throws IOException
{
    ColumnFamily columnFamily = ArrayBackedSortedColumns.factory.create(keyspace, cf);
    IPartitioner partitioner = DatabaseDescriptor.getPartitioner();

    int importedKeys = (isSorted) ? importSorted(jsonFile, columnFamily, ssTablePath, partitioner)
                                  : importUnsorted(jsonFile, columnFamily, ssTablePath, partitioner);

    if (importedKeys != -1)
        System.out.printf("%d keys imported successfully.%n", importedKeys);

    return importedKeys;
}
Example #28
Source File: CassandraKeyspace.java From emodb with Apache License 2.0
public void errorIfPartitionerMismatch(Class<? extends IPartitioner> expectedPartitioner) {
    String mismatchedPartitioner = getMismatchedPartitioner(expectedPartitioner);
    if (mismatchedPartitioner != null) {
        throw new IllegalStateException(format(
                "Cassandra keyspace '%s' must be configured with the %s. It currently uses %s.",
                getName(), expectedPartitioner.getSimpleName(), mismatchedPartitioner));
    }
}
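Callers pass the partitioner class their code requires. A hedged one-line example; the keyspace object and the choice of Murmur3Partitioner are illustrative, not taken from emodb:

keyspace.errorIfPartitionerMismatch(Murmur3Partitioner.class); // throws IllegalStateException if the keyspace uses anything else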
Example #29
Source File: DecoratedKey.java From stratio-cassandra with Apache License 2.0
public static int compareTo(IPartitioner partitioner, ByteBuffer key, RowPosition position)
{
    // delegate to Token.KeyBound if needed
    if (!(position instanceof DecoratedKey))
        return -position.compareTo(partitioner.decorateKey(key));

    DecoratedKey otherKey = (DecoratedKey) position;
    int cmp = partitioner.getToken(key).compareTo(otherKey.getToken());
    return cmp == 0 ? ByteBufferUtil.compareUnsigned(key, otherKey.getKey()) : cmp;
}
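The token-first, key-bytes-second ordering above is what makes key comparisons partitioner-dependent. A small illustrative sketch (the keys are arbitrary):

IPartitioner p = new RandomPartitioner();
DecoratedKey a = p.decorateKey(ByteBufferUtil.bytes("key1"));
DecoratedKey b = p.decorateKey(ByteBufferUtil.bytes("key2"));
int cmp = a.compareTo(b); // compares the MD5-derived tokens first, so the sign need not match the keys' lexical order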
Example #30
Source File: TokenSerializer.java From stratio-cassandra with Apache License 2.0
public static Collection<Token> deserialize(IPartitioner partitioner, DataInput in) throws IOException
{
    Collection<Token> tokens = new ArrayList<Token>();
    while (true)
    {
        int size = in.readInt();
        if (size < 1)
            break;
        logger.trace("Reading token of {} bytes", size);
        byte[] bintoken = new byte[size];
        in.readFully(bintoken);
        tokens.add(partitioner.getTokenFactory().fromByteArray(ByteBuffer.wrap(bintoken)));
    }
    return tokens;
}
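The loop above implies the wire format: each token is an int length followed by the token's bytes, with a length below 1 terminating the stream. A sketch of the matching writer under that assumption (the real serializer is not shown in the snippet above):

public static void serialize(IPartitioner partitioner, Collection<Token> tokens, DataOutput out) throws IOException
{
    for (Token token : tokens)
    {
        ByteBuffer b = partitioner.getTokenFactory().toByteArray(token); // inverse of fromByteArray above
        out.writeInt(b.remaining());
        out.write(b.array(), b.arrayOffset() + b.position(), b.remaining());
    }
    out.writeInt(0); // terminator: deserialize stops on size < 1
}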