Java Code Examples for org.apache.cassandra.db.marshal.LongType#instance
The following examples show how to use org.apache.cassandra.db.marshal.LongType#instance. You can go to the original project or source file by following the link above each example.
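For orientation before the examples: LongType.instance is the shared singleton of Cassandra's marshaller for 64-bit signed integers (the CQL bigint type), and its decompose/compose methods convert between Long values and their ByteBuffer serialization. The sketch below is illustrative only; the class name and values are made up and do not come from any of the projects listed here.

import java.nio.ByteBuffer;

import org.apache.cassandra.db.marshal.LongType;

public class LongTypeRoundTrip
{
    public static void main(String[] args)
    {
        // decompose: Long -> ByteBuffer holding the serialized value
        ByteBuffer serialized = LongType.instance.decompose(42L);

        // compose: ByteBuffer -> Long (the inverse operation)
        Long restored = LongType.instance.compose(serialized);

        System.out.println("round-trip value: " + restored); // prints 42
    }
}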
Example 1
Source File: LazyCassandraUtils.java From Hive-Cassandra with Apache License 2.0
public static AbstractType getCassandraType(PrimitiveObjectInspector oi)
{
    switch (oi.getPrimitiveCategory())
    {
        case BOOLEAN:
            return BooleanType.instance;
        case INT:
            return Int32Type.instance;
        case LONG:
            return LongType.instance;
        case FLOAT:
            return FloatType.instance;
        case DOUBLE:
            return DoubleType.instance;
        case STRING:
            return UTF8Type.instance;
        case BYTE:
        case SHORT:
        case BINARY:
            return BytesType.instance;
        case TIMESTAMP:
            return DateType.instance;
        default:
            throw new RuntimeException("Hive internal error.");
    }
}
Example 2
Source File: OnDiskIndexTest.java From sasi with Apache License 2.0
@Test
public void testSuperBlockRetrieval() throws Exception
{
    OnDiskIndexBuilder builder = new OnDiskIndexBuilder(UTF8Type.instance, LongType.instance, OnDiskIndexBuilder.Mode.SPARSE);
    for (long i = 0; i < 100000; i++)
        builder.add(LongType.instance.decompose(i), keyAt(i), i);

    File index = File.createTempFile("on-disk-sa-multi-superblock-match", ".db");
    index.deleteOnExit();

    builder.finish(index);

    OnDiskIndex onDiskIndex = new OnDiskIndex(index, LongType.instance, new KeyConverter());

    testSearchRangeWithSuperBlocks(onDiskIndex, 0, 500);
    testSearchRangeWithSuperBlocks(onDiskIndex, 300, 93456);
    testSearchRangeWithSuperBlocks(onDiskIndex, 210, 1700);
    testSearchRangeWithSuperBlocks(onDiskIndex, 530, 3200);

    Random random = new Random(0xdeadbeef);
    for (int i = 0; i < 100000; i += random.nextInt(1500)) // random steps with max of 1500 elements
    {
        for (int j = 0; j < 3; j++)
            testSearchRangeWithSuperBlocks(onDiskIndex, i, ThreadLocalRandom.current().nextInt(i, 100000));
    }
}
Example 3
Source File: OnDiskIndexTest.java From sasi with Apache License 2.0
private static Expression expressionFor(long lower, boolean lowerInclusive, long upper, boolean upperInclusive)
{
    Expression expression = new Expression(ByteBufferUtil.EMPTY_BYTE_BUFFER, LongType.instance);
    expression.add(lowerInclusive ? IndexOperator.GTE : IndexOperator.GT, LongType.instance.decompose(lower));
    expression.add(upperInclusive ? IndexOperator.LTE : IndexOperator.LT, LongType.instance.decompose(upper));
    return expression;
}
Example 4
Source File: ColumnTest.java From stratio-cassandra with Apache License 2.0
@Test
public void testToStringFromDecomposedWithSufix()
{
    String name = "my";
    String sufix = "column";
    LongType type = LongType.instance;
    Long composedValue = 5L;
    ByteBuffer decomposedValue = type.decompose(composedValue);
    Column<Long> column = Column.fromDecomposed(name, sufix, decomposedValue, type);
    Assert.assertEquals("Column{fullName=my.column, composedValue=5, type=LongType}", column.toString());
}
Example 5
Source File: Selection.java From stratio-cassandra with Apache License 2.0
private static ColumnSpecification makeWritetimeOrTTLSpec(CFMetaData cfm, Selectable.WritetimeOrTTL tot, ColumnIdentifier alias)
{
    return new ColumnSpecification(cfm.ksName,
                                   cfm.cfName,
                                   alias == null ? new ColumnIdentifier(tot.toString(), true) : alias,
                                   tot.isWritetime ? LongType.instance : Int32Type.instance);
}
Example 6
Source File: ColumnTest.java From stratio-cassandra with Apache License 2.0
@Test
public void testToStringFromDecomposedWithoutSufix()
{
    String name = "my_column";
    LongType type = LongType.instance;
    Long composedValue = 5L;
    ByteBuffer decomposedValue = type.decompose(composedValue);
    Column<Long> column = Column.fromDecomposed(name, decomposedValue, type);
    Assert.assertEquals("Column{fullName=my_column, composedValue=5, type=LongType}", column.toString());
}
Example 7
Source File: ColumnTest.java From stratio-cassandra with Apache License 2.0
@Test
public void testCreateFromComposedWithSufix()
{
    String name = "my";
    String sufix = "column";
    LongType type = LongType.instance;
    Long composedValue = 5L;
    ByteBuffer decomposedValue = type.decompose(composedValue);
    Column<Long> column = Column.fromComposed(name, sufix, composedValue, type);
    Assert.assertEquals(name, column.getName());
    Assert.assertEquals("my.column", column.getFullName());
    Assert.assertEquals(type, column.getType());
    Assert.assertEquals(composedValue, column.getComposedValue());
    Assert.assertEquals(decomposedValue, column.getDecomposedValue());
}
Example 8
Source File: CqlRecordWriter.java From stratio-cassandra with Apache License 2.0
private AbstractType<?> parseType(String type) throws ConfigurationException
{
    try
    {
        // always treat counters like longs, specifically CCT.serialize is not what we need
        if (type != null && type.equals("org.apache.cassandra.db.marshal.CounterColumnType"))
            return LongType.instance;
        return TypeParser.parse(type);
    }
    catch (SyntaxException e)
    {
        throw new ConfigurationException(e.getMessage(), e);
    }
}
Example 9
Source File: CassandraTypeConverterTest.java From debezium-incubator with Apache License 2.0
@Test
public void testBigInt()
{
    DataType bigIntType = DataType.bigint();

    AbstractType<?> convertedType = CassandraTypeConverter.convert(bigIntType);

    LongType expectedType = LongType.instance;

    Assert.assertEquals(expectedType, convertedType);
}
Example 10
Source File: ColumnTest.java From stratio-cassandra with Apache License 2.0
@Test
public void testCreateFromComposedWithoutSufix()
{
    String name = "my_column";
    LongType type = LongType.instance;
    Long composedValue = 5L;
    ByteBuffer decomposedValue = type.decompose(composedValue);
    Column<Long> column = Column.fromComposed(name, composedValue, type);
    Assert.assertEquals(name, column.getName());
    Assert.assertEquals(name, column.getFullName());
    Assert.assertEquals(type, column.getType());
    Assert.assertEquals(composedValue, column.getComposedValue());
    Assert.assertEquals(decomposedValue, column.getDecomposedValue());
}
Example 11
Source File: ColumnTest.java From stratio-cassandra with Apache License 2.0
@Test
public void testCreateFromDecomposedWithoutSufix()
{
    String name = "my_column";
    LongType type = LongType.instance;
    Long composedValue = 5L;
    ByteBuffer decomposedValue = type.decompose(composedValue);
    Column<Long> column = Column.fromDecomposed(name, decomposedValue, type);
    Assert.assertEquals(name, column.getName());
    Assert.assertEquals(name, column.getFullName());
    Assert.assertEquals(type, column.getType());
    Assert.assertEquals(composedValue, column.getComposedValue());
    Assert.assertEquals(decomposedValue, column.getDecomposedValue());
}
Example 12
Source File: ColumnTest.java From stratio-cassandra with Apache License 2.0
@Test
public void testCreateFromDecomposedWithSufix()
{
    String name = "my";
    String sufix = "column";
    LongType type = LongType.instance;
    Long composedValue = 5L;
    ByteBuffer decomposedValue = type.decompose(composedValue);
    Column<Long> column = Column.fromDecomposed(name, sufix, decomposedValue, type);
    Assert.assertEquals(name, column.getName());
    Assert.assertEquals("my.column", column.getFullName());
    Assert.assertEquals(type, column.getType());
    Assert.assertEquals(composedValue, column.getComposedValue());
    Assert.assertEquals(decomposedValue, column.getDecomposedValue());
}
Example 13
Source File: Murmur3Partitioner.java From stratio-cassandra with Apache License 2.0
public AbstractType<?> getTokenValidator()
{
    return LongType.instance;
}
Example 14
Source File: Longs.java From stratio-cassandra with Apache License 2.0
public Longs(String name, GeneratorConfig config)
{
    super(LongType.instance, config, name, Long.class);
}
Example 15
Source File: ResultSet.java From stratio-cassandra with Apache License 2.0
public static ResultSet.Metadata makeCountMetadata(String ksName, String cfName, ColumnIdentifier alias)
{
    ColumnSpecification spec = new ColumnSpecification(ksName, cfName, alias == null ? COUNT_COLUMN : alias, LongType.instance);
    return new Metadata(Collections.singletonList(spec));
}
Example 16
Source File: Selection.java From stratio-cassandra with Apache License 2.0
public AbstractType<?> getType()
{
    return isWritetime ? LongType.instance : Int32Type.instance;
}
Example 17
Source File: Attributes.java From stratio-cassandra with Apache License 2.0
private ColumnSpecification timestampReceiver(String ksName, String cfName)
{
    return new ColumnSpecification(ksName, cfName, new ColumnIdentifier("[timestamp]", true), LongType.instance);
}
Example 18
Source File: OnDiskIndexTest.java From sasi with Apache License 2.0
@Test
public void testRangeQueryWithExclusions() throws Exception
{
    final long lower = 0;
    final long upper = 100000;

    OnDiskIndexBuilder builder = new OnDiskIndexBuilder(UTF8Type.instance, LongType.instance, OnDiskIndexBuilder.Mode.SPARSE);
    for (long i = lower; i <= upper; i++)
        builder.add(LongType.instance.decompose(i), keyAt(i), i);

    File index = File.createTempFile("on-disk-sa-except-long-ranges", "db");
    index.deleteOnExit();

    builder.finish(index);

    OnDiskIndex onDisk = new OnDiskIndex(index, LongType.instance, new KeyConverter());

    ThreadLocalRandom random = ThreadLocalRandom.current();

    // single exclusion

    // let's do small range first to figure out if searchPoint works properly
    validateExclusions(onDisk, lower, 50, Sets.newHashSet(42L));
    // now let's do whole data set to test SPARSE searching
    validateExclusions(onDisk, lower, upper, Sets.newHashSet(31337L));

    // pair of exclusions which would generate a split
    validateExclusions(onDisk, lower, random.nextInt(400, 800), Sets.newHashSet(42L, 154L));
    validateExclusions(onDisk, lower, upper, Sets.newHashSet(31337L, 54631L));

    // 3 exclusions which would generate a split and change bounds
    validateExclusions(onDisk, lower, random.nextInt(400, 800), Sets.newHashSet(42L, 154L));
    validateExclusions(onDisk, lower, upper, Sets.newHashSet(31337L, 54631L));

    validateExclusions(onDisk, lower, random.nextLong(400, upper), Sets.newHashSet(42L, 55L));
    validateExclusions(onDisk, lower, random.nextLong(400, upper), Sets.newHashSet(42L, 55L, 93L));
    validateExclusions(onDisk, lower, random.nextLong(400, upper), Sets.newHashSet(42L, 55L, 93L, 205L));

    Set<Long> exclusions = Sets.newHashSet(3L, 12L, 13L, 14L, 27L, 54L, 81L, 125L, 384L, 771L, 1054L, 2048L, 78834L);

    // test that exclusions are properly bound by lower/upper of the expression
    Assert.assertEquals(392, validateExclusions(onDisk, lower, 400, exclusions, false));
    Assert.assertEquals(101, validateExclusions(onDisk, lower, 100, Sets.newHashSet(-10L, -5L, -1L), false));

    validateExclusions(onDisk, lower, upper, exclusions);

    Assert.assertEquals(100000, convert(onDisk.search(new Expression(ByteBufferUtil.EMPTY_BYTE_BUFFER, LongType.instance)
                                            .add(IndexOperator.NOT_EQ, LongType.instance.decompose(100L)))).size());

    Assert.assertEquals(49, convert(onDisk.search(new Expression(ByteBufferUtil.EMPTY_BYTE_BUFFER, LongType.instance)
                                            .add(IndexOperator.LT, LongType.instance.decompose(50L))
                                            .add(IndexOperator.NOT_EQ, LongType.instance.decompose(10L)))).size());

    Assert.assertEquals(99998, convert(onDisk.search(new Expression(ByteBufferUtil.EMPTY_BYTE_BUFFER, LongType.instance)
                                            .add(IndexOperator.GT, LongType.instance.decompose(1L))
                                            .add(IndexOperator.NOT_EQ, LongType.instance.decompose(20L)))).size());

    onDisk.close();
}