com.datastax.driver.core.Cluster Java Examples
The following examples show how to use com.datastax.driver.core.Cluster.
Each example comes from an open-source project; the source file, project, and license are noted above each snippet.
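Before diving into the project snippets, here is a minimal, self-contained sketch of the usual Cluster lifecycle with the 3.x DataStax Java driver: build the Cluster, open a Session, run a statement, and close the Cluster. The contact point and port are placeholders for your own deployment.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;

public class ClusterQuickstart {
    public static void main(String[] args) {
        // Build a Cluster for a single (placeholder) contact point on the default native port.
        Cluster cluster = Cluster.builder()
                .addContactPoint("127.0.0.1")
                .withPort(9042)
                .build();
        try {
            // A Session holds the connection pools; statements are executed against it.
            Session session = cluster.connect();
            Row row = session.execute("SELECT release_version FROM system.local").one();
            System.out.println("Connected to Cassandra " + row.getString("release_version"));
        } finally {
            // Closing the Cluster also closes any Sessions created from it.
            cluster.close();
        }
    }
}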
Example #1
Source File: AbstractCassandraProcessor.java From nifi with Apache License 2.0

/**
 * Uses a Cluster.Builder to create a Cassandra cluster reference using the given parameters
 *
 * @param contactPoints The contact points (hostname:port list of Cassandra nodes)
 * @param sslContext The SSL context (used for secure connections)
 * @param username The username for connection authentication
 * @param password The password for connection authentication
 * @param compressionType Enable compression at transport-level requests and responses.
 * @return A reference to the Cluster object associated with the given Cassandra configuration
 */
protected Cluster createCluster(List<InetSocketAddress> contactPoints, SSLContext sslContext,
                                String username, String password, String compressionType) {
    Cluster.Builder builder = Cluster.builder().addContactPointsWithPorts(contactPoints);

    if (sslContext != null) {
        JdkSSLOptions sslOptions = JdkSSLOptions.builder()
                .withSSLContext(sslContext)
                .build();
        builder = builder.withSSL(sslOptions);
        if (ProtocolOptions.Compression.SNAPPY.equals(compressionType)) {
            builder = builder.withCompression(ProtocolOptions.Compression.SNAPPY);
        } else if (ProtocolOptions.Compression.LZ4.equals(compressionType)) {
            builder = builder.withCompression(ProtocolOptions.Compression.LZ4);
        }
    }

    if (username != null && password != null) {
        builder = builder.withCredentials(username, password);
    }

    return builder.build();
}
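For context, a hypothetical call site for a helper like the one above might build the contact-point list and hand it in as follows. The host names, port, and credentials are placeholders, and passing a null SSLContext keeps the connection unencrypted; this is not part of the original NiFi file.

// Hypothetical usage of the createCluster(...) helper above; all values are placeholders.
List<InetSocketAddress> contactPoints = Arrays.asList(
        new InetSocketAddress("cassandra-1.example.com", 9042),
        new InetSocketAddress("cassandra-2.example.com", 9042));

Cluster cluster = createCluster(contactPoints, null, "cassandra", "cassandra", "NONE");
try {
    Session session = cluster.connect();
    session.execute("SELECT now() FROM system.local");
} finally {
    cluster.close();
}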
Example #2
Source File: HadoopFormatIOCassandraTest.java From beam with Apache License 2.0

@BeforeClass
public static void beforeClass() throws Exception {
    cassandraPort = NetworkTestHelper.getAvailableLocalPort();
    cassandraNativePort = NetworkTestHelper.getAvailableLocalPort();
    replacePortsInConfFile();
    // Start the Embedded Cassandra Service
    cassandra.start();
    final SocketOptions socketOptions = new SocketOptions();
    // Setting this to 0 disables read timeouts.
    socketOptions.setReadTimeoutMillis(0);
    // This defaults to 5 s. Increase to a minute.
    socketOptions.setConnectTimeoutMillis(60 * 1000);
    cluster =
        Cluster.builder()
            .addContactPoint(CASSANDRA_HOST)
            .withClusterName("beam")
            .withSocketOptions(socketOptions)
            .withPort(cassandraNativePort)
            .build();
    session = cluster.connect();
    createCassandraData();
}
Example #3
Source File: CassandraTupleWriteAheadSinkExample.java From flink-learning with Apache License 2.0

public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(1000);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
    env.setStateBackend(new FsStateBackend("file:///" + System.getProperty("java.io.tmpdir") + "/flink/backend"));

    CassandraSink<Tuple2<String, Integer>> sink = CassandraSink.addSink(env.addSource(new MySource()))
            .setQuery("INSERT INTO zhisheng.values (id, counter) values (?, ?);")
            .enableWriteAheadLog()
            .setClusterBuilder(new ClusterBuilder() {
                private static final long serialVersionUID = 2793938419775311824L;

                @Override
                public Cluster buildCluster(Cluster.Builder builder) {
                    return builder.addContactPoint("127.0.0.1").build();
                }
            })
            .build();

    sink.name("Cassandra Sink").disableChaining().setParallelism(1).uid("hello");

    env.execute();
}
Example #4
Source File: CqlCount.java From cassandra-count with Apache License 2.0

private void setup()
        throws IOException, KeyStoreException, NoSuchAlgorithmException, KeyManagementException,
               CertificateException, UnrecoverableKeyException {
    // Connect to Cassandra
    Cluster.Builder clusterBuilder = Cluster.builder()
            .addContactPoint(host)
            .withPort(port)
            .withLoadBalancingPolicy(new TokenAwarePolicy(DCAwareRoundRobinPolicy.builder().build()));
    if (null != username)
        clusterBuilder = clusterBuilder.withCredentials(username, password);
    if (null != truststorePath)
        clusterBuilder = clusterBuilder.withSSL(createSSLOptions());

    cluster = clusterBuilder.build();
    if (null == cluster) {
        throw new IOException("Could not create cluster");
    }
    session = cluster.connect();
}
Example #5
Source File: CassandraUtils.java From sstable-tools with Apache License 2.0

public static Cluster loadTablesFromRemote(String host, int port, String cfidOverrides) throws IOException {
    Map<String, UUID> cfs = parseOverrides(cfidOverrides);
    Cluster.Builder builder = Cluster.builder().addContactPoints(host).withPort(port);
    Cluster cluster = builder.build();
    Metadata metadata = cluster.getMetadata();
    IPartitioner partitioner = FBUtilities.newPartitioner(metadata.getPartitioner());
    if (DatabaseDescriptor.getPartitioner() == null)
        DatabaseDescriptor.setPartitionerUnsafe(partitioner);
    for (com.datastax.driver.core.KeyspaceMetadata ksm : metadata.getKeyspaces()) {
        if (!ksm.getName().equals("system")) {
            for (TableMetadata tm : ksm.getTables()) {
                String name = ksm.getName() + "." + tm.getName();
                try {
                    CassandraUtils.tableFromCQL(
                            new ByteArrayInputStream(tm.asCQLQuery().getBytes()),
                            cfs.get(name) != null ? cfs.get(name) : tm.getId());
                } catch (SyntaxException e) {
                    // ignore tables that we cant parse (probably dse)
                    logger.debug("Ignoring table " + name + " due to syntax exception " + e.getMessage());
                }
            }
        }
    }
    return cluster;
}
Example #6
Source File: CassandraPojoSinkExample.java From Flink-CEPplus with Apache License 2.0

public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Message> source = env.fromCollection(messages);

    CassandraSink.addSink(source)
            .setClusterBuilder(new ClusterBuilder() {
                @Override
                protected Cluster buildCluster(Builder builder) {
                    return builder.addContactPoint("127.0.0.1").build();
                }
            })
            .setMapperOptions(() -> new Mapper.Option[]{Mapper.Option.saveNullFields(true)})
            .build();

    env.execute("Cassandra Sink example");
}
Example #7
Source File: BatchTests.java From stratio-cassandra with Apache License 2.0

@BeforeClass()
public static void setup() throws ConfigurationException, IOException {
    cassandra = new EmbeddedCassandraService();
    cassandra.start();

    cluster = Cluster.builder().addContactPoint("127.0.0.1")
            .withPort(DatabaseDescriptor.getNativeTransportPort())
            .build();
    session = cluster.connect();

    session.execute("drop keyspace if exists junit;");
    session.execute("create keyspace junit WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };");
    session.execute("CREATE TABLE junit.noncounter (\n"
            + " id int PRIMARY KEY,\n"
            + " val text\n"
            + ");");
    session.execute("CREATE TABLE junit.counter (\n"
            + " id int PRIMARY KEY,\n"
            + " val counter,\n"
            + ");");

    noncounter = session.prepare("insert into junit.noncounter(id, val)values(?,?)");
    counter = session.prepare("update junit.counter set val = val + ? where id = ?");
}
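As a follow-up to the setup above, a hypothetical test body (not part of the original file) could exercise the prepared noncounter statement through an unlogged batch; the row count and values below are arbitrary.

// Hypothetical test using the session and the noncounter statement prepared in setup().
BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);
for (int i = 0; i < 10; i++) {
    batch.add(noncounter.bind(i, "value-" + i));
}
session.execute(batch);
long rows = session.execute("select count(*) from junit.noncounter").one().getLong(0);
assertEquals(10, rows);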
Example #8
Source File: TestCassandraSink.java From ingestion with Apache License 2.0

@Test
public void stop() {
    final CassandraSink sink = new CassandraSink();
    final Channel channel = mock(Channel.class);
    final Session session = mock(Session.class);
    final Cluster cluster = mock(Cluster.class);

    final Context ctx = new Context();
    ctx.put("tables", "keyspace.table");
    sink.configure(ctx);
    sink.setChannel(channel);
    sink.session = session;
    sink.cluster = cluster;

    sink.stop();

    verify(session).isClosed();
    verify(session).close();
    verifyNoMoreInteractions(session);
    verify(cluster).isClosed();
    verify(cluster).close();
    verifyNoMoreInteractions(cluster);
}
Example #9
Source File: CassandraDeepJobConfig.java From deep-spark with Apache License 2.0

@Override
public synchronized Session getSession() {
    String id = this.getHost() + ":" + this.cqlPort;
    if (!cassandraSession.containsKey(id)) {
        Cluster cluster = Cluster.builder()
                .withPort(this.cqlPort)
                .addContactPoint(this.getHost())
                .withCredentials(this.username, this.password)
                .withProtocolVersion(PROTOCOL_VERSION)
                .build();

        session = cluster.connect(quote(this.catalog));
        cassandraSession.put(id, session);
    }
    return cassandraSession.get(id);
}
Example #10
Source File: CassandraApplicationTest.java From examples with Apache License 2.0

@BeforeClass
public static void setup() {
    try {
        cluster = Cluster.builder().addContactPoint(NODE).build();
        session = cluster.connect(KEYSPACE);

        String createMetaTable = "CREATE TABLE IF NOT EXISTS " + KEYSPACE + "."
                + CassandraTransactionalStore.DEFAULT_META_TABLE + " ( "
                + CassandraTransactionalStore.DEFAULT_APP_ID_COL + " TEXT, "
                + CassandraTransactionalStore.DEFAULT_OPERATOR_ID_COL + " INT, "
                + CassandraTransactionalStore.DEFAULT_WINDOW_COL + " BIGINT, "
                + "PRIMARY KEY (" + CassandraTransactionalStore.DEFAULT_APP_ID_COL + ", "
                + CassandraTransactionalStore.DEFAULT_OPERATOR_ID_COL + ") "
                + ");";
        session.execute(createMetaTable);

        String createTable = "CREATE TABLE IF NOT EXISTS " + KEYSPACE + "." + TABLE_NAME
                + " (id uuid PRIMARY KEY,age int,lastname text,test boolean,floatvalue float,doubleValue double,set1 set<int>,list1 list<int>,map1 map<text,int>,last_visited timestamp);";
        session.execute(createTable);
    } catch (Throwable e) {
        DTThrowable.rethrow(e);
    }
}
Example #11
Source File: CassandraPojoSinkExample.java From flink with Apache License 2.0

public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Message> source = env.fromCollection(messages);

    CassandraSink.addSink(source)
            .setClusterBuilder(new ClusterBuilder() {
                @Override
                protected Cluster buildCluster(Builder builder) {
                    return builder.addContactPoint("127.0.0.1").build();
                }
            })
            .setMapperOptions(() -> new Mapper.Option[]{Mapper.Option.saveNullFields(true)})
            .build();

    env.execute("Cassandra Sink example");
}
Example #12
Source File: ClusterHintsPollerTest.java From emodb with Apache License 2.0

@Test
public void testClusterHintsPollerWhenNodeDown() throws UnknownHostException {
    ClusterHintsPoller clusterHintsPoller = new ClusterHintsPoller();
    Session mockSession = mock(Session.class);
    Cluster mockCluster = mock(Cluster.class);
    Metadata mockMetadata = mock(Metadata.class);
    when(mockCluster.getMetadata()).thenReturn(mockMetadata);
    when(mockCluster.getClusterName()).thenReturn("test-cluster");
    Host node1 = mock(Host.class);
    when(node1.getAddress()).thenReturn(InetAddress.getByName("127.0.0.1"));
    Host node2 = mock(Host.class);
    when(node2.getAddress()).thenReturn(InetAddress.getByName("127.0.0.2"));
    Host node3 = mock(Host.class);
    when(node3.getAddress()).thenReturn(InetAddress.getByName("127.0.0.3"));

    when(mockSession.getCluster()).thenReturn(mockCluster);

    // The first node queried is down
    when(mockSession.execute(any(Statement.class))).thenThrow(new NoHostAvailableException(ImmutableMap.<InetSocketAddress, Throwable>of()));

    when(mockMetadata.getAllHosts()).thenReturn(ImmutableSet.of(node1, node2, node3));

    HintsPollerResult actualResult = clusterHintsPoller.getOldestHintsInfo(mockSession);

    // Make sure HintsPollerResult fails
    assertFalse(actualResult.areAllHostsPolling(), "Result should show hosts failing");
    assertEquals(actualResult.getHostFailure(), ImmutableSet.of(InetAddress.getByName("127.0.0.1")),
            "Node 1 should return with host failure");
}
Example #13
Source File: SchemaManager.java From newts with Apache License 2.0

@Inject
public SchemaManager(@Named("cassandra.keyspace") String keyspace, @Named("cassandra.host") String host,
        @Named("cassandra.port") int port, @Named("cassandra.username") String username,
        @Named("cassandra.password") String password, @Named("cassandra.ssl") boolean ssl) {

    m_keyspace = keyspace;

    Builder builder = Cluster.builder()
            .withPort(port)
            .addContactPoints(host.split(","));

    if (username != null && password != null) {
        LOG.info("Using username: {} and password: XXXXXXXX", username);
        builder.withCredentials(username, password);
    }

    if (ssl) {
        LOG.info("Using SSL.");
        builder.withSSL();
    }

    m_cluster = builder.build();
    m_session = m_cluster.connect();
}
Example #14
Source File: ExecutionEngine.java From arcusplatform with Apache License 2.0

private Session createSession(Profile profile) {
    QueryOptions options = new QueryOptions();
    options.setConsistencyLevel(profile.getConsistencyLevel());

    Cluster.Builder builder = Cluster.builder();
    builder.addContactPoints(profile.getNodes().toArray(new String[0]));
    builder.withPort(profile.getPort());
    builder.withQueryOptions(options);

    if (!StringUtils.isBlank(profile.getUsername()) && !StringUtils.isBlank(profile.getPassword())) {
        builder.withCredentials(profile.getUsername(), profile.getPassword());
    }

    Cluster cluster = builder.build();
    Session session = cluster.connect(profile.getKeyspace());
    return session;
}
Example #15
Source File: SpringContextTest.java From tutorials with MIT License

@BeforeClass
public static void startCassandraEmbedded()
        throws InterruptedException, TTransportException, ConfigurationException, IOException {
    EmbeddedCassandraServerHelper.startEmbeddedCassandra();
    final Cluster cluster = Cluster.builder().addContactPoints("127.0.0.1").withPort(9142).build();
    final Session session = cluster.connect();
    session.execute(KEYSPACE_CREATION_QUERY);
    session.execute(KEYSPACE_ACTIVATE_QUERY);
    Thread.sleep(5000);
}
Example #16
Source File: CassandraStorageExtension.java From zipkin-dependencies with Apache License 2.0

static Cluster getCluster(InetSocketAddress contactPoint) {
    return Cluster.builder()
        .withoutJMXReporting()
        .addContactPointsWithPorts(contactPoint)
        .withRetryPolicy(ZipkinRetryPolicy.INSTANCE)
        .withPoolingOptions(new PoolingOptions().setMaxConnectionsPerHost(HostDistance.LOCAL, 1))
        .build();
}
Example #17
Source File: CassandraDatastaxITBase.java From pinpoint with Apache License 2.0

public static void init(Cluster cluster) {
    try (Session systemSession = cluster.connect()) {
        String createKeyspace = String.format("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = "
                + "{'class':'SimpleStrategy','replication_factor':'1'};", TEST_KEYSPACE);
        systemSession.execute(createKeyspace);
        String createTable = String.format("CREATE TABLE %s.%s (id text, value text, PRIMARY KEY(id))",
                TEST_KEYSPACE, TEST_TABLE);
        systemSession.execute(createTable);
    }
}
Example #18
Source File: TestCassandraUtils.java From ingestion with Apache License 2.0

@Test
public void getTableMetadataOnUnexistentKeyspace() {
    // session.getCluster().getMetadata().getKeyspace(keyspace);
    final Session session = mock(Session.class);
    final Cluster cluster = mock(Cluster.class);
    final Metadata metadata = mock(Metadata.class);

    when(session.getCluster()).thenReturn(cluster);
    when(cluster.getMetadata()).thenReturn(metadata);
    when(metadata.getKeyspace("keyspace")).thenReturn(null);

    thrown.expect(IllegalStateException.class);
    thrown.expectMessage("Keyspace keyspace does not exist");

    CassandraUtils.getTableMetadata(session, "keyspace", "table");
}
Example #19
Source File: ClusterFactoryTest.java From james-project with Apache License 2.0

@Test
void consistencyLevelShouldBeEqualToQuorum(DockerCassandra dockerCassandra) {
    Cluster cluster = ClusterFactory.create(dockerCassandra.configurationBuilder()
        .build());

    ConsistencyLevel consistencyLevel = cluster.getConfiguration()
        .getQueryOptions()
        .getConsistencyLevel();

    assertThat(consistencyLevel).isEqualTo(ConsistencyLevel.QUORUM);
}
Example #20
Source File: CassandraStorage.java From copper-engine with Apache License 2.0

protected void createSchema(Session session, Cluster cluster) throws Exception {
    if (!createSchemaOnStartup)
        return;

    final KeyspaceMetadata metaData = cluster.getMetadata().getKeyspace(session.getLoggedKeyspace());
    if (metaData.getTable("COP_WORKFLOW_INSTANCE") != null) {
        logger.info("skipping schema creation");
        return;
    }

    logger.info("Creating tables...");
    try (final BufferedReader br = new BufferedReader(new InputStreamReader(
            CassandraStorage.class.getResourceAsStream("copper-schema.cql")))) {
        StringBuilder cql = new StringBuilder();
        String line;
        while ((line = br.readLine()) != null) {
            line = line.trim();
            if (line.isEmpty())
                continue;
            if (line.startsWith("--"))
                continue;
            if (line.endsWith(";")) {
                if (line.length() > 1)
                    cql.append(line.substring(0, line.length() - 1));
                String cqlCmd = cql.toString();
                cql = new StringBuilder();
                logger.info("Executing CQL {}", cqlCmd);
                session.execute(cqlCmd);
            } else {
                cql.append(line).append(" ");
            }
        }
    }
}
Example #21
Source File: DockerCassandra.java From james-project with Apache License 2.0

public void dropKeyspace(String keyspace) {
    try (Cluster cluster = ClusterFactory.create(cassandra.superUserConfigurationBuilder().build())) {
        try (Session cassandraSession = cluster.newSession()) {
            boolean applied = cassandraSession.execute(
                SchemaBuilder.dropKeyspace(keyspace)
                    .ifExists())
                .wasApplied();
            if (!applied) {
                throw new IllegalStateException("cannot drop keyspace '" + keyspace + "'");
            }
        }
    }
}
Example #22
Source File: ClusterFactoryTest.java From james-project with Apache License 2.0

void assertThatClusterIsContactable(Cluster cluster) {
    try (Session session = cluster.connect("system")) {
        session.execute(
            session.prepare(select()
                .fcall("NOW")
                .from("local"))
            .bind());
    } catch (Exception e) {
        throw new AssertionError("expecting cluster can be connected but actually not", e);
    }
}
Example #23
Source File: PutCassandraRecordTest.java From nifi with Apache License 2.0

@Override
protected Cluster createCluster(List<InetSocketAddress> contactPoints, SSLContext sslContext,
                                String username, String password, String compressionType) {
    Cluster mockCluster = mock(Cluster.class);
    try {
        Metadata mockMetadata = mock(Metadata.class);
        when(mockMetadata.getClusterName()).thenReturn("cluster1");
        when(mockCluster.getMetadata()).thenReturn(mockMetadata);
        when(mockCluster.connect()).thenReturn(mockSession);
        when(mockCluster.connect(anyString())).thenReturn(mockSession);

        Configuration config = Configuration.builder().build();
        when(mockCluster.getConfiguration()).thenReturn(config);

        ResultSetFuture future = mock(ResultSetFuture.class);
        ResultSet rs = CassandraQueryTestUtil.createMockResultSet();
        PreparedStatement ps = mock(PreparedStatement.class);
        when(mockSession.prepare(anyString())).thenReturn(ps);
        BoundStatement bs = mock(BoundStatement.class);
        when(ps.bind()).thenReturn(bs);
        when(future.getUninterruptibly()).thenReturn(rs);
        try {
            doReturn(rs).when(future).getUninterruptibly(anyLong(), any(TimeUnit.class));
        } catch (TimeoutException te) {
            throw new IllegalArgumentException("Mocked cluster doesn't time out");
        }
        if (exceptionToThrow != null) {
            doThrow(exceptionToThrow).when(mockSession).executeAsync(anyString());
            doThrow(exceptionToThrow).when(mockSession).executeAsync(any(Statement.class));
        } else {
            when(mockSession.executeAsync(anyString())).thenReturn(future);
            when(mockSession.executeAsync(any(Statement.class))).thenReturn(future);
        }
        when(mockSession.getCluster()).thenReturn(mockCluster);
    } catch (Exception e) {
        fail(e.getMessage());
    }
    return mockCluster;
}
Example #24
Source File: CQLSession.java From cassandra-sidecar with Apache License 2.0

/**
 * Provides a Session connected only to the local node from configuration. If null, the connection was
 * not able to be established. The session still might throw a NoHostAvailableException if the local host goes
 * offline or is otherwise unavailable.
 *
 * @return Session
 */
@Nullable
public synchronized Session getLocalCql() {
    Cluster cluster = null;
    try {
        if (localSession == null) {
            cluster = Cluster.builder()
                    .addContactPointsWithPorts(inet)
                    .withLoadBalancingPolicy(wlp)
                    .withQueryOptions(queryOptions)
                    .withReconnectionPolicy(reconnectionPolicy)
                    .withoutMetrics()
                    // tests can create a lot of these Cluster objects, to avoid creating HWTs and
                    // event thread pools for each we have the override
                    .withNettyOptions(nettyOptions)
                    .build();
            localSession = cluster.connect();
        }
    } catch (Exception e) {
        logger.debug("Failed to reach Cassandra", e);
        if (cluster != null) {
            try {
                cluster.close();
            } catch (Exception ex) {
                logger.debug("Failed to close cluster in cleanup", ex);
            }
        }
    }
    return localSession;
}
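For illustration only, a hypothetical caller of getLocalCql() might handle the nullable return value as follows; cqlSession stands for an injected CQLSession instance and logger for an SLF4J-style logger, neither of which is part of the original file.

// Hypothetical caller: getLocalCql() returns null when the local node could not be reached.
Session localSession = cqlSession.getLocalCql();
if (localSession != null) {
    Row row = localSession.execute("SELECT release_version FROM system.local").one();
    logger.info("Local Cassandra version: {}", row.getString("release_version"));
} else {
    logger.warn("Local CQL session is not available yet");
}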
Example #25
Source File: CassandraClusterFactoryJavaConfigTest.java From spring-cloud-connectors with Apache License 2.0

@Test
public void cloudCassandraConnectionFactoryConfig() {
    ApplicationContext testContext =
        getTestApplicationContext(CassandraClusterConfigWithServiceConfig.class, createService("my-service"));

    Cluster connector = testContext.getBean("my-service", getConnectorType());

    assertThat(connector.getConfiguration().getSocketOptions().getSendBufferSize(), is(12345));
}
Example #26
Source File: ReaperCassandraSidecarIT.java From cassandra-reaper with Apache License 2.0

public static void initSchema() throws IOException {
    try (Cluster cluster = buildCluster(); Session tmpSession = cluster.connect()) {
        await().with().pollInterval(3, SECONDS).atMost(2, MINUTES).until(() -> {
            try {
                tmpSession.execute("DROP KEYSPACE IF EXISTS reaper_db");
                return true;
            } catch (RuntimeException ex) {
                return false;
            }
        });
        tmpSession.execute(
            "CREATE KEYSPACE reaper_db WITH replication = {"
                + BasicSteps.buildNetworkTopologyStrategyString(cluster)
                + "}");
    }
}
Example #27
Source File: BuildCluster.java From cassandra-jdbc-wrapper with Apache License 2.0

public static boolean isClusterActive() {
    try {
        Builder builder = Cluster.builder().withQueryOptions(
                new QueryOptions().setConsistencyLevel(ConsistencyLevel.QUORUM)
                        .setSerialConsistencyLevel(ConsistencyLevel.LOCAL_SERIAL));
        cluster = builder.addContactPoint("127.0.0.1").build();
        session = cluster.connect();
        return true;
    } catch (Exception e) {
        return false;
    }
}
Example #28
Source File: CassandraConnectorFactory.java From metacat with Apache License 2.0

/**
 * {@inheritDoc}
 */
@Override
public void stop() {
    super.stop();

    // Stop the cassandra cluster
    final Cluster cluster = this.getInjector().getInstance(Cluster.class);
    if (cluster != null) {
        cluster.close();
    }
}
Example #29
Source File: ReaperCassandraIT.java From cassandra-reaper with Apache License 2.0

public static void initSchema() throws IOException {
    try (Cluster cluster = buildCluster(); Session tmpSession = cluster.connect()) {
        await().with().pollInterval(3, SECONDS).atMost(2, MINUTES).until(() -> {
            try {
                tmpSession.execute("DROP KEYSPACE IF EXISTS reaper_db");
                return true;
            } catch (RuntimeException ex) {
                return false;
            }
        });
        tmpSession.execute(
            "CREATE KEYSPACE reaper_db WITH replication = {"
                + BasicSteps.buildNetworkTopologyStrategyString(cluster)
                + "}");
    }
}
Example #30
Source File: PeerMetadataIntegrationTest.java From simulacron with Apache License 2.0

@Test
public void testClusterDiscovery() throws Exception {
    // Validate that peers are appropriately discovered when connecting to a node.
    try (BoundCluster boundCluster = server.register(ClusterSpec.builder().withNodes(3, 3, 3));
        Cluster driverCluster = defaultBuilder(boundCluster).build()) {
        BoundDataCenter dc0 = boundCluster.getDataCenters().iterator().next();
        driverCluster.init();

        // Should be 9 hosts
        assertThat(driverCluster.getMetadata().getAllHosts()).hasSize(9);

        // Connect and ensure pools are created to local dc hosts.
        Session session = driverCluster.connect();

        // Verify hosts connected to are only those in the local DC.
        Collection<SocketAddress> connectedHosts =
            session
                .getState()
                .getConnectedHosts()
                .stream()
                .map(Host::getSocketAddress)
                .collect(Collectors.toList());

        Collection<SocketAddress> dcHosts =
            dc0.getNodes().stream().map(BoundNode::getAddress).collect(Collectors.toList());

        assertThat(connectedHosts).hasSameElementsAs(dcHosts);
    }
}