com.datastax.driver.core.Cluster Scala Examples

The following examples show how to use com.datastax.driver.core.Cluster. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
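All of the examples share the same basic lifecycle: build a Cluster from one or more contact points, open a Session, execute CQL statements, and close both when finished. The sketch below shows that pattern in isolation, assuming a local node on the default port 9042; the query against system.local is only a placeholder.

import com.datastax.driver.core.{Cluster, Session}

object MinimalClusterExample extends App {
  // Build a Cluster pointing at a single local contact point (assumed values).
  val cluster: Cluster = Cluster.builder()
    .addContactPoint("127.0.0.1")
    .withPort(9042)
    .build()

  // Open a Session; pass a keyspace name to connect(...) to bind one up front.
  val session: Session = cluster.connect()

  try {
    // Any CQL statement works here; system.local is just a convenient smoke test.
    val row = session.execute("SELECT release_version FROM system.local;").one()
    println(s"Connected to Cassandra ${row.getString("release_version")}")
  } finally {
    // Always release driver resources, as every example below does.
    session.close()
    cluster.close()
  }
}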
Example 1
Source File: EventsByInterval.scala    From spark-streaming-demo   with Apache License 2.0
package com.datastax.examples.meetup

import com.datastax.driver.core.{Cluster, Session, Row}
import com.websudos.phantom.CassandraTable
import com.websudos.phantom.Implicits._
import scala.concurrent.Future


case class EventModel (
                event: String,
                interval: String,
                dimension: String,
                subtotal: Long
            )

sealed class EventRecord extends CassandraTable[EventRecord, EventModel]
{
  override val tableName = "events_by_interval"
  object event extends StringColumn(this) with PartitionKey[String]
  object interval extends StringColumn(this) with ClusteringOrder[String] with Descending
  object dimension extends StringColumn(this) with ClusteringOrder[String] with Ascending
  object subtotal extends CounterColumn(this)

  override def fromRow(row: Row): EventModel = {
    EventModel(
      event(row),
      interval(row),
      dimension(row),
      subtotal(row)
    )
  }
}

object Event extends EventRecord
{
  val keyspace = "demo"
  val cluster = Cluster.builder().addContactPoint("127.0.0.1").build()
  implicit val session = cluster.connect(keyspace)

//  def hourly(hashtag: String): Future[Seq[(String, Long)]] = {
//    select (_.interval, _.subtotal) where (_.event eqs hashtag) and (_.interval gte "M") and (_.interval lt "N") limit 60 fetch
//  }

  def dimensions(event: String, interval: String): Future[Seq[(String, Long)]] = {
    select (_.dimension, _.subtotal) where (_.event eqs event) and (_.interval eqs interval) limit 500 fetch
  }

} 
Example 2
Source File: Main.scala    From troy   with Apache License 2.0
package demo3

import java.util.UUID
import com.datastax.driver.core.{Row, Cluster, Session}
import troy.dsl._
import troy.driver.DSL._

import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, title: String)

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val get = withSchema {
    (authorId: String, postId: UUID) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE author_id = $authorId AND post_id = $postId
       """
        .prepared
        .executeAsync
        .oneOption
        .as(Post)
  }

  val result = get("test", UUID.fromString("a4a70900-24e1-11df-8924-001ff3591711"))
  val maybePost: Option[Post] = Await.result(result, Duration(1, "second"))
  println(maybePost.map(_.title).getOrElse("Post not found"))

  session.close()
  cluster.close()
} 
Example 3
Source File: CassandraDao.scala    From Mastering-Spark-for-Data-Science   with MIT License
package svc

import com.datastax.driver.core.Cluster
import io.gzet.recommender.Node

import scala.collection.JavaConversions._
import scala.language.postfixOps

class CassandraDao(cassandraHost: String, cassandraPort: Int) {

  private val cluster = Cluster.builder().addContactPoint(cassandraHost).withPort(cassandraPort).build()
  val session = cluster.connect()

  private def exists(tableName: String): Boolean = {
    cluster.getMetadata.getKeyspace("music").getTable(tableName) != null
  }

  def dropSongs = {
    session.execute(s"DROP table IF EXISTS music.hash;")
    session.execute(s"DROP table IF EXISTS music.record;")
  }

  def dropPlaylist = {
    session.execute(s"DROP table IF EXISTS music.nodes;")
    session.execute(s"DROP table IF EXISTS music.edges;")
  }

  def findSongsByHash(hash: String): List[Long] = {
    if(!exists("hash")) return List[Long]()
    val stmt = s"SELECT songs FROM music.hash WHERE id = '$hash';"
    val results = session.execute(stmt)
    results flatMap { row =>
      row.getList("songs", classOf[java.lang.Long]).map(_.toLong)
    } toList
  }

  def getSongs: List[String] = {
    if(!exists("record")) return List[String]()
    val stmt = s"SELECT name FROM music.record;"
    val results = session.execute(stmt)
    results map { row =>
      row.getString("name")
    } toList
  }

  def getSongName(songId: Long): Option[String] = {
    if(!exists("record")) return None
    val stmt = s"SELECT name FROM music.record WHERE id = $songId;"
    val results = session.execute(stmt)
    val songNames = results map { row =>
      row.getString("name")
    }

    if(songNames.isEmpty) None: Option[String] else Some(songNames.head)
  }

  def getSongId(songName: String): Option[Long] = {
    if(!exists("record")) return None
    val stmt = s"SELECT id FROM music.record WHERE name = '$songName';"
    val results = session.execute(stmt)
    val songIds = results map { row =>
      row.getLong("id")
    }

    if(songIds.isEmpty) None: Option[Long] else Some(songIds.head)
  }

  def getNodes: List[Node] = {
    if(!exists("nodes")) return List[Node]()
    val stmt = s"SELECT id, name, popularity FROM music.nodes;"
    val results = session.execute(stmt)
    results map { row =>
      val id = row.getLong("id")
      val name = row.getString("name")
      val popularity = row.getDouble("popularity")
      Node(id, name, popularity)
    } toList
  }

  def close() {
    session.close()
    cluster.close()
  }

} 
Example 4
Source File: CassandraDao.scala    From Mastering-Spark-for-Data-Science   with MIT License
package io.gzet.story.web.dao

import com.datastax.driver.core.Cluster
import io.gzet.story.model.Article
import io.gzet.story.util.SimhashUtils._
import io.gzet.story.web.SimpleConfig

import scala.collection.JavaConversions._
import scala.language.postfixOps

class CassandraDao() extends SimpleConfig {

  private val cluster = Cluster.builder().addContactPoint(cassandraHost).withPort(cassandraPort).build()
  val session = cluster.connect()

  def count(): Long = {
    val stmt = s"SELECT count(*) FROM $cassandraKeyspace.$cassandraTable;"
    val results = session.execute(stmt).all()
    results map { row =>
      row.getLong(0)
    } head
  }

  def findDuplicates(hash: Int): List[Article] = {
    searchmasks flatMap { mask =>
      val searchHash = mask ^ hash
      val stmt = s"SELECT hash, url, title, body FROM $cassandraKeyspace.$cassandraTable WHERE hash = $searchHash;"
      val results = session.execute(stmt).all()
      results map { row =>
        Article(row.getInt("hash"), row.getString("body"), row.getString("title"), row.getString("url"))
      }
    } toList
  }

} 
Example 5
Source File: Main.scala    From troy   with Apache License 2.0
package demo6

import java.util.UUID
import com.datastax.driver.core.{Row, Cluster, Session}
import troy.dsl._
import troy.driver.DSL._

import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, title: String)

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val create = withSchema {
    (authorId: String, title: String) =>
      cql"""
         INSERT INTO test.posts (author_id , post_id , post_title )
         VALUES ( $authorId, now(), $title);
       """.prepared.executeAsync
  }

  val listByAuthor = withSchema {
    (authorId: String) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE author_id = $authorId
       """
        .prepared
        .executeAsync
        .as(Post)
  }

  println(Await.result(create("test", "title"), Duration(1, "second")))
  println(Await.result(listByAuthor("test"), Duration(1, "second")))

  session.close()
  cluster.close()
} 
Example 6
Source File: Main.scala    From troy   with Apache License 2.0
package demo5

import java.util.UUID
import com.datastax.driver.core.{Cluster, Session}
import troy.dsl._
import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, comments: Map[Int, String])

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val getCommentsByLine = withSchema {
    (authorId: String, postId: UUID, line: Int) =>
      cql"""
         SELECT post_id, comments
         FROM test.posts
         WHERE author_id = $authorId
           AND post_id = $postId
           AND comments CONTAINS KEY $line
       """.prepared.as(Post)
  }

  val postId = UUID.fromString("a4a70900-24e1-11df-8924-001ff3591711")
  println(Await.result(getCommentsByLine("test", postId, 5), Duration(1, "second")))

  session.close()
  cluster.close()
} 
Example 7
Source File: Main.scala    From troy   with Apache License 2.0
package demo2

import java.util.UUID
import com.datastax.driver.core.{Row, Cluster, Session}
import troy.dsl._
import troy.driver.DSL._

import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, title: String)

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val listByAuthor = withSchema {
    (authorId: String) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE author_id = $authorId
       """
        .prepared
        .executeAsync
        .as(Post)
  }

  val result = listByAuthor("test")
  println(Await.result(result, Duration(1, "second")))

  session.close()
  cluster.close()
} 
Example 8
Source File: Main.scala    From troy   with Apache License 2.0
package demo4

import java.util.UUID
import com.datastax.driver.core.{Cluster, Session}
import troy.dsl._
import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, title: String)

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val listByTag = withSchema {
    (tag: String) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE post_tags CONTAINS $tag
       """.prepared.as(Post)
  }

  val titlesF = listByTag("test").map(_.map(_.title))
  val titles = Await.result(titlesF, Duration(1, "second"))
  println("Matching titles:")
  titles.foreach(println)

  session.close()
  cluster.close()
} 
Example 9
Source File: Main.scala    From troy   with Apache License 2.0
package demo1

import com.datastax.driver.core.{Cluster, Session}
import troy.dsl._
import troy.driver.DSL._
import scala.concurrent.ExecutionContext.Implicits.global

object Main extends App {
  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val listByAuthor = withSchema {
    (authorId: String) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE author_id = $authorId
       """.prepared.executeAsync
  }


  println(listByAuthor("test"))

  session.close()
  cluster.close()
} 
Example 10
Source File: CassFormatEncoderVersionSpecific.scala    From scala-cass   with MIT License
package com.weather.scalacass

import com.datastax.driver.core.{ Cluster, DataType, TupleValue }

trait LowPriorityCassFormatEncoderVersionSpecific {
  implicit def tupleFormat[TUP <: Product](implicit cluster: Cluster, underlying: TupleCassFormatEncoder[TUP]): CassFormatEncoder[TUP] = new CassFormatEncoder[TUP] {
    type From = TupleValue
    val cassDataType = cluster.getMetadata.newTupleType(underlying.dataTypes: _*)
    def encode(f: TUP): Result[From] = underlying.encode(f).right.map(ar => cassDataType.newValue(ar: _*))
  }
}
trait CassFormatEncoderVersionSpecific extends LowPriorityCassFormatEncoderVersionSpecific {
  import CassFormatEncoder.{ sameTypeCassFormatEncoder, transCassFormatEncoder }

  implicit val dateFormat: CassFormatEncoder[java.util.Date] = sameTypeCassFormatEncoder(DataType.timestamp)
  implicit val datastaxLocalDateFormat: CassFormatEncoder[com.datastax.driver.core.LocalDate] =
    sameTypeCassFormatEncoder(DataType.date)
  implicit val timeFormat: CassFormatEncoder[Time] = transCassFormatEncoder(DataType.time, time => Long.box(time.millis))
} 
Example 11
Source File: Main.scala    From troy   with Apache License 2.0
package demo1

import java.util.UUID
import com.datastax.driver.core.{Cluster, Session}
import troy.dsl._
import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, img: String) // Map doesn't work with Primitives yet. See: https://github.com/cassandra-scala/troy/issues/18

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val getCommentsByLine = withSchema.minVersion(2).maxVersion(2) {
    (authorId: String, postId: UUID) =>
      cql"""
         SELECT post_id, post_img
         FROM test.posts
         WHERE author_id = $authorId
           AND post_id = $postId
       """.prepared.as(Post)
  }

  val postId = UUID.fromString("a4a70900-24e1-11df-8924-001ff3591711")
  println(Await.result(getCommentsByLine("test", postId), Duration(1, "second")))

  session.close()
  cluster.close()
} 
Example 12
Source File: BaseSpec.scala    From troy   with Apache License 2.0
package troy
package meta

import java.util

import com.datastax.driver.core.{ Session, Cluster }
import org.cassandraunit.CQLDataLoader
import org.cassandraunit.dataset.CQLDataSet
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.scalatest.concurrent.ScalaFutures
import org.scalatest._
import scala.concurrent.duration._

trait BaseSpec extends FlatSpec with BeforeAndAfterAll with BeforeAndAfterEach with ScalaFutures with Matchers {
  def port: Int = 9142
  def host: String = "127.0.0.1"

  private lazy val cluster = new Cluster.Builder().addContactPoints(host).withPort(port).build()
  implicit lazy val session: Session = cluster.connect()
  implicit val patienceTimeout = org.scalatest.concurrent.PatienceConfiguration.Timeout(10.seconds)

  def testDataFixtures: String = ""
  private lazy val fixtures = StringCQLDataSet(testDataFixtures, false, false, "test")
  private lazy val schema = new ClassPathCQLDataSet("schema.cql")

  override protected def beforeAll(): Unit = {
    EmbeddedCassandraServerHelper.startEmbeddedCassandra(1.minute.toMillis)
    loadClean()
    super.beforeEach()
  }

  override protected def afterAll(): Unit = {
    session.close()
    cluster.close()
    super.afterAll()
  }

  def loadClean() = {
    EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()
    loadData(schema, fixtures)
  }

  def loadData(datasets: CQLDataSet*) = {
    val loader = new CQLDataLoader(session)
    datasets.foreach(loader.load)
  }
}

object Helpers {
  def splitStatements(statements: String) =
    statements.split(";").map(_.trim).filter(!_.isEmpty)
}

case class StringCQLDataSet(
    cqlStatements: String,
    isKeyspaceCreation: Boolean,
    isKeyspaceDeletion: Boolean,
    getKeyspaceName: String
) extends CQLDataSet {
  lazy val getCQLStatements = util.Arrays.asList(Helpers.splitStatements(cqlStatements): _*)

} 
Example 13
Source File: Playground.scala    From troy   with Apache License 2.0
package troy
package meta

import java.util.UUID

import com.datastax.driver.core.{ Cluster, Session }
import com.datastax.driver.core.utils.UUIDs

import scala.concurrent.duration.Duration
import scala.concurrent.{ Await, Future }
import scala.util.Try

@schema object Schema

object Playground extends App {
  def query[I, O](x: String): I => Future[Seq[O]] = ???
  case class Post(id: UUID, title: String)

  import Schema._

  withSession { implicit session =>
    import scala.concurrent.ExecutionContext.Implicits.global

    @schemasafe val getAuthorPosts =
      query[(UUID, Int), Post]("select post_id, post_title from test.posts where author_id = ? AND post_rating >= ? ALLOW FILTERING;")

    val authorId = UUID.fromString("6287c470-e298-11e6-9b3d-ffeaf4ddcb54")
    println(Await.result(getAuthorPosts((authorId, 4)): Future[Iterable[Post]], Duration.Inf))
  }

  def withSession[T](f: Session => T) = {
    val cluster = new Cluster.Builder().addContactPoints("127.0.0.1").withPort(9042).build()
    val session: Session = cluster.connect()
    Try(f(session)).failed.foreach(println)
    session.close()
    cluster.close()
  }
} 
Example 14
Source File: CassandraSpec.scala    From troy   with Apache License 2.0
package troy
package macros

import java.util

import com.datastax.driver.core.{ Session, Cluster }
import org.cassandraunit.CQLDataLoader
import org.cassandraunit.dataset.CQLDataSet
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.scalatest.concurrent.ScalaFutures
import org.scalatest._
import org.scalatest.time.{ Minute, Seconds, Span }
import scala.concurrent.duration._

trait CassandraSpec extends FlatSpec with BeforeAndAfterAll with BeforeAndAfterEach with ScalaFutures with Matchers {
  def port: Int = 9142
  def host: String = "127.0.0.1"

  private lazy val cluster = new Cluster.Builder().addContactPoints(host).withPort(port).build()
  implicit lazy val session: Session = cluster.connect()
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(1, Minute), interval = Span(5, Seconds))

  def testDataFixtures: String = ""
  private lazy val fixtures = StringCQLDataSet(testDataFixtures, false, false, "test")
  private lazy val schema = Seq(new ClassPathCQLDataSet("schema/01.cql"), new ClassPathCQLDataSet("schema/02.cql"))

  override protected def beforeAll(): Unit = {
    EmbeddedCassandraServerHelper.startEmbeddedCassandra(1.minute.toMillis)
    loadClean()
    super.beforeEach()
  }

  override protected def afterAll(): Unit = {
    session.close()
    cluster.close()
    super.afterAll()
  }

  def loadClean() = {
    EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()
    loadData((schema :+ fixtures): _*)
  }

  def loadData(datasets: CQLDataSet*) = {
    val loader = new CQLDataLoader(session)
    datasets.foreach(loader.load)
  }
}

object Helpers {
  def splitStatements(statements: String) =
    statements.split(";").map(_.trim).filter(!_.isEmpty)
}

case class StringCQLDataSet(
    cqlStatements: String,
    isKeyspaceCreation: Boolean,
    isKeyspaceDeletion: Boolean,
    getKeyspaceName: String
) extends CQLDataSet {
  lazy val getCQLStatements = util.Arrays.asList(Helpers.splitStatements(cqlStatements): _*)

} 
Example 15
Source File: CassandraClient.scala    From izanami   with Apache License 2.0
package store.cassandra

import java.net.InetSocketAddress

import com.datastax.driver.core.{Cluster, Session}
import env.CassandraConfig
import libs.logs.{IzanamiLogger, ZLogger}
import zio.{Task, UIO, ZManaged}

object CassandraClient {

  def cassandraClient(mayBeConfig: Option[CassandraConfig]): ZManaged[ZLogger, Throwable, Option[(Cluster, Session)]] =
    mayBeConfig
      .map { config =>
        ZManaged
          .make(
            ZLogger.info(s"Initializing Cassandra cluster for ${config}") *> Task {
              val adds = config.addresses.map { add =>
                val Array(host, port) = add.split(":")
                new InetSocketAddress(host, port.toInt)
              }
              val builder: Cluster.Builder = Cluster.builder
                .withoutJMXReporting()
                .addContactPointsWithPorts(adds: _*)

              val b: Cluster.Builder = config.clusterName.map(builder.withClusterName).getOrElse(builder)

              val cluster: Cluster = (for {
                username <- config.username
                password <- config.password
              } yield {
                b.withCredentials(username, password)
              }).getOrElse(b).build()

              cluster.init()

              val session = cluster.connect()

              cluster.connect().execute(s"""
                                     |CREATE KEYSPACE IF NOT EXISTS ${config.keyspace} WITH REPLICATION = {
                                     | 'class' : 'SimpleStrategy', 'replication_factor' : ${config.replicationFactor}
                                     |}""".stripMargin)

              (cluster, session)
            }
          )(t => UIO(t._1.close))
          .map(Some.apply)

      }
      .getOrElse(ZManaged.effectTotal(None))
} 
Example 16
Source File: ExperimentVariantEventCassandraServiceTest.scala    From izanami   with Apache License 2.0
package specs.cassandra.abtesting

import com.datastax.driver.core.{Cluster, Session}
import domains.abtesting.events.impl.ExperimentVariantEventCassandraService
import domains.abtesting.AbstractExperimentServiceTest
import domains.abtesting.events.ExperimentVariantEventService
import env.{CassandraConfig, DbDomainConfig, DbDomainConfigDetails}
import libs.logs.ZLogger
import org.scalatest.BeforeAndAfterAll
import store.cassandra.CassandraClient
import zio.{Exit, Reservation}

class ExperimentVariantEventCassandraServiceTest
    extends AbstractExperimentServiceTest("Cassandra")
    with BeforeAndAfterAll {

  val cassandraConfig = CassandraConfig(Seq("127.0.0.1:9042"), None, 1, "izanami_test")
  private val rDriver: Reservation[ZLogger, Throwable, Option[(Cluster, Session)]] =
    runtime.unsafeRun(CassandraClient.cassandraClient(Some(cassandraConfig)).reserve.provideLayer(ZLogger.live))

  val Some((_, session)) = runtime.unsafeRun(rDriver.acquire.provideLayer(ZLogger.live))

  override protected def afterAll(): Unit = {
    super.afterAll()
    runtime.unsafeRun(rDriver.release(Exit.unit).provideLayer(ZLogger.live))
  }

  override def dataStore(name: String): ExperimentVariantEventService.Service = ExperimentVariantEventCassandraService(
    session,
    DbDomainConfig(env.Cassandra, DbDomainConfigDetails(name, None), None),
    cassandraConfig
  )
} 
Example 17
Source File: CassandraJsonDataStoreTest.scala    From izanami   with Apache License 2.0
package specs.cassandra.store

import com.datastax.driver.core.{Cluster, Session}
import env.{CassandraConfig, DbDomainConfig, DbDomainConfigDetails}
import libs.logs.ZLogger
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
import store.AbstractJsonDataStoreTest
import store.cassandra.CassandraClient
import store.cassandra.CassandraJsonDataStore
import zio.{Exit, Reservation}

class CassandraJsonDataStoreTest
    extends AbstractJsonDataStoreTest("Cassandra")
    with BeforeAndAfter
    with BeforeAndAfterAll {

  val cassandraConfig = CassandraConfig(Seq("127.0.0.1:9042"), None, 1, "izanami_test")

  private val rDriver: Reservation[ZLogger, Throwable, Option[(Cluster, Session)]] =
    runtime.unsafeRun(CassandraClient.cassandraClient(Some(cassandraConfig)).reserve.provideLayer(ZLogger.live))

  val Some((_, session)) = runtime.unsafeRun(rDriver.acquire.provideLayer(ZLogger.live))

  override protected def afterAll(): Unit = {
    super.afterAll()
    runtime.unsafeRun(rDriver.release(Exit.unit).provideLayer(ZLogger.live))
  }

  override def dataStore(name: String): CassandraJsonDataStore =
    CassandraJsonDataStore(
      session,
      cassandraConfig,
      DbDomainConfig(env.Cassandra, DbDomainConfigDetails(name, None), None)
    )

} 
Example 18
Source File: CassandraClient.scala    From scala-cass   with MIT License
package com.weather.scalacass.util

import com.datastax.driver.core.Cluster

case class CassandraClient(hosts: List[String], port: Option[Int]) extends CassandraClientVersionSpecific {
  val cluster = {
    val c = Cluster.builder().addContactPoints(hosts: _*)
    port.foreach(c.withPort)
    clusterStartup(c)
    c.build()
  }
  val session = cluster.connect()

  def close() = cluster.close()
} 
Example 19
Source File: Implicits.scala    From scala-cass   with MIT License
package com.weather.scalacass.jdk8

import com.weather.scalacass.{ CassFormatDecoder, CassFormatEncoder }
import com.weather.scalacass.CassFormatDecoderVersionSpecific.codecCassFormatDecoder
import CassFormatEncoder.sameTypeCassFormatEncoder
import java.time.{ Instant, LocalDate, LocalTime, ZonedDateTime }

import com.datastax.driver.core.{ Cluster, DataType }
import com.google.common.reflect.TypeToken

object Implicits {
  implicit val timeEncoder: CassFormatEncoder[LocalTime] = sameTypeCassFormatEncoder(DataType.time)
  implicit val timeDecoder: CassFormatDecoder[LocalTime] = codecCassFormatDecoder(TypeToken.of(classOf[LocalTime]))

  implicit val dateEncoder: CassFormatEncoder[LocalDate] = sameTypeCassFormatEncoder(DataType.date)
  implicit val dateDecoder: CassFormatDecoder[LocalDate] = codecCassFormatDecoder(TypeToken.of(classOf[LocalDate]))

  implicit val instantEncoder: CassFormatEncoder[Instant] = sameTypeCassFormatEncoder(DataType.timestamp)
  implicit val instantDecoder: CassFormatDecoder[Instant] = codecCassFormatDecoder(TypeToken.of(classOf[Instant]))

  implicit def zonedDateTimeEncoder(implicit cluster: Cluster): CassFormatEncoder[ZonedDateTime] =
    sameTypeCassFormatEncoder(cluster.getMetadata.newTupleType(DataType.timestamp, DataType.varchar))
  implicit val zonedDateTimeDecoder: CassFormatDecoder[ZonedDateTime] = codecCassFormatDecoder(TypeToken.of(classOf[ZonedDateTime]))
} 
Example 20
Source File: Implicits.scala    From scala-cass   with MIT License
package com.weather.scalacass.joda

import com.datastax.driver.core.{ Cluster, DataType }
import com.google.common.reflect.TypeToken
import com.weather.scalacass.{ CassFormatDecoder, CassFormatEncoder }
import com.weather.scalacass.CassFormatEncoder.sameTypeCassFormatEncoder
import com.weather.scalacass.CassFormatDecoderVersionSpecific.codecCassFormatDecoder
import org.joda.time.{ DateTime, Instant, LocalDate, LocalTime }

object Implicits {
  implicit val timeEncoder: CassFormatEncoder[LocalTime] = sameTypeCassFormatEncoder(DataType.time)
  implicit val timeDecoder: CassFormatDecoder[LocalTime] = codecCassFormatDecoder(TypeToken.of(classOf[LocalTime]))

  implicit val dateEncoder: CassFormatEncoder[LocalDate] = sameTypeCassFormatEncoder(DataType.date)
  implicit val dateDecoder: CassFormatDecoder[LocalDate] = codecCassFormatDecoder(TypeToken.of(classOf[LocalDate]))

  implicit val instantEncoder: CassFormatEncoder[Instant] = sameTypeCassFormatEncoder(DataType.timestamp)
  implicit val instantDecoder: CassFormatDecoder[Instant] = codecCassFormatDecoder(TypeToken.of(classOf[Instant]))

  implicit def timestampEncoder(implicit cluster: Cluster): CassFormatEncoder[DateTime] =
    sameTypeCassFormatEncoder(cluster.getMetadata.newTupleType(DataType.timestamp, DataType.varchar))
  implicit val timestampDecoder: CassFormatDecoder[DateTime] = codecCassFormatDecoder(TypeToken.of(classOf[DateTime]))
} 
Example 21
Source File: CassandraAsyncContext.scala    From quill   with Apache License 2.0
package io.getquill

import com.datastax.driver.core.Cluster
import com.typesafe.config.Config
import io.getquill.context.cassandra.util.FutureConversions._
import io.getquill.monad.ScalaFutureIOMonad
import io.getquill.util.{ ContextLogger, LoadConfig }

import scala.jdk.CollectionConverters._
import scala.concurrent.{ ExecutionContext, Future }

class CassandraAsyncContext[N <: NamingStrategy](
  naming:                     N,
  cluster:                    Cluster,
  keyspace:                   String,
  preparedStatementCacheSize: Long
)
  extends CassandraClusterSessionContext[N](naming, cluster, keyspace, preparedStatementCacheSize)
  with ScalaFutureIOMonad {

  def this(naming: N, config: CassandraContextConfig) = this(naming, config.cluster, config.keyspace, config.preparedStatementCacheSize)

  def this(naming: N, config: Config) = this(naming, CassandraContextConfig(config))

  def this(naming: N, configPrefix: String) = this(naming, LoadConfig(configPrefix))

  private val logger = ContextLogger(classOf[CassandraAsyncContext[_]])

  override type Result[T] = Future[T]
  override type RunQueryResult[T] = List[T]
  override type RunQuerySingleResult[T] = T
  override type RunActionResult = Unit
  override type RunBatchActionResult = Unit

  override def performIO[T](io: IO[T, _], transactional: Boolean = false)(implicit ec: ExecutionContext): Result[T] = {
    if (transactional) logger.underlying.warn("Cassandra doesn't support transactions, ignoring `io.transactional`")
    super.performIO(io)
  }

  def executeQuery[T](cql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor)(implicit executionContext: ExecutionContext): Result[RunQueryResult[T]] = {
    val statement = prepareAsyncAndGetStatement(cql, prepare, logger)
    statement.flatMap(st => session.executeAsync(st).asScala)
      .map(_.all.asScala.toList.map(extractor))
  }

  def executeQuerySingle[T](cql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor)(implicit executionContext: ExecutionContext): Result[RunQuerySingleResult[T]] = {
    executeQuery(cql, prepare, extractor).map(handleSingleResult)
  }

  def executeAction[T](cql: String, prepare: Prepare = identityPrepare)(implicit executionContext: ExecutionContext): Result[RunActionResult] = {
    val statement = prepareAsyncAndGetStatement(cql, prepare, logger)
    statement.flatMap(st => session.executeAsync(st).asScala).map(_ => ())
  }

  def executeBatchAction(groups: List[BatchGroup])(implicit executionContext: ExecutionContext): Result[RunBatchActionResult] = {
    Future.sequence {
      groups.flatMap {
        case BatchGroup(cql, prepare) =>
          prepare.map(executeAction(cql, _))
      }
    }.map(_ => ())
  }
} 
Example 22
Source File: CassandraClusterSessionContext.scala    From quill   with Apache License 2.0
package io.getquill

import com.datastax.driver.core.{ Cluster, _ }
import io.getquill.context.cassandra.util.FutureConversions._
import io.getquill.context.cassandra.{ CassandraSessionContext, PrepareStatementCache }
import io.getquill.util.Messages.fail

import scala.jdk.CollectionConverters._
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Failure

abstract class CassandraClusterSessionContext[N <: NamingStrategy](
  val naming:                 N,
  cluster:                    Cluster,
  keyspace:                   String,
  preparedStatementCacheSize: Long
)
  extends CassandraSessionContext[N] {

  private lazy val asyncCache = new PrepareStatementCache[Future[PreparedStatement]](preparedStatementCacheSize)
  private lazy val syncCache = new PrepareStatementCache[PreparedStatement](preparedStatementCacheSize)

  protected lazy val session = cluster.connect(keyspace)

  protected val udtMetadata: Map[String, List[UserType]] = cluster.getMetadata.getKeyspaces.asScala.toList
    .flatMap(_.getUserTypes.asScala)
    .groupBy(_.getTypeName)

  def udtValueOf(udtName: String, keyspace: Option[String] = None): UDTValue =
    udtMetadata.getOrElse(udtName.toLowerCase, Nil) match {
      case udt :: Nil => udt.newValue()
      case Nil =>
        fail(s"Could not find UDT `$udtName` in any keyspace")
      case udts => udts
        .find(udt => keyspace.contains(udt.getKeyspace) || udt.getKeyspace == session.getLoggedKeyspace)
        .map(_.newValue())
        .getOrElse(fail(s"Could not determine to which keyspace `$udtName` UDT belongs. " +
          s"Please specify desired keyspace using UdtMeta"))
    }

  protected def prepare(cql: String): BoundStatement =
    syncCache(cql)(stmt => session.prepare(stmt)).bind()

  protected def prepareAsync(cql: String)(implicit executionContext: ExecutionContext): Future[BoundStatement] =
    asyncCache(cql)(stmt => session.prepareAsync(stmt).asScala andThen {
      case Failure(_) => asyncCache.invalidate(stmt)
    }).map(_.bind())

  def close(): Unit = {
    session.close()
    cluster.close()
  }
} 
Example 23
Source File: ClusterBuilder.scala    From quill   with Apache License 2.0
package io.getquill.context.cassandra.cluster

import io.getquill.util.Messages._
import scala.util.Try
import com.typesafe.config.Config
import com.typesafe.config.ConfigValueType
import java.lang.reflect.Method
import scala.jdk.CollectionConverters._
import com.datastax.driver.core.Cluster

object ClusterBuilder {

  def apply(cfg: Config) =
    set(Cluster.builder, cfg)

  private def set[T](instance: T, cfg: Config): T = {
    for (key <- cfg.entrySet.asScala.map(_.getKey.split('.').head)) {

      def tryMethod(m: Method) =
        m.getParameterTypes.toList match {
          case Nil =>
            Try(cfg.getBoolean(key)).map {
              case true  => m.invoke(instance)
              case false =>
            }
          case tpe :: Nil =>
            param(key, tpe, cfg)
              .map(p => m.invoke(instance, p.asInstanceOf[AnyRef]))
          case tpe :: tail =>
            val c = cfg.getConfig(key)
            tail.foldLeft(param("0", tpe, c).map(List(_))) {
              case (list, tpe) =>
                list.flatMap { l =>
                  val key = s"${l.size}"
                  param(key, tpe, c).map(l :+ _)
                }
            }.map { params =>
              m.invoke(instance, params.asInstanceOf[List[Object]]: _*)
            }
        }

      def tryMethods(m: List[Method]): Any =
        m match {
          case Nil       => fail(s"Invalid config key '$key'")
          case m :: tail => tryMethod(m).getOrElse(tryMethods(tail))
        }

      tryMethods {
        instance.getClass.getMethods.toList.filter { m =>
          m.getName == key ||
            m.getName == s"with${key.capitalize}" ||
            m.getName == s"add${key.capitalize}" ||
            m.getName == s"set${key.capitalize}"
        }
      }
    }

    instance
  }

  val stringArrayClass = java.lang.reflect.Array.newInstance(classOf[String], 0).getClass()

  private def param(key: String, tpe: Class[_], cfg: Config) =
    Try {
      if (tpe == classOf[String])
        cfg.getString(key)
      else if (tpe == stringArrayClass)
        cfg.getStringList(key).asScala.toArray
      else if (tpe == classOf[Int] || tpe == classOf[Integer])
        cfg.getInt(key)
      else if (tpe.isEnum)
        tpe.getMethod("valueOf", classOf[String]).invoke(tpe, cfg.getString(key))
      else if (cfg.getValue(key).valueType == ConfigValueType.STRING)
        getClass.getClassLoader.loadClass(cfg.getString(key)).getConstructor().newInstance()
      else
        set(tpe.getConstructor().newInstance(), cfg.getConfig(key))
    }
} 
Example 24
Source File: CassandraContextConfig.scala    From quill   with Apache License 2.0
package io.getquill

import com.datastax.driver.core.Cluster
import com.typesafe.config.Config
import io.getquill.context.cassandra.cluster.ClusterBuilder

case class CassandraContextConfig(config: Config) {
  def preparedStatementCacheSize: Long =
    if (config.hasPath("preparedStatementCacheSize"))
      config.getLong("preparedStatementCacheSize")
    else
      1000
  def builder = ClusterBuilder(config.getConfig("session"))
  def cluster: Cluster = builder.build
  def keyspace: String = config.getString("keyspace")
} 
Example 25
Source File: CassandraSyncContext.scala    From quill   with Apache License 2.0
package io.getquill

import com.datastax.driver.core.Cluster
import com.typesafe.config.Config
import io.getquill.monad.SyncIOMonad
import io.getquill.util.{ ContextLogger, LoadConfig }

import scala.jdk.CollectionConverters._

class CassandraSyncContext[N <: NamingStrategy](
  naming:                     N,
  cluster:                    Cluster,
  keyspace:                   String,
  preparedStatementCacheSize: Long
)
  extends CassandraClusterSessionContext[N](naming, cluster, keyspace, preparedStatementCacheSize)
  with SyncIOMonad {

  def this(naming: N, config: CassandraContextConfig) = this(naming, config.cluster, config.keyspace, config.preparedStatementCacheSize)
  def this(naming: N, config: Config) = this(naming, CassandraContextConfig(config))
  def this(naming: N, configPrefix: String) = this(naming, LoadConfig(configPrefix))

  private val logger = ContextLogger(classOf[CassandraSyncContext[_]])

  override type Result[T] = T
  override type RunQueryResult[T] = List[T]
  override type RunQuerySingleResult[T] = T
  override type RunActionResult = Unit
  override type RunBatchActionResult = Unit

  override def performIO[T](io: IO[T, _], transactional: Boolean = false): Result[T] = {
    if (transactional) logger.underlying.warn("Cassandra doesn't support transactions, ignoring `io.transactional`")
    super.performIO(io)
  }

  def executeQuery[T](cql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor): List[T] = {
    val (params, bs) = prepare(this.prepare(cql))
    logger.logQuery(cql, params)
    session.execute(bs)
      .all.asScala.toList.map(extractor)
  }

  def executeQuerySingle[T](cql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor): T =
    handleSingleResult(executeQuery(cql, prepare, extractor))

  def executeAction[T](cql: String, prepare: Prepare = identityPrepare): Unit = {
    val (params, bs) = prepare(this.prepare(cql))
    logger.logQuery(cql, params)
    session.execute(bs)
    ()
  }

  def executeBatchAction(groups: List[BatchGroup]): Unit =
    groups.foreach {
      case BatchGroup(cql, prepare) =>
        prepare.foreach(executeAction(cql, _))
    }
} 
Example 26
Source File: CassandraMonixContext.scala    From quill   with Apache License 2.0
package io.getquill

import com.datastax.driver.core.{ Cluster, ResultSet, Row }
import com.typesafe.config.Config
import io.getquill.context.cassandra.CqlIdiom
import io.getquill.context.monix.{ MonixContext, Runner }
import io.getquill.util.{ ContextLogger, LoadConfig }
import io.getquill.context.cassandra.util.FutureConversions._
import monix.eval.Task
import monix.execution.Scheduler
import monix.reactive.Observable

import scala.jdk.CollectionConverters._
import scala.util.{ Failure, Success }

class CassandraMonixContext[N <: NamingStrategy](
  naming:                     N,
  cluster:                    Cluster,
  keyspace:                   String,
  preparedStatementCacheSize: Long
)
  extends CassandraClusterSessionContext[N](naming, cluster, keyspace, preparedStatementCacheSize)
  with MonixContext[CqlIdiom, N] {

  // not using this here
  override val effect = Runner.default

  def this(naming: N, config: CassandraContextConfig) = this(naming, config.cluster, config.keyspace, config.preparedStatementCacheSize)
  def this(naming: N, config: Config) = this(naming, CassandraContextConfig(config))
  def this(naming: N, configPrefix: String) = this(naming, LoadConfig(configPrefix))

  private val logger = ContextLogger(classOf[CassandraMonixContext[_]])

  override type StreamResult[T] = Observable[T]
  override type RunActionResult = Unit
  override type Result[T] = Task[T]

  override type RunQueryResult[T] = List[T]
  override type RunQuerySingleResult[T] = T
  override type RunBatchActionResult = Unit

  protected def page(rs: ResultSet): Task[Iterable[Row]] = Task.defer {
    val available = rs.getAvailableWithoutFetching
    val page = rs.asScala.take(available)

    if (rs.isFullyFetched)
      Task.now(page)
    else
      Task.fromFuture(rs.fetchMoreResults().asScalaWithDefaultGlobal).map(_ => page)
  }

  def streamQuery[T](fetchSize: Option[Int], cql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor): Observable[T] = {

    Observable
      .fromTask(prepareRowAndLog(cql, prepare))
      .mapEvalF(p => session.executeAsync(p).asScalaWithDefaultGlobal)
      .flatMap(Observable.fromAsyncStateAction((rs: ResultSet) => page(rs).map((_, rs)))(_))
      .takeWhile(_.nonEmpty)
      .flatMap(Observable.fromIterable)
      .map(extractor)
  }

  def executeQuery[T](cql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor): Task[List[T]] = {
    streamQuery[T](None, cql, prepare, extractor)
      .foldLeftL(List[T]())({ case (l, r) => r +: l }).map(_.reverse)
  }

  def executeQuerySingle[T](cql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor): Task[T] =
    executeQuery(cql, prepare, extractor).map(handleSingleResult(_))

  def executeAction[T](cql: String, prepare: Prepare = identityPrepare): Task[Unit] = {
    prepareRowAndLog(cql, prepare)
      .flatMap(r => Task.fromFuture(session.executeAsync(r).asScalaWithDefaultGlobal))
      .map(_ => ())
  }

  def executeBatchAction(groups: List[BatchGroup]): Task[Unit] =
    Observable.fromIterable(groups).flatMap {
      case BatchGroup(cql, prepare) =>
        Observable.fromIterable(prepare)
          .flatMap(prep => Observable.fromTask(executeAction(cql, prep)))
          .map(_ => ())
    }.completedL

  private def prepareRowAndLog(cql: String, prepare: Prepare = identityPrepare): Task[PrepareRow] = {
    Task.async0[PrepareRow] { (scheduler, callback) =>
      implicit val executor: Scheduler = scheduler

      super.prepareAsync(cql)
        .map(prepare)
        .onComplete {
          case Success((params, bs)) =>
            logger.logQuery(cql, params)
            callback.onSuccess(bs)
          case Failure(ex) =>
            callback.onError(ex)
        }
    }
  }
} 
Example 27
Source File: CassandraStreamContext.scala    From quill   with Apache License 2.0
package io.getquill

import com.datastax.driver.core.{ Cluster, ResultSet, Row }
import com.typesafe.config.Config
import io.getquill.context.cassandra.util.FutureConversions._
import io.getquill.util.{ ContextLogger, LoadConfig }
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.Scheduler.Implicits
import monix.reactive.Observable

import scala.jdk.CollectionConverters._
import scala.util.{ Failure, Success }

class CassandraStreamContext[N <: NamingStrategy](
  naming:                     N,
  cluster:                    Cluster,
  keyspace:                   String,
  preparedStatementCacheSize: Long
)
  extends CassandraClusterSessionContext[N](naming, cluster, keyspace, preparedStatementCacheSize) {

  def this(naming: N, config: CassandraContextConfig) = this(naming, config.cluster, config.keyspace, config.preparedStatementCacheSize)
  def this(naming: N, config: Config) = this(naming, CassandraContextConfig(config))
  def this(naming: N, configPrefix: String) = this(naming, LoadConfig(configPrefix))

  private val logger = ContextLogger(classOf[CassandraStreamContext[_]])

  override type Result[T] = Observable[T]
  override type RunQueryResult[T] = T
  override type RunQuerySingleResult[T] = T
  override type RunActionResult = Unit
  override type RunBatchActionResult = Unit

  protected def page(rs: ResultSet): Task[Iterable[Row]] = Task.defer {
    val available = rs.getAvailableWithoutFetching
    val page = rs.asScala.take(available)

    if (rs.isFullyFetched)
      Task.now(page)
    else
      Task.fromFuture(rs.fetchMoreResults().asScala(Implicits.global)).map(_ => page)
  }

  def executeQuery[T](cql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor): Observable[T] = {

    Observable
      .fromTask(prepareRowAndLog(cql, prepare))
      .mapEvalF(p => session.executeAsync(p).asScala(Implicits.global))
      .flatMap(Observable.fromAsyncStateAction((rs: ResultSet) => page(rs).map((_, rs)))(_))
      .takeWhile(_.nonEmpty)
      .flatMap(Observable.fromIterable)
      .map(extractor)
  }

  def executeQuerySingle[T](cql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor): Observable[T] =
    executeQuery(cql, prepare, extractor)

  def executeAction[T](cql: String, prepare: Prepare = identityPrepare): Observable[Unit] = {
    Observable
      .fromTask(prepareRowAndLog(cql, prepare))
      .mapEvalF(p => session.executeAsync(p).asScala(Implicits.global))
      .map(_ => ())
  }

  def executeBatchAction(groups: List[BatchGroup]): Observable[Unit] =
    Observable.fromIterable(groups).flatMap {
      case BatchGroup(cql, prepare) =>
        Observable.fromIterable(prepare)
          .flatMap(executeAction(cql, _))
          .map(_ => ())
    }

  private def prepareRowAndLog(cql: String, prepare: Prepare = identityPrepare): Task[PrepareRow] = {
    Task.async0[PrepareRow] { (scheduler, callback) =>
      implicit val executor: Scheduler = scheduler

      super.prepareAsync(cql)
        .map(prepare)
        .onComplete {
          case Success((params, bs)) =>
            logger.logQuery(cql, params)
            callback.onSuccess(bs)
          case Failure(ex) =>
            callback.onError(ex)
        }
    }
  }
} 
Example 28
Source File: CassandraContainer.scala    From testcontainers-scala   with MIT License
package com.dimafeng.testcontainers

import com.datastax.driver.core.Cluster
import org.testcontainers.containers.{CassandraContainer => JavaCassandraContainer}

class CassandraContainer(dockerImageNameOverride: Option[String] = None,
                         configurationOverride: Option[String] = None,
                         initScript: Option[String] = None,
                         jmxReporting: Boolean = false) extends SingleContainer[JavaCassandraContainer[_]] {

  val cassandraContainer: JavaCassandraContainer[_] = {
    if (dockerImageNameOverride.isEmpty) {
      new JavaCassandraContainer()
    } else {
      new JavaCassandraContainer(dockerImageNameOverride.get)
    }
  }

  if (configurationOverride.isDefined) cassandraContainer.withConfigurationOverride(configurationOverride.get)
  if (initScript.isDefined) cassandraContainer.withInitScript(initScript.get)
  if (jmxReporting) cassandraContainer.withJmxReporting(jmxReporting)

  override val container: JavaCassandraContainer[_] = cassandraContainer

  def cluster: Cluster = cassandraContainer.getCluster

  def username: String = cassandraContainer.getUsername

  def password: String = cassandraContainer.getPassword
}


object CassandraContainer {

  val defaultDockerImageName = "cassandra:3.11.2"

  def apply(dockerImageNameOverride: String = null,
            configurationOverride: String = null,
            initScript: String = null,
            jmxReporting: Boolean = false): CassandraContainer = new CassandraContainer(
    Option(dockerImageNameOverride),
    Option(configurationOverride),
    Option(initScript),
    jmxReporting
  )

  case class Def(
    dockerImageName: String = defaultDockerImageName,
    configurationOverride: Option[String] = None,
    initScript: Option[String] = None,
    jmxReporting: Boolean = false
  ) extends ContainerDef {

    override type Container = CassandraContainer

    override def createContainer(): CassandraContainer = {
      new CassandraContainer(
        dockerImageNameOverride = Some(dockerImageName),
        configurationOverride = configurationOverride,
        initScript = initScript,
        jmxReporting = jmxReporting
      )
    }
  }

} 
Example 29
Source File: BaseDBSpec.scala    From gemini   with GNU General Public License v3.0
package tech.sourced.gemini

import com.datastax.driver.core.{Cluster, Session}
import org.scalatest.{BeforeAndAfterAll, Suite}
import tech.sourced.gemini.util.Logger
import scala.collection.JavaConverters._

case class HashtableItem(hashtable: Int, v: String, sha1: String)

trait BaseDBSpec extends BeforeAndAfterAll {
  this: Suite =>

  private val _logger = Logger("gemini")
  var keyspace : String = _
  var cassandra: Session = _

  override def beforeAll(): Unit = {
    super.beforeAll()

    val cluster = Cluster.builder()
      .addContactPoint(Gemini.defaultCassandraHost)
      .withPort(Gemini.defaultCassandraPort)
      .build()

    cassandra = cluster.connect()

    val gemini = Gemini(null, _logger, keyspace)
    gemini.dropSchema(cassandra)
    gemini.applySchema(cassandra)
  }

  def insertMeta(items: Iterable[RepoFile]): Unit = {
    val cols = Gemini.tables.metaCols
    items.foreach { case RepoFile(repo, commit, path, sha) =>
      val cql = s"""INSERT INTO $keyspace.${Gemini.tables.meta}
        (${cols.repo}, ${cols.commit}, ${cols.path}, ${cols.sha})
        VALUES ('$repo', '$commit', '$path', '$sha')"""
      cassandra.execute(cql)
    }
  }

  def insertHashtables(items: Iterable[HashtableItem], mode: String): Unit = {
    val cols = Gemini.tables.hashtablesCols
    items.foreach { case HashtableItem(ht, v, sha1) =>
      val cql = s"""INSERT INTO $keyspace.${Gemini.tables.hashtables(mode)}
        (${cols.hashtable}, ${cols.value}, ${cols.sha})
        VALUES ($ht, $v, '$sha1')"""
      cassandra.execute(cql)
    }
  }

  def insertDocFreq(docFreq: OrderedDocFreq, mode: String): Unit = {
    val docsCols = Gemini.tables.featuresDocsCols
    cassandra.execute(
      s"INSERT INTO $keyspace.${Gemini.tables.featuresDocs} (${docsCols.id}, ${docsCols.docs}) VALUES (?, ?)",
      mode, int2Integer(docFreq.docs)
    )

    val freqCols = Gemini.tables.featuresFreqCols
    docFreq.df.foreach { case(feature, weight) =>
      cassandra.execute(
        s"INSERT INTO $keyspace.${Gemini.tables.featuresFreq}" +
          s"(${freqCols.id}, ${freqCols.feature}, ${freqCols.weight}) VALUES (?, ?, ?)",
        mode, feature, int2Integer(weight)
      )
    }
  }

  override def afterAll(): Unit = {
    Gemini(null, _logger, keyspace).dropSchema(cassandra)
    cassandra.close()
    super.afterAll()
  }
} 
Example 30
Source File: StructuredStreamingWordCount.scala    From structured-streaming-application   with Apache License 2.0
package knolx.spark

import com.datastax.driver.core.Cluster
import knolx.Config._
import knolx.KnolXLogger
import knolx.spark.CassandraForeachWriter.writeToCassandra
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, lit, sum}
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.StringType


object StructuredStreamingWordCount extends App with KnolXLogger {
  val cluster = Cluster.builder.addContactPoints(cassandraHosts).build
  val session = cluster.newSession()

  info("Creating Keypsace and tables in Cassandra...")
  session.execute(s"CREATE KEYSPACE IF NOT EXISTS $keyspace WITH " +
    "replication = {'class':'SimpleStrategy','replication_factor':1};")

  session.execute(s"CREATE TABLE IF NOT EXISTS $keyspace.wordcount ( word text PRIMARY KEY,count int );")

  info("Closing DB connection...")
  session.close()
  session.getCluster.close()

  info("Creating Spark Session")
  val spark = SparkSession.builder().master(sparkMaster).appName(sparkAppName).getOrCreate()
  spark.sparkContext.setLogLevel("WARN")

  info("Creating Streaming DF...")
  val dataStream =
    spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", bootstrapServer)
      .option("subscribe", topic)
      .load()

  info("Writing data to Cassandra...")
  val query =
    dataStream
      .select(col("value").cast(StringType).as("word"), lit(1).as("count"))
      .groupBy(col("word"))
      .agg(sum("count").as("count"))
      .writeStream
      .outputMode(OutputMode.Update())
      .foreach(writeToCassandra)
      .option("checkpointLocation", checkPointDir)
      .start()

  info("Waiting for the query to terminate...")
  query.awaitTermination()
  query.stop()
} 
Example 31
Source File: CassandraForeachWriter.scala    From structured-streaming-application   with Apache License 2.0
package knolx.spark

import com.datastax.driver.core.{Cluster, Session}
import knolx.Config.{cassandraHosts, keyspace}
import org.apache.spark.sql.{ForeachWriter, Row}


object CassandraForeachWriter extends Serializable {
  val writeToCassandra = new ForeachWriter[Row] {
    private var cluster: Cluster = _
    private var session: Session = _

    override def process(row: Row): Unit = {
      val word = row.getString(0)
      val count = row.getLong(1)

      session.execute(s"insert into $keyspace.wordcount (word, count) values ('$word', $count);")
    }

    override def close(errorOrNull: Throwable): Unit = {
      session.close()
      session.getCluster.close()
    }

    override def open(partitionId: Long, version: Long): Boolean = {
      cluster = Cluster.builder.addContactPoints(cassandraHosts).build
      session = cluster.newSession()
      true
    }
  }
} 
Example 32
Source File: CassandraConnector.scala    From BusFloatingData   with Apache License 2.0
package de.nierbeck.floating.data.server

import com.datastax.driver.core.{Cluster, Host, Metadata, Session}
import scala.collection.JavaConversions._


object CassandraConnector {

  import ServiceConfig._

  def connect(): Session = {
    val cluster = Cluster.builder().addContactPoint(cassandraNodeName).withPort(Integer.valueOf(cassandraNodePort)).build()
    val metadata:Metadata = cluster.getMetadata
//    logger.info("Connected to cluster: {}", metadata.getClusterName)
    println(s"Connected to cluster: ${metadata.getClusterName}")
    metadata.getAllHosts foreach {
      case host: Host =>
        println(s"Datatacenter: ${host.getDatacenter}; Host: ${host.getAddress}; Rack: ${host.getRack}")
//        logger.info("Datatacenter: {}; Host: {}; Rack: {}", host.getDatacenter, host.getAddress, host.getRack)
    }

    cluster.newSession()
  }

  def close(session: Session) {
    val cluster = session.getCluster
    session.close()
    cluster.close()
  }

} 
Example 33
Source File: CassandraEventLogSettings.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.log.cassandra

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import akka.util.Helpers.Requiring

import com.datastax.driver.core.{ Cluster, ConsistencyLevel }
import com.typesafe.config.Config

import com.rbmhtechnology.eventuate.log._

import scala.collection.JavaConverters._
import scala.concurrent.duration._

class CassandraEventLogSettings(config: Config) extends EventLogSettings {
  import CassandraEventLogSettings._

  val writeTimeout: Long =
    config.getDuration("eventuate.log.write-timeout", TimeUnit.MILLISECONDS)

  val writeBatchSize: Int =
    config.getInt("eventuate.log.write-batch-size")

  val keyspace: String =
    config.getString("eventuate.log.cassandra.keyspace")

  val keyspaceAutoCreate: Boolean =
    config.getBoolean("eventuate.log.cassandra.keyspace-autocreate")

  val replicationFactor: Int =
    config.getInt("eventuate.log.cassandra.replication-factor")

  val tablePrefix: String =
    config.getString("eventuate.log.cassandra.table-prefix")

  val readConsistency: ConsistencyLevel =
    ConsistencyLevel.valueOf(config.getString("eventuate.log.cassandra.read-consistency"))

  val writeConsistency: ConsistencyLevel =
    ConsistencyLevel.valueOf(config.getString("eventuate.log.cassandra.write-consistency"))

  val writeRetryMax: Int =
    config.getInt("eventuate.log.cassandra.write-retry-max")

  val defaultPort: Int =
    config.getInt("eventuate.log.cassandra.default-port")

  val contactPoints =
    getContactPoints(config.getStringList("eventuate.log.cassandra.contact-points").asScala, defaultPort)

  val partitionSize: Long =
    config.getLong("eventuate.log.cassandra.partition-size")
      .requiring(
        _ > writeBatchSize,
        s"eventuate.log.cassandra.partition-size must be greater than eventuate.log.write-batch-size (${writeBatchSize})")

  val indexUpdateLimit: Int =
    config.getInt("eventuate.log.cassandra.index-update-limit")

  val initRetryMax: Int =
    config.getInt("eventuate.log.cassandra.init-retry-max")

  val initRetryDelay: FiniteDuration =
    config.getDuration("eventuate.log.cassandra.init-retry-delay", TimeUnit.MILLISECONDS).millis

  def deletionRetryDelay: FiniteDuration =
    ???

  val connectRetryMax: Int =
    config.getInt("eventuate.log.cassandra.connect-retry-max")

  val connectRetryDelay: FiniteDuration =
    config.getDuration("eventuate.log.cassandra.connect-retry-delay", TimeUnit.MILLISECONDS).millis

  val clusterBuilder: Cluster.Builder =
    Cluster.builder.addContactPointsWithPorts(contactPoints.asJava).withCredentials(
      config.getString("eventuate.log.cassandra.username"),
      config.getString("eventuate.log.cassandra.password"))
}

private object CassandraEventLogSettings {
  def getContactPoints(contactPoints: Seq[String], defaultPort: Int): Seq[InetSocketAddress] = {
    contactPoints match {
      case null | Nil => throw new IllegalArgumentException("a contact point list cannot be empty.")
      case hosts => hosts map {
        ipWithPort =>
          ipWithPort.split(":") match {
            case Array(host, port) => new InetSocketAddress(host, port.toInt)
            case Array(host)       => new InetSocketAddress(host, defaultPort)
            case msg               => throw new IllegalArgumentException(s"a contact point should have the form [host:port] or [host] but was: $msg.")
          }
      }
    }
  }
}