com.datastax.driver.core.Session Scala Examples

The following examples show how to use com.datastax.driver.core.Session. Each example is taken from an open-source project; its header names the source file, the project it comes from, and the license.
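Most of the examples below share the same pattern: build a Cluster, obtain a Session, execute statements, and close both. As a minimal sketch (contact point, port, and query are placeholders, not taken from any example below):

import com.datastax.driver.core.{Cluster, Session}

object SessionBasics extends App {
  val cluster: Cluster = Cluster.builder()
    .addContactPoint("127.0.0.1") // placeholder contact point
    .withPort(9042)               // default native transport port
    .build()

  // A Session is long-lived and thread-safe; create one and share it.
  val session: Session = cluster.connect()
  try {
    val version = session.execute("SELECT release_version FROM system.local").one().getString(0)
    println(s"Connected to Cassandra $version")
  } finally {
    // Always release both the Session and the Cluster.
    session.close()
    cluster.close()
  }
}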
Example 1
Source File: EventsByInterval.scala    From spark-streaming-demo   with Apache License 2.0
package com.datastax.examples.meetup

import com.datastax.driver.core.{Cluster, Session, Row}
import com.websudos.phantom.CassandraTable
import com.websudos.phantom.Implicits._
import scala.concurrent.Future


case class EventModel (
                event: String,
                interval: String,
                dimension: String,
                subtotal: Long
            )

sealed class EventRecord extends CassandraTable[EventRecord, EventModel]
{
  override val tableName = "events_by_interval"
  object event extends StringColumn(this) with PartitionKey[String]
  object interval extends StringColumn(this) with ClusteringOrder[String] with Descending
  object dimension extends StringColumn(this) with ClusteringOrder[String] with Ascending
  object subtotal extends CounterColumn(this)

  override def fromRow(row: Row): EventModel = {
    EventModel(
      event(row),
      interval(row),
      dimension(row),
      subtotal(row)
    )
  }
}

object Event extends EventRecord
{
  val keyspace = "demo"
  val cluster = Cluster.builder().addContactPoint("127.0.0.1").build()
  implicit val session = cluster.connect(keyspace)

//  def hourly(hashtag: String): Future[Seq[(String, Long)]] = {
//    select (_.interval, _.subtotal) where (_.event eqs hashtag) and (_.interval gte "M") and (_.interval lt "N") limit 60 fetch
//  }

  def dimensions(event: String, interval: String): Future[Seq[(String, Long)]] = {
    select (_.dimension, _.subtotal) where (_.event eqs event) and (_.interval eqs interval) limit 500 fetch
  }

} 
Example 2
Source File: CassandraJsonDataStoreTest.scala    From izanami   with Apache License 2.0
package specs.cassandra.store

import com.datastax.driver.core.{Cluster, Session}
import env.{CassandraConfig, DbDomainConfig, DbDomainConfigDetails}
import libs.logs.ZLogger
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
import store.AbstractJsonDataStoreTest
import store.cassandra.CassandraClient
import store.cassandra.CassandraJsonDataStore
import zio.{Exit, Reservation}

class CassandraJsonDataStoreTest
    extends AbstractJsonDataStoreTest("Cassandra")
    with BeforeAndAfter
    with BeforeAndAfterAll {

  val cassandraConfig = CassandraConfig(Seq("127.0.0.1:9042"), None, 1, "izanami_test")

  private val rDriver: Reservation[ZLogger, Throwable, Option[(Cluster, Session)]] =
    runtime.unsafeRun(CassandraClient.cassandraClient(Some(cassandraConfig)).reserve.provideLayer(ZLogger.live))

  val Some((_, session)) = runtime.unsafeRun(rDriver.acquire.provideLayer(ZLogger.live))

  override protected def afterAll(): Unit = {
    super.afterAll()
    runtime.unsafeRun(rDriver.release(Exit.unit).provideLayer(ZLogger.live))
  }

  override def dataStore(name: String): CassandraJsonDataStore =
    CassandraJsonDataStore(
      session,
      cassandraConfig,
      DbDomainConfig(env.Cassandra, DbDomainConfigDetails(name, None), None)
    )

} 
Example 3
Source File: CassandraServerSpecLike.scala    From Spark2Cassandra   with Apache License 2.0
package com.github.jparkie.spark.cassandra

import java.net.{ InetAddress, InetSocketAddress }

import com.datastax.driver.core.Session
import com.datastax.spark.connector.cql.CassandraConnector
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.scalatest.{ BeforeAndAfterAll, Suite }

trait CassandraServerSpecLike extends BeforeAndAfterAll { this: Suite =>
  // Remove protected modifier because of SharedSparkContext.
  override def beforeAll(): Unit = {
    super.beforeAll()

    EmbeddedCassandraServerHelper.startEmbeddedCassandra()
  }

  // Remove protected modifier because of SharedSparkContext.
  override def afterAll(): Unit = {
    EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()

    super.afterAll()
  }

  def getClusterName: String = {
    EmbeddedCassandraServerHelper.getClusterName
  }

  def getHosts: Set[InetAddress] = {
    val temporaryAddress =
      new InetSocketAddress(EmbeddedCassandraServerHelper.getHost, EmbeddedCassandraServerHelper.getNativeTransportPort)
        .getAddress

    Set(temporaryAddress)
  }

  def getNativeTransportPort: Int = {
    EmbeddedCassandraServerHelper.getNativeTransportPort
  }

  def getRpcPort: Int = {
    EmbeddedCassandraServerHelper.getRpcPort
  }

  def getCassandraConnector: CassandraConnector = {
    CassandraConnector(hosts = getHosts, port = getNativeTransportPort)
  }

  def createKeyspace(session: Session, keyspace: String): Unit = {
    session.execute(
      s"""CREATE KEYSPACE "$keyspace"
          |WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };
       """.stripMargin
    )
  }
} 
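A hedged usage sketch of this trait (suite name and keyspace are illustrative; assumes the suite lives alongside the trait):

package com.github.jparkie.spark.cassandra

import org.scalatest.FunSuite

class EmbeddedCassandraSuite extends FunSuite with CassandraServerSpecLike {
  test("provisions a keyspace on the embedded server") {
    // withSessionDo borrows a driver Session from the connector.
    getCassandraConnector.withSessionDo { session =>
      createKeyspace(session, "example_ks")
      // run assertions against the fresh keyspace here
    }
  }
}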
Example 4
Source File: ExperimentVariantEventCassandraServiceTest.scala    From izanami   with Apache License 2.0
package specs.cassandra.abtesting

import com.datastax.driver.core.{Cluster, Session}
import domains.abtesting.events.impl.ExperimentVariantEventCassandraService
import domains.abtesting.AbstractExperimentServiceTest
import domains.abtesting.events.ExperimentVariantEventService
import env.{CassandraConfig, DbDomainConfig, DbDomainConfigDetails}
import libs.logs.ZLogger
import org.scalatest.BeforeAndAfterAll
import store.cassandra.CassandraClient
import zio.{Exit, Reservation}

class ExperimentVariantEventCassandraServiceTest
    extends AbstractExperimentServiceTest("Cassandra")
    with BeforeAndAfterAll {

  val cassandraConfig = CassandraConfig(Seq("127.0.0.1:9042"), None, 1, "izanami_test")
  private val rDriver: Reservation[ZLogger, Throwable, Option[(Cluster, Session)]] =
    runtime.unsafeRun(CassandraClient.cassandraClient(Some(cassandraConfig)).reserve.provideLayer(ZLogger.live))

  val Some((_, session)) = runtime.unsafeRun(rDriver.acquire.provideLayer(ZLogger.live))

  override protected def afterAll(): Unit = {
    super.afterAll()
    runtime.unsafeRun(rDriver.release(Exit.unit).provideLayer(ZLogger.live))
  }

  override def dataStore(name: String): ExperimentVariantEventService.Service = ExperimentVariantEventCassandraService(
    session,
    DbDomainConfig(env.Cassandra, DbDomainConfigDetails(name, None), None),
    cassandraConfig
  )
} 
Example 5
Source File: CassandraClient.scala    From izanami   with Apache License 2.0
package store.cassandra

import java.net.InetSocketAddress

import com.datastax.driver.core.{Cluster, Session}
import env.CassandraConfig
import libs.logs.{IzanamiLogger, ZLogger}
import zio.{Task, UIO, ZManaged}

object CassandraClient {

  def cassandraClient(mayBeConfig: Option[CassandraConfig]): ZManaged[ZLogger, Throwable, Option[(Cluster, Session)]] =
    mayBeConfig
      .map { config =>
        ZManaged
          .make(
            ZLogger.info(s"Initializing Cassandra cluster for ${config}") *> Task {
              val adds = config.addresses.map { add =>
                val Array(host, port) = add.split(":")
                new InetSocketAddress(host, port.toInt)
              }
              val builder: Cluster.Builder = Cluster.builder
                .withoutJMXReporting()
                .addContactPointsWithPorts(adds: _*)

              val b: Cluster.Builder = config.clusterName.map(builder.withClusterName).getOrElse(builder)

              val cluster: Cluster = (for {
                username <- config.username
                password <- config.password
              } yield {
                b.withCredentials(username, password)
              }).getOrElse(b).build()

              cluster.init()

              val session = cluster.connect()

              session.execute(s"""
                                     |CREATE KEYSPACE IF NOT EXISTS ${config.keyspace} WITH REPLICATION = {
                                     | 'class' : 'SimpleStrategy', 'replication_factor' : ${config.replicationFactor}
                                     |}""".stripMargin)

              (cluster, session)
            }
          )(t => UIO(t._1.close))
          .map(Some.apply)

      }
      .getOrElse(ZManaged.effectTotal(None))
} 
Example 6
Source File: CassandraSpec.scala    From troy   with Apache License 2.0
package troy
package macros

import java.util

import com.datastax.driver.core.{ Session, Cluster }
import org.cassandraunit.CQLDataLoader
import org.cassandraunit.dataset.CQLDataSet
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.scalatest.concurrent.ScalaFutures
import org.scalatest._
import org.scalatest.time.{ Minute, Seconds, Span }
import scala.concurrent.duration._

trait CassandraSpec extends FlatSpec with BeforeAndAfterAll with BeforeAndAfterEach with ScalaFutures with Matchers {
  def port: Int = 9142
  def host: String = "127.0.0.1"

  private lazy val cluster = new Cluster.Builder().addContactPoints(host).withPort(port).build()
  implicit lazy val session: Session = cluster.connect()
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(1, Minute), interval = Span(5, Seconds))

  def testDataFixtures: String = ""
  private lazy val fixtures = StringCQLDataSet(testDataFixtures, false, false, "test")
  private lazy val schema = Seq(new ClassPathCQLDataSet("schema/01.cql"), new ClassPathCQLDataSet("schema/02.cql"))

  override protected def beforeAll(): Unit = {
    EmbeddedCassandraServerHelper.startEmbeddedCassandra(1.minute.toMillis)
    loadClean()
    super.beforeAll()
  }

  override protected def afterAll(): Unit = {
    session.close()
    cluster.close()
    super.afterAll()
  }

  def loadClean() = {
    EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()
    loadData((schema :+ fixtures): _*)
  }

  def loadData(datasets: CQLDataSet*) = {
    val loader = new CQLDataLoader(session)
    datasets.foreach(loader.load)
  }
}

object Helpers {
  def splitStatements(statements: String) =
    statements.split(";").map(_.trim).filter(!_.isEmpty)
}

case class StringCQLDataSet(
    cqlStatements: String,
    isKeyspaceCreation: Boolean,
    isKeyspaceDeletion: Boolean,
    getKeyspaceName: String
) extends CQLDataSet {
  lazy val getCQLStatements = util.Arrays.asList(Helpers.splitStatements(cqlStatements): _*)

} 
Example 7
Source File: Dsl.scala    From troy   with Apache License 2.0
package troy
package driver

import com.datastax.driver.core.{ Session, Row, ResultSet, Statement }

import scala.concurrent.{ ExecutionContext, Future }

object DSL {
  import JavaConverters._
  import scala.collection.JavaConverters._

  implicit class ExternalDSL_RichStatement(val statement: Statement) extends AnyVal {
    def executeAsync(implicit session: Session, executionContext: ExecutionContext): Future[ResultSet] =
      session.executeAsync(statement).asScala

    def execute(implicit session: Session): ResultSet =
      session.execute(statement)

    def all(implicit session: Session, executionContext: ExecutionContext): Future[Seq[Row]] =
      statement.executeAsync.all

    def oneOption(implicit session: Session, executionContext: ExecutionContext): Future[Option[Row]] =
      statement.executeAsync.oneOption
  }

  implicit class ExternalDSL_FutureOfRichStatement(val statement: Future[Statement]) extends AnyVal {
    def executeAsync(implicit session: Session, executionContext: ExecutionContext): Future[ResultSet] =
      statement.flatMap(_.executeAsync)

    def all(implicit session: Session, executionContext: ExecutionContext): Future[Seq[Row]] =
      statement.executeAsync.all

    def oneOption(implicit session: Session, executionContext: ExecutionContext): Future[Option[Row]] =
      statement.executeAsync.oneOption
  }

  implicit class RichResultSet(val resultSet: ResultSet) extends AnyVal {
    def all() =
      resultSet.all.asScala

    def oneOption() =
      Option(resultSet.one)
  }

  implicit class RichFutureOfResultSet(val resultSet: Future[ResultSet]) extends AnyVal {
    def all(implicit executionContext: ExecutionContext): Future[Seq[Row]] =
      resultSet.map(_.all.asScala)

    def oneOption(implicit executionContext: ExecutionContext): Future[Option[Row]] =
      resultSet.map(r => Option(r.one()))
  }
} 
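A sketch of how these enrichments compose (the table and statement are assumptions for illustration):

import com.datastax.driver.core.{Row, Session, SimpleStatement}
import troy.driver.DSL._

import scala.concurrent.{ExecutionContext, Future}

// executeAsync comes from ExternalDSL_RichStatement;
// oneOption on the resulting Future comes from RichFutureOfResultSet.
def firstPost(implicit session: Session, ec: ExecutionContext): Future[Option[Row]] =
  new SimpleStatement("SELECT post_id FROM test.posts LIMIT 1").executeAsync.oneOption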
Example 8
Source File: Playground.scala    From troy   with Apache License 2.0
package troy
package meta

import java.util.UUID

import com.datastax.driver.core.{ Cluster, Session }
import com.datastax.driver.core.utils.UUIDs

import scala.concurrent.duration.Duration
import scala.concurrent.{ Await, Future }
import scala.util.Try

@schema object Schema

object Playground extends App {
  def query[I, O](x: String): I => Future[Seq[O]] = ???
  case class Post(id: UUID, title: String)

  import Schema._

  withSession { implicit session =>
    import scala.concurrent.ExecutionContext.Implicits.global

    @schemasafe val getAuthorPosts =
      query[(UUID, Int), Post]("select post_id, post_title from test.posts where author_id = ? AND post_rating >= ? ALLOW FILTERING;")

    val authorId = UUID.fromString("6287c470-e298-11e6-9b3d-ffeaf4ddcb54")
    println(Await.result(getAuthorPosts((authorId, 4)): Future[Iterable[Post]], Duration.Inf))
  }

  def withSession[T](f: Session => T) = {
    val cluster = new Cluster.Builder().addContactPoints("127.0.0.1").withPort(9042).build()
    val session: Session = cluster.connect()
    Try(f(session)).failed.foreach(println)
    session.close()
    cluster.close()
  }
} 
Example 9
Source File: BaseSpec.scala    From troy   with Apache License 2.0
package troy
package meta

import java.util

import com.datastax.driver.core.{ Session, Cluster }
import org.cassandraunit.CQLDataLoader
import org.cassandraunit.dataset.CQLDataSet
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.scalatest.concurrent.ScalaFutures
import org.scalatest._
import scala.concurrent.duration._

trait BaseSpec extends FlatSpec with BeforeAndAfterAll with BeforeAndAfterEach with ScalaFutures with Matchers {
  def port: Int = 9142
  def host: String = "127.0.0.1"

  private lazy val cluster = new Cluster.Builder().addContactPoints(host).withPort(port).build()
  implicit lazy val session: Session = cluster.connect()
  implicit val patienceTimeout = org.scalatest.concurrent.PatienceConfiguration.Timeout(10.seconds)

  def testDataFixtures: String = ""
  private lazy val fixtures = StringCQLDataSet(testDataFixtures, false, false, "test")
  private lazy val schema = new ClassPathCQLDataSet("schema.cql")

  override protected def beforeAll(): Unit = {
    EmbeddedCassandraServerHelper.startEmbeddedCassandra(1.minute.toMillis)
    loadClean()
    super.beforeAll()
  }

  override protected def afterAll(): Unit = {
    session.close()
    cluster.close()
    super.afterAll()
  }

  def loadClean() = {
    EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()
    loadData(schema, fixtures)
  }

  def loadData(datasets: CQLDataSet*) = {
    val loader = new CQLDataLoader(session)
    datasets.foreach(loader.load)
  }
}

object Helpers {
  def splitStatements(statements: String) =
    statements.split(";").map(_.trim).filter(!_.isEmpty)
}

case class StringCQLDataSet(
    cqlStatements: String,
    isKeyspaceCreation: Boolean,
    isKeyspaceDeletion: Boolean,
    getKeyspaceName: String
) extends CQLDataSet {
  lazy val getCQLStatements = util.Arrays.asList(Helpers.splitStatements(cqlStatements): _*)

} 
Example 10
Source File: Main.scala    From troy   with Apache License 2.0
package demo1

import java.util.UUID
import com.datastax.driver.core.{Cluster, Session}
import troy.dsl._
import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, img: String) // Map doesn't work with Primitives yet. See: https://github.com/cassandra-scala/troy/issues/18

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val getCommentsByLine = withSchema.minVersion(2).maxVersion(2) {
    (authorId: String, postId: UUID) =>
      cql"""
         SELECT post_id, post_img
         FROM test.posts
         WHERE author_id = $authorId
           AND post_id = $postId
       """.prepared.as(Post)
  }

  val postId = UUID.fromString("a4a70900-24e1-11df-8924-001ff3591711")
  println(Await.result(getCommentsByLine("test", postId), Duration(1, "second")))

  session.close()
  cluster.close()
} 
Example 11
Source File: Main.scala    From troy   with Apache License 2.0
package demo3

import java.util.UUID
import com.datastax.driver.core.{Row, Cluster, Session}
import troy.dsl._
import troy.driver.DSL._

import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, title: String)

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val get = withSchema {
    (authorId: String, postId: UUID) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE author_id = $authorId AND post_id = $postId
       """
        .prepared
        .executeAsync
        .oneOption
        .as(Post)
  }

  val result = get("test", UUID.fromString("a4a70900-24e1-11df-8924-001ff3591711"))
  val maybePost: Option[Post] = Await.result(result, Duration(1, "second"))
  println(maybePost.map(_.title).getOrElse("Post not found"))

  session.close()
  cluster.close()
} 
Example 12
Source File: Main.scala    From troy   with Apache License 2.0
package demo1

import com.datastax.driver.core.{Cluster, Session}
import troy.dsl._
import troy.driver.DSL._
import scala.concurrent.ExecutionContext.Implicits.global

object Main extends App {
  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val listByAuthor = withSchema {
    (authorId: String) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE author_id = $authorId
       """.prepared.executeAsync
  }


  println(listByAuthor("test"))

  session.close()
  cluster.close()
} 
Example 13
Source File: Main.scala    From troy   with Apache License 2.0
package demo4

import java.util.UUID
import com.datastax.driver.core.{Cluster, Session}
import troy.dsl._
import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, title: String)

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val listByTag = withSchema {
    (tag: String) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE post_tags CONTAINS $tag
       """.prepared.as(Post)
  }

  val titlesF = listByTag("test").map(_.map(_.title))
  val titles = Await.result(titlesF, Duration(1, "second"))
  println("Matching titles:")
  titles.foreach(println)

  session.close()
  cluster.close()
} 
Example 14
Source File: Main.scala    From troy   with Apache License 2.0
package demo2

import java.util.UUID
import com.datastax.driver.core.{Row, Cluster, Session}
import troy.dsl._
import troy.driver.DSL._

import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, title: String)

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val listByAuthor = withSchema {
    (authorId: String) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE author_id = $authorId
       """
        .prepared
        .executeAsync
        .as(Post)
  }

  val result = listByAuthor("test")
  println(Await.result(result, Duration(1, "second")))

  session.close()
  cluster.close()
} 
Example 15
Source File: Main.scala    From troy   with Apache License 2.0
package demo5

import java.util.UUID
import com.datastax.driver.core.{Cluster, Session}
import troy.dsl._
import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, comments: Map[Int, String])

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val getCommentsByLine = withSchema {
    (authorId: String, postId: UUID, line: Int) =>
      cql"""
         SELECT post_id, comments
         FROM test.posts
         WHERE author_id = $authorId
           AND post_id = $postId
           AND comments CONTAINS KEY $line
       """.prepared.as(Post)
  }

  val postId = UUID.fromString("a4a70900-24e1-11df-8924-001ff3591711")
  println(Await.result(getCommentsByLine("test", postId, 5), Duration(1, "second")))

  session.close()
  cluster.close()
} 
Example 16
Source File: Main.scala    From troy   with Apache License 2.0
package demo6

import java.util.UUID
import com.datastax.driver.core.{Row, Cluster, Session}
import troy.dsl._
import troy.driver.DSL._

import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, title: String)

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val create = withSchema {
    (authorId: String, title: String) =>
      cql"""
         INSERT INTO test.posts (author_id , post_id , post_title )
         VALUES ( $authorId, now(), $title);
       """.prepared.executeAsync
  }

  val listByAuthor = withSchema {
    (authorId: String) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE author_id = $authorId
       """
        .prepared
        .executeAsync
        .as(Post)
  }

  println(Await.result(create("test", "title"), Duration(1, "second")))
  println(Await.result(listByAuthor("test"), Duration(1, "second")))

  session.close()
  cluster.close()
} 
Example 17
Source File: Statements.scala    From spark-structured-streaming   with MIT License
package com.kafkaToSparkToCass

import java.sql.Timestamp

import com.datastax.driver.core.Session

object Statements extends Serializable {

  def cql(id: String, time: Timestamp, ename: String): String = s"""
       insert into my_keyspace.test_table (user_id,time,event)
       values('$id', '$time', '$ename event')"""

  def createKeySpaceAndTable(session: Session, dropTable: Boolean = false) = {
    session.execute(
      """CREATE KEYSPACE  if not exists  my_keyspace WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };""")
    if (dropTable)
      session.execute("""drop table if exists my_keyspace.test_table""")

    session.execute(
      """create table if not exists my_keyspace.test_table ( user_id  text, time timestamp, event text, primary key((user_id), time) ) WITH CLUSTERING ORDER BY (time DESC)""")
  }
} 
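A hedged usage sketch wiring these statements to a session (the contact point and sample values are placeholders):

package com.kafkaToSparkToCass

import java.sql.Timestamp

import com.datastax.driver.core.Cluster

object StatementsDemo extends App {
  val cluster = Cluster.builder().addContactPoint("127.0.0.1").build()
  val session = cluster.connect()

  // Provision the keyspace and table, then insert one sample event.
  Statements.createKeySpaceAndTable(session, dropTable = true)
  session.execute(Statements.cql("user-1", new Timestamp(System.currentTimeMillis()), "login"))

  session.close()
  cluster.close()
}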
Example 18
Source File: CassandraReadSideSessionProvider.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.internal.persistence.cassandra

import akka.Done
import akka.actor.ActorSystem
import akka.actor.ExtendedActorSystem
import akka.event.Logging
import akka.persistence.cassandra.session.CassandraSessionSettings
import akka.persistence.cassandra.session.scaladsl.{ CassandraSession => AkkaScaladslCassandraSession }
import akka.persistence.cassandra.CassandraPluginConfig
import akka.persistence.cassandra.SessionProvider
import com.datastax.driver.core.Session

import scala.concurrent.ExecutionContext
import scala.concurrent.Future


private[lagom] object CassandraReadSideSessionProvider {
  def apply(
      system: ActorSystem,
      settings: CassandraSessionSettings,
      executionContext: ExecutionContext
  ): AkkaScaladslCassandraSession = {
    import akka.persistence.cassandra.ListenableFutureConverter
    import akka.util.Helpers.Requiring

    import scala.collection.JavaConverters._ // implicit asScala conversion

    val cfg = settings.config
    val replicationStrategy: String = CassandraPluginConfig.getReplicationStrategy(
      cfg.getString("replication-strategy"),
      cfg.getInt("replication-factor"),
      cfg.getStringList("data-center-replication-factors").asScala.toSeq
    )

    val keyspaceAutoCreate: Boolean = cfg.getBoolean("keyspace-autocreate")
    val keyspace: String = cfg
      .getString("keyspace")
      .requiring(
        !keyspaceAutoCreate || _ > "",
        "'keyspace' configuration must be defined, or use keyspace-autocreate=off"
      )

    def init(session: Session): Future[Done] = {
      implicit val ec = executionContext
      if (keyspaceAutoCreate) {
        val result1 =
          session.executeAsync(s"""
            CREATE KEYSPACE IF NOT EXISTS $keyspace
            WITH REPLICATION = { 'class' : $replicationStrategy }
            """).asScala
        result1
          .flatMap { _ =>
            session.executeAsync(s"USE $keyspace;").asScala
          }
          .map(_ => Done)
      } else if (keyspace != "")
        session.executeAsync(s"USE $keyspace;").asScala.map(_ => Done)
      else
        Future.successful(Done)
    }

    val metricsCategory = "lagom-" + system.name

    // using the scaladsl API because the init function is a Scala function (Session => Future[Done])
    new AkkaScaladslCassandraSession(
      system,
      SessionProvider(system.asInstanceOf[ExtendedActorSystem], settings.config),
      settings,
      executionContext,
      Logging.getLogger(system, this.getClass),
      metricsCategory,
      init
    )
  }
} 
Example 19
Source File: BaseDBSpec.scala    From gemini   with GNU General Public License v3.0
package tech.sourced.gemini

import com.datastax.driver.core.{Cluster, Session}
import org.scalatest.{BeforeAndAfterAll, Suite}
import tech.sourced.gemini.util.Logger
import scala.collection.JavaConverters._

case class HashtableItem(hashtable: Int, v: String, sha1: String)

trait BaseDBSpec extends BeforeAndAfterAll {
  this: Suite =>

  private val _logger = Logger("gemini")
  var keyspace : String = _
  var cassandra: Session = _

  override def beforeAll(): Unit = {
    super.beforeAll()

    val cluster = Cluster.builder()
      .addContactPoint(Gemini.defaultCassandraHost)
      .withPort(Gemini.defaultCassandraPort)
      .build()

    cassandra = cluster.connect()

    val gemini = Gemini(null, _logger, keyspace)
    gemini.dropSchema(cassandra)
    gemini.applySchema(cassandra)
  }

  def insertMeta(items: Iterable[RepoFile]): Unit = {
    val cols = Gemini.tables.metaCols
    items.foreach { case RepoFile(repo, commit, path, sha) =>
      val cql = s"""INSERT INTO $keyspace.${Gemini.tables.meta}
        (${cols.repo}, ${cols.commit}, ${cols.path}, ${cols.sha})
        VALUES ('$repo', '$commit', '$path', '$sha')"""
      cassandra.execute(cql)
    }
  }

  def insertHashtables(items: Iterable[HashtableItem], mode: String): Unit = {
    val cols = Gemini.tables.hashtablesCols
    items.foreach { case HashtableItem(ht, v, sha1) =>
      val cql = s"""INSERT INTO $keyspace.${Gemini.tables.hashtables(mode)}
        (${cols.hashtable}, ${cols.value}, ${cols.sha})
        VALUES ($ht, $v, '$sha1')"""
      cassandra.execute(cql)
    }
  }

  def insertDocFreq(docFreq: OrderedDocFreq, mode: String): Unit = {
    val docsCols = Gemini.tables.featuresDocsCols
    cassandra.execute(
      s"INSERT INTO $keyspace.${Gemini.tables.featuresDocs} (${docsCols.id}, ${docsCols.docs}) VALUES (?, ?)",
      mode, int2Integer(docFreq.docs)
    )

    val freqCols = Gemini.tables.featuresFreqCols
    docFreq.df.foreach { case(feature, weight) =>
      cassandra.execute(
        s"INSERT INTO $keyspace.${Gemini.tables.featuresFreq}" +
          s"(${freqCols.id}, ${freqCols.feature}, ${freqCols.weight}) VALUES (?, ?, ?)",
        mode, feature, int2Integer(weight)
      )
    }
  }

  override def afterAll(): Unit = {
    Gemini(null, _logger, keyspace).dropSchema(cassandra)
    cassandra.close()
    super.afterAll()
  }
} 
Example 20
Source File: Database.scala    From gemini   with GNU General Public License v3.0
package tech.sourced.gemini

import com.datastax.driver.core.{Row, Session}
import com.datastax.driver.core.querybuilder.QueryBuilder

import scala.collection.JavaConverters._

case class MetaCols(sha: String, repo: String, commit: String, path: String)
case class HashtablesCols(sha: String, hashtable: String, value: String)
case class FeaturesDocsCols(id: String, docs: String)
case class FeaturesFreqCols(id: String, feature: String, weight: String)


object Database {

  def findFilesByHash(sha: String, conn: Session, keyspace: String, tables: Tables): Iterable[RepoFile] = {
    val query = QueryBuilder.select().all().from(keyspace, tables.meta)
      .where(QueryBuilder.eq(tables.metaCols.sha, sha))

    conn.execute(query).asScala.map(rowToRepoFile(tables))
  }

  def rowToRepoFile(tables: Tables)(row: Row): RepoFile = {
    val cols = tables.metaCols
    RepoFile(row.getString(cols.repo), row.getString(cols.commit), row.getString(cols.path), row.getString(cols.sha))
  }
} 
Example 21
Source File: CassandraForeachWriter.scala    From structured-streaming-application   with Apache License 2.0
package knolx.spark

import com.datastax.driver.core.{Cluster, Session}
import knolx.Config.{cassandraHosts, keyspace}
import org.apache.spark.sql.{ForeachWriter, Row}


object CassandraForeachWriter extends Serializable {
  val writeToCassandra = new ForeachWriter[Row] {
    private var cluster: Cluster = _
    private var session: Session = _

    override def process(row: Row): Unit = {
      val word = row.getString(0)
      val count = row.getLong(1)

      session.execute(s"insert into $keyspace.wordcount (word, count) values ('$word', $count);")
    }

    override def close(errorOrNull: Throwable): Unit = {
      session.close()
      session.getCluster.close()
    }

    override def open(partitionId: Long, version: Long): Boolean = {
      cluster = Cluster.builder.addContactPoints(cassandraHosts).build
      session = cluster.newSession()
      true
    }
  }
} 
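A sketch of attaching the writer to a streaming query (the source DataFrame is an assumption; it must carry word/count columns in that order):

package knolx.spark

import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.streaming.StreamingQuery

object CassandraSinkDemo {
  // wordCounts: streaming DataFrame with (word: String, count: Long),
  // e.g. produced by groupBy("word").count().
  def startSink(wordCounts: DataFrame): StreamingQuery =
    wordCounts.writeStream
      .outputMode("update")
      .foreach(CassandraForeachWriter.writeToCassandra)
      .start()
}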
Example 22
Source File: CassandraConnector.scala    From BusFloatingData   with Apache License 2.0
package de.nierbeck.floating.data.server

import com.datastax.driver.core.{Cluster, Host, Metadata, Session}
import scala.collection.JavaConversions._


object CassandraConnector {

  import ServiceConfig._

  def connect(): Session = {
    val cluster = Cluster.builder().addContactPoint(cassandraNodeName).withPort(Integer.valueOf(cassandraNodePort)).build()
    val metadata:Metadata = cluster.getMetadata
//    logger.info("Connected to cluster: {}", metadata.getClusterName)
    println(s"Connected to cluster: ${metadata.getClusterName}")
    metadata.getAllHosts foreach {
      case host: Host =>
        println(s"Datatacenter: ${host.getDatacenter}; Host: ${host.getAddress}; Rack: ${host.getRack}")
//        logger.info("Datatacenter: {}; Host: {}; Rack: {}", host.getDatacenter, host.getAddress, host.getRack)
    }

    cluster.newSession()
  }

  def close(session: Session) {
    val cluster = session.getCluster
    session.close()
    cluster.close()
  }

}