akka.persistence.query.PersistenceQuery Scala Examples

The following examples show how to use akka.persistence.query.PersistenceQuery. Each one is taken from an open-source project; the source file, project, and license are noted in the header above the code.
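All of them share the same basic shape: PersistenceQuery is an Akka extension, obtained from an ActorSystem, that resolves a concrete read journal by its plugin identifier, and the journal's query methods return Akka Streams sources. Below is a minimal, self-contained sketch of that pattern; it assumes Akka 2.5.x, where the LevelDB query plugin ships with akka-persistence-query, and a configured LevelDB journal (the object name and system name are illustrative only):

import akka.actor.ActorSystem
import akka.persistence.query.PersistenceQuery
import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
import akka.stream.ActorMaterializer

object PersistenceQueryBasics extends App {
  implicit val system: ActorSystem = ActorSystem("basics")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // PersistenceQuery is an extension; readJournalFor looks the plugin up by its identifier.
  val readJournal: LeveldbReadJournal =
    PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier)

  // Each query returns a Source that is materialized like any other Akka Stream.
  readJournal
    .currentPersistenceIds()
    .runForeach(id => println(s"Found persistence id: $id"))
}

The project examples below vary only in the plugin they target (Cassandra, JDBC, Redis, Couchbase, LevelDB, in-memory) and in which queries they run (persistenceIds, eventsByPersistenceId, eventsByTag and their current* variants).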
Example 1
Source File: CassandraQueryJournalOverrideSpec.scala    From akka-persistence-cassandra   with Apache License 2.0
package akka.persistence.cassandra.query

import akka.actor.ExtendedActorSystem
import akka.persistence.PersistentRepr
import akka.persistence.cassandra.TestTaggingActor.Ack
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.cassandra.{ CassandraLifecycle, CassandraSpec, TestTaggingActor }
import akka.persistence.query.{ PersistenceQuery, ReadJournalProvider }
import akka.stream.testkit.scaladsl.TestSink
import com.typesafe.config.{ Config, ConfigFactory }

import scala.concurrent.duration._

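// Read journal that rewrites every replayed event payload to "cat" via the internal mapEvent hook.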
class JournalOverride(as: ExtendedActorSystem, config: Config, configPath: String)
    extends CassandraReadJournal(as, config, configPath) {
  override private[akka] def mapEvent(pr: PersistentRepr) =
    PersistentRepr("cat", pr.sequenceNr, pr.persistenceId, pr.manifest, pr.deleted, pr.sender, pr.writerUuid)
}

class JournalOverrideProvider(as: ExtendedActorSystem, config: Config, configPath: String) extends ReadJournalProvider {
  override def scaladslReadJournal() = new JournalOverride(as, config, configPath)
  override def javadslReadJournal() = null
}

object CassandraQueryJournalOverrideSpec {

  val config = ConfigFactory.parseString("""
      akka.persistence.cassandra.query {
        class = "akka.persistence.cassandra.query.JournalOverrideProvider"
      }
    """.stripMargin).withFallback(CassandraLifecycle.config)

}

class CassandraQueryJournalOverrideSpec extends CassandraSpec(CassandraQueryJournalOverrideSpec.config) {

  lazy val journal =
    PersistenceQuery(system).readJournalFor[JournalOverride](CassandraReadJournal.Identifier)

  "Cassandra query journal override" must {
    "map events" in {
      val pid = "p1"
      val p1 = system.actorOf(TestTaggingActor.props(pid))
      p1 ! "not a cat"
      expectMsg(Ack)

      val currentEvents = journal.currentEventsByPersistenceId(pid, 0, Long.MaxValue)
      val currentProbe = currentEvents.map(_.event.toString).runWith(TestSink.probe[String])
      currentProbe.request(2)
      currentProbe.expectNext("cat")
      currentProbe.expectComplete()

      val liveEvents = journal.eventsByPersistenceId(pid, 0, Long.MaxValue)
      val liveProbe = liveEvents.map(_.event.toString).runWith(TestSink.probe[String])
      liveProbe.request(2)
      liveProbe.expectNext("cat")
      liveProbe.expectNoMessage(100.millis)
      liveProbe.cancel()

      val internalEvents = journal.eventsByPersistenceIdWithControl(pid, 0, Long.MaxValue, None)
      val internalProbe = internalEvents.map(_.event.toString).runWith(TestSink.probe[String])
      internalProbe.request(2)
      internalProbe.expectNext("cat")
      internalProbe.expectNoMessage(100.millis)
      internalProbe.cancel()
    }
  }
} 
Example 2
Source File: Main.scala    From whirlwind-tour-akka-typed   with Apache License 2.0
package de.heikoseeberger.wtat

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props, Terminated }
import akka.actor.typed.SupervisorStrategy.restartWithBackoff
import akka.actor.typed.scaladsl.Actor.supervise
import akka.cluster.Cluster
import akka.cluster.bootstrap.ClusterBootstrap
import akka.cluster.http.management.ClusterHttpManagement
import akka.cluster.typed.{ ClusterSingleton, ClusterSingletonSettings }
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import akka.stream.{ ActorMaterializer, Materializer }
import pureconfig.loadConfigOrThrow

object Main {
  import akka.actor.typed.scaladsl.adapter._

  final class Root(config: Config) extends Actor with ActorLogging {

    private implicit val mat: Materializer = ActorMaterializer()

    private val clusterSingletonSettings = ClusterSingletonSettings(context.system.toTyped)

    private val userRepository =
      ClusterSingleton(context.system.toTyped).spawn(UserRepository(),
                                                     UserRepository.Name,
                                                     akka.actor.typed.Props.empty,
                                                     clusterSingletonSettings,
                                                     UserRepository.Stop)

    private val userView = context.spawn(UserView(), UserView.Name)

    private val userProjection = {
      import config.userProjection._
      val readJournal =
        PersistenceQuery(context.system)
          .readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
      val userProjection =
        supervise(UserProjection(readJournal, userView, askTimeout))
          .onFailure[UserProjection.EventStreamCompleteException](
            restartWithBackoff(minBackoff, maxBackoff, 0)
          )
      ClusterSingleton(context.system.toTyped).spawn(userProjection,
                                                     UserProjection.Name,
                                                     akka.actor.typed.Props.empty,
                                                     clusterSingletonSettings,
                                                     UserProjection.Stop)
    }

    private val api = {
      import config.api._
      context.spawn(Api(address, port, userRepository, userView, askTimeout), Api.Name)
    }

    context.watch(userRepository)
    context.watch(userView)
    context.watch(userProjection)
    context.watch(api)
    log.info(s"${context.system.name} up and running")

    override def receive = {
      case Terminated(actor) =>
        log.error(s"Shutting down, because actor ${actor.path} terminated!")
        context.system.terminate()
    }
  }

  def main(args: Array[String]): Unit = {
    sys.props += "log4j2.contextSelector" -> "org.apache.logging.log4j.core.async.AsyncLoggerContextSelector"

    val config  = loadConfigOrThrow[Config]("wtat")
    val system  = ActorSystem("wtat")
    val cluster = Cluster(system)

    if (config.useClusterBootstrap) {
      ClusterHttpManagement(cluster).start()
      ClusterBootstrap(system).start()
    }

    cluster.registerOnMemberUp(system.actorOf(Props(new Root(config)), "root"))
  }
} 
Example 3
Source File: ReadJournalSource.scala    From apache-spark-test   with Apache License 2.0
package akka.persistence.jdbc.spark.sql.execution.streaming

import akka.actor.{ ActorSystem, ExtendedActorSystem }
import akka.persistence.query.PersistenceQuery
import akka.persistence.query.scaladsl.{ CurrentEventsByPersistenceIdQuery, CurrentEventsByTagQuery, CurrentPersistenceIdsQuery, ReadJournal }
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.extension.{ Sink => Snk }
import akka.stream.{ ActorMaterializer, Materializer }
import org.apache.spark.sql._
import org.apache.spark.sql.execution.streaming.{ LongOffset, Offset, Source }
import org.apache.spark.sql.types.StructType

import scala.collection.immutable._
import scala.concurrent.duration.{ FiniteDuration, _ }
import scala.concurrent.{ Await, ExecutionContext, Future }

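// A Spark Structured Streaming Source backed by an Akka Persistence read journal; only the current* queries are used since Spark pulls batches synchronously.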
trait ReadJournalSource {
  _: Source =>
  def readJournalPluginId: String
  def sqlContext: SQLContext

  // some machinery
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher

  // read journal, only interested in the Current queries, as Spark isn't asynchronous
  lazy val readJournal = PersistenceQuery(system).readJournalFor(readJournalPluginId)
    .asInstanceOf[ReadJournal with CurrentPersistenceIdsQuery with CurrentEventsByPersistenceIdQuery with CurrentEventsByTagQuery]

  implicit class FutureOps[A](f: Future[A])(implicit ec: ExecutionContext, timeout: FiniteDuration = null) {
    def futureValue: A = Await.result(f, Option(timeout).getOrElse(10.seconds))
  }

  def maxPersistenceIds: Long =
    readJournal.currentPersistenceIds().runWith(Snk.count).futureValue

  def persistenceIds(start: Long, end: Long) =
    readJournal.currentPersistenceIds().drop(start).take(end).runWith(Sink.seq).futureValue

  def maxEventsByPersistenceId(pid: String): Long =
    readJournal.currentEventsByPersistenceId(pid, 0, Long.MaxValue).runWith(Snk.count).futureValue

  def eventsByPersistenceId(pid: String, start: Long, end: Long, eventMapperFQCN: String): Seq[Row] = {
    readJournal.currentEventsByPersistenceId(pid, start, end)
      .map(env => getMapper(eventMapperFQCN).get.row(env, sqlContext)).runWith(Sink.seq).futureValue
  }

  implicit def mapToDataFrame(rows: Seq[Row]): DataFrame = {
    import scala.collection.JavaConversions._
    sqlContext.createDataFrame(rows, schema)
  }

  def getStartEnd(_start: Option[Offset], _end: Offset): (Long, Long) = (_start, _end) match {
    case (Some(LongOffset(start)), LongOffset(end)) => (start, end)
    case (None, LongOffset(end))                    => (0L, end)
  }

  def getMapper(eventMapperFQCN: String): Option[EventMapper] =
    system.asInstanceOf[ExtendedActorSystem].dynamicAccess.createInstanceFor[EventMapper](eventMapperFQCN, List.empty)
      .recover { case cause => cause.printStackTrace(); null }.toOption

  override def stop(): Unit = {
    println("Stopping jdbc read journal")
    system.terminate()
  }
} 
Example 4
Source File: UserRepositorySpec.scala    From gabbler   with Apache License 2.0
package de.heikoseeberger.gabbler.user

import akka.actor.ActorSystem
import akka.persistence.inmemory.query.scaladsl.InMemoryReadJournal
import akka.persistence.query.PersistenceQuery
import akka.stream.ActorMaterializer
import akka.testkit.TestProbe
import org.scalatest.{ AsyncWordSpec, BeforeAndAfterAll, Matchers }
import scala.concurrent.Await
import scala.concurrent.duration.DurationInt

class UserRepositorySpec extends AsyncWordSpec with Matchers with BeforeAndAfterAll {
  import UserRepository._

  private implicit val system = ActorSystem()

  private implicit val mat = ActorMaterializer()

  private val readJournal = PersistenceQuery(system)
    .readJournalFor[InMemoryReadJournal](InMemoryReadJournal.Identifier)

  private val user = User(0, "jsnow", "Jon Snow", "[email protected]")

  "UserRepository" should {
    "correctly handle getting, adding and removing users" in {
      import user._

      val userRepository     = system.actorOf(UserRepository(readJournal))
      val sender             = TestProbe()
      implicit val senderRef = sender.ref

      userRepository ! GetUsers
      sender.expectMsg(Users(Set.empty))

      userRepository ! AddUser(username, nickname, email)
      val userAdded = sender.expectMsg(UserAdded(user))
      userRepository ! GetUsers
      sender.expectMsg(Users(Set(user)))

      userRepository ! AddUser(username, "Jon Targaryen", "[email protected]")
      sender.expectMsg(UsernameTaken(username))

      userRepository ! RemoveUser(id)
      val userRemoved = sender.expectMsg(UserRemoved(user))
      userRepository ! GetUsers
      sender.expectMsg(Users(Set.empty))

      userRepository ! RemoveUser(id)
      sender.expectMsg(IdUnknown(id))

      userRepository ! GetUserEvents(0)
      val userEvents = sender.expectMsgPF(hint = "source of user events") {
        case UserEvents(e) => e
      }
      userEvents
        .take(2)
        .runFold(Vector.empty[(Long, UserEvent)])(_ :+ _)
        .map(
          _ should contain inOrder (
            (1, userAdded), // The first event has seqNo 1!
            (2, userRemoved)
          )
        )
    }
  }

  override protected def afterAll() = {
    Await.ready(system.terminate(), 42.seconds)
    super.afterAll()
  }
} 
Example 5
Source File: UserApp.scala    From gabbler   with Apache License 2.0
package de.heikoseeberger.gabbler.user

import akka.NotUsed
import akka.actor.{ Actor, ActorLogging, ActorSystem, Props, SupervisorStrategy, Terminated }
import akka.cluster.Cluster
import akka.cluster.singleton.{
  ClusterSingletonManager,
  ClusterSingletonManagerSettings,
  ClusterSingletonProxy,
  ClusterSingletonProxySettings
}
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery

object UserApp {

  final class Root extends Actor with ActorLogging {

    override val supervisorStrategy = SupervisorStrategy.stoppingStrategy

    private val userRepository = {
      val readJournal =
        PersistenceQuery(context.system)
          .readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
      val userRepository =
        context.actorOf(
          ClusterSingletonManager.props(UserRepository(readJournal),
                                        NotUsed,
                                        ClusterSingletonManagerSettings(context.system)),
          UserRepository.Name
        )
      context.actorOf(
        ClusterSingletonProxy.props(userRepository.path.elements.mkString("/", "/", ""),
                                    ClusterSingletonProxySettings(context.system)),
        s"${UserRepository.Name}-proxy"
      )
    }

    private val userApi = {
      val config  = context.system.settings.config
      val address = config.getString("gabbler-user.user-api.address")
      val port    = config.getInt("gabbler-user.user-api.port")
      val timeout = config.getDuration("gabbler-user.user-api.user-repository-timeout").asScala
      context.actorOf(UserApi(address, port, userRepository, timeout), UserApi.Name)
    }

    context.watch(userRepository)
    context.watch(userApi)
    log.info("gabbler-user up and running")

    override def receive = {
      case Terminated(actor) =>
        log.error("Terminating the system because {} terminated!", actor.path)
        context.system.terminate()
    }
  }

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("gabbler-user")
    Cluster(system).registerOnMemberUp(system.actorOf(Props(new Root), "root"))
  }
} 
Example 6
Source File: RepairFromMessages.scala    From nexus-kg   with Apache License 2.0
package ch.epfl.bluebrain.nexus.kg

import java.net.URLDecoder
import java.util.UUID

import akka.actor.ActorSystem
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import ch.epfl.bluebrain.nexus.kg.resources.{Id, Repo, ResId}
import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef
import ch.epfl.bluebrain.nexus.rdf.Iri
import com.typesafe.scalalogging.Logger
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.schedulers.CanBlock

import scala.concurrent.Future
import scala.util.Try


object RepairFromMessages {
  // $COVERAGE-OFF$

  private val log = Logger[RepairFromMessages.type]

  def repair(repo: Repo[Task])(implicit as: ActorSystem, sc: Scheduler, pm: CanBlock): Unit = {
    log.info("Repairing dependent tables from messages.")
    val pq = PersistenceQuery(as).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
    Task
      .fromFuture {
        pq.currentPersistenceIds()
          .mapAsync(1) {
            case ResourceId(id) => (repo.get(id, None).value >> Task.unit).runToFuture
            case other =>
              log.warn(s"Unknown persistence id '$other'")
              Future.successful(())
          }
          .runFold(0) {
            case (acc, _) =>
              if (acc % 1000 == 0) log.info(s"Processed '$acc' persistence ids.")
              acc + 1
          }
          .map(_ => ())
      }
      .runSyncUnsafe()
    log.info("Finished repairing dependent tables from messages.")
  }

  object ResourceId {
    private val regex =
      "^resources\\-([0-9a-fA-F]{8}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{12})\\-(.+)$".r
    def unapply(arg: String): Option[ResId] =
      arg match {
        case regex(stringUuid, stringId) =>
          for {
            uuid <- Try(UUID.fromString(stringUuid)).toOption
            iri  <- Iri.absolute(URLDecoder.decode(stringId, "UTF-8")).toOption
          } yield Id(ProjectRef(uuid), iri)
        case _ => None
      }
  }
  // $COVERAGE-ON$
} 
Example 7
Source File: MigrateV12ToV13.scala    From nexus-kg   with Apache License 2.0
package ch.epfl.bluebrain.nexus.kg

import akka.actor.ActorSystem
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.{EventEnvelope, NoOffset, PersistenceQuery}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.commons.test.Resources
import ch.epfl.bluebrain.nexus.iam.client.types._
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.kg.resources.Event.{Created, Updated}
import ch.epfl.bluebrain.nexus.kg.resources.{OrganizationRef, ResId, Views}
import com.typesafe.scalalogging.Logger
import io.circe.Json
import io.circe.parser.parse
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.schedulers.CanBlock

import scala.concurrent.Future

object MigrateV12ToV13 extends Resources {
  private val log                                     = Logger[MigrateV12ToV13.type]
  private val newMapping                              = jsonContentOf("/elasticsearch/mapping.json")
  private val defaultEsId                             = nxv.defaultElasticSearchIndex.value
  private implicit val mockedAcls: AccessControlLists = AccessControlLists.empty

  def migrate(
      views: Views[Task],
      adminClient: AdminClient[Task]
  )(implicit config: AppConfig, as: ActorSystem, sc: Scheduler, pm: CanBlock): Unit = {

    implicit val token: Option[AuthToken] = config.iam.serviceAccountToken

    def checkAndUpdateMapping(id: ResId, rev: Long, source: Json)(
        implicit project: Project,
        caller: Caller
    ): Task[Unit] = {

      source.hcursor.get[String]("mapping").flatMap(parse) match {
        case Left(err) =>
          log.error(s"Error while fetching mapping for view id ${id.show}. Reason: '$err'")
          Task.unit
        case Right(mapping) if mapping == newMapping =>
          Task.unit
        case _ =>
          views.update(id, rev, source deepMerge Json.obj("mapping" -> newMapping)).value.flatMap {
            case Left(err) =>
              log.error(s"Error updating the view with id '${id.show}' and rev '$rev'. Reason: '$err'")
              Task.unit
            case _ =>
              log.info(s"View with id '${id.show}' and rev '$rev' was successfully updated.")
              Task.unit
          }
      }
    }

    def fetchProject(orgRef: OrganizationRef, id: ResId)(f: Project => Task[Unit]): Task[Unit] = {
      adminClient.fetchProject(orgRef.id, id.parent.id).flatMap {
        case Some(project) => f(project)
        case None =>
          log.error(s"Project with id '${id.parent.id}' was not found for view with id '${id.show}'")
          Task.unit

      }
    }

    log.info("Migrating views mappings.")
    val pq = PersistenceQuery(as).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
    Task
      .fromFuture {
        pq.currentEventsByTag(s"type=${nxv.ElasticSearchView.value.asString}", NoOffset)
          .mapAsync(1) {
            case EventEnvelope(_, _, _, Created(id, orgRef, _, _, source, _, subject)) if id.value == defaultEsId =>
              fetchProject(orgRef, id) { project =>
                checkAndUpdateMapping(id, 1L, source)(project, Caller(subject, Set(subject)))
              }.runToFuture
            case EventEnvelope(_, _, _, Updated(id, orgRef, rev, _, source, _, subject)) if id.value == defaultEsId =>
              fetchProject(orgRef, id) { project =>
                checkAndUpdateMapping(id, rev, source)(project, Caller(subject, Set(subject)))
              }.runToFuture
            case _ =>
              Future.unit

          }
          .runFold(0) {
            case (acc, _) =>
              if (acc % 10 == 0) log.info(s"Processed '$acc' persistence ids.")
              acc + 1
          }
          .map(_ => ())
      }
      .runSyncUnsafe()
    log.info("Finished migrating views mappings.")
  }

} 
Example 8
Source File: JournalReaderImpl.scala    From akka-ddd-cqrs-es-example   with MIT License
package com.github.j5ik2o.bank.adaptor.readJournal

import akka.NotUsed
import akka.actor.ActorSystem
import akka.persistence.jdbc.query.scaladsl.JdbcReadJournal
import akka.persistence.query.scaladsl._
import akka.persistence.query.{ Offset, PersistenceQuery }
import akka.stream.scaladsl.Source
import com.github.j5ik2o.bank.useCase.port.{ EventBody, JournalReader }

object JournalReaderImpl {
  type ReadJournalType =
    ReadJournal with CurrentPersistenceIdsQuery with PersistenceIdsQuery with CurrentEventsByPersistenceIdQuery with EventsByPersistenceIdQuery with CurrentEventsByTagQuery with EventsByTagQuery
}

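// Adapts the JDBC read journal to the application's JournalReader port, exposing eventsByTag as a stream of EventBody.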
class JournalReaderImpl(implicit system: ActorSystem) extends JournalReader {

  private val readJournal: JournalReaderImpl.ReadJournalType =
    PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier)

  def eventsByTagSource(tag: String, seqNr: Long): Source[EventBody, NotUsed] = {
    readJournal.eventsByTag(tag, Offset.sequence(seqNr)).map { ee =>
      EventBody(ee.persistenceId, ee.sequenceNr, ee.event)
    }
  }

} 
Example 9
Source File: CqrsApp.scala    From akka-kubernetes-tests   with Apache License 2.0
package akka.kubernetes.couchbase

import java.util.UUID
import java.util.concurrent.atomic.AtomicInteger

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.persistence.couchbase.scaladsl.CouchbaseReadJournal
import akka.persistence.query.{NoOffset, PersistenceQuery}
import akka.stream.ActorMaterializer

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.{Random, Try}

object CqrsApp {

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("CouchbaseSystem")
    val materializer = ActorMaterializer()(system)
    val ec: ExecutionContext = system.dispatcher
    val log = system.log

    AkkaManagement(system).start()
    ClusterBootstrap(system).start()

    Cluster(system).registerOnMemberUp {

      val selfRoles = Cluster(system).selfRoles

      log.info("Running with roles {}", selfRoles)

      val shardedSwitchEntity = ShardedSwitchEntity(system)
      shardedSwitchEntity.start()
      EventProcessorWrapper(system).start()

      if (selfRoles.contains("load-generation")) {
        log.info("Starting load generation")
        testIt(system, shardedSwitchEntity)
      }

      if (selfRoles.contains("simple-query")) {
        log.info("Starting simple query")
        verySimpleRead(system, materializer, ec)
      }
    }

    def verySimpleRead(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext): Unit = {
      val query = PersistenceQuery(system).readJournalFor[CouchbaseReadJournal](CouchbaseReadJournal.Identifier)
      val startTime = System.currentTimeMillis()
      query
        .currentEventsByTag("tag1", NoOffset)
        .runFold(0)((count, _) => count + 1)
        .onComplete { t: Try[Int] =>
          system.log.info("Query finished for tag1 in {}. Read {} rows",
                          (System.currentTimeMillis() - startTime).millis.toSeconds,
                          t)
        }
    }

    // Every instance will create 100 persistent actors and send 2 messages to each every 2 seconds
    def testIt(system: ActorSystem, shardedSwitch: ShardedSwitchEntity): Unit = {
      val uuid = UUID.randomUUID()
      val nrSwitches = 100
      def switchName(nr: Int) = s"switch-$uuid-$nr"
      log.info("Creating {} switches with uuid {}", nrSwitches, uuid)
      (0 until nrSwitches) foreach { s =>
        shardedSwitch.tell(switchName(s), SwitchEntity.CreateSwitch(6))
      }
      import system.dispatcher
      system.scheduler.schedule(3.seconds, 2.second) {
        (0 until nrSwitches) foreach { s =>
          val switch = switchName(s)
          log.debug("Sending messages to switch {}", switch)
          shardedSwitch.tell(switch, SwitchEntity.SetPortStatus(Random.nextInt(6), portEnabled = true))
          shardedSwitch.tell(switch, SwitchEntity.SendPortStatus)
        }
      }
    }
  }

} 
Example 10
Source File: CassandraJournalAdapter.scala    From aecor   with MIT License
package aecor.runtime.akkapersistence

import java.util.UUID

import akka.actor.ActorSystem
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.{ NoOffset, Offset, PersistenceQuery, TimeBasedUUID }

final class CassandraJournalAdapter(system: ActorSystem,
                                    val writeJournalId: String,
                                    readJournalId: String)
    extends JournalAdapter[UUID] {

  override def createReadJournal: CassandraReadJournal =
    PersistenceQuery(system).readJournalFor[CassandraReadJournal](readJournalId)

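  // Adapts between Akka's generic Offset ADT and the time-based UUID offsets used by the Cassandra read journal.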
  override val journalOffset: OffsetAdapter = new OffsetAdapter {
    override def unapply(arg: Offset): Option[UUID] = arg match {
      case TimeBasedUUID(offsetValue) => Some(offsetValue)
      case _                          => None
    }
    override def apply(value: Option[UUID]): Offset = value match {
      case Some(x) => TimeBasedUUID(x)
      case None    => NoOffset
    }
  }
}

object CassandraJournalAdapter {
  val DefaultWriteJournalIdentifier: String = "cassandra-journal"
  def apply(system: ActorSystem,
            writeJournalId: String = DefaultWriteJournalIdentifier,
            readJournalId: String = CassandraReadJournal.Identifier): CassandraJournalAdapter =
    new CassandraJournalAdapter(system, writeJournalId, readJournalId)
} 
Example 11
Source File: AllPersistenceIdsSpec.scala    From akka-persistence-redis   with Apache License 2.0
package akka.persistence.query.journal.redis

import scala.concurrent.duration._

import akka.persistence.query.PersistenceQuery
import akka.persistence.query.scaladsl.PersistenceIdsQuery
import akka.stream.ActorMaterializer
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.AkkaSpec
import akka.testkit.ImplicitSender

object AllPersistenceIdsSpec {
  val config = """
    akka.loglevel = INFO
    akka.persistence.journal.plugin = "akka-persistence-redis.journal"
    akka.test.single-expect-default = 10s
    """
}

class AllPersistenceIdsSpec extends AkkaSpec(AllPersistenceIdsSpec.config)
  with Cleanup with ImplicitSender {

  implicit val mat = ActorMaterializer()(system)

  val queries = PersistenceQuery(system).readJournalFor[ScalaReadJournal](RedisReadJournal.Identifier)

  "Redis query AllPersistenceIds" must {

    "implement standard AllPersistenceIdsQuery" in {
      queries.isInstanceOf[PersistenceIdsQuery] should ===(true)
    }

    "find existing persistenceIds" in {
      system.actorOf(TestActor.props("a")) ! "a1"
      expectMsg("a1-done")
      system.actorOf(TestActor.props("b")) ! "b1"
      expectMsg("b1-done")
      system.actorOf(TestActor.props("c")) ! "c1"
      expectMsg("c1-done")

      val src = queries.currentPersistenceIds()
      val probe = src.runWith(TestSink.probe[String])
      probe.within(10.seconds) {
        probe.request(5)
          .expectNextUnordered("a", "b", "c")
          .expectComplete()
      }
    }

    "find new persistenceIds" in {
      // a, b, c created by previous step
      system.actorOf(TestActor.props("d")) ! "d1"
      expectMsg("d1-done")

      val src = queries.persistenceIds()
      val probe = src.runWith(TestSink.probe[String])
      probe.within(10.seconds) {
        probe.request(5)
          .expectNextUnorderedN(List("a", "b", "c", "d"))

        system.actorOf(TestActor.props("e")) ! "e1"
        probe.expectNext("e")

        val more = (1 to 100).map("f" + _)
        more.foreach { p =>
          system.actorOf(TestActor.props(p)) ! p
        }

        probe.request(100)
        probe.expectNextUnorderedN(more)
      }

    }
  }

} 
Example 12
Source File: RepairFromMessages.scala    From nexus-iam   with Apache License 2.0
package ch.epfl.bluebrain.nexus.iam

import java.net.URLDecoder

import akka.actor.ActorSystem
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import ch.epfl.bluebrain.nexus.iam.acls.Acls
import ch.epfl.bluebrain.nexus.iam.permissions.Permissions
import ch.epfl.bluebrain.nexus.iam.realms.Realms
import ch.epfl.bluebrain.nexus.iam.types.Label
import ch.epfl.bluebrain.nexus.rdf.Iri.Path
import com.typesafe.scalalogging.Logger
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.schedulers.CanBlock

import scala.concurrent.Future


object RepairFromMessages {
  // $COVERAGE-OFF$

  private val log = Logger[RepairFromMessages.type]

  def repair(
      p: Permissions[Task],
      r: Realms[Task],
      a: Acls[Task]
  )(implicit as: ActorSystem, sc: Scheduler, pm: CanBlock): Unit = {
    val pq = PersistenceQuery(as).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)

    pq.currentPersistenceIds()
      .mapAsync(1) {
        case PermissionsId() => p.agg.currentState(p.persistenceId).runToFuture
        case RealmId(label)  => r.agg.currentState(label.value).runToFuture
        case AclId(path)     => a.agg.currentState(path.asString).runToFuture
        case other =>
          log.warn(s"Unknown persistence id '$other'")
          Future.successful(())
      }
      .runFold(0) {
        case (acc, _) =>
          if (acc % 100 == 0) log.info(s"Processed '$acc' persistence ids.")
          acc + 1
      }
      .runSyncDiscard()

    log.info("Repair from messages table completed.")
  }

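  // Extractors that recognise the prefixes of the persistence ids written by each module.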
  sealed abstract class PersistenceId(prefix: String) {
    private val len = prefix.length
    protected def dropPrefix(arg: String): Option[String] =
      if (arg.startsWith(prefix)) Some(arg.drop(len))
      else None
  }
  object RealmId extends PersistenceId("realms-") {
    def unapply(arg: String): Option[Label] =
      dropPrefix(arg).map(Label.unsafe)
  }
  object AclId extends PersistenceId("acls-") {
    def unapply(arg: String): Option[Path] =
      dropPrefix(arg).flatMap(str => Path(URLDecoder.decode(str, "UTF-8")).toOption)
  }
  object PermissionsId {
    def unapply(arg: String): Boolean =
      arg == "permissions-permissions"
  }

  implicit class RichFuture[A](val future: Future[A]) extends AnyVal {
    def runSyncDiscard()(implicit s: Scheduler, permit: CanBlock): Unit =
      Task.fromFuture(future).map(_ => ()).runSyncUnsafe()
  }
  // $COVERAGE-ON$
} 
Example 13
Source File: HttpRequestRecorderItTest.scala    From rokku   with Apache License 2.0
package com.ing.wbaa.rokku.proxy.persistence

import akka.Done
import akka.actor.{ActorSystem, Props}
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.Uri.{Authority, Host}
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import com.amazonaws.services.s3.AmazonS3
import com.ing.wbaa.rokku.proxy.RokkuS3Proxy
import com.ing.wbaa.rokku.proxy.config.{HttpSettings, KafkaSettings, StorageS3Settings}
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.rokku.proxy.handler.parsers.RequestParser
import com.ing.wbaa.rokku.proxy.handler.{FilterRecursiveListBucketHandler, RequestHandlerS3Cache}
import com.ing.wbaa.rokku.proxy.provider.{AuditLogProvider, MessageProviderKafka, SignatureProviderAws}
import com.ing.wbaa.rokku.proxy.queue.MemoryUserRequestQueue
import com.ing.wbaa.testkit.RokkuFixtures
import org.scalatest.Assertion
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AsyncWordSpec

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

class HttpRequestRecorderItTest extends AsyncWordSpec with Diagrams with RokkuFixtures {
  implicit val testSystem: ActorSystem = ActorSystem.create("test-system")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  val rokkuHttpSettings: HttpSettings = new HttpSettings(testSystem.settings.config) {
    override val httpPort: Int = 0
    override val httpBind: String = "127.0.0.1"
  }

  def withS3SdkToMockProxy(testCode: AmazonS3 => Assertion): Future[Assertion] = {
    val proxy: RokkuS3Proxy = new RokkuS3Proxy with RequestHandlerS3Cache with SignatureProviderAws
      with FilterRecursiveListBucketHandler with MessageProviderKafka with AuditLogProvider with MemoryUserRequestQueue with RequestParser {
      override implicit lazy val system: ActorSystem = testSystem
      override val httpSettings: HttpSettings = rokkuHttpSettings

      override def isUserAuthorizedForRequest(request: S3Request, user: User)(implicit id: RequestId): Boolean = true

      override def isUserAuthenticated(httpRequest: HttpRequest, awsSecretKey: AwsSecretKey)(implicit id: RequestId): Boolean = true

      override val storageS3Settings: StorageS3Settings = StorageS3Settings(testSystem)
      override val kafkaSettings: KafkaSettings = KafkaSettings(testSystem)

      override def areCredentialsActive(awsRequestCredential: AwsRequestCredential)(implicit id: RequestId): Future[Option[User]] =
        Future(Some(User(UserRawJson("userId", Some(Set("group")), "accesskey", "secretkey", None))))

      def createLineageFromRequest(httpRequest: HttpRequest, userSTS: User, userIPs: UserIps)(implicit id: RequestId): Future[Done] = Future.successful(Done)

      override protected def auditEnabled: Boolean = false

      override val requestPersistenceEnabled: Boolean = true
      override val configuredPersistenceId: String = "localhost-1"
    }
    proxy.startup.map { binding =>
      try testCode(getAmazonS3(
        authority = Authority(Host(binding.localAddress.getAddress), binding.localAddress.getPort)
      ))
      finally proxy.shutdown()
    }
  }

  private val CHECKER_PERSISTENCE_ID = "localhost-1"
  val requestRecorder = testSystem.actorOf(Props(classOf[HttpRequestRecorder]), CHECKER_PERSISTENCE_ID)

  val queries = PersistenceQuery(testSystem)
    .readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)


  "S3 Proxy" should {
    s"with Request Recorder" that {
      "persists requests in Cassandra" in withS3SdkToMockProxy { sdk =>
        withBucket(sdk) { bucketName =>
          Thread.sleep(6000)
          val storedInCassandraF = queries.currentEventsByPersistenceId(CHECKER_PERSISTENCE_ID, 1L, Long.MaxValue)
            .map(_.event)
            .runWith(Sink.seq)
            .mapTo[Seq[ExecutedRequestEvt]]
          val r = Await.result(storedInCassandraF, 5.seconds).filter(_.httpRequest.getUri().toString.contains(bucketName))
          assert(r.size == 1)
          assert(r.head.userSTS.userName.value == "userId")
        }
      }
    }
  }
} 
Example 14
Source File: CassandraReadJournalSpec.scala    From akka-persistence-cassandra   with Apache License 2.0
package akka.persistence.cassandra.query.javadsl

import akka.persistence.cassandra.query.{ javadsl, scaladsl, TestActor }
import akka.persistence.cassandra.{ CassandraLifecycle, CassandraSpec }
import akka.persistence.journal.{ Tagged, WriteEventAdapter }
import akka.persistence.query.{ Offset, PersistenceQuery }
import akka.stream.testkit.scaladsl.TestSink
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

object CassandraReadJournalSpec {
  val config = ConfigFactory.parseString(s"""
    akka.actor.serialize-messages=off
    akka.persistence.cassandra.query.max-buffer-size = 10
    akka.persistence.cassandra.query.refresh-interval = 0.5s
    akka.persistence.cassandra.journal.event-adapters {
      test-tagger = akka.persistence.cassandra.query.javadsl.TestTagger
    }
    akka.persistence.cassandra.journal.event-adapter-bindings = {
      "java.lang.String" = test-tagger
    }
    """).withFallback(CassandraLifecycle.config)
}

class TestTagger extends WriteEventAdapter {
  override def manifest(event: Any): String = ""
  override def toJournal(event: Any): Any = event match {
    case s: String if s.startsWith("a") => Tagged(event, Set("a"))
    case _                              => event
  }
}

class CassandraReadJournalSpec extends CassandraSpec(CassandraReadJournalSpec.config) {

  lazy val javaQueries = PersistenceQuery(system)
    .getReadJournalFor(classOf[javadsl.CassandraReadJournal], scaladsl.CassandraReadJournal.Identifier)

  "Cassandra Read Journal Java API" must {
    "start eventsByPersistenceId query" in {
      val a = system.actorOf(TestActor.props("a"))
      a ! "a-1"
      expectMsg("a-1-done")

      val src = javaQueries.eventsByPersistenceId("a", 0L, Long.MaxValue)
      src.asScala.map(_.persistenceId).runWith(TestSink.probe[Any]).request(10).expectNext("a").cancel()
    }

    "start current eventsByPersistenceId query" in {
      val a = system.actorOf(TestActor.props("b"))
      a ! "b-1"
      expectMsg("b-1-done")

      val src = javaQueries.currentEventsByPersistenceId("b", 0L, Long.MaxValue)
      src.asScala.map(_.persistenceId).runWith(TestSink.probe[Any]).request(10).expectNext("b").expectComplete()
    }

    "start eventsByTag query" in {
      val src = javaQueries.eventsByTag("a", Offset.noOffset)
      src.asScala
        .map(_.persistenceId)
        .runWith(TestSink.probe[Any])
        .request(10)
        .expectNext("a")
        .expectNoMessage(100.millis)
        .cancel()
    }

    "start current eventsByTag query" in {
      val src = javaQueries.currentEventsByTag("a", Offset.noOffset)
      src.asScala.map(_.persistenceId).runWith(TestSink.probe[Any]).request(10).expectNext("a").expectComplete()
    }
  }
} 
Example 15
Source File: BuildTagViewForPersistenceId.scala    From akka-persistence-cassandra   with Apache License 2.0
package akka.persistence.cassandra.reconciler

import akka.actor.ActorSystem
import akka.persistence.cassandra.PluginSettings
import akka.Done
import akka.persistence.cassandra.journal.TagWriter._
import scala.concurrent.duration._
import scala.concurrent.Future
import akka.stream.scaladsl.Source
import akka.actor.ExtendedActorSystem
import akka.persistence.query.PersistenceQuery
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.event.Logging
import akka.persistence.cassandra.journal.CassandraTagRecovery
import akka.persistence.cassandra.Extractors
import akka.util.Timeout
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Sink
import akka.annotation.InternalApi
import akka.serialization.SerializationExtension


@InternalApi
private[akka] final class BuildTagViewForPersistenceId(
    persistenceId: String,
    system: ActorSystem,
    recovery: CassandraTagRecovery,
    settings: PluginSettings) {

  import system.dispatcher

  private implicit val sys = system
  private val log = Logging(system, classOf[BuildTagViewForPersistenceId])
  private val serialization = SerializationExtension(system)

  private val queries: CassandraReadJournal =
    PersistenceQuery(system.asInstanceOf[ExtendedActorSystem])
      .readJournalFor[CassandraReadJournal]("akka.persistence.cassandra.query")

  private implicit val flushTimeout = Timeout(30.seconds)

  def reconcile(flushEvery: Int = 1000): Future[Done] = {

    val recoveryPrep = for {
      tp <- recovery.lookupTagProgress(persistenceId)
      _ <- recovery.setTagProgress(persistenceId, tp)
    } yield tp

    Source
      .futureSource(recoveryPrep.map((tp: Map[String, TagProgress]) => {
        log.debug("[{}] Rebuilding tag view table from: [{}]", persistenceId, tp)
        queries
          .eventsByPersistenceId(
            persistenceId,
            0,
            Long.MaxValue,
            Long.MaxValue,
            None,
            settings.journalSettings.readProfile,
            "BuildTagViewForPersistenceId",
            extractor = Extractors.rawEvent(settings.eventsByTagSettings.bucketSize, serialization, system))
          .map(recovery.sendMissingTagWriteRaw(tp, actorRunning = false))
          .buffer(flushEvery, OverflowStrategy.backpressure)
          .mapAsync(1)(_ => recovery.flush(flushTimeout))
      }))
      .runWith(Sink.ignore)

  }

} 
Example 16
Source File: RepairFromMessages.scala    From nexus   with Apache License 2.0
package ch.epfl.bluebrain.nexus.iam

import java.net.URLDecoder

import akka.actor.ActorSystem
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import ch.epfl.bluebrain.nexus.iam.acls.Acls
import ch.epfl.bluebrain.nexus.iam.permissions.Permissions
import ch.epfl.bluebrain.nexus.iam.realms.Realms
import ch.epfl.bluebrain.nexus.iam.types.Label
import ch.epfl.bluebrain.nexus.rdf.Iri.Path
import com.typesafe.scalalogging.Logger
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.schedulers.CanBlock

import scala.concurrent.Future


object RepairFromMessages {
  // $COVERAGE-OFF$

  private val log = Logger[RepairFromMessages.type]

  def repair(
      p: Permissions[Task],
      r: Realms[Task],
      a: Acls[Task]
  )(implicit as: ActorSystem, sc: Scheduler, pm: CanBlock): Unit = {
    val pq = PersistenceQuery(as).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)

    pq.currentPersistenceIds()
      .mapAsync(1) {
        case PermissionsId() => p.agg.currentState(p.persistenceId).runToFuture
        case RealmId(label)  => r.agg.currentState(label.value).runToFuture
        case AclId(path)     => a.agg.currentState(path.asString).runToFuture
        case other           =>
          log.warn(s"Unknown persistence id '$other'")
          Future.successful(())
      }
      .runFold(0) {
        case (acc, _) =>
          if (acc % 100 == 0) log.info(s"Processed '$acc' persistence ids.")
          acc + 1
      }
      .runSyncDiscard()

    log.info("Repair from messages table completed.")
  }

  sealed abstract class PersistenceId(prefix: String) {
    private val len                                       = prefix.length
    protected def dropPrefix(arg: String): Option[String] =
      if (arg.startsWith(prefix)) Some(arg.drop(len))
      else None
  }
  object RealmId extends PersistenceId("realms-") {
    def unapply(arg: String): Option[Label] =
      dropPrefix(arg).map(Label.unsafe)
  }
  object AclId extends PersistenceId("acls-") {
    def unapply(arg: String): Option[Path] =
      dropPrefix(arg).flatMap(str => Path(URLDecoder.decode(str, "UTF-8")).toOption)
  }
  object PermissionsId                                {
    def unapply(arg: String): Boolean =
      arg == "permissions-permissions"
  }

  implicit class RichFuture[A](val future: Future[A]) extends AnyVal {
    def runSyncDiscard()(implicit s: Scheduler, permit: CanBlock): Unit =
      Task.fromFuture(future).map(_ => ()).runSyncUnsafe()
  }
  // $COVERAGE-ON$
} 
Example 17
Source File: RepairFromMessages.scala    From nexus   with Apache License 2.0
package ch.epfl.bluebrain.nexus.kg

import java.net.URLDecoder
import java.util.UUID

import akka.actor.ActorSystem
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import ch.epfl.bluebrain.nexus.kg.resources.{Id, Repo, ResId}
import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef
import ch.epfl.bluebrain.nexus.rdf.Iri
import com.typesafe.scalalogging.Logger
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.schedulers.CanBlock

import scala.concurrent.Future
import scala.util.Try


object RepairFromMessages {
  // $COVERAGE-OFF$

  private val log = Logger[RepairFromMessages.type]

  def repair(repo: Repo[Task])(implicit as: ActorSystem, sc: Scheduler, pm: CanBlock): Unit = {
    log.info("Repairing dependent tables from messages.")
    val pq = PersistenceQuery(as).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
    Task
      .fromFuture {
        pq.currentPersistenceIds()
          .mapAsync(1) {
            case ResourceId(id) => (repo.get(id, None).value >> Task.unit).runToFuture
            case other          =>
              log.warn(s"Unknown persistence id '$other'")
              Future.successful(())
          }
          .runFold(0) {
            case (acc, _) =>
              if (acc % 1000 == 0) log.info(s"Processed '$acc' persistence ids.")
              acc + 1
          }
          .map(_ => ())
      }
      .runSyncUnsafe()
    log.info("Finished repairing dependent tables from messages.")
  }

  object ResourceId {
    private val regex                       =
      "^resources\\-([0-9a-fA-F]{8}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{12})\\-(.+)$".r
    def unapply(arg: String): Option[ResId] =
      arg match {
        case regex(stringUuid, stringId) =>
          for {
            uuid <- Try(UUID.fromString(stringUuid)).toOption
            iri  <- Iri.absolute(URLDecoder.decode(stringId, "UTF-8")).toOption
          } yield Id(ProjectRef(uuid), iri)
        case _                           => None
      }
  }
  // $COVERAGE-ON$
} 
Example 18
Source File: RepairFromMessages.scala    From nexus   with Apache License 2.0
package ch.epfl.bluebrain.nexus.admin

import java.util.UUID

import akka.actor.ActorSystem
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import ch.epfl.bluebrain.nexus.admin.organizations.Organizations
import ch.epfl.bluebrain.nexus.admin.projects.Projects
import com.typesafe.scalalogging.Logger
import monix.eval.Task
import monix.execution.Scheduler

import scala.concurrent.Future
import scala.util.Try


object RepairFromMessages {

  private val log = Logger[RepairFromMessages.type]

  def repair(
      o: Organizations[Task],
      p: Projects[Task]
  )(implicit as: ActorSystem, sc: Scheduler): Future[Unit] = {
    val pq = PersistenceQuery(as).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)

    pq.currentPersistenceIds()
      .mapAsync(1) {
        case OrgId(uuid)  => (o.fetch(uuid) >> Task.unit).runToFuture
        case ProjId(uuid) => (p.fetch(uuid) >> Task.unit).runToFuture
        case other        =>
          log.warn(s"Unknown persistence id '$other'")
          Future.successful(())
      }
      .runFold(0) {
        case (acc, _) =>
          if (acc % 100 == 0) log.info(s"Processed '$acc' persistence ids.")
          acc + 1
      }
      .map(_ => ())
  }

  sealed abstract class PersistenceId(prefix: String) {
    private val len                        = prefix.length
    def unapply(arg: String): Option[UUID] =
      if (arg.startsWith(prefix)) Try(UUID.fromString(arg.drop(len))).toOption
      else None
  }
  object OrgId extends PersistenceId("organizations-")
  object ProjId extends PersistenceId("projects-")
} 
Example 19
Source File: LeagueProjection.scala    From eventsourcing-intro   with Apache License 2.0
package eu.reactivesystems.league.impl

import akka.actor.{Actor, ActorLogging, Props, Status}
import akka.pattern.pipe
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.{EventEnvelope2, PersistenceQuery}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession

class LeagueProjection(jdbcSession: JdbcSession)
    extends Actor
    with ActorLogging {

  import DBOperations._

  override def receive: Receive = {
    case Status.Failure(ex) =>
      log.error(ex, "read side generation terminated")
      context.stop(self)
  }

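  // On start, resume the tagged event stream from the last stored offset and project each event into the read side.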
  override def preStart(): Unit = {
    val materializer = ActorMaterializer.create(context.system)
    val readJournal = PersistenceQuery
      .get(context.system)
      .readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
    import context.dispatcher

    val result = getOffset(jdbcSession)
      .flatMap(
        offset =>
          readJournal
            .eventsByTag(LeagueEvent.Tag.tag, offset)
            .mapAsync(1)(e => projectEvent(e))
            .runWith(Sink.ignore)(materializer))

    result pipeTo self
    ()
  }

  private def projectEvent(event: EventEnvelope2) =
    event.event match {
      case ClubRegistered(club) => addClub(jdbcSession, event.offset, club)
      case GamePlayed(game) => addGame(jdbcSession, event.offset, game)
      case ResultRevoked(game) => revokeResult(jdbcSession, event.offset, game)
    }
}

object LeagueProjection {
  val readSideId = "leagueProjection"

  def props(jdbcSession: JdbcSession) =
    Props(new LeagueProjection(jdbcSession))
} 
Example 20
Source File: AbstractCouchbaseSpec.scala    From akka-persistence-couchbase   with Apache License 2.0
package akka.persistence.couchbase.scaladsl

import akka.actor.{ActorRef, ActorSystem}
import akka.persistence.couchbase.{CouchbaseBucketSetup, TestActor}
import akka.persistence.query.PersistenceQuery
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.{TestKit, TestProbe, WithLogCapturing}
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.duration._

abstract class AbstractCouchbaseSpec(testName: String, config: Config)
    extends TestKit(
      ActorSystem(testName, config.withFallback(ConfigFactory.load()))
    )
    with WordSpecLike
    with BeforeAndAfterAll
    with Matchers
    with ScalaFutures
    with CouchbaseBucketSetup
    with WithLogCapturing {
  def this(testName: String) =
    this(
      testName,
      ConfigFactory.parseString("""
            couchbase-journal.read {
              page-size = 10
            }
            akka.loggers = ["akka.testkit.SilenceAllTestEventListener"]
            akka.loglevel=debug
          """)
    )

  var idCounter = 0
  def nextPersistenceId(): String = {
    idCounter += 1
    val id = Integer.toString(idCounter, 24)
    id.toString
  }

  // provides a unique persistence-id per test case and some initial persisted events
  protected trait Setup {
    lazy val probe = TestProbe()
    implicit def sender: ActorRef = probe.ref
    // note must be a def or lazy val or else it doesn't work (init order)
    def initialPersistedEvents: Int = 0
    def startPersistentActor(initialEvents: Int): (String, ActorRef) = {
      val pid = nextPersistenceId()
      system.log.debug("Starting actor with pid {}, and writing {} initial events", pid, initialPersistedEvents)
      val persistentActor = system.actorOf(TestActor.props(pid))
      if (initialEvents > 0) {
        for (i <- 1 to initialEvents) {
          persistentActor ! s"$pid-$i"
          probe.expectMsg(s"$pid-$i-done")
        }
      }
      (pid, persistentActor)
    }
    val (pid, persistentActor) = startPersistentActor(initialPersistedEvents)

    // no guarantee we can immediately read our own writes
    def readingOurOwnWrites[A](f: => A): A =
      awaitAssert(f, readOurOwnWritesTimeout, interval = 250.millis) // no need to bombard the db with retries
  }

  protected val noMsgTimeout = 100.millis
  protected val readOurOwnWritesTimeout = 10.seconds
  override implicit val patienceConfig: PatienceConfig = PatienceConfig(readOurOwnWritesTimeout)
  implicit val materializer: Materializer = ActorMaterializer()

  // #read-journal-access
  lazy val queries: CouchbaseReadJournal =
    PersistenceQuery(system).readJournalFor[CouchbaseReadJournal](CouchbaseReadJournal.Identifier)
  // #read-journal-access

  protected override def afterAll(): Unit = {
    super.afterAll()
    shutdown(system)
  }
} 
Example 21
Source File: FriendJournalReader.scala    From Akka-Cookbook   with MIT License
package com.packt.chapter6

import akka.actor.ActorSystem
import akka.persistence.Recovery
import akka.persistence.query.PersistenceQuery
import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import scala.concurrent.duration._

object FriendJournalReader extends App {
  implicit val system = ActorSystem()
  import system.dispatcher
  implicit val mat = ActorMaterializer()(system)
  val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier)

  val laura = system.actorOf(FriendActor.props("Laura", Recovery()))
  val maria = system.actorOf(FriendActor.props("Maria", Recovery()))
  laura ! AddFriend(Friend("Hector"))
  laura ! AddFriend(Friend("Nancy"))
  maria ! AddFriend(Friend("Oliver"))
  maria ! AddFriend(Friend("Steve"))
  system.scheduler.scheduleOnce(5.seconds, maria, AddFriend(Friend("Steve")))
  system.scheduler.scheduleOnce(10.seconds, maria, RemoveFriend(Friend("Oliver")))
  Thread.sleep(2000)

  queries.allPersistenceIds().map(id => system.log.info(s"Id received [$id]")).to(Sink.ignore).run()
  queries.eventsByPersistenceId("Laura").map(e => log(e.persistenceId, e.event)).to(Sink.ignore).run()
  queries.eventsByPersistenceId("Maria").map(e => log(e.persistenceId, e.event)).to(Sink.ignore).run()

  def log(id: String, evt: Any) = system.log.info(s"Id [$id] Event [$evt]")
} 
Example 22
Source File: CalculatorHistory.scala    From 006877   with MIT License
package aia.persistence.calculator

import akka.actor._

import akka.persistence.query.PersistenceQuery
import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
 
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink

object CalculatorHistory {
  def props = Props(new CalculatorHistory)
  def name = "calculator-history"
  case object GetHistory
  case class History(added: Int = 0, subtracted: Int = 0, divided: Int = 0, multiplied: Int = 0) {
    def incrementAdded = copy(added = added + 1)
    def incrementSubtracted= copy(subtracted = subtracted + 1)
    def incrementDivided = copy(divided = divided + 1)
    def incrementMultiplied = copy(multiplied = multiplied + 1)
  }
}

class CalculatorHistory extends Actor {
  import Calculator._
  import CalculatorHistory._

  val queries = PersistenceQuery(context.system).readJournalFor[LeveldbReadJournal](
    LeveldbReadJournal.Identifier)
  implicit val materializer = ActorMaterializer()
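  // Stream the calculator's journaled events to this actor so the history is rebuilt from the journal.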
  queries.eventsByPersistenceId(Calculator.name).runWith(Sink.actorRef(self, None))

  var history = History()
  
  def receive = {
    case _ : Added => history = history.incrementAdded
    case _ : Subtracted => history = history.incrementSubtracted
    case _ : Divided => history = history.incrementDivided
    case _ : Multiplied => history = history.incrementMultiplied
    case GetHistory => sender() ! history
  }
} 
Example 23
Source File: PaymentHistory.scala    From 006877   with MIT License
package aia.persistence

import akka.actor._

import akka.persistence.query.PersistenceQuery
import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
 
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink

object PaymentHistory {
  def props(shopperId: Long) = Props(new PaymentHistory(shopperId))
  def name(shopperId: Long) = s"payment_history_${shopperId}"

  case object GetHistory

  case class History(items: List[Item] = Nil) {
    def paid(paidItems: List[Item]) = {
      History(paidItems ++ items)
    }
  }
}

class PaymentHistory(shopperId: Long) extends Actor
    with ActorLogging {
  import PaymentHistory._

  val queries = PersistenceQuery(context.system).readJournalFor[LeveldbReadJournal](
    LeveldbReadJournal.Identifier)
  implicit val materializer = ActorMaterializer()
  queries.eventsByPersistenceId(Wallet.name(shopperId)).runWith(Sink.actorRef(self, None))

  var history = History()

  def receive = {
    case Wallet.Paid(items, _) => history = history.paid(items)
    case GetHistory => sender() ! history
  }
}