akka.persistence.query.EventEnvelope Scala Examples

The following examples show how to use akka.persistence.query.EventEnvelope. Each example is taken from an open-source project; the source file and project are noted above it.
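
Every query in these examples emits EventEnvelope values, which pair a replayed event with its offset, persistence id, and sequence number. As a minimal sketch of the destructuring pattern the examples rely on (MyEvent is a placeholder type, not part of any project below):

import akka.persistence.query.EventEnvelope

// Hypothetical event type, used only for this illustration.
final case class MyEvent(payload: String)

def handle(envelope: EventEnvelope): Unit = envelope match {
  // The extractor yields (offset, persistenceId, sequenceNr, event).
  case EventEnvelope(offset, persistenceId, sequenceNr, MyEvent(payload)) =>
    println(s"$persistenceId #$sequenceNr @ $offset: $payload")
  case other =>
    println(s"skipping unrecognized event ${other.event}")
}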
Example 1
Source File: PersonTest.scala    From akka-serialization-test   with Apache License 2.0
package com.github.dnvriend.domain

import akka.actor.{ ActorRef, Props }
import akka.pattern.ask
import akka.persistence.query.EventEnvelope
import akka.stream.scaladsl.{ Sink, Source }
import akka.testkit.TestProbe
import com.github.dnvriend.TestSpec
import com.github.dnvriend.domain.Person._
import com.github.dnvriend.persistence.ProtobufReader
import proto.person.Command._

class PersonTest extends TestSpec {

  import com.github.dnvriend.persistence.ProtobufFormats._

  def withPerson(id: String)(f: ActorRef ⇒ TestProbe ⇒ Unit): Unit = {
    val tp = TestProbe()
    val ref = system.actorOf(Props(new Person(id)))
    try f(ref)(tp) finally killActors(ref)
  }

  "Person" should "register a name" in {
    withPerson("p1") { ref ⇒ tp ⇒
      Source(List(RegisterNameCommand("dennis", "vriend")))
        .mapAsync(1)(ref ? _).runWith(Sink.ignore).futureValue
    }

    withPerson("p1") { ref ⇒ tp ⇒
      Source(List(RegisterNameCommand("dennis", "vriend")))
        .mapAsync(1)(ref ? _).runWith(Sink.ignore).futureValue
    }

    // note that persistence-query does not run events through the domain deserializer,
    // so the protobuf message must be deserialized inline
    eventsForPersistenceIdSource("p1").collect {
      case EventEnvelope(_, _, _, proto: NameRegisteredMessage) ⇒
        implicitly[ProtobufReader[NameRegisteredEvent]].read(proto)
    }.testProbe { tp ⇒
      tp.request(Int.MaxValue)
      tp.expectNext(NameRegisteredEvent("dennis", "vriend"))
      tp.expectNext(NameRegisteredEvent("dennis", "vriend"))
      tp.expectComplete()
    }
  }

  it should "update its name and surname" in {
    withPerson("p2") { ref ⇒ tp ⇒
      Source(List(RegisterNameCommand("dennis", "vriend"), ChangeNameCommand("jimi"), ChangeSurnameCommand("hendrix")))
        .mapAsync(1)(ref ? _).runWith(Sink.ignore).futureValue
    }

    eventsForPersistenceIdSource("p2").collect {
      case EventEnvelope(_, _, _, proto: NameRegisteredMessage) ⇒
        implicitly[ProtobufReader[NameRegisteredEvent]].read(proto)
      case EventEnvelope(_, _, _, proto: NameChangedMessage) ⇒
        implicitly[ProtobufReader[NameChangedEvent]].read(proto)
      case EventEnvelope(_, _, _, proto: SurnameChangedMessage) ⇒
        implicitly[ProtobufReader[SurnameChangedEvent]].read(proto)
    }.testProbe { tp ⇒
      tp.request(Int.MaxValue)
      tp.expectNext(NameRegisteredEvent("dennis", "vriend"))
      tp.expectNext(NameChangedEvent("jimi"))
      tp.expectNext(SurnameChangedEvent("hendrix"))
      tp.expectComplete()
    }
  }
} 
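
The test relies on eventsForPersistenceIdSource and testProbe helpers defined in TestSpec, which are not shown. A plausible shape for the source helper, assuming system is the test's ActorSystem and the LevelDB read journal is configured (the real TestSpec may be wired differently):

import akka.NotUsed
import akka.persistence.query.{ EventEnvelope, PersistenceQuery }
import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
import akka.stream.scaladsl.Source

// A current* query completes after replaying the stored events,
// which is why the probe above can call expectComplete().
def eventsForPersistenceIdSource(persistenceId: String): Source[EventEnvelope, NotUsed] =
  PersistenceQuery(system)
    .readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier)
    .currentEventsByPersistenceId(persistenceId, 0L, Long.MaxValue)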
Example 2
Source File: DynamoDBReadJournal.scala    From akka-persistence-dynamodb   with Apache License 2.0
package com.github.j5ik2o.akka.persistence.dynamodb.query.javadsl

import akka.NotUsed
import akka.persistence.query.javadsl._
import akka.persistence.query.{ EventEnvelope, Offset }
import akka.stream.javadsl.Source
import com.github.j5ik2o.akka.persistence.dynamodb.query.scaladsl.{ DynamoDBReadJournal => ScalaDynamoDBReadJournal }

class DynamoDBReadJournal(underlying: ScalaDynamoDBReadJournal)
    extends ReadJournal
    with CurrentPersistenceIdsQuery
    with PersistenceIdsQuery
    with CurrentEventsByPersistenceIdQuery
    with EventsByPersistenceIdQuery
    with CurrentEventsByTagQuery
    with EventsByTagQuery {

  override def currentPersistenceIds(): Source[String, NotUsed] = underlying.currentPersistenceIds().asJava

  override def persistenceIds(): Source[String, NotUsed] = underlying.persistenceIds().asJava

  override def currentEventsByPersistenceId(
      persistenceId: String,
      fromSequenceNr: Long,
      toSequenceNr: Long
  ): Source[EventEnvelope, NotUsed] =
    underlying.currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava

  override def eventsByPersistenceId(
      persistenceId: String,
      fromSequenceNr: Long,
      toSequenceNr: Long
  ): Source[EventEnvelope, NotUsed] =
    underlying.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava

  override def currentEventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] =
    underlying.currentEventsByTag(tag, offset).asJava

  override def eventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] =
    underlying.eventsByTag(tag, offset).asJava

} 
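
Callers would normally look this wrapper up through PersistenceQuery instead of constructing it by hand. A sketch, assuming the journal is registered under the j5ik2o.dynamo-db-read-journal plugin id used in the configuration of the next example:

import akka.actor.ActorSystem
import akka.persistence.query.PersistenceQuery

val system = ActorSystem("docs")

// Resolves the javadsl read journal registered under the given plugin id.
val readJournal: DynamoDBReadJournal =
  PersistenceQuery
    .get(system)
    .getReadJournalFor(classOf[DynamoDBReadJournal], "j5ik2o.dynamo-db-read-journal")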
Example 3
Source File: CurrentPersistenceIds1Test.scala    From akka-persistence-dynamodb   with Apache License 2.0
package com.github.j5ik2o.akka.persistence.dynamodb.query.query

import java.net.URI

import akka.pattern.ask
import akka.persistence.query.{ EventEnvelope, Sequence }
import com.github.j5ik2o.akka.persistence.dynamodb.query.QueryJournalSpec
import com.github.j5ik2o.akka.persistence.dynamodb.utils.{ DynamoDBSpecSupport, RandomPortUtil }
import com.github.j5ik2o.reactive.aws.dynamodb.DynamoDbAsyncClient
import com.typesafe.config.{ Config, ConfigFactory }
import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.services.dynamodb.{ DynamoDbAsyncClient => JavaDynamoDbAsyncClient }

import scala.concurrent.Future
import scala.concurrent.duration._

abstract class CurrentPersistenceIds1Test(config: Config) extends QueryJournalSpec(config) {

  it should "not find any events for unknown pid" in
  withCurrentEventsByPersistenceId()("unkown-pid", 0L, Long.MaxValue) { tp =>
    tp.request(Int.MaxValue)
    tp.expectComplete()
  }

  it should "find events from an offset" in {
    withTestActors() { (actor1, actor2, actor3) =>
      Future.sequence(Range.inclusive(1, 4).map(_ => actor1 ? "a")).toTry should be a Symbol("success")

      withCurrentEventsByPersistenceId()("my-1", 2, 3) { tp =>
        tp.request(Int.MaxValue)
        tp.expectNext(new EventEnvelope(Sequence(2), "my-1", 2, "a-2", 0L))
        tp.expectNext(new EventEnvelope(Sequence(3), "my-1", 3, "a-3", 0L))
        tp.expectComplete()
      }
    }
  }
}

object DynamoDBCurrentPersistenceIds1Test {
  val dynamoDBPort = RandomPortUtil.temporaryServerPort()
}

class DynamoDBCurrentPersistenceIds1Test
    extends CurrentPersistenceIds1Test(
      ConfigFactory
        .parseString(
          s"""
           |j5ik2o.dynamo-db-journal {
           |  query-batch-size = 1
           |  dynamo-db-client {
           |    endpoint = "http://127.0.0.1:${DynamoDBCurrentPersistenceIds1Test.dynamoDBPort}/"
           |  }
           |}
           |
           |j5ik2o.dynamo-db-snapshot.dynamo-db-client {
           |  endpoint = "http://127.0.0.1:${DynamoDBCurrentPersistenceIds1Test.dynamoDBPort}/"
           |}
           |
           |j5ik2o.dynamo-db-read-journal {
           |  query-batch-size = 1
           |  dynamo-db-client {
           |    endpoint = "http://127.0.0.1:${DynamoDBCurrentPersistenceIds1Test.dynamoDBPort}/"
           |  }
           |}
           """.stripMargin
        ).withFallback(ConfigFactory.load("query-reference"))
    )
    with DynamoDBSpecSupport {

  override implicit val pc: PatienceConfig = PatienceConfig(30 seconds, 1 seconds)

  override protected lazy val dynamoDBPort: Int = DynamoDBCurrentPersistenceIds1Test.dynamoDBPort

  val underlying: JavaDynamoDbAsyncClient = JavaDynamoDbAsyncClient
    .builder()
    .credentialsProvider(
      StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey))
    )
    .endpointOverride(URI.create(dynamoDBEndpoint))
    .build()

  override def dynamoDbAsyncClient: DynamoDbAsyncClient = DynamoDbAsyncClient(underlying)

  override def afterAll(): Unit = {
    underlying.close()
    super.afterAll()
  }

  before { createTable }

  after { deleteTable }

} 
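
The spec exercises currentEventsByPersistenceId, which completes once the stored events in the requested range have been delivered; the live eventsByPersistenceId variant stays open. A sketch of the distinction, assuming a scaladsl read journal and an implicit materializer in scope:

// Terminates after delivering stored events 2..3, as asserted above.
readJournal
  .currentEventsByPersistenceId("my-1", 2L, 3L)
  .runForeach(env => println(s"${env.sequenceNr}: ${env.event}"))

// Never completes on its own: keeps emitting as new events are persisted.
val live = readJournal.eventsByPersistenceId("my-1", 2L, Long.MaxValue)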
Example 4
Source File: CurrentEventsByTagTest4.scala    From akka-persistence-dynamodb   with Apache License 2.0
package com.github.j5ik2o.akka.persistence.dynamodb.query.query

import java.net.URI

import akka.pattern.ask
import akka.persistence.query.{ EventEnvelope, Sequence }
import com.github.j5ik2o.akka.persistence.dynamodb.query.QueryJournalSpec
import com.github.j5ik2o.akka.persistence.dynamodb.utils.{ DynamoDBSpecSupport, RandomPortUtil }
import com.github.j5ik2o.reactive.aws.dynamodb.DynamoDbAsyncClient
import com.typesafe.config.{ Config, ConfigFactory }
import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.services.dynamodb.{ DynamoDbAsyncClient => JavaDynamoDbAsyncClient }

import scala.concurrent.duration._

abstract class CurrentEventsByTagTest4(config: Config) extends QueryJournalSpec(config) {

  it should "persist and find a tagged event with one tag" in
  withTestActors() { (actor1, actor2, actor3) =>
    (actor1 ? withTags(1, "one2")).toTry should be a Symbol("success")

    withClue("query should find the event by tag") {
      withCurrentEventsByTag()("one2", 0) { tp =>
        tp.request(Int.MaxValue)
        tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => }
        tp.expectComplete()
      }
    }

    withClue("query should find the event by persistenceId") {
      withCurrentEventsByPersistenceId()("my-1", 1, 1) { tp =>
        tp.request(Int.MaxValue)
        tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => }
        tp.expectComplete()
      }
    }
  }
}

object DynamoDBCurrentEventsByTagTest4 {
  val dynamoDBPort = RandomPortUtil.temporaryServerPort()
}

class DynamoDBCurrentEventsByTagTest4
    extends CurrentEventsByTagTest4(
      ConfigFactory
        .parseString(
          s"""
             |j5ik2o.dynamo-db-journal{
             |  query-batch-size = 1
             |  dynamo-db-client {
             |    endpoint = "http://127.0.0.1:${DynamoDBCurrentEventsByTagTest4.dynamoDBPort}/"
             |  }
             |}
             |
             |j5ik2o.dynamo-db-snapshot.dynamo-db-client {
             |  endpoint = "http://127.0.0.1:${DynamoDBCurrentEventsByTagTest4.dynamoDBPort}/"
             |}
             |
             |j5ik2o.dynamo-db-read-journal {
             |  query-batch-size = 1
             |  dynamo-db-client {
             |    endpoint = "http://127.0.0.1:${DynamoDBCurrentEventsByTagTest4.dynamoDBPort}/"
             |  }
             |}
           """.stripMargin
        ).withFallback(ConfigFactory.load("query-reference"))
    )
    with DynamoDBSpecSupport {

  override implicit val pc: PatienceConfig = PatienceConfig(30 seconds, 1 seconds)

  override protected lazy val dynamoDBPort: Int = DynamoDBCurrentEventsByTagTest4.dynamoDBPort

  val underlying: JavaDynamoDbAsyncClient = JavaDynamoDbAsyncClient
    .builder()
    .credentialsProvider(
      StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey))
    )
    .endpointOverride(URI.create(dynamoDBEndpoint))
    .build()

  override def dynamoDbAsyncClient: DynamoDbAsyncClient = DynamoDbAsyncClient(underlying)

  override def afterAll(): Unit = {
    underlying.close()
    super.afterAll()
  }

  before { createTable }

  after { deleteTable }

} 
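
withTags is a helper from QueryJournalSpec; the underlying Akka mechanism is wrapping the payload in akka.persistence.journal.Tagged, typically from a write event adapter. A minimal sketch (adapter name and tag are illustrative):

import akka.persistence.journal.{ Tagged, WriteEventAdapter }

// Wraps every outgoing event so the read side can find it via
// currentEventsByTag("one2", ...) as in the test above.
class OneTagAdapter extends WriteEventAdapter {
  override def manifest(event: Any): String = ""
  override def toJournal(event: Any): Any   = Tagged(event, Set("one2"))
}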
Example 5
Source File: ProjectionSpi.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.internal.spi.projection

import akka.actor.ActorSystem
import akka.annotation.InternalStableApi
import akka.persistence.query.EventEnvelope
import akka.persistence.query.Offset

import scala.concurrent.ExecutionContext
import scala.concurrent.Future

object ProjectionSpi {

  @InternalStableApi
  private[lagom] def startProcessing(system: ActorSystem, tagName: String, envelope: EventEnvelope): EventEnvelope =
    envelope

  @InternalStableApi
  private[lagom] def afterUserFlow(projectionName: String, tagName: String, offset: Offset): Offset = offset

  @InternalStableApi
  private[lagom] def completedProcessing(
      projectionName: String,
      tagName: String,
      offset: Offset
  ): Offset = offset

  @InternalStableApi
  private[lagom] def failed(
      actorSystem: ActorSystem,
      projectionName: String,
      partitionName: String,
      exception: Throwable
  ): Unit = ()

} 
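
These methods are deliberate no-ops: they are @InternalStableApi hooks that monitoring agents can weave advice around. Purely to illustrate the intended call order (the methods are private[lagom], so this is not code you could write outside Lagom):

// Intended order inside a projection stream:
//   startProcessing -> user flow -> afterUserFlow -> completedProcessing
def instrument(
    system: ActorSystem,
    projectionName: String,
    tagName: String,
    events: akka.stream.scaladsl.Source[EventEnvelope, akka.NotUsed]
): akka.stream.scaladsl.Source[Offset, akka.NotUsed] =
  events
    .map(env => ProjectionSpi.startProcessing(system, tagName, env))
    .map(env => env.offset) // the user's event handler would run here
    .map(off => ProjectionSpi.afterUserFlow(projectionName, tagName, off))
    .map(off => ProjectionSpi.completedProcessing(projectionName, tagName, off))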
Example 6
Source File: EventRoutesSpec.scala    From nexus   with Apache License 2.0
package ch.epfl.bluebrain.nexus.kg.routes

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.headers.`Last-Event-ID`
import akka.http.scaladsl.model.sse.ServerSentEvent
import akka.persistence.query.{EventEnvelope, NoOffset, Offset, Sequence}
import akka.stream.scaladsl.Source
import ch.epfl.bluebrain.nexus.iam.acls.Acls
import ch.epfl.bluebrain.nexus.iam.realms.Realms
import ch.epfl.bluebrain.nexus.iam.types.{Caller, Permission}
import ch.epfl.bluebrain.nexus.kg.resources.Event
import ch.epfl.bluebrain.nexus.kg.routes.EventRoutesSpec.TestableEventRoutes
import ch.epfl.bluebrain.nexus.service.config.ServiceConfig
import ch.epfl.bluebrain.nexus.rdf.Iri.Path._
import io.circe.Encoder
import monix.eval.Task

class EventRoutesSpec extends EventsSpecBase {

  private val aclsApi = mock[Acls[Task]]
  private val realms  = mock[Realms[Task]]

  val eventRoutes = new TestableEventRoutes(events, aclsApi, realms, caller)

  "EventRoutes" should {
    val read = Permission.unsafe("resources/read")
    aclsApi.hasPermission("org" / "project", read)(caller) shouldReturn Task.pure(true)
    aclsApi.hasPermission(/ + "org", read)(caller) shouldReturn Task.pure(true)

    "return all events for a project" in {
      Get("/") ~> eventRoutes.routes(project) ~> check {
        val expected = jsonContentOf("/events/events.json").asArray.value
        status shouldEqual StatusCodes.OK
        responseAs[String] shouldEqual eventStreamFor(expected)
      }
    }

    "return all events for a project from the last seen" in {
      Get("/").addHeader(`Last-Event-ID`(0.toString)) ~> eventRoutes.routes(project) ~> check {
        val expected = jsonContentOf("/events/events.json").asArray.value
        status shouldEqual StatusCodes.OK
        responseAs[String] shouldEqual eventStreamFor(expected, 1)
      }
    }

    "return all events for an organization" in {
      Get("/") ~> eventRoutes.routes(organization) ~> check {
        val expected = jsonContentOf("/events/events.json").asArray.value
        status shouldEqual StatusCodes.OK
        responseAs[String] shouldEqual eventStreamFor(expected)
      }
    }

    "return all events for an organization from the last seen" in {
      Get("/").addHeader(`Last-Event-ID`(0.toString)) ~> eventRoutes.routes(organization) ~> check {
        val expected = jsonContentOf("/events/events.json").asArray.value
        status shouldEqual StatusCodes.OK
        responseAs[String] shouldEqual eventStreamFor(expected, 1)
      }
    }
  }

}

object EventRoutesSpec {

  class TestableEventRoutes(events: List[Event], acls: Acls[Task], realms: Realms[Task], caller: Caller)(implicit
      as: ActorSystem,
      config: ServiceConfig
  ) extends EventRoutes(acls, realms, caller) {

    private val envelopes = events.zipWithIndex.map {
      case (ev, idx) =>
        EventEnvelope(Sequence(idx.toLong), "persistenceid", 1L, ev, 1L)
    }

    override protected def source(
        tag: String,
        offset: Offset
    )(implicit enc: Encoder[Event]): Source[ServerSentEvent, NotUsed] = {
      val toDrop = offset match {
        case NoOffset    => 0
        case Sequence(v) => v + 1
      }
      Source(envelopes).drop(toDrop).flatMapConcat(ee => Source(eventToSse(ee).toList))
    }
  }
} 
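
eventToSse is inherited from EventRoutes and not shown. A plausible shape, assuming the SSE id carries the Sequence offset so that the Last-Event-ID resumption tested above lines up with the drop logic in source:

import akka.http.scaladsl.model.sse.ServerSentEvent

// Hypothetical converter: encode the event as JSON and use the
// sequence offset as the SSE id, enabling Last-Event-ID resumption.
def eventToSse(ee: EventEnvelope)(implicit enc: Encoder[Event]): Option[ServerSentEvent] =
  ee.event match {
    case ev: Event =>
      val id = ee.offset match {
        case Sequence(value) => Some(value.toString)
        case _               => None
      }
      Some(ServerSentEvent(enc(ev).noSpaces, eventType = None, id = id, retry = None))
    case _ => None
  }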
Example 7
Source File: GlobalEventRoutesSpec.scala    From nexus   with Apache License 2.0
package ch.epfl.bluebrain.nexus.kg.routes

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.headers.`Last-Event-ID`
import akka.http.scaladsl.model.sse.ServerSentEvent
import akka.persistence.query.{EventEnvelope, NoOffset, Offset, Sequence}
import akka.stream.scaladsl.Source
import ch.epfl.bluebrain.nexus.iam.acls.Acls
import ch.epfl.bluebrain.nexus.iam.realms.Realms
import ch.epfl.bluebrain.nexus.iam.types.Caller
import ch.epfl.bluebrain.nexus.kg.resources.Event
import ch.epfl.bluebrain.nexus.kg.routes.GlobalEventRoutesSpec.TestableEventRoutes
import ch.epfl.bluebrain.nexus.rdf.Iri.Path
import ch.epfl.bluebrain.nexus.service.config.ServiceConfig
import io.circe.Encoder
import monix.eval.Task

class GlobalEventRoutesSpec extends EventsSpecBase {

  private val aclsApi = mock[Acls[Task]]
  private val realms  = mock[Realms[Task]]

  val routes = new TestableEventRoutes(events, aclsApi, realms, caller).routes
  aclsApi.hasPermission(Path./, read)(caller) shouldReturn Task.pure(true)

  "GlobalEventRoutes" should {

    "return all events for a project" in {
      Get("/") ~> routes ~> check {
        val expected = jsonContentOf("/events/events.json").asArray.value
        status shouldEqual StatusCodes.OK
        responseAs[String] shouldEqual eventStreamFor(expected)
      }
    }

    "return all events for a project from the last seen" in {
      Get("/").addHeader(`Last-Event-ID`(0.toString)) ~> routes ~> check {
        val expected = jsonContentOf("/events/events.json").asArray.value
        status shouldEqual StatusCodes.OK
        responseAs[String] shouldEqual eventStreamFor(expected, 1)
      }
    }
  }
}

object GlobalEventRoutesSpec {

  class TestableEventRoutes(events: List[Event], acls: Acls[Task], realms: Realms[Task], caller: Caller)(implicit
      as: ActorSystem,
      config: ServiceConfig
  ) extends GlobalEventRoutes(acls, realms, caller) {

    private val envelopes = events.zipWithIndex.map {
      case (ev, idx) =>
        EventEnvelope(Sequence(idx.toLong), "persistenceid", 1L, ev, 1L)
    }

    override protected def source(
        tag: String,
        offset: Offset
    )(implicit enc: Encoder[Event]): Source[ServerSentEvent, NotUsed] = {
      val toDrop = offset match {
        case NoOffset    => 0
        case Sequence(v) => v + 1
      }
      Source(envelopes).drop(toDrop).flatMapConcat(ee => Source(eventToSse(ee).toList))
    }
  }
} 
Example 8
Source File: EventsByTagPubsubSpec.scala    From akka-persistence-cassandra   with Apache License 2.0
package akka.persistence.cassandra.query

import java.time.{ LocalDate, ZoneOffset }

import akka.cluster.Cluster
import akka.persistence.cassandra.CassandraSpec
import akka.persistence.cassandra.journal.JournalSettings
import akka.persistence.query.{ EventEnvelope, NoOffset }
import akka.stream.testkit.scaladsl.TestSink
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

object EventsByTagPubsubSpec {
  val today = LocalDate.now(ZoneOffset.UTC)

  val config = ConfigFactory.parseString(s"""
    akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
    akka.actor.serialize-messages = off
    akka.actor.serialize-creators = off
    akka.remote.netty.tcp.port = 0
    akka.remote.artery.canonical.port = 0
    akka.remote.netty.tcp.hostname = "127.0.0.1"
    akka.persistence.cassandra {
      
      query.refresh-interval = 10s

      events-by-tag {
        pubsub-notification = on
        flush-interval = 0ms
        eventual-consistency-delay = 0s
      }
    }
    """).withFallback(EventsByTagSpec.config)
}

class EventsByTagPubsubSpec extends CassandraSpec(EventsByTagPubsubSpec.config) {

  val journalSettings = new JournalSettings(system, system.settings.config.getConfig("akka.persistence.cassandra"))

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    Cluster(system).join(Cluster(system).selfAddress)
  }

  "Cassandra query getEventsByTag when running clustered with pubsub enabled" must {
    "present new events to an ongoing getEventsByTag stream long before polling would kick in" in {
      val actor = system.actorOf(TestActor.props("EventsByTagPubsubSpec_a"))

      val blackSrc = queries.eventsByTag(tag = "black", offset = NoOffset)
      val probe = blackSrc.runWith(TestSink.probe[Any])
      probe.request(2)
      probe.expectNoMessage(300.millis)

      actor ! "a black car"
      probe.within(5.seconds) { // long before refresh-interval, which is 10s
        probe.expectNextPF { case e @ EventEnvelope(_, _, _, "a black car") => e }
      }
    }
  }
} 
Example 9
Source File: EventsByTagCrashSpec.scala    From akka-persistence-cassandra   with Apache License 2.0
package akka.persistence.cassandra

import akka.NotUsed
import akka.persistence.cassandra.TestTaggingActor.{ Ack, Crash }
import akka.persistence.query.{ EventEnvelope, NoOffset }
import akka.stream.scaladsl.Source
import akka.stream.testkit.scaladsl.TestSink

import scala.concurrent.duration._

class EventsByTagCrashSpec extends CassandraSpec(EventsByTagRestartSpec.config) {

  val waitTime = 100.milliseconds

  "EventsByTag" must {

    "should handle crashes of the persistent actor" in {
      // crash the actor many times, persist 5 events each time
      val crashEvery = 5
      val crashNr = 20
      val msgs = crashEvery * crashNr
      val p2 = system.actorOf(TestTaggingActor.props("p2", Set("blue")))
      (1 to msgs).foreach { cn =>
        if (cn % crashEvery == 0) {
          p2 ! Crash
        }
        val msg = s"msg $cn"
        p2 ! msg
        expectMsg(Ack)
      }
      val blueTags: Source[EventEnvelope, NotUsed] = queryJournal.eventsByTag(tag = "blue", offset = NoOffset)
      val tagProbe = blueTags.runWith(TestSink.probe[EventEnvelope](system))
      (1L to msgs).foreach { m =>
        val expected = s"msg $m"
        tagProbe.request(1)
        tagProbe.expectNext().event shouldEqual expected
      }
      tagProbe.expectNoMessage(250.millis)
      tagProbe.cancel()

    }
  }
} 
Example 10
Source File: MigrateV12ToV13.scala    From nexus-kg   with Apache License 2.0
package ch.epfl.bluebrain.nexus.kg

import akka.actor.ActorSystem
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.{EventEnvelope, NoOffset, PersistenceQuery}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.commons.test.Resources
import ch.epfl.bluebrain.nexus.iam.client.types._
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.kg.resources.Event.{Created, Updated}
import ch.epfl.bluebrain.nexus.kg.resources.{OrganizationRef, ResId, Views}
import com.typesafe.scalalogging.Logger
import io.circe.Json
import io.circe.parser.parse
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.schedulers.CanBlock

import scala.concurrent.Future

object MigrateV12ToV13 extends Resources {
  private val log                                     = Logger[MigrateV12ToV13.type]
  private val newMapping                              = jsonContentOf("/elasticsearch/mapping.json")
  private val defaultEsId                             = nxv.defaultElasticSearchIndex.value
  private implicit val mockedAcls: AccessControlLists = AccessControlLists.empty

  def migrate(
      views: Views[Task],
      adminClient: AdminClient[Task]
  )(implicit config: AppConfig, as: ActorSystem, sc: Scheduler, pm: CanBlock): Unit = {

    implicit val token: Option[AuthToken] = config.iam.serviceAccountToken

    def checkAndUpdateMapping(id: ResId, rev: Long, source: Json)(
        implicit project: Project,
        caller: Caller
    ): Task[Unit] = {

      source.hcursor.get[String]("mapping").flatMap(parse) match {
        case Left(err) =>
          log.error(s"Error while fetching mapping for view id ${id.show}. Reason: '$err'")
          Task.unit
        case Right(mapping) if mapping == newMapping =>
          Task.unit
        case _ =>
          views.update(id, rev, source deepMerge Json.obj("mapping" -> newMapping)).value.flatMap {
            case Left(err) =>
              log.error(s"Error updating the view with id '${id.show}' and rev '$rev'. Reason: '$err'")
              Task.unit
            case _ =>
              log.info(s"View with id '${id.show}' and rev '$rev' was successfully updated.")
              Task.unit
          }
      }
    }

    def fetchProject(orgRef: OrganizationRef, id: ResId)(f: Project => Task[Unit]): Task[Unit] = {
      adminClient.fetchProject(orgRef.id, id.parent.id).flatMap {
        case Some(project) => f(project)
        case None =>
          log.error(s"Project with id '${id.parent.id}' was not found for view with id '${id.show}'")
          Task.unit

      }
    }

    log.info("Migrating views mappings.")
    val pq = PersistenceQuery(as).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
    Task
      .fromFuture {
        pq.currentEventsByTag(s"type=${nxv.ElasticSearchView.value.asString}", NoOffset)
          .mapAsync(1) {
            case EventEnvelope(_, _, _, Created(id, orgRef, _, _, source, _, subject)) if id.value == defaultEsId =>
              fetchProject(orgRef, id) { project =>
                checkAndUpdateMapping(id, 1L, source)(project, Caller(subject, Set(subject)))
              }.runToFuture
            case EventEnvelope(_, _, _, Updated(id, orgRef, rev, _, source, _, subject)) if id.value == defaultEsId =>
              fetchProject(orgRef, id) { project =>
                checkAndUpdateMapping(id, rev, source)(project, Caller(subject, Set(subject)))
              }.runToFuture
            case _ =>
              Future.unit

          }
          .runFold(0) {
            case (acc, _) =>
              if (acc % 10 == 0) log.info(s"Processed '$acc' persistence ids.")
              acc + 1
          }
          .map(_ => ())
      }
      .runSyncUnsafe()
    log.info("Finished migrating views mappings.")
  }

} 
Example 11
Source File: EventRoutesSpec.scala    From nexus-kg   with Apache License 2.0
package ch.epfl.bluebrain.nexus.kg.routes

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.headers.`Last-Event-ID`
import akka.http.scaladsl.model.sse.ServerSentEvent
import akka.persistence.query.{EventEnvelope, NoOffset, Offset, Sequence}
import akka.stream.scaladsl.Source
import ch.epfl.bluebrain.nexus.iam.client.types._
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.resources.Event
import ch.epfl.bluebrain.nexus.kg.routes.EventRoutesSpec.TestableEventRoutes
import io.circe.Encoder

class EventRoutesSpec extends EventsSpecBase {

  val eventRoutes = new TestableEventRoutes(events, acls, caller)

  "EventRoutes" should {

    "return all events for a project" in {
      Get("/") ~> eventRoutes.routes(project) ~> check {
        val expected = jsonContentOf("/events/events.json").asArray.value
        status shouldEqual StatusCodes.OK
        responseAs[String] shouldEqual eventStreamFor(expected)
      }
    }

    "return all events for a project from the last seen" in {
      Get("/").addHeader(`Last-Event-ID`(0.toString)) ~> eventRoutes.routes(project) ~> check {
        val expected = jsonContentOf("/events/events.json").asArray.value
        status shouldEqual StatusCodes.OK
        responseAs[String] shouldEqual eventStreamFor(expected, 1)
      }
    }

    "return all events for an organization" in {
      Get("/") ~> eventRoutes.routes(organization) ~> check {
        val expected = jsonContentOf("/events/events.json").asArray.value
        status shouldEqual StatusCodes.OK
        responseAs[String] shouldEqual eventStreamFor(expected)
      }
    }

    "return all events for an organization from the last seen" in {
      Get("/").addHeader(`Last-Event-ID`(0.toString)) ~> eventRoutes.routes(organization) ~> check {
        val expected = jsonContentOf("/events/events.json").asArray.value
        status shouldEqual StatusCodes.OK
        responseAs[String] shouldEqual eventStreamFor(expected, 1)
      }
    }
  }

}

object EventRoutesSpec {

  class TestableEventRoutes(events: List[Event], acls: AccessControlLists, caller: Caller)(
      implicit as: ActorSystem,
      config: AppConfig
  ) extends EventRoutes(acls, caller) {

    private val envelopes = events.zipWithIndex.map {
      case (ev, idx) =>
        EventEnvelope(Sequence(idx.toLong), "persistenceid", 1L, ev)
    }

    override protected def source(
        tag: String,
        offset: Offset
    )(implicit enc: Encoder[Event]): Source[ServerSentEvent, NotUsed] = {
      val toDrop = offset match {
        case NoOffset    => 0
        case Sequence(v) => v + 1
      }
      Source(envelopes).drop(toDrop).flatMapConcat(ee => Source(eventToSse(ee).toList))
    }
  }
} 
Example 12
Source File: GlobalEventRoutesSpec.scala    From nexus-kg   with Apache License 2.0
package ch.epfl.bluebrain.nexus.kg.routes

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.headers.`Last-Event-ID`
import akka.http.scaladsl.model.sse.ServerSentEvent
import akka.persistence.query.{EventEnvelope, NoOffset, Offset, Sequence}
import akka.stream.scaladsl.Source
import ch.epfl.bluebrain.nexus.iam.client.types.{AccessControlLists, Caller}
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.resources.Event
import ch.epfl.bluebrain.nexus.kg.routes.GlobalEventRoutesSpec.TestableEventRoutes
import io.circe.Encoder

class GlobalEventRoutesSpec extends EventsSpecBase {

  val routes = new TestableEventRoutes(events, acls, caller).routes

  "GlobalEventRoutes" should {

    "return all events for a project" in {
      Get("/") ~> routes ~> check {
        val expected = jsonContentOf("/events/events.json").asArray.value
        status shouldEqual StatusCodes.OK
        responseAs[String] shouldEqual eventStreamFor(expected)
      }
    }

    "return all events for a project from the last seen" in {
      Get("/").addHeader(`Last-Event-ID`(0.toString)) ~> routes ~> check {
        val expected = jsonContentOf("/events/events.json").asArray.value
        status shouldEqual StatusCodes.OK
        responseAs[String] shouldEqual eventStreamFor(expected, 1)
      }
    }
  }
}

object GlobalEventRoutesSpec {

  class TestableEventRoutes(events: List[Event], acls: AccessControlLists, caller: Caller)(
      implicit as: ActorSystem,
      config: AppConfig
  ) extends GlobalEventRoutes(acls, caller) {

    private val envelopes = events.zipWithIndex.map {
      case (ev, idx) =>
        EventEnvelope(Sequence(idx.toLong), "persistenceid", 1L, ev)
    }

    override protected def source(
        tag: String,
        offset: Offset
    )(implicit enc: Encoder[Event]): Source[ServerSentEvent, NotUsed] = {
      val toDrop = offset match {
        case NoOffset    => 0
        case Sequence(v) => v + 1
      }
      Source(envelopes).drop(toDrop).flatMapConcat(ee => Source(eventToSse(ee).toList))
    }
  }
} 
Example 13
Source File: UserRepository.scala    From gabbler   with Apache License 2.0
package de.heikoseeberger.gabbler.user

import akka.NotUsed
import akka.actor.{ ActorLogging, Props }
import akka.persistence.PersistentActor
import akka.persistence.query.EventEnvelope
import akka.persistence.query.scaladsl.EventsByPersistenceIdQuery
import akka.stream.scaladsl.Source

object UserRepository {

  sealed trait UserEvent

  final case object GetUsers
  final case class Users(users: Set[User])

  final case class AddUser(username: String, nickname: String, email: String)
  final case class UserAdded(user: User) extends UserEvent
  final case class UsernameTaken(username: String)

  final case class RemoveUser(id: Long)
  final case class UserRemoved(user: User) extends UserEvent
  final case class IdUnknown(id: Long)

  final case class GetUserEvents(fromSeqNo: Long)
  final case class UserEvents(userEvents: Source[(Long, UserEvent), NotUsed])

  final case class User(id: Long, username: String, nickname: String, email: String)

  final val Name = "user-repository"

  def apply(readJournal: EventsByPersistenceIdQuery): Props =
    Props(new UserRepository(readJournal))
}

final class UserRepository(readJournal: EventsByPersistenceIdQuery)
    extends PersistentActor
    with ActorLogging {
  import UserRepository._

  override val persistenceId = Name

  private var users = Map.empty[String, User]

  override def receiveCommand = {
    case GetUsers                           => sender() ! Users(users.valuesIterator.to[Set])
    case AddUser(username, nickname, email) => handleAddUser(username, nickname, email)
    case RemoveUser(id)                     => handleRemoveUser(id)
    case GetUserEvents(fromSeqNo)           => handleGetUserEvents(fromSeqNo)
  }

  override def receiveRecover = {
    case UserAdded(user)   => users += user.username -> user
    case UserRemoved(user) => users -= user.username
  }

  private def handleAddUser(username: String, nickname: String, email: String) = {
    def add() =
      persist(UserAdded(User(lastSequenceNr, username, nickname, email))) { userAdded =>
        receiveRecover(userAdded)
        log.info("Added user with username {}", username)
        sender() ! userAdded
      }
    if (!users.contains(username)) add() else sender() ! UsernameTaken(username)
  }

  private def handleRemoveUser(id: Long) = {
    def remove(user: User) =
      persist(UserRemoved(user)) { userRemoved =>
        receiveRecover(userRemoved)
        log.info("Removed user with id {} and username {}", id, user.username)
        sender() ! userRemoved
      }
    users.valuesIterator.find(_.id == id) match {
      case Some(user) => remove(user)
      case None       => sender() ! IdUnknown(id)
    }
  }

  private def handleGetUserEvents(fromSeqNo: Long) = {
    val userEvents =
      readJournal
        .eventsByPersistenceId(Name, fromSeqNo, Long.MaxValue)
        .collect { case EventEnvelope(_, _, seqNo, event: UserEvent) => seqNo -> event }
    sender() ! UserEvents(userEvents)
  }
} 
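
A client obtains the stream by asking the repository. A minimal sketch, assuming an implicit materializer, execution context, and a userRepository ActorRef in scope:

import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._

implicit val timeout: Timeout = 3.seconds

// Request all user events from sequence number 1 and print them as they arrive.
(userRepository ? UserRepository.GetUserEvents(1L))
  .mapTo[UserRepository.UserEvents]
  .foreach {
    case UserRepository.UserEvents(events) =>
      events.runForeach { case (seqNo, event) => println(s"$seqNo: $event") }
  }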
Example 14
Source File: PersonEventMapper.scala    From apache-spark-test   with Apache License 2.0
package com.github.dnvriend.spark.mapper

import akka.persistence.jdbc.spark.sql.execution.streaming.EventMapper
import akka.persistence.query.EventEnvelope
import com.github.dnvriend.spark.datasources.person.Person
import org.apache.spark.sql.{ Row, SQLContext }
import org.apache.spark.sql.types._

class PersonEventMapper extends EventMapper {
  override def row(envelope: EventEnvelope, sqlContext: SQLContext): Row = envelope match {
    case EventEnvelope(offset, persistenceId, sequenceNr, Person(id, name, age)) =>
      Row(offset, persistenceId, sequenceNr, id, name, age)
  }

  override def schema: StructType =
    PersonEventMapper.schema
}

object PersonEventMapper {
  val schema = StructType(Array(
    StructField("offset", LongType, nullable = false),
    StructField("persistence_id", StringType, nullable = false),
    StructField("sequence_number", LongType, nullable = false),
    StructField("id", LongType, nullable = false),
    StructField("name", StringType, nullable = true),
    StructField("age", IntegerType, nullable = true)
  ))
} 
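
The EventMapper contract is small: row converts one envelope into a Spark Row, and schema names the resulting columns. A second mapper following the same pattern, for a hypothetical Account event (illustrative only):

// Hypothetical event type, for illustration only.
final case class Account(id: Long, balance: Double)

class AccountEventMapper extends EventMapper {
  override def row(envelope: EventEnvelope, sqlContext: SQLContext): Row = envelope match {
    case EventEnvelope(offset, persistenceId, sequenceNr, Account(id, balance)) =>
      Row(offset, persistenceId, sequenceNr, id, balance)
  }

  override def schema: StructType = StructType(Array(
    StructField("offset", LongType, nullable = false),
    StructField("persistence_id", StringType, nullable = false),
    StructField("sequence_number", LongType, nullable = false),
    StructField("id", LongType, nullable = false),
    StructField("balance", DoubleType, nullable = false)
  ))
}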
Example 15
Source File: UserProjection.scala    From whirlwind-tour-akka-typed   with Apache License 2.0
package de.heikoseeberger.wtat

import akka.actor.Scheduler
import akka.actor.typed.{ ActorRef, Behavior }
import akka.actor.typed.scaladsl.Actor
import akka.actor.typed.scaladsl.AskPattern.Askable
import akka.cluster.Cluster
import akka.cluster.ddata.{ ORSet, ORSetKey }
import akka.cluster.ddata.Replicator.WriteLocal
import akka.cluster.ddata.typed.scaladsl.{ DistributedData, Replicator }
import akka.persistence.query.EventEnvelope
import akka.persistence.query.scaladsl.EventsByPersistenceIdQuery
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import akka.util.Timeout
import cats.instances.string._
import cats.syntax.eq._
import org.apache.logging.log4j.scala.Logging
import scala.concurrent.duration.FiniteDuration

object UserProjection extends Logging {
  import akka.actor.typed.scaladsl.adapter._

  sealed trait Command
  final case object Stop                              extends Command
  private final case object HandleEventStreamComplete extends Command

  abstract class EventStreamCompleteException
      extends IllegalStateException("Event stream completed unexpectedly!")
  private final case object EventStreamCompleteException extends EventStreamCompleteException

  final val Name = "user-projection"

  final val usersKey: ORSetKey[User] =
    ORSetKey("users")

  def apply(readJournal: EventsByPersistenceIdQuery,
            userView: ActorRef[UserView.Command],
            askTimeout: FiniteDuration)(implicit mat: Materializer): Behavior[Command] =
    Actor.deferred { context =>
      implicit val c: Cluster   = Cluster(context.system.toUntyped)
      implicit val s: Scheduler = context.system.scheduler
      implicit val t: Timeout   = askTimeout
      val replicator            = DistributedData(context.system).replicator
      val self                  = context.self

      readJournal
        .eventsByPersistenceId(UserRepository.Name, 0, Long.MaxValue)
        .collect { case EventEnvelope(_, _, _, event: UserRepository.Event) => event }
        .mapAsync(1) {
          case UserRepository.UserAdded(user) =>
            replicator ? Replicator.Update(usersKey, ORSet.empty[User], WriteLocal)(_ + user)

          case UserRepository.UserRemoved(username) =>
            replicator ? Replicator.Update(usersKey, ORSet.empty[User], WriteLocal) { users =>
              users.elements.find(_.username.value === username).fold(users)(users - _)
            }
        }
        .runWith(Sink.onComplete(_ => self ! HandleEventStreamComplete))
      logger.debug("Running event stream")

      Actor.immutable {
        case (_, Stop)                      => Actor.stopped
        case (_, HandleEventStreamComplete) => throw EventStreamCompleteException
      }
    }
}
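
The projection throws EventStreamCompleteException so that a supervisor can restart it, re-running the event stream. A sketch of that wiring, assuming the same pre-Behaviors typed DSL (Actor.supervise) used above, with readJournal, userView, askTimeout, and an implicit Materializer in scope:

import akka.actor.typed.SupervisorStrategy

// Restart the projection, and with it the event stream, whenever the
// stream completes unexpectedly and the exception above is thrown.
val supervised: Behavior[UserProjection.Command] =
  Actor
    .supervise(UserProjection(readJournal, userView, askTimeout))
    .onFailure[UserProjection.EventStreamCompleteException](SupervisorStrategy.restart)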