I am running an Akka Streams Kafka application, and I want to add a supervision strategy to the stream consumer so that if the broker goes down and the stream consumer dies after the stop timeout, the supervisor can restart the consumer.
Here is my complete code:
UserEventStream:
import akka.actor.{Actor, PoisonPill, Props}
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.kafka.scaladsl.Consumer
import akka.stream.scaladsl.Sink
import akka.util.Timeout
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}
import akka.pattern.ask
import akka.stream.ActorMaterializer
class UserEventStream extends Actor {
  val settings = Settings(context.system).KafkaConsumers
  implicit val timeout: Timeout = Timeout(10 seconds)
  implicit val materializer = ActorMaterializer()

  override def preStart(): Unit = {
    super.preStart()
    println("Starting UserEventStream...")
  }

  override def receive = {
    case "start" =>
      val consumerConfig = settings.KafkaConsumerInfo
      println(s"ConsumerConfig with $consumerConfig")
      startStreamConsumer(consumerConfig("UserEventMessage" + ".c" + 1))
  }

  def startStreamConsumer(config: Map[String, String]) = {
    println(s"startStreamConsumer with config $config")
    val consumerSource = createConsumerSource(config)
    val consumerSink = createConsumerSink()
    val messageProcessor = context.actorOf(Props[MessageProcessor], "messageprocessor")
    println("START: The UserEventStream processing")
    val future =
      consumerSource
        .mapAsync(parallelism = 50) { message =>
          val m = s"${message.record.value()}"
          messageProcessor ? m
        }
        .runWith(consumerSink)
    future.onComplete {
      case Failure(ex) =>
        println("FAILURE: The UserEventStream processing, stopping the actor.")
        self ! PoisonPill
      case Success(_) =>
    }
  }

  def createConsumerSource(config: Map[String, String]) = {
    val kafkaMBAddress = config("bootstrap-servers")
    val groupID = config("groupId")
    val topicSubscription = config("subscription-topic").split(',').toList
    println(s"Subscription topics $topicSubscription")
    val consumerSettings = ConsumerSettings(context.system, new ByteArrayDeserializer, new StringDeserializer)
      .withBootstrapServers(kafkaMBAddress)
      .withGroupId(groupID)
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
      .withProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true")
    Consumer.committableSource(consumerSettings, Subscriptions.topics(topicSubscription: _*))
  }

  def createConsumerSink() = {
    Sink.foreach(println)
  }
}
StreamProcessorSupervisor (the supervisor of UserEventStream):
import akka.actor.{Actor, Props}
import akka.pattern.{Backoff, BackoffSupervisor}
import akka.stream.ActorMaterializer
import stream.StreamProcessorSupervisor.StartClient
import scala.concurrent.duration._
object StreamProcessorSupervisor {
  final case object StartSimulator
  final case class StartClient(id: String)

  def props(implicit materializer: ActorMaterializer) =
    Props(classOf[StreamProcessorSupervisor], materializer)
}

class StreamProcessorSupervisor(implicit materializer: ActorMaterializer) extends Actor {
  override def preStart(): Unit = {
    self ! StartClient(self.path.name)
  }

  def receive: Receive = {
    case StartClient(id) =>
      println(s"startClient with id $id")
      val childProps = Props(classOf[UserEventStream])
      val supervisor = BackoffSupervisor.props(
        Backoff.onFailure(
          childProps,
          childName = "usereventstream",
          minBackoff = 1.second,
          maxBackoff = 1.minutes,
          randomFactor = 0.2
        )
      )
      context.actorOf(supervisor, name = s"$id-backoff-supervisor")
      val userEventStrean = context.actorOf(Props(classOf[UserEventStream]), "usereventstream")
      userEventStrean ! "start"
  }
}
App (the main application class):
import akka.actor.{ActorSystem, Props}
import akka.stream.ActorMaterializer
object App extends scala.App {
  implicit val system = ActorSystem("stream-test")
  implicit val materializer = ActorMaterializer()

  system.actorOf(StreamProcessorSupervisor.props, "StreamProcessorSupervisor")
}
application.conf:
kafka {
  consumer {
    num-consumers = "1"
    c1 {
      bootstrap-servers = "localhost:9092"
      bootstrap-servers = ${?KAFKA_CONSUMER_ENDPOINT1}
      groupId = "localakkagroup1"
      subscription-topic = "test"
      subscription-topic = ${?SUBSCRIPTION_TOPIC1}
      message-type = "UserEventMessage"
      poll-interval = 50ms
      poll-timeout = 50ms
      stop-timeout = 30s
      close-timeout = 20s
      commit-timeout = 15s
      wakeup-timeout = 10s
      max-wakeups = 10
      use-dispatcher = "akka.kafka.default-dispatcher"
      kafka-clients {
        enable.auto.commit = true
      }
    }
  }
}
After running the application, I purposely killed the Kafka broker and found that after 30 seconds (the stop-timeout), the actor stops itself by sending itself a poison pill. But strangely, it does not restart as specified by the BackoffSupervisor strategy.
What could be the issue here?
There are two instances of UserEventStream in your code: one is the child actor that the BackoffSupervisor internally creates with the Props that you pass to it, and the other is the val userEventStrean that is a child of StreamProcessorSupervisor. You're sending the "start" message to the latter, when you should be sending that message to the former.
You don't need val userEventStrean, because the BackoffSupervisor creates the child actor. Messages sent to the BackoffSupervisor are forwarded to the child, so to send a "start" message to the child, send it to the BackoffSupervisor:
class StreamProcessorSupervisor(implicit materializer: ActorMaterializer) extends Actor {
  override def preStart(): Unit = {
    self ! StartClient(self.path.name)
  }

  def receive: Receive = {
    case StartClient(id) =>
      println(s"startClient with id $id")
      val childProps = Props[UserEventStream]
      val supervisorProps = BackoffSupervisor.props(...)
      val supervisor = context.actorOf(supervisorProps, name = s"$id-backoff-supervisor")
      supervisor ! "start"
  }
}
The other issue is that when an actor receives a PoisonPill, that's not the same thing as that actor throwing an exception. Therefore, Backoff.onFailure won't be triggered when UserEventStream sends itself a PoisonPill. A PoisonPill stops the actor, so use Backoff.onStop instead:
val supervisorProps = BackoffSupervisor.props(
  Backoff.onStop( // <--- use onStop
    childProps,
    ...
  )
)
val supervisor = context.actorOf(supervisorProps, name = s"$id-backoff-supervisor")
supervisor ! "start"
Related
I have the below Typesafe config in the file application-typed.conf.
akka {
  loggers = ["akka.event.slf4j.Slf4jLogger"]
  loglevel = "DEBUG"
  logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"
  actor {
    provider = "local"
  }
}

custom-thread-pool {
  type = Dispatcher
  executor = "thread-pool-executor"
  thread-pool-executor {
    fixed-pool-size = 40
  }
  throughput = 2
}
Below is the Akka Typed actor code.
import akka.actor.typed.{ActorSystem, Behavior, DispatcherSelector, PostStop, Signal}
import akka.actor.typed.scaladsl.AbstractBehavior
import akka.actor.typed.scaladsl.ActorContext
import akka.actor.typed.scaladsl.Behaviors
import com.typesafe.config.ConfigFactory
import scala.concurrent.ExecutionContext

trait PrintMessage
case class PrintMessageAny(x: Any) extends PrintMessage

object PrintMeActor {
  def apply(): Behavior[PrintMessage] =
    Behaviors.setup[PrintMessage](context => new PrintMeActor(context))
}

class PrintMeActor(context: ActorContext[PrintMessage]) extends AbstractBehavior[PrintMessage](context) {
  val dispatcherSelector: DispatcherSelector = DispatcherSelector.fromConfig("custom-thread-pool")
  implicit val executionContext: ExecutionContext = context.system.dispatchers.lookup(dispatcherSelector)

  println(s"PrintMeActor Application started in Thread ${Thread.currentThread().getName}")

  override def onMessage(msg: PrintMessage): Behavior[PrintMessage] = {
    // Print each message along with the thread that handled it
    println(s"Got $msg in Thread ${Thread.currentThread().getName}")
    Behaviors.same
  }

  override def onSignal: PartialFunction[Signal, Behavior[PrintMessage]] = {
    case PostStop =>
      context.log.info("PrintMeActor Application stopped")
      this
  }
}

object TestTypedActorApp extends App {
  val config = ConfigFactory.load("application-typed.conf")
  val as: ActorSystem[PrintMessage] = ActorSystem(PrintMeActor(), "PrintAnyTypeMessage", config)
  as.tell(PrintMessageAny("test"))
  Thread.sleep(2000)
}
When I run the code, I get the below output.
PrintMeActor Application started in Thread PrintAnyTypeMessage-akka.actor.default-dispatcher-6
Got PrintMessageAny(test) in Thread PrintAnyTypeMessage-akka.actor.default-dispatcher-6
I want this actor to run on the custom-thread-pool, but that is not happening. How can I achieve this?
You associate the dispatcher with the actor when you spawn it, by passing an akka.actor.typed.DispatcherSelector (which extends akka.actor.typed.Props) corresponding to the desired dispatcher.
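For example, here is a minimal sketch for spawning a child on the custom dispatcher (the surrounding guardian behavior is hypothetical, for illustration only):

// Sketch: DispatcherSelector extends akka.actor.typed.Props,
// so it can be passed as the Props argument to spawn.
val guardian = Behaviors.setup[Nothing] { context =>
  context.spawn(PrintMeActor(), "print-me", DispatcherSelector.fromConfig("custom-thread-pool"))
  Behaviors.empty
}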
When it is the ActorSystem's guardian actor that should run on a custom dispatcher, Props can only be passed through the overloads that take either a Config or an ActorSystemSetup.
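A sketch of the Config-taking overload, assuming the question's application-typed.conf:

// Sketch: pass the DispatcherSelector as the guardian's Props
val system: ActorSystem[PrintMessage] = ActorSystem(
  PrintMeActor(),
  "PrintAnyTypeMessage",
  ConfigFactory.load("application-typed.conf"),
  DispatcherSelector.fromConfig("custom-thread-pool"))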
If you want to override the dispatcher for the user guardian actor (the actor with the behavior you passed into the ActorSystem), it may make more sense to make that dispatcher the default dispatcher:
akka.actor.default-dispatcher {
  executor = "thread-pool-executor"
  thread-pool-executor {
    fixed-pool-size = 40
  }
  throughput = 2
}
Accessing the metrics of an Alpakka PlainSource seems fairly straightforward, but how can I do the same thing with a CommittableSource?
I currently have a simple consumer, something like this:
class Consumer(implicit val ma: ActorMaterializer, implicit val ec: ExecutionContext) extends Actor {
  private val settings = ConsumerSettings(
    context.system,
    new ByteArrayDeserializer,
    new StringDeserializer)
    .withProperties(...)

  override def receive: Receive = Actor.emptyBehavior

  RestartSource
    .withBackoff(minBackoff = 2.seconds, maxBackoff = 20.seconds, randomFactor = 0.2)(consumer)
    .runForeach { handleMessage }

  private def consumer() = {
    AkkaConsumer
      .committableSource(settings, Subscriptions.topics(Set(topic)))
      .log(getClass.getSimpleName)
      .withAttributes(ActorAttributes.supervisionStrategy(_ => Supervision.Resume))
  }

  private def handleMessage(message: CommittableMessage[Array[Byte], String]): Unit = {
    ...
  }
}
How can I get access to the consumer metrics in this case?
We are using the Prometheus Java client, and I solved my issue with a custom collector that fetches its metrics directly from JMX:
import java.lang.management.ManagementFactory
import java.util
import io.prometheus.client.Collector
import io.prometheus.client.Collector.MetricFamilySamples
import io.prometheus.client.CounterMetricFamily
import io.prometheus.client.GaugeMetricFamily
import javax.management.ObjectName
import scala.collection.JavaConverters._
import scala.collection.mutable

class ConsumerMetricsCollector(val labels: Map[String, String] = Map.empty) extends Collector {
  val metrics: mutable.Map[String, MetricFamilySamples] = mutable.Map.empty

  def collect: util.List[MetricFamilySamples] = {
    val server = ManagementFactory.getPlatformMBeanServer
    for {
      attrType <- List("consumer-metrics", "consumer-coordinator-metrics", "consumer-fetch-manager-metrics")
      name <- server.queryNames(new ObjectName(s"kafka.consumer:type=$attrType,client-id=*"), null).asScala
      attrInfo <- server.getMBeanInfo(name).getAttributes.filter { _.getType == "double" }
    } yield {
      val attrName = attrInfo.getName
      // Extract the client-id label from the JMX object name (not from the attribute name)
      val metricLabels = name.toString.split(",").map(_.split("=").toList).collect {
        case "client-id" :: (id: String) :: Nil => ("client-id", id)
      }.toList ++ labels
      val metricName = "kafka_consumer_" + attrName.replaceAll(raw"""[^\p{Alnum}]+""", "_")
      val labelKeys = metricLabels.map(_._1).asJava
      val metric = metrics.getOrElseUpdate(metricName,
        if (metricName.endsWith("_total") || metricName.endsWith("_sum")) {
          new CounterMetricFamily(metricName, attrInfo.getDescription, labelKeys)
        } else {
          new GaugeMetricFamily(metricName, attrInfo.getDescription, labelKeys)
        }: MetricFamilySamples
      )
      val metricValue = server.getAttribute(name, attrName).asInstanceOf[Double]
      val labelValues = metricLabels.map(_._2).asJava
      metric match {
        case f: CounterMetricFamily => f.addMetric(labelValues, metricValue)
        case f: GaugeMetricFamily => f.addMetric(labelValues, metricValue)
        case _ =>
      }
    }
    metrics.values.toList.asJava
  }
}
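To expose these metrics, the collector just needs to be registered once, e.g. with the default registry (the label map below is only an illustration):

// Register the JMX-backed collector so Prometheus scrapes include the Kafka consumer metrics
new ConsumerMetricsCollector(Map("service" -> "my-service")).register()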
Here I have created a singleton actor. The master and the seed node are the same. From a different project I am trying to join the cluster and send a message. I am able to join the cluster but cannot send a message.
My master and seed node:
package Demo

import akka.actor._
import akka.cluster.singleton.{ClusterSingletonManager, ClusterSingletonManagerSettings, ClusterSingletonProxy, ClusterSingletonProxySettings}
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._

object MainObject1 extends App {
  DemoMain1.start(8888)
}

object DemoMain1 {
  val role = "test"
  val singletonname = "Ruuner"
  val mastername = "Master"

  def start(port: Int): ActorSystem = {
    val conf = ConfigFactory.parseString(
      s"""
         |akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
         |
         |akka.remote.netty.tcp.port = $port
         |akka.remote.netty.tcp.hostname = 127.0.0.1
         |akka.cluster.roles = ["$role"]
         |akka.cluster.seed-nodes = ["akka.tcp://DemoMain1@127.0.0.1:8888"]
      """.stripMargin
    )
    val system = ActorSystem("DemoMain1", conf)
    val settings = ClusterSingletonManagerSettings(system).withRole(role)
    val manager = ClusterSingletonManager.props(Props[DemoMain1], PoisonPill, settings)
    val actor = system.actorOf(manager, mastername)
    system
  }

  class DemoMain1 extends Actor with Identification {
    import context._

    override def preStart(): Unit = {
      println(s"Master is created with id $id in $system")
      println(self.path.address.host)
      system.scheduler.scheduleOnce(100.seconds, self, 'exit)
    }

    override def receive: Receive = {
      case 'exit =>
        println(s"$id is exiting")
        context stop self
        //SupervisorStrategy.Restart
      case msg =>
        println(s"message from $system is $msg")
        sender() ! 'how
    }
  }
}
The other node, which is trying to join the cluster and send a message:
import akka.actor._
import akka.cluster.singleton.{ClusterSingletonProxy, ClusterSingletonProxySettings}
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._

object Ping extends App {
  def ping: ActorSystem = {
    val conf = ConfigFactory.parseString(
      s"""
         |akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
         |
         |akka.remote.netty.tcp.port = 0
         |akka.remote.netty.tcp.hostname = 127.0.0.1
         |akka.cluster.roles = ["slave"]
         |akka.cluster.seed-nodes = ["akka.tcp://DemoMain1@127.0.0.1:8888"]
         |akka.cluster.auto-down-unreachable-after = 10s
      """.stripMargin
    )
    val system = ActorSystem("DemoMain1", conf)
    system.actorOf(Props[Ping])
    system
  }

  class Ping extends Actor {
    import context._

    val path = "akka.tcp://DemoMain1@127.0.0.1:8888/DemoMain1/user/Master/actor"
    val settings = ClusterSingletonProxySettings(system).withRole("slave")
    val actor = context.actorOf(ClusterSingletonProxy.props(path, settings))
    val pingInterval = 1.seconds

    override def preStart(): Unit = {
      system.scheduler.schedule(pingInterval, pingInterval) {
        println(s"Locate Ping $system")
        actor ! 'hi
      }
    }

    override def receive: Receive = {
      case msg => println(s"The message is $msg")
    }
  }

  Ping.ping
}
Even if I give the IP addresses of the systems, the message is still not sent.
It appears that the role in your ClusterSingletonProxySettings(system).withRole("slave") settings for your Ping actor doesn't match that of your ClusterSingletonManagerSettings(system).withRole(role), where role = "test".
The ClusterSingletonProxy is supposed to be present on all nodes with the specified role on which the cluster is set up, so its role setting should match the ClusterSingletonManager's.
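A minimal sketch with matching roles, assuming the manager was started as /user/Master on nodes with role "test", as in the question:

// The proxy's role must match the ClusterSingletonManager's role
val proxySettings = ClusterSingletonProxySettings(system).withRole("test")
val masterProxy = system.actorOf(
  ClusterSingletonProxy.props(
    singletonManagerPath = "/user/Master",
    settings = proxySettings),
  name = "masterProxy")
masterProxy ! 'hi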
I want to make an application using Akka where different actors will use the same router to submit their jobs.
My question is: is it possible to send similar jobs from different actors to the same router in Akka?
As long as your actors are in the same ActorSystem, you should have no problems.
In this case, the actor that receives the messages will simply see a different sender for each.
Note that this sounds like questionable design, though. For the sake of the answer, here is an example:
import akka.actor.{ActorSystem, ActorRef, Props, Actor}
import akka.routing.RoundRobinPool

class Worker extends Actor {
  override def receive: Receive = {
    case PowerMessage(value) =>
      sender ! ResultMessage(value * value)
  }
}

class Sender(router: ActorRef) extends Actor {
  val actorName = self.path.name

  override def receive: Actor.Receive = {
    case m @ PowerMessage(_) =>
      router ! m
    case ResultMessage(value) =>
      System.out.println(s"Sender ($actorName). Result: $value.")
  }
}

class Master(workers: Int) extends Actor {
  val router: ActorRef = context.actorOf(
    Props[Worker].withRouter(RoundRobinPool(workers)),
    name = "workerRouter"
  )
  val sender1 = context.actorOf(
    Props(new Sender(router)),
    name = "sender1"
  )
  val sender2 = context.actorOf(
    Props(new Sender(router)),
    name = "sender2"
  )

  override def receive: Actor.Receive = {
    case StartCalc =>
      (1 to workers * 2).foreach {
        case index =>
          if (index % 2 == 1) {
            sender1 ! PowerMessage(index)
          } else {
            sender2 ! PowerMessage(index)
          }
      }
  }
}

object Senders {
  def main(args: Array[String]): Unit = {
    val system = ActorSystem("TestSystem")
    val master = system.actorOf(
      Props(new Master(10)),
      name = "master"
    )
    master ! StartCalc
  }
}

sealed trait MainMessage
case class PowerMessage(value: Long) extends MainMessage
case class ResultMessage(value: Long) extends MainMessage
case object StartCalc extends MainMessage
Actually I'm having trouble getting my actor (router) system to work correctly.
My setup:
I'm trying to use an Akka router within a Play controller. For dependency injection I use Scaldi.
Scaldi module:
class UserDAOModule extends Module {
  binding to new ExampleRouter
  binding toProvider new UserDAOWorker
}
Akka router:
class UserDAORouter(implicit inj: Injector) extends Actor with AkkaInjectable {
  val userDAOProps = injectActorProps[UserDAOWorker]

  var router = {
    val routees = Vector.fill(5) {
      val r = context.actorOf(userDAOProps)
      context watch r
      ActorRefRoutee(r)
    }
    Router(RoundRobinRoutingLogic(), routees)
  }

  override def receive: Receive = {
    case mm: MongoDBMessage =>
      router.route(mm, sender)
    case Terminated(a) =>
      router = router.removeRoutee(a)
      val r = context.actorOf(userDAOProps)
      context watch r
      router = router.addRoutee(r)
  }
}
Worker:
class UserDAOWorker(implicit inj: Injector) extends Actor with Injectable {
  val db = inject[DefaultDB]
  val collection: JSONCollection = db("users")
  val currentSender = sender

  override def receive: Receive = {
    case InsertUser(user) => insertUser(user)
  }

  def insertUser(user: User) = {
    collection.save(user).onComplete {
      case Failure(e) => currentSender ! new UserDAOReturnMessage(Some(e), None)
      case Success(lastError) => currentSender ! new UserDAOReturnMessage(None, lastError)
    }
  }
}
When I send a message (an InsertUser message) to the router, it is routed correctly and the worker receives it, but when the worker sends a message back to the sender, it can't be delivered and is sent to the dead letter office. I can't figure out how to fix this. Can someone help me?
Thanks in advance
I guess the problem is that currentSender is initialized with ActorRef.noSender in the constructor at actor creation. sender is only valid in the context of receiving a message in receive(). Sending a message to ActorRef.noSender is equivalent to sending it to the dead letter queue.
Something like this should work:
class UserDAOWorker(implicit inj: Injector) extends Actor with Injectable {
  val db = inject[DefaultDB]
  val collection: JSONCollection = db("users")

  override def receive: Receive = {
    case InsertUser(user) => insertUser(sender, user)
  }

  def insertUser(currentSender: ActorRef, user: User) = {
    collection.save(user).onComplete {
      case Failure(e) => currentSender ! new UserDAOReturnMessage(Some(e), None)
      case Success(lastError) => currentSender ! new UserDAOReturnMessage(None, lastError)
    }
  }
}
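An alternative sketch of the same fix using akka.pattern.pipe, assuming the actor's own dispatcher is used as the ExecutionContext:

import akka.pattern.pipe
import context.dispatcher

def insertUser(user: User): Unit = {
  // pipeTo captures sender() synchronously, while the message is still being processed
  collection.save(user)
    .map(lastError => new UserDAOReturnMessage(None, lastError))
    .recover { case e => new UserDAOReturnMessage(Some(e), None) }
    .pipeTo(sender())
}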