Can't get a SparkContext in new AWS EMR Cluster - amazon-web-services

I just set up an AWS EMR cluster (EMR version 5.18 with Spark 2.3.2). I SSH into the master machine, run spark-shell or pyspark, and get the following error:
$ spark-shell
log4j:ERROR setFile(null,true) call failed.
java.io.FileNotFoundException: /stderr (Permission denied)
at java.io.FileOutputStream.open0(Native Method)
at java.io.FileOutputStream.open(FileOutputStream.java:270)
at java.io.FileOutputStream.<init>(FileOutputStream.java:213)
at java.io.FileOutputStream.<init>(FileOutputStream.java:133)
at org.apache.log4j.FileAppender.setFile(FileAppender.java:294)
at org.apache.log4j.FileAppender.activateOptions(FileAppender.java:165)
at org.apache.log4j.DailyRollingFileAppender.activateOptions(DailyRollingFileAppender.java:223)
at org.apache.log4j.config.PropertySetter.activate(PropertySetter.java:307)
at org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:172)
at org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:104)
at org.apache.log4j.PropertyConfigurator.parseAppender(PropertyConfigurator.java:842)
at org.apache.log4j.PropertyConfigurator.parseCategory(PropertyConfigurator.java:768)
at org.apache.log4j.PropertyConfigurator.parseCatsAndRenderers(PropertyConfigurator.java:672)
at org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:516)
at org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:580)
at org.apache.log4j.helpers.OptionConverter.selectAndConfigure(OptionConverter.java:526)
at org.apache.log4j.LogManager.<clinit>(LogManager.java:127)
at org.apache.spark.internal.Logging$class.initializeLogging(Logging.scala:120)
at org.apache.spark.internal.Logging$class.initializeLogIfNecessary(Logging.scala:108)
at org.apache.spark.deploy.SparkSubmit$.initializeLogIfNecessary(SparkSubmit.scala:71)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:128)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
log4j:ERROR Either File or DatePattern options are not set for appender [DRFA-stderr].
log4j:ERROR setFile(null,true) call failed.
java.io.FileNotFoundException: /stdout (Permission denied)
at java.io.FileOutputStream.open0(Native Method)
at java.io.FileOutputStream.open(FileOutputStream.java:270)
at java.io.FileOutputStream.<init>(FileOutputStream.java:213)
at java.io.FileOutputStream.<init>(FileOutputStream.java:133)
at org.apache.log4j.FileAppender.setFile(FileAppender.java:294)
at org.apache.log4j.FileAppender.activateOptions(FileAppender.java:165)
at org.apache.log4j.DailyRollingFileAppender.activateOptions(DailyRollingFileAppender.java:223)
at org.apache.log4j.config.PropertySetter.activate(PropertySetter.java:307)
at org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:172)
at org.apache.log4j.config.PropertySetter.setProperties(PropertySetter.java:104)
at org.apache.log4j.PropertyConfigurator.parseAppender(PropertyConfigurator.java:842)
at org.apache.log4j.PropertyConfigurator.parseCategory(PropertyConfigurator.java:768)
at org.apache.log4j.PropertyConfigurator.parseCatsAndRenderers(PropertyConfigurator.java:672)
at org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:516)
at org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:580)
at org.apache.log4j.helpers.OptionConverter.selectAndConfigure(OptionConverter.java:526)
at org.apache.log4j.LogManager.<clinit>(LogManager.java:127)
at org.apache.spark.internal.Logging$class.initializeLogging(Logging.scala:120)
at org.apache.spark.internal.Logging$class.initializeLogIfNecessary(Logging.scala:108)
at org.apache.spark.deploy.SparkSubmit$.initializeLogIfNecessary(SparkSubmit.scala:71)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:128)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
log4j:ERROR Either File or DatePattern options are not set for appender [DRFA-stdout].
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
18/11/04 12:24:32 ERROR SparkContext: Error initializing SparkContext.
java.lang.IllegalArgumentException: Required executor memory (4608+460 MB) is above the max threshold (3072 MB) of this cluster! Please check the values of 'yarn.scheduler.maximum-allocation-mb' and/or 'yarn.nodemanager.resource.memory-mb'.
at org.apache.spark.deploy.yarn.Client.verifyClusterResources(Client.scala:318)
at org.apache.spark.deploy.yarn.Client.submitApplication(Client.scala:166)
at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend.start(YarnClientSchedulerBackend.scala:57)
at org.apache.spark.scheduler.TaskSchedulerImpl.start(TaskSchedulerImpl.scala:164)
at org.apache.spark.SparkContext.<init>(SparkContext.scala:500)
at org.apache.spark.SparkContext$.getOrCreate(SparkContext.scala:2493)
at org.apache.spark.sql.SparkSession$Builder$$anonfun$7.apply(SparkSession.scala:934)
at org.apache.spark.sql.SparkSession$Builder$$anonfun$7.apply(SparkSession.scala:925)
at scala.Option.getOrElse(Option.scala:121)
at org.apache.spark.sql.SparkSession$Builder.getOrCreate(SparkSession.scala:925)
at org.apache.spark.repl.Main$.createSparkSession(Main.scala:103)
at $line3.$read$$iw$$iw.<init>(<console>:15)
at $line3.$read$$iw.<init>(<console>:43)
at $line3.$read.<init>(<console>:45)
at $line3.$read$.<init>(<console>:49)
at $line3.$read$.<clinit>(<console>)
at $line3.$eval$.$print$lzycompute(<console>:7)
at $line3.$eval$.$print(<console>:6)
at $line3.$eval.$print(<console>)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at scala.tools.nsc.interpreter.IMain$ReadEvalPrint.call(IMain.scala:786)
at scala.tools.nsc.interpreter.IMain$Request.loadAndRun(IMain.scala:1047)
at scala.tools.nsc.interpreter.IMain$WrappedRequest$$anonfun$loadAndRunReq$1.apply(IMain.scala:638)
at scala.tools.nsc.interpreter.IMain$WrappedRequest$$anonfun$loadAndRunReq$1.apply(IMain.scala:637)
at scala.reflect.internal.util.ScalaClassLoader$class.asContext(ScalaClassLoader.scala:31)
at scala.reflect.internal.util.AbstractFileClassLoader.asContext(AbstractFileClassLoader.scala:19)
at scala.tools.nsc.interpreter.IMain$WrappedRequest.loadAndRunReq(IMain.scala:637)
at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:569)
at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:565)
at scala.tools.nsc.interpreter.ILoop.interpretStartingWith(ILoop.scala:807)
at scala.tools.nsc.interpreter.ILoop.command(ILoop.scala:681)
at scala.tools.nsc.interpreter.ILoop.processLine(ILoop.scala:395)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1$$anonfun$apply$mcV$sp$1$$anonfun$apply$mcV$sp$2.apply(SparkILoop.scala:79)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1$$anonfun$apply$mcV$sp$1$$anonfun$apply$mcV$sp$2.apply(SparkILoop.scala:79)
at scala.collection.immutable.List.foreach(List.scala:381)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1$$anonfun$apply$mcV$sp$1.apply$mcV$sp(SparkILoop.scala:79)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1$$anonfun$apply$mcV$sp$1.apply(SparkILoop.scala:79)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1$$anonfun$apply$mcV$sp$1.apply(SparkILoop.scala:79)
at scala.tools.nsc.interpreter.ILoop.savingReplayStack(ILoop.scala:91)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1.apply$mcV$sp(SparkILoop.scala:78)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1.apply(SparkILoop.scala:78)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1.apply(SparkILoop.scala:78)
at scala.tools.nsc.interpreter.IMain.beQuietDuring(IMain.scala:214)
at org.apache.spark.repl.SparkILoop.initializeSpark(SparkILoop.scala:77)
at org.apache.spark.repl.SparkILoop.loadFiles(SparkILoop.scala:110)
at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply$mcZ$sp(ILoop.scala:920)
at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply(ILoop.scala:909)
at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply(ILoop.scala:909)
at scala.reflect.internal.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:97)
at scala.tools.nsc.interpreter.ILoop.process(ILoop.scala:909)
at org.apache.spark.repl.Main$.doMain(Main.scala:76)
at org.apache.spark.repl.Main$.main(Main.scala:56)
at org.apache.spark.repl.Main.main(Main.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:894)
at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:198)
at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:228)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:137)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
18/11/04 12:24:33 WARN YarnSchedulerBackend$YarnSchedulerEndpoint: Attempted to request executors before the AM has registered!
18/11/04 12:24:33 WARN MetricsSystem: Stopping a MetricsSystem that is not running
java.lang.IllegalArgumentException: Required executor memory (4608+460 MB) is above the max threshold (3072 MB) of this cluster! Please check the values of 'yarn.scheduler.maximum-allocation-mb' and/or 'yarn.nodemanager.resource.memory-mb'.
at org.apache.spark.deploy.yarn.Client.verifyClusterResources(Client.scala:318)
at org.apache.spark.deploy.yarn.Client.submitApplication(Client.scala:166)
at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend.start(YarnClientSchedulerBackend.scala:57)
at org.apache.spark.scheduler.TaskSchedulerImpl.start(TaskSchedulerImpl.scala:164)
at org.apache.spark.SparkContext.<init>(SparkContext.scala:500)
at org.apache.spark.SparkContext$.getOrCreate(SparkContext.scala:2493)
at org.apache.spark.sql.SparkSession$Builder$$anonfun$7.apply(SparkSession.scala:934)
at org.apache.spark.sql.SparkSession$Builder$$anonfun$7.apply(SparkSession.scala:925)
at scala.Option.getOrElse(Option.scala:121)
at org.apache.spark.sql.SparkSession$Builder.getOrCreate(SparkSession.scala:925)
at org.apache.spark.repl.Main$.createSparkSession(Main.scala:103)
... 55 elided
<console>:14: error: not found: value spark
import spark.implicits._
^
<console>:14: error: not found: value spark
import spark.sql
^
Welcome to
____ __
/ __/__ ___ _____/ /__
_\ \/ _ \/ _ `/ __/ '_/
/___/ .__/\_,_/_/ /_/\_\ version 2.3.2
/_/
Using Scala version 2.11.8 (OpenJDK 64-Bit Server VM, Java 1.8.0_181)
Type in expressions to have them evaluated.
Type :help for more information.
I am new to Spark and to EMR and don't know what to do. Is there some configuration step I missed, or anything else I have to provide to make it work?
Thank you for your help!

If you look into the /etc/spark/conf/log4j.properties file, you'll find a new setup that rolls Spark Streaming logs hourly (probably as suggested here).
The problem occurs because the ${spark.yarn.app.container.log.dir} system property is not set in the Spark driver process. The property is eventually set to YARN's container log directory, but that happens later (look here and here).
To fix this error in the Spark driver, add the following to your spark-submit or spark-shell command:
--driver-java-options='-Dspark.yarn.app.container.log.dir=/mnt/var/log/hadoop'
Please note that the /mnt/var/log/hadoop/stderr and /mnt/var/log/hadoop/stdout files will be reused by all the (Spark Streaming) processes started on the same node.
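For the interactive shells from the question, a minimal sketch of the invocation (same flag, no other changes assumed):
spark-shell --driver-java-options='-Dspark.yarn.app.container.log.dir=/mnt/var/log/hadoop'
pyspark --driver-java-options='-Dspark.yarn.app.container.log.dir=/mnt/var/log/hadoop'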

We have also run into this issue and hope some AWS or Spark engineers are reading this. I've narrowed it down to the /etc/spark/conf/log4j.properties file and how the loggers are configured using the ${spark.yarn.app.container.log.dir} system property. That value evaluates to null, so the logging directory now resolves to /stdout and /stderr instead of the desired /mnt/var/log/hadoop-yarn/containers/<app_id>/<container_id>/(stdout|stderr), which is how it worked in EMR versions before 5.18.0.
Workaround #1 (not ideal): If you set that property to a static path that the hadoop user has access to, such as /var/log/hadoop-yarn/stderr, things work fine. This probably breaks things like the history server and an unknown number of other things, but spark-shell and pyspark can start without errors.
UPDATE. Workaround #2 (revert): Not sure why I didn't do this earlier, but comparing this to a 5.13 cluster, the DRFA-stderr and DRFA-stdout appenders did not exist at all. If you comment those sections out, delete them, or simply copy the log4j.properties file from the template, this problem also goes away (again, with unknown impact on the rest of the services). I'm not sure where that section originated; the upstream repository's configs do not have those appenders, so it appears to be proprietary to the AWS distribution.
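A minimal shell sketch of Workaround #2, assuming the offending lines all start with log4j.appender.DRFA-std (property names reconstructed from the errors above; verify against your own file, and keep in mind the unknown impact mentioned above):
sudo cp /etc/spark/conf/log4j.properties /etc/spark/conf/log4j.properties.bak   # keep a backup
grep -n 'DRFA-std' /etc/spark/conf/log4j.properties                             # list every reference first
sudo sed -i 's/^log4j\.appender\.DRFA-std/#&/' /etc/spark/conf/log4j.properties # comment out the appender definitions
If the appenders are also referenced from the rootLogger or other logger lines, remove those references as well; the grep above will show them.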

To fix this issue, you can add configuration in JSON format during EMR provisioning. We use code like this:
{
  "Classification": "yarn-site",
  "Configurations": [],
  "Properties": {
    "spark.yarn.app.container.log.dir": "/var/log/hadoop-yarn"
  }
}
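As a hedged sketch (the file name, instance types, and counts are placeholders, not from the original answer), the classification block above could be saved to a file and passed at cluster creation with the AWS CLI:
# configurations.json contains the classification block above
aws emr create-cluster \
  --release-label emr-5.18.0 \
  --applications Name=Spark \
  --configurations file://configurations.json \
  --instance-type m4.large \
  --instance-count 3 \
  --use-default-roles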

Related

How to find the cause of a broken wso2mi-dashboard?

Good day everyone.
We have WSO2 deployed in three Docker containers. Everything worked fine until, after adding the next flow, logging in to the Micro Integrator Dashboard stopped working. Moreover, the system itself continues to work; you just cannot enter the panel.
[Error image: "login failed: internal server error"]
No changes were made to the users or to the container itself.
I've searched and read everything I can, but I can't even grasp where the problem is.
Please point me in the right direction.
Here is the error from the logs:
[2023-01-12 07:40:08,602] ERROR {LoginDelegate} - Error logging into dashboard server java.lang.IndexOutOfBoundsException: Index 0 out of bounds for length 0
at java.base/jdk.internal.util.Preconditions.outOfBounds(Preconditions.java:64)
at java.base/jdk.internal.util.Preconditions.outOfBoundsCheckIndex(Preconditions.java:70)
at java.base/jdk.internal.util.Preconditions.checkIndex(Preconditions.java:248)
at java.base/java.util.Objects.checkIndex(Objects.java:372)
at java.base/java.util.ArrayList.get(ArrayList.java:459)
at org.wso2.ei.dashboard.core.rest.delegates.auth.LoginDelegate.getTokenFromMI(LoginDelegate.java:80)
at org.wso2.ei.dashboard.core.rest.delegates.auth.LoginDelegate.authenticateUser(LoginDelegate.java:51)
at org.wso2.ei.dashboard.core.rest.api.LoginApi.receiveLogin(LoginApi.java:56)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:566)
at org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)
at org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)
at org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)
at org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)
at org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)
at org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)
at org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)
at org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)
at org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)
at org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)
at org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)
at org.glassfish.jersey.internal.Errors.process(Errors.java:316)
at org.glassfish.jersey.internal.Errors.process(Errors.java:298)
at org.glassfish.jersey.internal.Errors.process(Errors.java:268)
at org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)
at org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)
at org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)
at org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)
at org.glassfish.jersey.servlet.WebComponent.service(WebComponent.java:370)
at org.glassfish.jersey.servlet.ServletContainer.service(ServletContainer.java:389)
at org.glassfish.jersey.servlet.ServletContainer.service(ServletContainer.java:342)
at org.glassfish.jersey.servlet.ServletContainer.service(ServletContainer.java:229)
at org.eclipse.jetty.servlet.ServletHolder$NotAsync.service(ServletHolder.java:1450)
at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:799)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:550)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:143)
at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:600)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:235)
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1624)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:233)
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1440)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:188)
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:501)
at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1594)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:186)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1355)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)
at org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:146)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.Server.handle(Server.java:516)
at org.eclipse.jetty.server.HttpChannel.lambda$handle$1(HttpChannel.java:487)
at org.eclipse.jetty.server.HttpChannel.dispatch(HttpChannel.java:732)
at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:479)
at org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:277)
at org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:311)
at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:105)
at org.eclipse.jetty.io.ssl.SslConnection$DecryptedEndPoint.onFillable(SslConnection.java:555)
at org.eclipse.jetty.io.ssl.SslConnection.onFillable(SslConnection.java:410)
at org.eclipse.jetty.io.ssl.SslConnection$2.succeeded(SslConnection.java:164)
at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:105)
at org.eclipse.jetty.io.ChannelEndPoint$1.run(ChannelEndPoint.java:104)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:338)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:315)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:131)
at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:409)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
at java.base/java.lang.Thread.run(Thread.java:829)

Restarting leveldb when failed to initialize in Akka

My application is designed in such a way that whenever someone tries to start it, the application will close other instances of itself (note that this is working as expected and should not be changed).
I am using Akka and one of my Actors is a PersistentActor that uses leveldb.
When the application starts it locks <path-to-lock>/LOCK; when it starts again, leveldb cannot lock the file, so the PersistentActor cannot start.
After some inspection I found that the leveldb Actor class is LeveldbJournal and it starts under the system guardian with path akka://actor-system/system/akka.persistence.journal.leveldb.
I would like leveldb to keep restarting itself until the file can be locked or a maximum number of retries has been reached.
Logs:
[ERROR] [02/04/2019 15:39:25.731] [operator-actor-system-akka.actor.default-dispatcher-5] [akka://operator-actor-system/system/akka.persistence.journal.leveldb] Unable to acquire lock on '<path-to-lock>/LOCK'
akka.actor.ActorInitializationException: akka://operator-actor-system/system/akka.persistence.journal.leveldb: exception during creation
at akka.actor.ActorInitializationException$.apply(Actor.scala:180)
at akka.actor.ActorCell.create(ActorCell.scala:607)
at akka.actor.ActorCell.invokeAll$1(ActorCell.scala:461)
at akka.actor.ActorCell.systemInvoke(ActorCell.scala:483)
at akka.dispatch.Mailbox.processAllSystemMessages(Mailbox.scala:282)
at akka.dispatch.Mailbox.run(Mailbox.scala:223)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.io.IOException: Unable to acquire lock on '<path-to-lock>/LOCK'
at org.iq80.leveldb.impl.DbLock.<init>(DbLock.java:55)
at org.iq80.leveldb.impl.DbImpl.<init>(DbImpl.java:167)
at org.iq80.leveldb.impl.Iq80DBFactory.open(Iq80DBFactory.java:59)
at akka.persistence.journal.leveldb.LeveldbStore$class.preStart(LeveldbStore.scala:178)
at akka.persistence.journal.leveldb.LeveldbJournal.preStart(LeveldbJournal.scala:23)
at akka.actor.Actor$class.aroundPreStart(Actor.scala:510)
at akka.persistence.journal.leveldb.LeveldbJournal.aroundPreStart(LeveldbJournal.scala:23)
at akka.actor.ActorCell.create(ActorCell.scala:590)
... 7 more
When the PersistentActor restarts:
[ERROR] [02/04/2019 15:39:57.168] [operator-actor-system-akka.actor.default-dispatcher-16] [akka://operator-actor-system/user/controller/view-manager/records-service-api-supervisor/records-service-api] Persistence failure when replaying events for persistenceId [record-service-persistence-actor]. Last known sequence number [0] (akka.persistence.RecoveryTimedOut)
Thanks,
Ido Sorozon
P.S. Versions:
Scala: 2.11.8
Akka: 2.4.19

RSolr::Error::Http - 500 Internal Server Error

I am upgrading Solr 5.3.1, and I am getting the following error when I run specs on SemaphoreCI:
RSolr::Error::Http:
RSolr::Error::Http - 500 Internal Server Error
Error: {msg=SolrCore 'default' is not available due to init failure: Error opening new
searcher,trace=org.apache.solr.common.SolrException: SolrCore
'default' is not available due to init failure: Error
opening new searcher
at org.apache.solr.core.CoreContainer.getCore(CoreContainer.java:974)
at org.apache.solr.servlet.HttpSolrCall.init(HttpSolrCall.java:250)
at org.apache.solr.servlet.HttpSolrCall.call(HttpSolrCall.java:417)
at org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:214)
at org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:179)
at org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1652)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:585)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:143)
at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:577)
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:223)
URI: http://localhost:8981/solr/default/update?wt=json
Request Headers: {"Content-Type"=>"application/json"}
Request Data: "[{\"id\":\"Contact 1\",\"type\":[\"Contact\",\"ActiveRecord::Base\"],\"class_name\":\"Contact\",\"first_name_text\":\"Danial\",\"last_name_text\":\"Ullrich\"}]"
Backtrace: /home/runner/.rbenv/versions/2.3.1/lib/ruby/gems/2.3.0/gems/rsolr-2.0.2/lib/rsolr/client.rb:195:in
rescue in execute'
/home/runner/.rbenv/versions/2.3.1/lib/ruby/gems/2.3.0/gems/rsolr-2.0.2/lib/rsolr/client.rb:185:in
execute'
/home/runner/.rbenv/versions/2.3.1/lib/ruby/gems/2.3.0/gems/rsolr-2.0.2/lib/rsolr/client.rb:180:in
send_and_receive'
/home/runner/.rbenv/versions/2.3.1/lib/ruby/gems/2.3.0/gems/sunspot_rails-2.2.7/lib/sunspot/rails/solr_instrumentation.rb:16:in
block in send_and_receive_with_as_instrumentation'
/home/runner/.rbenv/versions/2.3.1/lib/ruby/gems/2.3.0/gems/activesupport-4.2.7.1/lib/active_support/notifications.rb:164:in
block in instrument'
/home/runner/.rbenv/versions/2.3.1/lib/ruby/gems/2.3.0/gems/activesupport-4.2.7.1/lib/active_support/notifications/instrumenter.rb:20:in
instrument'
/home/runner/.rbenv/versions/2.3.1/lib/ruby/gems/2.3.0/gems/activesupport-4.2.7.1/lib/active_support/notifications.rb:164:in
instrument'
/home/runner/.rbenv/versions/2.3.1/lib/ruby/gems/2.3.0/gems/sunspot_rails-2.2.7/lib/sunspot/rails/solr_instrumentation.rb:15:in
send_and_receive_with_as_instrumentation'
(eval):2:in post'
/home/runner/.rbenv/versions/2.3.1/lib/ruby/gems/2.3.0/gems/rsolr-2.0.2/lib/rsolr/client.rb:83:in
update'
/home/runner/.rbenv/versions/2.3.1/lib/ruby/gems/2.3.0/gems/rsolr-2.0.2/lib/rsolr/client.rb:102:in
add'
# (eval):2:in `post'
# ./spec/controllers/contacts_controller_spec.rb:319:in block (3 levels) in <top (required)>'
# ------------------
# --- Caused by: ---
# Faraday::ClientError:
# the server responded with status 500
# (eval):2:in `post'
Solr Logs
1152 ERROR (coreLoadExecutor-6-thread-2) [ x:development] o.a.s.c.CoreContainer Error creating core [development]: Index locked for write for core 'development'. Solr now longer supports forceful unlocking via 'unlockOnStartup'. Please verify locks manually!
org.apache.solr.common.SolrException: Index locked for write for core 'development'. Solr now longer supports forceful unlocking via 'unlockOnStartup'. Please verify locks manually!
at org.apache.solr.core.SolrCore.<init>(SolrCore.java:820)
at org.apache.solr.core.SolrCore.<init>(SolrCore.java:659)
at org.apache.solr.core.CoreContainer.create(CoreContainer.java:723)
at org.apache.solr.core.CoreContainer$1.call(CoreContainer.java:443)
at org.apache.solr.core.CoreContainer$1.call(CoreContainer.java:434)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at org.apache.solr.common.util.ExecutorUtil$MDCAwareThreadPoolExecutor$1.run(ExecutorUtil.java:210)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.lucene.store.LockObtainFailedException: Index locked for write for core 'development'. Solr now longer supports forceful unlocking via 'unlockOnStartup'. Please verify locks manually!
at org.apache.solr.core.SolrCore.initIndex(SolrCore.java:528)
at org.apache.solr.core.SolrCore.<init>(SolrCore.java:761)
Any help would be much appreciated.
A write.lock file might be present under the solr/{environment}/data/index directory due to an unclean shutdown. Removing the write.lock file will fix the issue.
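For example, a hedged one-liner assuming the 'development' environment seen in the logs above (adjust the path to your setup):
rm solr/development/data/index/write.lock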
I was having this issue even when write.lock was not present. Here's what solved it for me:
1) Delete entire /solr directory
2) run ps -ef|grep solr to see any solr processes running
3) run kill -9 [solr pid] for each of the processes found in step 2
4) run rake sunspot:solr:start and then rake sunspot:solr:reindex
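Roughly, as a single shell sketch of the steps above (paths and the Solr PID are placeholders; adjust to your project layout):
rm -rf solr/                          # 1) delete the entire /solr directory (project-local Solr data)
ps -ef | grep solr                    # 2) list any running Solr processes
kill -9 <solr_pid>                    # 3) for each PID found in step 2
bundle exec rake sunspot:solr:start   # 4) start Solr again...
bundle exec rake sunspot:solr:reindex #    ...and reindex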
Hope this helps!

ArrayIndexOutOfBoundsException while opening stream from SSL-secured WSDL

I get a confusing ArrayIndexOutOfBoundsException on every SSL-secured web service call and I have no idea what the reason is. The exception appeared overnight without any relevant change to the code, and I am baffled.
Can anyone give me a hint on how I can fix this bug?
Environment:
Tomcat7
IBM JDK 6
Stacktrace:
com.sun.xml.ws.client.ClientTransportException: HTTP transport error: javax.net.ssl.SSLException: java.lang.ArrayIndexOutOfBoundsException
at com.sun.xml.ws.transport.http.client.HttpClientTransport.getOutput(HttpClientTransport.java:132)
at com.sun.xml.ws.transport.http.client.HttpTransportPipe.process(HttpTransportPipe.java:256)
at com.sun.xml.ws.transport.http.client.HttpTransportPipe.processRequest(HttpTransportPipe.java:184)
at com.sun.xml.ws.transport.DeferredTransportPipe.processRequest(DeferredTransportPipe.java:137)
at com.sun.xml.ws.api.pipe.Fiber.__doRun(Fiber.java:641)
at com.sun.xml.ws.api.pipe.Fiber._doRun(Fiber.java:600)
at com.sun.xml.ws.api.pipe.Fiber.doRun(Fiber.java:585)
at com.sun.xml.ws.api.pipe.Fiber.runSync(Fiber.java:482)
at com.sun.xml.ws.client.Stub.process(Stub.java:323)
at com.sun.xml.ws.client.sei.SEIStub.doProcess(SEIStub.java:161)
at com.sun.xml.ws.client.sei.SyncMethodHandler.invoke(SyncMethodHandler.java:113)
at com.sun.xml.ws.client.sei.SyncMethodHandler.invoke(SyncMethodHandler.java:93)
at com.sun.xml.ws.client.sei.SEIStub.invoke(SEIStub.java:144)
at $Proxy294.liefereKonfiguration(Unknown Source)
at at.gv.bmf.efsz.vkps.client.lieferekonfiguration.impl.LiefereKonfigurationServiceClientImpl.updateKonfReader(LiefereKonfigurationServiceClientImpl.java:279)
at at.gv.bmf.efsz.vkps.client.lieferekonfiguration.impl.LiefereKonfigurationServiceClientImpl.initiateUpdate(LiefereKonfigurationServiceClientImpl.java:205)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:60)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:37)
at java.lang.reflect.Method.invoke(Method.java:611)
at org.springframework.aop.support.AopUtils.invokeJoinpointUsingReflection(AopUtils.java:318)
at org.springframework.aop.framework.ReflectiveMethodInvocation.invokeJoinpoint(ReflectiveMethodInvocation.java:183)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:150)
at org.springframework.transaction.interceptor.TransactionInterceptor.invoke(TransactionInterceptor.java:110)
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:172)
at org.springframework.aop.framework.JdkDynamicAopProxy.invoke(JdkDynamicAopProxy.java:202)
at $Proxy293.initiateUpdate(Unknown Source)
at at.gv.bmf.efsz.vkps.client.lieferekonfiguration.impl.VKPSClientInitializer.initVKPSClient(VKPSClientInitializer.java:23)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:60)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:37)
at java.lang.reflect.Method.invoke(Method.java:611)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.invokeCustomInitMethod(AbstractAutowireCapableBeanFactory.java:1581)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.invokeInitMethods(AbstractAutowireCapableBeanFactory.java:1522)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1452)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:519)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:456)
at org.springframework.beans.factory.support.AbstractBeanFactory$1.getObject(AbstractBeanFactory.java:294)
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:225)
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:291)
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:193)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:585)
at org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:913)
at org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:464)
at org.springframework.web.context.ContextLoader.configureAndRefreshWebApplicationContext(ContextLoader.java:385)
at org.springframework.web.context.ContextLoader.initWebApplicationContext(ContextLoader.java:284)
at org.springframework.web.context.ContextLoaderListener.contextInitialized(ContextLoaderListener.java:111)
at org.apache.catalina.core.StandardContext.listenerStart(StandardContext.java:4723)
at org.apache.catalina.core.StandardContext$1.call(StandardContext.java:5226)
at org.apache.catalina.core.StandardContext$1.call(StandardContext.java:5221)
at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:314)
at java.util.concurrent.FutureTask.run(FutureTask.java:149)
at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:897)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:919)
at java.lang.Thread.run(Thread.java:736)
Caused by: javax.net.ssl.SSLException: java.lang.ArrayIndexOutOfBoundsException
at com.ibm.jsse2.n.a(n.java:27)
at com.ibm.jsse2.tc.a(tc.java:328)
at com.ibm.jsse2.tc.a(tc.java:386)
at com.ibm.jsse2.tc.a(tc.java:278)
at com.ibm.jsse2.tc.a(tc.java:298)
at com.ibm.jsse2.tc.startHandshake(tc.java:337)
at com.ibm.net.ssl.www2.protocol.https.c.afterConnect(c.java:23)
at com.ibm.net.ssl.www2.protocol.https.d.connect(d.java:57)
at sun.net.www.protocol.http.HttpURLConnection.getOutputStream(HttpURLConnection.java:1024)
at com.ibm.net.ssl.www2.protocol.https.b.getOutputStream(b.java:56)
at com.sun.xml.ws.transport.http.client.HttpClientTransport.getOutput(HttpClientTransport.java:120)
... 54 more
Caused by: java.lang.ArrayIndexOutOfBoundsException
at com.ibm.crypto.provider.TlsPrfGenerator.a(Unknown Source)
at com.ibm.crypto.provider.TlsPrfGenerator.a(Unknown Source)
at com.ibm.crypto.provider.TlsPrfGenerator.a(Unknown Source)
at com.ibm.crypto.provider.TlsMasterSecretGenerator.engineGenerateKey(Unknown Source)
at javax.crypto.KeyGenerator.generateKey(Unknown Source)
at com.ibm.jsse2.gb.b(gb.java:268)
at com.ibm.jsse2.gb.a(gb.java:131)
at com.ibm.jsse2.hb.a(hb.java:216)
at com.ibm.jsse2.hb.a(hb.java:71)
at com.ibm.jsse2.gb.n(gb.java:142)
at com.ibm.jsse2.gb.a(gb.java:95)
at com.ibm.jsse2.tc.a(tc.java:244)
at com.ibm.jsse2.tc.g(tc.java:206)
at com.ibm.jsse2.tc.a(tc.java:165)
... 60 more
It seems you are using the IBMJSSE2 provider for SSL communication.
There is a very similar problem which is already fixed:
http://www-01.ibm.com/support/docview.wss?crawler=1&uid=swg1IV73472
APAR Name: IV73472: LARGE PRE-MASTER SECRET GENERATED FROM 2048 BIT DH KEY NOT DIGESTED IN TLSV1 AND TLSV1.1
Problem summary: When TLSv1 or TLSv1.1 was used, the large pre-master secret
generated from 2048 bit DH key was not properly hashed to derive
the master secret.
Local fix: Disable cipher suites which uses DH/DHE key exchange.
Problem conclusion: A fix is made to IBMJSSE2 and IBMJCE provider to handle large pre-master secret in TLSv1 and TLSv1.1
The associated Hursley RTC Problem Report is 93670 and 94644
The associated Austin CMVC defect is 116692 and 116765
JVMs affected: Java 6.0, Java 626, Java 7.0, Java 727 and Java 8
The fix was delivered for Java 6.0 SR16FP7, Java 626 SR8FP7,
Java 7.0 SR9FP10, Java 727 SR3FP10 and Java 8 SR1FP10
The affected jar are "ibmjsseprovider2.jar" and
"ibmjceprovider.jar".
The build level of ibmjceprovider.jar for the affected releases
is "20150604".
The build level of ibmjsseprovider2.jar for the affected
releases is "20150604"
Try upgrading the ibmjsseprovider2.jar library.
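If upgrading is not immediately possible, the "local fix" quoted above (disabling DH/DHE cipher suites) could be approximated by restricting the cipher suites the client uses. A hedged sketch for Tomcat, assuming plain RSA key-exchange suites are acceptable on your endpoint (the exact suite names available depend on the IBM JSSE2 provider, so verify them in your environment):
# e.g. in Tomcat's bin/setenv.sh; https.cipherSuites is the standard JSSE system property for HttpsURLConnection clients
export CATALINA_OPTS="$CATALINA_OPTS -Dhttps.cipherSuites=SSL_RSA_WITH_AES_128_CBC_SHA,SSL_RSA_WITH_3DES_EDE_CBC_SHA"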

IO exception while copying 38GB of file from Local to HDFS

I am trying to load a 38GB file from the local server to HDFS, and I am getting the error below during the file transfer:
hadoop fs -copyFromLocal filename.csv /
Exception in thread "main" org.apache.hadoop.fs.FSError: java.io.IOException: Input/output error
at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileInputStream.read(RawLocalFileSystem.java:161)
at java.io.BufferedInputStream.fill(BufferedInputStream.java:235)
at java.io.BufferedInputStream.read1(BufferedInputStream.java:275)
at java.io.BufferedInputStream.read(BufferedInputStream.java:334)
at java.io.DataInputStream.read(DataInputStream.java:149)
at org.apache.hadoop.fs.FSInputChecker.readFully(FSInputChecker.java:436)
at org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSInputChecker.readChunk(ChecksumFileSystem.java:252)
at org.apache.hadoop.fs.FSInputChecker.readChecksumChunk(FSInputChecker.java:276)
at org.apache.hadoop.fs.FSInputChecker.read1(FSInputChecker.java:228)
at org.apache.hadoop.fs.FSInputChecker.read(FSInputChecker.java:196)
at java.io.DataInputStream.read(DataInputStream.java:100)
at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:84)
at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:52)
at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:112)
at org.apache.hadoop.fs.shell.CommandWithDestination$TargetFileSystem.writeStreamToFile(CommandWithDestination.java:456)
at org.apache.hadoop.fs.shell.CommandWithDestination.copyStreamToTarget(CommandWithDestination.java:382)
at org.apache.hadoop.fs.shell.CommandWithDestination.copyFileToTarget(CommandWithDestination.java:319)
at org.apache.hadoop.fs.shell.CommandWithDestination.processPath(CommandWithDestination.java:254)
at org.apache.hadoop.fs.shell.CommandWithDestination.processPath(CommandWithDestination.java:239)
at org.apache.hadoop.fs.shell.Command.processPaths(Command.java:306)
at org.apache.hadoop.fs.shell.Command.processPathArgument(Command.java:278)
at org.apache.hadoop.fs.shell.CommandWithDestination.processPathArgument(CommandWithDestination.java:234)
at org.apache.hadoop.fs.shell.Command.processArgument(Command.java:260)
at org.apache.hadoop.fs.shell.Command.processArguments(Command.java:244)
at org.apache.hadoop.fs.shell.CommandWithDestination.processArguments(CommandWithDestination.java:211)
at org.apache.hadoop.fs.shell.CopyCommands$Put.processArguments(CopyCommands.java:263)
at org.apache.hadoop.fs.shell.Command.processRawArguments(Command.java:190)
at org.apache.hadoop.fs.shell.Command.run(Command.java:154)
at org.apache.hadoop.fs.FsShell.run(FsShell.java:287)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:84)
at org.apache.hadoop.fs.FsShell.main(FsShell.java:340)
Caused by: java.io.IOException: Input/output error
at java.io.FileInputStream.readBytes(Native Method)
at java.io.FileInputStream.read(FileInputStream.java:272)
at org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileInputStream.read(RawLocalFileSystem.java:154)
... 31 more
After issuing the copyFromLocal command I get the above exception.
Kindly help.
The issue got resolved; it was a disk issue.
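For reference, a hedged sketch of how such a local disk problem might be confirmed (the device name is a placeholder; smartctl requires root and the smartmontools package):
dmesg | grep -iE 'i/o error|ata|sd[a-z]'   # look for kernel-level read errors on the source disk
sudo smartctl -H /dev/sda                  # SMART health summary for the suspect device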