I'm trying to write a simple AWS Glue Scala job that reads from a table and writes to an S3 location. The source code is:
import com.amazonaws.services.glue.GlueContext
import com.amazonaws.services.glue.util.GlueArgParser
import com.amazonaws.services.glue.util.Job
import org.apache.spark.sql.functions._
import org.apache.spark.sql._
import org.apache.spark.SparkContext
import org.joda.time.format.DateTimeFormat
import org.joda.time.{DateTime, DateTimeZone}
import scala.collection.JavaConverters._
object GlueApp {
  def main(args: Array[String]): Unit = {
    val sc: SparkContext = new SparkContext()
    val glueContext: GlueContext = new GlueContext(sc)
    val glueArgs = GlueArgParser.getResolvedOptions(args, Seq("JOB_NAME").toArray)
    Job.init(glueArgs("JOB_NAME"), glueContext, glueArgs.asJava)
    val spark: SparkSession = glueContext.getSparkSession

    // Read the catalog table as a DynamicFrame
    val df = glueContext.getCatalogSource(database = "datastore", tableName = "datastore_tbl").getDynamicFrame()
    df.printSchema()
    df.show(20)

    // Convert to a DataFrame and write it out as Parquet
    df.toDF().write.format("parquet").save("s3://<bucket>/path/to")
    Job.commit()
  }
}
The CloudWatch logs show the following error:
2022-01-14 03:08:39,125 ERROR [main] glue.ProcessLauncher (Logging.scala:logError(73)):
Exception in User Class: org.apache.spark.SparkException : Job aborted due to stage failure: Task 0 in stage 0.0 failed 4 times, most recent failure: Lost task 0.3 in stage 0.0 (TID 3) (172.36.56.6 executor 1): java.io.IOException: Not a data file.
org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2465)
org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2414)
org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2413)
scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2413)
org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1124)
org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1124)
scala.Option.foreach(Option.scala:407)
org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1124)
org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2679)
org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2621)
org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2610)
org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:914)
org.apache.spark.SparkContext.runJob(SparkContext.scala:2238)
org.apache.spark.SparkContext.runJob(SparkContext.scala:2333)
org.apache.spark.rdd.RDD.$anonfun$fold$1(RDD.scala:1183)
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
org.apache.spark.rdd.RDD.withScope(RDD.scala:414)
org.apache.spark.rdd.RDD.fold(RDD.scala:1177)
org.apache.spark.rdd.RDD.$anonfun$treeAggregate$1(RDD.scala:1246)
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
org.apache.spark.rdd.RDD.withScope(RDD.scala:414)
org.apache.spark.rdd.RDD.treeAggregate(RDD.scala:1222)
org.apache.spark.sql.glue.util.SchemaUtils$.fromRDD(SchemaUtils.scala:76)
com.amazonaws.services.glue.DynamicFrame.recomputeSchema(DynamicFrame.scala:305)
com.amazonaws.services.glue.DynamicFrame.schema(DynamicFrame.scala:281)
com.amazonaws.services.glue.DynamicFrame.printSchema(DynamicFrame.scala:319)
GlueApp$.main(glue_test_job.scala:24)
GlueApp.main(glue_test_job.scala)
sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
java.lang.reflect.Method.invoke(Method.java:498)
com.amazonaws.services.glue.SparkProcessLauncherPlugin.invoke(ProcessLauncher.scala:47)
com.amazonaws.services.glue.SparkProcessLauncherPlugin.invoke$(ProcessLauncher.scala:47)
com.amazonaws.services.glue.ProcessLauncher$$anon$1.invoke(ProcessLauncher.scala:77)
com.amazonaws.services.glue.ProcessLauncher.launch(ProcessLauncher.scala:133)
com.amazonaws.services.glue.ProcessLauncher$.main(ProcessLauncher.scala:29)
com.amazonaws.services.glue.ProcessLauncher.main(ProcessLauncher.scala)
datastore_tbl is a table created in Glue Studio via Tables -> Add table -> "Add table using existing schema". The schema was created from an Avro structure.
I can query this table from AWS Athena and it works.
I'm not able to understand the "Not a data file" IOException. Any ideas?
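One check I'm considering (sketched below as a Glue PySpark job for brevity; the S3 path is a placeholder) is reading the table's underlying S3 location directly with an explicit Avro format, bypassing the catalog entry. The "Not a data file" message generally comes from the Avro reader when a file it opens does not start with the Avro header, so this should show whether the files themselves or the catalog table definition are the mismatch:
# Hypothetical isolation test: read the table's S3 prefix as Avro directly,
# without going through the Data Catalog. The path below is a placeholder.
from awsglue.context import GlueContext
from pyspark.context import SparkContext

glue_context = GlueContext(SparkContext.getOrCreate())

dyf = glue_context.create_dynamic_frame.from_options(
    connection_type="s3",
    connection_options={"paths": ["s3://<bucket>/<table-prefix>/"]},
    format="avro",
)
dyf.printSchema()
If this read fails the same way, the objects under that prefix are probably not valid Avro data files; if it succeeds, the catalog table's classification/SerDe is the more likely culprit.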
I am following this GitHub repo, adapting it to a text classification problem built on DistilBERT. So given a string of text, the model should return a label and a (probability) score.
Output from the model:
sentiment_input = {"inputs": "I love using the new Inference DLC."}
# sentiment_input= "I love using the new Inference DLC."
response = predictor.predict(data=sentiment_input)
print(response)
Output:
[{'label': 'LABEL_80', 'score': 0.008507220074534416}]
When I run the following:
# Create an EndpointInput
endpointInput = EndpointInput(
    endpoint_name=predictor.endpoint_name,
    probability_attribute="score",
    inference_attribute="label",
    # probability_threshold_attribute=0.5,
    destination="/opt/ml/processing/input_data",
)

# Create the monitoring schedule to execute every hour.
from sagemaker.model_monitor import CronExpressionGenerator

response = clinc_intent0911.create_monitoring_schedule(
    monitor_schedule_name=clincintent_monitor_schedule_name,
    endpoint_input=endpointInput,
    output_s3_uri=baseline_results_uri,
    problem_type="MulticlassClassification",
    ground_truth_input=ground_truth_upload_path,
    constraints=baseline_job.suggested_constraints(),
    schedule_cron_expression=CronExpressionGenerator.hourly(),
    enable_cloudwatch_metrics=True,
)
I get the following error:
---------------------------------------------------------------------------
ClientError Traceback (most recent call last)
<ipython-input-269-72e7049246fb> in <module>
10 constraints=baseline_job.suggested_constraints(),
11 schedule_cron_expression=CronExpressionGenerator.hourly(),
---> 12 enable_cloudwatch_metrics=True,
13 )
/opt/conda/lib/python3.6/site-packages/sagemaker/model_monitor/model_monitoring.py in create_monitoring_schedule(self, endpoint_input, ground_truth_input, problem_type, record_preprocessor_script, post_analytics_processor_script, output_s3_uri, constraints, monitor_schedule_name, schedule_cron_expression, enable_cloudwatch_metrics)
2615 network_config=self.network_config,
2616 )
-> 2617 self.sagemaker_session.sagemaker_client.create_model_quality_job_definition(**request_dict)
2618
2619 # create schedule
/opt/conda/lib/python3.6/site-packages/botocore/client.py in _api_call(self, *args, **kwargs)
355 "%s() only accepts keyword arguments." % py_operation_name)
356 # The "self" in this scope is referring to the BaseClient.
--> 357 return self._make_api_call(operation_name, kwargs)
358
359 _api_call.__name__ = str(py_operation_name)
/opt/conda/lib/python3.6/site-packages/botocore/client.py in _make_api_call(self, operation_name, api_params)
674 error_code = parsed_response.get("Error", {}).get("Code")
675 error_class = self.exceptions.from_code(error_code)
--> 676 raise error_class(parsed_response, operation_name)
677 else:
678 return parsed_response
ClientError: An error occurred (ValidationException) when calling the CreateModelQualityJobDefinition operation: Endpoint 'clinc-intent-analysis-0911' does not exist or is not valid
At this point my SageMaker endpoint is live, and I'm unable to figure out why it is "not valid".
SageMaker Model Monitor only works out of the box for tabular datasets at the moment (see the documentation), hence the "not valid" error message. To use it on NLP problems, you'd have to bring your own model monitor container (BYOC). Here is an example to get started: https://aws.amazon.com/blogs/machine-learning/detect-nlp-data-drift-using-custom-amazon-sagemaker-model-monitor/
and the associated GitHub repo is here: https://github.com/aws-samples/detecting-data-drift-in-nlp-using-amazon-sagemaker-custom-model-monitor
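For completeness, here is a rough sketch of what the BYOC route looks like with the SageMaker Python SDK. The image URI, role, and paths below are placeholders, and your custom container has to implement the monitoring logic itself, as in the linked example:
# Rough BYOC sketch -- all names/URIs below are placeholders, not from the question.
from sagemaker.model_monitor import ModelMonitor, MonitoringOutput, CronExpressionGenerator

custom_monitor = ModelMonitor(
    role=role,                           # SageMaker execution role
    image_uri=custom_monitor_image_uri,  # ECR URI of your own monitoring container
    instance_count=1,
    instance_type="ml.m5.xlarge",
)

custom_monitor.create_monitoring_schedule(
    monitor_schedule_name=clincintent_monitor_schedule_name,
    endpoint_input=predictor.endpoint_name,
    output=MonitoringOutput(
        source="/opt/ml/processing/output",   # path your container writes results to
        destination=baseline_results_uri,     # S3 prefix for those results
    ),
    schedule_cron_expression=CronExpressionGenerator.hourly(),
)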
I created a Dataproc cluster with the Jupyter initialization action. The image version I used is 1.4. I SSHed into both the master and worker nodes and ran python --version; both show Python 3.6.5 :: Anaconda, Inc.
However, when I try to run the example from Google,
Reading and writing data from BigQuery with Jupyter (PySpark kernel), it gives the following error:
Py4JJavaError Traceback (most recent call last)
<ipython-input-13-1cf15cbebfd5> in <module>
55
56 # Display 10 results.
---> 57 pprint.pprint(word_counts.take(10))
58
59
/usr/lib/spark/python/pyspark/rdd.py in take(self, num)
1358
1359 p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
-> 1360 res = self.context.runJob(self, takeUpToNumLeft, p)
1361
1362 items += res
/usr/lib/spark/python/pyspark/context.py in runJob(self, rdd, partitionFunc, partitions, allowLocal)
1049 # SparkContext#runJob.
1050 mappedRDD = rdd.mapPartitions(partitionFunc)
-> 1051 sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
1052 return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
1053
/usr/lib/spark/python/lib/py4j-0.10.7-src.zip/py4j/java_gateway.py in __call__(self, *args)
1255 answer = self.gateway_client.send_command(command)
1256 return_value = get_return_value(
-> 1257 answer, self.gateway_client, self.target_id, self.name)
1258
1259 for temp_arg in temp_args:
/usr/lib/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
61 def deco(*a, **kw):
62 try:
---> 63 return f(*a, **kw)
64 except py4j.protocol.Py4JJavaError as e:
65 s = e.java_exception.toString()
/usr/lib/spark/python/lib/py4j-0.10.7-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.\n".
--> 328 format(target_id, ".", name), value)
329 else:
330 raise Py4JError(
Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.runJob.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 24.0 failed 4 times, most recent failure: Lost task 0.3 in stage 24.0 (TID 563, test-1-w-0.c.abc.internal, executor 3): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 262, in main
("%d.%d" % sys.version_info[:2], version))
Exception: Python in worker has different version 2.7 than that in driver 3.6, PySpark cannot run with different minor versions.Please check environment variables PYSPARK_PYTHON and PYSPARK_DRIVER_PYTHON are correctly set.
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:452)
at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:588)
at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:571)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:406)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:1124)
at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:1130)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:125)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
at org.apache.spark.scheduler.Task.run(Task.scala:121)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1888)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1876)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1875)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1875)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
at scala.Option.foreach(Option.scala:257)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:926)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2109)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2058)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2047)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:737)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2082)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2101)
at org.apache.spark.api.python.PythonRDD$.runJob(PythonRDD.scala:153)
at org.apache.spark.api.python.PythonRDD.runJob(PythonRDD.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:238)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 262, in main
("%d.%d" % sys.version_info[:2], version))
Exception: Python in worker has different version 2.7 than that in driver 3.6, PySpark cannot run with different minor versions.Please check environment variables PYSPARK_PYTHON and PYSPARK_DRIVER_PYTHON are correctly set.
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:452)
at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:588)
at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:571)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:406)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:1124)
at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:1130)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:125)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
at org.apache.spark.scheduler.Task.run(Task.scala:121)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
... 1 more
I don't understand how the driver/worker Python version mismatch can happen. In addition, when I submit this job from the local command line, it works without a problem. Any help or suggestions would be appreciated.
The described issue should only appear when using initialization actions that were originally written for Dataproc 1.2 or older. On Dataproc image versions 1.3 or newer, you should use the Dataproc Optional Components to install Jupyter instead of the initialization action; this approach is more reliable and also ensures all the relevant version settings are consistent across the whole cluster:
gcloud dataproc clusters create cluster-name \
    --optional-components=JUPYTER \
    --image-version=1.4 \
    ... other flags
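Once the cluster is up, a quick sanity check from the Jupyter PySpark kernel (this snippet is just illustrative) is to confirm that the driver and the executors report the same Python version:
import sys

# Driver-side Python version
print("driver:", sys.version_info[:2])

# Executor-side Python version, collected from a worker task
print("executors:", sc.range(1).map(lambda _: tuple(sys.version_info[:2])).collect())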
I am new to the H2O machine learning platform and am having the issue below while trying to build models.
When I try to build 5 GBM models with a not-so-large dataset, I get the following error:
gbm Model Build Progress: [##################################################] 100%
gbm Model Build Progress: [##################################################] 100%
gbm Model Build Progress: [##################################################] 100%
gbm Model Build Progress: [##################################################] 100%
gbm Model Build Progress: [################# ] 34%
EnvironmentErrorTraceback (most recent call last)
<ipython-input-22-e74b34df2f1a> in <module>()
13 params_model={'x': features_pca_all, 'y': response, 'training_frame': train_holdout_pca_hex, 'validation_frame': validation_holdout_pca_hex, 'ntrees': ntree, 'max_depth':depth, 'min_rows': min_rows, 'learn_rate': 0.005}
14
---> 15 gbm_model=h2o.gbm(**params_model)
16
17 #store model
C:\Anaconda2\lib\site-packages\h2o\h2o.pyc in gbm(x, y, validation_x, validation_y, training_frame, model_id, distribution, tweedie_power, ntrees, max_depth, min_rows, learn_rate, nbins, nbins_cats, validation_frame, balance_classes, max_after_balance_size, seed, build_tree_one_node, nfolds, fold_column, fold_assignment, keep_cross_validation_predictions, score_each_iteration, offset_column, weights_column, do_future, checkpoint)
1058 parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
1059 parms["algo"]="gbm"
-> 1060 return h2o_model_builder.supervised(parms)
1061
1062
C:\Anaconda2\lib\site-packages\h2o\h2o_model_builder.pyc in supervised(kwargs)
28 algo = kwargs["algo"]
29 parms={k:v for k,v in kwargs.items() if (k not in ["x","y","validation_x","validation_y","algo"] and v is not None) or k=="validation_frame"}
---> 30 return supervised_model_build(x,y,vx,vy,algo,offsets,weights,fold_column,parms)
31
32 def unsupervised_model_build(x,validation_x,algo_url,kwargs): return _model_build(x,None,validation_x,None,algo_url,None,None,None,kwargs)
C:\Anaconda2\lib\site-packages\h2o\h2o_model_builder.pyc in supervised_model_build(x, y, vx, vy, algo, offsets, weights, fold_column, kwargs)
16 if not is_auto_encoder and y is None: raise ValueError("Missing response")
17 if vx is not None and vy is None: raise ValueError("Missing response validating a supervised model")
---> 18 return _model_build(x,y,vx,vy,algo,offsets,weights,fold_column,kwargs)
19
20 def supervised(kwargs):
C:\Anaconda2\lib\site-packages\h2o\h2o_model_builder.pyc in _model_build(x, y, vx, vy, algo, offsets, weights, fold_column, kwargs)
86 do_future = kwargs.pop("do_future") if "do_future" in kwargs else False
87 future_model = H2OModelFuture(H2OJob(H2OConnection.post_json("ModelBuilders/"+algo, **kwargs), job_type=(algo+" Model Build")), x)
---> 88 return future_model if do_future else _resolve_model(future_model, **kwargs)
89
90 def _resolve_model(future_model, **kwargs):
C:\Anaconda2\lib\site-packages\h2o\h2o_model_builder.pyc in _resolve_model(future_model, **kwargs)
89
90 def _resolve_model(future_model, **kwargs):
---> 91 future_model.poll()
92 if '_rest_version' in kwargs.keys(): model_json = H2OConnection.get_json("Models/"+future_model.job.dest_key, _rest_version=kwargs['_rest_version'])["models"][0]
93 else: model_json = H2OConnection.get_json("Models/"+future_model.job.dest_key)["models"][0]
C:\Anaconda2\lib\site-packages\h2o\model\model_future.pyc in poll(self)
8
9 def poll(self):
---> 10 self.job.poll()
11 self.x = None
C:\Anaconda2\lib\site-packages\h2o\job.pyc in poll(self)
39 time.sleep(sleep)
40 if sleep < 1.0: sleep += 0.1
---> 41 self._refresh_job_view()
42 running = self._is_running()
43 self._update_progress()
C:\Anaconda2\lib\site-packages\h2o\job.pyc in _refresh_job_view(self)
52
53 def _refresh_job_view(self):
---> 54 jobs = H2OConnection.get_json(url_suffix="Jobs/" + self.job_key)
55 self.job = jobs["jobs"][0] if "jobs" in jobs else jobs["job"][0]
56 self.status = self.job["status"]
C:\Anaconda2\lib\site-packages\h2o\connection.pyc in get_json(url_suffix, **kwargs)
410 if __H2OCONN__ is None:
411 raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
--> 412 return __H2OCONN__._rest_json(url_suffix, "GET", None, **kwargs)
413
414 @staticmethod
C:\Anaconda2\lib\site-packages\h2o\connection.pyc in _rest_json(self, url_suffix, method, file_upload_info, **kwargs)
419
420 def _rest_json(self, url_suffix, method, file_upload_info, **kwargs):
--> 421 raw_txt = self._do_raw_rest(url_suffix, method, file_upload_info, **kwargs)
422 return self._process_tables(raw_txt.json())
423
C:\Anaconda2\lib\site-packages\h2o\connection.pyc in _do_raw_rest(self, url_suffix, method, file_upload_info, **kwargs)
476
477 begin_time_seconds = time.time()
--> 478 http_result = self._attempt_rest(url, method, post_body, file_upload_info)
479 end_time_seconds = time.time()
480 elapsed_time_seconds = end_time_seconds - begin_time_seconds
C:\Anaconda2\lib\site-packages\h2o\connection.pyc in _attempt_rest(self, url, method, post_body, file_upload_info)
526
527 except requests.ConnectionError as e:
--> 528 raise EnvironmentError("h2o-py encountered an unexpected HTTP error:\n {}".format(e))
529
530 return http_result
EnvironmentError: h2o-py encountered an unexpected HTTP error:
('Connection aborted.', BadStatusLine("''",))
My hunch is that the cluster has only around 247.5 MB of memory, which is not enough to handle the model building, and hence the connection to H2O was aborted. Here is the code I used to initialize H2O:
# initialization of the h2o module
import subprocess as sp
import sys
import os.path as p
import h2o

# path of the h2o jar file
h2o_path = p.join(sys.prefix, "h2o_jar", "h2o.jar")

# subprocess to launch h2o
# the command can be further modified to include virtual machine parameters
sp.Popen("java -jar " + h2o_path)

# h2o.init() call to verify that the h2o launch is successful
h2o.init(ip="localhost", port=54321, size=1, start_h2o=False, enable_assertions=False,
         license=None, max_mem_size_GB=4, min_mem_size_GB=4, ice_root=None)
and here is the returned status table:
Any ideas on the above would be greatly appreciated!!
Just to close out this question, I'll restate the solution mentioned in the comments above. The user was able to resolve the issue by starting H2O from the command line with 1GB of memory using java -jar -Xmx1g h2o.jar, and then connected to the existing H2O server in Python using h2o.init().
It's not clear to me why h2o.init() was not creating the correct size cluster using the max_mem_size_GB argument. Regardless, this argument has been deprecated recently and replaced by another argument, max_mem_size, so it may no longer be an issue.
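For completeness, a minimal sketch of that workflow (the path to h2o.jar is illustrative):
# Start H2O manually with an explicit 1 GB heap (equivalent to: java -jar -Xmx1g h2o.jar),
# then attach to the running instance from Python instead of launching a new one.
import subprocess as sp
import time
import h2o

sp.Popen(["java", "-Xmx1g", "-jar", "/path/to/h2o.jar"])
time.sleep(10)  # give the JVM a moment to come up
h2o.init(ip="localhost", port=54321)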
I have an RDD object (bid) that I get from a map, and then I just do a simple bid.count(). I then receive the following error. Can someone tell me what this error might be? I tried to Google what it means but got no answer.
---------------------------------------------------------------------------
Py4JJavaError Traceback (most recent call last)
<ipython-input-10-0283df3b1938> in <module>()
----> 1 bid.count()
/home/username/spark-1.0.0-bin-hadoop2/python/pyspark/rdd.pyc in count(self)
706 3
707 """
--> 708 return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
709
710 def stats(self):
/home/username/spark-1.0.0-bin-hadoop2/python/pyspark/rdd.pyc in sum(self)
697 6.0
698 """
--> 699 return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
700
701 def count(self):
/home/username/spark-1.0.0-bin-hadoop2/python/pyspark/rdd.pyc in reduce(self, f)
617 if acc is not None:
618 yield acc
--> 619 vals = self.mapPartitions(func).collect()
620 return reduce(f, vals)
621
/home/username/spark-1.0.0-bin-hadoop2/python/pyspark/rdd.pyc in collect(self)
581 """
582 with _JavaStackTrace(self.context) as st:
--> 583 bytesInJava = self._jrdd.collect().iterator()
584 return list(self._collect_iterator_through_file(bytesInJava))
585
/home/username/spark-1.0.0-bin-hadoop2/python/lib/py4j-0.8.1-src.zip/py4j/java_gateway.py in __call__(self, *args)
535 answer = self.gateway_client.send_command(command)
536 return_value = get_return_value(answer, self.gateway_client,
--> 537 self.target_id, self.name)
538
539 for temp_arg in temp_args:
/home/username/spark-1.0.0-bin-hadoop2/python/lib/py4j-0.8.1-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
298 raise Py4JJavaError(
299 'An error occurred while calling {0}{1}{2}.\n'.
--> 300 format(target_id, '.', name), value)
301 else:
302 raise Py4JError(
Py4JJavaError: An error occurred while calling o148.collect.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 1.0:9 failed 4 times, most recent failure: TID 7 on host 08.bm-hadoope-datanode.dev.lax1 failed for unknown reason
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1033)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1017)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1015)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1015)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:633)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:633)
at scala.Option.foreach(Option.scala:236)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:633)
at org.apache.spark.scheduler.DAGSchedulerEventProcessActor$$anonfun$receive$2.applyOrElse(DAGScheduler.scala:1207)
at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
at akka.actor.ActorCell.invoke(ActorCell.scala:456)
at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
at akka.dispatch.Mailbox.run(Mailbox.scala:219)
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
I then join the bid table with another table and get a new RDD. That seems to go smoothly, but when I call first() on this new RDD, I receive the following error:
Py4JJavaError: An error occurred while calling o274.collectPartitions.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 9.0:6 failed 4 times, most recent failure: Exception failure in TID 32 on host 05.bm-hadoope-datanode.dev.lax1: java.io.IOException: Filesystem closed
org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:629)
org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:735)
org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:793)
java.io.DataInputStream.readFully(DataInputStream.java:195)
java.io.DataInputStream.readFully(DataInputStream.java:169)
parquet.hadoop.ParquetFileReader$ConsecutiveChunkList.readAll(ParquetFileReader.java:599)
parquet.hadoop.ParquetFileReader.readNextRowGroup(ParquetFileReader.java:360)
parquet.hadoop.InternalParquetRecordReader.checkRead(InternalParquetRecordReader.java:100)
parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:172)
parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:130)
org.apache.spark.rdd.NewHadoopRDD$$anon$1.hasNext(NewHadoopRDD.scala:122)
org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:39)
scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
org.apache.spark.api.python.PythonRDD$.writeIteratorToStream(PythonRDD.scala:293)
org.apache.spark.api.python.PythonRDD$WriterThread$$anonfun$run$1.apply$mcV$sp(PythonRDD.scala:200)
org.apache.spark.api.python.PythonRDD$WriterThread$$anonfun$run$1.apply(PythonRDD.scala:175)
org.apache.spark.api.python.PythonRDD$WriterThread$$anonfun$run$1.apply(PythonRDD.scala:175)
org.apache.spark.util.Utils$.logUncaughtExceptions(Utils.scala:1160)
org.apache.spark.api.python.PythonRDD$WriterThread.run(PythonRDD.scala:174)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1033)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1017)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1015)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1015)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:633)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:633)
at scala.Option.foreach(Option.scala:236)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:633)
at org.apache.spark.scheduler.DAGSchedulerEventProcessActor$$anonfun$receive$2.applyOrElse(DAGScheduler.scala:1207)
at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
at akka.actor.ActorCell.invoke(ActorCell.scala:456)
at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
at akka.dispatch.Mailbox.run(Mailbox.scala:219)
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)