Commit 3dba6c50 authored by Daniele Venzano's avatar Daniele Venzano

Add Hadoop and Spark directories to the PATH

parent 22b51d28
Pipeline #10868 passed with stages
in 1 minute and 12 seconds
......@@ -18,6 +18,17 @@
"max": 68719476736,
"step": 536870912
},
{
"kind": "service_count",
"name": "spark-worker",
"readable_name": "Spark worker count",
"description": "How many workers to spawn",
"type": "int",
"default": 4,
"min": 1,
"max": 16,
"step": 1
},
{
"kind": "environment",
"name": "SPARK_WORKER_RAM",
......@@ -35,7 +46,7 @@
"readable_name": "Spark worker cores",
"description": "Number of cores each worker has access to, must be equal to the core limit for the worker service",
"type": "int",
"default": 6,
"default": 2,
"min": 1,
"max": 16,
"step": 1
......
......@@ -2,5 +2,6 @@
# Log the inherited PYTHONPATH for debugging container startup.
# printf with a quoted expansion instead of `echo $PYTHONPATH`: the unquoted
# form word-splits/globs the value and `echo` would misparse a value starting
# with `-` (ShellCheck SC2086).
printf '%s\n' "$PYTHONPATH"
# Make the Hadoop and Spark CLI tools available to notebook users.
export PATH=/opt/hadoop/bin:/opt/spark/bin:$PATH
# Replace this shell with Jupyter Lab bound to all interfaces.
# NOTE(review): empty token + --allow-root disables auth — presumably safe
# only because access is mediated by the platform's proxy; confirm.
exec /opt/conda/bin/jupyter lab --no-browser --NotebookApp.token='' --allow-root --ip=0.0.0.0
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment