Commit ef068db5 by Daniele Venzano

Remove the pull step from CI pipeline and set core limits

parent a8f7bda7
Pipeline #4592 passed in 1 minute 7 seconds
Showing 9 additions and 18 deletions
@@ -7,6 +7,7 @@ stages:
 variables:
   VERSION: $CI_PIPELINE_ID
   REPOSITORY: zapps
+image: docker:latest
@@ -63,16 +64,6 @@ push:all:
   only:
     - master
-pull:all:
-  stage: pull
-  script:
-    - docker -H ${SWARM} ${SWARM_TLS_OPTIONS} pull ${DOCKER_REGISTRY}/${REPOSITORY}/spark2-submit:${VERSION}
-    - docker -H ${SWARM} ${SWARM_TLS_OPTIONS} pull ${DOCKER_REGISTRY}/${REPOSITORY}/spark2-master:${VERSION}
-    - docker -H ${SWARM} ${SWARM_TLS_OPTIONS} pull ${DOCKER_REGISTRY}/${REPOSITORY}/spark2-worker:${VERSION}
-    - docker -H ${SWARM} ${SWARM_TLS_OPTIONS} pull ${DOCKER_REGISTRY}/${REPOSITORY}/spark2-jupyter-notebook:${VERSION}
-  only:
-    - master
 deploy:json:
   stage: deploy
   script:
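For context, the removed pull:all: job pre-pulled the four freshly pushed images on the Swarm host ahead of the deploy stage. Below is a minimal sketch of how those image references are composed from the CI variables; the registry URL and pipeline ID are hypothetical placeholders, not values taken from this repository.

# Sketch only: DOCKER_REGISTRY and VERSION below are assumed example values;
# in the pipeline they come from CI settings and $CI_PIPELINE_ID.
DOCKER_REGISTRY = "registry.example.com"   # assumption, defined outside this file
REPOSITORY = "zapps"                       # from the variables block shown above
VERSION = "4592"                           # stands in for $CI_PIPELINE_ID

for name in ("spark2-submit", "spark2-master", "spark2-worker", "spark2-jupyter-notebook"):
    # Prints the fully qualified image reference that was being pulled.
    print("{}/{}/{}:{}".format(DOCKER_REGISTRY, REPOSITORY, name, VERSION))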
@@ -82,8 +82,8 @@ def spark_master_service(mem_limit):
             "max": mem_limit
         },
         "cores": {
-            'min': None,
-            'max': None
+            'min': 1,
+            'max': 1
         }
     },
     'ports': [
@@ -132,8 +132,8 @@ def spark_worker_service(count, mem_limit, cores):
             "max": mem_limit
         },
         "cores": {
-            'min': None,
-            'max': None
+            'min': cores,
+            'max': cores
         }
     },
     'ports': [],
@@ -174,8 +174,8 @@ def spark_jupyter_notebook_service(mem_limit, worker_mem_limit, hdfs_namenode):
             "max": mem_limit
         },
         "cores": {
-            'min': None,
-            'max': None
+            'min': 2,
+            'max': 2
         }
     },
     'ports': [
@@ -224,8 +224,8 @@ def spark_submit_service(mem_limit, worker_mem_limit, hdfs_namenode, command):
             "max": mem_limit
         },
         "cores": {
-            'min': None,
-            'max': None
+            'min': 2,
+            'max': 2
         }
     },
     'ports': [],
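For illustration, here is a minimal sketch of the resource block these service functions now produce, assuming plain Python dicts as in the hunks above. The wrapper function name and the "memory" key are assumptions made for this example only; just the "cores" block, its 'min'/'max' keys, and the mem_limit usage come from the diff.

def example_worker_resources(mem_limit, cores):
    # Sketch of the structure after this commit; surrounding key names are assumed.
    return {
        "memory": {            # assumed key name; the diff only shows its "max" entry
            "min": mem_limit,
            "max": mem_limit
        },
        "cores": {
            'min': cores,      # was None (no lower bound on cores)
            'max': cores       # was None (no upper bound); now pinned per service
        }
    }

# Example: a worker with 8 GiB of memory and 4 cores.
print(example_worker_resources(8 * 1024**3, 4))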