Commit 0d10a66e authored by Daniele Venzano's avatar Daniele Venzano

Remove redis requirement, use plain filesystem

parent 15a01506
......@@ -29,10 +29,20 @@ For testing you can use also a single Docker instance, just set its endpoint in
To use Swarm, we rely on an undocumented network configuration in which the Docker bridges are connected to a physical interface, so that
containers on different hosts can talk to each other on the same layer-2 domain.
### Docker registry
### Images: Docker Hub Vs local Docker registry
Use the scripts in the [zoe-docker-images](https://github.com/DistributedSystemsGroup/zoe-docker-images) repository to create
and populate a private registry with Spark images. The images are quite standard and can be used also without Zoe, for examples
The images used by Zoe are available on the Docker Hub:
* https://hub.docker.com/r/zoerepo/spark-scala-notebook/
* https://hub.docker.com/r/zoerepo/spark-master/
* https://hub.docker.com/r/zoerepo/spark-worker/
* https://hub.docker.com/r/zoerepo/spark-submit/
Since the Docker Hub can be quite slow, we strongly suggest setting up a private registry. The `build_images.sh` script in the
[zoe-docker-images](https://github.com/DistributedSystemsGroup/zoe-docker-images) repository can help you populate the registry
bypassing the Hub.
The images are quite standard and can be used also without Zoe; for an example of how to do that,
see the `scripts/start_cluster.sh` script.
### Redis
......
......@@ -20,5 +20,6 @@ conf = {
'smtp_server': 'smtp.gmail.com',
'smtp_user': 'bigfoot.data@gmail.com',
'smtp_pass': open('smtp_pass.txt', 'r').read().strip(),
'web_server_name': 'bigfoot-m2.eurecom.fr'
'web_server_name': 'bigfoot-m2.eurecom.fr',
'history_path': "/var/lib/zoe/history"
}
import redis
import os
import logging
from common.state import Application, Execution
from common.configuration import conf
def _connect():
    # Legacy Redis connection helper: builds a client from the redis_*
    # keys in the global configuration.
    # NOTE(review): this commit moves binary storage to the plain
    # filesystem, so this helper (and the redis_* configuration keys it
    # reads) appears obsolete — confirm no caller remains before removing.
    server = conf["redis_server"]
    port = conf["redis_port"]
    db = conf["redis_db"]
    return redis.StrictRedis(host=server, port=port, db=db)
log = logging.getLogger(__name__)
def application_data_upload(application: Application, data: bytes) -> bool:
    """Persist the packed application binary under ``history_path``.

    :param application: application whose data is stored; only ``id`` is
        used to build the destination file name.
    :param data: the zip archive bytes to write.
    :returns: True on success (the annotation promised a bool, but the
        original fell through returning None).

    The leftover Redis round-trip was removed: this commit switches
    storage to the plain filesystem.
    """
    fpath = os.path.join(conf['history_path'], 'apps', 'app-{}.zip'.format(application.id))
    # Ensure the 'apps' directory exists before the first upload.
    os.makedirs(os.path.dirname(fpath), exist_ok=True)
    # 'with' guarantees the file handle is closed even on write errors.
    with open(fpath, "wb") as f:
        f.write(data)
    return True
def application_data_download(application: Application) -> bytes:
    """Read back the packed application binary from ``history_path``.

    :param application: application whose data is fetched; only ``id`` is
        used to build the source file name.
    :returns: the stored zip archive bytes.
    :raises OSError: if the file does not exist.

    Bug fixed: the leftover ``return r.get(key)`` from the Redis backend
    made the filesystem read unreachable dead code.
    """
    fpath = os.path.join(conf['history_path'], 'apps', 'app-{}.zip'.format(application.id))
    # 'with' guarantees the file handle is closed after reading.
    with open(fpath, "rb") as f:
        return f.read()
def application_data_delete(application: Application):
    """Delete the stored application binary, if present.

    :param application: application whose data is removed; only ``id`` is
        used to build the file name.

    A missing file is not an error: it is logged and ignored, keeping the
    operation idempotent. The leftover Redis delete was removed along with
    the rest of the Redis backend.
    """
    fpath = os.path.join(conf['history_path'], 'apps', 'app-{}.zip'.format(application.id))
    try:
        os.unlink(fpath)
    except OSError:
        log.warning("Binary data for application {} not found, cannot delete".format(application.id))
def logs_archive_upload(execution: Execution, data: bytes) -> bool:
    """Persist the zipped log archive of an execution under ``history_path``.

    :param execution: execution whose logs are stored; only ``id`` is used
        to build the destination file name.
    :param data: the zip archive bytes to write.
    :returns: True on success (the annotation promised a bool, but the
        original fell through returning None).

    The leftover Redis round-trip was removed: this commit switches
    storage to the plain filesystem.
    """
    fpath = os.path.join(conf['history_path'], 'logs', 'log-{}.zip'.format(execution.id))
    # Ensure the 'logs' directory exists before the first upload.
    os.makedirs(os.path.dirname(fpath), exist_ok=True)
    # 'with' guarantees the file handle is closed even on write errors.
    with open(fpath, "wb") as f:
        f.write(data)
    return True
def logs_archive_download(execution: Execution) -> bytes:
    """Read back the zipped log archive of an execution from ``history_path``.

    :param execution: execution whose logs are fetched; only ``id`` is used
        to build the source file name.
    :returns: the stored zip archive bytes.
    :raises OSError: if the file does not exist.

    Bug fixed: the leftover ``return r.get(key)`` from the Redis backend
    made the filesystem read unreachable dead code.
    """
    fpath = os.path.join(conf['history_path'], 'logs', 'log-{}.zip'.format(execution.id))
    # 'with' guarantees the file handle is closed after reading.
    with open(fpath, "rb") as f:
        return f.read()
def logs_archive_delete(execution: Execution):
    """Delete the stored log archive of an execution, if present.

    :param execution: execution whose logs are removed; only ``id`` is used
        to build the file name.

    A missing file is not an error: it is logged and ignored, keeping the
    operation idempotent. The leftover Redis delete was removed along with
    the rest of the Redis backend.
    """
    fpath = os.path.join(conf['history_path'], 'logs', 'log-{}.zip'.format(execution.id))
    try:
        os.unlink(fpath)
    except OSError:
        log.warning("Logs archive for execution {} not found, cannot delete".format(execution.id))
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment