From d3838067c52e58da7e7f1e988af9261936f9c371 Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Wed, 14 Oct 2015 08:12:59 +0300 Subject: [PATCH 01/16] Fix .dockerignore file: Allow sending rd_ui/dist, remove rd_ui/nodemodules. --- .dockerignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.dockerignore b/.dockerignore index 829c2f4b1e..508222d445 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,4 @@ -rd_ui/dist/ rd_ui/.tmp/ +rd_ui/node_modules/ .git/ .vagrant/ From 26032875b7d4558c0e93fe5f57ee94dcae1a5dc8 Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Sun, 18 Oct 2015 14:41:17 +0300 Subject: [PATCH 02/16] Move Amazon Linux bootstrap into folder of its own --- setup/amazon_linux/README.md | 1 + .../bootstrap.sh} | 7 +- setup/amazon_linux/files/env | 6 + setup/amazon_linux/files/nginx_redash_site | 20 + setup/amazon_linux/files/postgres_apt.sh | 162 ++++ .../files/redash_supervisord_init} | 0 setup/amazon_linux/files/redis.conf | 785 ++++++++++++++++++ setup/amazon_linux/files/redis_init | 66 ++ .../files/supervisord.conf} | 0 9 files changed, 1043 insertions(+), 4 deletions(-) create mode 100644 setup/amazon_linux/README.md rename setup/{bootstrap_amazon_linux.sh => amazon_linux/bootstrap.sh} (95%) create mode 100644 setup/amazon_linux/files/env create mode 100644 setup/amazon_linux/files/nginx_redash_site create mode 100644 setup/amazon_linux/files/postgres_apt.sh rename setup/{redash_supervisord_init_for_amazon_linux => amazon_linux/files/redash_supervisord_init} (100%) create mode 100644 setup/amazon_linux/files/redis.conf create mode 100644 setup/amazon_linux/files/redis_init rename setup/{supervisord_for_amazon_linux.conf => amazon_linux/files/supervisord.conf} (100%) diff --git a/setup/amazon_linux/README.md b/setup/amazon_linux/README.md new file mode 100644 index 0000000000..d30254ea4e --- /dev/null +++ b/setup/amazon_linux/README.md @@ -0,0 +1 @@ +Bootstrap script for Amazon Linux AMI. 
*Not supported*, we recommend to use the Docker images instead. diff --git a/setup/bootstrap_amazon_linux.sh b/setup/amazon_linux/bootstrap.sh similarity index 95% rename from setup/bootstrap_amazon_linux.sh rename to setup/amazon_linux/bootstrap.sh index 9d6aa837b9..36da63cb21 100644 --- a/setup/bootstrap_amazon_linux.sh +++ b/setup/amazon_linux/bootstrap.sh @@ -2,8 +2,7 @@ set -eu REDASH_BASE_PATH=/opt/redash -FILES_BASE_URL=https://raw.githubusercontent.com/EverythingMe/redash/docs_setup/setup/files/ -FILE_BASE_URL_FOR_AMAZON_LINUX=https://raw.githubusercontent.com/EverythingMe/redash/master/setup/files/ +FILES_BASE_URL=https://raw.githubusercontent.com/EverythingMe/redash/master/setup/amazon_linux/files/ # Verify running as root: if [ "$(id -u)" != "0" ]; then if [ $# -ne 0 ]; then @@ -178,7 +177,7 @@ fi # Get supervisord startup script -sudo -u redash wget -O /opt/redash/supervisord/supervisord.conf $FILE_BASE_URL_FOR_AMAZON_LINUX"supervisord_for_amazon_linux.conf" +sudo -u redash wget -O /opt/redash/supervisord/supervisord.conf $FILES_BASE_URL"supervisord.conf" # install start-stop-daemon wget http://developer.axis.com/download/distribution/apps-sys-utils-start-stop-daemon-IR1_9_18-2.tar.gz @@ -187,7 +186,7 @@ cd apps/sys-utils/start-stop-daemon-IR1_9_18-2/ gcc start-stop-daemon.c -o start-stop-daemon cp start-stop-daemon /sbin/ -wget -O /etc/init.d/redash_supervisord $FILE_BASE_URL_FOR_AMAZON_LINUX"redash_supervisord_init_for_amazon_linux" +wget -O /etc/init.d/redash_supervisord $FILES_BASE_URL"redash_supervisord_init" add_service "redash_supervisord" # Nginx setup diff --git a/setup/amazon_linux/files/env b/setup/amazon_linux/files/env new file mode 100644 index 0000000000..7d468f86c3 --- /dev/null +++ b/setup/amazon_linux/files/env @@ -0,0 +1,6 @@ +export REDASH_STATIC_ASSETS_PATH="../rd_ui/dist/" +export REDASH_LOG_LEVEL="INFO" +export REDASH_REDIS_URL=redis://localhost:6379/1 +export REDASH_DATABASE_URL="postgresql://redash" +export 
REDASH_COOKIE_SECRET=veryverysecret +export REDASH_GOOGLE_APPS_DOMAIN= diff --git a/setup/amazon_linux/files/nginx_redash_site b/setup/amazon_linux/files/nginx_redash_site new file mode 100644 index 0000000000..19c21c0637 --- /dev/null +++ b/setup/amazon_linux/files/nginx_redash_site @@ -0,0 +1,20 @@ +upstream rd_servers { + server 127.0.0.1:5000; +} + +server { + listen 80 default; + + access_log /var/log/nginx/rd.access.log; + + gzip on; + gzip_types *; + gzip_proxied any; + + location / { + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rd_servers; + } +} \ No newline at end of file diff --git a/setup/amazon_linux/files/postgres_apt.sh b/setup/amazon_linux/files/postgres_apt.sh new file mode 100644 index 0000000000..35018d94ed --- /dev/null +++ b/setup/amazon_linux/files/postgres_apt.sh @@ -0,0 +1,162 @@ +#!/bin/sh + +# script to add apt.postgresql.org to sources.list + +# from command line +CODENAME="$1" +# lsb_release is the best interface, but not always available +if [ -z "$CODENAME" ]; then + CODENAME=$(lsb_release -cs 2>/dev/null) +fi +# parse os-release (unreliable, does not work on Ubuntu) +if [ -z "$CODENAME" -a -f /etc/os-release ]; then + . /etc/os-release + # Debian: VERSION="7.0 (wheezy)" + # Ubuntu: VERSION="13.04, Raring Ringtail" + CODENAME=$(echo $VERSION | sed -ne 's/.*(\(.*\)).*/\1/') +fi +# guess from sources.list +if [ -z "$CODENAME" ]; then + CODENAME=$(grep '^deb ' /etc/apt/sources.list | head -n1 | awk '{ print $3 }') +fi +# complain if no result yet +if [ -z "$CODENAME" ]; then + cat < /etc/apt/sources.list.d/pgdg.list < 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. 
+ +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis server but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################ GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize yes + +# When running daemonized, Redis writes a pid file in /var/run/redis.pid by +# default. You can specify a custom pid file location here. +pidfile /var/run/redis_6379.pid + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# By default Redis listens for connections from all the network interfaces +# available on the server. 
It is possible to listen to just one or multiple +# interfaces using the "bind" configuration directive, followed by one or +# more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 + +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +tcp-keepalive 0 + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile /var/log/redis_6379.log + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. 
Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. 
+# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir /var/lib/redis/6379 + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Redis slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. 
+# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. 
+# +# repl-ping-slave-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. +# 2) Master timeout from the point of view of slaves (data, pings). +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. +# +# The biggest the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. 
The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. +# +# This option does not GUARANTEES that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. +# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. 
This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). 
+# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys-random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are not suitable keys for eviction. 
+# +# At the date of writing this commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy volatile-lru + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default Redis will check three keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +# maxmemory-samples 3 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. 
+ +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead to wait for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log . Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. 
+# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. 
+# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceed the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write commands was +# already issue by the script but the user don't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). 
+# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enalbed at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# Event notification ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. 
+# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# by zero or multiple characters. The empty string means that notifications +# are disabled at all. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. 
+hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +list-max-ziplist-entries 512 +list-max-ziplist-value 64 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happens to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). 
The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# active rehashing the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply form time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). +# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# slave -> slave clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. 
+# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and slave clients, since +# subscribers and slaves receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform accordingly to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. 
+aof-rewrite-incremental-fsync yes diff --git a/setup/amazon_linux/files/redis_init b/setup/amazon_linux/files/redis_init new file mode 100644 index 0000000000..e20d856aaf --- /dev/null +++ b/setup/amazon_linux/files/redis_init @@ -0,0 +1,66 @@ +#!/bin/sh + +EXEC=/usr/local/bin/redis-server +CLIEXEC=/usr/local/bin/redis-cli +PIDFILE=/var/run/redis_6379.pid +CONF="/etc/redis/6379.conf" +REDISPORT="6379" +############### +# SysV Init Information +# chkconfig: - 58 74 +# description: redis_6379 is the redis daemon. +### BEGIN INIT INFO +# Provides: redis_6379 +# Required-Start: $network $local_fs $remote_fs +# Required-Stop: $network $local_fs $remote_fs +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Should-Start: $syslog $named +# Should-Stop: $syslog $named +# Short-Description: start and stop redis_6379 +# Description: Redis daemon +### END INIT INFO + + +case "$1" in + start) + if [ -f $PIDFILE ] + then + echo "$PIDFILE exists, process is already running or crashed" + else + echo "Starting Redis server..." + $EXEC $CONF + fi + ;; + stop) + if [ ! -f $PIDFILE ] + then + echo "$PIDFILE does not exist, process is not running" + else + PID=$(cat $PIDFILE) + echo "Stopping ..." + $CLIEXEC -p $REDISPORT shutdown + while [ -x /proc/${PID} ] + do + echo "Waiting for Redis to shutdown ..." + sleep 1 + done + echo "Redis stopped" + fi + ;; + status) + if [ ! 
-f $PIDFILE ] + then + echo 'Redis is not running' + else + echo "Redis is running ($(<$PIDFILE))" + fi + ;; + restart) + $0 stop + $0 start + ;; + *) + echo "Please use start, stop, restart or status as first argument" + ;; +esac diff --git a/setup/supervisord_for_amazon_linux.conf b/setup/amazon_linux/files/supervisord.conf similarity index 100% rename from setup/supervisord_for_amazon_linux.conf rename to setup/amazon_linux/files/supervisord.conf From f820148fc5a91f578f3f58ee0066e1534346b1ac Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Sun, 18 Oct 2015 14:42:25 +0300 Subject: [PATCH 03/16] Remove old Vagrant file --- setup/Vagrantfile_debian | 12 ------------ 1 file changed, 12 deletions(-) delete mode 100644 setup/Vagrantfile_debian diff --git a/setup/Vagrantfile_debian b/setup/Vagrantfile_debian deleted file mode 100644 index 8a66612a4e..0000000000 --- a/setup/Vagrantfile_debian +++ /dev/null @@ -1,12 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! -VAGRANTFILE_API_VERSION = "2" - -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - # Every Vagrant virtual environment requires a box to build off of. 
- config.vm.box = "box-cutter/debian76" - config.vm.provision "shell", path: "setup.sh" - config.vm.network "forwarded_port", guest: 80, host: 9001 -end From d2136d5f3e59c64576ebf53af4bd36bc322ce535 Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Sun, 18 Oct 2015 16:52:09 +0300 Subject: [PATCH 04/16] Change suffix of docker-compose file to .yml as suggested by docker-compose --- .gitignore | 2 +- docker-compose-example.yaml => docker-compose-example.yml | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename docker-compose-example.yaml => docker-compose-example.yml (100%) diff --git a/.gitignore b/.gitignore index ee41ef2430..57951982b3 100644 --- a/.gitignore +++ b/.gitignore @@ -21,4 +21,4 @@ venv dump.rdb # Docker related -docker-compose.yaml +docker-compose.yml diff --git a/docker-compose-example.yaml b/docker-compose-example.yml similarity index 100% rename from docker-compose-example.yaml rename to docker-compose-example.yml From be813b1817176955e3ffe7c101c15ccfbbefc635 Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Sun, 18 Oct 2015 16:53:05 +0300 Subject: [PATCH 05/16] Better arrangement of setup directory --- setup/docker/README.md | 1 + setup/docker/create_database.sh | 27 +++++++++++++++ setup/docker/env | 6 ++++ .../nginx}/Dockerfile | 0 .../nginx}/nginx.conf | 0 .../supervisord/supervisord.conf} | 32 +++++++++++------- setup/docker/test.sh | 3 ++ setup/docker_init_postgres.sh | 33 ------------------- setup/ubuntu/README.md | 1 + setup/{ => ubuntu}/bootstrap.sh | 3 +- setup/{ => ubuntu}/files/env | 0 setup/{ => ubuntu}/files/nginx_redash_site | 0 setup/{ => ubuntu}/files/postgres_apt.sh | 0 .../files/redash_supervisord_init | 0 setup/{ => ubuntu}/files/redis.conf | 0 setup/{ => ubuntu}/files/redis_init | 0 setup/{ => ubuntu}/files/supervisord.conf | 0 17 files changed, 59 insertions(+), 47 deletions(-) create mode 100644 setup/docker/README.md create mode 100644 setup/docker/create_database.sh create mode 100644 setup/docker/env rename 
setup/{files/docker-redash-nginx => docker/nginx}/Dockerfile (100%) rename setup/{files/docker-redash-nginx => docker/nginx}/nginx.conf (100%) rename setup/{files/supervisord_docker.conf => docker/supervisord/supervisord.conf} (58%) create mode 100644 setup/docker/test.sh delete mode 100644 setup/docker_init_postgres.sh create mode 100644 setup/ubuntu/README.md rename setup/{ => ubuntu}/bootstrap.sh (99%) rename setup/{ => ubuntu}/files/env (100%) rename setup/{ => ubuntu}/files/nginx_redash_site (100%) rename setup/{ => ubuntu}/files/postgres_apt.sh (100%) rename setup/{ => ubuntu}/files/redash_supervisord_init (100%) rename setup/{ => ubuntu}/files/redis.conf (100%) rename setup/{ => ubuntu}/files/redis_init (100%) rename setup/{ => ubuntu}/files/supervisord.conf (100%) diff --git a/setup/docker/README.md b/setup/docker/README.md new file mode 100644 index 0000000000..d591fe8a02 --- /dev/null +++ b/setup/docker/README.md @@ -0,0 +1 @@ +Files used for the Docker image creation. diff --git a/setup/docker/create_database.sh b/setup/docker/create_database.sh new file mode 100644 index 0000000000..78087f2376 --- /dev/null +++ b/setup/docker/create_database.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# This script assumes you're using docker-compose, with at least two images: redash for the redash instance +# and postgres for the postgres instance. +# +# This script is not idempotent and should be run once. + +run_redash="docker-compose run --rm redash" + +$run_redash /opt/redash/current/manage.py database create_tables + +# Create default admin user +$run_redash /opt/redash/current/manage.py users create --admin --password admin "Admin" "admin" + +# This is a hack to get the Postgres IP and PORT from the instance itself. 
+temp_env_file=`mktemp /tmp/pg_env.XXXXXX` +docker-compose run --rm postgres env > $temp_env_file +source $temp_env_file + +run_psql="docker-compose run --rm postgres psql -h $POSTGRES_PORT_5432_TCP_ADDR -p $POSTGRES_PORT_5432_TCP_PORT -U postgres" + +# Create redash_reader user. We don't use a strong password, as the instance supposed to be accesible only from the redash host. +$run_psql -c "CREATE ROLE redash_reader WITH PASSWORD 'redash_reader' NOCREATEROLE NOCREATEDB NOSUPERUSER LOGIN" +$run_psql -c "grant select(id,name,type) ON data_sources to redash_reader;" +$run_psql -c "grant select(id,name) ON users to redash_reader;" +$run_psql -c "grant select on activity_log, events, queries, dashboards, widgets, visualizations, query_results to redash_reader;" + +$run_redash /opt/redash/current/manage.py ds new -n "re:dash metadata" -t "pg" -o "{\"user\": \"redash_reader\", \"password\": \"redash_reader\", \"host\": \"postgres\", \"dbname\": \"postgres\"}" diff --git a/setup/docker/env b/setup/docker/env new file mode 100644 index 0000000000..ae640d3778 --- /dev/null +++ b/setup/docker/env @@ -0,0 +1,6 @@ +REDASH_STATIC_ASSETS_PATH=../rd_ui/dist/ +REDASH_LOG_LEVEL=INFO +REDASH_REDIS_URL=redis://redis:6379/0 +REDASH_DATABASE_URL=postgresql://postgres@postgres/postgres +REDASH_COOKIE_SECRET=veryverysecret +REDASH_GOOGLE_APPS_DOMAIN= diff --git a/setup/files/docker-redash-nginx/Dockerfile b/setup/docker/nginx/Dockerfile similarity index 100% rename from setup/files/docker-redash-nginx/Dockerfile rename to setup/docker/nginx/Dockerfile diff --git a/setup/files/docker-redash-nginx/nginx.conf b/setup/docker/nginx/nginx.conf similarity index 100% rename from setup/files/docker-redash-nginx/nginx.conf rename to setup/docker/nginx/nginx.conf diff --git a/setup/files/supervisord_docker.conf b/setup/docker/supervisord/supervisord.conf similarity index 58% rename from setup/files/supervisord_docker.conf rename to setup/docker/supervisord/supervisord.conf index 
6029b3b0d2..c0ee8bca65 100644 --- a/setup/files/supervisord_docker.conf +++ b/setup/docker/supervisord/supervisord.conf @@ -1,7 +1,7 @@ [supervisord] nodaemon=true -logfile=/opt/redash/logs/supervisord.log -pidfile=/opt/redash/supervisord/supervisord.pid +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 directory=/opt/redash/current [inet_http_server] @@ -11,38 +11,46 @@ port = 0.0.0.0:9001 supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface [program:redash_server] -command=/opt/redash/current/bin/run gunicorn -b 0.0.0.0:5000 --name redash -w 4 redash.wsgi:app +command=gunicorn -b 0.0.0.0:5000 --name redash -w 4 redash.wsgi:app directory=/opt/redash/current process_name=redash_server numprocs=1 priority=999 autostart=true autorestart=true -stdout_logfile=/opt/redash/logs/api.log -stderr_logfile=/opt/redash/logs/api_error.log +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +user=redash # There are two queue types here: one for ad-hoc queries, and one for the refresh of scheduled queries # (note that "scheduled_queries" appears only in the queue list of "redash_celery_scheduled"). # The default concurrency level for each is 2 (-c2), you can increase based on your machine's resources. 
- [program:redash_celery] -command=sudo -u redash /opt/redash/current/bin/run celery worker --app=redash.worker --beat -c2 -Qqueries,celery +command=celery worker --app=redash.worker --beat -c2 -Qqueries,celery directory=/opt/redash/current process_name=redash_celery numprocs=1 priority=999 autostart=true autorestart=true -stdout_logfile=/opt/redash/logs/celery.log -stderr_logfile=/opt/redash/logs/celery_error.log +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +user=redash [program:redash_celery_scheduled] -command=sudo -u redash /opt/redash/current/bin/run celery worker --app=redash.worker -c2 -Qscheduled_queries +command=celery worker --app=redash.worker -c1 -Qscheduled_queries directory=/opt/redash/current process_name=redash_celery_scheduled numprocs=1 priority=999 autostart=true autorestart=true -stdout_logfile=/opt/redash/logs/celery.log -stderr_logfile=/opt/redash/logs/celery_error.log +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +user=redash diff --git a/setup/docker/test.sh b/setup/docker/test.sh new file mode 100644 index 0000000000..0c74d40335 --- /dev/null +++ b/setup/docker/test.sh @@ -0,0 +1,3 @@ +PREFIX=docker-compose + +$PREFIX ps diff --git a/setup/docker_init_postgres.sh b/setup/docker_init_postgres.sh deleted file mode 100644 index d8b9fc1577..0000000000 --- a/setup/docker_init_postgres.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Create database / tables -pg_user_exists=0 -psql --host=postgres --username=postgres postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash'" | grep -q 1 || pg_user_exists=$? -if [ $pg_user_exists -ne 0 ]; then - echo "Creating redash postgres user & database." 
- createuser redash --username=postgres --host=postgres --no-superuser --no-createdb --no-createrole - createdb redash --username=postgres --host=postgres --owner=redash - - cd /opt/redash/current - ./manage.py database create_tables -fi - -# Create default admin user -cd /opt/redash/current -# TODO: make sure user created only once -# TODO: generate temp password and print to screen -./manage.py users create --admin --password admin "Admin" "admin" - -# Create re:dash read only pg user & setup data source -pg_user_exists=0 -psql --host=postgres --username=postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='redash_reader'" | grep -q 1 || pg_user_exists=$? -if [ $pg_user_exists -ne 0 ]; then - echo "Creating redash reader postgres user." - REDASH_READER_PASSWORD=$(pwgen -1) - psql --host=postgres --username=postgres -c "CREATE ROLE redash_reader WITH PASSWORD '$REDASH_READER_PASSWORD' NOCREATEROLE NOCREATEDB NOSUPERUSER LOGIN" - psql --host=postgres --username=postgres -c "grant select(id,name,type) ON data_sources to redash_reader;" redash - psql --host=postgres --username=postgres -c "grant select(id,name) ON users to redash_reader;" redash - psql --host=postgres --username=postgres -c "grant select on activity_log, events, queries, dashboards, widgets, visualizations, query_results to redash_reader;" redash - - cd /opt/redash/current - ./manage.py ds new -n "re:dash metadata" -t "pg" -o "{\"user\": \"redash_reader\", \"password\": \"$REDASH_READER_PASSWORD\", \"host\": \"localhost\", \"dbname\": \"redash\"}" -fi diff --git a/setup/ubuntu/README.md b/setup/ubuntu/README.md new file mode 100644 index 0000000000..854ff6674f --- /dev/null +++ b/setup/ubuntu/README.md @@ -0,0 +1 @@ +Bootstrap scripts for Ubuntu (tested on Ubuntu 14.04). 
diff --git a/setup/bootstrap.sh b/setup/ubuntu/bootstrap.sh similarity index 99% rename from setup/bootstrap.sh rename to setup/ubuntu/bootstrap.sh index 1d5df71e55..4daa884c8a 100644 --- a/setup/bootstrap.sh +++ b/setup/ubuntu/bootstrap.sh @@ -2,7 +2,7 @@ set -eu REDASH_BASE_PATH=/opt/redash -FILES_BASE_URL=https://raw.githubusercontent.com/EverythingMe/redash/docs_setup/setup/files/ +FILES_BASE_URL=https://raw.githubusercontent.com/EverythingMe/redash/master/setup/ubuntu_or_debian/files/ # Verify running as root: if [ "$(id -u)" != "0" ]; then @@ -176,4 +176,3 @@ rm /etc/nginx/sites-enabled/default wget -O /etc/nginx/sites-available/redash $FILES_BASE_URL"nginx_redash_site" ln -nfs /etc/nginx/sites-available/redash /etc/nginx/sites-enabled/redash service nginx restart - diff --git a/setup/files/env b/setup/ubuntu/files/env similarity index 100% rename from setup/files/env rename to setup/ubuntu/files/env diff --git a/setup/files/nginx_redash_site b/setup/ubuntu/files/nginx_redash_site similarity index 100% rename from setup/files/nginx_redash_site rename to setup/ubuntu/files/nginx_redash_site diff --git a/setup/files/postgres_apt.sh b/setup/ubuntu/files/postgres_apt.sh similarity index 100% rename from setup/files/postgres_apt.sh rename to setup/ubuntu/files/postgres_apt.sh diff --git a/setup/files/redash_supervisord_init b/setup/ubuntu/files/redash_supervisord_init similarity index 100% rename from setup/files/redash_supervisord_init rename to setup/ubuntu/files/redash_supervisord_init diff --git a/setup/files/redis.conf b/setup/ubuntu/files/redis.conf similarity index 100% rename from setup/files/redis.conf rename to setup/ubuntu/files/redis.conf diff --git a/setup/files/redis_init b/setup/ubuntu/files/redis_init similarity index 100% rename from setup/files/redis_init rename to setup/ubuntu/files/redis_init diff --git a/setup/files/supervisord.conf b/setup/ubuntu/files/supervisord.conf similarity index 100% rename from setup/files/supervisord.conf rename to 
setup/ubuntu/files/supervisord.conf From 1706658700b9d21acf1c9716d07175258daf16da Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Sun, 18 Oct 2015 16:54:40 +0300 Subject: [PATCH 06/16] Updates to Dockerfile: - No need to pg client anymore. - Fix path to supervisord.conf. --- Dockerfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index e317730eed..a34d6ca0a8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,11 +3,11 @@ MAINTAINER Di Wu # Ubuntu packages RUN apt-get update && \ - apt-get install -y python-pip python-dev curl build-essential pwgen libffi-dev sudo git-core wget && \ + apt-get install -y python-pip python-dev curl build-essential pwgen libffi-dev sudo git-core wget \ # Postgres client - apt-get -y install libpq-dev postgresql-client && \ + libpq-dev \ # Additional packages required for data sources: - apt-get install -y libssl-dev libmysqlclient-dev + libssl-dev libmysqlclient-dev # Users creation RUN useradd --system --comment " " --create-home redash @@ -34,7 +34,7 @@ RUN pip install -r requirements_all_ds.txt && \ # Setup supervisord RUN mkdir -p /opt/redash/supervisord && \ mkdir -p /opt/redash/logs && \ - cp /opt/redash/current/setup/files/supervisord_docker.conf /opt/redash/supervisord/supervisord.conf + cp /opt/redash/current/setup/docker/supervisord/supervisord.conf /opt/redash/supervisord/supervisord.conf # Fix permissions RUN chown -R redash /opt/redash From 920cc9c29fccb465164dfbf61940d142ba2e95ac Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Sun, 18 Oct 2015 23:03:09 +0300 Subject: [PATCH 07/16] update readme for ubuntu bootstrap --- setup/ubuntu/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/ubuntu/README.md b/setup/ubuntu/README.md index 854ff6674f..63b648e40f 100644 --- a/setup/ubuntu/README.md +++ b/setup/ubuntu/README.md @@ -1 +1 @@ -Bootstrap scripts for Ubuntu (tested on Ubuntu 14.04). 
+Bootstrap scripts for Ubuntu (tested on Ubuntu 14.04, although should work with 12.04). From d348565b12b122b06a7d67c02daa3930ae55f199 Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Sun, 18 Oct 2015 23:03:50 +0300 Subject: [PATCH 08/16] Remove latest_release_url.py - docker images will be created with current code base as context --- Dockerfile | 6 ------ setup/latest_release_url.py | 6 ------ 2 files changed, 12 deletions(-) delete mode 100644 setup/latest_release_url.py diff --git a/Dockerfile b/Dockerfile index a34d6ca0a8..d3a70b693e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,12 +16,6 @@ RUN useradd --system --comment " " --create-home redash RUN pip install -U setuptools && \ pip install supervisor==3.1.2 -# Download latest source and extract into /opt/redash/current -# COPY setup/latest_release_url.py /tmp/latest_release_url.py -# RUN wget $(python /tmp/latest_release_url.py) -O redash.tar.gz && \ -# mkdir -p /opt/redash/current && \ -# tar -C /opt/redash/current -xvf redash.tar.gz && \ -# rm redash.tar.gz COPY . 
/opt/redash/current # Setting working directory diff --git a/setup/latest_release_url.py b/setup/latest_release_url.py deleted file mode 100644 index f48bde92f2..0000000000 --- a/setup/latest_release_url.py +++ /dev/null @@ -1,6 +0,0 @@ -import urllib2 -import json - -latest = json.load(urllib2.urlopen("https://api.github.com/repos/EverythingMe/redash/releases/latest")) - -print latest['assets'][0]['browser_download_url'] From 1792da997ccd949ac88635424ca8d1c82005b1eb Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Mon, 19 Oct 2015 11:08:33 +0300 Subject: [PATCH 09/16] Update packer config --- setup/packer.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/packer.json b/setup/packer.json index f34f961e03..374d3e14c5 100644 --- a/setup/packer.json +++ b/setup/packer.json @@ -12,7 +12,7 @@ "access_key": "{{user `aws_access_key`}}", "secret_key": "{{user `aws_secret_key`}}", "region": "eu-west-1", - "source_ami": "ami-20cc9d57", + "source_ami": "ami-63a19214", "instance_type": "t2.micro", "ssh_username": "ubuntu", "ami_name": "redash-{{user `image_version`}}-eu-west-1" @@ -21,7 +21,7 @@ "provisioners": [ { "type": "shell", - "script": "bootstrap.sh", + "script": "ubuntu/bootstrap.sh", "execute_command": "{{ .Vars }} sudo -E -S bash '{{ .Path }}'", "environment_vars": ["REDASH_VERSION={{user `redash_version`}}"] } From a48edc6662236daf47db1687aaef4bd9881abdec Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Mon, 19 Oct 2015 11:09:05 +0300 Subject: [PATCH 10/16] Fix path in bootstrap script --- setup/ubuntu/bootstrap.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/ubuntu/bootstrap.sh b/setup/ubuntu/bootstrap.sh index 4daa884c8a..6fad7fa88c 100644 --- a/setup/ubuntu/bootstrap.sh +++ b/setup/ubuntu/bootstrap.sh @@ -2,7 +2,7 @@ set -eu REDASH_BASE_PATH=/opt/redash -FILES_BASE_URL=https://raw.githubusercontent.com/EverythingMe/redash/master/setup/ubuntu_or_debian/files/ 
+FILES_BASE_URL=https://raw.githubusercontent.com/EverythingMe/redash/docker/setup/ubuntu/files/ # Verify running as root: if [ "$(id -u)" != "0" ]; then From 140e02226993e2b1bd6f2d017a0c86d0c80bba8c Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Mon, 19 Oct 2015 22:28:51 +0300 Subject: [PATCH 11/16] Update CircleCI config to build images --- circle.yml | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/circle.yml b/circle.yml index 8221b38f86..32f24eb22f 100644 --- a/circle.yml +++ b/circle.yml @@ -1,15 +1,14 @@ machine: + services: + - docker node: version: - 0.10.24 + 0.12.4 python: version: 2.7.3 dependencies: pre: - - wget http://downloads.sourceforge.net/project/optipng/OptiPNG/optipng-0.7.5/optipng-0.7.5.tar.gz - - tar xvf optipng-0.7.5.tar.gz - - cd optipng-0.7.5; ./configure; make; sudo checkinstall -y; - make deps - pip install -r requirements_dev.txt - pip install -r requirements.txt @@ -26,6 +25,12 @@ deployment: branch: master commands: - make upload + docker: + branch: [master, docker] + commands: + - docker build -t everythingme/redash:$(./manage.py version) + - docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS + - docker push everythingme/redash:$(./manage.py version) notify: webhooks: - url: https://webhooks.gitter.im/e/895d09c3165a0913ac2f From bd07cf8963d575c89ed1d7dde7d22b62131e6950 Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Mon, 19 Oct 2015 23:02:53 +0300 Subject: [PATCH 12/16] Fix build step --- circle.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/circle.yml b/circle.yml index 32f24eb22f..7499f48a6c 100644 --- a/circle.yml +++ b/circle.yml @@ -28,9 +28,10 @@ deployment: docker: branch: [master, docker] commands: - - docker build -t everythingme/redash:$(./manage.py version) + - echo "rd_ui/app" >> .dockerignore + - docker build -t everythingme/redash:$(./manage.py version | sed -e "s/\+/./") . 
- docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS - - docker push everythingme/redash:$(./manage.py version) + - docker push everythingme/redash:$(./manage.py version | sed -e "s/\+/./") notify: webhooks: - url: https://webhooks.gitter.im/e/895d09c3165a0913ac2f From 2f65427d6eaefe72b859d24e02a087f361c8c649 Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Wed, 21 Oct 2015 08:54:56 +0300 Subject: [PATCH 13/16] Remove test file --- setup/docker/test.sh | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 setup/docker/test.sh diff --git a/setup/docker/test.sh b/setup/docker/test.sh deleted file mode 100644 index 0c74d40335..0000000000 --- a/setup/docker/test.sh +++ /dev/null @@ -1,3 +0,0 @@ -PREFIX=docker-compose - -$PREFIX ps From 9e1774701975ff21951cbb72e2a25f53034dfb7d Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Wed, 21 Oct 2015 16:06:10 +0300 Subject: [PATCH 14/16] Bootstrap files for Docker image --- setup/ubuntu_docker/README.md | 3 ++ setup/ubuntu_docker/bootstrap.sh | 35 +++++++++++++++++++++ setup/ubuntu_docker/files/env | 6 ++++ setup/ubuntu_docker/files/nginx_redash_site | 20 ++++++++++++ setup/ubuntu_docker/files/upstart.conf | 7 +++++ 5 files changed, 71 insertions(+) create mode 100644 setup/ubuntu_docker/README.md create mode 100644 setup/ubuntu_docker/bootstrap.sh create mode 100644 setup/ubuntu_docker/files/env create mode 100644 setup/ubuntu_docker/files/nginx_redash_site create mode 100644 setup/ubuntu_docker/files/upstart.conf diff --git a/setup/ubuntu_docker/README.md b/setup/ubuntu_docker/README.md new file mode 100644 index 0000000000..b72fda5e96 --- /dev/null +++ b/setup/ubuntu_docker/README.md @@ -0,0 +1,3 @@ +Bootstrap scripts for Ubuntu (tested on Ubuntu 14.04) using Docker images. + +Work in progress, not ready yet. 
diff --git a/setup/ubuntu_docker/bootstrap.sh b/setup/ubuntu_docker/bootstrap.sh new file mode 100644 index 0000000000..ddbb398d19 --- /dev/null +++ b/setup/ubuntu_docker/bootstrap.sh @@ -0,0 +1,35 @@ +#!/bin/bash +set -eu + +REDASH_BASE_PATH=/opt/redash_docker +# TODO: change this to master after merging: +FILES_BASE_URL=https://raw.githubusercontent.com/EverythingMe/redash/docker/setup/ubuntu_docker/files/ + +# Verify running as root: +if [ "$(id -u)" != "0" ]; then + if [ $# -ne 0 ]; then + echo "Failed running with sudo. Exiting." 1>&2 + exit 1 + fi + echo "This script must be run as root. Trying to run with sudo." + sudo bash $0 --with-sudo + exit 0 +fi + +# Base packages +apt-get update +# apt-get install -y python-pip python-dev nginx curl build-essential pwgen + +# Install Docker +# TODO: copy script into setup files +curl -sSL https://get.docker.com/ | sh + +# Get docker-compose file +wget $FILES_BASE_URL"docker-compose.yml" +# Add to .profile docker compose file location +# Setup upstart (?) 
for docker-compose +wget $FILES_BASE_URL"upstart.conf" -O /etc/init/redash-docker.conf +# Start everything +initctl reload-configuration +service redash-docker start +# Create database / tables diff --git a/setup/ubuntu_docker/files/env b/setup/ubuntu_docker/files/env new file mode 100644 index 0000000000..7d468f86c3 --- /dev/null +++ b/setup/ubuntu_docker/files/env @@ -0,0 +1,6 @@ +export REDASH_STATIC_ASSETS_PATH="../rd_ui/dist/" +export REDASH_LOG_LEVEL="INFO" +export REDASH_REDIS_URL=redis://localhost:6379/1 +export REDASH_DATABASE_URL="postgresql://redash" +export REDASH_COOKIE_SECRET=veryverysecret +export REDASH_GOOGLE_APPS_DOMAIN= diff --git a/setup/ubuntu_docker/files/nginx_redash_site b/setup/ubuntu_docker/files/nginx_redash_site new file mode 100644 index 0000000000..19c21c0637 --- /dev/null +++ b/setup/ubuntu_docker/files/nginx_redash_site @@ -0,0 +1,20 @@ +upstream rd_servers { + server 127.0.0.1:5000; +} + +server { + listen 80 default; + + access_log /var/log/nginx/rd.access.log; + + gzip on; + gzip_types *; + gzip_proxied any; + + location / { + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rd_servers; + } +} \ No newline at end of file diff --git a/setup/ubuntu_docker/files/upstart.conf b/setup/ubuntu_docker/files/upstart.conf new file mode 100644 index 0000000000..0de408bf9f --- /dev/null +++ b/setup/ubuntu_docker/files/upstart.conf @@ -0,0 +1,7 @@ +description "Start re:dash Docker containers" +start on filesystem and started docker +stop on runlevel [!2345] +respawn +script + docker-compose -f /home/ubuntu/docker-compose.yml up +end script From 5e8e141ae250b336bb925c3f31f3e306d5d3ac89 Mon Sep 17 00:00:00 2001 From: Arik Fraimovich Date: Wed, 21 Oct 2015 17:12:04 +0300 Subject: [PATCH 15/16] add docker-compose.yml --- setup/ubuntu_docker/files/docker-compose.yml | 22 ++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 
100644 setup/ubuntu_docker/files/docker-compose.yml
diff --git a/setup/ubuntu_docker/files/docker-compose.yml b/setup/ubuntu_docker/files/docker-compose.yml
new file mode 100644
index 0000000000..dd90ae23cd
--- /dev/null
+++ b/setup/ubuntu_docker/files/docker-compose.yml
@@ -0,0 +1,22 @@
+redash:
+  image: everythingme/redash
+  ports:
+    - "5000:5000"
+  links:
+    - redis
+    - postgres
+  env_file: env
+redis:
+  image: redis:2.8
+postgres:
+  image: postgres:9.3
+  volumes:
+    - /opt/postgres-data:/var/lib/postgresql/data
+nginx:
+  image: nginx
+  ports:
+    - "80:80"
+  volumes:
+    - "./setup/docker/nginx/nginx.conf:/etc/nginx/nginx.conf"
+  links:
+    - redash

From cc5122efbcdac8b5130fffe854453d586eb56d3b Mon Sep 17 00:00:00 2001
From: gissehel
Date: Tue, 27 Oct 2015 07:50:34 +0100
Subject: [PATCH 16/16] Simple db init with docker

---
 Dockerfile                           |   3 +
 redash/settings.py                   |  26 ++++++-
 setup/docker/redash_database_init.py | 109 +++++++++++++++++++++++++++
 setup/docker/redash_database_init.sh |  47 ++++++++++++
 4 files changed, 184 insertions(+), 1 deletion(-)
 create mode 100755 setup/docker/redash_database_init.py
 create mode 100755 setup/docker/redash_database_init.sh

diff --git a/Dockerfile b/Dockerfile
index d3a70b693e..5a46d97f52 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -33,6 +33,9 @@ RUN mkdir -p /opt/redash/supervisord && \
 # Fix permissions
 RUN chown -R redash /opt/redash
 
+# Expose init script as /init.sh
+RUN ln -s /opt/redash/current/setup/docker/redash_database_init.sh /init.sh
+
 # Expose ports
 EXPOSE 5000
 EXPOSE 9001
diff --git a/redash/settings.py b/redash/settings.py
index 8579588a2b..353ed70315 100644
--- a/redash/settings.py
+++ b/redash/settings.py
@@ -19,6 +19,23 @@ def parse_db_url(url):
 
     return connection
 
+def parse_db_args(name, host, port, user, password, url):
+    if url is not None:
+        return parse_db_url(url)
+    connection = {'threadlocals': True}
+
+    if name is not None:
+        connection['name'] = name
+    if host is not None:
+        connection['host'] = host
+    if port is not None:
+        connection['port'] = port
+    if user is not None:
+        connection['user'] = user
+    if password is not None:
+        connection['password'] = password
+
+    return connection
 
 def fix_assets_path(path):
     fullpath = os.path.join(os.path.dirname(__file__), path)
@@ -61,7 +78,14 @@ def all_settings():
 STATSD_PREFIX = os.environ.get('REDASH_STATSD_PREFIX', "redash")
 
 # Connection settings for re:dash's own database (where we store the queries, results, etc)
-DATABASE_CONFIG = parse_db_url(os.environ.get("REDASH_DATABASE_URL", "postgresql://postgres"))
+# Use either NAME+HOST+PORT+USER+PASSWORD or URL. URL overrides the other values
+DATABASE_NAME = os.environ.get("REDASH_DATABASE_NAME", "postgres")
+DATABASE_HOST = os.environ.get("REDASH_DATABASE_HOST", None)
+DATABASE_PORT = os.environ.get("REDASH_DATABASE_PORT", None)
+DATABASE_USER = os.environ.get("REDASH_DATABASE_USER", None)
+DATABASE_PASSWORD = os.environ.get("REDASH_DATABASE_PASSWORD", None)
+DATABASE_URL = os.environ.get("REDASH_DATABASE_URL", None)
+DATABASE_CONFIG = parse_db_args(DATABASE_NAME, DATABASE_HOST, DATABASE_PORT, DATABASE_USER, DATABASE_PASSWORD, DATABASE_URL)
 
 # Celery related settings
 CELERY_BROKER = os.environ.get("REDASH_CELERY_BROKER", REDIS_URL)
diff --git a/setup/docker/redash_database_init.py b/setup/docker/redash_database_init.py
new file mode 100755
index 0000000000..fe5c8124c7
--- /dev/null
+++ b/setup/docker/redash_database_init.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+import os
+import sys
+import psycopg2
+from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
+
+
+DATABASE_NAME = os.environ.get("REDASH_DATABASE_NAME", "postgres")
+DATABASE_HOST = os.environ.get("REDASH_DATABASE_HOST", None)
+DATABASE_PORT = os.environ.get("REDASH_DATABASE_PORT", None)
+DATABASE_USER = os.environ.get("REDASH_DATABASE_USER", None)
+DATABASE_PASSWORD = os.environ.get("REDASH_DATABASE_PASSWORD", None)
+DATABASE_USER_ADMIN = os.environ.get("REDASH_DATABASE_USER_ADMIN", None)
+DATABASE_PASSWORD_ADMIN = 
os.environ.get("REDASH_DATABASE_PASSWORD_ADMIN", None)
+
+connection = {}
+connection_admin = {}
+
+if DATABASE_NAME is not None:
+    connection['database'] = DATABASE_NAME
+if DATABASE_HOST is not None:
+    connection['host'] = DATABASE_HOST
+    connection_admin['host'] = DATABASE_HOST
+if DATABASE_PORT is not None:
+    connection['port'] = DATABASE_PORT
+    connection_admin['port'] = DATABASE_PORT
+if DATABASE_USER is not None:
+    connection['user'] = DATABASE_USER
+if DATABASE_PASSWORD is not None:
+    connection['password'] = DATABASE_PASSWORD
+if DATABASE_USER_ADMIN is not None:
+    connection_admin['user'] = DATABASE_USER_ADMIN
+if DATABASE_PASSWORD_ADMIN is not None:
+    connection_admin['password'] = DATABASE_PASSWORD_ADMIN
+
+def output(text):
+    sys.stdout.write(text+'\n')
+
+def error(text):
+    sys.stderr.write(text+'\n')
+
+def fatal(text):
+    error(text)
+    sys.exit(1)
+
+if sys.argv is None:
+    fatal("No argv exists, you're doomed. Python doesn't work as expected !")
+
+if len(sys.argv)<=1:
+    fatal("You must provide one argument (either create_db_and_role, check_database_init, create_reader_role or check_redash_metadata)")
+
+if DATABASE_NAME is None or DATABASE_NAME == '':
+    fatal("You must provide a non null database name")
+
+command = sys.argv[1]
+
+if command == 'create_db_and_role':
+    conn = psycopg2.connect(**connection_admin)
+    conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
+    cursor = conn.cursor()
+    cursor.execute("select * from pg_database where datname = %(database_name)s", { 'database_name': DATABASE_NAME })
+    databases = cursor.fetchall()
+    database_created = False
+    if len(databases) == 0:
+        cursor.execute("CREATE DATABASE %(database_name)s" % { 'database_name': DATABASE_NAME })
+        database_created = True
+    if DATABASE_USER is not None:
+        cursor.execute("select 1 from pg_roles where rolname = %(role_name)s", { 'role_name': DATABASE_USER })
+        role_created = False
+        roles = cursor.fetchall()
+        if len(roles) == 0:
+            if DATABASE_PASSWORD is None:
+                cursor.execute("CREATE ROLE %(role_name)s WITH LOGIN CREATEDB" % { 'role_name': DATABASE_USER })
+            else:
+                
cursor.execute("CREATE ROLE %(role_name)s WITH LOGIN CREATEDB PASSWORD '%(role_password)s'" % { 'role_name': DATABASE_USER, 'role_password': DATABASE_PASSWORD }) + role_created = True + if database_created or role_created: + cursor.execute("GRANT ALL PRIVILEGES ON DATABASE %(database_name)s to %(role_name)s" % { 'role_name': DATABASE_USER, 'database_name': DATABASE_NAME }) + +elif command == 'check_database_init': + conn = psycopg2.connect(**connection) + conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) + cursor = conn.cursor() + cursor.execute("select * from information_schema.tables where table_name='dashboards'") + tables = cursor.fetchall() + if len(tables) == 0: + sys.exit(2) + +elif command == 'create_reader_role': + conn = psycopg2.connect(**connection) + conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) + cursor = conn.cursor() + cursor.execute("select 1 from pg_roles where rolname = 'redash_reader'") + if len(cursor.fetchall()) == 0: + cursor.execute("CREATE ROLE redash_reader WITH PASSWORD 'redash_reader' NOCREATEROLE NOCREATEDB NOSUPERUSER LOGIN") + cursor.execute("grant select(id,name,type) ON data_sources to redash_reader") + cursor.execute("grant select(id,name) ON users to redash_reader") + +elif command == 'check_redash_metadata': + conn = psycopg2.connect(**connection) + conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) + cursor = conn.cursor() + cursor.execute("select 1 from data_sources where name = %(name)s", { 'name': 're:dash metadata' }) + if len(cursor.fetchall()) == 0: + sys.exit(2) + +else: + fatal("The argument [%s] isn't recognized as valid parameter for this script" % (command,)) + diff --git a/setup/docker/redash_database_init.sh b/setup/docker/redash_database_init.sh new file mode 100755 index 0000000000..47c5731f6e --- /dev/null +++ b/setup/docker/redash_database_init.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +workpath="/opt/redash/current" +manage="${workpath}/manage.py" 
+database_init="${workpath}/setup/docker/redash_database_init.py" + +[ -z "${REDASH_DEFAULT_ADMIN_NAME}" ] && REDASH_DEFAULT_ADMIN_NAME="Admin" +[ -z "${REDASH_DEFAULT_ADMIN_LOGIN}" ] && REDASH_DEFAULT_ADMIN_LOGIN="admin" +[ -z "${REDASH_DEFAULT_ADMIN_PASSWORD}" ] && REDASH_DEFAULT_ADMIN_PASSWORD="admin" + +# [ -z "${REDASH_DATABASE_HOST}" ] +[ -z "${REDASH_DATABASE_NAME}" ] && REDASH_DATABASE_NAME="postgres" + +final() { + exit $1 +} + +fail() { + echo "$1" >&2 + final 1 +} + +end() { + final 0 +} + +"${database_init}" "create_db_and_role" || fail "Something went wrong during database and role creation" + +if "${database_init}" "check_database_init" +then + echo "database seems to be already populated... doing nothing" +else + echo "database seems to be empty. Creating tables" + "${manage}" database create_tables + "${manage}" users create --admin --password "${REDASH_DEFAULT_ADMIN_PASSWORD}" "${REDASH_DEFAULT_ADMIN_NAME}" "${REDASH_DEFAULT_ADMIN_LOGIN}" +fi + +"${database_init}" "create_reader_role" + +if "${database_init}" "check_redash_metadata" +then + echo "re:dash metadata seems to already exists... doing nothing" +else + echo "re:dash metadata seems to not be present. Inserting enreg" + "${manage}" ds new -n "re:dash metadata" -t "pg" -o "{\"user\": \"redash_reader\", \"password\": \"redash_reader\", \"host\": \"${REDASH_DATABASE_HOST}\", \"dbname\": \"${REDASH_DATABASE_NAME}\"}" +fi +