From 2ebbe2be63c2ad9e8386bd4cbfe85f01ee9359a8 Mon Sep 17 00:00:00 2001 From: Arnaldo Cesco Date: Thu, 12 Dec 2024 15:39:19 +0100 Subject: [PATCH 1/4] Logs: use logfmt Show logs in logfmt format (now you can use humanlog, and even see the date of a log line, wohooo). Moreover, make sure via a runtime configuration that displayed logs are only from level info or higher. Signed-off-by: Arnaldo Cesco --- mix.exs | 1 + mix.lock | 1 + src/astarte_vmq_plugin.erl | 14 ++++++++++++++ 3 files changed, 16 insertions(+) diff --git a/mix.exs b/mix.exs index 71431f4..56594f5 100644 --- a/mix.exs +++ b/mix.exs @@ -91,6 +91,7 @@ defmodule Astarte.VMQ.Plugin.Mixfile do {:excoveralls, "~> 0.15", only: :test}, {:mississippi, github: "secomind/mississippi"}, {:pretty_log, "~> 0.1"}, + {:flatlog, github: "annopaolo/flatlog"}, {:dialyxir, "~> 1.4", only: [:dev, :ci], runtime: false}, {:xandra, "~> 0.14"} ] diff --git a/mix.lock b/mix.lock index 51721e3..cfa6327 100644 --- a/mix.lock +++ b/mix.lock @@ -18,6 +18,7 @@ "ex_rabbit_pool": {:git, "https://github.com/leductam/ex_rabbit_pool.git", "9951452ab51d36648b9a9d3373609e48d1379a0d", []}, "excoveralls": {:hex, :excoveralls, "0.16.1", "0bd42ed05c7d2f4d180331a20113ec537be509da31fed5c8f7047ce59ee5a7c5", [:mix], [{:hackney, "~> 1.16", [hex: :hackney, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "dae763468e2008cf7075a64cb1249c97cb4bc71e236c5c2b5e5cdf1cfa2bf138"}, "exprotobuf": {:hex, :exprotobuf, "1.2.17", "3003937da617f588a8fb63ebdd7b127a18d78d6502623c272076fd54c07c4de1", [:mix], [{:gpb, "~> 4.0", [hex: :gpb, repo: "hexpm", optional: false]}], "hexpm", "e07ec1e5ae6f8c1c8521450d5f6b658c8c700b1f34c70356e91ece0766f4361a"}, + "flatlog": {:git, "https://github.com/annopaolo/flatlog.git", "a176d09120bbe557d50a0e1b6eb0686c58d93e63", []}, "goldrush": {:hex, :goldrush, "0.1.9", "f06e5d5f1277da5c413e84d5a2924174182fb108dabb39d5ec548b27424cd106", [:rebar3], [], "hexpm", "99cb4128cffcb3227581e5d4d803d5413fa643f4eb96523f77d9e6937d994ceb"}, "hackney": {:hex, :hackney, "1.18.1", "f48bf88f521f2a229fc7bae88cf4f85adc9cd9bcf23b5dc8eb6a1788c662c4f6", [:rebar3], [{:certifi, "~>2.9.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~>6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~>1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~>1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.3.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~>1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~>0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "a4ecdaff44297e9b5894ae499e9a070ea1888c84afdd1fd9b7b2bc384950128e"}, "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~>0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"}, diff --git a/src/astarte_vmq_plugin.erl b/src/astarte_vmq_plugin.erl index 792d527..b0c30eb 100644 --- a/src/astarte_vmq_plugin.erl +++ b/src/astarte_vmq_plugin.erl @@ -28,6 +28,20 @@ start() -> % name as the application, which can't be an Elixir module due to naming % constraints. 
 {ok, _} = application:ensure_all_started(astarte_vmq_plugin, permanent),
+    ok = logger:update_handler_config(default, #{level => info}),
+    ok = logger:set_handler_config(default, formatter, {flatlog,
+        #{template =>
+            ["level=", level, " ",
+             "time=", time, " ",
+             "pid=", pid, " ",
+             "mfa=", mfa, " ",
+             "line=", line, " ",
+             {realm, ["realm=", realm], []}, " ",
+             {device_id, ["device_id=", device_id], []}, " ",
+             msg, "\n"
+            ],
+            single_line => true}
+        }),
     ok.
 
 stop() ->

From 9be7913faffcd30428262b3625cdd8c6df7237b7 Mon Sep 17 00:00:00 2001
From: Arnaldo Cesco
Date: Fri, 13 Dec 2024 17:17:25 +0100
Subject: [PATCH 2/4] Move to VerneMQ 2.0.1

The most significant change is the update of the Dockerfile and related
scripts. The reference used is v2.0.1 of vernemq/docker-vernemq (see
https://github.com/vernemq/docker-vernemq/tree/2.0.1); this is mostly a
verbatim copy. Some outdated configuration params have been removed from
vernemq.conf.

As previously done, we build both our plugin and VerneMQ and then put
them together in the image. We are still running as root.

Signed-off-by: Arnaldo Cesco
---
 Dockerfile                           |  66 +++--
 docker/bin/build.sh                  |  11 +
 docker/bin/join_cluster.sh           |  69 +++++
 docker/bin/rand_cluster_node.escript |   2 +-
 docker/bin/vernemq.sh                | 423 +++++++++++++++++++++------
 docker/files/vernemq.conf            |  45 ---
 6 files changed, 443 insertions(+), 173 deletions(-)
 create mode 100755 docker/bin/build.sh
 create mode 100755 docker/bin/join_cluster.sh

diff --git a/Dockerfile b/Dockerfile
index 747e78b..f09b453 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM hexpm/elixir:1.15.5-erlang-26.1-debian-bullseye-20230612-slim as builder
+FROM hexpm/elixir:1.15.5-erlang-26.1-debian-bullseye-20230612-slim AS builder
 
 # install build dependencies
 # --allow-releaseinfo-change allows to pull from 'oldstable'
@@ -8,7 +8,7 @@ RUN apt-get update --allow-releaseinfo-change -y \
 
 WORKDIR /build
 
-# Needed for VerneMQ 1.13.0
+# TODO check if it's still needed for VerneMQ 2.0.1
 RUN apt-get -qq update && apt-get -qq install libsnappy-dev libssl-dev
 
 # Let's start by building VerneMQ
 RUN git clone https://github.com/vernemq/vernemq.git
 
 RUN cd vernemq && \
-    # Check out latest master
-    git checkout 1cc57fa60f50b57784f2b56f79e09d5748aa90f7 && \
+    # Check out the v2.0.1 release
+    git checkout v2.0.1 && \
     make rel && \
     cd ..
 
@@ -50,11 +50,9 @@ RUN cp astarte_vmq_plugin/priv/astarte_vmq_plugin.schema vernemq/_build/default/
 
 # Copy configuration files here - mainly because we want to keep the target image as small as possible
 # and avoid useless layers.
-COPY docker/files/vm.args /build/vernemq/_build/default/rel/vernemq/etc/ -COPY docker/files/vernemq.conf /build/vernemq/_build/default/rel/vernemq/etc/ -COPY docker/bin/rand_cluster_node.escript /build/vernemq/_build/default/rel/vernemq/bin/ -COPY docker/bin/vernemq.sh /build/vernemq/_build/default/rel/vernemq/bin/ -RUN chmod +x /build/vernemq/_build/default/rel/vernemq/bin/vernemq.sh +COPY docker/files/vm.args vernemq/_build/default/rel/vernemq/etc/ +COPY docker/files/vernemq.conf vernemq/_build/default/rel/vernemq/etc/ +COPY docker/bin/rand_cluster_node.escript vernemq/_build/default/rel/vernemq/bin/ # Note: it is important to keep Debian versions in sync, or incompatibilities between libcrypto will happen FROM debian:bullseye-slim @@ -65,38 +63,44 @@ ENV LANG C.UTF-8 # We have to redefine this here since it goes out of scope for each build stage ARG BUILD_ENV=prod +# Install some VerneMQ scripts dependencies +RUN apt-get -qq update && apt-get -qq install bash procps openssl iproute2 curl jq libsnappy-dev net-tools nano + # We need SSL, curl, iproute2 and jq - and to ensure /etc/ssl/astarte -RUN apt-get -qq update && apt-get -qq install libssl1.1 curl jq iproute2 netcat libsnappy1v5 && apt-get clean && mkdir -p /etc/ssl/astarte +# TODO some of these might not be needed anymore +RUN apt-get -qq update && apt-get -qq install libssl1.1 curl jq iproute2 netcat && apt-get clean && mkdir -p /etc/ssl/astarte -# Copy our built stuff (both are self-contained with their ERTS release) -COPY --from=builder /build/vernemq/_build/default/rel/vernemq /opt/vernemq/ -COPY --from=builder /build/astarte_vmq_plugin/_build/$BUILD_ENV/rel/astarte_vmq_plugin /opt/astarte_vmq_plugin/ +ENV PATH="/opt/vernemq/bin:$PATH" -# Add the wait-for utility -RUN cd /usr/bin && curl -O https://raw.githubusercontent.com/eficode/wait-for/master/wait-for && chmod +x wait-for && cd - +COPY --from=builder /build/astarte_vmq_plugin/docker/bin/vernemq.sh /usr/sbin/start_vernemq +COPY --from=builder /build/astarte_vmq_plugin/docker/bin/join_cluster.sh /usr/sbin/join_cluster -# MQTT -EXPOSE 1883 +RUN chmod +x /usr/sbin/start_vernemq +RUN chmod +x /usr/sbin/join_cluster -# MQTT for Reverse Proxy -EXPOSE 1885 +# Copy our built stuff (both are self-contained with their ERTS release) +COPY --from=builder /build/vernemq/_build/default/rel/vernemq /opt/vernemq/ -# MQTT/SSL -EXPOSE 8883 +RUN ln -s /opt/vernemq/etc /etc/vernemq && \ + ln -s /opt/vernemq/data /var/lib/vernemq && \ + ln -s /opt/vernemq/log /var/log/vernemq -# VerneMQ Message Distribution -EXPOSE 44053 +COPY --from=builder /build/astarte_vmq_plugin/_build/$BUILD_ENV/rel/astarte_vmq_plugin /opt/astarte_vmq_plugin/ -# EPMD - Erlang Port Mapper Daemon -EXPOSE 4369 +# Ports +# 1883 MQTT +# 8883 MQTT/SSL +# 8080 MQTT WebSockets +# 44053 VerneMQ Message Distribution +# 4369 EPMD - Erlang Port Mapper Daemon +# 8888 Health, API, Prometheus Metrics +# 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 Specific Distributed Erlang Port Range -# Specific Distributed Erlang Port Range -EXPOSE 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 +EXPOSE 1883 8883 8080 44053 4369 8888 \ + 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 -# Prometheus Metrics -EXPOSE 8888 +VOLUME ["/opt/vernemq/log", "/opt/vernemq/data", "/opt/vernemq/etc"] -# Expose port for webroot ACME verification (in case) -EXPOSE 80 +HEALTHCHECK CMD vernemq ping | grep -q pong -CMD ["/opt/vernemq/bin/vernemq.sh"] +CMD ["start_vernemq"] diff --git a/docker/bin/build.sh b/docker/bin/build.sh new file mode 100755 index 
0000000..b8abec9 --- /dev/null +++ b/docker/bin/build.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +set -e + +if [ "$1" = "swc" ] +then + make swc + mv _build/swc/rel/vernemq release +else + make rel + mv _build/default/rel/vernemq release +fi diff --git a/docker/bin/join_cluster.sh b/docker/bin/join_cluster.sh new file mode 100755 index 0000000..22ca16c --- /dev/null +++ b/docker/bin/join_cluster.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +SECRETS_KUBERNETES_DIR="/var/run/secrets/kubernetes.io/serviceaccount" +DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME=${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME:-cluster.local} + +if [ -d "${SECRETS_KUBERNETES_DIR}" ] ; then + # Let's get the namespace if it isn't set + DOCKER_VERNEMQ_KUBERNETES_NAMESPACE=${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE:-$(cat "${SECRETS_KUBERNETES_DIR}/namespace")} +fi + +insecure="" +if env | grep "DOCKER_VERNEMQ_KUBERNETES_INSECURE" -q; then + echo "Using curl with \"--insecure\" argument to access kubernetes API without matching SSL certificate" + insecure="--insecure" +fi + +function k8sCurlGet () { + local urlPath=$1 + + local hostname="kubernetes.default.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}" + local certsFile="${SECRETS_KUBERNETES_DIR}/ca.crt" + local token=$(cat ${SECRETS_KUBERNETES_DIR}/token) + local header="Authorization: Bearer ${token}" + local url="https://${hostname}/${urlPath}" + + curl -sS ${insecure} --cacert ${certsFile} -H "${header}" ${url} \ + || ( echo "### Error on accessing URL ${url}" ) +} + +try_join() { + local exit_code=0 + if env | grep "DOCKER_VERNEMQ_DISCOVERY_KUBERNETES" -q; then + # Let's set our nodename correctly + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#list-pod-v1-core + podList=$(k8sCurlGet "api/v1/namespaces/${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}/pods?labelSelector=${DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR}") + kube_pod_names=$(echo ${podList} | jq '.items[].spec.hostname' | sed 's/"//g' | tr '\n' ' ' | sed 's/ *$//') + VERNEMQ_KUBERNETES_SUBDOMAIN=${DOCKER_VERNEMQ_KUBERNETES_SUBDOMAIN:-$(echo ${podList} | jq '.items[0].spec.subdomain' | tr '\n' '"' | sed 's/"//g')} + + for kube_pod_name in $kube_pod_names; do + if [[ $kube_pod_name == "null" ]]; then + echo "Kubernetes discovery selected, but no pods found. Maybe we're the first?" + echo "Anyway, we won't attempt to join any cluster." + exit 0 + fi + + if [[ $kube_pod_name != "$MY_POD_NAME" ]]; then + discoveryHostname="${kube_pod_name}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}" + echo "Will join an existing Kubernetes cluster with discovery node at ${discoveryHostname}" + vmq-admin cluster show | grep "VerneMQ@${discoveryHostname}" > /dev/null || exit_code=$? + if [ $exit_code -eq 0 ]; then + echo "We have already joined the cluster - no extra work required." + exit 0 + else + echo "We have yet to join the cluster - attempting manual join..." + vmq-admin cluster join discovery-node="VerneMQ@${discoveryHostname}" + sleep 2 + fi + break + fi + done + else + exit 0 + fi +} + +while true +do + try_join +done; \ No newline at end of file diff --git a/docker/bin/rand_cluster_node.escript b/docker/bin/rand_cluster_node.escript index e17d419..3b41200 100644 --- a/docker/bin/rand_cluster_node.escript +++ b/docker/bin/rand_cluster_node.escript @@ -15,4 +15,4 @@ main([ThisNode]) -> io:format(L); false -> io:format("") - end. \ No newline at end of file + end. 
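For reference, join_cluster.sh above keeps re-running try_join until it either
joins an existing cluster, finds nothing to join, or exits because discovery is
disabled. The heart of each attempt is an idempotency check against the current
cluster view; a minimal sketch of that check follows, where the discovery
hostname is an illustrative value only:

    # Sketch: join only if the discovery node is not already in our cluster view.
    discoveryHostname="vernemq-0.vernemq.astarte.svc.cluster.local"  # example value
    if vmq-admin cluster show | grep -q "VerneMQ@${discoveryHostname}"; then
        echo "We have already joined the cluster - no extra work required."
    else
        vmq-admin cluster join discovery-node="VerneMQ@${discoveryHostname}"
    fi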
diff --git a/docker/bin/vernemq.sh b/docker/bin/vernemq.sh
index da84373..986ac59 100644
--- a/docker/bin/vernemq.sh
+++ b/docker/bin/vernemq.sh
@@ -1,70 +1,246 @@
 #!/usr/bin/env bash
 
-IP_ADDRESS=$(ip -4 addr show ${DOCKER_NET_INTERFACE:-eth0} | grep -oE '[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}' | sed -e "s/^[[:space:]]*//" | head -n 1)
+NET_INTERFACE=$(route | grep '^default' | grep -o '[^ ]*$')
+NET_INTERFACE=${DOCKER_NET_INTERFACE:-${NET_INTERFACE}}
+IP_ADDRESS=$(ip -4 addr show ${NET_INTERFACE} | grep -oE '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | sed -e "s/^[[:space:]]*//" | head -n 1)
 IP_ADDRESS=${DOCKER_IP_ADDRESS:-${IP_ADDRESS}}
 
+VERNEMQ_ETC_DIR="/opt/vernemq/etc"
+VERNEMQ_VM_ARGS_FILE="${VERNEMQ_ETC_DIR}/vm.args"
+VERNEMQ_CONF_FILE="${VERNEMQ_ETC_DIR}/vernemq.conf"
+VERNEMQ_CONF_LOCAL_FILE="${VERNEMQ_ETC_DIR}/vernemq.conf.local"
+
+SECRETS_KUBERNETES_DIR="/var/run/secrets/kubernetes.io/serviceaccount"
+
+# Function to check Istio readiness
+istio_health() {
+    cmd=$(curl -s http://localhost:15021/healthz/ready > /dev/null)
+    status=$?
+    return $status
+}
+
+configure_vernemq_listeners() {
+    local base_port=1883
+    declare -A listener_ports
+
+    # Scan environment variables for VerneMQ listeners
+    while read -r line; do
+        if [[ $line == DOCKER_VERNEMQ_LISTENER__* ]]; then
+            # Extract the type, name, and property from the environment variable
+            IFS='=' read -r key value <<< "$line"
+            IFS='__' read -r _ prefix type name property <<< "$key"
+
+            # Normalize type to lowercase for consistent handling
+            type=${type,,}
+
+            # Check if this listener is being added/enabled without specifying a port
+            if [[ -z $property && $value == "true" ]]; then
+                # Assign default or auto-incremented port number
+                if ! [[ ${listener_ports[$type.$name]+_} ]]; then
+                    base_port=$((base_port + 1))
+                    listener_ports[$type.$name]=$base_port
+                fi
+                echo "listener.$type.$name = ${IP_ADDRESS}:${listener_ports[$type.$name]}" >> ${VERNEMQ_CONF_FILE}
+            elif [[ $property == "PORT" ]]; then
+                # Specific port defined, use it
+                listener_ports[$type.$name]=$value
+                echo "listener.$type.$name = ${IP_ADDRESS}:${value}" >> ${VERNEMQ_CONF_FILE}
+            fi
+        fi
+    done < <(env)
+
+    # Ensure all initialized listeners are configured, even those without a PORT env var explicitly set
+    for listener in "${!listener_ports[@]}"; do
+        IFS='.' read -r type name <<< "$listener"
+        echo "listener.$type.$name = ${IP_ADDRESS}:${listener_ports[$listener]}" >> ${VERNEMQ_CONF_FILE}
+    done
+}
+
+configure_vernemq_listeners_help() {
+    echo "Usage: configure_vernemq_listeners"
+    echo "Scans environment variables to configure VerneMQ listeners."
+    echo "Environment variables should follow the pattern:"
+    echo "DOCKER_VERNEMQ_LISTENER__<TYPE>__<NAME>[__PORT]=<value>"
+    echo "Examples:"
+    echo "DOCKER_VERNEMQ_LISTENER__TCP__EXTERNAL=true"
+    echo "DOCKER_VERNEMQ_LISTENER__WS__DEFAULT__PORT=8080"
+    echo ""
+    echo "The function assigns an IP address and ports to the VerneMQ listeners based on these variables."
+    echo "Listeners without a specified port will have ports auto-assigned starting from 1884."
+}
+
+
+# Ensure we have all files and needed directory write permissions
+if [ ! -d ${VERNEMQ_ETC_DIR} ]; then
+    echo "Configuration directory at ${VERNEMQ_ETC_DIR} does not exist, exiting" >&2
+    exit 1
+fi
+if [ ! -f ${VERNEMQ_VM_ARGS_FILE} ]; then
+    echo "ls -l ${VERNEMQ_ETC_DIR}"
+    ls -l ${VERNEMQ_ETC_DIR}
+    echo "###" >&2
+    echo "### Configuration file ${VERNEMQ_VM_ARGS_FILE} does not exist, exiting" >&2
+    echo "###" >&2
+    exit 1
+fi
+if [ !
-w ${VERNEMQ_VM_ARGS_FILE} ]; then + echo "# whoami" + whoami + echo "# ls -l ${VERNEMQ_ETC_DIR}" + ls -l ${VERNEMQ_ETC_DIR} + echo "###" >&2 + echo "### Configuration file ${VERNEMQ_VM_ARGS_FILE} exists, but there are no write permissions! Exiting." >&2 + echo "###" >&2 + exit 1 +fi +if [ ! -s ${VERNEMQ_VM_ARGS_FILE} ]; then + echo "ls -l ${VERNEMQ_ETC_DIR}" + ls -l ${VERNEMQ_ETC_DIR} + echo "###" >&2 + echo "### Configuration file ${VERNEMQ_VM_ARGS_FILE} is empty! This will not work." >&2 + echo "### Exiting now." >&2 + echo "###" >&2 + exit 1 +fi + # Ensure the Erlang node name is set correctly if env | grep "DOCKER_VERNEMQ_NODENAME" -q; then - sed -i.bak -r "s/-name VerneMQ@.+/-name VerneMQ@${DOCKER_VERNEMQ_NODENAME}/" /opt/vernemq/etc/vm.args + sed -i.bak -r "s/-name VerneMQ@.+/-name VerneMQ@${DOCKER_VERNEMQ_NODENAME}/" ${VERNEMQ_VM_ARGS_FILE} else - sed -i.bak -r "s/-name VerneMQ@.+/-name VerneMQ@${IP_ADDRESS}/" /opt/vernemq/etc/vm.args + if [ -n "$DOCKER_VERNEMQ_SWARM" ]; then + NODENAME=$(ip -4 -o addr show ${NET_INTERFACE} | awk '{print $4}' | cut -d "/" -f 1) + sed -i.bak -r "s/VerneMQ@.+/VerneMQ@${NODENAME}/" ${VERNEMQ_VM_ARGS_FILE} + else + sed -i.bak -r "s/-name VerneMQ@.+/-name VerneMQ@${IP_ADDRESS}/" ${VERNEMQ_VM_ARGS_FILE} + fi fi if env | grep "DOCKER_VERNEMQ_DISCOVERY_NODE" -q; then - sed -i.bak -r "/-eval.+/d" /opt/vernemq/etc/vm.args - echo "-eval \"vmq_server_cmd:node_join('VerneMQ@${DOCKER_VERNEMQ_DISCOVERY_NODE}')\"" >> /opt/vernemq/etc/vm.args + discovery_node=$DOCKER_VERNEMQ_DISCOVERY_NODE + if [ -n "$DOCKER_VERNEMQ_SWARM" ]; then + tmp='' + while [[ -z "$tmp" ]]; do + tmp=$(getent hosts tasks.$discovery_node | awk '{print $1}' | head -n 1) + sleep 1 + done + discovery_node=$tmp + fi + if [ -n "$DOCKER_VERNEMQ_COMPOSE" ]; then + tmp='' + while [[ -z "$tmp" ]]; do + tmp=$(getent hosts $discovery_node | awk '{print $1}' | head -n 1) + sleep 1 + done + discovery_node=$tmp + fi + + sed -i.bak -r "/-eval.+/d" ${VERNEMQ_VM_ARGS_FILE} + echo "-eval \"vmq_server_cmd:node_join('VerneMQ@$discovery_node')\"" >> ${VERNEMQ_VM_ARGS_FILE} fi # If you encounter "SSL certification error (subject name does not match the host name)", you may try to set DOCKER_VERNEMQ_KUBERNETES_INSECURE to "1". insecure="" if env | grep "DOCKER_VERNEMQ_KUBERNETES_INSECURE" -q; then + echo "Using curl with \"--insecure\" argument to access kubernetes API without matching SSL certificate" insecure="--insecure" fi -if env | grep "DOCKER_VERNEMQ_DISCOVERY_KUBERNETES" -q; then - DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME=${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME:-cluster.local} +if env | grep "DOCKER_VERNEMQ_KUBERNETES_ISTIO_ENABLED" -q; then + istio_health + while [ $status != 0 ]; do + istio_health + sleep 1 + done + echo "Istio ready" +fi + +# Function to call a HTTP GET request on the given URL Path, using the hostname +# of the current k8s cluster name. 
Usage: "k8sCurlGet /my/path" +function k8sCurlGet () { + local urlPath=$1 + + local hostname="kubernetes.default.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}" + local certsFile="${SECRETS_KUBERNETES_DIR}/ca.crt" + local token=$(cat ${SECRETS_KUBERNETES_DIR}/token) + local header="Authorization: Bearer ${token}" + local url="https://${hostname}/${urlPath}" + + curl -sS ${insecure} --cacert ${certsFile} -H "${header}" ${url} \ + || ( echo "### Error on accessing URL ${url}" ) +} + +DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME=${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME:-cluster.local} +if [ -d "${SECRETS_KUBERNETES_DIR}" ] ; then # Let's get the namespace if it isn't set - DOCKER_VERNEMQ_KUBERNETES_NAMESPACE=${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE:-`cat /var/run/secrets/kubernetes.io/serviceaccount/namespace`} + DOCKER_VERNEMQ_KUBERNETES_NAMESPACE=${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE:-$(cat "${SECRETS_KUBERNETES_DIR}/namespace")} + + # Check the API access that will be needed in the TERM signal handler + podResponse=$(k8sCurlGet api/v1/namespaces/${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}/pods/$(hostname) ) + statefulSetName=$(echo ${podResponse} | jq -r '.metadata.ownerReferences[0].name') + statefulSetPath="apis/apps/v1/namespaces/${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}/statefulsets/${statefulSetName}" + statefulSetResponse=$(k8sCurlGet ${statefulSetPath} ) + isCodeForbidden=$(echo ${statefulSetResponse} | jq '.code == 403') + if [[ ${isCodeForbidden} == "true" ]]; then + echo "Permission error: Cannot access URL ${statefulSetPath}: $(echo ${statefulSetResponse} | jq '.reason,.code,.message')" + exit 1 + else + numReplicas=$(echo ${statefulSetResponse} | jq '.status.replicas') + echo "Permissions ok: Our pod $(hostname) belongs to StatefulSet ${statefulSetName} with ${numReplicas} replicas" + fi +fi + +# Set up kubernetes node discovery +start_join_cluster=0 +if env | grep "DOCKER_VERNEMQ_DISCOVERY_KUBERNETES" -q; then # Let's set our nodename correctly - VERNEMQ_KUBERNETES_SUBDOMAIN=$(curl -X GET $insecure --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt https://kubernetes.default.svc.$DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME/api/v1/namespaces/$DOCKER_VERNEMQ_KUBERNETES_NAMESPACE/pods?labelSelector=$DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | jq '.items[0].spec.subdomain' | sed 's/"//g' | tr '\n' '\0') - if [ $VERNEMQ_KUBERNETES_SUBDOMAIN == "null" ]; then + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#list-pod-v1-core + podList=$(k8sCurlGet "api/v1/namespaces/${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}/pods?labelSelector=${DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR}") + VERNEMQ_KUBERNETES_SUBDOMAIN=${DOCKER_VERNEMQ_KUBERNETES_SUBDOMAIN:-$(echo ${podList} | jq '.items[0].spec.subdomain' | tr '\n' '"' | sed 's/"//g')} + if [[ $VERNEMQ_KUBERNETES_SUBDOMAIN == "null" ]]; then VERNEMQ_KUBERNETES_HOSTNAME=${MY_POD_NAME}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME} else VERNEMQ_KUBERNETES_HOSTNAME=${MY_POD_NAME}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME} fi - sed -i.bak -r "s/VerneMQ@.+/VerneMQ@${VERNEMQ_KUBERNETES_HOSTNAME}/" /opt/vernemq/etc/vm.args + sed -i.bak -r "s/VerneMQ@.+/VerneMQ@${VERNEMQ_KUBERNETES_HOSTNAME}/" ${VERNEMQ_VM_ARGS_FILE} # Hack into K8S DNS resolution (temporarily) - kube_pod_names=$(curl -X GET $insecure --cacert 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt https://kubernetes.default.svc.$DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME/api/v1/namespaces/$DOCKER_VERNEMQ_KUBERNETES_NAMESPACE/pods?labelSelector=$DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | jq '.items[].spec.hostname' | sed 's/"//g' | tr '\n' ' ')
-    for kube_pod_name in $kube_pod_names;
-    do
-        if [ $kube_pod_name == "null" ]
-        then
-            echo "Kubernetes discovery selected, but no pods found. Maybe we're the first?"
-            echo "Anyway, we won't attempt to join any cluster."
-            break
+    kube_pod_names=$(echo ${podList} | jq '.items[].spec.hostname' | sed 's/"//g' | tr '\n' ' ' | sed 's/ *$//')
+
+    for kube_pod_name in $kube_pod_names; do
+        if [[ $kube_pod_name == "null" ]]; then
+            echo "Kubernetes discovery selected, but no pods found. Maybe we're the first?"
+            echo "Anyway, we won't attempt to join any cluster."
+            break
         fi
-        if [ $kube_pod_name != $MY_POD_NAME ]
-        then
-            echo "Will join an existing Kubernetes cluster with discovery node at ${kube_pod_name}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}"
-            echo "-eval \"vmq_server_cmd:node_join('VerneMQ@${kube_pod_name}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}')\"" >> /opt/vernemq/etc/vm.args
-            break
+        if [[ $kube_pod_name != $MY_POD_NAME ]]; then
+            discoveryHostname="${kube_pod_name}.${VERNEMQ_KUBERNETES_SUBDOMAIN}.${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}.svc.${DOCKER_VERNEMQ_KUBERNETES_CLUSTER_NAME}"
+            start_join_cluster=1
+            echo "Will join an existing Kubernetes cluster with discovery node at ${discoveryHostname}"
+            echo "-eval \"vmq_server_cmd:node_join('VerneMQ@${discoveryHostname}')\"" >> ${VERNEMQ_VM_ARGS_FILE}
+            echo "Did I previously leave the cluster? If so, purging old state."
+            # Use a brace group here: an "exit" inside ( ) would only leave the subshell,
+            # not the script.
+            curl -fsSL http://${discoveryHostname}:8888/status.json >/dev/null 2>&1 ||
+                { echo "Can't download status.json, better to exit now"; exit 1; }
+            curl -fsSL http://${discoveryHostname}:8888/status.json | grep -q ${VERNEMQ_KUBERNETES_HOSTNAME} ||
+                (echo "Cluster doesn't know about me, this means I've left previously. Purging old state..." && rm -rf /opt/vernemq/data/*)
+            break
         fi
     done
 fi
 
-if [ -f /opt/vernemq/etc/vernemq.conf.local ]; then
-    cp /opt/vernemq/etc/vernemq.conf.local /opt/vernemq/etc/vernemq.conf
+
+if [ -f "${VERNEMQ_CONF_LOCAL_FILE}" ]; then
+    cp "${VERNEMQ_CONF_LOCAL_FILE}" ${VERNEMQ_CONF_FILE}
+    sed -i -r "s/###IPADDRESS###/${IP_ADDRESS}/" ${VERNEMQ_CONF_FILE}
 else
-    sed -i '/########## Start ##########/,/########## End ##########/d' /opt/vernemq/etc/vernemq.conf
+    sed -i '/########## Start ##########/,/########## End ##########/d' ${VERNEMQ_CONF_FILE}
 
-    echo "########## Start ##########" >> /opt/vernemq/etc/vernemq.conf
+    echo "########## Start ##########" >> ${VERNEMQ_CONF_FILE}
 
-    env | grep DOCKER_VERNEMQ | grep -v 'DISCOVERY_NODE\|KUBERNETES\|DOCKER_VERNEMQ_USER' | cut -c 16- | awk '{match($0,/^[A-Z0-9_]*/)}{print tolower(substr($0,RSTART,RLENGTH)) substr($0,RLENGTH+1)}' | sed 's/__/./g' >> /opt/vernemq/etc/vernemq.conf
+    env | grep DOCKER_VERNEMQ | grep -v 'DISCOVERY_NODE\|KUBERNETES\|SWARM\|COMPOSE\|DOCKER_VERNEMQ_USER' | cut -c 16- | awk '{match($0,/^[A-Z0-9_]*/)}{print tolower(substr($0,RSTART,RLENGTH)) substr($0,RLENGTH+1)}' | sed 's/__/./g' >> ${VERNEMQ_CONF_FILE}
 
     users_are_set=$(env | grep DOCKER_VERNEMQ_USER)
     if [ !
-z "$users_are_set" ]; then - echo "vmq_passwd.password_file = /opt/vernemq/etc/vmq.passwd" >> /opt/vernemq/etc/vernemq.conf + echo "vmq_passwd.password_file = /opt/vernemq/etc/vmq.passwd" >> ${VERNEMQ_CONF_FILE} touch /opt/vernemq/etc/vmq.passwd fi @@ -77,75 +253,61 @@ $password EOF done - echo "erlang.distribution.port_range.minimum = 9100" >> /opt/vernemq/etc/vernemq.conf - echo "erlang.distribution.port_range.maximum = 9109" >> /opt/vernemq/etc/vernemq.conf - echo "listener.tcp.default = ${IP_ADDRESS}:1883" >> /opt/vernemq/etc/vernemq.conf - if env | grep -q "VERNEMQ_ENABLE_SSL_LISTENER"; then - # Populate SSL config - echo "listener.ssl.default = ${IP_ADDRESS}:8883" >> /opt/vernemq/etc/vernemq.conf - echo "listener.ssl.cafile = /opt/vernemq/etc/ca.pem" >> /opt/vernemq/etc/vernemq.conf - echo "listener.ssl.certfile = /opt/vernemq/etc/cert.pem" >> /opt/vernemq/etc/vernemq.conf - echo "listener.ssl.keyfile = /opt/vernemq/etc/privkey.pem" >> /opt/vernemq/etc/vernemq.conf - echo "listener.ssl.require_certificate = on" >> /opt/vernemq/etc/vernemq.conf - echo "listener.ssl.use_identity_as_username = on" >> /opt/vernemq/etc/vernemq.conf - echo "listener.ssl.tls_version = tlsv1.2" >> /opt/vernemq/etc/vernemq.conf + + if [ -z "$DOCKER_VERNEMQ_ERLANG__DISTRIBUTION__PORT_RANGE__MINIMUM" ]; then + echo "erlang.distribution.port_range.minimum = 9100" >> ${VERNEMQ_CONF_FILE} fi - # We enable the revproxy listener regardless. - echo "listener.tcp.revproxy = ${IP_ADDRESS}:1885" >> /opt/vernemq/etc/vernemq.conf - echo "listener.ws.default = ${IP_ADDRESS}:8080" >> /opt/vernemq/etc/vernemq.conf - echo "listener.vmq.clustering = ${IP_ADDRESS}:44053" >> /opt/vernemq/etc/vernemq.conf - echo "listener.http.metrics = ${IP_ADDRESS}:8888" >> /opt/vernemq/etc/vernemq.conf - echo "########## End ##########" >> /opt/vernemq/etc/vernemq.conf -fi + if [ -z "$DOCKER_VERNEMQ_ERLANG__DISTRIBUTION__PORT_RANGE__MAXIMUM" ]; then + echo "erlang.distribution.port_range.maximum = 9109" >> ${VERNEMQ_CONF_FILE} + fi -if env | grep -q "VERNEMQ_ENABLE_SSL_LISTENER"; then - # Let's do our magic. First of all, let's ask for certificates. - cacert=$(curl -s -d '{"label": "primary"}' -X POST $CFSSL_URL/api/v1/cfssl/info | jq -e -r ".result.certificate") - if [ -z "$cacert" ]; then - echo "Could not retrieve certificate from CFSSL at $CFSSL_URL , exiting" - exit 1 + if [ -z "$DOCKER_VERNEMQ_LISTENER__TCP__DEFAULT" ]; then + configure_vernemq_listeners + echo "listener.tcp.default = ${IP_ADDRESS}:1883" >> ${VERNEMQ_CONF_FILE} fi - echo "$cacert" > /etc/ssl/cfssl-ca-cert.crt - if env | grep -q "USE_LETSENCRYPT"; then - # TODO: Make this rotate in case we're using Let's encrypt - echo "You have chosen Let's encrypt as the deploy mechanism - this means clustering Verne is impossible!" - # Ensure certbot, first of all - echo 'deb http://ftp.debian.org/debian jessie-backports main' | tee /etc/apt/sources.list.d/backports.list - apt-get update - apt-get -qq install nginx-light - /etc/init.d/nginx start - if ! apt-get -qq install certbot -t jessie-backports; then - echo "Could not install certbot, exiting" - exit $? - fi - # Obtain certificate - if env | grep -q "LETSENCRYPT_STAGING"; then - echo "Using staging Let's Encrypt - certificate won't be valid!" - certbot_staging=--test-cert - fi - if ! certbot certonly -n $certbot_staging --webroot --webroot-path=/var/www/html --agree-tos --email $LETSENCRYPT_EMAIL --domains $LETSENCRYPT_DOMAINS; then - echo "Certbot failed, exiting" - exit $? 
-        fi
-        /etc/init.d/nginx stop &
-        letsencrypt_dir=/etc/letsencrypt/live/${LETSENCRYPT_DOMAINS%,*}
-        # Then we copy our private key and certificate.
-        cp $letsencrypt_dir/privkey.pem /opt/vernemq/etc/privkey.pem || exit 1
-        cp $letsencrypt_dir/fullchain.pem /opt/vernemq/etc/cert.pem || exit 1
-        # And now we merge.
-        cat $letsencrypt_dir/fullchain.pem /etc/ssl/cfssl-ca-cert.crt > /opt/vernemq/etc/ca.pem
-    else
-        # Then we copy our private key and certificate. We assume there's a mount at /etc/ssl/vernemq-certs
-        cp /etc/ssl/vernemq-certs/privkey /opt/vernemq/etc/privkey.pem || exit 1
-        cp /etc/ssl/vernemq-certs/cert /opt/vernemq/etc/cert.pem || exit 1
-        # And now we merge.
-        cat /etc/ssl/vernemq-certs/cert /etc/ssl/cfssl-ca-cert.crt > /opt/vernemq/etc/ca.pem
+
+    if [ -z "$DOCKER_VERNEMQ_LISTENER__WS__DEFAULT" ]; then
+        echo "listener.ws.default = ${IP_ADDRESS}:8080" >> ${VERNEMQ_CONF_FILE}
+    fi
+
+    if [ -z "$DOCKER_VERNEMQ_LISTENER__VMQ__CLUSTERING" ]; then
+        echo "listener.vmq.clustering = ${IP_ADDRESS}:44053" >> ${VERNEMQ_CONF_FILE}
     fi
+
+    if [ -z "$DOCKER_VERNEMQ_LISTENER__HTTP__METRICS" ]; then
+        echo "listener.http.metrics = ${IP_ADDRESS}:8888" >> ${VERNEMQ_CONF_FILE}
+    fi
+
+    configure_vernemq_listeners
+
+    # Add our plugin to conf
+    # TODO check if it is correct
+    echo "plugins.astarte_vmq_plugin = on" >> ${VERNEMQ_CONF_FILE}
+    echo "plugins.astarte_vmq_plugin.path = /opt/astarte_vmq_plugin" >> ${VERNEMQ_CONF_FILE}
+
+    echo "########## End ##########" >> ${VERNEMQ_CONF_FILE}
 fi
 
+if [ ! -z "$DOCKER_VERNEMQ_ERLANG__MAX_PORTS" ]; then
+    sed -i.bak -r "s/\+Q.+/\+Q ${DOCKER_VERNEMQ_ERLANG__MAX_PORTS}/" ${VERNEMQ_VM_ARGS_FILE}
+fi
+
+if [ ! -z "$DOCKER_VERNEMQ_ERLANG__PROCESS_LIMIT" ]; then
+    sed -i.bak -r "s/\+P.+/\+P ${DOCKER_VERNEMQ_ERLANG__PROCESS_LIMIT}/" ${VERNEMQ_VM_ARGS_FILE}
+fi
+
+if [ ! -z "$DOCKER_VERNEMQ_ERLANG__MAX_ETS_TABLES" ]; then
+    sed -i.bak -r "s/\+e.+/\+e ${DOCKER_VERNEMQ_ERLANG__MAX_ETS_TABLES}/" ${VERNEMQ_VM_ARGS_FILE}
+fi
+
+if [ ! -z "$DOCKER_VERNEMQ_ERLANG__DISTRIBUTION_BUFFER_SIZE" ]; then
+    sed -i.bak -r "s/\+zdbbl.+/\+zdbbl ${DOCKER_VERNEMQ_ERLANG__DISTRIBUTION_BUFFER_SIZE}/" ${VERNEMQ_VM_ARGS_FILE}
+fi
+
+
 # Check configuration file
 /opt/vernemq/bin/vernemq config generate 2>&1 > /dev/null | tee /tmp/config.out | grep error
 
 if [ $? -ne 1 ]; then
     echo "configuration error, exit"
@@ -163,18 +325,87 @@ siguser1_handler() {
 
 # SIGTERM-handler
 sigterm_handler() {
     if [ $pid -ne 0 ]; then
-        # this will stop the VerneMQ process
-        /opt/vernemq/bin/vmq-admin cluster leave node=VerneMQ@$IP_ADDRESS -k > /dev/null
-        wait "$pid"
+        if [ -d "${SECRETS_KUBERNETES_DIR}" ] ; then
+            # this will stop the VerneMQ process, but first drain the node from all existing client sessions (-k)
+            if [ -n "$VERNEMQ_KUBERNETES_HOSTNAME" ]; then
+                terminating_node_name=VerneMQ@$VERNEMQ_KUBERNETES_HOSTNAME
+            else
+                terminating_node_name=VerneMQ@$IP_ADDRESS
+            fi
+            podList=$(k8sCurlGet "api/v1/namespaces/${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}/pods?labelSelector=${DOCKER_VERNEMQ_KUBERNETES_LABEL_SELECTOR}")
+            kube_pod_names=$(echo ${podList} | jq '.items[].spec.hostname' | sed 's/"//g' | tr '\n' ' ' | sed 's/ *$//')
+            if [ "$kube_pod_names" = "$MY_POD_NAME" ]; then
+                echo "I'm the only pod remaining. Not performing leave and/or state purge."
+                /opt/vernemq/bin/vmq-admin node stop >/dev/null
+            else
+                # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#read-pod-v1-core
+                podResponse=$(k8sCurlGet api/v1/namespaces/${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}/pods/$(hostname) )
+                statefulSetName=$(echo ${podResponse} | jq -r '.metadata.ownerReferences[0].name')
+
+                # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#-strong-read-operations-statefulset-v1-apps-strong-
+                statefulSetResponse=$(k8sCurlGet "apis/apps/v1/namespaces/${DOCKER_VERNEMQ_KUBERNETES_NAMESPACE}/statefulsets/${statefulSetName}" )
+
+                isCodeForbidden=$(echo ${statefulSetResponse} | jq '.code == 403')
+                if [[ ${isCodeForbidden} == "true" ]]; then
+                    echo "Permission error: Cannot access URL ${statefulSetPath}: $(echo ${statefulSetResponse} | jq '.reason,.code,.message')"
+                fi
+
+                reschedule=$(echo ${statefulSetResponse} | jq '.status.replicas == .status.readyReplicas')
+                scaled_down=$(echo ${statefulSetResponse} | jq '.status.currentReplicas == .status.updatedReplicas')
+
+                if [[ $reschedule == "true" ]]; then
+                    # Perhaps this is a scale down?
+                    if [[ $scaled_down == "true" ]]; then
+                        echo "Seems that this is a scale down scenario. Leaving cluster."
+                        /opt/vernemq/bin/vmq-admin cluster leave node=${terminating_node_name} -k && rm -rf /opt/vernemq/data/*
+                    else
+                        echo "Reschedule is true. Not leaving the cluster."
+                        /opt/vernemq/bin/vmq-admin node stop >/dev/null
+                    fi
+                else
+                    echo "Reschedule is false. Leaving the cluster."
+                    /opt/vernemq/bin/vmq-admin cluster leave node=${terminating_node_name} -k && rm -rf /opt/vernemq/data/*
+                fi
+            fi
+        else
+            if [ -n "$DOCKER_VERNEMQ_SWARM" ]; then
+                terminating_node_name=VerneMQ@$(hostname -i)
+                # For Swarm we keep the old "cluster leave" approach for now
+                echo "Swarm node is leaving the cluster."
+                /opt/vernemq/bin/vmq-admin cluster leave node=${terminating_node_name} -k && rm -rf /opt/vernemq/data/*
+            else
+                # In non-k8s mode: Stop the vernemq node gracefully
+                /opt/vernemq/bin/vmq-admin node stop >/dev/null
+            fi
+        fi
+        kill -s TERM ${pid}
+        WAITFOR_PID=${pid}
+        pid=0
+        wait ${WAITFOR_PID}
     fi
     exit 143; # 128 + 15 -- SIGTERM
 }
 
+if [ ! -s ${VERNEMQ_VM_ARGS_FILE} ]; then
+    echo "ls -l ${VERNEMQ_ETC_DIR}"
+    ls -l ${VERNEMQ_ETC_DIR}
+    echo "###" >&2
+    echo "### Configuration file ${VERNEMQ_VM_ARGS_FILE} is empty! This will not work." >&2
+    echo "### Exiting now." >&2
+    echo "###" >&2
+    exit 1
+fi
+
 # Setup OS signal handlers
 trap 'siguser1_handler' SIGUSR1
 trap 'sigterm_handler' SIGTERM
 
 # Start VerneMQ
-/opt/vernemq/bin/vernemq console -noshell -noinput $@
-pid=$(ps aux | grep '[b]eam.smp' | awk '{print $2}')
+/opt/vernemq/bin/vernemq console -noshell -noinput $@ &
+pid=$!
+if [ $start_join_cluster -eq 1 ]; then
+    mkdir -p /var/log/vernemq/log
+    join_cluster > /var/log/vernemq/log/join_cluster.log &
+fi
+
 wait $pid

diff --git a/docker/files/vernemq.conf b/docker/files/vernemq.conf
index e1f176d..8226701 100644
--- a/docker/files/vernemq.conf
+++ b/docker/files/vernemq.conf
@@ -38,15 +38,6 @@ allow_subscribe_during_netsplit = off
 ## - on or off
 allow_unsubscribe_during_netsplit = off
 
-## Allows a client to logon multiple times using the same client id
-## (non-standard behaviour!).
-##
-## Default: off
-##
-## Acceptable values:
-## - on or off
-allow_multiple_sessions = off
-
 ## Set the time in seconds VerneMQ waits before a retry, in case a (QoS=1 or QoS=2) message
 ## delivery gets no answer.
## @@ -475,42 +466,6 @@ log.crash = on ## - the path to a file log.crash.file = /opt/vernemq/log/crash.log -## Maximum size in bytes of individual messages in the crash log -## -## Default: 64KB -## -## Acceptable values: -## - a byte size with units, e.g. 10GB -log.crash.maximum_message_size = 64KB - -## Maximum size of the crash log in bytes, before it is rotated -## -## Default: 10MB -## -## Acceptable values: -## - a byte size with units, e.g. 10GB -log.crash.size = 10MB - -## The schedule on which to rotate the crash log. For more -## information see: -## https://github.com/basho/lager/blob/master/README.md#internal-log-rotation -## -## Default: $D0 -## -## Acceptable values: -## - text -log.crash.rotation = $D0 - -## The number of rotated crash logs to keep. When set to -## 'current', only the current open log file is kept. -## -## Default: 5 -## -## Acceptable values: -## - an integer -## - the text "current" -log.crash.rotation.keep = 5 - ## Name of the Erlang node ## ## Default: VerneMQ@127.0.0.1 From 97fb4651c4db61b47fd54f5761e19dcf7a3d134a Mon Sep 17 00:00:00 2001 From: Arnaldo Cesco Date: Thu, 19 Dec 2024 15:15:31 +0100 Subject: [PATCH 3/4] Add support for tls on 8883, again The v2.0.1 vernemq.sh script did not expose port 8883 for mqtts. Bring it back. Signed-off-by: Arnaldo Cesco --- docker/bin/vernemq.sh | 63 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 2 deletions(-) diff --git a/docker/bin/vernemq.sh b/docker/bin/vernemq.sh index 986ac59..7d50d57 100644 --- a/docker/bin/vernemq.sh +++ b/docker/bin/vernemq.sh @@ -281,8 +281,67 @@ EOF configure_vernemq_listeners - # Add our plugin to conf - # TODO check if it is correct + # Add support for listening on 8883 with TLS + if env | grep -q "VERNEMQ_ENABLE_SSL_LISTENER"; then + # Populate SSL config + echo "listener.ssl.default = ${IP_ADDRESS}:8883" >> ${VERNEMQ_CONF_FILE} + echo "listener.ssl.cafile = /opt/vernemq/etc/ca.pem" >> ${VERNEMQ_CONF_FILE} + echo "listener.ssl.certfile = /opt/vernemq/etc/cert.pem" >> ${VERNEMQ_CONF_FILE} + echo "listener.ssl.keyfile = /opt/vernemq/etc/privkey.pem" >> ${VERNEMQ_CONF_FILE} + echo "listener.ssl.require_certificate = on" >> ${VERNEMQ_CONF_FILE} + echo "listener.ssl.use_identity_as_username = on" >> ${VERNEMQ_CONF_FILE} + echo "listener.ssl.tls_version = tlsv1.2" >> ${VERNEMQ_CONF_FILE} + fi + + # Let's use obscure magic to do the right thing™ + if env | grep -q "VERNEMQ_ENABLE_SSL_LISTENER"; then + # Let's do our magic. First of all, let's ask for certificates. + cacert=$(curl -s -d '{"label": "primary"}' -X POST $CFSSL_URL/api/v1/cfssl/info | jq -e -r ".result.certificate") + if [ -z "$cacert" ]; then + echo "Could not retrieve certificate from CFSSL at $CFSSL_URL , exiting" + exit 1 + fi + echo "$cacert" > /etc/ssl/cfssl-ca-cert.crt + if env | grep -q "USE_LETSENCRYPT"; then + # TODO: Make this rotate in case we're using Let's encrypt + echo "You have chosen Let's encrypt as the deploy mechanism - this means clustering Verne is impossible!" + # Ensure certbot, first of all + echo 'deb http://ftp.debian.org/debian jessie-backports main' | tee /etc/apt/sources.list.d/backports.list + apt-get update + apt-get -qq install nginx-light + /etc/init.d/nginx start + if ! apt-get -qq install certbot -t jessie-backports; then + echo "Could not install certbot, exiting" + exit $? + fi + # Obtain certificate + if env | grep -q "LETSENCRYPT_STAGING"; then + echo "Using staging Let's Encrypt - certificate won't be valid!" + certbot_staging=--test-cert + fi + if ! 
certbot certonly -n $certbot_staging --webroot --webroot-path=/var/www/html --agree-tos --email $LETSENCRYPT_EMAIL --domains $LETSENCRYPT_DOMAINS; then
+            echo "Certbot failed, exiting"
+            exit $?
+        fi
+        /etc/init.d/nginx stop &
+        letsencrypt_dir=/etc/letsencrypt/live/${LETSENCRYPT_DOMAINS%,*}
+        # Then we copy our private key and certificate.
+        cp $letsencrypt_dir/privkey.pem /opt/vernemq/etc/privkey.pem || exit 1
+        cp $letsencrypt_dir/fullchain.pem /opt/vernemq/etc/cert.pem || exit 1
+        # And now we merge.
+        cat $letsencrypt_dir/fullchain.pem /etc/ssl/cfssl-ca-cert.crt > /opt/vernemq/etc/ca.pem
+    else
+        # Then we copy our private key and certificate. We assume there's a mount at /etc/ssl/vernemq-certs
+        cp /etc/ssl/vernemq-certs/privkey /opt/vernemq/etc/privkey.pem || exit 1
+        cp /etc/ssl/vernemq-certs/cert /opt/vernemq/etc/cert.pem || exit 1
+        # And now we merge.
+        cat /etc/ssl/vernemq-certs/cert /etc/ssl/cfssl-ca-cert.crt > /opt/vernemq/etc/ca.pem
+    fi
+fi
+
+    configure_vernemq_listeners
+
+    # Add Astarte VerneMQ plugin
     echo "plugins.astarte_vmq_plugin = on" >> ${VERNEMQ_CONF_FILE}
     echo "plugins.astarte_vmq_plugin.path = /opt/astarte_vmq_plugin" >> ${VERNEMQ_CONF_FILE}

From 9bfb267325c5712f59e1b97a469ebb811ad4e722 Mon Sep 17 00:00:00 2001
From: Arnaldo Cesco
Date: Wed, 18 Dec 2024 12:54:20 +0100
Subject: [PATCH 4/4] Use the same vm.args as VerneMQ 2.0.1

Signed-off-by: Arnaldo Cesco
---
 docker/files/vm.args | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/docker/files/vm.args b/docker/files/vm.args
index dab918e..5b5f29c 100644
--- a/docker/files/vm.args
+++ b/docker/files/vm.args
@@ -1,12 +1,15 @@
-+P 256000
--env ERL_MAX_ETS_TABLES 256000
++P 512000
++e 256000
 -env ERL_CRASH_DUMP /erl_crash.dump
 -env ERL_FULLSWEEP_AFTER 0
--env ERL_MAX_PORTS 65536
++Q 512000
 +A 64
 -setcookie vmq
 -name VerneMQ@127.0.0.1
 +K true
 +W w
++sbwt none
++sbwtdcpu none
++sbwtdio none
 -smp enable
-+zdbbl 32768
++zdbbl 32768
\ No newline at end of file
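For reference, the new vm.args follows upstream VerneMQ 2.0.1: the process
limit is raised to +P 512000, the deprecated -env ERL_MAX_PORTS and -env
ERL_MAX_ETS_TABLES switches are replaced by their modern +Q and +e
equivalents, and scheduler busy-waiting is disabled (+sbwt, +sbwtdcpu and
+sbwtdio set to none). Because docker/bin/vernemq.sh rewrites these flags from
the environment (see the sed overrides in patch 2), a deployment can still
tune them without rebuilding the image; a minimal sketch, with illustrative
values only:

    # Each variable below is rewritten into vm.args by vernemq.sh at container start.
    export DOCKER_VERNEMQ_ERLANG__PROCESS_LIMIT=1048576           # rewrites +P
    export DOCKER_VERNEMQ_ERLANG__MAX_PORTS=262144                # rewrites +Q
    export DOCKER_VERNEMQ_ERLANG__MAX_ETS_TABLES=524288           # rewrites +e
    export DOCKER_VERNEMQ_ERLANG__DISTRIBUTION_BUFFER_SIZE=65536  # rewrites +zdbbl (KB)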