diff --git a/README.md b/README.md
new file mode 100644
index 0000000..56bd21c
--- /dev/null
+++ b/README.md
@@ -0,0 +1,3 @@
+# ecs-samples
+Code samples for working with ECS
+Please visit ecs-metrics-workshop and its README for the CloudCitizen project.
diff --git a/ecs-api-workshop/README.md b/ecs-api-workshop/README.md
new file mode 100644
index 0000000..edc58cc
--- /dev/null
+++ b/ecs-api-workshop/README.md
@@ -0,0 +1,5 @@
+# ecs-api-workshop
+This workshop shows how to create workflows using the ECS APIs as referenced in
+
+https://community.emc.com/docs/DOC-62642 and upcoming releases
+
diff --git a/ecs-api-workshop/build.gradle b/ecs-api-workshop/build.gradle
new file mode 100644
index 0000000..c1a6dfb
--- /dev/null
+++ b/ecs-api-workshop/build.gradle
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2013-2018 Dell Inc. or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0.txt
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+group = 'com.emc.ecs.monitoring'
+version = '1.0'
+
+apply plugin: 'java'
+apply plugin: 'idea'
+apply plugin: 'eclipse'
+
+repositories {
+    mavenCentral()
+}
+
+dependencies {
+    // https://docs.gradle.org/current/userguide/declaring_dependencies.html
+    // compile "com.amazonaws:aws-java-sdk-s3:1.11.118"
+    compile group: 'org.slf4j', name: 'slf4j-simple', version: '1.7.24'
+    compile group: 'org.slf4j', name: 'slf4j-api', version: '1.7.24'
+    compile group: 'junit', name: 'junit', version: '4.11'
+    compile group: 'com.jayway.jsonpath', name: 'json-path', version: '2.1.0'
+    implementation 'org.springframework:spring-web:5.0.2.RELEASE'
+}
+
+task wrapper(type: Wrapper) {
+    gradleVersion = '4.3.1'
+}
+
+jar {
+    from {
+        configurations.compile.collect {
+            it.isDirectory() ? it : zipTree(it)
+        }
+    }
+    manifest {
+        attributes(
+            'Main-Class': 'com.emc.ecs.api.sample.APIResource'
+        )
+    }
+}
diff --git a/ecs-api-workshop/gradle/wrapper/gradle-wrapper.properties b/ecs-api-workshop/gradle/wrapper/gradle-wrapper.properties
new file mode 100644
index 0000000..ab4b7fe
--- /dev/null
+++ b/ecs-api-workshop/gradle/wrapper/gradle-wrapper.properties
@@ -0,0 +1,6 @@
+#Thu Nov 23 11:33:09 CET 2017
+distributionBase=GRADLE_USER_HOME
+distributionPath=wrapper/dists
+zipStoreBase=GRADLE_USER_HOME
+zipStorePath=wrapper/dists
+distributionUrl=https\://services.gradle.org/distributions/gradle-4.3.1-bin.zip
diff --git a/ecs-api-workshop/gradlew b/ecs-api-workshop/gradlew
new file mode 100755
index 0000000..91a7e26
--- /dev/null
+++ b/ecs-api-workshop/gradlew
@@ -0,0 +1,164 @@
+#!/usr/bin/env bash
+
+##############################################################################
+##
+##  Gradle start up script for UN*X
+##
+##############################################################################
+
+# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+DEFAULT_JVM_OPTS=""
+
+APP_NAME="Gradle"
+APP_BASE_NAME=`basename "$0"`
+
+# Use the maximum available, or set MAX_FD != -1 to use that value.
+MAX_FD="maximum" + +warn ( ) { + echo "$*" +} + +die ( ) { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; +esac + +# For Cygwin, ensure paths are in UNIX format before anything is touched. +if $cygwin ; then + [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"` +fi + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >&- +APP_HOME="`pwd -P`" +cd "$SAVED" >&- + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules +function splitJvmOpts() { + JVM_OPTS=("$@") +} +eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS +JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" + +exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" diff --git a/ecs-api-workshop/gradlew.bat b/ecs-api-workshop/gradlew.bat new file mode 100644 index 0000000..aec9973 --- /dev/null +++ b/ecs-api-workshop/gradlew.bat @@ -0,0 +1,90 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. 
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto init
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:init
+@rem Get command-line arguments, handling Windowz variants
+
+if not "%OS%" == "Windows_NT" goto win9xME_args
+if "%@eval[2+2]" == "4" goto 4NT_args
+
+:win9xME_args
+@rem Slurp the command line arguments.
+set CMD_LINE_ARGS=
+set _SKIP=2
+
+:win9xME_args_slurp
+if "x%~1" == "x" goto execute
+
+set CMD_LINE_ARGS=%*
+goto execute
+
+:4NT_args
+@rem Get arguments from the 4NT Shell from JP Software
+set CMD_LINE_ARGS=%$
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
+
+:end
+@rem End local scope for the variables with windows NT shell
+if "%ERRORLEVEL%"=="0" goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
+exit /b 1
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega
diff --git a/ecs-api-workshop/src/main/java/com/emc/ecs/api/sample/APIResource.java b/ecs-api-workshop/src/main/java/com/emc/ecs/api/sample/APIResource.java
new file mode 100644
index 0000000..3a25be8
--- /dev/null
+++ b/ecs-api-workshop/src/main/java/com/emc/ecs/api/sample/APIResource.java
@@ -0,0 +1,102 @@
+package com.emc.ecs.api.sample;
+import java.net.*;
+import java.io.*;
+import java.util.*;
+import javax.net.ssl.HttpsURLConnection;
+import java.security.InvalidKeyException;
+import java.security.NoSuchAlgorithmException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class APIResource {
+    private static final Logger logger = LoggerFactory.getLogger(APIResource.class);
+
+    protected static Map<String, String> getHeaders() {
+        Map<String, String> headers = new HashMap<>();
+        headers.put("Content-Type", "application/x-www-form-urlencoded");
+        headers.put("Accept", "application/json");
+        headers.put("Content-Encoding", "UTF-8");
+        headers.put("Connection", "keep-alive");
+        return headers;
+    }
+
+    public static String getResponse(String httpsURL, Map<String, String> headers, String payload, String method) throws Exception {
+        URL myurl = new URL(httpsURL);
+        String response = null;
+        logger.info("Sending a " + method + " request to:" + httpsURL);
+        HttpsURLConnection con = (HttpsURLConnection) myurl.openConnection();
+        con.setRequestMethod(method);
+        for (Map.Entry<String, String> entry : headers.entrySet()) {
+            con.setRequestProperty(entry.getKey(), entry.getValue());
+        }
+        con.setDoOutput(true);
+        con.setDoInput(true);
+        if (method.equals("POST")) {
+            try (DataOutputStream output = new DataOutputStream(con.getOutputStream())) {
+                output.writeBytes(payload);
+            }
+        }
+        try (DataInputStream input = new DataInputStream(con.getInputStream())) {
+            StringBuffer contents = new StringBuffer();
+            String tmp;
+            while ((tmp = input.readLine()) != null) {
+                contents.append(tmp);
+                logger.debug("tmp=" + tmp);
+            }
+            response = contents.toString();
+        }
+        logger.info("Resp Code:" + con.getResponseCode());
+        // logger.info("Resp Message:" + con.getResponseMessage());
+        return response;
+    }
+
+    private static String getToken(String host, String username, String password) throws Exception {
+        String tokenEndpoint = "/login";
+        String address = "https://" + host + ":4443" + tokenEndpoint;
+        logger.info("address=" + address);
+        URL myurl = new URL(address);
+        HttpsURLConnection con = (HttpsURLConnection) myurl.openConnection();
+        con.setRequestMethod("GET");
+        Map<String, String> headers = getHeaders();
+        for (Map.Entry<String, String> entry : headers.entrySet()) {
+            con.setRequestProperty(entry.getKey(), entry.getValue());
+        }
+        // the /login endpoint authenticates with HTTP Basic and answers with a token header
+        String credential = username + ":" + password;
+        String encodedString = Base64.getEncoder().encodeToString(credential.getBytes());
+        con.setRequestProperty("Authorization", "Basic " + encodedString);
+        con.setDoOutput(true);
+        con.setDoInput(true);
+        String token = "";
+        if (con.getResponseCode() == 200) {
+            token = con.getHeaderField("X-SDS-AUTH-TOKEN");
+        }
+        logger.debug("token={}", token);
+        return token;
+    }
+
+    public static void main(String[] args) throws InvalidKeyException, NoSuchAlgorithmException, IllegalStateException, UnsupportedEncodingException {
+        String host = "localhost";
+        String username = "";
+        String password = "";
+        try {
+            String token = getToken(host, username, password);
+            logger.info("token=" + token);
+        } catch (Exception e) {
+            logger.error("Exception:", e);
+        }
+    }
+}
+
+// Output:
+// [main] INFO com.emc.ecs.api.sample.APIResource - address=https://localhost:4443/login
+
diff --git a/ecs-api-workshop/src/main/java/com/emc/ecs/api/sample/Master.java b/ecs-api-workshop/src/main/java/com/emc/ecs/api/sample/Master.java
new file mode 100644
index 0000000..c6c7ef7
--- /dev/null
+++ b/ecs-api-workshop/src/main/java/com/emc/ecs/api/sample/Master.java
@@ -0,0 +1,85 @@
+package com.emc.ecs.api.sample;
+
+import java.net.URI;
+import java.util.Map;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class Master {
+    private static final Logger logger = LoggerFactory.getLogger(Master.class);
+    private String name;
+    private URI masterId;
+    private String resource;
+    private String address;
+    private Map<String, String> headers;
+
+    public Master() {
+        this.masterId = URI.create("invalidId");
+        this.resource = "/master";
+        this.address = Resource.getPath() + resource;
+        this.headers = Resource.getHeadersWithAuth();
+        this.name = null;
+    }
+
+    public String create(String name) throws Exception {
+        this.name = name;
+        String parameter = "{\"master_type\": \"GEMALTO\",";
+        parameter += "\"name\": \"" + name + "\"";
+        parameter += "}";
+        String id = null;
+        String response = Resource.getResponse(address, headers, parameter, "POST");
+        int start = response.indexOf("\"id\": \"");
+        if (start != -1) {
+            start += 7; // skip past the "id": " prefix
+            int end = response.indexOf("\"", start);
+            if (end != -1 && end > start) {
+                id = response.substring(start, end);
+            } else {
+                logger.info("master id not found in response.");
+            }
+        } else {
+            logger.info("id key not found in response.");
+        }
+        if (id != null) {
+            this.masterId = URI.create(id);
+        }
+        return id;
+    }
+
+    public String updateMaster(String vdcId, String primaryServerId, String secondaryServerId) throws Exception {
+        // assemble the mapping payload: one mapping for the VDC, listing its slave servers
+        String parameter = "{";
+        parameter += "\"name\": \"" + this.name + "\",";
+        parameter += "\"_mapping_set\": [{";
+        parameter += "\"_mapping\": {";
+        parameter += "\"_slaves_list\": [";
+        parameter += "{\"_slave\": \"" + primaryServerId + "\"},";
+        parameter += "{\"_slave\": \"" + secondaryServerId + "\"}";
+        parameter += "],";
+        parameter += "\"vdc_id\": \"" + vdcId + "\"";
+        parameter += "}";
+        parameter += "}]";
+        parameter += "}";
+        String response = Resource.getResponse(address, headers, parameter, "PUT");
+        return response;
+    }
+
+    public String getMaster() throws Exception {
+        String response = Resource.getResponse(address + "/" + masterId.toString(), headers, "", "GET");
+        return response;
+    }
+
+    public String listMaster() throws Exception {
+        String response = Resource.getResponse(address, headers, "", "GET");
+        return response;
+    }
+
+    public String deleteMaster() throws Exception {
+        String response = Resource.getResponse(address + "/" + masterId.toString(), headers, "", "DELETE");
+        return response;
+    }
+
+    public String activateMaster() throws Exception {
+        String response = Resource.getResponse(address + "/" + masterId.toString() + "/activate", headers, "", "PUT");
+        return response;
+    }
+}
diff --git a/ecs-api-workshop/src/main/java/com/emc/ecs/api/sample/Resource.java b/ecs-api-workshop/src/main/java/com/emc/ecs/api/sample/Resource.java
new file mode 100644
index 0000000..30fa032
--- /dev/null
+++ b/ecs-api-workshop/src/main/java/com/emc/ecs/api/sample/Resource.java
@@ -0,0 +1,67 @@
+package com.emc.ecs.api.sample;
+import java.net.*;
+import java.io.*;
+import java.util.*;
+import javax.net.ssl.HttpsURLConnection;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class Resource {
+    private static final Logger logger = LoggerFactory.getLogger(Resource.class);
+
+    public static Map<String, String> getHeaders(String token) {
+        Map<String, String> headers = new HashMap<>();
+        headers.put("X-SDS-AUTH-TOKEN", token);
+        headers.put("Content-Type", "application/json");
+        headers.put("Accept", "application/json");
+        headers.put("Content-Encoding", "UTF-8");
+        headers.put("Connection", "keep-alive");
+        return headers;
+    }
+
+    public static String getResponse(String httpsURL, Map<String, String> headers, String payload, String method) throws Exception {
+        URL myurl = new URL(httpsURL);
+        String response = null;
+        logger.info("Sending a " + method + " request to:" + httpsURL);
+        HttpsURLConnection con = (HttpsURLConnection) myurl.openConnection();
+        con.setRequestMethod(method);
+        for (Map.Entry<String, String> entry : headers.entrySet()) {
+            con.setRequestProperty(entry.getKey(), entry.getValue());
+        }
+        con.setDoOutput(true);
+        con.setDoInput(true);
+        if (method.equals("POST")) {
+            try (DataOutputStream output = new DataOutputStream(con.getOutputStream())) {
+                output.writeBytes(payload);
+            }
+        }
+        try (DataInputStream input = new DataInputStream(con.getInputStream())) {
+            StringBuffer contents = new StringBuffer();
+            String tmp;
+            while ((tmp = input.readLine()) != null) {
+                contents.append(tmp);
+                logger.debug("tmp=" + tmp);
+            }
+            response = contents.toString();
+        }
+        logger.info("Resp Code:" + con.getResponseCode());
+        return response;
+    }
+
+    public static String getToken() {
+        // read from config or provide as a constant; see APIResource.getToken() for fetching one via /login
+        String token = "my_access_token";
+        return token;
+    }
+
+    public static String getPath() {
+        return "https://localhost:4443";
+    }
+
+    public static Map<String, String> getHeadersWithAuth() {
+        String token = getToken();
+        return getHeaders(token);
+    }
+}
diff --git a/ecs-api-workshop/src/main/java/com/emc/ecs/api/sample/Slave.java b/ecs-api-workshop/src/main/java/com/emc/ecs/api/sample/Slave.java
new file mode 100644
index 0000000..067262c
--- /dev/null
+++ b/ecs-api-workshop/src/main/java/com/emc/ecs/api/sample/Slave.java
@@ -0,0 +1,104 @@
+package com.emc.ecs.api.sample;
+
+import java.net.URI;
+import java.util.Map;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class Slave {
+    private static final Logger logger = LoggerFactory.getLogger(Slave.class);
+    private String name;
+    private URI masterId;
+    private URI slaveId;
+    private String resource;
+    private String address;
+    private String username;
+    private String password;
+    private String certificateAuthority;
+    private String certificateRevocationList;
+    private String identityStore;
+    private String identityStorePassword;
+
+    private Map<String, String> headers;
+    // fqdn_ip is omitted from this sample payload; add it if your endpoint requires it
+    private static String parameterFormat = String.join("\n",
+        "{",
+        "    \"username\": \"%s\",",
+        "    \"password\": \"%s\",",
+        "    \"port\": 5696,",
+        "    \"master_id\": \"%s\",",
+        "    \"slave_hostname\": \"%s\",",
+        "    \"certificate_authority\": \"%s\",", // -----BEGIN CERTIFICATE-----
+        "    \"certificate_revocation_list\": \"%s\",",
+        "    \"identity_store\": \"%s\",", // LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t
+        "    \"identity_store_password\": \"%s\"",
+        "}");
+
+    public Slave() {
+        this.masterId = URI.create("invalidId");
+        this.name = "invalidName";
+        this.slaveId = URI.create("invalidId");
+        this.resource = "/slave";
+        this.address = Resource.getPath() + resource;
+        this.headers = Resource.getHeadersWithAuth();
+    }
+
+    public String create(URI masterId,
+                         String name,
+                         String username,
+                         String password,
+                         String certificateAuthority,
+                         String certificateRevocationList,
+                         String identityStore,
+                         String identityStorePassword) throws Exception {
+        this.name = name;
+        this.masterId = masterId;
+        this.username = username;
+        this.password = password;
+        this.certificateAuthority = certificateAuthority;
+        this.certificateRevocationList = certificateRevocationList;
+        this.identityStore = identityStore;
+        this.identityStorePassword = identityStorePassword;
+
+        String parameter = String.format(parameterFormat, username, password, masterId.toString(), name, certificateAuthority, certificateRevocationList, identityStore, identityStorePassword);
+        String id = null;
+        String response = Resource.getResponse(address, headers, parameter, "POST");
+        int start = response.indexOf("\"slave_id\": \"");
+        if (start != -1) {
+            start += 13; // skip past the "slave_id": " prefix
+            int end = response.indexOf("\"", start);
+            if (end != -1 && end > start) {
+                id = response.substring(start, end);
+            } else {
+                logger.info("slave id not found in response.");
+            }
+        } else {
+            logger.info("slave_id key not found in response.");
+        }
+        if (id != null) {
+            this.slaveId = URI.create(id);
+        }
+        return id;
+    }
+
+    public String updateSlave() throws Exception {
+        String parameter = String.format(parameterFormat, username, password, masterId.toString(), name, certificateAuthority, certificateRevocationList, identityStore, identityStorePassword);
+        String response = Resource.getResponse(address + "/" + slaveId.toString(), headers, parameter, "PUT");
+        return response;
+    }
+
+    public String getSlave() throws Exception {
+        String response = Resource.getResponse(address + "/" + slaveId.toString(), headers, "", "GET");
+        return response;
+    }
+
+    public String listSlave() throws Exception {
+        String response = Resource.getResponse(address, headers, "", "GET");
+        return response;
+    }
+
+    public String deleteSlave() throws Exception {
+        String response = Resource.getResponse(address + "/" + slaveId.toString(), headers, "", "DELETE");
+        return response;
+    }
+}
diff --git a/ecs-api-workshop/src/main/resources/css/main.css b/ecs-api-workshop/src/main/resources/css/main.css
new file mode 100644
index 0000000..ab36e31
--- /dev/null
+++ b/ecs-api-workshop/src/main/resources/css/main.css
@@ -0,0 +1,54 @@
+#clickSource {
+    display: inline-block;
+    *display: inline;
+    zoom: 1;
+    padding: 6px 20px;
+    margin: 0;
+    cursor: pointer;
+    border: 1px solid #bbb;
+    overflow: visible;
+    font: bold 13px arial, helvetica, sans-serif;
+    text-decoration: none;
+    white-space: nowrap;
+    color: #555;
+    text-transform: capitalize;
+
+    background-color: #ddd;
+    background-image: -webkit-gradient(linear, left top, left bottom, from(rgba(255,255,255,1)), to(rgba(255,255,255,0)));
+    background-image: -webkit-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0));
+    background-image: -moz-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0));
+    background-image: -ms-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0));
+    background-image: -o-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0));
+    background-image: linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0));
+
+    -webkit-transition: background-color .2s ease-out;
+    -moz-transition: background-color .2s ease-out;
+    -ms-transition: background-color .2s ease-out;
+    -o-transition: background-color .2s ease-out;
+    transition: background-color .2s ease-out;
+    background-clip: padding-box; /* Fix bleeding */
+
+    -moz-border-radius: 3px;
+    -webkit-border-radius: 3px;
+    border-radius: 3px;
+
+    -moz-box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset;
+    -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset;
+    box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset;
+    text-shadow: 0 1px 0 rgba(255,255,255, .9);
+
+    -webkit-touch-callout: none;
+    -webkit-user-select: none;
+    -khtml-user-select: none;
+    -moz-user-select: none;
+    -ms-user-select: none;
+    user-select: none;
+}
+
+#clickTarget {
+    float: right;
+    height: 80px;
+    border: 1px solid #999;
+    font: 14pt arial;
+    color: chocolate;
+}
\ No newline at end of file
diff --git a/ecs-api-workshop/src/main/resources/js/main.js b/ecs-api-workshop/src/main/resources/js/main.js
new file mode 100644
index 0000000..382f654
--- /dev/null
+++ b/ecs-api-workshop/src/main/resources/js/main.js
@@ -0,0 +1,8 @@
+var counter = 0;
+function click(e) {
+    document.getElementById('clickTarget').innerText = 'You clicked me ' + ++counter + ' times!';
+}
+function load() {
+    document.getElementById('clickSource').onclick = click;
+}
+window.onload = load;
\ No newline at end of file
diff --git a/ecs-api-workshop/src/main/resources/main.html b/ecs-api-workshop/src/main/resources/main.html
new file mode 100644
index 0000000..714dfda
--- /dev/null
+++ b/ecs-api-workshop/src/main/resources/main.html
@@ -0,0 +1,20 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <title>ECS Static Website Test</title>
+    <link rel="stylesheet" type="text/css" href="css/main.css"/>
+    <script type="text/javascript" src="js/main.js"></script>
+</head>
+<body>
+<h1>This is a static website</h1>
+
+<p>All of the images, stylesheets and links you see are hosted within a bucket and publicly readable.
+    Note that all references are relative. This is because the namespace is included in the host name, which consumes
+    the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any
+    absolute references.</p>
+<p><a href="pages/page1.html">Page 1</a></p>
+<p><a href="pages/page2.html">Page 2</a></p>
+<div id="clickSource">Click Me!</div>
+<div id="clickTarget"></div>
+</body>
+</html>
\ No newline at end of file
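The pages above are meant to be served directly out of a publicly readable ECS bucket. As a rough sketch of how they could get there (assuming the commented-out aws-java-sdk-s3 dependency in build.gradle is enabled; the endpoint, region, bucket name and credentials below are placeholders, not values from this repo), a path-style upload with a public-read ACL might look like this:

import java.io.File;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class StaticSiteUploader {
    public static void main(String[] args) {
        // endpoint, region, bucket and credentials are placeholders for a real ECS deployment
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withCredentials(new AWSStaticCredentialsProvider(
                        new BasicAWSCredentials("my_access_key", "my_secret_key")))
                .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(
                        "https://ecs.example.com:9021", "us-east-1"))
                .enablePathStyleAccess() // keep the bucket in the path, as the pages assume
                .build();
        // the public-read ACL is what makes the page fetchable without credentials
        s3.putObject(new PutObjectRequest("static-site-bucket", "main.html",
                new File("src/main/resources/main.html"))
                .withCannedAcl(CannedAccessControlList.PublicRead));
    }
}

The same call, repeated for the css, js and pages/ files with matching keys, preserves the relative layout the pages rely on.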
diff --git a/ecs-api-workshop/src/main/resources/pages/page1.html b/ecs-api-workshop/src/main/resources/pages/page1.html
new file mode 100644
index 0000000..89cede8
--- /dev/null
+++ b/ecs-api-workshop/src/main/resources/pages/page1.html
@@ -0,0 +1,20 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <title>ECS Static Website Test - Page 1</title>
+    <link rel="stylesheet" type="text/css" href="../css/main.css"/>
+    <script type="text/javascript" src="../js/main.js"></script>
+</head>
+<body>
+<h1>This is a sub-page of a static website</h1>
+
+<p>All of the images, stylesheets and links you see are hosted within a bucket and publicly readable.
+    Note that all references are relative. This is because the namespace is included in the host name, which consumes
+    the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any
+    absolute references.</p>
+<p><a href="../main.html">Back to main page</a></p>
+<p><a href="page2.html">Page 2</a></p>
+<div id="clickSource">Click Me!</div>
+<div id="clickTarget"></div>
+</body>
+</html>
\ No newline at end of file
diff --git a/ecs-api-workshop/src/main/resources/pages/page2.html b/ecs-api-workshop/src/main/resources/pages/page2.html
new file mode 100644
index 0000000..d1fde0d
--- /dev/null
+++ b/ecs-api-workshop/src/main/resources/pages/page2.html
@@ -0,0 +1,20 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <title>ECS Static Website Test - Page 2</title>
+    <link rel="stylesheet" type="text/css" href="../css/main.css"/>
+    <script type="text/javascript" src="../js/main.js"></script>
+</head>
+<body>
+<h1>This is a sub-page of a static website</h1>
+
+<p>All of the images, stylesheets and links you see are hosted within a bucket and publicly readable.
+    Note that all references are relative. This is because the namespace is included in the host name, which consumes
+    the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any
+    absolute references.</p>
+<p><a href="../main.html">Back to main page</a></p>
+<p><a href="page1.html">Page 1</a></p>
+<div id="clickSource">Click Me!</div>
+<div id="clickTarget"></div>
+</body>
+</html>
\ No newline at end of file
diff --git a/ecs-api-workshop/src/test/java/com/emc/ecs/api/sample/MasterTest.java b/ecs-api-workshop/src/test/java/com/emc/ecs/api/sample/MasterTest.java
new file mode 100644
index 0000000..aad18d4
--- /dev/null
+++ b/ecs-api-workshop/src/test/java/com/emc/ecs/api/sample/MasterTest.java
@@ -0,0 +1,36 @@
+package com.emc.ecs.api.sample;
+
+import java.net.URI;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class MasterTest {
+
+    @Test
+    public void masterSlaveTest() {
+        Master m = new Master();
+        Slave s = new Slave();
+        try {
+            String masterId = m.create("master");
+            assertNotNull("master creation should return an id", masterId);
+            String slaveId = s.create(URI.create(masterId),
+                    "slave",
+                    "username",
+                    "password",
+                    "certificateAuthority",
+                    "certificateRevocationList",
+                    "identityStore",
+                    "identityStorePassword");
+            assertNotNull("slave creation should return an id", slaveId);
+        } catch (Exception e) {
+            fail("master/slave workflow failed: " + e);
+        }
+    }
+}
diff --git a/ecs-go-certs/main.go b/ecs-go-certs/main.go
new file mode 100644
index 0000000..ebe861f
--- /dev/null
+++ b/ecs-go-certs/main.go
@@ -0,0 +1,52 @@
+package main
+
+import (
+	"context"
+	"crypto/tls"
+	"flag"
+	"fmt"
+	"golang.org/x/crypto/acme/autocert"
+	"log"
+)
+
+var (
+	help = false
+)
+
+func parseFlags() {
+	flag.BoolVar(&help, "help", false, "if true, prints usage")
+	flag.Parse()
+}
+
+func printUsage() {
+	log.Printf("----------------------------------------------")
+	log.Printf(" Certgen: a command line utility to provide certificates")
+	log.Printf(" ---------------------------------------------")
+}
+
+func main() {
+	parseFlags()
+	var m *autocert.Manager
+	if help {
+		printUsage()
+		return
+	}
+
+	hostPolicy := func(ctx context.Context, host string) error {
+		allowedHost := "localhost" // please change to the host you want certificates for
+		if host == allowedHost {
+			return nil
+		}
+		return fmt.Errorf("acme: only %s is allowed", allowedHost)
+	}
+
+	dataDir := "."
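+	// autocert caches issued certificates under dataDir (via DirCache below) so they can be reused across restarts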
+	m = &autocert.Manager{
+		Prompt:     autocert.AcceptTOS,
+		HostPolicy: hostPolicy,
+		Cache:      autocert.DirCache(dataDir),
+	}
+
+	config := tls.Config{GetCertificate: m.GetCertificate}
+	// this sample only prints the config; a real server would hand it to tls.Listen or an http.Server
+	log.Printf("%v", config)
+}
diff --git a/ecs-metrics-workshop/CasTester.java b/ecs-metrics-workshop/CasTester.java
new file mode 100644
index 0000000..2c55e3e
--- /dev/null
+++ b/ecs-metrics-workshop/CasTester.java
@@ -0,0 +1,314 @@
+//package com.emc.ecs.monitoring.sample;
+//import com.filepool.fplibrary.*;
+//import com.google.common.base.Preconditions;
+//import com.google.common.base.Throwables;
+//import com.google.common.collect.ImmutableList;
+//import com.google.common.util.concurrent.ListenableFuture;
+//import com.google.common.util.concurrent.ListeningExecutorService;
+//import com.google.common.util.concurrent.MoreExecutors;
+//import org.apache.commons.lang3.StringUtils;
+//import org.apache.commons.lang3.tuple.Pair;
+//import org.joda.time.DateTime;
+//import org.joda.time.format.DateTimeFormatter;
+//import org.slf4j.Logger;
+//import org.slf4j.LoggerFactory;
+//
+//import java.io.*;
+//import java.net.*;
+//import java.util.*;
+//
+//import static java.lang.String.format;
+//import static java.nio.charset.StandardCharsets.UTF_8;
+//import static java.util.Arrays.asList;
+//import static java.util.Arrays.stream;
+//import static org.apache.commons.lang3.ArrayUtils.EMPTY_BYTE_ARRAY;
+//import static org.joda.time.format.ISODateTimeFormat.basicDateTime;
+//
+//import javax.net.ssl.HttpsURLConnection;
+//import java.security.InvalidKeyException;
+//import java.security.NoSuchAlgorithmException;
+//import java.util.concurrent.Callable;
+//import java.util.concurrent.Executors;
+//import javax.crypto.Mac;
+//import javax.crypto.spec.SecretKeySpec;
+//import org.apache.commons.io.FileUtils;
+//
+///*
+//* This class is a standalone utility to test the DELL ECS CAS Head Service from the command line.
+//* It lists the helper methods required to test the CAS Head Service.
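+//* NOTE: the class ships commented out; judging by its imports, it appears to require the proprietary
+//* CAS SDK (com.filepool.fplibrary) plus Guava, Joda-Time, commons-lang and commons-io on the classpath,
+//* none of which are declared in the build files in this repo.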
+//*/ +//public class CasTester { +// private static final Logger logger = LoggerFactory.getLogger(CasTester.class); +// public static final DateTimeFormatter DATE_TIME_FORMATTER = basicDateTime().withZoneUTC(); +// public static final int blobSize = 1024 * 16 * 2; //32768 +// private static final String DEFAULT_BLOB_TAG_NAME = "blob"; +// private static final String CLIP_LIST_TAG_NAME = "clip_list"; +// private static final String STANDARD_CAS_HEAD_PORT = "3218"; +// private static final byte[] payload = new byte[blobSize]; +// +// static { +// for (int i = 0; i < payload.length; ++i) { +// for (byte c = 0x21; c <= 0x7A && i < payload.length; ++i, ++c) { +// payload[i] = c; +// } +// } +// } +// +// private class CasConnection { +// private FPPool fpPool; +// private String connectionString; +// +// public CasConnection(String connectionString, FPPool fpPool) { +// this.fpPool = fpPool; +// this.connectionString = connectionString; +// } +// +// public FPPool getFpPool() { +// return fpPool; +// } +// +// public String getConnectionString() { +// return connectionString; +// } +// +// public void Close() throws Exception { +// if (fpPool != null){ +// fpPool.Close(); +// } +// } +// } +// +// private CasConnection getCasConnection(String ip, String port, String user, String password, String namespace, File pea) throws Exception { +// String connectionString = format( +// "%s:%d?path=%s", +// ip, STANDARD_CAS_HEAD_PORT, pea.getAbsolutePath()); +// FPPool fpPool = getFP(connectionString); +// if (fpPool == null) { +// String message = "FPPool could not be instantiated."; +// logger.error(message); +// throw new Exception(message); +// } +// CasConnection casConnection = new CasConnection(connectionString, fpPool); +// return casConnection; +// } +// +// protected static Map getHeaders() { +// Map headers = new HashMap<>(); +// headers.put("Content-Type", "application/x-www-form-urlencoded"); +// headers.put("Accept", "application/json"); +// headers.put("Content-Encoding", "UTF-8"); +// headers.put("Connection", "keep-alive"); +// return headers; +// } +// +// public static String getResponse(String httpsURL, Map headers, String payload, String method) throws Exception { +// URL myurl = new URL(httpsURL); +// String response = null; +// logger.info("Sending a " + method + " request to:" + httpsURL); +// HttpsURLConnection con = (HttpsURLConnection)myurl.openConnection(); +// con.setRequestMethod(method); +// for (Map.Entry entry : headers.entrySet()) { +// con.setRequestProperty(entry.getKey(), entry.getValue()); +// } +// con.setDoOutput(true); +// con.setDoInput(true); +// if (method.equals("POST")) { +// try (DataOutputStream output = new DataOutputStream(con.getOutputStream())) { +// output.writeBytes(payload); +// } +// } +// try (DataInputStream input = new DataInputStream(con.getInputStream())) { +// StringBuffer contents = new StringBuffer(); +// String tmp; +// while ((tmp = input.readLine()) != null) { +// contents.append(tmp); +// logger.debug("tmp="+tmp); +// } +// response = contents.toString(); +// } +// logger.info("Resp Code:" + con.getResponseCode()); +// return response; +// } +// +// private static FPPool getFP(String connectionString) throws Exception { +// FPPool fpPool = null; +// try { +// fpPool = new FPPool(connectionString); +// } catch (Exception e) { +// e.printStackTrace(); +// logger.error("Exception:", e); +// } finally { +// if(fpPool != null) { +// fpPool.Close(); +// } +// } +// return fpPool; +// } +// +// +// private static String fillParams(String 
pattern, String ip, String port, String user, String namespace){ +// String result = pattern +// .replace("{ip}", ip) +// .replace("{port}", port) +// .replace("{userId}", user) +// .replace("{namespace}", namespace); +// return result; +// } +// +// public static String getUserCasSecret(String ip, String port, String user, String namespace){ +// String secretEndPoint = fillParams("https://{ip}:{port}/object/user-cas/secret/{namespace}/{userId}", +// ip, port, user, namespace); +// logger.info("address={}", secretEndPoint); +// String response = null; +// try { +// Map headers = getHeaders(); +// response = getResponse(secretEndPoint, headers, null, "GET"); +// logger.info("secret:" + response); +// } catch (Exception e) { +// e.printStackTrace(); +// logger.error("Exception:", e); +// } +// return response; +// } +// +// public static String getProfilePea(String ip, String port, String user, String namespace){ +// String peaEndpoint = fillParams("https://{ip}:{port}/object/user-cas/secret/{namespace}/{userId}/pea", +// ip, port, user, namespace); +// logger.info("address={}", peaEndpoint); +// String response = null; +// try { +// Map headers = getHeaders(); +// response = getResponse(peaEndpoint, headers, null, "GET"); +// logger.info("pea:" + response); +// } catch (Exception e) { +// e.printStackTrace(); +// logger.error("Exception:", e); +// } +// return response; +// } +// +// public static File fetchPeaFile(String ip, +// String port, +// final String user, +// final String namespace) { +// final File peaFile = new File( +// FileUtils.getTempDirectory(), +// String.join("-", user, ip, port) + ".pea" +// ); +// +// final String pea = getProfilePea(ip, port, user, namespace); +// +// try { +// FileUtils.writeStringToFile(peaFile, pea); +// } catch (final IOException e) { +// throw new UncheckedIOException(e); +// } +// +// return peaFile; +// } +// +// private static class ClipsTask implements Callable { +// private final CasConnection casConnection; +// private final String ecsCasSecret; +// private final File pea; +// private byte[] payload; +// +// public ClipsTask(CasConnection casConnection, String ecsCasSecret, File pea) { +// this.casConnection = casConnection; +// this.ecsCasSecret = ecsCasSecret; +// this.pea = pea; +// final byte[] payloadBytes = new byte[1024]; +// for (int i = 0; i < payloadBytes.length; ++i) { +// for (byte c = 0x21; c <= 0x7A && i < payloadBytes.length; ++i, ++c) { +// this.payload[i] = c; +// } +// } +// } +// public void accept(FPPool fpPool, FPClip fpClip) { +// try { +// final FPTag topTag = fpClip.getTopTag(); +// try { +// final FPTag blob = new FPTag(topTag, DEFAULT_BLOB_TAG_NAME); +// try { +// final byte[] clipTime = EMPTY_BYTE_ARRAY; +// +// blob.BlobWrite(new SequenceInputStream(new ByteArrayInputStream(clipTime), new ByteArrayInputStream(payload))); +// } finally { +// blob.Close(); +// } +// } finally { +// topTag.Close(); +// } +// } catch (FPLibraryException | IOException e) { +// Throwables.propagate(e); +// } +// } +// +// @Override +// public Void call() throws Exception { +// try { +// for (int i = 0; i < 10; i++) { +// final Optional dateTimeOp = Optional.empty(); +// final FPClip fpClip = new FPClip(casConnection.getFpPool()); +// try { +// accept(casConnection.getFpPool(), fpClip); +// final String clipRefId = fpClip.Write(); +// +// final ByteArrayOutputStream cdf = new ByteArrayOutputStream(); +// fpClip.RawRead(cdf); +// logger.info("clip Id: {}, size read = {}", clipRefId, cdf.size()); +// } finally { +// fpClip.Close(); +// } 
+//
+//                }
+//            } catch (RuntimeException | IOException | FPLibraryException e) {
+//                throw e;
+//            }
+//
+//            return null;
+//        }
+//    }
+//
+//    public void writeClips(String ip, String port, String user, String password, String namespace) throws Exception {
+//        final String ecsCasSecret = getUserCasSecret(ip, port, user, namespace);
+//        if (ecsCasSecret == null) {
+//            String message = "Credentials are not correct";
+//            logger.error(message);
+//            throw new Exception(message);
+//        }
+//        File pea = fetchPeaFile(ip, port, user, namespace);
+//        CasConnection casConnection = getCasConnection(ip, port, user, password, namespace, pea);
+//
+//        try {
+//            final ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
+//
+//            executor.submit(new ClipsTask(casConnection, ecsCasSecret, pea));
+//        } catch (Exception e) {
+//            e.printStackTrace();
+//            logger.error("Exception:", e);
+//        }
+//
+//        Thread.sleep(15000); // crude wait for the asynchronous clip writes to finish before closing the pool
+//        casConnection.Close();
+//    }
+//
+//    public static void main(String[] args) throws Exception {
+//        String ip = "10.247.142.111"; // NetworkUtility.getNodeIp();
+//        String user = "apiuser";
+//        String port = "3218";
+//        String password = "";
+//        String namespace = "s3";
+//        String bucket = "b6";
+//        try {
+//            new CasTester().writeClips(ip, port, user, password, namespace);
+//        } catch (Exception e) {
+//            e.printStackTrace();
+//            logger.error("Exception:", e);
+//        }
+//    }
+//}
diff --git a/ecs-metrics-workshop/README.md b/ecs-metrics-workshop/README.md
new file mode 100644
index 0000000..bf02f07
--- /dev/null
+++ b/ecs-metrics-workshop/README.md
@@ -0,0 +1,5 @@
+#
+# These workshops explain how to publish metrics from on-premise instances to both public clouds and how to read them back
+#
+# Details: http://github.com/EMCECS/ecs-samples/files/2779641/AzureMetrics.docx
+
diff --git a/ecs-metrics-workshop/aws-get-metrics-workshop/README.md b/ecs-metrics-workshop/aws-get-metrics-workshop/README.md
new file mode 100644
index 0000000..472404b
--- /dev/null
+++ b/ecs-metrics-workshop/aws-get-metrics-workshop/README.md
@@ -0,0 +1,6 @@
+#
+# AWS GetMetrics API sample for metrics published from on-premise instances
+#
+# Details: http://github.com/EMCECS/ecs-samples/files/2779641/AzureMetrics.docx
+The file referenced under Details was originally written for Azure, but it covers both AWS and Azure.
+
diff --git a/ecs-metrics-workshop/aws-get-metrics-workshop/build.gradle b/ecs-metrics-workshop/aws-get-metrics-workshop/build.gradle
new file mode 100644
index 0000000..2f8bd87
--- /dev/null
+++ b/ecs-metrics-workshop/aws-get-metrics-workshop/build.gradle
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2013-2018 Dell Inc. or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0.txt
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ +group = 'com.emc.ecs.workshop' +version = '1.0' + +apply plugin: 'java' +apply plugin: 'idea' +apply plugin: 'eclipse' + +repositories { + mavenCentral() +} + +dependencies { + // https://docs.gradle.org/current/userguide/declaring_dependencies.html + // compile "com.amazonaws:aws-java-sdk-s3:1.11.118" + compile group: 'org.slf4j', name: 'slf4j-simple', version: '1.7.24' + compile group: 'org.slf4j', name:'slf4j-api', version: '1.7.24' + compile group: 'com.jayway.jsonpath', name:'json-path', version:'2.1.0' + implementation 'org.springframework:spring-web:5.0.2.RELEASE' +} + +task wrapper(type: Wrapper) { + gradleVersion = '4.3.1' +} + +jar { + from { + configurations.compile.collect { + it.isDirectory() ? it : zipTree(it) + } + } + manifest { + attributes( + 'Main-Class': 'com.emc.ecs.monitoring.sample.GetMetricsRequest' + ) + } +} diff --git a/ecs-metrics-workshop/aws-get-metrics-workshop/gradle/wrapper/gradle-wrapper.properties b/ecs-metrics-workshop/aws-get-metrics-workshop/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..ab4b7fe --- /dev/null +++ b/ecs-metrics-workshop/aws-get-metrics-workshop/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +#Thu Nov 23 11:33:09 CET 2017 +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-4.3.1-bin.zip diff --git a/ecs-metrics-workshop/aws-get-metrics-workshop/gradlew b/ecs-metrics-workshop/aws-get-metrics-workshop/gradlew new file mode 100755 index 0000000..91a7e26 --- /dev/null +++ b/ecs-metrics-workshop/aws-get-metrics-workshop/gradlew @@ -0,0 +1,164 @@ +#!/usr/bin/env bash + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn ( ) { + echo "$*" +} + +die ( ) { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; +esac + +# For Cygwin, ensure paths are in UNIX format before anything is touched. +if $cygwin ; then + [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"` +fi + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >&- +APP_HOME="`pwd -P`" +cd "$SAVED" >&- + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! 
-x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? -ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules +function splitJvmOpts() { + JVM_OPTS=("$@") +} +eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS +JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" + +exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" diff --git a/ecs-metrics-workshop/aws-get-metrics-workshop/gradlew.bat b/ecs-metrics-workshop/aws-get-metrics-workshop/gradlew.bat new file mode 100644 index 0000000..aec9973 --- /dev/null +++ b/ecs-metrics-workshop/aws-get-metrics-workshop/gradlew.bat @@ -0,0 +1,90 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem 
########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windowz variants + +if not "%OS%" == "Windows_NT" goto win9xME_args +if "%@eval[2+2]" == "4" goto 4NT_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* +goto execute + +:4NT_args +@rem Get arguments from the 4NT Shell from JP Software +set CMD_LINE_ARGS=%$ + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/java/com/emc/ecs/monitoring/sample/GetMetricsRequest.java b/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/java/com/emc/ecs/monitoring/sample/GetMetricsRequest.java new file mode 100644 index 0000000..b09312b --- /dev/null +++ b/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/java/com/emc/ecs/monitoring/sample/GetMetricsRequest.java @@ -0,0 +1,190 @@ +package com.emc.ecs.monitoring.sample; +import java.net.*; +import java.io.*; +import java.text.SimpleDateFormat; +import java.util.*; +import javax.net.ssl.HttpsURLConnection; +import java.nio.charset.StandardCharsets; +import java.security.InvalidKeyException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class GetMetricsRequest { + private static final Logger logger = LoggerFactory.getLogger(GetMetricsRequest.class); + + protected static byte[] sha256(String content) throws Exception { + MessageDigest digest = MessageDigest.getInstance("SHA-256"); + byte[] encodedhash = digest.digest( + content.getBytes(StandardCharsets.UTF_8)); + return encodedhash; + } + protected static String bytesToHex(byte[] hash) { + StringBuffer hexString = new StringBuffer(); + for (int i = 0; i < hash.length; i++) { + String hex = Integer.toHexString(0xff & hash[i]); + if(hex.length() == 1) hexString.append('0'); + hexString.append(hex); + } + return hexString.toString(); + } + protected static byte[] HmacSHA256(String data, byte[] key) throws Exception { + String algorithm="HmacSHA256"; + Mac mac = Mac.getInstance(algorithm); + mac.init(new SecretKeySpec(key, algorithm)); + return mac.doFinal(data.getBytes("UTF8")); + } + + protected static byte[] getSignatureKey(String key, String dateStamp, String regionName, String serviceName) throws Exception { + byte[] kSecret = ("AWS4" + key).getBytes("UTF8"); + byte[] kDate = HmacSHA256(dateStamp, kSecret); + byte[] kRegion = HmacSHA256(regionName, kDate); + byte[] kService = HmacSHA256(serviceName, kRegion); + byte[] kSigning = HmacSHA256("aws4_request", kService); + return kSigning; + } + + protected static Map getHeaders(String amz_date, String authorization_header, String apiName, String content_type) { + Map headers = new HashMap<>(); + headers.put("x-amz-date", amz_date); + headers.put("Authorization", authorization_header); + headers.put("x-amz-target", "GraniteServiceVersion20100801."+apiName); + headers.put("Content-Type", content_type); + headers.put("Accept", "application/json"); + headers.put("Content-Encoding", "amz-1.0"); + headers.put("Connection", "keep-alive"); + return headers; + } + + + public static String getResponse(String httpsURL, Map headers, String payload) throws Exception { + URL myurl = new URL(httpsURL); + String response = null; + logger.info("Sending a post request to:" + httpsURL); + HttpsURLConnection con = (HttpsURLConnection)myurl.openConnection(); + con.setRequestMethod("POST"); + for (Map.Entry entry : headers.entrySet()) { + logger.info("Header "+entry.getKey()+": " + entry.getValue()); + con.setRequestProperty(entry.getKey(), entry.getValue()); + } + con.setDoOutput(true); + con.setDoInput(true); + try (DataOutputStream output = new DataOutputStream(con.getOutputStream())) { + output.writeBytes(payload); + } + try 
(DataInputStream input = new DataInputStream(con.getInputStream())) { + StringBuffer contents = new StringBuffer(); + String tmp; + while ((tmp = input.readLine()) != null) { + contents.append(tmp); + logger.debug("tmp="+tmp); + } + response = contents.toString(); + } + logger.info("Resp Code:" + con.getResponseCode()); + logger.info("Resp Message:" + con.getResponseMessage()); + return response; + } + + protected static String getDateString() { + String dateString = null; + try { + Date dt = new Date(); + SimpleDateFormat dateFormatter = new SimpleDateFormat("yyyyMMdd'T'HHmmss'Z'"); + dateString = dateFormatter.format(dt); + logger.info("x_amz_date = "+dateString); + } catch (Exception e) { + logger.error("Exception:", e); + } + return dateString; + } + + public static void main(String[] args) throws InvalidKeyException, NoSuchAlgorithmException, IllegalStateException, UnsupportedEncodingException { + String AWS_ACCESS_KEY_ID="my_access_key_id"; + String AWS_SECRET_ACCESS_KEY="my_access_secret"; + String service="monitoring"; + String host="monitoring.us-east-1.amazonaws.com"; + String region="us-east-1"; + String endpoint="https://monitoring.us-east-1.amazonaws.com"; + String AWS_request_parameters="Action=GetMetricStatistics&Version=2010-08-01"; + String amz_date = getDateString(); + String date_stamp = amz_date.substring(0, amz_date.indexOf("T")); + logger.info("date_stamp="+date_stamp); + String canonical_uri = "/"; + String canonical_querystring = ""; + String method = "POST"; + String apiName = "GetMetricStatistics"; + String content_type = "application/x-amz-json-1.0"; + String amz_target = "GraniteServiceVersion20100801."+apiName; + String canonical_headers = "content-type:" + content_type + "\n" + "host:" + host + "\n" + "x-amz-date:" + amz_date + "\n" + "x-amz-target:" + amz_target + "\n"; + String signed_headers = "content-type;host;x-amz-date;x-amz-target"; + String accessKey = AWS_ACCESS_KEY_ID; + String accessSecretKey = AWS_SECRET_ACCESS_KEY; + String date = "20130806"; + String signing = "aws4_request"; + String request_parameters = "{"; + request_parameters += " \"Action\": \"GetMetricStatistics\", "; + request_parameters += " \"Namespace\": \"On-PremiseObjectStorageMetrics\","; + request_parameters += " \"MetricName\": \"BucketSizeBytes\","; + request_parameters += " \"Dimensions\": ["; + request_parameters += " {"; + request_parameters += " \"Name\": \"BucketName\","; + request_parameters += " \"Value\": \"ExampleBucket\""; + request_parameters += " }"; + request_parameters += " ],"; + request_parameters += " \"StartTime\": 1545884562,"; + request_parameters += " \"EndTime\": 1545884662,"; + request_parameters += " \"Period\": 86400,"; + request_parameters += " \"Statistics\": ["; + request_parameters += " \"Average\""; + request_parameters += " ],"; + request_parameters += " \"Unit\": \"Bytes\""; + request_parameters += "}"; + request_parameters = new String(request_parameters.getBytes("UTF-8"), "UTF-8"); + + try { + String payload_hash = bytesToHex(sha256(request_parameters)); + String canonical_request = method + "\n" + canonical_uri + "\n" + canonical_querystring + "\n" + canonical_headers + "\n" + signed_headers + "\n" + payload_hash; + canonical_request = new String(canonical_request.getBytes("UTF-8"), "UTF-8"); + String algorithm = "AWS4-HMAC-SHA256"; + String credential_scope = date_stamp + "/" + region + "/" + service + "/" + "aws4_request"; + String string_to_sign = algorithm + "\n" + amz_date + "\n" + credential_scope + "\n" + 
+            string_to_sign = new String(string_to_sign.getBytes("UTF-8"), "UTF-8");
+            byte[] signing_key = getSignatureKey(accessSecretKey, date_stamp, region, service);
+            String signature = bytesToHex(HmacSHA256(string_to_sign, signing_key));
+            logger.info("signature: {}", bytesToHex(signing_key));
+            String authorization_header = algorithm + " " + "Credential=" + accessKey + "/" + credential_scope + ", " + "SignedHeaders=" + signed_headers + ", " + "Signature=" + signature;
+            logger.info("authorization_header=" + authorization_header);
+            Map<String, String> headers = getHeaders(amz_date, authorization_header, apiName, content_type);
+            logger.info("Sending request with:" + request_parameters);
+            String response = getResponse(endpoint, headers, request_parameters);
+            logger.info("response:" + response);
+        } catch (Exception e) {
+            e.printStackTrace();
+            logger.error("Exception:", e);
+        }
+    }
+}
+/*
+output:
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - x_amz_date = 20181231T210801Z
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - signature: bfa7520029f34f6d407b381197bd18a97101efbd2d4fa5bc183c44522ce24fde
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - authorization_header=AWS4-HMAC-SHA256 Credential=obfuscated/20181231/us-east-1/monitoring/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=a713c40092d1f2c8dcf08457483500be869f87e62ca933b76613fc51962c67b4
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - Sending request with:{ "Action": "GetMetricStatistics", "Namespace": "On-PremiseObjectStorageMetrics", "MetricName": "BucketSizeBytes", "Dimensions": [ { "Name": "BucketName", "Value": "ExampleBucket" } ], "StartTime": 1545884562, "EndTime": 1545884662, "Period": 86400, "Statistics": [ "Average" ], "Unit": "Bytes"}
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - Sending a post request to:https://monitoring.us-east-1.amazonaws.com
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - Header Authorization: AWS4-HMAC-SHA256 Credential=obfuscated/20181231/us-east-1/monitoring/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=a713c40092d1f2c8dcf08457483500be869f87e62ca933b76613fc51962c67b4
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - Header x-amz-target: GraniteServiceVersion20100801.GetMetricStatistics
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - Header x-amz-date: 20181231T210801Z
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - Header Accept: application/json
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - Header Content-Encoding: amz-1.0
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - Header Connection: keep-alive
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - Header Content-Type: application/x-amz-json-1.0
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - Resp Code:200
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - Resp Message:OK
+[main] INFO com.emc.ecs.monitoring.sample.GetMetricsRequest - response:{"Datapoints":[{"Average":1.024E12,"Timestamp":1.54588452E9,"Unit":"Bytes"}],"Label":"BucketSizeBytes"}
+*/
diff --git a/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/css/main.css b/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/css/main.css new file mode 100644 index 0000000..ab36e31 --- /dev/null +++ b/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/css/main.css @@
-0,0 +1,54 @@ +#clickSource { + display: inline-block; + *display: inline; + zoom: 1; + padding: 6px 20px; + margin: 0; + cursor: pointer; + border: 1px solid #bbb; + overflow: visible; + font: bold 13px arial, helvetica, sans-serif; + text-decoration: none; + white-space: nowrap; + color: #555; + text-transform: capitalize; + + background-color: #ddd; + background-image: -webkit-gradient(linear, left top, left bottom, from(rgba(255,255,255,1)), to(rgba(255,255,255,0))); + background-image: -webkit-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -moz-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -ms-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -o-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + + -webkit-transition: background-color .2s ease-out; + -moz-transition: background-color .2s ease-out; + -ms-transition: background-color .2s ease-out; + -o-transition: background-color .2s ease-out; + transition: background-color .2s ease-out; + background-clip: padding-box; /* Fix bleeding */ + + -moz-border-radius: 3px; + -webkit-border-radius: 3px; + border-radius: 3px; + + -moz-box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + text-shadow: 0 1px 0 rgba(255,255,255, .9); + + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +#clickTarget { + float: right; + height: 80px; + border: 1px solid #999; + font: 14pt arial; + color: chocolate; +} \ No newline at end of file diff --git a/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/js/main.js b/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/js/main.js new file mode 100644 index 0000000..382f654 --- /dev/null +++ b/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/js/main.js @@ -0,0 +1,8 @@ +var counter = 0; +function click(e) { + document.getElementById('clickTarget').innerText = 'You clicked me ' + ++counter + ' times!'; +} +function load() { + document.getElementById('clickSource').onclick = click; +} +window.onload = load; \ No newline at end of file diff --git a/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/main.html b/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/main.html new file mode 100644 index 0000000..714dfda --- /dev/null +++ b/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/main.html @@ -0,0 +1,20 @@ + + + + + ECS Static Website Test + + + + +

This is a static website

+

All of the images, stylesheets and links you see are hosted within a bucket and publicly readable. + Note that all references are relative. This is because the namespace is included in the host name, which consumes + the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any + absolute references.

+

Page 1

+

Page 2

+
Click Me!
+
+ + \ No newline at end of file diff --git a/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/pages/page1.html b/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/pages/page1.html new file mode 100644 index 0000000..89cede8 --- /dev/null +++ b/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/pages/page1.html @@ -0,0 +1,20 @@ + + + + + ECS Static Website Test - Page 1 + + + + +

This is a sub-page of a static website

+

All of the images, stylesheets and links you see are hosted within a bucket and publicly readable. + Note that all references are relative. This is because the namespace is included in the host name, which consumes + the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any + absolute references.

+

Back to main page

+

Page 2

+
Click Me!
+
+ + \ No newline at end of file diff --git a/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/pages/page2.html b/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/pages/page2.html new file mode 100644 index 0000000..d1fde0d --- /dev/null +++ b/ecs-metrics-workshop/aws-get-metrics-workshop/src/main/resources/pages/page2.html @@ -0,0 +1,20 @@ + + + + + ECS Static Website Test - Page 2 + + + + +

This is a sub-page of a static website

+

All of the images, stylesheets and links you see are hosted within a bucket and publicly readable. + Note that all references are relative. This is because the namespace is included in the host name, which consumes + the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any + absolute references.

+

Back to main page

+

Page 1

+
Click Me!
+
+ + \ No newline at end of file diff --git a/ecs-metrics-workshop/aws-put-metrics-workshop/README.md b/ecs-metrics-workshop/aws-put-metrics-workshop/README.md new file mode 100644 index 0000000..6e8e710 --- /dev/null +++ b/ecs-metrics-workshop/aws-put-metrics-workshop/README.md @@ -0,0 +1,6 @@ +# +# AWS Put Metrics api sample for metrics published from on-premise instances +# +# Details: http://github.com/EMCECS/ecs-samples/files/2779641/AzureMetrics.docx +The file referred to in details, was originally intended for Azure but includes both AWS and Azure + diff --git a/ecs-metrics-workshop/aws-put-metrics-workshop/build.gradle b/ecs-metrics-workshop/aws-put-metrics-workshop/build.gradle new file mode 100644 index 0000000..632cdef --- /dev/null +++ b/ecs-metrics-workshop/aws-put-metrics-workshop/build.gradle @@ -0,0 +1,50 @@ +/* + * Copyright 2013-2018 Dell Inc. or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +group = 'com.emc.ecs.workshop' +version = '1.0' + +apply plugin: 'java' +apply plugin: 'idea' +apply plugin: 'eclipse' + +repositories { + mavenCentral() +} + +dependencies { + // https://docs.gradle.org/current/userguide/declaring_dependencies.html + // compile "com.amazonaws:aws-java-sdk-s3:1.11.118" + compile group: 'org.slf4j', name: 'slf4j-simple', version: '1.7.24' + compile group: 'org.slf4j', name:'slf4j-api', version: '1.7.24' + compile group: 'com.jayway.jsonpath', name:'json-path', version:'2.1.0' + implementation 'org.springframework:spring-web:5.0.2.RELEASE' +} + +task wrapper(type: Wrapper) { + gradleVersion = '4.3.1' +} + +jar { + from { + configurations.compile.collect { + it.isDirectory() ? it : zipTree(it) + } + } + manifest { + attributes( + 'Main-Class': 'com.emc.ecs.monitoring.sample.PutMetricsRequest' + ) + } +} diff --git a/ecs-metrics-workshop/aws-put-metrics-workshop/gradle/wrapper/gradle-wrapper.properties b/ecs-metrics-workshop/aws-put-metrics-workshop/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..ab4b7fe --- /dev/null +++ b/ecs-metrics-workshop/aws-put-metrics-workshop/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +#Thu Nov 23 11:33:09 CET 2017 +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-4.3.1-bin.zip diff --git a/ecs-metrics-workshop/aws-put-metrics-workshop/gradlew b/ecs-metrics-workshop/aws-put-metrics-workshop/gradlew new file mode 100755 index 0000000..91a7e26 --- /dev/null +++ b/ecs-metrics-workshop/aws-put-metrics-workshop/gradlew @@ -0,0 +1,164 @@ +#!/usr/bin/env bash + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
+DEFAULT_JVM_OPTS="" + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn ( ) { + echo "$*" +} + +die ( ) { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; +esac + +# For Cygwin, ensure paths are in UNIX format before anything is touched. +if $cygwin ; then + [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"` +fi + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >&- +APP_HOME="`pwd -P`" +cd "$SAVED" >&- + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules +function splitJvmOpts() { + JVM_OPTS=("$@") +} +eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS +JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" + +exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" diff --git a/ecs-metrics-workshop/aws-put-metrics-workshop/gradlew.bat b/ecs-metrics-workshop/aws-put-metrics-workshop/gradlew.bat new file mode 100644 index 0000000..aec9973 --- /dev/null +++ b/ecs-metrics-workshop/aws-put-metrics-workshop/gradlew.bat @@ -0,0 +1,90 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. 
+echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windowz variants + +if not "%OS%" == "Windows_NT" goto win9xME_args +if "%@eval[2+2]" == "4" goto 4NT_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* +goto execute + +:4NT_args +@rem Get arguments from the 4NT Shell from JP Software +set CMD_LINE_ARGS=%$ + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/java/com/emc/ecs/monitoring/sample/PutMetricsRequest.java b/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/java/com/emc/ecs/monitoring/sample/PutMetricsRequest.java new file mode 100644 index 0000000..02562aa --- /dev/null +++ b/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/java/com/emc/ecs/monitoring/sample/PutMetricsRequest.java @@ -0,0 +1,193 @@ +package com.emc.ecs.monitoring.sample; +import java.net.*; +import java.io.*; +import java.text.SimpleDateFormat; +import java.util.*; +import javax.net.ssl.HttpsURLConnection; +import java.nio.charset.StandardCharsets; +import java.security.InvalidKeyException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class PutMetricsRequest { + private static final Logger logger = LoggerFactory.getLogger(PutMetricsRequest.class); + + protected static byte[] sha256(String content) throws Exception { + MessageDigest digest = MessageDigest.getInstance("SHA-256"); + byte[] encodedhash = digest.digest( + content.getBytes(StandardCharsets.UTF_8)); + return encodedhash; + } + protected static String bytesToHex(byte[] hash) { + StringBuffer hexString = new StringBuffer(); + for (int i = 0; i < hash.length; i++) { + String hex = Integer.toHexString(0xff & hash[i]); + if(hex.length() == 1) hexString.append('0'); + hexString.append(hex); + } + return hexString.toString(); + } + protected static byte[] HmacSHA256(String data, byte[] key) throws Exception { + String algorithm="HmacSHA256"; + Mac mac = Mac.getInstance(algorithm); + mac.init(new SecretKeySpec(key, algorithm)); + return mac.doFinal(data.getBytes("UTF8")); + } + + protected static byte[] getSignatureKey(String key, String dateStamp, String regionName, String serviceName) throws Exception { + byte[] kSecret = ("AWS4" + 
key).getBytes("UTF8");
+        byte[] kDate = HmacSHA256(dateStamp, kSecret);
+        byte[] kRegion = HmacSHA256(regionName, kDate);
+        byte[] kService = HmacSHA256(serviceName, kRegion);
+        byte[] kSigning = HmacSHA256("aws4_request", kService);
+        return kSigning;
+    }
+
+    protected static Map<String, String> getHeaders(String amz_date, String authorization_header, String apiName, String content_type) {
+        Map<String, String> headers = new HashMap<>();
+        headers.put("x-amz-date", amz_date);
+        headers.put("Authorization", authorization_header);
+        headers.put("x-amz-target", "GraniteServiceVersion20100801." + apiName);
+        headers.put("Content-Type", content_type);
+        headers.put("Accept", "application/json");
+        headers.put("Content-Encoding", "amz-1.0");
+        headers.put("Connection", "keep-alive");
+        return headers;
+    }
+
+    public static String getResponse(String httpsURL, Map<String, String> headers, String payload) throws Exception {
+        URL myurl = new URL(httpsURL);
+        String response = null;
+        logger.info("Sending a post request to:" + httpsURL);
+        HttpsURLConnection con = (HttpsURLConnection) myurl.openConnection();
+        con.setRequestMethod("POST");
+        for (Map.Entry<String, String> entry : headers.entrySet()) {
+            logger.info("Header " + entry.getKey() + ": " + entry.getValue());
+            con.setRequestProperty(entry.getKey(), entry.getValue());
+        }
+        con.setDoOutput(true);
+        con.setDoInput(true);
+        try (DataOutputStream output = new DataOutputStream(con.getOutputStream())) {
+            output.writeBytes(payload);
+        }
+        try (DataInputStream input = new DataInputStream(con.getInputStream())) {
+            StringBuffer contents = new StringBuffer();
+            String tmp;
+            while ((tmp = input.readLine()) != null) {
+                contents.append(tmp);
+                logger.debug("tmp=" + tmp);
+            }
+            response = contents.toString();
+        }
+        logger.info("Resp Code:" + con.getResponseCode());
+        logger.info("Resp Message:" + con.getResponseMessage());
+        return response;
+    }
+
+    protected static String getDateString() {
+        String dateString = null;
+        try {
+            Date dt = new Date();
+            SimpleDateFormat dateFormatter = new SimpleDateFormat("yyyyMMdd'T'HHmmss'Z'");
+            dateString = dateFormatter.format(dt);
+            logger.info("x_amz_date = " + dateString);
+        } catch (Exception e) {
+            logger.error("Exception:", e);
+        }
+        return dateString;
+    }
+
+    public static void main(String[] args) throws InvalidKeyException, NoSuchAlgorithmException, IllegalStateException, UnsupportedEncodingException {
+        String AWS_ACCESS_KEY_ID = "my_aws_key_id";
+        String AWS_SECRET_ACCESS_KEY = "my_aws_secret_id";
+        String service = "monitoring";
+        String host = "monitoring.us-east-1.amazonaws.com";
+        String region = "us-east-1";
+        String endpoint = "https://monitoring.us-east-1.amazonaws.com";
+        String AWS_request_parameters = "Action=PutMetricData&Version=2010-08-01";
+        String amz_date = getDateString();
+        String date_stamp = amz_date.substring(0, amz_date.indexOf("T"));
+        String canonical_uri = "/";
+        String canonical_querystring = "";
+        String method = "POST";
+        String apiName = "PutMetricData";
+        String content_type = "application/x-amz-json-1.0";
+        String amz_target = "GraniteServiceVersion20100801." + apiName;
+        String canonical_headers = "content-type:" + content_type + "\n" + "host:" + host + "\n" + "x-amz-date:" + amz_date + "\n" + "x-amz-target:" + amz_target + "\n";
+        String signed_headers = "content-type;host;x-amz-date;x-amz-target";
+        String accessKey = AWS_ACCESS_KEY_ID;
+        String accessSecretKey = AWS_SECRET_ACCESS_KEY;
+        String date = "20130806";
+        String signing = "aws4_request";
+        String request_parameters = "{";
+        request_parameters += "\"Namespace\":\"On-PremiseObjectStorageMetrics\",";
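+        // Note: the "Timestamp" field below is built by concatenating Java null, so the payload
+        // carries a literal JSON null (the recorded run in the output block further down was still
+        // accepted with 200 OK). To pin the sample time explicitly, an epoch-seconds value such as
+        // System.currentTimeMillis() / 1000 could be substituted for the null.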
+ request_parameters += "\"MetricData\":"; + request_parameters += "["; + request_parameters += " {"; + request_parameters += " \"MetricName\": \"NumberOfObjects1\","; + request_parameters += " \"Dimensions\": ["; + request_parameters += " {"; + request_parameters += " \"Name\": \"BucketName\","; + request_parameters += " \"Value\": \"ExampleBucket\""; + request_parameters += " },"; + request_parameters += " {"; + request_parameters += " \"Name\": \"ECSSystemId\","; + request_parameters += " \"Value\": \"UUID\""; + request_parameters += " }"; + request_parameters += " ],"; + request_parameters += " \"Timestamp\": " + null + ","; + request_parameters += " \"Value\": 10,"; + request_parameters += " \"Unit\": \"Count\","; + request_parameters += " \"StorageResolution\": 60"; + request_parameters += " }"; + request_parameters += "]"; + request_parameters += "}"; + request_parameters = new String(request_parameters.getBytes("UTF-8"), "UTF-8"); + + try { + String payload_hash = bytesToHex(sha256(request_parameters)); + String canonical_request = method + "\n" + canonical_uri + "\n" + canonical_querystring + "\n" + canonical_headers + "\n" + signed_headers + "\n" + payload_hash; + canonical_request = new String(canonical_request.getBytes("UTF-8"), "UTF-8"); + String algorithm = "AWS4-HMAC-SHA256"; + String credential_scope = date_stamp + "/" + region + "/" + service + "/" + "aws4_request"; + String string_to_sign = algorithm + "\n" + amz_date + "\n" + credential_scope + "\n" + bytesToHex(sha256(canonical_request)); + string_to_sign = new String(string_to_sign.getBytes("UTF-8"), "UTF-8"); + byte[] signing_key = getSignatureKey(accessSecretKey, date_stamp, region, service); + String signature = bytesToHex(HmacSHA256(string_to_sign, signing_key)); + logger.info("signature: {}", bytesToHex(signing_key)); + String authorization_header = algorithm + " " + "Credential=" + accessKey + "/" + credential_scope + ", " + "SignedHeaders=" + signed_headers + ", " + "Signature=" + signature; + logger.info("authorization_header="+authorization_header); + Map headers = getHeaders(amz_date, authorization_header, apiName, content_type); + logger.info("Sending request with:" + request_parameters); + String response = getResponse(endpoint, headers, request_parameters); + logger.info("response:"+response); + } catch (Exception e) { + e.printStackTrace(); + logger.error("Exception:", e); + } + } +} +/* +output: +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - x_amz_date = 20181231T213344Z +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - signature: bfa7520029f34f6d407b381197bd18a97101efbd2d4fa5bc183c44522ce24fde +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - authorization_header=AWS4-HMAC-SHA256 Credential=obfuscated/20181231/us-east-1/monitoring/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=b08a51264237c6e92bf389c45f1ca536d3f7f57a8e9c43b2f724773bad7b6c97 +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - Sending request with:{"Namespace":"On-PremiseObjectStorageMetrics","MetricData":[ { "MetricName": "NumberOfObjects", "Dimensions": [ { "Name": "BucketName", "Value": "ExampleBucket" } ], "Timestamp": null, "Value": 10, "Unit": "Count", "StorageResolution": 60 }]} +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - Sending a post request to:https://monitoring.us-east-1.amazonaws.com +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - Header Authorization: AWS4-HMAC-SHA256 
Credential=obfuscated/20181231/us-east-1/monitoring/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=b08a51264237c6e92bf389c45f1ca536d3f7f57a8e9c43b2f724773bad7b6c97 +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - Header x-amz-target: GraniteServiceVersion20100801.PutMetricData +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - Header x-amz-date: 20181231T213344Z +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - Header Accept: application/json +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - Header Content-Encoding: amz-1.0 +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - Header Connection: keep-alive +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - Header Content-Type: application/x-amz-json-1.0 +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - Resp Code:200 +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - Resp Message:OK +[main] INFO com.emc.ecs.monitoring.sample.PutMetricsRequest - response: +*/ diff --git a/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/css/main.css b/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/css/main.css new file mode 100644 index 0000000..ab36e31 --- /dev/null +++ b/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/css/main.css @@ -0,0 +1,54 @@ +#clickSource { + display: inline-block; + *display: inline; + zoom: 1; + padding: 6px 20px; + margin: 0; + cursor: pointer; + border: 1px solid #bbb; + overflow: visible; + font: bold 13px arial, helvetica, sans-serif; + text-decoration: none; + white-space: nowrap; + color: #555; + text-transform: capitalize; + + background-color: #ddd; + background-image: -webkit-gradient(linear, left top, left bottom, from(rgba(255,255,255,1)), to(rgba(255,255,255,0))); + background-image: -webkit-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -moz-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -ms-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -o-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + + -webkit-transition: background-color .2s ease-out; + -moz-transition: background-color .2s ease-out; + -ms-transition: background-color .2s ease-out; + -o-transition: background-color .2s ease-out; + transition: background-color .2s ease-out; + background-clip: padding-box; /* Fix bleeding */ + + -moz-border-radius: 3px; + -webkit-border-radius: 3px; + border-radius: 3px; + + -moz-box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + text-shadow: 0 1px 0 rgba(255,255,255, .9); + + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +#clickTarget { + float: right; + height: 80px; + border: 1px solid #999; + font: 14pt arial; + color: chocolate; +} \ No newline at end of file diff --git a/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/js/main.js b/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/js/main.js new file 
mode 100644 index 0000000..382f654 --- /dev/null +++ b/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/js/main.js @@ -0,0 +1,8 @@ +var counter = 0; +function click(e) { + document.getElementById('clickTarget').innerText = 'You clicked me ' + ++counter + ' times!'; +} +function load() { + document.getElementById('clickSource').onclick = click; +} +window.onload = load; \ No newline at end of file diff --git a/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/main.html b/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/main.html new file mode 100644 index 0000000..714dfda --- /dev/null +++ b/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/main.html @@ -0,0 +1,20 @@ + + + + + ECS Static Website Test + + + + +

This is a static website

+

All of the images, stylesheets and links you see are hosted within a bucket and publicly readable. + Note that all references are relative. This is because the namespace is included in the host name, which consumes + the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any + absolute references.

+

Page 1

+

Page 2

+
Click Me!
+
+ + \ No newline at end of file diff --git a/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/pages/page1.html b/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/pages/page1.html new file mode 100644 index 0000000..89cede8 --- /dev/null +++ b/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/pages/page1.html @@ -0,0 +1,20 @@ + + + + + ECS Static Website Test - Page 1 + + + + +

This is a sub-page of a static website

+

All of the images, stylesheets and links you see are hosted within a bucket and publicly readable. + Note that all references are relative. This is because the namespace is included in the host name, which consumes + the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any + absolute references.

+

Back to main page

+

Page 2

+
Click Me!
+
+ + \ No newline at end of file diff --git a/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/pages/page2.html b/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/pages/page2.html new file mode 100644 index 0000000..d1fde0d --- /dev/null +++ b/ecs-metrics-workshop/aws-put-metrics-workshop/src/main/resources/pages/page2.html @@ -0,0 +1,20 @@ + + + + + ECS Static Website Test - Page 2 + + + + +

This is a sub-page of a static website

+

All of the images, stylesheets and links you see are hosted within a bucket and publicly readable. + Note that all references are relative. This is because the namespace is included in the host name, which consumes + the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any + absolute references.

+

Back to main page

+

Page 1

+
Click Me!
+
+
+
\ No newline at end of file
diff --git a/ecs-metrics-workshop/aws-s3-metrics-workshop/README.md b/ecs-metrics-workshop/aws-s3-metrics-workshop/README.md new file mode 100644 index 0000000..438c42d --- /dev/null +++ b/ecs-metrics-workshop/aws-s3-metrics-workshop/README.md @@ -0,0 +1,74 @@
+AWS Signature v4 auth requires passing the --region option; without it the request fails. See the example below:
+aws s3 ls s3://ravirajamani --debug >> README.md
+
+2019-05-26 17:38:06,445 - MainThread - botocore.auth - DEBUG - StringToSign:
+GET
+
+
+Sun, 26 May 2019 17:38:06 GMT
+/ravirajamani/
+2019-05-26 17:38:06,448 - MainThread - botocore.endpoint - DEBUG - Sending http request:
+2019-05-26 17:38:06,781 - MainThread - botocore.parsers - DEBUG - Response headers: {'Server': 'AmazonS3', 'Date': 'Sun, 26 May 2019 17:38:06 GMT', 'x-amz-request-id': '539C951595BF6B5A', 'Transfer-Encoding': 'chunked', 'x-amz-id-2': 'X0DS8ROEV4yESl1iy3fxBwvvP/X/1J2ZERmwfWPcrEn1WaeKM0HMKrzPw85w4P8tuIsGqH4DrKo=', 'x-amz-bucket-region': 'us-east-2', 'Connection': 'close', 'Content-Type': 'application/xml', 'x-amz-region': 'us-east-2'}
+2019-05-26 17:38:06,781 - MainThread - botocore.parsers - DEBUG - Response body:
+b'\nInvalidRequestThe authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.539C951595BF6B5AX0DS8ROEV4yESl1iy3fxBwvvP/X/1J2ZERmwfWPcrEn1WaeKM0HMKrzPw85w4P8tuIsGqH4DrKo='
+2019-05-26 17:38:06,782 - MainThread - botocore.hooks - DEBUG - Event needs-retry.s3.ListObjects: calling handler
+2019-05-26 17:38:06,782 - MainThread - botocore.retryhandler - DEBUG - No retry needed.
+2019-05-26 17:38:06,782 - MainThread - botocore.hooks - DEBUG - Event needs-retry.s3.ListObjects: calling handler >
+2019-05-26 17:38:06,782 - MainThread - botocore.hooks - DEBUG - Event after-call.s3.ListObjects: calling handler
+2019-05-26 17:38:06,782 - MainThread - botocore.hooks - DEBUG - Event after-call.s3.ListObjects: calling handler
+2019-05-26 17:38:06,783 - MainThread - awscli.clidriver - DEBUG - Exception caught in main()
+Traceback (most recent call last):
+  File "/usr/lib/python3/dist-packages/awscli/clidriver.py", line 186, in main
+    return command_table[parsed_args.command](remaining, parsed_args)
+  File "/usr/lib/python3/dist-packages/awscli/customizations/commands.py", line 190, in __call__
+    parsed_globals)
+  File "/usr/lib/python3/dist-packages/awscli/customizations/commands.py", line 187, in __call__
+    return self._run_main(parsed_args, parsed_globals)
+  File "/usr/lib/python3/dist-packages/awscli/customizations/s3/subcommands.py", line 472, in _run_main
+    bucket, key, parsed_args.page_size, parsed_args.request_payer)
+  File "/usr/lib/python3/dist-packages/awscli/customizations/s3/subcommands.py", line 499, in _list_all_objects
+    for response_data in iterator:
+  File "/usr/lib/python3/dist-packages/botocore/paginate.py", line 102, in __iter__
+    response = self._make_request(current_kwargs)
+  File "/usr/lib/python3/dist-packages/botocore/paginate.py", line 174, in _make_request
+    return self._method(**current_kwargs)
+  File "/usr/lib/python3/dist-packages/botocore/client.py", line 251, in _api_call
+    return self._make_api_call(operation_name, kwargs)
+  File "/usr/lib/python3/dist-packages/botocore/client.py", line 537, in _make_api_call
+    raise ClientError(parsed_response, operation_name)
+botocore.exceptions.ClientError: An error occurred (InvalidRequest) when calling the ListObjects operation: You are attempting to operate on a bucket in a region that requires Signature Version 4.
You can fix this issue by explicitly providing the correct region location using the --region argument, the AWS_DEFAULT_REGION environment variable, or the region variable in the AWS CLI configuration file. You can get the bucket's location by running "aws s3api get-bucket-location --bucket BUCKET".
+2019-05-26 17:38:06,784 - MainThread - awscli.clidriver - DEBUG - Exiting with rc 255
+
+An error occurred (InvalidRequest) when calling the ListObjects operation: You are attempting to operate on a bucket in a region that requires Signature Version 4. You can fix this issue by explicitly providing the correct region location using the --region argument, the AWS_DEFAULT_REGION environment variable, or the region variable in the AWS CLI configuration file. You can get the bucket's location by running "aws s3api get-bucket-location --bucket BUCKET".
+
+
+
+aws s3 ls s3://ravirajamani --region=us-east-2 --debug >> README.md
+
+2019-05-26 17:38:40,719 - MainThread - botocore.auth - DEBUG - CanonicalRequest:
+GET
+/ravirajamani
+delimiter=%2F&encoding-type=url&prefix=
+host:s3.us-east-2.amazonaws.com
+x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+x-amz-date:20190526T173840Z
+
+host;x-amz-content-sha256;x-amz-date
+e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+2019-05-26 17:38:40,720 - MainThread - botocore.auth - DEBUG - StringToSign:
+AWS4-HMAC-SHA256
+20190526T173840Z
+20190526/us-east-2/s3/aws4_request
+dc1192c29dff4801d50e76cdbe2a7aa0ed7281664a51ad98931c69fb938dbc55
+2019-05-26 17:38:40,720 - MainThread - botocore.auth - DEBUG - Signature:
+7fcd5f82d95c42ba484700bc8e7732417c92eaad46aa7d34e2901def30917f5e
+2019-05-26 17:38:40,722 - MainThread - botocore.endpoint - DEBUG - Sending http request:
+2019-05-26 17:38:41,063 - MainThread - botocore.parsers - DEBUG - Response headers: {'Server': 'AmazonS3', 'x-amz-bucket-region': 'us-east-2', 'Date': 'Sun, 26 May 2019 17:38:42 GMT', 'x-amz-request-id': '87EE58B465396C4F', 'x-amz-id-2': 'C5vDT3dESqoxe3eeSRrZcr1CEVXyTETdVxC+eXTYJcHoSwVOuer+dcN7/lqXbZltJ6zqZtHN1II=', 'Content-Type': 'application/xml', 'Transfer-Encoding': 'chunked'}
+2019-05-26 17:38:41,063 - MainThread - botocore.parsers - DEBUG - Response body:
+b'\nravirajamani1000/urlfalseDynamicLogin.zip2018-02-18T18:51:57.000Z"1f1b1f5e9e96ad0a526d31d66b21245c"697624dd6d5e8e5d3414d384f1e1dc7d76d3199a66b06befedabd10f66cbe50f6cd5a0STANDARDScreen+Shot+2017-11-27+at+1.15.42+PM.png2017-11-27T22:57:08.000Z"f3be3d10d5fbe143df6932554494bbbb"189087dd6d5e8e5d3414d384f1e1dc7d76d3199a66b06befedabd10f66cbe50f6cd5a0STANDARDScreen+Shot+2017-11-27+at+1.29.01+PM.png2017-11-27T22:56:33.000Z"205ed7799f0a9860c8d6296498d8d13d"197041dd6d5e8e5d3414d384f1e1dc7d76d3199a66b06befedabd10f66cbe50f6cd5a0STANDARDScreen+Shot+2018-02-21+at+8.34.00+AM.png2018-02-21T16:49:41.000Z"554b9cbe29c4eddfec6e02874de88336"167108dd6d5e8e5d3414d384f1e1dc7d76d3199a66b06befedabd10f66cbe50f6cd5a0STANDARDlogout_form.html2018-02-18T18:54:53.000Z"df12d231c95bf32d7a0c916e5d67b47b"85dd6d5e8e5d3414d384f1e1dc7d76d3199a66b06befedabd10f66cbe50f6cd5a0STANDARDmain_page.html2018-02-18T19:01:14.000Z"196506de969ce07d659f1cab1093d301"404374dd6d5e8e5d3414d384f1e1dc7d76d3199a66b06befedabd10f66cbe50f6cd5a0STANDARDDynamicLogin/'
+2019-05-26 17:38:41,065 - MainThread - botocore.hooks - DEBUG - Event needs-retry.s3.ListObjects: calling handler
+2019-05-26 17:38:41,065 - MainThread - botocore.retryhandler - DEBUG - No retry needed.
+2019-05-26 17:38:41,065 - MainThread - botocore.hooks - DEBUG - Event needs-retry.s3.ListObjects: calling handler >
+2019-05-26 17:38:41,065 - MainThread - botocore.hooks - DEBUG - Event after-call.s3.ListObjects: calling handler
+2019-05-26 17:38:41,065 - MainThread - botocore.hooks - DEBUG - Event after-call.s3.ListObjects: calling handler
+
diff --git a/ecs-metrics-workshop/aws-s3-metrics-workshop/ba.sh b/ecs-metrics-workshop/aws-s3-metrics-workshop/ba.sh new file mode 100644 index 0000000..8df0bc4 --- /dev/null +++ b/ecs-metrics-workshop/aws-s3-metrics-workshop/ba.sh @@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+# Publish the sample metric repeatedly (331 runs) so CloudWatch builds up
+# a series of datapoints for the workshop.
+for i in $(seq 1 331); do
+    java -jar build/libs/aws-put-metrics-workshop-1.0.jar
+done
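A quick sanity check on the signed ListObjects request logged in the README above: the x-amz-content-sha256 value in the CanonicalRequest is simply the SHA-256 of the empty GET payload. A minimal standalone sketch, using the same JDK hashing primitives as the workshop classes (the class name EmptyPayloadHash is illustrative, not part of the repo):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public class EmptyPayloadHash {
    public static void main(String[] args) throws Exception {
        MessageDigest digest = MessageDigest.getInstance("SHA-256");
        byte[] hash = digest.digest("".getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : hash) hex.append(String.format("%02x", b));
        // Prints e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,
        // matching the x-amz-content-sha256 header in the debug log above.
        System.out.println(hex);
    }
}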
build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar +java -jar build/libs/aws-put-metrics-workshop-1.0.jar + diff --git a/ecs-metrics-workshop/aws-s3-metrics-workshop/build.gradle b/ecs-metrics-workshop/aws-s3-metrics-workshop/build.gradle new file mode 100644 index 0000000..94a6bd0 --- /dev/null +++ b/ecs-metrics-workshop/aws-s3-metrics-workshop/build.gradle @@ -0,0 +1,74 @@ +/* + * Copyright 2013-2018 Dell Inc. or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +group = 'com.emc.ecs.workshop' +version = '1.0' + +apply plugin: 'java' +apply plugin: 'idea' +apply plugin: 'eclipse' +apply plugin: "jacoco" + +repositories { + mavenCentral() +} + +dependencies { + compile group: 'org.slf4j', name: 'slf4j-simple', version: '1.7.24' + compile group: 'org.slf4j', name:'slf4j-api', version: '1.7.24' + compile group: 'com.jayway.jsonpath', name:'json-path', version:'2.1.0' + compile group: 'org.springframework', name: 'spring-web', version: '5.0.2.RELEASE' + testCompile "junit:junit:4.+" +} + +jacoco { + toolVersion = "0.7.1.201405082137" + reportsDir = file("$buildDir/jacoco") +} + + +task wrapper(type: Wrapper) { + gradleVersion = '4.3.1' +} + +jar { + from { + configurations.compile.collect { + it.isDirectory() ? 
it : zipTree(it) + } + } + manifest { + attributes( + 'Main-Class': 'com.emc.ecs.s3.sample.PutS3Request' + ) + } +} + + +test { + jacoco { + append = false + destinationFile = file("$buildDir/jacoco/jacocoTest.exec") + // classDumpFile = file("$buildDir/jacoco/classpathdumps") + } +} + + +jacocoTestReport { + reports { + xml.enabled true + csv.enabled true + html.destination file("${buildDir}/jacoco/jacocoHtml") + } +} diff --git a/ecs-metrics-workshop/aws-s3-metrics-workshop/gradle/wrapper/gradle-wrapper.properties b/ecs-metrics-workshop/aws-s3-metrics-workshop/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..5e5bc1e --- /dev/null +++ b/ecs-metrics-workshop/aws-s3-metrics-workshop/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +#Fri Apr 12 00:43:21 UTC 2019 +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-4.3.1-bin.zip diff --git a/ecs-metrics-workshop/aws-s3-metrics-workshop/gradlew b/ecs-metrics-workshop/aws-s3-metrics-workshop/gradlew new file mode 100755 index 0000000..9d82f78 --- /dev/null +++ b/ecs-metrics-workshop/aws-s3-metrics-workshop/gradlew @@ -0,0 +1,160 @@ +#!/usr/bin/env bash + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn ( ) { + echo "$*" +} + +die ( ) { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; +esac + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules +function splitJvmOpts() { + JVM_OPTS=("$@") +} +eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS +JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" + +exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" diff --git a/ecs-metrics-workshop/aws-s3-metrics-workshop/gradlew.bat b/ecs-metrics-workshop/aws-s3-metrics-workshop/gradlew.bat new file mode 100644 index 0000000..aec9973 --- /dev/null +++ b/ecs-metrics-workshop/aws-s3-metrics-workshop/gradlew.bat @@ -0,0 +1,90 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. 
+echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windowz variants + +if not "%OS%" == "Windows_NT" goto win9xME_args +if "%@eval[2+2]" == "4" goto 4NT_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* +goto execute + +:4NT_args +@rem Get arguments from the 4NT Shell from JP Software +set CMD_LINE_ARGS=%$ + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/java/com/emc/ecs/s3/sample/PutS3Request.java b/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/java/com/emc/ecs/s3/sample/PutS3Request.java new file mode 100644 index 0000000..6af5f40 --- /dev/null +++ b/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/java/com/emc/ecs/s3/sample/PutS3Request.java @@ -0,0 +1,267 @@ +package com.emc.ecs.s3.sample; +import java.net.*; +import java.io.*; +import java.text.SimpleDateFormat; +import java.util.*; +import javax.net.ssl.HttpsURLConnection; +import java.nio.charset.StandardCharsets; +import java.security.InvalidKeyException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class PutS3Request { + private static final Logger logger = LoggerFactory.getLogger(PutS3Request.class); + + protected static byte[] sha256(String content) throws Exception { + MessageDigest digest = MessageDigest.getInstance("SHA-256"); + byte[] encodedhash = digest.digest( + content.getBytes(StandardCharsets.UTF_8)); + return encodedhash; + } + protected static String bytesToHex(byte[] hash) { + StringBuffer hexString = new StringBuffer(); + for (int i = 0; i < hash.length; i++) { + String hex = Integer.toHexString(0xff & hash[i]); + if(hex.length() == 1) hexString.append('0'); + hexString.append(hex); + } + return hexString.toString(); + } + + protected static String emptySha() { + try { + return bytesToHex(sha256("")); + } catch (Exception e) { + e.printStackTrace(); + return null; + } + } + + protected static byte[] HmacSHA256(String data, byte[] key) throws Exception { + String algorithm="HmacSHA256"; + Mac mac = Mac.getInstance(algorithm); + mac.init(new SecretKeySpec(key, algorithm)); + return mac.doFinal(data.getBytes("UTF8")); + } + + protected static byte[] getSignatureKey(String key, String 
dateStamp, String regionName, String serviceName) throws Exception { + byte[] kSecret = ("AWS4" + key).getBytes("UTF8"); + byte[] kDate = HmacSHA256(dateStamp, kSecret); + byte[] kRegion = HmacSHA256(regionName, kDate); + byte[] kService = HmacSHA256(serviceName, kRegion); + byte[] kSigning = HmacSHA256("aws4_request", kService); + return kSigning; + } + + protected static Map<String, String> getHeaders(String amz_date, String authorization_header, String apiName, String content_type) { + Map<String, String> headers = new HashMap<>(); + headers.put("x-amz-date", amz_date); + headers.put("Authorization", authorization_header); + headers.put("x-amz-content-sha256", emptySha()); + headers.put("Accept", "application/json"); + headers.put("Connection", "keep-alive"); + return headers; + } + + public static String getResponse(String methodName, String httpsURL, Map<String, String> headers, String payload) throws Exception { + URL myurl = new URL(httpsURL); + String response = null; + logger.info("Sending a " + methodName + " request to:" + httpsURL); + HttpsURLConnection con = (HttpsURLConnection)myurl.openConnection(); + con.setRequestMethod(methodName); + for (Map.Entry<String, String> entry : headers.entrySet()) { + logger.info("Header "+entry.getKey()+": " + entry.getValue()); + con.setRequestProperty(entry.getKey(), entry.getValue()); + } + con.setDoOutput(true); + con.setDoInput(true); + if (!payload.isEmpty()) { + try (DataOutputStream output = new DataOutputStream(con.getOutputStream())) { + logger.info("payload="+payload); + output.writeBytes(payload); + } + } + try (DataInputStream input = new DataInputStream(con.getInputStream())) { + StringBuffer contents = new StringBuffer(); + String tmp; + while ((tmp = input.readLine()) != null) { + contents.append(tmp); + logger.debug("tmp="+tmp); + } + response = contents.toString(); + } + logger.info("Resp Code:" + con.getResponseCode()); + logger.info("Resp Message:" + con.getResponseMessage()); + return response; + } + + protected static String getDateString() { + String dateString = null; + try { + Date dt = new Date(); + SimpleDateFormat dateFormatter = new SimpleDateFormat("yyyyMMdd'T'HHmmss'Z'"); + dateFormatter.setTimeZone(TimeZone.getTimeZone("UTC")); // SigV4 timestamps must be in UTC + dateString = dateFormatter.format(dt); + logger.info("x_amz_date = "+dateString); + } catch (Exception e) { + logger.error("Exception:", e); + } + return dateString; + } + + protected static String createBucketParameters(String bucketName) { + String request_parameters = ""; +/* + request_parameters += "{"; + request_parameters += "\"Version\": \"2012-10-17\","; + request_parameters += "\"Statement\": ["; + request_parameters += "{"; + request_parameters += "\"Sid\": \"statement1\","; + request_parameters += "\"Effect\": \"Allow\","; + // request_parameters += "\"Principal\": {"; + // request_parameters += "\"AWS\": \"arn:aws:iam::AccountB-ID:user/Dave\""; + // request_parameters += "},"; + request_parameters += "\"Action\": [\"s3:CreateBucket\"],"; + request_parameters += "\"Resource\": [\"arn:aws:s3:::*\"],"; + request_parameters += "\"Condition\": {"; + request_parameters += "\"StringLike\": {"; + request_parameters += "\"s3:LocationConstraint\": \"us-east-1\""; + request_parameters += "},"; + request_parameters += "\"StringEquals\": {"; + request_parameters += "\"s3:x-amz-acl\":[\"public-read-write\"]"; + request_parameters += "}"; + request_parameters += "}"; + request_parameters += "}"; + request_parameters += "]"; + request_parameters += "}"; +*/ + return request_parameters; + } + + protected static String createBucket(String bucketName) { + String response = ""; + String host = 
".s3.amazonaws.com"; + String endpoint="https://s3.amazonaws.com/"+bucketName; + String method = "PUT"; + String apiName = "CreateBucket"; + String content_type = "application/x-amz-json-1.0"; + String amz_date = getDateString(); + String request_parameters = createBucketParameters(bucketName); + String authorization_header = getAuthorizationHeader(host, request_parameters, method, apiName); + Map headers = getHeaders(amz_date, authorization_header, apiName, content_type); + try { + response = getResponse(method, endpoint, headers, request_parameters); + } catch (Exception e) { + logger.error("Exception:", e); + } + logger.info("response:"+response); + return response; + } + + protected static String deleteBucket(String bucketName) { + String response = ""; + String host = "s3.amazonaws.com"; + String endpoint="https://s3.amazonaws.com/" + bucketName; // + host + "/"; + String method = "DELETE"; + String apiName = "DeleteBucket"; + String content_type = "application/x-amz-json-1.0"; + String amz_date = getDateString(); + String authorization_header = getAuthorizationHeader(host, "", method, apiName); + Map headers = getHeaders(amz_date, authorization_header, apiName, content_type); + try { + response = getResponse(method, endpoint, headers, ""); + } catch (Exception e) { + logger.error("Exception:", e); + } + logger.info("response:"+response); + return response; + } + + public static String getAuthorizationHeader(String host, String request_parameters, String method, String apiName) { + String service="s3"; + String region="us-east-1"; + String amz_date = getDateString(); + logger.info("amz_date="+amz_date); + String date_stamp = amz_date.substring(0, amz_date.indexOf("T")); + String canonical_uri = "/"; + String canonical_querystring = ""; + String content_type = "application/x-amz-json-1.0"; + String amz_target = "GraniteServiceVersion20100801."+apiName; + logger.info("host="+host); + String canonical_headers = "host:" + host + "\n" + "x-amz-content-sha256:" + emptySha() + "\n" + "x-amz-date:" + amz_date ; + String signed_headers = "host;x-amz-content-sha256;x-amz-date"; + String accessKey = "your_access_key"; + String accessSecretKey = "your_access_secret"; + String date = "20130806"; + String signing = "aws4_request"; + try { + String payload_hash = bytesToHex(sha256(request_parameters)); + String canonical_request = method + "\n" + canonical_uri + "\n" + canonical_querystring + "\n" + canonical_headers + "\n" + signed_headers + "\n" + payload_hash; + canonical_request = new String(canonical_request.getBytes("UTF-8"), "UTF-8"); + String algorithm = "AWS4-HMAC-SHA256"; + String credential_scope = date_stamp + "/" + region + "/" + service + "/" + "aws4_request"; + logger.info("date_stamp="+date_stamp); + String string_to_sign = algorithm + "\n" + amz_date + "\n" + credential_scope + "\n" + bytesToHex(sha256(canonical_request)); + string_to_sign = new String(string_to_sign.getBytes("UTF-8"), "UTF-8"); + byte[] signing_key = getSignatureKey(accessSecretKey, date_stamp, region, service); + String signature = bytesToHex(HmacSHA256(string_to_sign, signing_key)); + String authorization_header = algorithm + " " + "Credential=" + accessKey + "/" + credential_scope + "," + "SignedHeaders=" + signed_headers + ", " + "Signature=" + signature; + return authorization_header; + } catch (Exception e) { + e.printStackTrace(); + return null; + } + } + + public static void main(String[] args) throws InvalidKeyException, NoSuchAlgorithmException, IllegalStateException, UnsupportedEncodingException { + 
createBucket("examplebucket"); + deleteBucket("examplebucket"); + } +} +/* +[main] INFO com.emc.ecs.s3.sample.PutS3Request - x_amz_date = 20190505T203151Z +[main] INFO com.emc.ecs.s3.sample.PutS3Request - x_amz_date = 20190505T203151Z +[main] INFO com.emc.ecs.s3.sample.PutS3Request - amz_date=20190505T203151Z +[main] INFO com.emc.ecs.s3.sample.PutS3Request - host=examplebucket.s3.amazonaws.com +[main] INFO com.emc.ecs.s3.sample.PutS3Request - date_stamp=20190505 +[main] INFO com.emc.ecs.s3.sample.PutS3Request - Sending a PUT request to:https://s3.amazonaws.com/examplebucket +[main] INFO com.emc.ecs.s3.sample.PutS3Request - Header Authorization: AWS4-HMAC-SHA256 Credential=your_access_key/20190505/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=6e1cbaf4e9135c792bf84193235d8f2acc282b3d5f66ec003e9acc29a7613f8f +[main] INFO com.emc.ecs.s3.sample.PutS3Request - Header x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +[main] INFO com.emc.ecs.s3.sample.PutS3Request - Header x-amz-date: 20190505T203151Z +[main] INFO com.emc.ecs.s3.sample.PutS3Request - Header Accept: application/json +[main] INFO com.emc.ecs.s3.sample.PutS3Request - Header Connection: keep-alive +[main] ERROR com.emc.ecs.s3.sample.PutS3Request - Exception: +java.io.IOException: Server returned HTTP response code: 403 for URL: https://s3.amazonaws.com/examplebucket + at sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1894) + at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1492) + at sun.net.www.protocol.https.HttpsURLConnectionImpl.getInputStream(HttpsURLConnectionImpl.java:263) + at com.emc.ecs.s3.sample.PutS3Request.getResponse(PutS3Request.java:90) + at com.emc.ecs.s3.sample.PutS3Request.createBucket(PutS3Request.java:158) + at com.emc.ecs.s3.sample.PutS3Request.main(PutS3Request.java:224) +[main] INFO com.emc.ecs.s3.sample.PutS3Request - response: +[main] INFO com.emc.ecs.s3.sample.PutS3Request - x_amz_date = 20190505T203152Z +[main] INFO com.emc.ecs.s3.sample.PutS3Request - x_amz_date = 20190505T203152Z +[main] INFO com.emc.ecs.s3.sample.PutS3Request - amz_date=20190505T203152Z +[main] INFO com.emc.ecs.s3.sample.PutS3Request - host=s3.amazonaws.com +[main] INFO com.emc.ecs.s3.sample.PutS3Request - date_stamp=20190505 +[main] INFO com.emc.ecs.s3.sample.PutS3Request - Sending a DELETE request to:https://s3.amazonaws.com/examplebucket +[main] INFO com.emc.ecs.s3.sample.PutS3Request - Header Authorization: AWS4-HMAC-SHA256 Credential=your_access_key/20190505/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=7f7aca28c6fd6facecf7fefb55ac375c57059ca2085178668ea142d6027018c8 +[main] INFO com.emc.ecs.s3.sample.PutS3Request - Header x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +[main] INFO com.emc.ecs.s3.sample.PutS3Request - Header x-amz-date: 20190505T203152Z +[main] INFO com.emc.ecs.s3.sample.PutS3Request - Header Accept: application/json +[main] INFO com.emc.ecs.s3.sample.PutS3Request - Header Connection: keep-alive +[main] ERROR com.emc.ecs.s3.sample.PutS3Request - Exception: +java.io.IOException: Server returned HTTP response code: 403 for URL: https://s3.amazonaws.com/examplebucket + at sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1894) + at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1492) + at 
sun.net.www.protocol.https.HttpsURLConnectionImpl.getInputStream(HttpsURLConnectionImpl.java:263) + at com.emc.ecs.s3.sample.PutS3Request.getResponse(PutS3Request.java:90) + at com.emc.ecs.s3.sample.PutS3Request.deleteBucket(PutS3Request.java:177) + at com.emc.ecs.s3.sample.PutS3Request.main(PutS3Request.java:225) +[main] INFO com.emc.ecs.s3.sample.PutS3Request - response: +*/ diff --git a/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/css/main.css b/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/css/main.css new file mode 100644 index 0000000..ab36e31 --- /dev/null +++ b/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/css/main.css @@ -0,0 +1,54 @@ +#clickSource { + display: inline-block; + *display: inline; + zoom: 1; + padding: 6px 20px; + margin: 0; + cursor: pointer; + border: 1px solid #bbb; + overflow: visible; + font: bold 13px arial, helvetica, sans-serif; + text-decoration: none; + white-space: nowrap; + color: #555; + text-transform: capitalize; + + background-color: #ddd; + background-image: -webkit-gradient(linear, left top, left bottom, from(rgba(255,255,255,1)), to(rgba(255,255,255,0))); + background-image: -webkit-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -moz-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -ms-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -o-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + + -webkit-transition: background-color .2s ease-out; + -moz-transition: background-color .2s ease-out; + -ms-transition: background-color .2s ease-out; + -o-transition: background-color .2s ease-out; + transition: background-color .2s ease-out; + background-clip: padding-box; /* Fix bleeding */ + + -moz-border-radius: 3px; + -webkit-border-radius: 3px; + border-radius: 3px; + + -moz-box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + text-shadow: 0 1px 0 rgba(255,255,255, .9); + + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +#clickTarget { + float: right; + height: 80px; + border: 1px solid #999; + font: 14pt arial; + color: chocolate; +} \ No newline at end of file diff --git a/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/js/main.js b/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/js/main.js new file mode 100644 index 0000000..382f654 --- /dev/null +++ b/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/js/main.js @@ -0,0 +1,8 @@ +var counter = 0; +function click(e) { + document.getElementById('clickTarget').innerText = 'You clicked me ' + ++counter + ' times!'; +} +function load() { + document.getElementById('clickSource').onclick = click; +} +window.onload = load; \ No newline at end of file diff --git a/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/main.html b/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/main.html new file mode 100644 index 0000000..714dfda --- /dev/null +++ 
b/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/main.html @@ -0,0 +1,20 @@ + + + + + ECS Static Website Test + + + + +

This is a static website

+

All of the images, stylesheets and links you see are hosted within a bucket and publicly readable. + Note that all references are relative. This is because the namespace is included in the host name, which consumes + the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any + absolute references.

+
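<!-- Illustrative only (hypothetical hosts): with a wildcard certificate for *.ecs.example.com, + a path-style URL such as https://mynamespace.ecs.example.com/mybucket/pages/page1.html + stays covered, whereas a virtual-hosted URL like https://mybucket.mynamespace.ecs.example.com/ + adds a second subdomain label the wildcard cannot match; hence the relative links used here. --> + 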

Page 1

+

Page 2

+
Click Me!
+
+ + \ No newline at end of file diff --git a/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/pages/page1.html b/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/pages/page1.html new file mode 100644 index 0000000..89cede8 --- /dev/null +++ b/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/pages/page1.html @@ -0,0 +1,20 @@ + + + + + ECS Static Website Test - Page 1 + + + + +

This is a sub-page of a static website

+

All of the images, stylesheets and links you see are hosted within a bucket and publicly readable. + Note that all references are relative. This is because the namespace is included in the host name, which consumes + the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any + absolute references.

+

Back to main page

+

Page 2

+
Click Me!
+
+ + \ No newline at end of file diff --git a/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/pages/page2.html b/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/pages/page2.html new file mode 100644 index 0000000..d1fde0d --- /dev/null +++ b/ecs-metrics-workshop/aws-s3-metrics-workshop/src/main/resources/pages/page2.html @@ -0,0 +1,20 @@ + + + + + ECS Static Website Test - Page 2 + + + + +

This is a sub-page of a static website

+

All of the images, stylesheets and links you see are hosted within a bucket and publicly readable. + Note that all references are relative. This is because the namespace is included in the host name, which consumes + the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any + absolute references.

+

Back to main page

+

Page 1

+
Click Me!
+
+ + \ No newline at end of file diff --git a/ecs-metrics-workshop/azure-get-monitoring-workshop/README.md b/ecs-metrics-workshop/azure-get-monitoring-workshop/README.md new file mode 100644 index 0000000..b54f407 --- /dev/null +++ b/ecs-metrics-workshop/azure-get-monitoring-workshop/README.md @@ -0,0 +1,5 @@ +# +# Azure Get Monitoring api sample for metrics published from on-premise instances +# +# Details: http://github.com/EMCECS/ecs-samples/files/2779641/AzureMetrics.docx + diff --git a/ecs-metrics-workshop/azure-get-monitoring-workshop/build.gradle b/ecs-metrics-workshop/azure-get-monitoring-workshop/build.gradle new file mode 100644 index 0000000..b2606e3 --- /dev/null +++ b/ecs-metrics-workshop/azure-get-monitoring-workshop/build.gradle @@ -0,0 +1,51 @@ +/* + * Copyright 2013-2018 Dell Inc. or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +group = 'com.emc.ecs.monitoring' +version = '1.0' + +apply plugin: 'java' +apply plugin: 'idea' +apply plugin: 'eclipse' + +repositories { + mavenCentral() +} + +dependencies { + // https://docs.gradle.org/current/userguide/declaring_dependencies.html + // compile "com.amazonaws:aws-java-sdk-s3:1.11.118" + compile group: 'org.slf4j', name: 'slf4j-simple', version: '1.7.24' + compile group: 'org.slf4j', name:'slf4j-api', version: '1.7.24' + compile group: 'com.jayway.jsonpath', name:'json-path', version:'2.1.0' + implementation 'org.springframework:spring-web:5.0.2.RELEASE' + +} + +task wrapper(type: Wrapper) { + gradleVersion = '4.3.1' +} + +jar { + from { + configurations.compile.collect { + it.isDirectory() ? it : zipTree(it) + } + } + manifest { + attributes( + 'Main-Class': 'com.emc.ecs.monitoring.sample.GetMonitoringRequest' + ) + } +} diff --git a/ecs-metrics-workshop/azure-get-monitoring-workshop/gradle/wrapper/gradle-wrapper.properties b/ecs-metrics-workshop/azure-get-monitoring-workshop/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..ab4b7fe --- /dev/null +++ b/ecs-metrics-workshop/azure-get-monitoring-workshop/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +#Thu Nov 23 11:33:09 CET 2017 +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-4.3.1-bin.zip diff --git a/ecs-metrics-workshop/azure-get-monitoring-workshop/gradlew b/ecs-metrics-workshop/azure-get-monitoring-workshop/gradlew new file mode 100755 index 0000000..91a7e26 --- /dev/null +++ b/ecs-metrics-workshop/azure-get-monitoring-workshop/gradlew @@ -0,0 +1,164 @@ +#!/usr/bin/env bash + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
+DEFAULT_JVM_OPTS="" + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn ( ) { + echo "$*" +} + +die ( ) { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; +esac + +# For Cygwin, ensure paths are in UNIX format before anything is touched. +if $cygwin ; then + [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"` +fi + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >&- +APP_HOME="`pwd -P`" +cd "$SAVED" >&- + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules +function splitJvmOpts() { + JVM_OPTS=("$@") +} +eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS +JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" + +exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" diff --git a/ecs-metrics-workshop/azure-get-monitoring-workshop/gradlew.bat b/ecs-metrics-workshop/azure-get-monitoring-workshop/gradlew.bat new file mode 100644 index 0000000..aec9973 --- /dev/null +++ b/ecs-metrics-workshop/azure-get-monitoring-workshop/gradlew.bat @@ -0,0 +1,90 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. 
+echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windowz variants + +if not "%OS%" == "Windows_NT" goto win9xME_args +if "%@eval[2+2]" == "4" goto 4NT_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* +goto execute + +:4NT_args +@rem Get arguments from the 4NT Shell from JP Software +set CMD_LINE_ARGS=%$ + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/java/com/emc/ecs/monitoring/sample/GetMonitoringRequest.java b/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/java/com/emc/ecs/monitoring/sample/GetMonitoringRequest.java new file mode 100644 index 0000000..f38cb12 --- /dev/null +++ b/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/java/com/emc/ecs/monitoring/sample/GetMonitoringRequest.java @@ -0,0 +1,140 @@ +package com.emc.ecs.monitoring.sample; +import java.net.*; +import java.io.*; +import java.util.*; +import javax.net.ssl.HttpsURLConnection; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +// Obtains an Azure AD bearer token via the OAuth2 client-credentials grant, then calls the Azure Monitor REST API (see README above). +public class GetMonitoringRequest { + private static final Logger logger = LoggerFactory.getLogger(GetMonitoringRequest.class); + + protected static Map<String, String> getHeaders() { + Map<String, String> headers = new HashMap<>(); + headers.put("Content-Type", "application/x-www-form-urlencoded"); + headers.put("Accept", "application/json"); + headers.put("Content-Encoding", "UTF-8"); + headers.put("Connection", "keep-alive"); + return headers; + } + + public static String getResponse(String httpsURL, Map<String, String> headers, String payload, String method) throws Exception { + URL myurl = new URL(httpsURL); + String response = null; + logger.info("Sending a " + method + " request to:" + httpsURL); + HttpsURLConnection con = (HttpsURLConnection)myurl.openConnection(); + con.setRequestMethod(method); + for (Map.Entry<String, String> entry : headers.entrySet()) { + con.setRequestProperty(entry.getKey(), entry.getValue()); + } + con.setDoOutput(true); + con.setDoInput(true); + if (method.equals("POST")) { + try (DataOutputStream output = new DataOutputStream(con.getOutputStream())) { + output.writeBytes(payload); + } + } + try (DataInputStream input = new DataInputStream(con.getInputStream())) { + StringBuffer contents 
= new StringBuffer(); + String tmp; + while ((tmp = input.readLine()) != null) { + contents.append(tmp); + logger.debug("tmp="+tmp); + } + response = contents.toString(); + } + logger.info("Resp Code:" + con.getResponseCode()); + return response; + } + + + private static String getToken(String accessKeyId, String accessSecret, String ARMResource, String tenantId, String spnPayload) { + String TokenEndpoint = "https://login.windows.net/{0}/oauth2/token"; + String address = "https://login.windows.net/" + tenantId + "/oauth2/token"; + logger.info("address="+address); + String token = ""; + try { + String payload = "resource=" + + java.net.URLEncoder.encode(ARMResource,"UTF-8") + + "&client_id=" + java.net.URLEncoder.encode(accessKeyId, "UTF-8") + + "&grant_type=client_credentials&client_secret=" + + java.net.URLEncoder.encode(accessSecret, "UTF-8"); + logger.info("payload="+payload); + Map headers = getHeaders(); + String response = getResponse(address, headers, payload, "POST"); + int start = response.indexOf("\"access_token\":\""); + if ( start != -1 ) { + start += 16; + int end = response.indexOf("\"", start); + if ( end != -1 && end > start) { + token = response.substring(start, end); + logger.info("response:" + response); + } else { + logger.info("token not found in response."); + } + } else { + logger.info("access_token not found in response."); + } + } catch (Exception e) { + e.printStackTrace(); + logger.error("Exception:", e); + } + return token; + } + + public static void main(String[] args) throws InvalidKeyException, NoSuchAlgorithmException, IllegalStateException, UnsupportedEncodingException { + String AZURE_ACCESS_KEY_ID="my_access_key_id"; + String AZURE_SECRET_ACCESS_KEY="my_access_secret_id"; + String AZURE_TENANT_ID = "my_azure_tenant_guid"; + String ARMResource = "https://management.core.windows.net/"; + String SPNPayload = "resource={0}&client_id={1}&grant_type=client_credentials&client_secret={2}"; + String endpoint="https://management.azure.com/"; + String AZURE_request_parameters="Action=GetMetricStatistics&Version=2010-08-01"; + String amz_date = "20181230T125500Z"; + String date_stamp = "20181230"; + String subscriptionId = "my_azure_subscription_guid"; + String resourceGroupName = "RaviRajamaniRG"; + String resource = "subscriptions/"+ subscriptionId + "/resourceGroups/" + resourceGroupName + "/providers/Microsoft.Web/sites/shrink-text/metricdefinitions?api-version=2018-02-01"; + String canonical_uri = endpoint + resource; + String canonical_querystring = ""; + String method = "POST"; + String accessKey = "my_access_key_id"; + String accessSecretKey = "my_access_secret_id"; + String tenantId = "my_azure_tenant_id"; + String armResource = "https://management.core.windows.net/"; + AZURE_ACCESS_KEY_ID=accessKey; + AZURE_SECRET_ACCESS_KEY=accessSecretKey; + String request_parameters = ""; + + try { + String token = getToken(AZURE_ACCESS_KEY_ID, AZURE_SECRET_ACCESS_KEY, ARMResource, AZURE_TENANT_ID, SPNPayload); + logger.info("token="+token); + Map headers= getHeaders(); + headers.put("Authorization", "Bearer " + token); + String response = getResponse(canonical_uri, headers, request_parameters, "GET"); + logger.info("response:"+response); + } catch (Exception e) { + e.printStackTrace(); + logger.error("Exception:", e); + } + } +} +// +// output: +/* +(venv) ravi@RaviRajamani:~/ecs/ecs-samples/azure-java-workshop1$ java -jar build/libs/azure-java-workshop1-1.0.jar +[main] INFO com.emc.ecs.monitoring.sample.GetMonitoringRequest - 
address=https://login.windows.net//oauth2/token +[main] INFO com.emc.ecs.monitoring.sample.GetMonitoringRequest - payload=resource=https%3A%2F%2Fmanagement.core.windows.net%2F&client_id=&grant_type=client_credentials&client_secret= +[main] INFO com.emc.ecs.monitoring.sample.GetMonitoringRequest - Sending a POST request to:https://login.windows.net//oauth2/token +[main] INFO com.emc.ecs.monitoring.sample.GetMonitoringRequest - Resp Code:200 +[main] INFO com.emc.ecs.monitoring.sample.GetMonitoringRequest - response:{"token_type":"Bearer","expires_in":"3600","ext_expires_in":"3600","expires_on":"1547434329","not_before":"1547430429","resource":"https://management.core.windows.net/","access_token":""} +[main] INFO com.emc.ecs.monitoring.sample.GetMonitoringRequest - token= +[main] INFO com.emc.ecs.monitoring.sample.GetMonitoringRequest - Sending a GET request to:https://management.azure.com/subscriptions//resourceGroups/RaviRajamaniRG/providers/Microsoft.Web/sites/shrink-text/metricdefinitions?api-version=2018-02-01 +[main] INFO com.emc.ecs.monitoring.sample.GetMonitoringRequest - Resp Code:200 +*/ diff --git a/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/css/main.css b/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/css/main.css new file mode 100644 index 0000000..ab36e31 --- /dev/null +++ b/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/css/main.css @@ -0,0 +1,54 @@ +#clickSource { + display: inline-block; + *display: inline; + zoom: 1; + padding: 6px 20px; + margin: 0; + cursor: pointer; + border: 1px solid #bbb; + overflow: visible; + font: bold 13px arial, helvetica, sans-serif; + text-decoration: none; + white-space: nowrap; + color: #555; + text-transform: capitalize; + + background-color: #ddd; + background-image: -webkit-gradient(linear, left top, left bottom, from(rgba(255,255,255,1)), to(rgba(255,255,255,0))); + background-image: -webkit-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -moz-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -ms-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -o-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + + -webkit-transition: background-color .2s ease-out; + -moz-transition: background-color .2s ease-out; + -ms-transition: background-color .2s ease-out; + -o-transition: background-color .2s ease-out; + transition: background-color .2s ease-out; + background-clip: padding-box; /* Fix bleeding */ + + -moz-border-radius: 3px; + -webkit-border-radius: 3px; + border-radius: 3px; + + -moz-box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + text-shadow: 0 1px 0 rgba(255,255,255, .9); + + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +#clickTarget { + float: right; + height: 80px; + border: 1px solid #999; + font: 14pt arial; + color: chocolate; +} \ No newline at end of file diff --git a/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/js/main.js 
b/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/js/main.js new file mode 100644 index 0000000..382f654 --- /dev/null +++ b/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/js/main.js @@ -0,0 +1,8 @@ +var counter = 0; +function click(e) { + document.getElementById('clickTarget').innerText = 'You clicked me ' + ++counter + ' times!'; +} +function load() { + document.getElementById('clickSource').onclick = click; +} +window.onload = load; \ No newline at end of file diff --git a/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/main.html b/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/main.html new file mode 100644 index 0000000..714dfda --- /dev/null +++ b/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/main.html @@ -0,0 +1,20 @@ + + + + + ECS Static Website Test + + + + +

This is a static website

+

All of the images, stylesheets and links you see are hosted within a bucket and publicly readable. + Note that all references are relative. This is because the namespace is included in the host name, which consumes + the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any + absolute references.

+

Page 1

+

Page 2

+
Click Me!
+
+ + \ No newline at end of file diff --git a/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/pages/page1.html b/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/pages/page1.html new file mode 100644 index 0000000..89cede8 --- /dev/null +++ b/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/pages/page1.html @@ -0,0 +1,20 @@ + + + + + ECS Static Website Test - Page 1 + + + + +

This is a sub-page of a static website

+

All of the images, stylesheets and links you see are hosted within a bucket and publicly readable. + Note that all references are relative. This is because the namespace is included in the host name, which consumes + the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any + absolute references.

+

Back to main page

+

Page 2

+
Click Me!
+
+ + \ No newline at end of file diff --git a/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/pages/page2.html b/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/pages/page2.html new file mode 100644 index 0000000..d1fde0d --- /dev/null +++ b/ecs-metrics-workshop/azure-get-monitoring-workshop/src/main/resources/pages/page2.html @@ -0,0 +1,20 @@ + + + + + ECS Static Website Test - Page 2 + + + + +

This is a sub-page of a static website

+

All of the images, stylesheets and links you see are hosted within a bucket and publicly readable. + Note that all references are relative. This is because the namespace is included in the host name, which consumes + the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any + absolute references.

+

Back to main page

+

Page 1

+
Click Me!
+
+ + \ No newline at end of file diff --git a/ecs-metrics-workshop/azure-put-monitoring-workshop/README.md b/ecs-metrics-workshop/azure-put-monitoring-workshop/README.md new file mode 100644 index 0000000..013245a --- /dev/null +++ b/ecs-metrics-workshop/azure-put-monitoring-workshop/README.md @@ -0,0 +1,5 @@ +# +# Azure Put Monitoring api sample for metrics published from on-premise instances +# +# Details: http://github.com/EMCECS/ecs-samples/files/2779641/AzureMetrics.docx + diff --git a/ecs-metrics-workshop/azure-put-monitoring-workshop/build.gradle b/ecs-metrics-workshop/azure-put-monitoring-workshop/build.gradle new file mode 100644 index 0000000..d4f2673 --- /dev/null +++ b/ecs-metrics-workshop/azure-put-monitoring-workshop/build.gradle @@ -0,0 +1,50 @@ +/* + * Copyright 2013-2018 Dell Inc. or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +group = 'com.emc.ecs.monitoring' +version = '1.0' + +apply plugin: 'java' +apply plugin: 'idea' +apply plugin: 'eclipse' + +repositories { + mavenCentral() +} + +dependencies { + // https://docs.gradle.org/current/userguide/declaring_dependencies.html + compile group: 'org.slf4j', name: 'slf4j-simple', version: '1.7.24' + compile group: 'org.slf4j', name:'slf4j-api', version: '1.7.24' + compile group: 'com.jayway.jsonpath', name:'json-path', version:'2.1.0' + implementation 'org.springframework:spring-web:5.0.2.RELEASE' + +} + +task wrapper(type: Wrapper) { + gradleVersion = '4.3.1' +} + +jar { + from { + configurations.compile.collect { + it.isDirectory() ? it : zipTree(it) + } + } + manifest { + attributes( + 'Main-Class': 'com.emc.ecs.monitoring.sample.PutMonitoringRequest' + ) + } +} diff --git a/ecs-metrics-workshop/azure-put-monitoring-workshop/gradle/wrapper/gradle-wrapper.properties b/ecs-metrics-workshop/azure-put-monitoring-workshop/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..ab4b7fe --- /dev/null +++ b/ecs-metrics-workshop/azure-put-monitoring-workshop/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +#Thu Nov 23 11:33:09 CET 2017 +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-4.3.1-bin.zip diff --git a/ecs-metrics-workshop/azure-put-monitoring-workshop/gradlew b/ecs-metrics-workshop/azure-put-monitoring-workshop/gradlew new file mode 100755 index 0000000..91a7e26 --- /dev/null +++ b/ecs-metrics-workshop/azure-put-monitoring-workshop/gradlew @@ -0,0 +1,164 @@ +#!/usr/bin/env bash + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Use the maximum available, or set MAX_FD != -1 to use that value. 
+MAX_FD="maximum" + +warn ( ) { + echo "$*" +} + +die ( ) { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; +esac + +# For Cygwin, ensure paths are in UNIX format before anything is touched. +if $cygwin ; then + [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"` +fi + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >&- +APP_HOME="`pwd -P`" +cd "$SAVED" >&- + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules +function splitJvmOpts() { + JVM_OPTS=("$@") +} +eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS +JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" + +exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" diff --git a/ecs-metrics-workshop/azure-put-monitoring-workshop/gradlew.bat b/ecs-metrics-workshop/azure-put-monitoring-workshop/gradlew.bat new file mode 100644 index 0000000..aec9973 --- /dev/null +++ b/ecs-metrics-workshop/azure-put-monitoring-workshop/gradlew.bat @@ -0,0 +1,90 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. 
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto init
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:init
+@rem Get command-line arguments, handling Windowz variants
+
+if not "%OS%" == "Windows_NT" goto win9xME_args
+if "%@eval[2+2]" == "4" goto 4NT_args
+
+:win9xME_args
+@rem Slurp the command line arguments.
+set CMD_LINE_ARGS=
+set _SKIP=2
+
+:win9xME_args_slurp
+if "x%~1" == "x" goto execute
+
+set CMD_LINE_ARGS=%*
+goto execute
+
+:4NT_args
+@rem Get arguments from the 4NT Shell from JP Software
+set CMD_LINE_ARGS=%$
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
+
+:end
+@rem End local scope for the variables with windows NT shell
+if "%ERRORLEVEL%"=="0" goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
+exit /b 1
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega
diff --git a/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/java/com/emc/ecs/monitoring/sample/PutMonitoringRequest.java b/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/java/com/emc/ecs/monitoring/sample/PutMonitoringRequest.java
new file mode 100644
index 0000000..74215f7
--- /dev/null
+++ b/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/java/com/emc/ecs/monitoring/sample/PutMonitoringRequest.java
@@ -0,0 +1,153 @@
+package com.emc.ecs.monitoring.sample;
+import java.net.*;
+import java.io.*;
+import java.util.*;
+import javax.net.ssl.HttpsURLConnection;
+import java.security.InvalidKeyException;
+import java.security.NoSuchAlgorithmException;
+
+import javax.crypto.Mac;
+import javax.crypto.spec.SecretKeySpec;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class PutMonitoringRequest {
+    private static final Logger logger = LoggerFactory.getLogger(PutMonitoringRequest.class);
+
+    // Default headers for the token request; the metrics POST overrides Content-Type later.
+    protected static Map<String, String> getHeaders() {
+        Map<String, String> headers = new HashMap<>();
+        headers.put("Content-Type", "application/x-www-form-urlencoded");
+        headers.put("Accept", "application/json");
+        headers.put("Content-Encoding", "UTF-8");
+        headers.put("Connection", "keep-alive");
+        return headers;
+    }
+
+    public static String getResponse(String httpsURL, Map<String, String> headers, String payload, String method) throws Exception {
+        URL myurl = new URL(httpsURL);
+        String response = null;
+        logger.info("Sending a " + method + " request to:" + httpsURL);
+        HttpsURLConnection con = (HttpsURLConnection) myurl.openConnection();
+        con.setRequestMethod(method);
+        for (Map.Entry<String, String> entry : headers.entrySet()) {
+            con.setRequestProperty(entry.getKey(), entry.getValue());
+        }
+        con.setDoOutput(true);
+        con.setDoInput(true);
+        if (method.equals("POST")) {
+            try (DataOutputStream output = new DataOutputStream(con.getOutputStream())) {
+                output.writeBytes(payload);
+            }
+        }
+        try (BufferedReader input = new BufferedReader(new InputStreamReader(con.getInputStream(), "UTF-8"))) {
+            StringBuilder contents = new StringBuilder();
+            String tmp;
+            while ((tmp = input.readLine()) != null) {
+                contents.append(tmp);
+                logger.debug("tmp=" + tmp);
+            }
+            response = contents.toString();
+        }
+        logger.info("Resp Code:" + con.getResponseCode());
+        return response;
+    }
+
+    // Requests an OAuth2 bearer token for the service principal from Azure AD
+    // using the client-credentials grant.
+    private static String getToken(String accessKeyId, String accessSecret, String ARMResource, String tenantId, String spnPayload) {
+        String address = "https://login.windows.net/" + tenantId + "/oauth2/token";
+        logger.info("address=" + address);
+        String token = "";
+        try {
+            String payload = "resource=" +
+                java.net.URLEncoder.encode(ARMResource, "UTF-8") +
+                "&client_id=" + java.net.URLEncoder.encode(accessKeyId, "UTF-8") +
+                "&grant_type=client_credentials&client_secret=" +
+                java.net.URLEncoder.encode(accessSecret, "UTF-8");
+            logger.info("payload=" + payload);
+            Map<String, String> headers = getHeaders();
+            String response = getResponse(address, headers, payload, "POST");
+            // Pull the access_token value out of the JSON response by hand;
+            // 16 is the length of the "access_token":" prefix.
+            int start = response.indexOf("\"access_token\":\"");
+            if (start != -1) {
+                start += 16;
+                int end = response.indexOf("\"", start);
+                if (end != -1 && end > start) {
+                    token = response.substring(start, end);
+                    logger.info("response:" + response);
+                } else {
+                    logger.info("token not found in response.");
+                }
+            } else {
+                logger.info("access_token not found in response.");
+            }
+        } catch (Exception e) {
+            e.printStackTrace();
+            logger.error("Exception:", e);
+        }
+        return token;
+    }
+
+    public static void main(String[] args) throws InvalidKeyException, NoSuchAlgorithmException, IllegalStateException, UnsupportedEncodingException {
+        String endpoint = "https://management.azure.com/";
+        String subscriptionId = "my_azure_subscription_id";
+        String resourceGroupName = "RaviRajamaniRG";
+        String resource = "subscriptions/" + subscriptionId + "/resourcegroups/DELLEMC/providers/ECS/storageAccounts/objectstore/metrics?api-version=2018-02-01";
+        String canonical_uri = endpoint + resource;
+        String canonical_querystring = "";
+        String method = "POST";
+        String accessKey = "my_access_key_id";
+        String accessSecretKey = "my_access_secret_id";
+        String tenantId = "my_azure_tenant_id";
+        String armResource = "https://management.core.windows.net/";
+        String spnPayload = "resource={0}&client_id={1}&grant_type=client_credentials&client_secret={2}";
+        // Build the metric payload by hand; Timestamp is deliberately left null in this sample.
+        String request_parameters = "{";
+        request_parameters += "\"Namespace\":\"On-PremiseObjectStorageMetrics\",";
+        request_parameters += "\"MetricData\":";
+        request_parameters += "[";
+        request_parameters += " {";
+        request_parameters += " \"MetricName\": \"NumberOfObjects1\",";
+        request_parameters += " \"Dimensions\": [";
+        request_parameters += " {";
+        request_parameters += " \"Name\": \"BucketName\",";
+        request_parameters += " \"Value\": \"ExampleBucket\"";
+        request_parameters += " },";
+        request_parameters += " {";
+        request_parameters += " \"Name\": \"ECSSystemId\",";
+        request_parameters += " \"Value\": \"UUID\"";
+        request_parameters += " }";
+        request_parameters += " ],";
+        request_parameters += " \"Timestamp\": " + null + ",";
+        request_parameters += " \"Value\": 10,";
+        request_parameters += " \"Unit\": \"Count\",";
+        request_parameters += " \"StorageResolution\": 60";
+        request_parameters += " }";
+        request_parameters += "]";
+        request_parameters += "}";
+
+        try {
+            String token = getToken(accessKey, accessSecretKey, armResource, tenantId, spnPayload);
+            logger.info("token=" + token);
+            Map<String, String>
headers= getHeaders(); + headers.put("Authorization", "Bearer " + token); + headers.remove("Content-Type"); + headers.put("Content-Type", "application/json"); + String response = getResponse(canonical_uri, headers, request_parameters, "POST"); + } catch (Exception e) { + e.printStackTrace(); + logger.error("Exception:", e); + } + } +} +// +// output: +// +//[main] INFO com.emc.ecs.monitoring.sample.PutMonitoringRequest - address=https://login.windows.net//oauth2/token +// [main] INFO com.emc.ecs.monitoring.sample.PutMonitoringRequest - payload=resource=https%3A%2F%2Fmanagement.core.windows.net%2F&client_id=&grant_type=client_credentials&client_secret= +// [main] INFO com.emc.ecs.monitoring.sample.PutMonitoringRequest - Sending a POST request to:https://login.windows.net/tenantId/oauth2/token +// [main] INFO com.emc.ecs.monitoring.sample.PutMonitoringRequest - Resp Code:200 +// [main] INFO com.emc.ecs.monitoring.sample.PutMonitoringRequest - response:{"token_type":"Bearer","expires_in":"3599","ext_expires_in":"3599","expires_on":"1547440166","not_before":"1547436266","resource":"https://management.core.windows.net/","access_token":""} +// [main] INFO com.emc.ecs.monitoring.sample.PutMonitoringRequest - token= +// [main] INFO com.emc.ecs.monitoring.sample.PutMonitoringRequest - Sending a POST request to:https://management.azure.com/subscriptions/656e67c6-f810-4ea6-8b89-636dd0b6774c/resourcegroups/DELLEMC/providers/ECS/storageAccounts/objectstore/metrics?api-version=2018-02-01 + diff --git a/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/css/main.css b/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/css/main.css new file mode 100644 index 0000000..ab36e31 --- /dev/null +++ b/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/css/main.css @@ -0,0 +1,54 @@ +#clickSource { + display: inline-block; + *display: inline; + zoom: 1; + padding: 6px 20px; + margin: 0; + cursor: pointer; + border: 1px solid #bbb; + overflow: visible; + font: bold 13px arial, helvetica, sans-serif; + text-decoration: none; + white-space: nowrap; + color: #555; + text-transform: capitalize; + + background-color: #ddd; + background-image: -webkit-gradient(linear, left top, left bottom, from(rgba(255,255,255,1)), to(rgba(255,255,255,0))); + background-image: -webkit-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -moz-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -ms-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: -o-linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + background-image: linear-gradient(top, rgba(255,255,255,1), rgba(255,255,255,0)); + + -webkit-transition: background-color .2s ease-out; + -moz-transition: background-color .2s ease-out; + -ms-transition: background-color .2s ease-out; + -o-transition: background-color .2s ease-out; + transition: background-color .2s ease-out; + background-clip: padding-box; /* Fix bleeding */ + + -moz-border-radius: 3px; + -webkit-border-radius: 3px; + border-radius: 3px; + + -moz-box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + box-shadow: 0 1px 0 rgba(0, 0, 0, .3), 0 2px 2px -1px rgba(0, 0, 0, .5), 0 1px 0 rgba(255, 255, 255, .3) inset; + text-shadow: 0 1px 0 rgba(255,255,255, .9); + + 
-webkit-touch-callout: none;
+    -webkit-user-select: none;
+    -khtml-user-select: none;
+    -moz-user-select: none;
+    -ms-user-select: none;
+    user-select: none;
+}
+
+#clickTarget {
+    float: right;
+    height: 80px;
+    border: 1px solid #999;
+    font: 14pt arial;
+    color: chocolate;
+}
\ No newline at end of file
diff --git a/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/js/main.js b/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/js/main.js
new file mode 100644
index 0000000..382f654
--- /dev/null
+++ b/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/js/main.js
@@ -0,0 +1,8 @@
+var counter = 0;
+function click(e) {
+    document.getElementById('clickTarget').innerText = 'You clicked me ' + ++counter + ' times!';
+}
+function load() {
+    document.getElementById('clickSource').onclick = click;
+}
+window.onload = load;
\ No newline at end of file
diff --git a/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/main.html b/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/main.html
new file mode 100644
index 0000000..714dfda
--- /dev/null
+++ b/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/main.html
@@ -0,0 +1,20 @@
+<html>
+<head>
+    <title>ECS Static Website Test</title>
+    <link rel="stylesheet" type="text/css" href="css/main.css"/>
+    <script type="text/javascript" src="js/main.js"></script>
+</head>
+<body>
+<h1>This is a static website</h1>
+<p>
+    All of the images, stylesheets and links you see are hosted within a bucket and publicly readable.
+    Note that all references are relative. This is because the namespace is included in the host name, which consumes
+    the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any
+    absolute references.
+</p>
+<p><a href="pages/page1.html">Page 1</a></p>
+<p><a href="pages/page2.html">Page 2</a></p>
+<a id="clickSource">Click Me!</a>
+<div id="clickTarget"></div>
+</body>
+</html>
\ No newline at end of file
diff --git a/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/pages/page1.html b/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/pages/page1.html
new file mode 100644
index 0000000..89cede8
--- /dev/null
+++ b/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/pages/page1.html
@@ -0,0 +1,20 @@
+<html>
+<head>
+    <title>ECS Static Website Test - Page 1</title>
+    <link rel="stylesheet" type="text/css" href="../css/main.css"/>
+    <script type="text/javascript" src="../js/main.js"></script>
+</head>
+<body>
+<h1>This is a sub-page of a static website</h1>
+<p>
+    All of the images, stylesheets and links you see are hosted within a bucket and publicly readable.
+    Note that all references are relative. This is because the namespace is included in the host name, which consumes
+    the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any
+    absolute references.
+</p>
+<p><a href="../main.html">Back to main page</a></p>
+<p><a href="page2.html">Page 2</a></p>
+<a id="clickSource">Click Me!</a>
+<div id="clickTarget"></div>
+</body>
+</html>
\ No newline at end of file
diff --git a/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/pages/page2.html b/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/pages/page2.html
new file mode 100644
index 0000000..d1fde0d
--- /dev/null
+++ b/ecs-metrics-workshop/azure-put-monitoring-workshop/src/main/resources/pages/page2.html
@@ -0,0 +1,20 @@
+<html>
+<head>
+    <title>ECS Static Website Test - Page 2</title>
+    <link rel="stylesheet" type="text/css" href="../css/main.css"/>
+    <script type="text/javascript" src="../js/main.js"></script>
+</head>
+<body>
+<h1>This is a sub-page of a static website</h1>
+<p>
+    All of the images, stylesheets and links you see are hosted within a bucket and publicly readable.
+    Note that all references are relative. This is because the namespace is included in the host name, which consumes
+    the wildcard portion of the SSL certificate. Therefore the bucket must be in the path, which would mess up any
+    absolute references.
+</p>
+<p><a href="../main.html">Back to main page</a></p>
+<p><a href="page1.html">Page 1</a></p>
+<a id="clickSource">Click Me!</a>
+<div id="clickTarget"></div>
+</body>
+</html>
+ + \ No newline at end of file diff --git a/ecs-metrics-workshop/local-cas-tester/README.md b/ecs-metrics-workshop/local-cas-tester/README.md new file mode 100644 index 0000000..68b3298 --- /dev/null +++ b/ecs-metrics-workshop/local-cas-tester/README.md @@ -0,0 +1,4 @@ +# +# Content Addressed Storage Tester +# CAS is available from ECS. This is a standalone tool to write clips to this storage. + diff --git a/ecs-metrics-workshop/local-cas-tester/build.gradle b/ecs-metrics-workshop/local-cas-tester/build.gradle new file mode 100644 index 0000000..424a237 --- /dev/null +++ b/ecs-metrics-workshop/local-cas-tester/build.gradle @@ -0,0 +1,66 @@ +/* + * Copyright 2013-2018 Dell Inc. or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://www.apache.org/licenses/LICENSE-2.0.txt + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +group = 'com.emc.ecs.cas' +version = '1.0' + +apply plugin: 'java' +apply plugin: 'idea' +apply plugin: 'eclipse' + +repositories { + jcenter() + mavenCentral() + google() + maven { + url 'https://maven.fabric.io/public' + } + maven { url 'https://dl.bintray.com/android/android-tools' } + maven { url 'https://mvnrepository.com/artifact/org.apache.commons/commons-lang3' } +} + +dependencies { + compile files ('./Centera_SDK/lib/FPLibrary.jar') + compile group: 'org.slf4j', name: 'slf4j-simple', version: '1.7.24' + compile group: 'org.slf4j', name:'slf4j-api', version: '1.7.24' + compile group: 'com.jayway.jsonpath', name:'json-path', version:'2.1.0' + compile group: 'commons-io', name:'commons-io', version:'2.5' + compile group: 'com.google.gms', name:'google-services', version:'4.2.0' + compile group: 'com.google.guava', name:'guava', version:'14.0' + compile group: 'org.apache.commons', name: 'commons-lang3', version: '3.1' + implementation 'org.springframework:spring-web:5.0.2.RELEASE' + +} + +task wrapper(type: Wrapper) { + gradleVersion = '4.3.1' +} + +jar { + // from { + // configurations.compile.collect { + // it.isDirectory() ? it : zipTree(it) + // } + // } + from { + configurations.compile.collect { + it.isDirectory() ? 
it : zipTree(it) + } + } + manifest { + attributes( + 'Main-Class': 'com.emc.ecs.cas.sample.CasTester' + ) + } +} diff --git a/ecs-metrics-workshop/local-cas-tester/gradle/wrapper/gradle-wrapper.properties b/ecs-metrics-workshop/local-cas-tester/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..ab4b7fe --- /dev/null +++ b/ecs-metrics-workshop/local-cas-tester/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +#Thu Nov 23 11:33:09 CET 2017 +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-4.3.1-bin.zip diff --git a/ecs-metrics-workshop/local-cas-tester/gradlew b/ecs-metrics-workshop/local-cas-tester/gradlew new file mode 100755 index 0000000..91a7e26 --- /dev/null +++ b/ecs-metrics-workshop/local-cas-tester/gradlew @@ -0,0 +1,164 @@ +#!/usr/bin/env bash + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn ( ) { + echo "$*" +} + +die ( ) { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; +esac + +# For Cygwin, ensure paths are in UNIX format before anything is touched. +if $cygwin ; then + [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"` +fi + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >&- +APP_HOME="`pwd -P`" +cd "$SAVED" >&- + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules +function splitJvmOpts() { + JVM_OPTS=("$@") +} +eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS +JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" + +exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" diff --git a/ecs-metrics-workshop/local-cas-tester/gradlew.bat b/ecs-metrics-workshop/local-cas-tester/gradlew.bat new file mode 100644 index 0000000..aec9973 --- /dev/null +++ b/ecs-metrics-workshop/local-cas-tester/gradlew.bat @@ -0,0 +1,90 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. 
+echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windowz variants + +if not "%OS%" == "Windows_NT" goto win9xME_args +if "%@eval[2+2]" == "4" goto 4NT_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* +goto execute + +:4NT_args +@rem Get arguments from the 4NT Shell from JP Software +set CMD_LINE_ARGS=%$ + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/ecs-metrics-workshop/local-cas-tester/src/main/java/com/emc/ecs/cas/sample/CasTester.java b/ecs-metrics-workshop/local-cas-tester/src/main/java/com/emc/ecs/cas/sample/CasTester.java new file mode 100644 index 0000000..17956b0 --- /dev/null +++ b/ecs-metrics-workshop/local-cas-tester/src/main/java/com/emc/ecs/cas/sample/CasTester.java @@ -0,0 +1,314 @@ +package com.emc.ecs.cas.sample; +import com.filepool.fplibrary.*; +import com.google.common.base.Preconditions; +import com.google.common.base.Throwables; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.*; +import java.net.*; +import java.util.*; + +import static java.lang.String.format; +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Arrays.asList; +import static java.util.Arrays.stream; +import static org.apache.commons.lang3.ArrayUtils.EMPTY_BYTE_ARRAY; + +import javax.net.ssl.HttpsURLConnection; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.util.concurrent.Callable; +import java.util.concurrent.Executors; +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; +import org.apache.commons.io.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/* + * This class is a standalone utility to test the DELL ECS CAS Head Service from command-line + * It lists the helper methods required to test the CAS Head Service. 
+ */
+public class CasTester {
+    private static final Logger logger = LoggerFactory.getLogger(CasTester.class);
+    public static final int blobSize = 1024 * 16 * 2; //32768
+    private static final String DEFAULT_BLOB_TAG_NAME = "blob";
+    private static final String CLIP_LIST_TAG_NAME = "clip_list";
+    private static final String STANDARD_CAS_HEAD_PORT = "3218";
+    private static final byte[] payload = new byte[blobSize];
+
+    static {
+        // Fill the payload with the printable ASCII range 0x21..0x7A, repeated.
+        byte c = 0x21;
+        for (int i = 0; i < payload.length; ++i) {
+            payload[i] = c;
+            c = (c == 0x7A) ? 0x21 : (byte) (c + 1);
+        }
+    }
+
+    private class CasConnection {
+        private FPPool fpPool;
+        private String connectionString;
+
+        public CasConnection(String connectionString, FPPool fpPool) {
+            this.fpPool = fpPool;
+            this.connectionString = connectionString;
+        }
+
+        public FPPool getFpPool() {
+            return fpPool;
+        }
+
+        public String getConnectionString() {
+            return connectionString;
+        }
+
+        public void Close() throws Exception {
+            if (fpPool != null) {
+                fpPool.Close();
+            }
+        }
+    }
+
+    private CasConnection getCasConnection(String ip, String port, String user, String password, String namespace, File pea) throws Exception {
+        // The port (3218, STANDARD_CAS_HEAD_PORT, is the standard CAS head port) and the PEA
+        // path are both strings, so use %s throughout; %d would throw at run time.
+        String connectionString = format(
+                "%s:%s?path=%s",
+                ip, port, pea.getAbsolutePath());
+        FPPool fpPool = getFP(connectionString);
+        if (fpPool == null) {
+            String message = "FPPool could not be instantiated.";
+            logger.error(message);
+            throw new Exception(message);
+        }
+        CasConnection casConnection = new CasConnection(connectionString, fpPool);
+        return casConnection;
+    }
+
+    protected static Map<String, String> getHeaders() {
+        Map<String, String> headers = new HashMap<>();
+        headers.put("Content-Type", "application/x-www-form-urlencoded");
+        headers.put("Accept", "application/json");
+        headers.put("Content-Encoding", "UTF-8");
+        headers.put("Connection", "keep-alive");
+        return headers;
+    }
+
+    public static String getResponse(String httpsURL, Map<String, String> headers, String payload, String method) throws Exception {
+        URL myurl = new URL(httpsURL);
+        String response = null;
+        logger.info("Sending a " + method + " request to:" + httpsURL);
+        HttpsURLConnection con = (HttpsURLConnection) myurl.openConnection();
+        con.setRequestMethod(method);
+        for (Map.Entry<String, String> entry : headers.entrySet()) {
+            con.setRequestProperty(entry.getKey(), entry.getValue());
+        }
+        con.setDoOutput(true);
+        con.setDoInput(true);
+        if (method.equals("POST")) {
+            try (DataOutputStream output = new DataOutputStream(con.getOutputStream())) {
+                output.writeBytes(payload);
+            }
+        }
+        try (BufferedReader input = new BufferedReader(new InputStreamReader(con.getInputStream(), "UTF-8"))) {
+            StringBuilder contents = new StringBuilder();
+            String tmp;
+            while ((tmp = input.readLine()) != null) {
+                contents.append(tmp);
+                logger.debug("tmp=" + tmp);
+            }
+            response = contents.toString();
+        }
+        logger.info("Resp Code:" + con.getResponseCode());
+        return response;
+    }
+
+    private static FPPool getFP(String connectionString) throws Exception {
+        FPPool fpPool = null;
+        try {
+            fpPool = new FPPool(connectionString);
+        } catch (Exception e) {
+            e.printStackTrace();
+            logger.error("Exception:", e);
+        }
+        // Do not close the pool here: the caller owns it and closes it via CasConnection.Close().
+        return fpPool;
+    }
+
+
+    private static String fillParams(String pattern, String ip, String port, String user, String namespace) {
+        String result = pattern
+                .replace("{ip}", ip)
+                .replace("{port}", port)
+                .replace("{userId}", user)
+                .replace("{namespace}", namespace);
+        return result;
+    }
+
+    public static String getUserCasSecret(String ip, String port, String user, String namespace) {
+        String secretEndPoint = fillParams("https://{ip}:{port}/object/user-cas/secret/{namespace}/{userId}",
+                ip, port, user, namespace);
+        logger.info("address={}", secretEndPoint);
+        String response = null;
+        try {
+            Map<String, String> headers = getHeaders();
+            response = getResponse(secretEndPoint, headers, null, "GET");
+            logger.info("secret:" + response);
+        } catch (Exception e) {
+            e.printStackTrace();
+            logger.error("Exception:", e);
+        }
+        return response;
+    }
+
+    public static String getProfilePea(String ip, String port, String user, String namespace) {
+        String peaEndpoint = fillParams("https://{ip}:{port}/object/user-cas/secret/{namespace}/{userId}/pea",
+                ip, port, user, namespace);
+        logger.info("address={}", peaEndpoint);
+        String response = null;
+        try {
+            Map<String, String> headers = getHeaders();
+            response = getResponse(peaEndpoint, headers, null, "GET");
+            logger.info("pea:" + response);
+        } catch (Exception e) {
+            e.printStackTrace();
+            logger.error("Exception:", e);
+        }
+        return response;
+    }
+
+    // Downloads the user's PEA (Pool Entry Authorization) profile and writes it to a temp file.
+    public static File fetchPeaFile(String ip,
+                                    String port,
+                                    final String user,
+                                    final String namespace) {
+        final File peaFile = new File(
+                FileUtils.getTempDirectory(),
+                String.join("-", user, ip, port) + ".pea"
+        );
+
+        final String pea = getProfilePea(ip, port, user, namespace);
+
+        try {
+            FileUtils.writeStringToFile(peaFile, pea);
+        } catch (final IOException e) {
+            throw new UncheckedIOException(e);
+        }
+
+        return peaFile;
+    }
+
+    private static class ClipsTask implements Callable<Void> {
+        private final CasConnection casConnection;
+        private final String ecsCasSecret;
+        private final File pea;
+        private byte[] payload;
+
+        public ClipsTask(CasConnection casConnection, String ecsCasSecret, File pea) {
+            this.casConnection = casConnection;
+            this.ecsCasSecret = ecsCasSecret;
+            this.pea = pea;
+            // Allocate the blob payload before filling it (printable ASCII, repeated).
+            this.payload = new byte[1024];
+            byte c = 0x21;
+            for (int i = 0; i < this.payload.length; ++i) {
+                this.payload[i] = c;
+                c = (c == 0x7A) ? 0x21 : (byte) (c + 1);
+            }
+        }
+
+        // Writes one blob tag into the clip.
+        public void accept(FPPool fpPool, FPClip fpClip) {
+            try {
+                final FPTag topTag = fpClip.getTopTag();
+                try {
+                    final FPTag blob = new FPTag(topTag, DEFAULT_BLOB_TAG_NAME);
+                    try {
+                        final byte[] clipTime = EMPTY_BYTE_ARRAY;
+
+                        blob.BlobWrite(new SequenceInputStream(new ByteArrayInputStream(clipTime), new ByteArrayInputStream(payload)));
+                    } finally {
+                        blob.Close();
+                    }
+                } finally {
+                    topTag.Close();
+                }
+            } catch (FPLibraryException | IOException e) {
+                Throwables.propagate(e);
+            }
+        }
+
+        @Override
+        public Void call() throws Exception {
+            // Write ten clips, reading each one back to verify the round trip.
+            for (int i = 0; i < 10; i++) {
+                final FPClip fpClip = new FPClip(casConnection.getFpPool());
+                try {
+                    accept(casConnection.getFpPool(), fpClip);
+                    final String clipRefId = fpClip.Write();
+
+                    final ByteArrayOutputStream cdf = new ByteArrayOutputStream();
+                    fpClip.RawRead(cdf);
+                    logger.info("clip Id: {}, size read = {}", clipRefId, cdf.size());
+                } finally {
+                    fpClip.Close();
+                }
+            }
+            return null;
+        }
+    }
+
+    public void writeClips(String ip, String port, String user, String password, String namespace) throws Exception {
+        final String ecsCasSecret = getUserCasSecret(ip, port, user, namespace);
+        if (ecsCasSecret == null) {
+            String message = "Credentials are not correct";
+            logger.error(message);
+            throw new Exception(message);
+        }
+        File pea = fetchPeaFile(ip, port, user, namespace);
+        CasConnection casConnection = getCasConnection(ip, port, user, password, namespace,
pea); + + try { + final ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2)); + + executor.submit(new ClipsTask(casConnection, ecsCasSecret, pea)); + } catch (Exception e) { + e.printStackTrace(); + logger.error("Exception:", e); + } + + Thread.sleep(15000); + casConnection.Close(); + } + + public static void main(String[] args) throws Exception { + String ip = "10.247.142.111"; // NetworkUtility.getNodeIp(); + String user = "apiuser"; + String port = "3218"; + String password = ""; + String namespace = "s3"; + String bucket = "b6"; + try { + + CasTester tester = new CasTester(); + + tester.writeClips(ip, port, user, password, namespace); + + } catch (Exception e) { + e.printStackTrace(); + logger.error("Exception:", e); + } + } +} +// [main] INFO com.emc.ecs.cas.sample.CasTester - address=https://10.247.142.111:3218/object/user-cas/secret/s3/apiuser +// [main] INFO com.emc.ecs.cas.sample.CasTester - Sending a GET request to:https://10.247.142.111:3218/object/user-cas/secret/s3/apiuser + diff --git a/ecs-operator/.gitignore b/ecs-operator/.gitignore new file mode 100644 index 0000000..7c50470 --- /dev/null +++ b/ecs-operator/.gitignore @@ -0,0 +1,77 @@ +# Temporary Build Files +build/_output +build/_test +# Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode +### Emacs ### +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* +# Org-mode +.org-id-locations +*_archive +# flymake-mode +*_flymake.* +# eshell files +/eshell/history +/eshell/lastdir +# elpa packages +/elpa/ +# reftex files +*.rel +# AUCTeX auto folder +/auto/ +# cask packages +.cask/ +dist/ +# Flycheck +flycheck_*.el +# server auth directory +/server/ +# projectiles files +.projectile +projectile-bookmarks.eld +# directory configuration +.dir-locals.el +# saveplace +places +# url cache +url/cache/ +# cedet +ede-projects.el +# smex +smex-items +# company-statistics +company-statistics-cache.el +# anaconda-mode +anaconda-mode/ +### Go ### +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +# Test binary, build with 'go test -c' +*.test +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +### Vim ### +# swap +.sw[a-p] +.*.sw[a-p] +# session +Session.vim +# temporary +.netrwhist +# auto-generated tag files +tags +### VisualStudioCode ### +.vscode/* +.history +# End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode diff --git a/ecs-operator/README.md b/ecs-operator/README.md new file mode 100644 index 0000000..22ab590 --- /dev/null +++ b/ecs-operator/README.md @@ -0,0 +1,23 @@ +# Overview +This is a Kubernetes operator for DELLEMC ECS Object Storage +https://www.dellemc.com/en-us/storage/ecs/index.htm + +A guide to writing a Kubernetes operator for DellEMC Elastic Cloud Storage now follows: +This ECS operator manages ECS clusters deployed to Kubernetes and automates tasks related to an ECS cluster. + +# Introduction: + +This document strives to curate the information needed in one place to write an operator for hosting ECS on Kubernetes. ECS is an on-premise Object Storage cloud provider and Kubernetes is a containerization technology. Kubernetes is well-known for isolating applications and making them portable with a rich and extensible framework. This framework allows declaring all the resources and awareness that the application needs. This document is intended for a software developer audience. 
+
+# Description:
+A Kubernetes operator is a controller that takes the resource definitions for an application and reconciles the deployed container configuration with those definitions. For example, an upgrade is described by the source and destination version numbers and the associated artifact locations, which gives the controller everything it needs to act. The definitions describe resources specific to the application and are called custom resource definitions (CRDs). They are written in YAML and maintained in a folder called deploy under the root project folder. The controller logic lives in a separate folder and is written in Go; it is organized into code for the apis and for the controller, plus the usual entry point. There is also a Dockerfile that packages the operator into a Docker image, together with an associated service account to run it.
+
+Writing the operator is made easier by the Operator SDK, a command-line tool that generates the scaffolding for the api and the controller. The custom resource definitions still have to be edited by hand. Operators follow the naming convention `<application-name>`-operator. Definitions are broken out into their own YAML files, and each definition carries a version, metadata and a specification, which makes changes to the definitions easy to track.
+
+The generated controller code contains structs (the Go primitive for declarations) and the functions that operate on them. The Reconcile function is central to the controller: when invoked, it fetches the resource definition and compares it with what is actually deployed. Kubernetes invokes the Reconcile of the operator that owns a resource definition each time the resource changes, so the logic is state driven; the states, their handlers and the ownership of activities should be spelled out in as much detail as possible. The controllerutil.SetControllerReference function is used to record the primary owner of a resource, as the sketch below shows.
+
+The operator-sdk tool also helps build the Docker image and push it to Docker Hub.
+
+# Conclusion:
+
+Writing an operator for Kubernetes is made easy by the scaffolding that the operator-sdk tool generates for the definitions and the controller. The next step is determining the custom resource definitions for the application.
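+
+To make the reconcile loop concrete, here is a minimal sketch in Go written against the controller-runtime API that the Operator SDK of this era scaffolds. The `ECSCluster` type, its package path, the `newPodForCluster` helper and the container image are illustrative assumptions, not part of this repository.
+
+```go
+package controller
+
+import (
+    "context"
+
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/types"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+    // Assumed location of the generated ECSCluster API types.
+    ecsv1alpha1 "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1"
+)
+
+// ReconcileECSCluster reconciles the (assumed) ECSCluster custom resource.
+type ReconcileECSCluster struct {
+    client client.Client
+    scheme *runtime.Scheme
+}
+
+// Reconcile is invoked by Kubernetes each time the watched resource changes.
+func (r *ReconcileECSCluster) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+    // Fetch the definition that triggered this reconcile.
+    instance := &ecsv1alpha1.ECSCluster{}
+    if err := r.client.Get(context.TODO(), request.NamespacedName, instance); err != nil {
+        if errors.IsNotFound(err) {
+            // Resource deleted; owned objects are garbage collected via their owner references.
+            return reconcile.Result{}, nil
+        }
+        return reconcile.Result{}, err
+    }
+
+    // Build the desired state and record the ECSCluster as its primary owner.
+    pod := newPodForCluster(instance)
+    if err := controllerutil.SetControllerReference(instance, pod, r.scheme); err != nil {
+        return reconcile.Result{}, err
+    }
+
+    // Compare the declared state with what is actually deployed; create anything missing.
+    found := &corev1.Pod{}
+    err := r.client.Get(context.TODO(), types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, found)
+    if err != nil && errors.IsNotFound(err) {
+        return reconcile.Result{}, r.client.Create(context.TODO(), pod)
+    }
+    return reconcile.Result{}, err
+}
+
+// newPodForCluster builds the single Pod this sketch manages (illustrative only).
+func newPodForCluster(c *ecsv1alpha1.ECSCluster) *corev1.Pod {
+    return &corev1.Pod{
+        ObjectMeta: metav1.ObjectMeta{Name: c.Name + "-node", Namespace: c.Namespace},
+        Spec: corev1.PodSpec{
+            Containers: []corev1.Container{{Name: "ecs", Image: "ecs/node:latest"}},
+        },
+    }
+}
+```
+
+Because SetControllerReference records the ECSCluster as the owner of the Pod, deleting the ECSCluster lets Kubernetes garbage-collect the Pod without any extra code in the operator.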
+ + + diff --git a/ecs-operator/cluster-operator/.gitignore b/ecs-operator/cluster-operator/.gitignore new file mode 100644 index 0000000..0765074 --- /dev/null +++ b/ecs-operator/cluster-operator/.gitignore @@ -0,0 +1,172 @@ +.idea/ + +# Temporary Build Files +tmp/_output/ +tmp/_test + +# Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode,intellij + +### Emacs ### +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* + +# Org-mode +.org-id-locations +*_archive + +# flymake-mode +*_flymake.* + +# eshell files +/eshell/history +/eshell/lastdir + +# elpa packages +/elpa/ + +# reftex files +*.rel + +# AUCTeX auto folder +/auto/ + +# cask packages +.cask/ +dist/ + +# Flycheck +flycheck_*.el + +# server auth directory +/server/ + +# projectiles files +.projectile + +# directory configuration +.dir-locals.el + +### Go ### +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +### Intellij ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/vcs.xml +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +### Intellij Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +.idea/sonarlint + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim + +# Temporary +.netrwhist +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### VisualStudioCode ### +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json + + +# End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode,intellij + +bin/ + +# Ignore YAML files in the project root directory +/*.yaml + +# Operator SDK +deploy/test +deploy/test-pod.yaml +build/test-framework/ diff --git a/ecs-operator/cluster-operator/.travis.yml b/ecs-operator/cluster-operator/.travis.yml new file mode 100644 index 0000000..c540a74 --- /dev/null +++ b/ecs-operator/cluster-operator/.travis.yml @@ -0,0 +1,92 @@ +sudo: required + +language: go +go_import_path: github.com/ecs/ecs-operator +go: + - '1.11' + +env: + global: + - secure: 
"X4zdixrmuh/D4WwJ6btu0fJROq7fTPO5n4/jeyoxTQvrpwikycwf3ZpXS94u43rHx1ovc8vweVIc4Kur7MFAJDmFpPz5adjD7cCjqaRHo+Sw4a/gdNKizFEmZEBFlelynJ+cYekfF3rIFRws+u/8yWUQnSPl4495Qq1NKjrDTqoB6ZrQDxYdIPPpLAit+Aj9a2zQ4xPHyOv02SmfgL22wQhOx8SF78VNmqVah6klC8j3tAemHJQSBmUQ2fPVBpqAy3YsI7mpF9aSdBOzr7Dk1/cajOx4M5WrAxDPZ7+fSKD97SjICKqtM6MuX63O6Tg86zzdoEdvTWvwBwjWpbI3iCytJOCD8vT1QBPd9bLwZI7K5dElrW990HGrBBS4GmKTFykEE6PCNvxDHu1pG4N0vmLhZ3Hh4evZxA8xSnHRyYTLOaDkSe/vurlmsRJiW0GOEa6Fyz+xrD5y4l5MDkPVvaEPzWFZQf8v+l3OvvDvmHmqup4ADXE/XBYbVraTn8wPcQnvk5ueCvJWBOd/E4keGFsGbqDxQpvQRG/8aHWv32lj2HHK6/qbihXvTfJfRbrWli2BV0LMBL/OBIiuL1JeXycYZG3AtbnwgJVXPzs8kpwrmxkRYvbb3Kc9HXu9x+ajeWDYb978Rq6/0DfXhKFIiLrgUOxR2SK6eHDTcfUY+us=" + +stages: + - check + - unit + - e2e + - deploy + +jobs: + include: + - stage: check + name: Gofmt and License checks + script: + - make check + + - stage: unit + name: Unit tests + script: + - make test-unit + + - stage: e2e + name: End-to-end tests + if: type = pull_request OR branch = master OR tag IS present + services: + - docker + env: + - KUBECTL_VERSION=v1.12.0 + - OPERATOR_SDK_VERSION=v0.4.0 + - HELM_VERSION=v2.12.0 + - DEP_VERSION=v0.5.0 + - CLOUDSDK_CORE_DISABLE_PROMPTS=1 + - GOOGLE_APPLICATION_CREDENTIALS="$HOME/gcloud-service-key.json" + - GOOGLE_SERVICE_ACCOUNT=ecs-travis-service-account@ecs-dev.iam.gserviceaccount.com + - PROJECT_NAME=ecs-dev + - CLUSTER_NAME="ecs-operator-travis-$(date +'%Y%m%d%H%M%S')" + - CLUSTER_ZONE=us-central1-c + - CLUSTER_SIZE=5 + - CLUSTER_NODE_TYPE=n1-standard-2 + install: + - if [ ! -d "$HOME/google-cloud-sdk/bin" ]; then rm -rf $HOME/google-cloud-sdk; curl https://sdk.cloud.google.com | bash; fi + - source $HOME/google-cloud-sdk/path.bash.inc + - gcloud --quiet version + - gcloud --quiet components update + - curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/ + - curl -Lo dep https://github.com/golang/dep/releases/download/$DEP_VERSION/dep-linux-amd64 && chmod +x dep && sudo mv dep /usr/local/bin/ + - curl -Lo operator-sdk https://github.com/operator-framework/operator-sdk/releases/download/$OPERATOR_SDK_VERSION/operator-sdk-$OPERATOR_SDK_VERSION-x86_64-linux-gnu && chmod +x operator-sdk && sudo mv operator-sdk /usr/local/bin/ + - curl -Lo helm.tar.gz https://storage.googleapis.com/kubernetes-helm/helm-$HELM_VERSION-linux-amd64.tar.gz && tar xfz helm.tar.gz && sudo mv linux-amd64/{helm,tiller} /usr/local/bin/ + before_script: + - echo $GCLOUD_SERVICE_KEY | base64 --decode -i > $HOME/gcloud-service-key.json + - gcloud auth activate-service-account --key-file $HOME/gcloud-service-key.json + - gcloud --quiet config set project $PROJECT_NAME + - gcloud --quiet config set container/use_application_default_credentials True + - gcloud --quiet container clusters create $CLUSTER_NAME --num-nodes=$CLUSTER_SIZE --zone=$CLUSTER_ZONE --machine-type=$CLUSTER_NODE_TYPE + - gcloud --quiet container clusters get-credentials $CLUSTER_NAME --zone=$CLUSTER_ZONE + - kubectl config view + - kubectl config current-context + - kubectl get nodes -o wide + - kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=$GOOGLE_SERVICE_ACCOUNT + # Install Helm Tiller + - kubectl create serviceaccount --namespace kube-system tiller + - kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller + - helm init --service-account tiller --wait + # Install NFS provisioner + - helm install 
stable/nfs-server-provisioner + - kubectl -n default create -f test/e2e/resources/tier2.yaml + - JSONPATH='{.status.phase}'; until kubectl get pvc ecs-tier2 -o jsonpath="$JSONPATH" 2>&1 | grep -q "Bound"; do sleep 1; done + # Install ZooKeeper + - kubectl -n default create -f test/e2e/resources/zookeeper.yaml + - JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; until kubectl get pods -l kind=ZookeeperMember -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do sleep 1; done + # Show ECS dependencies + - kubectl -n default get pod,pvc,svc -o wide + script: + - make test-e2e + after_script: + - gcloud --quiet container clusters delete $CLUSTER_NAME --zone $CLUSTER_ZONE + + - stage: deploy + name: Push Docker image + if: type != pull_request AND tag IS present + services: + - docker + script: + - make push diff --git a/ecs-operator/cluster-operator/CONTRIBUTING.md b/ecs-operator/cluster-operator/CONTRIBUTING.md new file mode 100644 index 0000000..675560f --- /dev/null +++ b/ecs-operator/cluster-operator/CONTRIBUTING.md @@ -0,0 +1,5 @@ +# Contributing to ECS Operator + + Please check the [Contributing](https://github.com/ecs/ecs-operator/wiki/Contributing) wiki page. + + Happy hacking! diff --git a/ecs-operator/cluster-operator/Dockerfile b/ecs-operator/cluster-operator/Dockerfile new file mode 100644 index 0000000..777db47 --- /dev/null +++ b/ecs-operator/cluster-operator/Dockerfile @@ -0,0 +1,39 @@ +# +# Copyright (c) 2017 Dell Inc., or its subsidiaries. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +FROM golang:1.10.1-alpine3.7 as go-builder + +ARG PROJECT_NAME=ecs-operator +ARG REPO_PATH=github.com/ecs/${PROJECT_NAME} +ARG BUILD_PATH=${REPO_PATH}/cmd/manager + +# Build version and commit SHA should be passed in when performing docker build +ARG VERSION=0.0.0-localdev +ARG GIT_SHA=0000000 + +COPY pkg /go/src/${REPO_PATH}/pkg +COPY cmd /go/src/${REPO_PATH}/cmd +COPY vendor /go/src/${REPO_PATH}/vendor + +RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o ${GOBIN}/${PROJECT_NAME} \ + -ldflags "-X ${REPO_PATH}/pkg/version.Version=${VERSION} -X ${REPO_PATH}/pkg/version.GitSHA=${GIT_SHA}" \ + $BUILD_PATH + +# ============================================================================= +FROM alpine:3.7 AS final + +ARG PROJECT_NAME=ecs-operator +ARG REPO_PATH=github.com/ecs/$PROJECT_NAME + +COPY --from=go-builder ${GOBIN}/${PROJECT_NAME} /usr/local/bin/${PROJECT_NAME} + +RUN adduser -D ${PROJECT_NAME} +USER ${PROJECT_NAME} + +ENTRYPOINT ["/usr/local/bin/ecs-operator"] diff --git a/ecs-operator/cluster-operator/Gopkg.lock b/ecs-operator/cluster-operator/Gopkg.lock new file mode 100644 index 0000000..0b19831 --- /dev/null +++ b/ecs-operator/cluster-operator/Gopkg.lock @@ -0,0 +1,1035 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:fd1a7ca82682444a45424f6af37b1e0373f632e5a303441b111558ae8656a9b7" + name = "cloud.google.com/go" + packages = ["compute/metadata"] + pruneopts = "NT" + revision = "0ebda48a7f143b1cce9eb37a8c1106ac762a3430" + version = "v0.34.0" + +[[projects]] + digest = "1:75d2b55b13298745ec068057251d05d65bbae0a668201fe45ad6986551a55601" + name = "github.com/BurntSushi/toml" + packages = ["."] + pruneopts = "NT" + revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005" + version = "v0.3.1" + +[[projects]] + digest = "1:d8ebbd207f3d3266d4423ce4860c9f3794956306ded6c7ba312ecc69cdfbf04c" + name = "github.com/PuerkitoBio/purell" + packages = ["."] + pruneopts = "NT" + revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:8098cd40cd09879efbf12e33bcd51ead4a66006ac802cd563a66c4f3373b9727" + name = "github.com/PuerkitoBio/urlesc" + packages = ["."] + pruneopts = "NT" + revision = "de5bf2ad457846296e2031421a34e2568e304e35" + +[[projects]] + branch = "master" + digest = "1:c819830f4f5ef85874a90ac3cbcc96cd322c715f5c96fbe4722eacd3dafbaa07" + name = "github.com/beorn7/perks" + packages = ["quantile"] + pruneopts = "NT" + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" + +[[projects]] + digest = "1:4b8b5811da6970495e04d1f4e98bb89518cc3cfc3b3f456bdb876ed7b6c74049" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "NT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:e6f888d4be8ec0f05c50e2aba83da4948b58045dee54d03be81fa74ea673302c" + name = "github.com/emicklei/go-restful" + packages = [ + ".", + "log", + ] + pruneopts = "NT" + revision = "3eb9738c1697594ea6e71a7156a9bb32ed216cf0" + version = "v2.8.0" + +[[projects]] + digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" + name = "github.com/ghodss/yaml" + packages = ["."] + pruneopts = "NT" + revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" + version = "v1.0.0" + +[[projects]] + branch = "master" + digest = "1:d421af4c4fe51d399667d573982d663fe1fa67020a88d3ae43466ebfe8e2b5c9" + name = "github.com/go-logr/logr" + packages = ["."] + pruneopts = "NT" + revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" + +[[projects]] + digest = "1:340497a512995aa69c0add901d79a2096b3449d35a44a6f1f1115091a9f8c687" + name = "github.com/go-logr/zapr" + packages = ["."] + pruneopts = "NT" + revision = "7536572e8d55209135cd5e7ccf7fce43dca217ab" + version = "v0.1.0" + +[[projects]] + digest = "1:260f7ebefc63024c8dfe2c9f1a2935a89fa4213637a1f522f592f80c001cc441" + name = "github.com/go-openapi/jsonpointer" + packages = ["."] + pruneopts = "NT" + revision = "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004" + version = "v0.17.2" + +[[projects]] + digest = "1:98abd61947ff5c7c6fcfec5473d02a4821ed3a2dd99a4fbfdb7925b0dd745546" + name = "github.com/go-openapi/jsonreference" + packages = ["."] + pruneopts = "NT" + revision = "8483a886a90412cd6858df4ea3483dce9c8e35a3" + version = "v0.17.2" + +[[projects]] + digest = "1:dfab391de021809e0041f0ab5648da6b74dd16a685472a1b8c3dc06b3dca1ee2" + name = "github.com/go-openapi/spec" + packages = ["."] + pruneopts = "NT" + revision = "5bae59e25b21498baea7f9d46e9c147ec106a42e" + version = "v0.17.2" + +[[projects]] + digest = "1:983f95b2fae6fe8fdd361738325ed6090f4f3bd15ce4db745e899fb5b0fdfc46" + name = "github.com/go-openapi/swag" + packages = ["."] + pruneopts = "NT" + revision = "5899d5c5e619fda5fa86e14795a835f473ca284c" + version = "v0.17.2" + +[[projects]] + 
digest = "1:756ec597ae63e724366f1b393e9477d3e4d980baf1790a029494a336386e89f1" + name = "github.com/gobuffalo/envy" + packages = ["."] + pruneopts = "NT" + revision = "7e7ddcbb431d1d9e32121d7eeee8d68bdecd7081" + version = "v1.6.10" + +[[projects]] + digest = "1:2a9d5e367df8c95e780975ca1dd4010bef8e39a3777066d3880ce274b39d4b5a" + name = "github.com/gogo/protobuf" + packages = [ + "proto", + "sortkeys", + ] + pruneopts = "NT" + revision = "636bf0302bc95575d69441b25a2603156ffdddf1" + version = "v1.1.1" + +[[projects]] + branch = "master" + digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a" + name = "github.com/golang/glog" + packages = ["."] + pruneopts = "NT" + revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" + +[[projects]] + branch = "master" + digest = "1:aaedc94233e56ed57cdb04e3abfacc85c90c14082b62e3cdbe8ea72fc06ee035" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "NT" + revision = "c65c006176ff7ff98bb916961c7abbc6b0afc0aa" + +[[projects]] + digest = "1:d7cb4458ea8782e6efacd8f4940796ec559c90833509c436f40c4085b98156dd" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + ] + pruneopts = "NT" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" + +[[projects]] + branch = "master" + digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107" + name = "github.com/google/btree" + packages = ["."] + pruneopts = "NT" + revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" + +[[projects]] + branch = "master" + digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc" + name = "github.com/google/gofuzz" + packages = ["."] + pruneopts = "NT" + revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" + +[[projects]] + digest = "1:56a1f3949ebb7fa22fa6b4e4ac0fe0f77cc4faee5b57413e6fa9199a8458faf1" + name = "github.com/google/uuid" + packages = ["."] + pruneopts = "NT" + revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8" + version = "v1.1.0" + +[[projects]] + digest = "1:289332c13b80edfefc88397cce5266c16845dcf204fa2f6ac7e464ee4c7f6e96" + name = "github.com/googleapis/gnostic" + packages = [ + "OpenAPIv2", + "compiler", + "extensions", + ] + pruneopts = "NT" + revision = "7c663266750e7d82587642f65e60bc4083f1f84e" + version = "v0.2.0" + +[[projects]] + branch = "master" + digest = "1:97972f03fbf34ec4247ddc78ddb681389c468c020492aa32b109744a54fc0c14" + name = "github.com/gregjones/httpcache" + packages = [ + ".", + "diskcache", + ] + pruneopts = "NT" + revision = "c63ab54fda8f77302f8d414e19933f2b6026a089" + +[[projects]] + digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6" + name = "github.com/hashicorp/golang-lru" + packages = [ + ".", + "simplelru", + ] + pruneopts = "NT" + revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768" + version = "v0.5.0" + +[[projects]] + digest = "1:76efa3b55850d9caa14f8c0b3a951797f9bc2ffc283526073dcad1b06b6e02d3" + name = "github.com/hpcloud/tail" + packages = [ + ".", + "ratelimiter", + "util", + "watch", + "winfile", + ] + pruneopts = "NT" + revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5" + version = "v1.0.0" + +[[projects]] + digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3" + name = "github.com/imdario/mergo" + packages = ["."] + pruneopts = "NT" + revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4" + version = "v0.3.6" + +[[projects]] + digest = 
"1:f5b9328966ccea0970b1d15075698eff0ddb3e75889560aad2e9f76b289b536a" + name = "github.com/joho/godotenv" + packages = ["."] + pruneopts = "NT" + revision = "23d116af351c84513e1946b527c88823e476be13" + version = "v1.3.0" + +[[projects]] + digest = "1:1d39c063244ad17c4b18e8da1551163b6ffb52bd1640a49a8ec5c3b7bf4dbd5d" + name = "github.com/json-iterator/go" + packages = ["."] + pruneopts = "NT" + revision = "1624edc4454b8682399def8740d46db5e4362ba4" + version = "v1.1.5" + +[[projects]] + digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed" + name = "github.com/konsorten/go-windows-terminal-sequences" + packages = ["."] + pruneopts = "NT" + revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242" + version = "v1.0.1" + +[[projects]] + branch = "master" + digest = "1:7d9fcac7f1228470c4ea0ee31cdfb662a758c44df691e39b3e76c11d3e12ba8f" + name = "github.com/mailru/easyjson" + packages = [ + "buffer", + "jlexer", + "jwriter", + ] + pruneopts = "NT" + revision = "60711f1a8329503b04e1c88535f419d0bb440bff" + +[[projects]] + digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde" + name = "github.com/markbates/inflect" + packages = ["."] + pruneopts = "NT" + revision = "24b83195037b3bc61fcda2d28b7b0518bce293b6" + version = "v1.0.4" + +[[projects]] + branch = "master" + digest = "1:0e9bfc47ab9941ecc3344e580baca5deb4091177e84dd9773b48b38ec26b93d5" + name = "github.com/mattbaird/jsonpatch" + packages = ["."] + pruneopts = "NT" + revision = "81af80346b1a01caae0cbc27fd3c1ba5b11e189f" + +[[projects]] + digest = "1:ea1db000388d88b31db7531c83016bef0d6db0d908a07794bfc36aca16fbf935" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + pruneopts = "NT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + +[[projects]] + digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" + name = "github.com/modern-go/concurrent" + packages = ["."] + pruneopts = "NT" + revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" + version = "1.0.3" + +[[projects]] + digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6" + name = "github.com/modern-go/reflect2" + packages = ["."] + pruneopts = "NT" + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[[projects]] + digest = "1:0204417af2b5c56719f7e2809902c9a56ebb1b539d73ba520eeac84e98f21b72" + name = "github.com/onsi/ginkgo" + packages = [ + ".", + "config", + "internal/codelocation", + "internal/containernode", + "internal/failer", + "internal/leafnodes", + "internal/remote", + "internal/spec", + "internal/spec_iterator", + "internal/specrunner", + "internal/suite", + "internal/testingtproxy", + "internal/writer", + "reporters", + "reporters/stenographer", + "reporters/stenographer/support/go-colorable", + "reporters/stenographer/support/go-isatty", + "types", + ] + pruneopts = "NT" + revision = "2e1be8f7d90e9d3e3e58b0ce470f2f14d075406f" + version = "v1.7.0" + +[[projects]] + digest = "1:7efa6868c0394e8567b411d9160f10376d6f28926c5786d520f3603bc3e18198" + name = "github.com/onsi/gomega" + packages = [ + ".", + "format", + "internal/assertion", + "internal/asyncassertion", + "internal/oraclematcher", + "internal/testingtsupport", + "matchers", + "matchers/support/goraph/bipartitegraph", + "matchers/support/goraph/edge", + "matchers/support/goraph/node", + "matchers/support/goraph/util", + "types", + ] + pruneopts = "NT" + revision = "65fb64232476ad9046e57c26cd0bff3d3a8dc6cd" + version = "v1.4.3" + +[[projects]] + 
digest = "1:0b2dd5813eba320fd99869c1a5c4b54eb6950544259f2389c5b137be2168429a" + name = "github.com/operator-framework/operator-sdk" + packages = [ + "internal/util/fileutil", + "internal/util/k8sutil", + "internal/util/yamlutil", + "pkg/k8sutil", + "pkg/scaffold", + "pkg/scaffold/input", + "pkg/test", + "pkg/test/e2eutil", + "version", + ] + pruneopts = "NT" + revision = "cc5fe885869c181d820557bd296f092637fa70af" + version = "v0.4.0" + +[[projects]] + digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf" + name = "github.com/pborman/uuid" + packages = ["."] + pruneopts = "NT" + revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1" + version = "v1.2" + +[[projects]] + branch = "master" + digest = "1:bf2ac97824a7221eb16b096aecc1c390d4c8a4e49524386aaa2e2dd215cbfb31" + name = "github.com/petar/GoLLRB" + packages = ["llrb"] + pruneopts = "NT" + revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" + +[[projects]] + digest = "1:e4e9e026b8e4c5630205cd0208efb491b40ad40552e57f7a646bb8a46896077b" + name = "github.com/peterbourgon/diskv" + packages = ["."] + pruneopts = "NT" + revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" + version = "v2.0.1" + +[[projects]] + digest = "1:ec2a29e3bd141038ae5c3d3a4f57db0c341fcc1d98055a607aedd683aed124ee" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/internal", + "prometheus/promhttp", + ] + pruneopts = "NT" + revision = "505eaef017263e299324067d40ca2c48f6a2cf50" + version = "v0.9.2" + +[[projects]] + branch = "master" + digest = "1:c2cc5049e927e2749c0d5163c9f8d924880d83e84befa732b9aad0b6be227bed" + name = "github.com/prometheus/client_model" + packages = ["go"] + pruneopts = "NT" + revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8" + +[[projects]] + digest = "1:30261b5e263b5c4fb40571b53a41a99c96016c6b1b2c45c1cefd226fc3f6304b" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "NT" + revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250" + version = "v0.2.0" + +[[projects]] + branch = "master" + digest = "1:753d988fc383cc61173d5afdf94a149b853c75d399dafacfd93ba5b734e06044" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs", + ] + pruneopts = "NT" + revision = "316cf8ccfec56d206735d46333ca162eb374da8b" + +[[projects]] + digest = "1:4e63570205b765959739e2ef37add1d229cab7dbf70d80341a0608816120493b" + name = "github.com/rogpeppe/go-internal" + packages = [ + "modfile", + "module", + "semver", + ] + pruneopts = "NT" + revision = "d87f08a7d80821c797ffc8eb8f4e01675f378736" + version = "v1.0.0" + +[[projects]] + branch = "master" + digest = "1:b3f691dbcabd091f701954bb37bd7e7f1dedaad22c64a9c4ff7ca52fc87e0650" + name = "github.com/samuel/go-zookeeper" + packages = ["zk"] + pruneopts = "NT" + revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47" + +[[projects]] + digest = "1:cd2f2cba5b7ffafd0412fb647ff4bcff170292de57270f05fbbf391e3eb9566b" + name = "github.com/sirupsen/logrus" + packages = ["."] + pruneopts = "NT" + revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95" + version = "v1.2.0" + +[[projects]] + digest = "1:2a7c79c506479dc73c0100982a40bacc89e06d96dc458eb41c9b6aa44d9e0b6d" + name = "github.com/spf13/afero" + packages = [ + ".", + "mem", + ] + pruneopts = "NT" + revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd" + version = "v1.1.2" + +[[projects]] + digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779" + name = 
"github.com/spf13/pflag" + packages = ["."] + pruneopts = "NT" + revision = "298182f68c66c05229eb03ac171abe6e309ee79a" + version = "v1.0.3" + +[[projects]] + digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7" + name = "go.uber.org/atomic" + packages = ["."] + pruneopts = "NT" + revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" + version = "v1.3.2" + +[[projects]] + digest = "1:58ca93bdf81bac106ded02226b5395a0595d5346cdc4caa8d9c1f3a5f8f9976e" + name = "go.uber.org/multierr" + packages = ["."] + pruneopts = "NT" + revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" + version = "v1.1.0" + +[[projects]] + digest = "1:572fa4496563920f3e3107a2294cf2621d6cc4ffd03403fb6397b1bab9fa082a" + name = "go.uber.org/zap" + packages = [ + ".", + "buffer", + "internal/bufferpool", + "internal/color", + "internal/exit", + "zapcore", + ] + pruneopts = "NT" + revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982" + version = "v1.9.1" + +[[projects]] + branch = "master" + digest = "1:d6d3b59b8c4ceb6a7db2f20169719e57a8dcfa2c055b4418feb3fcc7bbd1a936" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + pruneopts = "NT" + revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447" + +[[projects]] + branch = "master" + digest = "1:b39fe73cabf4ae7600e25b0d116bb884a52d475e019bf583d03c08d98a567350" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp", + "html", + "html/atom", + "html/charset", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + ] + pruneopts = "NT" + revision = "351d144fa1fc0bd934e2408202be0c29f25e35a0" + +[[projects]] + branch = "master" + digest = "1:bdb664c89389d18d2aa69fb3b61fe5e2effc09e55b333a56e3cb071026418e33" + name = "golang.org/x/oauth2" + packages = [ + ".", + "google", + "internal", + "jws", + "jwt", + ] + pruneopts = "NT" + revision = "d668ce993890a79bda886613ee587a69dd5da7a6" + +[[projects]] + branch = "master" + digest = "1:0461030328ef9d2e0e38a2bc5febc8ce585e03db950028a6ab3d8d8ca1df151c" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows", + ] + pruneopts = "NT" + revision = "a5c9d58dba9a56f97aaa86f55e638b718c5a6c42" + +[[projects]] + digest = "1:8c74f97396ed63cc2ef04ebb5fc37bb032871b8fd890a25991ed40974b00cd2a" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "encoding", + "encoding/charmap", + "encoding/htmlindex", + "encoding/internal", + "encoding/internal/identifier", + "encoding/japanese", + "encoding/korean", + "encoding/simplifiedchinese", + "encoding/traditionalchinese", + "encoding/unicode", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "internal/utf8internal", + "language", + "runes", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + "width", + ] + pruneopts = "NT" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" + name = "golang.org/x/time" + packages = ["rate"] + pruneopts = "NT" + revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd" + +[[projects]] + branch = "master" + digest = "1:9c4e8d4c649b62e4eee87faf8b9aee75545e064ab05591bfc9ebaa9412467cbc" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "imports", + "internal/fastwalk", + "internal/gopathwalk", + ] + pruneopts = "NT" + revision = "22934f0fdb6201c132a3dc6120150dcb1646d74c" + +[[projects]] + digest = 
"1:2a4972ee51c3b9dfafbb3451fa0552e7a198d9d12c721bfc492050fe2f72e0f6" + name = "google.golang.org/appengine" + packages = [ + ".", + "internal", + "internal/app_identity", + "internal/base", + "internal/datastore", + "internal/log", + "internal/modules", + "internal/remote_api", + "internal/urlfetch", + "urlfetch", + ] + pruneopts = "NT" + revision = "4a4468ece617fc8205e99368fa2200e9d1fad421" + version = "v1.3.0" + +[[projects]] + digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129" + name = "gopkg.in/fsnotify.v1" + packages = ["."] + pruneopts = "NT" + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz" + version = "v1.4.7" + +[[projects]] + digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" + name = "gopkg.in/inf.v0" + packages = ["."] + pruneopts = "NT" + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" + +[[projects]] + branch = "v1" + digest = "1:8fb1ccb16a6cfecbfdfeb84d8ea1cc7afa8f9ef16526bc2326f72d993e32cef1" + name = "gopkg.in/tomb.v1" + packages = ["."] + pruneopts = "NT" + revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" + +[[projects]] + digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "NT" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" + +[[projects]] + digest = "1:b3f8152a68d73095a40fdcf329a93fc42e8eadb3305171df23fdb6b4e41a6417" + name = "k8s.io/api" + packages = [ + "admission/v1beta1", + "admissionregistration/v1alpha1", + "admissionregistration/v1beta1", + "apps/v1", + "apps/v1beta1", + "apps/v1beta2", + "authentication/v1", + "authentication/v1beta1", + "authorization/v1", + "authorization/v1beta1", + "autoscaling/v1", + "autoscaling/v2beta1", + "autoscaling/v2beta2", + "batch/v1", + "batch/v1beta1", + "batch/v2alpha1", + "certificates/v1beta1", + "coordination/v1beta1", + "core/v1", + "events/v1beta1", + "extensions/v1beta1", + "networking/v1", + "policy/v1beta1", + "rbac/v1", + "rbac/v1alpha1", + "rbac/v1beta1", + "scheduling/v1alpha1", + "scheduling/v1beta1", + "settings/v1alpha1", + "storage/v1", + "storage/v1alpha1", + "storage/v1beta1", + ] + pruneopts = "NT" + revision = "b503174bad5991eb66f18247f52e41c3258f6348" + version = "kubernetes-1.12.3" + +[[projects]] + digest = "1:82b4765488fd2a8bcefb93e196fdbfe342d33b16ae073a6f51bb4fb13e81e102" + name = "k8s.io/apiextensions-apiserver" + packages = [ + "pkg/apis/apiextensions", + "pkg/apis/apiextensions/v1beta1", + "pkg/client/clientset/clientset/scheme", + ] + pruneopts = "NT" + revision = "0cd23ebeb6882bd1cdc2cb15fc7b2d72e8a86a5b" + version = "kubernetes-1.12.3" + +[[projects]] + digest = "1:868de7cbaa0ecde6dc231c1529a10ae01bb05916095c0c992186e2a5cac57e79" + name = "k8s.io/apimachinery" + packages = [ + "pkg/api/errors", + "pkg/api/meta", + "pkg/api/resource", + "pkg/apis/meta/internalversion", + "pkg/apis/meta/v1", + "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1beta1", + "pkg/conversion", + "pkg/conversion/queryparams", + "pkg/fields", + "pkg/labels", + "pkg/runtime", + "pkg/runtime/schema", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", + "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", + "pkg/selection", + "pkg/types", + "pkg/util/cache", + "pkg/util/clock", + "pkg/util/diff", + "pkg/util/errors", + "pkg/util/framer", + 
"pkg/util/intstr", + "pkg/util/json", + "pkg/util/mergepatch", + "pkg/util/naming", + "pkg/util/net", + "pkg/util/runtime", + "pkg/util/sets", + "pkg/util/strategicpatch", + "pkg/util/uuid", + "pkg/util/validation", + "pkg/util/validation/field", + "pkg/util/wait", + "pkg/util/yaml", + "pkg/version", + "pkg/watch", + "third_party/forked/golang/json", + "third_party/forked/golang/reflect", + ] + pruneopts = "NT" + revision = "eddba98df674a16931d2d4ba75edc3a389bf633a" + version = "kubernetes-1.12.3" + +[[projects]] + digest = "1:00089f60de414edb1a51e63efde2480ce87c95d2cb3536ea240afe483905d736" + name = "k8s.io/client-go" + packages = [ + "discovery", + "discovery/cached", + "dynamic", + "kubernetes", + "kubernetes/scheme", + "kubernetes/typed/admissionregistration/v1alpha1", + "kubernetes/typed/admissionregistration/v1beta1", + "kubernetes/typed/apps/v1", + "kubernetes/typed/apps/v1beta1", + "kubernetes/typed/apps/v1beta2", + "kubernetes/typed/authentication/v1", + "kubernetes/typed/authentication/v1beta1", + "kubernetes/typed/authorization/v1", + "kubernetes/typed/authorization/v1beta1", + "kubernetes/typed/autoscaling/v1", + "kubernetes/typed/autoscaling/v2beta1", + "kubernetes/typed/autoscaling/v2beta2", + "kubernetes/typed/batch/v1", + "kubernetes/typed/batch/v1beta1", + "kubernetes/typed/batch/v2alpha1", + "kubernetes/typed/certificates/v1beta1", + "kubernetes/typed/coordination/v1beta1", + "kubernetes/typed/core/v1", + "kubernetes/typed/events/v1beta1", + "kubernetes/typed/extensions/v1beta1", + "kubernetes/typed/networking/v1", + "kubernetes/typed/policy/v1beta1", + "kubernetes/typed/rbac/v1", + "kubernetes/typed/rbac/v1alpha1", + "kubernetes/typed/rbac/v1beta1", + "kubernetes/typed/scheduling/v1alpha1", + "kubernetes/typed/scheduling/v1beta1", + "kubernetes/typed/settings/v1alpha1", + "kubernetes/typed/storage/v1", + "kubernetes/typed/storage/v1alpha1", + "kubernetes/typed/storage/v1beta1", + "pkg/apis/clientauthentication", + "pkg/apis/clientauthentication/v1alpha1", + "pkg/apis/clientauthentication/v1beta1", + "pkg/version", + "plugin/pkg/client/auth/exec", + "plugin/pkg/client/auth/gcp", + "rest", + "rest/watch", + "restmapper", + "third_party/forked/golang/template", + "tools/auth", + "tools/cache", + "tools/clientcmd", + "tools/clientcmd/api", + "tools/clientcmd/api/latest", + "tools/clientcmd/api/v1", + "tools/leaderelection", + "tools/leaderelection/resourcelock", + "tools/metrics", + "tools/pager", + "tools/record", + "tools/reference", + "transport", + "util/buffer", + "util/cert", + "util/connrotation", + "util/flowcontrol", + "util/homedir", + "util/integer", + "util/jsonpath", + "util/retry", + "util/workqueue", + ] + pruneopts = "NT" + revision = "d082d5923d3cc0bfbb066ee5fbdea3d0ca79acf8" + version = "kubernetes-1.12.3" + +[[projects]] + digest = "1:4e2addcdbe0330f43800c1fcb905fc7a21b86415dfcca619e5c606c87257af1b" + name = "k8s.io/code-generator" + packages = [ + "cmd/client-gen", + "cmd/client-gen/args", + "cmd/client-gen/generators", + "cmd/client-gen/generators/fake", + "cmd/client-gen/generators/scheme", + "cmd/client-gen/generators/util", + "cmd/client-gen/path", + "cmd/client-gen/types", + "cmd/conversion-gen", + "cmd/conversion-gen/args", + "cmd/conversion-gen/generators", + "cmd/deepcopy-gen", + "cmd/deepcopy-gen/args", + "cmd/defaulter-gen", + "cmd/defaulter-gen/args", + "cmd/informer-gen", + "cmd/informer-gen/args", + "cmd/informer-gen/generators", + "cmd/lister-gen", + "cmd/lister-gen/args", + "cmd/lister-gen/generators", + "cmd/openapi-gen", + 
"cmd/openapi-gen/args", + "pkg/util", + ] + pruneopts = "T" + revision = "3dcf91f64f638563e5106f21f50c31fa361c918d" + version = "kubernetes-1.12.3" + +[[projects]] + branch = "master" + digest = "1:5edbd655d7ee65178fd5750bda9a3d3cd7fb96291937926f4969e6b2dfbc5743" + name = "k8s.io/gengo" + packages = [ + "args", + "examples/deepcopy-gen/generators", + "examples/defaulter-gen/generators", + "examples/set-gen/sets", + "generator", + "namer", + "parser", + "types", + ] + pruneopts = "NT" + revision = "fd15ee9cc2f77baa4f31e59e6acbf21146455073" + +[[projects]] + digest = "1:f3b42f307c7f49a1a7276c48d4b910db76e003220e88797f7acd41e3a9277ddf" + name = "k8s.io/klog" + packages = ["."] + pruneopts = "NT" + revision = "a5bc97fbc634d635061f3146511332c7e313a55a" + version = "v0.1.0" + +[[projects]] + branch = "master" + digest = "1:9ac2fdede4a8304e3b00ea3b36526536339f306d0306e320fc74f6cefeead18e" + name = "k8s.io/kube-openapi" + packages = [ + "cmd/openapi-gen/args", + "pkg/common", + "pkg/generators", + "pkg/generators/rules", + "pkg/util/proto", + "pkg/util/sets", + ] + pruneopts = "NT" + revision = "0317810137be915b9cf888946c6e115c1bfac693" + +[[projects]] + digest = "1:e03ddaf9f31bccbbb8c33eabad2c85025a95ca98905649fd744e0a54c630a064" + name = "sigs.k8s.io/controller-runtime" + packages = [ + "pkg/cache", + "pkg/cache/internal", + "pkg/client", + "pkg/client/apiutil", + "pkg/client/config", + "pkg/controller", + "pkg/controller/controllerutil", + "pkg/event", + "pkg/handler", + "pkg/internal/controller", + "pkg/internal/controller/metrics", + "pkg/internal/recorder", + "pkg/leaderelection", + "pkg/manager", + "pkg/metrics", + "pkg/patch", + "pkg/predicate", + "pkg/reconcile", + "pkg/recorder", + "pkg/runtime/inject", + "pkg/runtime/log", + "pkg/runtime/scheme", + "pkg/runtime/signals", + "pkg/source", + "pkg/source/internal", + "pkg/webhook/admission", + "pkg/webhook/admission/types", + "pkg/webhook/types", + ] + pruneopts = "NT" + revision = "c63ebda0bf4be5f0a8abd4003e4ea546032545ba" + version = "v0.1.8" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/onsi/ginkgo", + "github.com/onsi/gomega", + "github.com/operator-framework/operator-sdk/pkg/k8sutil", + "github.com/operator-framework/operator-sdk/pkg/test", + "github.com/operator-framework/operator-sdk/pkg/test/e2eutil", + "github.com/operator-framework/operator-sdk/version", + "github.com/samuel/go-zookeeper/zk", + "github.com/sirupsen/logrus", + "k8s.io/api/apps/v1", + "k8s.io/api/batch/v1", + "k8s.io/api/core/v1", + "k8s.io/api/policy/v1beta1", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/api/resource", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/labels", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/types", + "k8s.io/apimachinery/pkg/util/intstr", + "k8s.io/apimachinery/pkg/util/wait", + "k8s.io/client-go/plugin/pkg/client/auth/gcp", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/conversion-gen", + "k8s.io/code-generator/cmd/deepcopy-gen", + "k8s.io/code-generator/cmd/defaulter-gen", + "k8s.io/code-generator/cmd/informer-gen", + "k8s.io/code-generator/cmd/lister-gen", + "k8s.io/code-generator/cmd/openapi-gen", + "k8s.io/gengo/args", + "sigs.k8s.io/controller-runtime/pkg/client", + "sigs.k8s.io/controller-runtime/pkg/client/config", + "sigs.k8s.io/controller-runtime/pkg/controller", + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil", + 
"sigs.k8s.io/controller-runtime/pkg/handler", + "sigs.k8s.io/controller-runtime/pkg/manager", + "sigs.k8s.io/controller-runtime/pkg/reconcile", + "sigs.k8s.io/controller-runtime/pkg/runtime/scheme", + "sigs.k8s.io/controller-runtime/pkg/runtime/signals", + "sigs.k8s.io/controller-runtime/pkg/source", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/ecs-operator/cluster-operator/Gopkg.toml b/ecs-operator/cluster-operator/Gopkg.toml new file mode 100644 index 0000000..3075005 --- /dev/null +++ b/ecs-operator/cluster-operator/Gopkg.toml @@ -0,0 +1,65 @@ +# Force dep to vendor the code generators, which aren't imported just used at dev time. +required = [ + "k8s.io/code-generator/cmd/defaulter-gen", + "k8s.io/code-generator/cmd/deepcopy-gen", + "k8s.io/code-generator/cmd/conversion-gen", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/lister-gen", + "k8s.io/code-generator/cmd/informer-gen", + "k8s.io/code-generator/cmd/openapi-gen", + "k8s.io/gengo/args", +] + +[[override]] + name = "github.com/sirupsen/logrus" + version = "v1.2.0" + +[[override]] + name = "k8s.io/code-generator" + version = "kubernetes-1.12.3" + +[[override]] + name = "k8s.io/api" + version = "kubernetes-1.12.3" + +[[override]] + name = "k8s.io/apiextensions-apiserver" + version = "kubernetes-1.12.3" + +[[override]] + name = "k8s.io/apiserver" + version = "kubernetes-1.12.3" + +[[override]] + name = "k8s.io/apimachinery" + version = "kubernetes-1.12.3" + +[[override]] + name = "k8s.io/client-go" + version = "kubernetes-1.12.3" + +[[override]] + name = "k8s.io/cli-runtime" + version = "kubernetes-1.12.3" + +[[override]] + name = "sigs.k8s.io/controller-runtime" + version = "=v0.1.8" + +[[override]] + source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz" + name = "gopkg.in/fsnotify.v1" + +[[constraint]] + name = "github.com/operator-framework/operator-sdk" + # The version rule is used for a specific release and the master branch for in between releases. + # branch = "v0.4.x" #osdk_branch_annotation + version = "=v0.4.0" #osdk_version_annotation + +[prune] + go-tests = true + non-go = true + + [[prune.project]] + name = "k8s.io/code-generator" + non-go = false diff --git a/ecs-operator/cluster-operator/LICENSE b/ecs-operator/cluster-operator/LICENSE new file mode 100644 index 0000000..5c304d1 --- /dev/null +++ b/ecs-operator/cluster-operator/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ecs-operator/cluster-operator/Makefile b/ecs-operator/cluster-operator/Makefile new file mode 100644 index 0000000..a0447c6 --- /dev/null +++ b/ecs-operator/cluster-operator/Makefile @@ -0,0 +1,68 @@ +# Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+SHELL=/bin/bash -o pipefail
+
+PROJECT_NAME=ecs-operator
+REPO=ecs/$(PROJECT_NAME)
+VERSION=$(shell git describe --always --tags --dirty | sed "s/\(.*\)-g`git rev-parse --short HEAD`/\1/")
+GIT_SHA=$(shell git rev-parse --short HEAD)
+TEST_IMAGE=$(REPO)-testimages:$(VERSION)
+GOOS=linux
+GOARCH=amd64
+
+.PHONY: all dep build check clean test
+
+all: check test build
+
+dep:
+	dep ensure -v
+
+build: build-go build-image
+
+build-go:
+	CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) go build \
+	-ldflags "-X github.com/$(REPO)/pkg/version.Version=$(VERSION) -X github.com/$(REPO)/pkg/version.GitSHA=$(GIT_SHA)" \
+	-o bin/$(PROJECT_NAME) cmd/manager/main.go
+
+build-image:
+	docker build --build-arg VERSION=$(VERSION) --build-arg GIT_SHA=$(GIT_SHA) -t $(REPO):$(VERSION) .
+	docker tag $(REPO):$(VERSION) $(REPO):latest
+
+test: test-unit test-e2e
+
+test-unit:
+	go test $$(go list ./... | grep -v /vendor/ | grep -v /test/e2e )
+
+test-e2e: test-e2e-remote
+
+test-e2e-remote: login
+	operator-sdk build $(TEST_IMAGE) --enable-tests
+	docker push $(TEST_IMAGE)
+	operator-sdk test local ./test/e2e --namespace default --image $(TEST_IMAGE) --go-test-flags "-v -timeout 0"
+
+test-e2e-local:
+	operator-sdk test local ./test/e2e --namespace default --up-local --go-test-flags "-v -timeout 0"
+
+login:
+	@docker login -u "$(DOCKER_USER)" -p "$(DOCKER_PASS)"
+
+push: build login
+	docker push $(REPO):$(VERSION)
+	docker push $(REPO):latest
+
+clean:
+	rm -f bin/$(PROJECT_NAME)
+
+check: check-format check-license
+
+check-format:
+	./scripts/check_format.sh
+
+check-license:
+	./scripts/check_license.sh
diff --git a/ecs-operator/cluster-operator/README.md b/ecs-operator/cluster-operator/README.md
new file mode 100644
index 0000000..fa8570f
--- /dev/null
+++ b/ecs-operator/cluster-operator/README.md
@@ -0,0 +1,409 @@
+# ECS cluster-operator
+
+[![Build Status](https://travis-ci.org/ecs/cluster-operator.svg?branch=master)](https://travis-ci.org/ecs/cluster-operator)
+[![CircleCI](https://circleci.com/gh/ecs/cluster-operator.svg?style=svg)](https://circleci.com/gh/ecs/cluster-operator)
+
+The ECS Cluster Operator deploys and configures an ECS cluster on
+Kubernetes.
+
+For quick installation of the cluster operator, use the [cluster operator helm
+chart](https://github.com/ecs/charts/tree/master/stable/ecscluster-operator).
+
+## Prerequisites
+
+* Kubernetes 1.9+
+* Kubernetes must be configured to allow the following (configured by default in 1.10+):
+  * Privileged mode containers (enabled by default)
+  * Feature gate: MountPropagation=true. This can be done by appending
+    `--feature-gates MountPropagation=true` to the kube-apiserver and kubelet
+    services.
+
+Refer to the [ECS prerequisites docs](https://www.dellemc.com/en-us/collaterals/unauth/data-sheets/products/storage/h13117-emc-ecs-appliance-ss.pdf)
+for more information.
+
+## Setup/Development
+
+1. Install [operator-sdk](https://github.com/operator-framework/operator-sdk/tree/master#quick-start).
+2. Run `operator-sdk generate k8s` if there is a change in the API types.
+3. Build the operator container with `operator-sdk build ecs/cluster-operator:`
+4. Apply the manifests in `deploy/` to install the operator:
+   * Apply `namespace.yaml` to create the `ecs-operator` namespace.
+   * Apply `service_account.yaml`, `role.yaml` and `role_binding.yaml` to create
+     a service account and to grant it the required permissions.
+   * Apply `crds/*_crd.yaml` to define the custom resources.
+   * Apply `operator.yaml` to install the operator. Change the container image
+     in this file when installing a new operator.
+   * Apply `crds/*_ecscluster_cr.yaml` to create an `ECSCluster`
+     custom resource.
+
+**NOTE**: Installing ECS on Minikube is not currently supported due to
+missing [kernel prerequisites](https://www.dellemc.com/en-us/collaterals/unauth/data-sheets/products/storage/h13117-emc-ecs-appliance-ss.pdf).
+
+For development, run the operator outside of the k8s cluster by running:
+
+```bash
+make local-run
+```
+
+Build the operator container image:
+
+```bash
+make image/cluster-operator OPERATOR_IMAGE=ecs/cluster-operator:test
+```
+
+This builds all the components and copies the binaries into the same container.
+
+After creating a resource, query the resource:
+
+```bash
+$ kubectl get ecscluster
+NAME          READY     STATUS    AGE
+example-ecs   3/3       Running   4m
+```
+
+## Inspect an ECSCluster Resource
+
+Get all the details about the cluster:
+
+```bash
+$ kubectl describe ecscluster/example-ecs
+Name:         example-ecs
+Namespace:    default
+Labels:
+Annotations:  kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"dellemc.com/v1","kind":"ECSCluster","metadata":{"annotations":{},"name":"example-ecs","namespace":"default"},"spec":{"...
+API Version:  dellemc.com/v1
+Kind:         ECSCluster
+Metadata:
+  Creation Timestamp:  2018-07-21T12:57:11Z
+  Generation:          1
+  Resource Version:    10939030
+  Self Link:           /apis/dellemc.com/v1/namespaces/default/ecsclusters/example-ecs
+  UID:                 955b24a4-8ce5-11e8-956a-1866da35eee2
+Spec:
+  Join:  test07
+Status:
+  Node Health Status:
+    ...
+    ...
+  Nodes:
+    test09
+    test08
+    test07
+  Phase:  Running
+  Ready:  3/3
+Events:
+```
+
+## ECSCluster Resource Configuration
+
+Once the ECS operator is running, an ECS cluster can be deployed by
+creating a Cluster Configuration. The parameters specified in the configuration
+will define how ECS is deployed; the rest of the installation details are
+handled by the operator.
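+
+As a concrete starting point, here is a minimal `ECSCluster` manifest. This is
+a sketch assembled from fields used elsewhere in this README (the same
+`secretRefName`/`secretRefNamespace` pair appears in the CSI examples below);
+every other parameter falls back to the defaults listed in the table that
+follows.
+
+```yaml
+apiVersion: "dellemc.com/v1"
+kind: "ECSCluster"
+metadata:
+  name: "example-ecs"
+  namespace: "default"
+spec:
+  # Reference to the secret that holds the ECS API credentials;
+  # it is assumed to already exist in the given namespace.
+  secretRefName: "ecs-api"
+  secretRefNamespace: "default"
+```
+
+The following table lists the configurable spec
+parameters of the ECSCluster custom resource and their default values.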
+
+Parameter | Description | Default
+--------- | ----------- | -------
+`secretRefName` | Reference name of ecs secret |
+`secretRefNamespace` | Namespace of ecs secret |
+`namespace` | Namespace where ecs cluster resources are created | `ecs`
+`images.nodeContainer` | ECS node container image | `ecs/node:1.1.0`
+`images.initContainer` | ECS init container image | `ecs/init:0.1`
+`images.csiNodeDriverRegistrarContainer` | CSI Node Driver Registrar Container image | `quay.io/k8scsi/csi-node-driver-registrar:v1.0.1`
+`images.csiClusterDriverRegistrarContainer` | CSI Cluster Driver Registrar Container image | `quay.io/k8scsi/csi-cluster-driver-registrar:v1.0.1`
+`images.csiExternalProvisionerContainer` | CSI External Provisioner Container image | `ecs/csi-provisioner:v1.0.1`
+`images.csiExternalAttacherContainer` | CSI External Attacher Container image | `quay.io/k8scsi/csi-attacher:v1.0.1`
+`csi.enable` | Enable CSI setup | `false`
+`csi.enableProvisionCreds` | Enable CSI provision credentials | `false`
+`csi.enableControllerPublishCreds` | Enable CSI controller publish credentials | `false`
+`csi.enableNodePublishCreds` | Enable CSI node publish credentials | `false`
+`service.name` | Name of the Service used by the cluster | `ecs`
+`service.type` | Type of the Service used by the cluster | `ClusterIP`
+`service.externalPort` | External port of the Service used by the cluster | `5705`
+`service.internalPort` | Internal port of the Service used by the cluster | `5705`
+`service.annotations` | Annotations of the Service used by the cluster |
+`ingress.enable` | Enable ingress for the cluster | `false`
+`ingress.hostname` | Hostname to be used in cluster ingress | `ecs.local`
+`ingress.tls` | Enable TLS for the ingress | `false`
+`ingress.annotations` | Annotations of the ingress used by the cluster |
+`sharedDir` | Path to be shared with kubelet container when deployed as a pod | `/var/lib/kubelet/plugins/kubernetes.io~ecs`
+`kvBackend.address` | Comma-separated list of addresses of external key-value store. (`1.2.3.4:2379,2.3.4.5:2379`) |
+`kvBackend.backend` | Name of the key-value store to use. Set to `etcd` for external key-value store. | `embedded`
+`pause` | Pause the operator for cluster maintenance | `false`
+`debug` | Enable debug mode for all the cluster nodes | `false`
+`disableFencing` | Disable Pod fencing | `false`
+`disableTelemetry` | Disable telemetry reports | `false`
+`nodeSelectorTerms` | Set node selector for ecs pod placement |
+`tolerations` | Set pod tolerations for ecs pod placement |
+`resources` | Set resource requirements for the containers |
+
+## Upgrading an ECS Cluster
+
+An existing ECS cluster can be upgraded to a new version of ECS by
+creating an Upgrade Configuration. The cluster-operator takes care of
+downloading the new container image and updating all the nodes to the new
+version of ECS.
+An example `ECSUpgrade` resource is [ecs_v1_ecsupgrade_cr.yaml](/deploy/crds/ecs_v1_ecsupgrade_cr.yaml).
+
+Only offline upgrades are currently supported by the cluster-operator. During the
+upgrade, ECS maintenance mode is enabled, the applications that use
+ECS volumes are scaled down, and the whole ECS cluster is restarted
+with the new version. Once the ECS cluster becomes usable, the applications
+are scaled up to their previous configuration. Once the upgrade is complete, make
+sure to delete the upgrade resource to put the ECS cluster back in normal mode.
+This will disable the maintenance mode.
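+
+As a sketch, an upgrade can be requested with a resource like the one below.
+The field values are assembled from the `kubectl describe` output shown next
+and the parameter table that follows; the image tag is only an example:
+
+```yaml
+apiVersion: "dellemc.com/v1"
+kind: "ECSUpgrade"
+metadata:
+  name: "example-ecsupgrade"
+  namespace: "default"
+spec:
+  # ECS node container image to upgrade the cluster to.
+  newImage: ecs/node:1.0.0
+```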
+
+Once an upgrade resource is created, events related to the upgrade can be
+viewed in the upgrade object description. All status updates and any errors
+encountered during the upgrade are posted as events.
+
+```bash
+$ kubectl describe ecsupgrades example-ecsupgrade
+Name:         example-ecsupgrade
+Namespace:    default
+Labels:
+Annotations:  kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"dellemc.com/v1","kind":"ECSUpgrade","metadata":{"annotations":{},"name":"example-ecsupgrade","namespace":"default"},...
+API Version:  dellemc.com/v1
+Kind:         ECSUpgrade
+...
+Spec:
+  New Image:  ecs/node:1.0.0
+Events:
+  Type    Reason            Age   From          Message
+  ----    ------            ----  ----          -------
+  Normal  PullImage         4m    ecs-upgrader  Pulling the new container image
+  Normal  PauseClusterCtrl  2m    ecs-upgrader  Pausing the cluster controller and enabling cluster maintenance mode
+  Normal  UpgradeInit       2m    ecs-upgrader  ECS upgrade of cluster example-ecs started
+  Normal  UpgradeComplete   0s    ecs-upgrader  ECS upgraded to ecs/node:1.0.0. Delete upgrade object to disable cluster maintenance mode
+```
+
+## ECSUpgrade Resource Configuration
+
+The following table lists the configurable spec parameters of the
+ECSUpgrade custom resource and their default values.
+
+Parameter | Description | Default
+--------- | ----------- | -------
+`newImage` | ECS node container image to upgrade to |
+
+## Cleanup Old Configurations
+
+ECS creates and saves its files at `/var/lib/ecs` on the hosts. This directory
+also contains some of the cluster's configuration. To do a fresh install of
+ECS, these files need to be deleted.
+
+__WARNING__: This will delete any existing data, and the data won't be recoverable.
+
+__NOTE__: When using an external etcd, the data related to ecs should also
+be removed.
+
+```bash
+ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints http://ecs-etcd-server:2379 del --prefix ecs
+```
+
+The cluster-operator provides a `Job` resource that can execute certain tasks on
+all nodes or on selected nodes. This can be used to easily perform cleanup
+tasks. An example would be to create a `Job` resource:
+
+```yaml
+apiVersion: dellemc.com/v1
+kind: Job
+metadata:
+  name: cleanup-job
+spec:
+  image: ecs/cleanup:v0.0.2
+  args: ["/var/lib/ecs"]
+  mountPath: "/var/lib"
+  hostPath: "/var/lib"
+  completionWord: "done"
+  nodeSelectorTerms:
+    - matchExpressions:
+      - key: node-role.kubernetes.io/worker
+        operator: In
+        values:
+        - "true"
+```
+
+When applied, this job will run the `ecs/cleanup` container on the nodes that
+have the label `node-role.kubernetes.io/worker` with value `"true"`, mounting
+`/var/lib` and passing the argument `/var/lib/ecs`. This will run
+`rm -rf /var/lib/ecs` on the selected nodes and clean up all the ecs
+files. To run it on all the nodes, remove the `nodeSelectorTerms` attribute.
+On completion, the resource description shows that the task is complete and
+the resource can be deleted.
+
+```bash
+$ kubectl describe jobs.dellemc.com cleanup-job
+Name:         cleanup-job
+Namespace:    default
+...
+...
+Spec:
+  Completion Word:
+  Args:
+    /var/lib/ecs
+  Host Path:  /var/lib
+  Image:      ecs/cleanup:v0.0.2
+  ...
+Status:
+  Completed:  true
+Events:
+  Type    Reason        Age   From                 Message
+  ----    ------        ----  ----                 -------
+  Normal  JobCompleted  39s   ecscluster-operator  Job Completed. Safe to delete.
+```
+
+Deleting the resource will terminate all the pods that were created to run the
+task.
+
+Internally, this `Job` is backed by a controller that creates pods using a
+DaemonSet. Job containers have to be built in a specific way to achieve this
+behavior.
+
+In the above example, the cleanup container runs a shell script (`script.sh`):
+
+```bash
+#!/bin/ash
+
+set -euo pipefail
+
+# Gracefully handle the TERM signal sent when deleting the daemonset
+trap 'exit' TERM
+
+# This is the main command that's run by this script on
+# all the nodes.
+rm -rf $1
+
+# Let the monitoring script know we're done.
+echo "done"
+
+# This is a workaround to prevent the container from exiting
+# and k8s restarting the daemonset pod.
+while true; do sleep 1; done
+```
+
+And the container image is built with this Dockerfile:
+
+```dockerfile
+FROM alpine:3.6
+COPY script.sh .
+RUN chmod u+x script.sh
+ENTRYPOINT ["./script.sh"]
+```
+
+After running the main command, the script echoes "done" and then enters a
+sleep loop instead of exiting. This is needed because we don't want the
+container to exit and be restarted again and again. The "done" marker is read
+by the Job controller to figure out when the task is completed. Once all the
+pods have completed the task, the Job status is set to completed and the
+resource can be deleted.
+
+This can be extended to do other similar cluster management operations. This is
+also used internally in the cluster upgrade process.
+
+## Job (jobs.dellemc.com) Resource Configuration
+
+The following table lists the configurable spec parameters of the
+Job custom resource and their default values.
+
+Parameter | Description | Default
+--------- | ----------- | -------
+`image` | Container image that the job runs |
+`args` | Any arguments to be passed when the container is run |
+`hostPath` | Path on the host that is mounted on the job container |
+`mountPath` | Path on the job container where the hostPath is mounted |
+`completionWord` | The word that the job controller looks for in the pod logs to determine if the task is completed |
+`labelSelector` | Labels that are added to the job pods and are used to select them |
+`nodeSelectorTerms` | This can be used to select the nodes where the job runs |
+
+## TLS Support
+
+To enable TLS, ensure that an ingress controller is installed in the cluster.
+Set `ingress.enable` and `ingress.tls` to `true`.
+Store the TLS cert and key as part of the ecs secret as:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: "ecs-api"
+...
+...
+data:
+  # echo -n '' | base64
+  ...
+  ...
+  # Add base64 encoded TLS cert and key.
+  tls.crt:
+  tls.key:
+```
+
+## CSI
+
+ECS also supports the [Container Storage Interface (CSI)](https://github.com/container-storage-interface/spec)
+to communicate with Kubernetes.
+
+Only Kubernetes versions 1.10+ are supported. CSI ensures forward compatibility
+with future releases of Kubernetes, as vendor-specific drivers will soon be
+deprecated from Kubernetes. However, some functionality is not yet supported.
+
+To enable CSI, set `csi.enable` to `true` in the `ECSCluster` resource
+config.
+
+```yaml
+apiVersion: "dellemc.com/v1"
+kind: "ECSCluster"
+metadata:
+  name: "example-ecs"
+  namespace: "default"
+spec:
+  secretRefName: "ecs-api"
+  secretRefNamespace: "default"
+  csi:
+    enable: true
+```
+
+### CSI Credentials
+
+To enable CSI credentials, ensure that CSI is enabled by setting `csi.enable` to
+`true`. Based on the type of credentials to enable, set the corresponding `csi`
+fields to `true`:
+
+```yaml
+apiVersion: "dellemc.com/v1"
+kind: "ECSCluster"
+metadata:
+  name: "example-ecs"
+  namespace: "default"
+spec:
+  ...
+  ...
+  csi:
+    enable: true
+    enableProvisionCreds: true
+    enableControllerPublishCreds: true
+    enableNodePublishCreds: true
+  ...
+```
+
+Specify the CSI credentials as part of the ecs secret object as:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: "ecs-api"
+...
+...
+data:
+  # echo -n '' | base64
+  ...
+  ...
+  csiProvisionUsername:
+  csiProvisionPassword:
+  csiControllerPublishUsername:
+  csiControllerPublishPassword:
+  csiNodePublishUsername:
+  csiNodePublishPassword:
+```
diff --git a/ecs-operator/cluster-operator/charts/nautilus-operator/Chart.yaml b/ecs-operator/cluster-operator/charts/nautilus-operator/Chart.yaml
new file mode 100644
index 0000000..1de0df1
--- /dev/null
+++ b/ecs-operator/cluster-operator/charts/nautilus-operator/Chart.yaml
@@ -0,0 +1,14 @@
+name: ecs-operator
+version: 0.1.0
+appVersion: 0.1.0
+description: |
+  The ecs operator deploys a custom resource for an ecs cluster, and a
+  pod to provision and scale ecs clusters.
+keywords:
+- ecs
+- storage
+home: https://github.com/ecs/ecs-operator/blob/master/charts/ecs-operator
+icon: https://avatars3.githubusercontent.com/u/25698199
+sources:
+- https://github.com/ecs/charts/blob/master/ecs-operator
+engine: gotpl
diff --git a/ecs-operator/cluster-operator/charts/nautilus-operator/templates/_helpers.tpl b/ecs-operator/cluster-operator/charts/nautilus-operator/templates/_helpers.tpl
new file mode 100644
index 0000000..cec0d91
--- /dev/null
+++ b/ecs-operator/cluster-operator/charts/nautilus-operator/templates/_helpers.tpl
@@ -0,0 +1,16 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "ecsOp.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "ecsOp.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
\ No newline at end of file
diff --git a/ecs-operator/cluster-operator/charts/nautilus-operator/templates/cluster-rbac.yaml b/ecs-operator/cluster-operator/charts/nautilus-operator/templates/cluster-rbac.yaml
new file mode 100644
index 0000000..911b2d4
--- /dev/null
+++ b/ecs-operator/cluster-operator/charts/nautilus-operator/templates/cluster-rbac.yaml
@@ -0,0 +1,49 @@
+{{if eq .Values.watch.namespace ""}}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: {{ template "ecsOp.fullname" . }}
+rules:
+- apiGroups:
+  - ecs.ecs.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - services
+  - endpoints
+  - persistentvolumeclaims
+  - events
+  - configmaps
+  - secrets
+  verbs:
+  - "*"
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  - daemonsets
+  - replicasets
+  - statefulsets
+  verbs:
+  - "*"
+
+---
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: default-account-{{ template "ecsOp.fullname" . }}
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: {{ .Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: {{ template "ecsOp.fullname" . 
}} + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/ecs-operator/cluster-operator/charts/nautilus-operator/templates/crd.yaml b/ecs-operator/cluster-operator/charts/nautilus-operator/templates/crd.yaml new file mode 100644 index 0000000..97409cd --- /dev/null +++ b/ecs-operator/cluster-operator/charts/nautilus-operator/templates/crd.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ecsclusters.ecs.ecs.io +spec: + group: ecs.ecs.io + names: + kind: ECSCluster + listKind: ECSClusterList + plural: ecsclusters + singular: ecscluster + scope: Namespaced + version: v1alpha1 \ No newline at end of file diff --git a/ecs-operator/cluster-operator/charts/nautilus-operator/templates/operator.yaml b/ecs-operator/cluster-operator/charts/nautilus-operator/templates/operator.yaml new file mode 100644 index 0000000..e56b3ce --- /dev/null +++ b/ecs-operator/cluster-operator/charts/nautilus-operator/templates/operator.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "ecsOp.fullname" . }} +spec: + replicas: 1 + selector: + matchLabels: + name: {{ template "ecsOp.fullname" . }} + template: + metadata: + labels: + name: {{ template "ecsOp.fullname" . }} + spec: + containers: + - name: {{ template "ecsOp.fullname" . }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: "WATCH_NAMESPACE" + value: "{{ .Values.watch.namespace }}" diff --git a/ecs-operator/cluster-operator/charts/nautilus-operator/templates/rbac.yaml b/ecs-operator/cluster-operator/charts/nautilus-operator/templates/rbac.yaml new file mode 100644 index 0000000..d4cb5cb --- /dev/null +++ b/ecs-operator/cluster-operator/charts/nautilus-operator/templates/rbac.yaml @@ -0,0 +1,48 @@ +{{if ne .Values.watch.namespace ""}} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: {{ template "ecsOp.fullname" . }} +rules: +- apiGroups: + - ecs.ecs.io + resources: + - "*" + verbs: + - "*" +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + verbs: + - "*" +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - "*" + +--- + +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: default-account-{{ template "ecsOp.fullname" . }} +subjects: +- kind: ServiceAccount + name: default +roleRef: + kind: Role + name: {{ template "ecsOp.fullname" . }} + apiGroup: rbac.authorization.k8s.io +{{end}} \ No newline at end of file diff --git a/ecs-operator/cluster-operator/charts/nautilus-operator/values.yaml b/ecs-operator/cluster-operator/charts/nautilus-operator/values.yaml new file mode 100644 index 0000000..09cd675 --- /dev/null +++ b/ecs-operator/cluster-operator/charts/nautilus-operator/values.yaml @@ -0,0 +1,8 @@ +image: + repository: ecs/ecs-operator + tag: 0.1.0 + pullPolicy: Always + +# Namespace to watch for ECSCluster resources "" means ALL namespaces +watch: + namespace: "" diff --git a/ecs-operator/cluster-operator/cmd/manager/main.go b/ecs-operator/cluster-operator/cmd/manager/main.go new file mode 100644 index 0000000..81e3589 --- /dev/null +++ b/ecs-operator/cluster-operator/cmd/manager/main.go @@ -0,0 +1,111 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package main + +import ( + "context" + "flag" + "os" + "runtime" + + "github.com/ecs/ecs-operator/pkg/apis" + "github.com/ecs/ecs-operator/pkg/controller" + controllerconfig "github.com/ecs/ecs-operator/pkg/controller/config" + "github.com/ecs/ecs-operator/pkg/version" + + "github.com/operator-framework/operator-sdk/pkg/k8sutil" + "github.com/operator-framework/operator-sdk/pkg/leader" + "github.com/operator-framework/operator-sdk/pkg/ready" + sdkVersion "github.com/operator-framework/operator-sdk/version" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/runtime/signals" + + log "github.com/sirupsen/logrus" +) + +var ( + versionFlag bool +) + +func init() { + flag.BoolVar(&versionFlag, "version", false, "Show version and quit") + flag.BoolVar(&controllerconfig.TestMode, "test", false, "Enable test mode. Do not use this flag in production") +} + +func printVersion() { + log.Printf("ecs-operator Version: %v", version.Version) + log.Printf("Git SHA: %s", version.GitSHA) + log.Printf("Go Version: %s", runtime.Version()) + log.Printf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH) + log.Printf("operator-sdk Version: %v", sdkVersion.Version) +} + +func main() { + flag.Parse() + + printVersion() + + if versionFlag { + os.Exit(0) + } + + if controllerconfig.TestMode { + log.Warn("----- Running in test mode. Make sure you are NOT in production -----") + } + + namespace, err := k8sutil.GetWatchNamespace() + if err != nil { + log.Fatal(err, "failed to get watch namespace") + } + + // Get a config to talk to the apiserver + cfg, err := config.GetConfig() + if err != nil { + log.Fatal(err) + } + + // Become the leader before proceeding + leader.Become(context.TODO(), "ecs-operator-lock") + + r := ready.NewFileReady() + err = r.Set() + if err != nil { + log.Fatal(err, "") + } + defer r.Unset() + + // Create a new Cmd to provide shared dependencies and start components + mgr, err := manager.New(cfg, manager.Options{Namespace: namespace}) + if err != nil { + log.Fatal(err) + } + + log.Print("Registering Components") + + // Setup Scheme for all resources + if err := apis.AddToScheme(mgr.GetScheme()); err != nil { + log.Fatal(err) + } + + // Setup all Controllers + if err := controller.AddToManager(mgr); err != nil { + log.Fatal(err) + } + + log.Print("Starting the Cmd") + + // Start the Cmd + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + log.Fatal(err, "manager exited non-zero") + } +} diff --git a/ecs-operator/cluster-operator/deploy/crd.yaml b/ecs-operator/cluster-operator/deploy/crd.yaml new file mode 100644 index 0000000..1c98c99 --- /dev/null +++ b/ecs-operator/cluster-operator/deploy/crd.yaml @@ -0,0 +1,27 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ecsclusters.ecs.ecs.io +spec: + group: ecs.ecs.io + names: + kind: ECSCluster + listKind: ECSClusterList + plural: ecsclusters + singular: ecscluster + additionalPrinterColumns: + - name: Desired Members + type: integer + description: The number of desired ecs members + JSONPath: .status.replicas + - name: Ready Members + type: integer + description: The number ecs members ready + JSONPath: 
.status.readyReplicas + - name: Age + type: date + JSONPath: .metadata.creationTimestamp + scope: Namespaced + version: v1alpha1 + subresources: + status: {} diff --git a/ecs-operator/cluster-operator/deploy/crds/nautilus_v1alpha1_nautiluscluster_cr.yaml b/ecs-operator/cluster-operator/deploy/crds/nautilus_v1alpha1_nautiluscluster_cr.yaml new file mode 100644 index 0000000..7464c15 --- /dev/null +++ b/ecs-operator/cluster-operator/deploy/crds/nautilus_v1alpha1_nautiluscluster_cr.yaml @@ -0,0 +1,68 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: ecs-tier2 +spec: + storageClassName: "nfs" + accessModes: + - ReadWriteMany + resources: + requests: + storage: 50Gi +--- +apiVersion: "ecs.ecs.io/v1alpha1" +kind: "ECSCluster" +metadata: + name: "ecs" +spec: + zookeeperUri: zk-client:2181 + + externalAccess: + enabled: true + type: LoadBalancer + + bookkeeper: + image: + repository: ecs/bookkeeper + tag: latest + pullPolicy: IfNotPresent + + replicas: 3 + + storage: + ledgerVolumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 10Gi + + journalVolumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 10Gi + + autoRecovery: true + + ecs: + controllerReplicas: 1 + nodeReplicas: 3 + + cacheVolumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 20Gi + + image: + repository: ecs/ecs + tag: latest + pullPolicy: IfNotPresent + + tier2: + filesystem: + persistentVolumeClaim: + claimName: ecs-tier2 diff --git a/ecs-operator/cluster-operator/deploy/crds/nautilus_v1alpha1_nautiluscluster_crd.yaml b/ecs-operator/cluster-operator/deploy/crds/nautilus_v1alpha1_nautiluscluster_crd.yaml new file mode 100644 index 0000000..e1cea52 --- /dev/null +++ b/ecs-operator/cluster-operator/deploy/crds/nautilus_v1alpha1_nautiluscluster_crd.yaml @@ -0,0 +1,27 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ecsclusters.ecs.ecs.io +spec: + group: ecs.ecs.io + names: + kind: ECSCluster + listKind: ECSClusterList + plural: ecsclusters + singular: ecscluster + additionalPrinterColumns: + - name: Members + type: integer + description: The number ecs members running + JSONPath: .status.replicas + - name: Ready Members + type: integer + description: The number ecs members ready + JSONPath: .status.readyReplicas + - name: Age + type: date + JSONPath: .metadata.creationTimestamp + scope: Namespaced + version: v1alpha1 + subresources: + status: {} \ No newline at end of file diff --git a/ecs-operator/cluster-operator/deploy/operator.yaml b/ecs-operator/cluster-operator/deploy/operator.yaml new file mode 100644 index 0000000..383ef95 --- /dev/null +++ b/ecs-operator/cluster-operator/deploy/operator.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ecs-operator +spec: + replicas: 1 + selector: + matchLabels: + name: ecs-operator + template: + metadata: + labels: + name: ecs-operator + spec: + serviceAccountName: ecs-operator + containers: + - name: ecs-operator + image: ecs/ecs-operator:latest + ports: + - containerPort: 60000 + name: metrics + command: + - ecs-operator + imagePullPolicy: Always + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "ecs-operator" diff --git 
a/ecs-operator/cluster-operator/deploy/role.yaml b/ecs-operator/cluster-operator/deploy/role.yaml new file mode 100644 index 0000000..3992e7c --- /dev/null +++ b/ecs-operator/cluster-operator/deploy/role.yaml @@ -0,0 +1,60 @@ +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ecs-operator +rules: +- apiGroups: + - ecs.ecs.io + resources: + - "*" + verbs: + - "*" +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + verbs: + - '*' +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - "*" +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - "*" +- apiGroups: + - batch + resources: + - jobs + verbs: + - '*' + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ecs-operator +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list diff --git a/ecs-operator/cluster-operator/deploy/role_binding.yaml b/ecs-operator/cluster-operator/deploy/role_binding.yaml new file mode 100644 index 0000000..bfa9764 --- /dev/null +++ b/ecs-operator/cluster-operator/deploy/role_binding.yaml @@ -0,0 +1,26 @@ +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ecs-operator +subjects: +- kind: ServiceAccount + name: ecs-operator +roleRef: + kind: Role + name: ecs-operator + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: ecs-operator +subjects: +- kind: ServiceAccount + name: ecs-operator + namespace: default +roleRef: + kind: ClusterRole + name: ecs-operator + apiGroup: rbac.authorization.k8s.io diff --git a/ecs-operator/cluster-operator/deploy/service_account.yaml b/ecs-operator/cluster-operator/deploy/service_account.yaml new file mode 100644 index 0000000..98a0c6f --- /dev/null +++ b/ecs-operator/cluster-operator/deploy/service_account.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ecs-operator diff --git a/ecs-operator/cluster-operator/example/cr-detailed.yaml b/ecs-operator/cluster-operator/example/cr-detailed.yaml new file mode 100644 index 0000000..1e4b20c --- /dev/null +++ b/ecs-operator/cluster-operator/example/cr-detailed.yaml @@ -0,0 +1,123 @@ +apiVersion: "ecs.ecs.io/v1alpha1" +kind: "ECSCluster" +metadata: + name: "example" +spec: + zookeeperUri: zk-client:2181 + + bookkeeper: + image: + repository: ecs/bookkeeper + tag: 0.4.0 + pullPolicy: IfNotPresent + + replicas: 3 + resources: + requests: + memory: "3Gi" + cpu: "1000m" + limits: + memory: "5Gi" + cpu: "2000m" + + storage: + ledgerVolumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 10Gi + + journalVolumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 10Gi + + indexVolumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 10Gi + + # Turns on automatic recovery + # see https://bookkeeper.apache.org/docs/latest/admin/autorecovery/ + autoRecovery: true + + # To enable bookkeeper metrics feature, take codahale for example here. 
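+    # (Any stats provider bundled with BookKeeper can be enabled the same way; for
+    # instance, setting statsProviderClass to
+    # "org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider" would
+    # expose Prometheus metrics instead of pushing to Graphite.)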
+ # See http://bookkeeper.apache.org/docs/4.7.0/admin/metrics/ for more metrics provider + # See http://bookkeeper.apache.org/docs/4.7.0/reference/config/#statistics for metrics provider configuration details + options: + enableStatistics: "true" + statsProviderClass: "org.apache.bookkeeper.stats.codahale.CodahaleMetricsProvider" + codahaleStatsGraphiteEndpoint: "graphite.example.com:2003" + # Default is 60 + codahaleStatsOutputFrequencySeconds: 30 + + ecs: + controllerReplicas: 1 + controllerResources: + requests: + memory: "1Gi" + cpu: "1000m" + limits: + memory: "3Gi" + cpu: "2000m" + + nodeReplicas: 3 + nodeResources: + requests: + memory: "3Gi" + cpu: "1000m" + limits: + memory: "5Gi" + cpu: "2000m" + + # Turn on ECS Debug Logging + debugLogging: false + + image: + repository: ecs/ecs + tag: 0.4.0 + pullPolicy: IfNotPresent + + cacheVolumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 20Gi + + tier2: + filesystem: + persistentVolumeClaim: + claimName: ecs-tier2 + +# ecs: +# uri: http://10.247.10.52:9020 +# bucket: shared +# root: "ecs/example" +# namespace: ecs +# credentials: ecs-credentials + +# hdfs: +# uri: hdfs://10.240.10.52:8020/ +# root: /example +# replicationFactor: 3 + + # See https://github.com/ecs/ecs/blob/3f5b65084ae17e74c8ef8e6a40e78e61fa98737b/config/config.properties + # for available configuration properties + options: + ecsservice.containerCount: "4" + ecsservice.cacheMaxSize: "17179869184" + ecsservice.zkSessionTimeoutMs: "10000" + attributeIndex.readBlockSize: "1048576" + readIndex.storageReadAlignment: "1048576" + durableLog.checkpointMinCommitCount: "300" + bookkeeper.bkAckQuorumSize: "3" + metrics.dynamicCacheSize: "100000" + metrics.enableStatistics: "true" + metrics.statsdHost: "telegraph.default" + metrics.statsdPort: "8125" diff --git a/ecs-operator/cluster-operator/example/cr.yaml b/ecs-operator/cluster-operator/example/cr.yaml new file mode 100644 index 0000000..ac8e519 --- /dev/null +++ b/ecs-operator/cluster-operator/example/cr.yaml @@ -0,0 +1,27 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: ecs-tier2 +spec: + storageClassName: "nfs" + accessModes: + - ReadWriteMany + resources: + requests: + storage: 50Gi +--- +apiVersion: "ecs.ecs.io/v1alpha1" +kind: "ECSCluster" +metadata: + name: "example" +spec: + zookeeperUri: zk-client:2181 + bookkeeper: + replicas: 3 + ecs: + controllerReplicas: 1 + nodeReplicas: 3 + tier2: + filesystem: + persistentVolumeClaim: + claimName: ecs-tier2 diff --git a/ecs-operator/cluster-operator/pkg/apis/addtoscheme_nautilus_v1alpha1.go b/ecs-operator/cluster-operator/pkg/apis/addtoscheme_nautilus_v1alpha1.go new file mode 100644 index 0000000..911214b --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/apis/addtoscheme_nautilus_v1alpha1.go @@ -0,0 +1,20 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package apis + +import ( + "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme) +} diff --git a/ecs-operator/cluster-operator/pkg/apis/apis.go b/ecs-operator/cluster-operator/pkg/apis/apis.go new file mode 100644 index 0000000..54aac90 --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/apis/apis.go @@ -0,0 +1,23 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package apis + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// AddToSchemes may be used to add all resources defined in the project to a Scheme +var AddToSchemes runtime.SchemeBuilder + +// AddToScheme adds all Resources to the Scheme +func AddToScheme(s *runtime.Scheme) error { + return AddToSchemes.AddToScheme(s) +} diff --git a/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/bookkeeper.go b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/bookkeeper.go new file mode 100644 index 0000000..3c7a08f --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/bookkeeper.go @@ -0,0 +1,227 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package v1alpha1
+
+import (
+	"fmt"
+
+	"github.com/ecs/ecs-operator/pkg/controller/config"
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+const (
+	// DefaultBookkeeperImageRepository is the default Docker repository for
+	// the BookKeeper image
+	DefaultBookkeeperImageRepository = "ecs/bookkeeper"
+
+	// DefaultBookkeeperImageTag is the default tag used for the BookKeeper
+	// Docker image
+	DefaultBookkeeperImageTag = "latest"
+
+	// DefaultBookkeeperImagePullPolicy is the default image pull policy used
+	// for the BookKeeper Docker image
+	DefaultBookkeeperImagePullPolicy = v1.PullAlways
+
+	// DefaultBookkeeperLedgerVolumeSize is the default volume size for the
+	// Bookkeeper ledger volume
+	DefaultBookkeeperLedgerVolumeSize = "10Gi"
+
+	// DefaultBookkeeperJournalVolumeSize is the default volume size for the
+	// Bookkeeper journal volume
+	DefaultBookkeeperJournalVolumeSize = "10Gi"
+
+	// DefaultBookkeeperIndexVolumeSize is the default volume size for the
+	// Bookkeeper index volume
+	DefaultBookkeeperIndexVolumeSize = "10Gi"
+
+	// MinimumBookkeeperReplicas is the minimum number of Bookkeeper replicas
+	// accepted
+	MinimumBookkeeperReplicas = 3
+
+	// DefaultBookkeeperRequestCPU is the default CPU request for BookKeeper
+	DefaultBookkeeperRequestCPU = "500m"
+
+	// DefaultBookkeeperLimitCPU is the default CPU limit for BookKeeper
+	DefaultBookkeeperLimitCPU = "1"
+
+	// DefaultBookkeeperRequestMemory is the default memory request for BookKeeper
+	DefaultBookkeeperRequestMemory = "1Gi"
+
+	// DefaultBookkeeperLimitMemory is the default memory limit for BookKeeper
+	DefaultBookkeeperLimitMemory = "2Gi"
+)
+
+// BookkeeperSpec defines the configuration of BookKeeper
+type BookkeeperSpec struct {
+	// Image defines the BookKeeper Docker image to use.
+	// By default, "ecs/bookkeeper:latest" will be used.
+	Image *BookkeeperImageSpec `json:"image"`
+
+	// Replicas defines the number of BookKeeper replicas.
+	// Minimum is 3. Defaults to 3.
+	Replicas int32 `json:"replicas"`
+
+	// Storage configures the storage for BookKeeper
+	Storage *BookkeeperStorageSpec `json:"storage"`
+
+	// AutoRecovery indicates whether or not BookKeeper auto recovery is enabled.
+	// Defaults to true.
+	AutoRecovery *bool `json:"autoRecovery"`
+
+	// ServiceAccountName configures the service account used on BookKeeper instances
+	ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
+	// BookieResources specifies the request and limit of resources that bookie can have.
+	// BookieResources includes CPU and memory resources
+	Resources *v1.ResourceRequirements `json:"resources,omitempty"`
+
+	// Options is the Bookkeeper configuration that is to override the bk_server.conf
+	// in bookkeeper. 
Some examples can be found here + // https://github.com/apache/bookkeeper/blob/master/docker/README.md + Options map[string]string `json:"options"` +} + +func (s *BookkeeperSpec) withDefaults() (changed bool) { + if s.Image == nil { + changed = true + s.Image = &BookkeeperImageSpec{} + } + if s.Image.withDefaults() { + changed = true + } + + if !config.TestMode && s.Replicas < MinimumBookkeeperReplicas { + changed = true + s.Replicas = MinimumBookkeeperReplicas + } + + if s.Storage == nil { + changed = true + s.Storage = &BookkeeperStorageSpec{} + } + if s.Storage.withDefaults() { + changed = true + } + + if s.AutoRecovery == nil { + changed = true + boolTrue := true + s.AutoRecovery = &boolTrue + } + + if s.Resources == nil { + changed = true + s.Resources = &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(DefaultBookkeeperRequestCPU), + v1.ResourceMemory: resource.MustParse(DefaultBookkeeperRequestMemory), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(DefaultBookkeeperLimitCPU), + v1.ResourceMemory: resource.MustParse(DefaultBookkeeperLimitMemory), + }, + } + } + + if s.Options == nil { + s.Options = map[string]string{} + } + + return changed +} + +// BookkeeperImageSpec defines the fields needed for a BookKeeper Docker image +type BookkeeperImageSpec struct { + ImageSpec +} + +// String formats a container image struct as a Docker compatible repository string +func (s *BookkeeperImageSpec) String() string { + return fmt.Sprintf("%s:%s", s.Repository, s.Tag) +} + +func (s *BookkeeperImageSpec) withDefaults() (changed bool) { + if s.Repository == "" { + changed = true + s.Repository = DefaultBookkeeperImageRepository + } + + if s.Tag == "" { + changed = true + s.Tag = DefaultBookkeeperImageTag + } + + if s.PullPolicy == "" { + changed = true + s.PullPolicy = DefaultBookkeeperImagePullPolicy + } + + return changed +} + +// BookkeeperStorageSpec is the configuration of the volumes used in BookKeeper +type BookkeeperStorageSpec struct { + // LedgerVolumeClaimTemplate is the spec to describe PVC for the BookKeeper ledger + // This field is optional. If no PVC spec and there is no default storage class, + // stateful containers will use emptyDir as volume + LedgerVolumeClaimTemplate *v1.PersistentVolumeClaimSpec `json:"ledgerVolumeClaimTemplate"` + + // JournalVolumeClaimTemplate is the spec to describe PVC for the BookKeeper journal + // This field is optional. If no PVC spec and there is no default storage class, + // stateful containers will use emptyDir as volume + JournalVolumeClaimTemplate *v1.PersistentVolumeClaimSpec `json:"journalVolumeClaimTemplate"` + + // IndexVolumeClaimTemplate is the spec to describe PVC for the BookKeeper index + // This field is optional. 
If no PVC spec and there is no default storage class, + // stateful containers will use emptyDir as volume + IndexVolumeClaimTemplate *v1.PersistentVolumeClaimSpec `json:"indexVolumeClaimTemplate"` +} + +func (s *BookkeeperStorageSpec) withDefaults() (changed bool) { + if s.LedgerVolumeClaimTemplate == nil { + changed = true + s.LedgerVolumeClaimTemplate = &v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse(DefaultBookkeeperLedgerVolumeSize), + }, + }, + } + } + + if s.JournalVolumeClaimTemplate == nil { + changed = true + s.JournalVolumeClaimTemplate = &v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse(DefaultBookkeeperJournalVolumeSize), + }, + }, + } + } + + if s.IndexVolumeClaimTemplate == nil { + changed = true + s.IndexVolumeClaimTemplate = &v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse(DefaultBookkeeperIndexVolumeSize), + }, + }, + } + } + + return changed +} diff --git a/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/doc.go b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/doc.go new file mode 100644 index 0000000..65969c7 --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/doc.go @@ -0,0 +1,14 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +// Package v1alpha1 contains API Schema definitions for the ecs v1alpha1 API group +// +k8s:deepcopy-gen=package,register +// +groupName=ecs.ecs.io +package v1alpha1 diff --git a/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/nautilus.go b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/nautilus.go new file mode 100644 index 0000000..209d1b7 --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/nautilus.go @@ -0,0 +1,278 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package v1alpha1
+
+import (
+	"fmt"
+
+	"github.com/ecs/ecs-operator/pkg/controller/config"
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+const (
+	// DefaultECSImageRepository is the default Docker repository for
+	// the ECS image
+	DefaultECSImageRepository = "ecs/ecs"
+
+	// DefaultECSImageTag is the default tag used for the ECS
+	// Docker image
+	DefaultECSImageTag = "latest"
+
+	// DefaultECSImagePullPolicy is the default image pull policy used
+	// for the ECS Docker image
+	DefaultECSImagePullPolicy = v1.PullAlways
+
+	// DefaultECSCacheVolumeSize is the default volume size for the
+	// ECS Node cache volume
+	DefaultECSCacheVolumeSize = "20Gi"
+
+	// DefaultECSTier2ClaimName is the default volume claim name used as Tier 2
+	DefaultECSTier2ClaimName = "ecs-tier2"
+
+	// DefaultControllerReplicas is the default number of replicas for the ECS
+	// Controller component
+	DefaultControllerReplicas = 1
+
+	// DefaultNodeReplicas is the default number of replicas for the ECS
+	// Segment Store component
+	DefaultNodeReplicas = 1
+
+	// DefaultControllerRequestCPU is the default CPU request for ECS
+	DefaultControllerRequestCPU = "250m"
+
+	// DefaultControllerLimitCPU is the default CPU limit for ECS
+	DefaultControllerLimitCPU = "500m"
+
+	// DefaultControllerRequestMemory is the default memory request for ECS
+	DefaultControllerRequestMemory = "512Mi"
+
+	// DefaultControllerLimitMemory is the default memory limit for ECS
+	DefaultControllerLimitMemory = "1Gi"
+
+	// DefaultNodeRequestCPU is the default CPU request for ECS
+	DefaultNodeRequestCPU = "500m"
+
+	// DefaultNodeLimitCPU is the default CPU limit for ECS
+	DefaultNodeLimitCPU = "1"
+
+	// DefaultNodeRequestMemory is the default memory request for ECS
+	DefaultNodeRequestMemory = "1Gi"
+
+	// DefaultNodeLimitMemory is the default memory limit for ECS
+	DefaultNodeLimitMemory = "2Gi"
+)
+
+// ECSSpec defines the configuration of ECS
+type ECSSpec struct {
+	// ControllerReplicas defines the number of Controller replicas.
+	// Defaults to 1.
+	ControllerReplicas int32 `json:"controllerReplicas"`
+
+	// NodeReplicas defines the number of Segment Store replicas.
+	// Defaults to 1.
+	NodeReplicas int32 `json:"nodeReplicas"`
+
+	// DebugLogging indicates whether or not debug level logging is enabled.
+	// Defaults to false.
+	DebugLogging bool `json:"debugLogging"`
+
+	// Image defines the ECS Docker image to use.
+	// By default, "ecs/ecs:latest" will be used.
+	Image *ECSImageSpec `json:"image"`
+
+	// Options is the ECS configuration that is passed to the ECS processes
+	// as JAVA_OPTS. See the following file for a complete list of options:
+	// https://github.com/ecs/ecs/blob/master/config/config.properties
+	Options map[string]string `json:"options"`
+
+	// CacheVolumeClaimTemplate is the spec to describe PVC for the ECS cache.
+	// This field is optional. If no PVC spec, stateful containers will use
+	// emptyDir as volume
+	CacheVolumeClaimTemplate *v1.PersistentVolumeClaimSpec `json:"cacheVolumeClaimTemplate"`
+
+	// Tier2 is the configuration of ECS's tier 2 storage. If no configuration
+	// is provided, it will assume that a PersistentVolumeClaim called "ecs-tier2"
+	// is present and it will use it as Tier 2
+	Tier2 *Tier2Spec `json:"tier2"`
+
+	// ControllerServiceAccountName configures the service account used on controller instances. 
+ // If not specified, Kubernetes will automatically assign the default service account in the namespace + ControllerServiceAccountName string `json:"controllerServiceAccountName,omitempty"` + + // NodeServiceAccountName configures the service account used on segment store instances. + // If not specified, Kubernetes will automatically assign the default service account in the namespace + NodeServiceAccountName string `json:"nodeServiceAccountName,omitempty"` + + // ControllerResources specifies the request and limit of resources that controller can have. + // ControllerResources includes CPU and memory resources + ControllerResources *v1.ResourceRequirements `json:"controllerResources,omitempty"` + + // NodeResources specifies the request and limit of resources that node can have. + // NodeResources includes CPU and memory resources + NodeResources *v1.ResourceRequirements `json:"nodeResources,omitempty"` +} + +func (s *ECSSpec) withDefaults() (changed bool) { + if !config.TestMode && s.ControllerReplicas < 1 { + changed = true + s.ControllerReplicas = 1 + } + + if !config.TestMode && s.NodeReplicas < 1 { + changed = true + s.NodeReplicas = 1 + } + + if s.Image == nil { + changed = true + s.Image = &ECSImageSpec{} + } + if s.Image.withDefaults() { + changed = true + } + + if s.Options == nil { + changed = true + s.Options = map[string]string{} + } + + if s.CacheVolumeClaimTemplate == nil { + changed = true + s.CacheVolumeClaimTemplate = &v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse(DefaultECSCacheVolumeSize), + }, + }, + } + } + + if s.Tier2 == nil { + changed = true + s.Tier2 = &Tier2Spec{} + } + + if s.Tier2.withDefaults() { + changed = true + } + + if s.ControllerResources == nil { + changed = true + s.ControllerResources = &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(DefaultControllerRequestCPU), + v1.ResourceMemory: resource.MustParse(DefaultControllerRequestMemory), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(DefaultControllerLimitCPU), + v1.ResourceMemory: resource.MustParse(DefaultControllerLimitMemory), + }, + } + } + + if s.NodeResources == nil { + changed = true + s.NodeResources = &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(DefaultNodeRequestCPU), + v1.ResourceMemory: resource.MustParse(DefaultNodeRequestMemory), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(DefaultNodeLimitCPU), + v1.ResourceMemory: resource.MustParse(DefaultNodeLimitMemory), + }, + } + } + + return changed +} + +// ECSImageSpec defines the fields needed for a ECS Docker image +type ECSImageSpec struct { + ImageSpec +} + +// String formats a container image struct as a Docker compatible repository string +func (s *ECSImageSpec) String() string { + return fmt.Sprintf("%s:%s", s.Repository, s.Tag) +} + +func (s *ECSImageSpec) withDefaults() (changed bool) { + if s.Repository == "" { + changed = true + s.Repository = DefaultECSImageRepository + } + + if s.Tag == "" { + changed = true + s.Tag = DefaultECSImageTag + } + + if s.PullPolicy == "" { + changed = true + s.PullPolicy = DefaultECSImagePullPolicy + } + + return changed +} + +// Tier2Spec configures the Tier 2 storage type to use with ECS. 
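+// Only one of the FileSystem, ECS, or Hdfs backends should be configured.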
+// If not specified, Tier 2 will be configured in filesystem mode and will try
+// to use a PersistentVolumeClaim with the name "ecs-tier2"
+type Tier2Spec struct {
+	// FileSystem is used to configure a pre-created Persistent Volume Claim
+	// as Tier 2 backend.
+	// It is the default Tier 2 mode.
+	FileSystem *FileSystemSpec `json:"filesystem,omitempty"`
+
+	// ECS is used to configure a Dell EMC ECS system as a Tier 2 backend
+	ECS *ECSTier2Spec `json:"ecs,omitempty"`
+
+	// Hdfs is used to configure an HDFS system as a Tier 2 backend
+	Hdfs *HDFSSpec `json:"hdfs,omitempty"`
+}
+
+func (s *Tier2Spec) withDefaults() (changed bool) {
+	if s.FileSystem == nil && s.ECS == nil && s.Hdfs == nil {
+		changed = true
+		fs := &FileSystemSpec{
+			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+				ClaimName: DefaultECSTier2ClaimName,
+			},
+		}
+		s.FileSystem = fs
+	}
+
+	return changed
+}
+
+// FileSystemSpec contains the reference to a PVC.
+type FileSystemSpec struct {
+	PersistentVolumeClaim *v1.PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim"`
+}
+
+// ECSTier2Spec contains the connection details to a Dell EMC ECS system
+type ECSTier2Spec struct {
+	Uri         string `json:"uri"`
+	Bucket      string `json:"bucket"`
+	Root        string `json:"root"`
+	Namespace   string `json:"namespace"`
+	Credentials string `json:"credentials"`
+}
+
+// HDFSSpec contains the connection details to an HDFS system
+type HDFSSpec struct {
+	Uri               string `json:"uri"`
+	Root              string `json:"root"`
+	ReplicationFactor int32  `json:"replicationFactor"`
+}
diff --git a/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/nautiluscluster_types.go b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/nautiluscluster_types.go
new file mode 100644
index 0000000..6f4bbf9
--- /dev/null
+++ b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/nautiluscluster_types.go
@@ -0,0 +1,141 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package v1alpha1
+
+import (
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	// DefaultZookeeperUri is the default ZooKeeper URI in the form of "hostname:port"
+	DefaultZookeeperUri = "zk-client:2181"
+
+	// DefaultServiceType is the default service type for external access
+	DefaultServiceType = v1.ServiceTypeLoadBalancer
+)
+
+func init() {
+	SchemeBuilder.Register(&ECSCluster{}, &ECSClusterList{})
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ECSClusterList contains a list of ECSCluster
+type ECSClusterList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ECSCluster `json:"items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ECSCluster is the Schema for the ecsclusters API
+// +k8s:openapi-gen=true
+type ECSCluster struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ClusterSpec   `json:"spec,omitempty"`
+	Status ClusterStatus `json:"status,omitempty"`
+}
+
+// WithDefaults sets default values when they are not defined in the spec. 
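+// It reports whether any field was mutated, so callers can tell whether the
+// resource needs to be updated in the API server.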
+func (p *ECSCluster) WithDefaults() (changed bool) {
+	changed = p.Spec.withDefaults()
+
+	return changed
+}
+
+// ClusterSpec defines the desired state of ECSCluster
+type ClusterSpec struct {
+	// ZookeeperUri specifies the hostname/IP address and port in the format
+	// "hostname:port".
+	// By default, the value "zk-client:2181" is used, which corresponds to the
+	// default ZooKeeper service created by the ECS ZooKeeper operator
+	// available at: https://github.com/ecs/zookeeper-operator
+	ZookeeperUri string `json:"zookeeperUri"`
+
+	// ExternalAccess specifies whether or not to allow external access
+	// to clients and the service type to use to achieve it
+	// By default, external access is not enabled
+	ExternalAccess *ExternalAccess `json:"externalAccess"`
+
+	// Bookkeeper configuration
+	Bookkeeper *BookkeeperSpec `json:"bookkeeper"`
+
+	// ECS configuration
+	ECS *ECSSpec `json:"ecs"`
+}
+
+func (s *ClusterSpec) withDefaults() (changed bool) {
+	if s.ZookeeperUri == "" {
+		changed = true
+		s.ZookeeperUri = DefaultZookeeperUri
+	}
+
+	if s.ExternalAccess == nil {
+		changed = true
+		s.ExternalAccess = &ExternalAccess{}
+	}
+	if s.ExternalAccess.withDefaults() {
+		changed = true
+	}
+
+	if s.Bookkeeper == nil {
+		changed = true
+		s.Bookkeeper = &BookkeeperSpec{}
+	}
+	if s.Bookkeeper.withDefaults() {
+		changed = true
+	}
+
+	if s.ECS == nil {
+		changed = true
+		s.ECS = &ECSSpec{}
+	}
+	if s.ECS.withDefaults() {
+		changed = true
+	}
+
+	return changed
+}
+
+// ExternalAccess defines the configuration of the external access
+type ExternalAccess struct {
+	// Enabled specifies whether or not external access is enabled
+	// By default, external access is not enabled
+	Enabled bool `json:"enabled"`
+
+	// Type specifies the service type to achieve external access.
+	// Options are "LoadBalancer" and "NodePort".
+	// By default, if external access is enabled, it will use "LoadBalancer"
+	Type v1.ServiceType `json:"type,omitempty"`
+}
+
+func (e *ExternalAccess) withDefaults() (changed bool) {
+	if !e.Enabled && e.Type != "" {
+		changed = true
+		e.Type = ""
+	} else if e.Enabled && e.Type == "" {
+		changed = true
+		e.Type = DefaultServiceType
+	}
+
+	return changed
+}
+
+// ImageSpec defines the fields needed for a Docker repository image
+type ImageSpec struct {
+	Repository string        `json:"repository"`
+	Tag        string        `json:"tag"`
+	PullPolicy v1.PullPolicy `json:"pullPolicy"`
+}
diff --git a/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/register.go b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/register.go
new file mode 100644
index 0000000..51973e3
--- /dev/null
+++ b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/register.go
@@ -0,0 +1,27 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +// Package v1alpha1 contains API Schema definitions for the ecs v1alpha1 API group +// +k8s:deepcopy-gen=package,register +// +groupName=ecs.ecs.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "ecs.ecs.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} +) diff --git a/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/status.go b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/status.go new file mode 100644 index 0000000..78c2926 --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/status.go @@ -0,0 +1,131 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package v1alpha1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" +) + +type ClusterConditionType string + +const ( + ClusterConditionPodsReady ClusterConditionType = "PodsReady" +) + +// ClusterStatus defines the observed state of ECSCluster +type ClusterStatus struct { + // Conditions list all the applied conditions + Conditions []ClusterCondition `json:"conditions,omitempty"` + + // CurrentVersion is the current cluster version + CurrentVersion string `json:"currentVersion,omitempty"` + + // TargetVersion is the version the cluster upgrading to. + // If the cluster is not upgrading, TargetVersion is empty. + TargetVersion string `json:"targetVersion,omitempty"` + + // Replicas is the number of desired replicas in the cluster + Replicas int32 `json:"replicas"` + + // CurrentReplicas is the number of current replicas in the cluster + CurrentReplicas int32 `json:"currentReplicas"` + + // ReadyReplicas is the number of ready replicas in the cluster + ReadyReplicas int32 `json:"readyReplicas"` + + // Members is the ECS members in the cluster + Members MembersStatus `json:"members"` +} + +// MembersStatus is the status of the members of the cluster with both +// ready and unready node membership lists +type MembersStatus struct { + Ready []string `json:"ready"` + Unready []string `json:"unready"` +} + +// ClusterCondition shows the current condition of a ECS cluster. +// Comply with k8s API conventions +type ClusterCondition struct { + // Type of ECS cluster condition. + Type ClusterConditionType `json:"type"` + + // Status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status"` + + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty"` + + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty"` + + // The last time this condition was updated. + LastUpdateTime string `json:"lastUpdateTime,omitempty"` + + // Last time the condition transitioned from one status to another. 
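+	// Stored as an RFC3339-formatted string (see setClusterCondition).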
+ LastTransitionTime string `json:"lastTransitionTime,omitempty"` +} + +func (ps *ClusterStatus) SetPodsReadyConditionTrue() { + c := newClusterCondition(ClusterConditionPodsReady, corev1.ConditionTrue, "", "") + ps.setClusterCondition(*c) +} + +func (ps *ClusterStatus) SetPodsReadyConditionFalse() { + c := newClusterCondition(ClusterConditionPodsReady, corev1.ConditionFalse, "", "") + ps.setClusterCondition(*c) +} + +func newClusterCondition(condType ClusterConditionType, status corev1.ConditionStatus, reason, message string) *ClusterCondition { + return &ClusterCondition{ + Type: condType, + Status: status, + Reason: reason, + Message: message, + LastUpdateTime: "", + LastTransitionTime: "", + } +} + +func (ps *ClusterStatus) GetClusterCondition(t ClusterConditionType) (int, *ClusterCondition) { + for i, c := range ps.Conditions { + if t == c.Type { + return i, &c + } + } + return -1, nil +} + +func (ps *ClusterStatus) setClusterCondition(newCondition ClusterCondition) { + now := time.Now().Format(time.RFC3339) + position, existingCondition := ps.GetClusterCondition(newCondition.Type) + + if existingCondition == nil { + ps.Conditions = append(ps.Conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = now + existingCondition.LastUpdateTime = now + } + + if existingCondition.Reason != newCondition.Reason || existingCondition.Message != newCondition.Message { + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message + existingCondition.LastUpdateTime = now + } + + ps.Conditions[position] = *existingCondition +} diff --git a/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/status_test.go b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/status_test.go new file mode 100644 index 0000000..5441afb --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/status_test.go @@ -0,0 +1,91 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package v1alpha1_test + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1" +) + +func TestV1alpha1(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "ECSCluster Status") +} + +var _ = Describe("ECSCluster Status", func() { + + var p v1alpha1.ECSCluster + + BeforeEach(func() { + p = v1alpha1.ECSCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + } + }) + + Context("manually set pods ready condition to be true", func() { + BeforeEach(func() { + condition := v1alpha1.ClusterCondition{ + Type: v1alpha1.ClusterConditionPodsReady, + Status: corev1.ConditionTrue, + Reason: "", + Message: "", + LastUpdateTime: "", + LastTransitionTime: "", + } + p.Status.Conditions = append(p.Status.Conditions, condition) + }) + + It("should contains pods ready condition and it is true status", func() { + _, condition := p.Status.GetClusterCondition(v1alpha1.ClusterConditionPodsReady) + Ω(condition.Status).To(Equal(corev1.ConditionTrue)) + }) + }) + + Context("set conditions", func() { + Context("set pods ready condition to be true", func() { + BeforeEach(func() { + p.Status.SetPodsReadyConditionFalse() + p.Status.SetPodsReadyConditionTrue() + }) + It("should have pods ready condition with true status", func() { + _, condition := p.Status.GetClusterCondition(v1alpha1.ClusterConditionPodsReady) + Ω(condition.Status).To(Equal(corev1.ConditionTrue)) + }) + }) + + Context("set pod ready condition to be false", func() { + BeforeEach(func() { + p.Status.SetPodsReadyConditionTrue() + p.Status.SetPodsReadyConditionFalse() + }) + + It("should have ready condition with false status", func() { + _, condition := p.Status.GetClusterCondition(v1alpha1.ClusterConditionPodsReady) + Ω(condition.Status).To(Equal(corev1.ConditionFalse)) + }) + + It("should have updated timestamps", func() { + _, condition := p.Status.GetClusterCondition(v1alpha1.ClusterConditionPodsReady) + // TODO: check the timestamps + Ω(condition.LastUpdateTime).NotTo(Equal("")) + Ω(condition.LastTransitionTime).NotTo(Equal("")) + }) + }) + }) +}) diff --git a/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/zz_generated.deepcopy.go b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..b8f7ab3 --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,454 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BookkeeperImageSpec) DeepCopyInto(out *BookkeeperImageSpec) { + *out = *in + out.ImageSpec = in.ImageSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BookkeeperImageSpec. 
+func (in *BookkeeperImageSpec) DeepCopy() *BookkeeperImageSpec { + if in == nil { + return nil + } + out := new(BookkeeperImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BookkeeperSpec) DeepCopyInto(out *BookkeeperSpec) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(BookkeeperImageSpec) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(BookkeeperStorageSpec) + (*in).DeepCopyInto(*out) + } + if in.AutoRecovery != nil { + in, out := &in.AutoRecovery, &out.AutoRecovery + *out = new(bool) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BookkeeperSpec. +func (in *BookkeeperSpec) DeepCopy() *BookkeeperSpec { + if in == nil { + return nil + } + out := new(BookkeeperSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BookkeeperStorageSpec) DeepCopyInto(out *BookkeeperStorageSpec) { + *out = *in + if in.LedgerVolumeClaimTemplate != nil { + in, out := &in.LedgerVolumeClaimTemplate, &out.LedgerVolumeClaimTemplate + *out = new(v1.PersistentVolumeClaimSpec) + (*in).DeepCopyInto(*out) + } + if in.JournalVolumeClaimTemplate != nil { + in, out := &in.JournalVolumeClaimTemplate, &out.JournalVolumeClaimTemplate + *out = new(v1.PersistentVolumeClaimSpec) + (*in).DeepCopyInto(*out) + } + if in.IndexVolumeClaimTemplate != nil { + in, out := &in.IndexVolumeClaimTemplate, &out.IndexVolumeClaimTemplate + *out = new(v1.PersistentVolumeClaimSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BookkeeperStorageSpec. +func (in *BookkeeperStorageSpec) DeepCopy() *BookkeeperStorageSpec { + if in == nil { + return nil + } + out := new(BookkeeperStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition. +func (in *ClusterCondition) DeepCopy() *ClusterCondition { + if in == nil { + return nil + } + out := new(ClusterCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + if in.ExternalAccess != nil { + in, out := &in.ExternalAccess, &out.ExternalAccess + *out = new(ExternalAccess) + **out = **in + } + if in.Bookkeeper != nil { + in, out := &in.Bookkeeper, &out.Bookkeeper + *out = new(BookkeeperSpec) + (*in).DeepCopyInto(*out) + } + if in.ECS != nil { + in, out := &in.ECS, &out.ECS + *out = new(ECSSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. 
+func (in *ClusterSpec) DeepCopy() *ClusterSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]ClusterCondition, len(*in))
+		copy(*out, *in)
+	}
+	in.Members.DeepCopyInto(&out.Members)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
+func (in *ClusterStatus) DeepCopy() *ClusterStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ECSTier2Spec) DeepCopyInto(out *ECSTier2Spec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECSTier2Spec.
+func (in *ECSTier2Spec) DeepCopy() *ECSTier2Spec {
+	if in == nil {
+		return nil
+	}
+	out := new(ECSTier2Spec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalAccess) DeepCopyInto(out *ExternalAccess) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAccess.
+func (in *ExternalAccess) DeepCopy() *ExternalAccess {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalAccess)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FileSystemSpec) DeepCopyInto(out *FileSystemSpec) {
+	*out = *in
+	if in.PersistentVolumeClaim != nil {
+		in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim
+		*out = new(v1.PersistentVolumeClaimVolumeSource)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemSpec.
+func (in *FileSystemSpec) DeepCopy() *FileSystemSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(FileSystemSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HDFSSpec) DeepCopyInto(out *HDFSSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HDFSSpec.
+func (in *HDFSSpec) DeepCopy() *HDFSSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(HDFSSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec.
+func (in *ImageSpec) DeepCopy() *ImageSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MembersStatus) DeepCopyInto(out *MembersStatus) { + *out = *in + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Unready != nil { + in, out := &in.Unready, &out.Unready + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MembersStatus. +func (in *MembersStatus) DeepCopy() *MembersStatus { + if in == nil { + return nil + } + out := new(MembersStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ECSCluster) DeepCopyInto(out *ECSCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECSCluster. +func (in *ECSCluster) DeepCopy() *ECSCluster { + if in == nil { + return nil + } + out := new(ECSCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ECSCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ECSClusterList) DeepCopyInto(out *ECSClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ECSCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECSClusterList. +func (in *ECSClusterList) DeepCopy() *ECSClusterList { + if in == nil { + return nil + } + out := new(ECSClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ECSClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ECSImageSpec) DeepCopyInto(out *ECSImageSpec) { + *out = *in + out.ImageSpec = in.ImageSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECSImageSpec. +func (in *ECSImageSpec) DeepCopy() *ECSImageSpec { + if in == nil { + return nil + } + out := new(ECSImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ECSSpec) DeepCopyInto(out *ECSSpec) {
+	*out = *in
+	if in.Image != nil {
+		in, out := &in.Image, &out.Image
+		*out = new(ECSImageSpec)
+		**out = **in
+	}
+	if in.Options != nil {
+		in, out := &in.Options, &out.Options
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.CacheVolumeClaimTemplate != nil {
+		in, out := &in.CacheVolumeClaimTemplate, &out.CacheVolumeClaimTemplate
+		*out = new(v1.PersistentVolumeClaimSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Tier2 != nil {
+		in, out := &in.Tier2, &out.Tier2
+		*out = new(Tier2Spec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ControllerResources != nil {
+		in, out := &in.ControllerResources, &out.ControllerResources
+		*out = new(v1.ResourceRequirements)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NodeResources != nil {
+		in, out := &in.NodeResources, &out.NodeResources
+		*out = new(v1.ResourceRequirements)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECSSpec.
+func (in *ECSSpec) DeepCopy() *ECSSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ECSSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Tier2Spec) DeepCopyInto(out *Tier2Spec) {
+	*out = *in
+	if in.FileSystem != nil {
+		in, out := &in.FileSystem, &out.FileSystem
+		*out = new(FileSystemSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ECS != nil {
+		in, out := &in.ECS, &out.ECS
+		*out = new(ECSTier2Spec)
+		**out = **in
+	}
+	if in.Hdfs != nil {
+		in, out := &in.Hdfs, &out.Hdfs
+		*out = new(HDFSSpec)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tier2Spec.
+func (in *Tier2Spec) DeepCopy() *Tier2Spec {
+	if in == nil {
+		return nil
+	}
+	out := new(Tier2Spec)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/ecs-operator/cluster-operator/pkg/controller/add_nautiluscluster.go b/ecs-operator/cluster-operator/pkg/controller/add_nautiluscluster.go
new file mode 100644
index 0000000..68206f3
--- /dev/null
+++ b/ecs-operator/cluster-operator/pkg/controller/add_nautiluscluster.go
@@ -0,0 +1,20 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package controller
+
+import (
+	"github.com/ecs/ecs-operator/pkg/controller/ecscluster"
+)
+
+func init() {
+	// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
+	AddToManagerFuncs = append(AddToManagerFuncs, ecscluster.Add)
+}
diff --git a/ecs-operator/cluster-operator/pkg/controller/config/config.go b/ecs-operator/cluster-operator/pkg/controller/config/config.go
new file mode 100644
index 0000000..0e062e2
--- /dev/null
+++ b/ecs-operator/cluster-operator/pkg/controller/config/config.go
@@ -0,0 +1,18 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package config
+
+// TestMode enables test mode in the operator and applies
+// the following changes:
+// - Disables the minimum replica count check for BookKeeper
+// - Disables the minimum replica count check for the ECS Controller
+// - Disables the minimum replica count check for the Segment Store
+var TestMode bool
diff --git a/ecs-operator/cluster-operator/pkg/controller/controller.go b/ecs-operator/cluster-operator/pkg/controller/controller.go
new file mode 100644
index 0000000..53ce364
--- /dev/null
+++ b/ecs-operator/cluster-operator/pkg/controller/controller.go
@@ -0,0 +1,28 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package controller
+
+import (
+    "sigs.k8s.io/controller-runtime/pkg/manager"
+)
+
+// AddToManagerFuncs is a list of functions to add all Controllers to the Manager
+var AddToManagerFuncs []func(manager.Manager) error
+
+// AddToManager adds all Controllers to the Manager
+func AddToManager(m manager.Manager) error {
+    for _, f := range AddToManagerFuncs {
+        if err := f(m); err != nil {
+            return err
+        }
+    }
+    return nil
+}
diff --git a/ecs-operator/cluster-operator/pkg/controller/nautilus/bookie.go b/ecs-operator/cluster-operator/pkg/controller/nautilus/bookie.go
new file mode 100644
index 0000000..c501067
--- /dev/null
+++ b/ecs-operator/cluster-operator/pkg/controller/nautilus/bookie.go
@@ -0,0 +1,274 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package ecs
+
+import (
+    "fmt"
+    "strings"
+
+    "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1"
+    "github.com/ecs/ecs-operator/pkg/util"
+    appsv1 "k8s.io/api/apps/v1"
+    corev1 "k8s.io/api/core/v1"
+    policyv1beta1 "k8s.io/api/policy/v1beta1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+    LedgerDiskName  = "ledger"
+    JournalDiskName = "journal"
+    IndexDiskName   = "index"
+)
+
+func MakeBookieHeadlessService(ecsCluster *v1alpha1.ECSCluster) *corev1.Service {
+    return &corev1.Service{
+        TypeMeta: metav1.TypeMeta{
+            Kind:       "Service",
+            APIVersion: "v1",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      util.HeadlessServiceNameForBookie(ecsCluster.Name),
+            Namespace: ecsCluster.Namespace,
+            Labels:    util.LabelsForBookie(ecsCluster),
+        },
+        Spec: corev1.ServiceSpec{
+            Ports: []corev1.ServicePort{
+                {
+                    Name: "bookie",
+                    Port: 3181,
+                },
+            },
+            Selector:  util.LabelsForBookie(ecsCluster),
+            ClusterIP: corev1.ClusterIPNone,
+        },
+    }
+}
+
+func MakeBookieStatefulSet(ecsCluster *v1alpha1.ECSCluster) *appsv1.StatefulSet {
+    return &appsv1.StatefulSet{
+        TypeMeta: metav1.TypeMeta{
+            Kind:       "StatefulSet",
+            APIVersion: "apps/v1",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      util.StatefulSetNameForBookie(ecsCluster.Name),
+            Namespace: ecsCluster.Namespace,
+            Labels:    util.LabelsForBookie(ecsCluster),
+        },
+        Spec: appsv1.StatefulSetSpec{
+            ServiceName:         util.HeadlessServiceNameForBookie(ecsCluster.Name),
+            Replicas:            &ecsCluster.Spec.Bookkeeper.Replicas,
+            PodManagementPolicy: appsv1.ParallelPodManagement,
+            Template:            makeBookieStatefulTemplate(ecsCluster),
+            Selector: &metav1.LabelSelector{
+                MatchLabels: util.LabelsForBookie(ecsCluster),
+            },
+            VolumeClaimTemplates: makeBookieVolumeClaimTemplates(ecsCluster.Spec.Bookkeeper),
+        },
+    }
+}
+
+func makeBookieStatefulTemplate(ecsCluster *v1alpha1.ECSCluster) corev1.PodTemplateSpec {
+    return corev1.PodTemplateSpec{
+        ObjectMeta: metav1.ObjectMeta{
+            Labels: util.LabelsForBookie(ecsCluster),
+        },
+        Spec: *makeBookiePodSpec(ecsCluster.Name, ecsCluster.Spec.Bookkeeper),
+    }
+}
+
+func makeBookiePodSpec(clusterName string, bookkeeperSpec *v1alpha1.BookkeeperSpec) *corev1.PodSpec {
+    podSpec := &corev1.PodSpec{
+        Containers: []corev1.Container{
+            {
+                Name:            "bookie",
+                Image:           bookkeeperSpec.Image.String(),
+                ImagePullPolicy: bookkeeperSpec.Image.PullPolicy,
+                Ports: []corev1.ContainerPort{
+                    {
+                        Name:          "bookie",
+                        ContainerPort: 3181,
+                    },
+                },
+                EnvFrom: []corev1.EnvFromSource{
+                    {
+                        ConfigMapRef: &corev1.ConfigMapEnvSource{
+                            LocalObjectReference: corev1.LocalObjectReference{
+                                Name: util.ConfigMapNameForBookie(clusterName),
+                            },
+                        },
+                    },
+                },
+                VolumeMounts: []corev1.VolumeMount{
+                    {
+                        Name:      JournalDiskName,
+                        MountPath: "/bk/journal",
+                    },
+                    {
+                        Name:      LedgerDiskName,
+                        MountPath: "/bk/ledgers",
+                    },
+                    {
+                        Name:      IndexDiskName,
+                        MountPath: "/bk/index",
+                    },
+                },
+                Resources: *bookkeeperSpec.Resources,
+                ReadinessProbe: &corev1.Probe{
+                    Handler: corev1.Handler{
+                        Exec: &corev1.ExecAction{
+                            Command: []string{"/bin/sh", "-c", "/opt/bookkeeper/bin/bookkeeper shell bookiesanity"},
+                        },
+                    },
+                    // Bookie pods should start fast. We give them up to about two minutes to become ready.
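+                    // (Worst case before the readiness probe gives up:
+                    // InitialDelaySeconds + FailureThreshold*PeriodSeconds = 20s + 9*10s = 110s.)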
+ InitialDelaySeconds: 20, + PeriodSeconds: 10, + FailureThreshold: 9, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: util.HealthcheckCommand(3181), + }, + }, + // We start the liveness probe from the maximum time the pod can take + // before becoming ready. + // If the pod fails the health check during 1 minute, Kubernetes + // will restart it. + InitialDelaySeconds: 60, + PeriodSeconds: 15, + FailureThreshold: 4, + }, + }, + }, + Affinity: util.PodAntiAffinity("bookie", clusterName), + } + + if bookkeeperSpec.ServiceAccountName != "" { + podSpec.ServiceAccountName = bookkeeperSpec.ServiceAccountName + } + + return podSpec +} + +func makeBookieVolumeClaimTemplates(spec *v1alpha1.BookkeeperSpec) []corev1.PersistentVolumeClaim { + return []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: JournalDiskName, + }, + Spec: *spec.Storage.JournalVolumeClaimTemplate, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: LedgerDiskName, + }, + Spec: *spec.Storage.LedgerVolumeClaimTemplate, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: IndexDiskName, + }, + Spec: *spec.Storage.IndexVolumeClaimTemplate, + }, + } +} + +func MakeBookieConfigMap(ecsCluster *v1alpha1.ECSCluster) *corev1.ConfigMap { + memoryOpts := []string{ + "-Xms1g", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseCGroupMemoryLimitForHeap", + "-XX:MaxRAMFraction=2", + "-XX:MaxDirectMemorySize=1g", + "-XX:+ExitOnOutOfMemoryError", + "-XX:+CrashOnOutOfMemoryError", + "-XX:+HeapDumpOnOutOfMemoryError", + } + + gcOpts := []string{ + "-XX:+UseG1GC", + "-XX:MaxGCPauseMillis=10", + "-XX:+ParallelRefProcEnabled", + "-XX:+AggressiveOpts", + "-XX:+DoEscapeAnalysis", + "-XX:ParallelGCThreads=32", + "-XX:ConcGCThreads=32", + "-XX:G1NewSizePercent=50", + "-XX:+DisableExplicitGC", + "-XX:-ResizePLAB", + } + + gcLoggingOpts := []string{ + "-XX:+PrintGCDetails", + "-XX:+PrintGCDateStamps", + "-XX:+PrintGCApplicationStoppedTime", + "-XX:+UseGCLogFileRotation", + "-XX:NumberOfGCLogFiles=5", + "-XX:GCLogFileSize=64m", + } + + configData := map[string]string{ + "BOOKIE_MEM_OPTS": strings.Join(memoryOpts, " "), + "BOOKIE_GC_OPTS": strings.Join(gcOpts, " "), + "BOOKIE_GC_LOGGING_OPTS": strings.Join(gcLoggingOpts, " "), + "ZK_URL": ecsCluster.Spec.ZookeeperUri, + // Set useHostNameAsBookieID to false until BookKeeper Docker + // image is updated to 4.7 + // This value can be explicitly overridden when using the operator + // with images based on BookKeeper 4.7 or newer + "BK_useHostNameAsBookieID": "false", + "ECS_CLUSTER_NAME": ecsCluster.ObjectMeta.Name, + "WAIT_FOR": ecsCluster.Spec.ZookeeperUri, + } + + if *ecsCluster.Spec.Bookkeeper.AutoRecovery { + configData["BK_AUTORECOVERY"] = "true" + } + + for k, v := range ecsCluster.Spec.Bookkeeper.Options { + prefixKey := fmt.Sprintf("BK_%s", k) + configData[prefixKey] = v + } + + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: util.ConfigMapNameForBookie(ecsCluster.Name), + Namespace: ecsCluster.ObjectMeta.Namespace, + }, + Data: configData, + } +} + +func MakeBookiePodDisruptionBudget(ecsCluster *v1alpha1.ECSCluster) *policyv1beta1.PodDisruptionBudget { + maxUnavailable := intstr.FromInt(1) + return &policyv1beta1.PodDisruptionBudget{ + TypeMeta: metav1.TypeMeta{ + Kind: "PodDisruptionBudget", + APIVersion: "policy/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: util.PdbNameForBookie(ecsCluster.Name), + Namespace: 
ecsCluster.Namespace, + }, + Spec: policyv1beta1.PodDisruptionBudgetSpec{ + MaxUnavailable: &maxUnavailable, + Selector: &metav1.LabelSelector{ + MatchLabels: util.LabelsForBookie(ecsCluster), + }, + }, + } +} diff --git a/ecs-operator/cluster-operator/pkg/controller/nautilus/nautilus_controller.go b/ecs-operator/cluster-operator/pkg/controller/nautilus/nautilus_controller.go new file mode 100644 index 0000000..211d9ef --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/controller/nautilus/nautilus_controller.go @@ -0,0 +1,213 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package ecs + +import ( + "strings" + + "fmt" + + api "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1" + "github.com/ecs/ecs-operator/pkg/util" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func MakeControllerDeployment(p *api.ECSCluster) *appsv1.Deployment { + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: util.DeploymentNameForController(p.Name), + Namespace: p.Namespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &p.Spec.ECS.ControllerReplicas, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: util.LabelsForController(p), + }, + Spec: *makeControllerPodSpec(p.Name, p.Spec.ECS), + }, + Selector: &metav1.LabelSelector{ + MatchLabels: util.LabelsForController(p), + }, + }, + } +} + +func makeControllerPodSpec(name string, ecsSpec *api.ECSSpec) *corev1.PodSpec { + podSpec := &corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "ecs-controller", + Image: ecsSpec.Image.String(), + ImagePullPolicy: ecsSpec.Image.PullPolicy, + Args: []string{ + "controller", + }, + Ports: []corev1.ContainerPort{ + { + Name: "rest", + ContainerPort: 10080, + }, + { + Name: "grpc", + ContainerPort: 9090, + }, + }, + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: util.ConfigMapNameForController(name), + }, + }, + }, + }, + Resources: *ecsSpec.ControllerResources, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: util.HealthcheckCommand(9090), + }, + }, + // Controller pods start fast. We give it up to 1 minute to become ready. + PeriodSeconds: 5, + FailureThreshold: 12, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: util.HealthcheckCommand(9090), + }, + }, + // We start the liveness probe from the maximum time the pod can take + // before becoming ready. + // If the pod fails the health check during 1 minute, Kubernetes + // will restart it. 
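+                    // (After the 60s initial delay, FailureThreshold*PeriodSeconds = 4*15s = 60s
+                    // of consecutive failures triggers the restart.)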
+ InitialDelaySeconds: 60, + PeriodSeconds: 15, + FailureThreshold: 4, + }, + }, + }, + Affinity: util.PodAntiAffinity("ecs-controller", name), + } + + if ecsSpec.ControllerServiceAccountName != "" { + podSpec.ServiceAccountName = ecsSpec.ControllerServiceAccountName + } + + return podSpec +} + +func MakeControllerConfigMap(p *api.ECSCluster) *corev1.ConfigMap { + var javaOpts = []string{ + "-Xms512m", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseCGroupMemoryLimitForHeap", + "-XX:MaxRAMFraction=2", + "-XX:+ExitOnOutOfMemoryError", + "-XX:+CrashOnOutOfMemoryError", + "-XX:+HeapDumpOnOutOfMemoryError", + "-Decsservice.clusterName=" + p.Name, + } + + for name, value := range p.Spec.ECS.Options { + javaOpts = append(javaOpts, fmt.Sprintf("-D%v=%v", name, value)) + } + + configData := map[string]string{ + "CLUSTER_NAME": p.Name, + "ZK_URL": p.Spec.ZookeeperUri, + "JAVA_OPTS": strings.Join(javaOpts, " "), + "REST_SERVER_PORT": "10080", + "CONTROLLER_SERVER_PORT": "9090", + "AUTHORIZATION_ENABLED": "false", + "TOKEN_SIGNING_KEY": "secret", + "USER_PASSWORD_FILE": "/etc/ecs/conf/passwd", + "TLS_ENABLED": "false", + "WAIT_FOR": p.Spec.ZookeeperUri, + } + + configMap := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: util.ConfigMapNameForController(p.Name), + Labels: util.LabelsForController(p), + Namespace: p.Namespace, + }, + Data: configData, + } + + return configMap +} + +func MakeControllerService(p *api.ECSCluster) *corev1.Service { + serviceType := corev1.ServiceTypeClusterIP + if p.Spec.ExternalAccess.Enabled { + serviceType = p.Spec.ExternalAccess.Type + } + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: util.ServiceNameForController(p.Name), + Namespace: p.Namespace, + Labels: util.LabelsForController(p), + }, + Spec: corev1.ServiceSpec{ + Type: serviceType, + Ports: []corev1.ServicePort{ + { + Name: "rest", + Port: 10080, + }, + { + Name: "grpc", + Port: 9090, + }, + }, + Selector: util.LabelsForController(p), + }, + } +} + +func MakeControllerPodDisruptionBudget(ecsCluster *api.ECSCluster) *policyv1beta1.PodDisruptionBudget { + minAvailable := intstr.FromInt(1) + return &policyv1beta1.PodDisruptionBudget{ + TypeMeta: metav1.TypeMeta{ + Kind: "PodDisruptionBudget", + APIVersion: "policy/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: util.PdbNameForController(ecsCluster.Name), + Namespace: ecsCluster.Namespace, + }, + Spec: policyv1beta1.PodDisruptionBudgetSpec{ + MinAvailable: &minAvailable, + Selector: &metav1.LabelSelector{ + MatchLabels: util.LabelsForController(ecsCluster), + }, + }, + } +} diff --git a/ecs-operator/cluster-operator/pkg/controller/nautilus/nautilus_node.go b/ecs-operator/cluster-operator/pkg/controller/nautilus/nautilus_node.go new file mode 100644 index 0000000..3a991fd --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/controller/nautilus/nautilus_node.go @@ -0,0 +1,365 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package ecs
+
+import (
+    "fmt"
+    "strings"
+
+    api "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1"
+    "github.com/ecs/ecs-operator/pkg/util"
+    appsv1 "k8s.io/api/apps/v1"
+    corev1 "k8s.io/api/core/v1"
+    policyv1beta1 "k8s.io/api/policy/v1beta1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+    cacheVolumeName       = "cache"
+    cacheVolumeMountPoint = "/tmp/ecs/cache"
+    tier2FileMountPoint   = "/mnt/tier2"
+    tier2VolumeName       = "tier2"
+    nodeKind              = "ecs-node"
+)
+
+func MakeNodeStatefulSet(ecsCluster *api.ECSCluster) *appsv1.StatefulSet {
+    return &appsv1.StatefulSet{
+        TypeMeta: metav1.TypeMeta{
+            Kind:       "StatefulSet",
+            APIVersion: "apps/v1",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      util.StatefulSetNameForNode(ecsCluster.Name),
+            Namespace: ecsCluster.Namespace,
+        },
+        Spec: appsv1.StatefulSetSpec{
+            ServiceName:         "ecs-node",
+            Replicas:            &ecsCluster.Spec.ECS.NodeReplicas,
+            PodManagementPolicy: appsv1.OrderedReadyPodManagement,
+            Template: corev1.PodTemplateSpec{
+                ObjectMeta: metav1.ObjectMeta{
+                    Labels: util.LabelsForNode(ecsCluster),
+                },
+                Spec: makeNodePodSpec(ecsCluster),
+            },
+            Selector: &metav1.LabelSelector{
+                MatchLabels: util.LabelsForNode(ecsCluster),
+            },
+            VolumeClaimTemplates: makeCacheVolumeClaimTemplate(ecsCluster.Spec.ECS),
+        },
+    }
+}
+
+func makeNodePodSpec(ecsCluster *api.ECSCluster) corev1.PodSpec {
+    environment := []corev1.EnvFromSource{
+        {
+            ConfigMapRef: &corev1.ConfigMapEnvSource{
+                LocalObjectReference: corev1.LocalObjectReference{
+                    Name: util.ConfigMapNameForNode(ecsCluster.Name),
+                },
+            },
+        },
+    }
+
+    ecsSpec := ecsCluster.Spec.ECS
+
+    environment = configureTier2Secrets(environment, ecsSpec)
+
+    podSpec := corev1.PodSpec{
+        Containers: []corev1.Container{
+            {
+                Name:            "ecs-node",
+                Image:           ecsSpec.Image.String(),
+                ImagePullPolicy: ecsSpec.Image.PullPolicy,
+                Args: []string{
+                    "node",
+                },
+                Ports: []corev1.ContainerPort{
+                    {
+                        Name:          "server",
+                        ContainerPort: 12345,
+                    },
+                },
+                EnvFrom: environment,
+                Env:     util.DownwardAPIEnv(),
+                VolumeMounts: []corev1.VolumeMount{
+                    {
+                        Name:      cacheVolumeName,
+                        MountPath: cacheVolumeMountPoint,
+                    },
+                },
+                Resources: *ecsSpec.NodeResources,
+                ReadinessProbe: &corev1.Probe{
+                    Handler: corev1.Handler{
+                        Exec: &corev1.ExecAction{
+                            Command: util.HealthcheckCommand(12345),
+                        },
+                    },
+                    // Segment Stores can take a few minutes to become ready when the cluster
+                    // is configured with external access enabled, as they need to wait for the
+                    // allocation of the external IP address.
+                    // This config gives them up to 5 minutes to become ready.
+                    PeriodSeconds:    10,
+                    FailureThreshold: 30,
+                },
+                LivenessProbe: &corev1.Probe{
+                    Handler: corev1.Handler{
+                        Exec: &corev1.ExecAction{
+                            Command: util.HealthcheckCommand(12345),
+                        },
+                    },
+                    // In the readiness probe we allow the pod to take up to 5 minutes
+                    // to become ready. Therefore, the liveness probe will give it
+                    // a 5-minute grace period before starting to monitor the container.
+                    // If the pod fails the health check for 1 minute, Kubernetes
+                    // will restart it.
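+                    // (The 300s initial delay mirrors the readiness window above; a restart then
+                    // needs FailureThreshold*PeriodSeconds = 4*15s = 60s of consecutive failures.)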
+ InitialDelaySeconds: 300, + PeriodSeconds: 15, + FailureThreshold: 4, + }, + }, + }, + Affinity: util.PodAntiAffinity("ecs-node", ecsCluster.Name), + } + + if ecsSpec.NodeServiceAccountName != "" { + podSpec.ServiceAccountName = ecsSpec.NodeServiceAccountName + } + + configureTier2Filesystem(&podSpec, ecsSpec) + + return podSpec +} + +func MakeNodeConfigMap(p *api.ECSCluster) *corev1.ConfigMap { + javaOpts := []string{ + "-Xms1g", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseCGroupMemoryLimitForHeap", + "-XX:MaxRAMFraction=2", + "-XX:+ExitOnOutOfMemoryError", + "-XX:+CrashOnOutOfMemoryError", + "-XX:+HeapDumpOnOutOfMemoryError", + "-Decsservice.clusterName=" + p.Name, + } + + for name, value := range p.Spec.ECS.Options { + javaOpts = append(javaOpts, fmt.Sprintf("-D%v=%v", name, value)) + } + + configData := map[string]string{ + "AUTHORIZATION_ENABLED": "false", + "CLUSTER_NAME": p.Name, + "ZK_URL": p.Spec.ZookeeperUri, + "JAVA_OPTS": strings.Join(javaOpts, " "), + "CONTROLLER_URL": util.ECSControllerServiceURL(*p), + } + + // Wait for at least 3 Bookies to come up + var waitFor []string + for i := int32(0); i < util.Min(3, p.Spec.Bookkeeper.Replicas); i++ { + waitFor = append(waitFor, + fmt.Sprintf("%s-%d.%s.%s:3181", + util.StatefulSetNameForBookie(p.Name), + i, + util.HeadlessServiceNameForBookie(p.Name), + p.Namespace)) + } + configData["WAIT_FOR"] = strings.Join(waitFor, ",") + + if p.Spec.ExternalAccess.Enabled { + configData["K8_EXTERNAL_ACCESS"] = "true" + } + + if p.Spec.ECS.DebugLogging { + configData["log.level"] = "DEBUG" + } + + for k, v := range getTier2StorageOptions(p.Spec.ECS) { + configData[k] = v + } + + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: util.ConfigMapNameForNode(p.Name), + Namespace: p.Namespace, + Labels: util.LabelsForNode(p), + }, + Data: configData, + } +} + +func makeCacheVolumeClaimTemplate(ecsSpec *api.ECSSpec) []corev1.PersistentVolumeClaim { + return []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: cacheVolumeName, + }, + Spec: *ecsSpec.CacheVolumeClaimTemplate, + }, + } +} + +func getTier2StorageOptions(ecsSpec *api.ECSSpec) map[string]string { + if ecsSpec.Tier2.FileSystem != nil { + return map[string]string{ + "TIER2_STORAGE": "FILESYSTEM", + "NFS_MOUNT": tier2FileMountPoint, + } + } + + if ecsSpec.Tier2.ECS != nil { + // EXTENDEDS3_ACCESS_KEY_ID & EXTENDEDS3_SECRET_KEY will come from secret storage + return map[string]string{ + "TIER2_STORAGE": "EXTENDEDS3", + "EXTENDEDS3_BUCKET": ecsSpec.Tier2.ECS.Bucket, + "EXTENDEDS3_URI": ecsSpec.Tier2.ECS.Uri, + "EXTENDEDS3_ROOT": ecsSpec.Tier2.ECS.Root, + "EXTENDEDS3_NAMESPACE": ecsSpec.Tier2.ECS.Namespace, + } + } + + if ecsSpec.Tier2.Hdfs != nil { + return map[string]string{ + "TIER2_STORAGE": "HDFS", + "HDFS_URL": ecsSpec.Tier2.Hdfs.Uri, + "HDFS_ROOT": ecsSpec.Tier2.Hdfs.Root, + } + } + + return make(map[string]string) +} + +func configureTier2Secrets(environment []corev1.EnvFromSource, ecsSpec *api.ECSSpec) []corev1.EnvFromSource { + if ecsSpec.Tier2.ECS != nil { + return append(environment, corev1.EnvFromSource{ + Prefix: "EXTENDEDS3_", + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: ecsSpec.Tier2.ECS.Credentials, + }, + }, + }) + } + + return environment +} + +func configureTier2Filesystem(podSpec *corev1.PodSpec, ecsSpec *api.ECSSpec) { + + if ecsSpec.Tier2.FileSystem != nil { + podSpec.Containers[0].VolumeMounts = 
append(podSpec.Containers[0].VolumeMounts, corev1.VolumeMount{ + Name: tier2VolumeName, + MountPath: tier2FileMountPoint, + }) + + podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{ + Name: tier2VolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: ecsSpec.Tier2.FileSystem.PersistentVolumeClaim, + }, + }) + } +} + +func MakeNodeHeadlessService(ecsCluster *api.ECSCluster) *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: util.HeadlessServiceNameForNode(ecsCluster.Name), + Namespace: ecsCluster.Namespace, + Labels: util.LabelsForNode(ecsCluster), + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: "server", + Port: 12345, + Protocol: "TCP", + }, + }, + Selector: util.LabelsForNode(ecsCluster), + ClusterIP: corev1.ClusterIPNone, + }, + } +} + +func MakeNodeExternalServices(ecsCluster *api.ECSCluster) []*corev1.Service { + var service *corev1.Service + services := make([]*corev1.Service, ecsCluster.Spec.ECS.NodeReplicas) + + for i := int32(0); i < ecsCluster.Spec.ECS.NodeReplicas; i++ { + service = &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: util.ServiceNameForNode(ecsCluster.Name, i), + Namespace: ecsCluster.Namespace, + Labels: util.LabelsForNode(ecsCluster), + }, + Spec: corev1.ServiceSpec{ + Type: ecsCluster.Spec.ExternalAccess.Type, + Ports: []corev1.ServicePort{ + { + Name: "server", + Port: 12345, + Protocol: "TCP", + TargetPort: intstr.FromInt(12345), + }, + }, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeLocal, + Selector: map[string]string{ + appsv1.StatefulSetPodNameLabel: fmt.Sprintf("%s-%d", util.StatefulSetNameForNode(ecsCluster.Name), i), + }, + }, + } + services[i] = service + } + return services +} + +func MakeNodePodDisruptionBudget(ecsCluster *api.ECSCluster) *policyv1beta1.PodDisruptionBudget { + var maxUnavailable intstr.IntOrString + + if ecsCluster.Spec.ECS.NodeReplicas == int32(1) { + maxUnavailable = intstr.FromInt(0) + } else { + maxUnavailable = intstr.FromInt(1) + } + + return &policyv1beta1.PodDisruptionBudget{ + TypeMeta: metav1.TypeMeta{ + Kind: "PodDisruptionBudget", + APIVersion: "policy/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: util.PdbNameForNode(ecsCluster.Name), + Namespace: ecsCluster.Namespace, + }, + Spec: policyv1beta1.PodDisruptionBudgetSpec{ + MaxUnavailable: &maxUnavailable, + Selector: &metav1.LabelSelector{ + MatchLabels: util.LabelsForNode(ecsCluster), + }, + }, + } +} diff --git a/ecs-operator/cluster-operator/pkg/controller/nautiluscluster/nautiluscluster_controller.go b/ecs-operator/cluster-operator/pkg/controller/nautiluscluster/nautiluscluster_controller.go new file mode 100644 index 0000000..6035d59 --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/controller/nautiluscluster/nautiluscluster_controller.go @@ -0,0 +1,482 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package ecscluster
+
+import (
+    "context"
+    "fmt"
+    "time"
+
+    ecsv1alpha1 "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1"
+    "github.com/ecs/ecs-operator/pkg/controller/ecs"
+    "github.com/ecs/ecs-operator/pkg/util"
+
+    appsv1 "k8s.io/api/apps/v1"
+    corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
+
+    "k8s.io/apimachinery/pkg/api/errors"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/types"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+    "sigs.k8s.io/controller-runtime/pkg/controller"
+    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+    "sigs.k8s.io/controller-runtime/pkg/handler"
+    "sigs.k8s.io/controller-runtime/pkg/manager"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
+    "sigs.k8s.io/controller-runtime/pkg/source"
+
+    log "github.com/sirupsen/logrus"
+)
+
+// ReconcileTime is the delay between reconciliations
+const ReconcileTime = 30 * time.Second
+
+// Add creates a new ECSCluster Controller and adds it to the Manager. The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
+func Add(mgr manager.Manager) error {
+    return add(mgr, newReconciler(mgr))
+}
+
+// newReconciler returns a new reconcile.Reconciler
+func newReconciler(mgr manager.Manager) reconcile.Reconciler {
+    return &ReconcileECSCluster{client: mgr.GetClient(), scheme: mgr.GetScheme()}
+}
+
+// add adds a new Controller to mgr with r as the reconcile.Reconciler
+func add(mgr manager.Manager, r reconcile.Reconciler) error {
+    // Create a new controller
+    c, err := controller.New("ecscluster-controller", mgr, controller.Options{Reconciler: r})
+    if err != nil {
+        return err
+    }
+
+    // Watch for changes to primary resource ECSCluster
+    err = c.Watch(&source.Kind{Type: &ecsv1alpha1.ECSCluster{}}, &handler.EnqueueRequestForObject{})
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
+
+var _ reconcile.Reconciler = &ReconcileECSCluster{}
+
+// ReconcileECSCluster reconciles an ECSCluster object
+type ReconcileECSCluster struct {
+    // This client, initialized using mgr.GetClient() above, is a split client
+    // that reads objects from the cache and writes to the apiserver
+    client client.Client
+    scheme *runtime.Scheme
+}
+
+// Reconcile reads the state of the cluster for an ECSCluster object and makes changes based on the state read
+// and what is in the ECSCluster.Spec.
+// Note:
+// The Controller will requeue the Request to be processed again if the returned error is non-nil or
+// Result.Requeue is true; otherwise, upon completion it will remove the work from the queue.
+func (r *ReconcileECSCluster) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+    log.Printf("Reconciling ECSCluster %s/%s\n", request.Namespace, request.Name)
+
+    // Fetch the ECSCluster instance
+    ecsCluster := &ecsv1alpha1.ECSCluster{}
+    err := r.client.Get(context.TODO(), request.NamespacedName, ecsCluster)
+    if err != nil {
+        if errors.IsNotFound(err) {
+            // Request object not found, could have been deleted after reconcile request.
+            // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
+            // Return and don't requeue
+            log.Printf("ECSCluster %s/%s not found. Ignoring since object must be deleted\n", request.Namespace, request.Name)
+            return reconcile.Result{}, nil
+        }
+        // Error reading the object - requeue the request.
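+        // Returning a non-nil error causes controller-runtime to requeue
+        // this request with backoff.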
+ log.Printf("failed to get ECSCluster: %v", err) + return reconcile.Result{}, err + } + + // Set default configuration for unspecified values + changed := ecsCluster.WithDefaults() + if changed { + log.Printf("Setting default settings for ecs-cluster: %s", request.Name) + if err = r.client.Update(context.TODO(), ecsCluster); err != nil { + return reconcile.Result{}, err + } + return reconcile.Result{Requeue: true}, nil + } + + err = r.run(ecsCluster) + if err != nil { + log.Printf("failed to reconcile ecs cluster (%s): %v", ecsCluster.Name, err) + return reconcile.Result{}, err + } + + return reconcile.Result{RequeueAfter: ReconcileTime}, nil +} + +func (r *ReconcileECSCluster) run(p *ecsv1alpha1.ECSCluster) (err error) { + // Clean up zookeeper metadata + err = r.reconcileFinalizers(p) + if err != nil { + log.Printf("failed to clean up zookeeper: %v", err) + return err + } + + err = r.deployCluster(p) + if err != nil { + log.Printf("failed to deploy cluster: %v", err) + return err + } + + err = r.syncClusterSize(p) + if err != nil { + log.Printf("failed to sync cluster size: %v", err) + return err + } + + err = r.reconcileClusterStatus(p) + if err != nil { + log.Printf("failed to reconcile cluster status: %v", err) + return err + } + return nil +} + +func (r *ReconcileECSCluster) deployCluster(p *ecsv1alpha1.ECSCluster) (err error) { + err = r.deployBookie(p) + if err != nil { + log.Printf("failed to deploy bookie: %v", err) + return err + } + + err = r.deployController(p) + if err != nil { + log.Printf("failed to deploy controller: %v", err) + return err + } + + err = r.deployNode(p) + if err != nil { + log.Printf("failed to deploy segment store: %v", err) + return err + } + return nil +} + +func (r *ReconcileECSCluster) deployController(p *ecsv1alpha1.ECSCluster) (err error) { + pdb := ecs.MakeControllerPodDisruptionBudget(p) + controllerutil.SetControllerReference(p, pdb, r.scheme) + err = r.client.Create(context.TODO(), pdb) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + + configMap := ecs.MakeControllerConfigMap(p) + controllerutil.SetControllerReference(p, configMap, r.scheme) + err = r.client.Create(context.TODO(), configMap) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + + deployment := ecs.MakeControllerDeployment(p) + controllerutil.SetControllerReference(p, deployment, r.scheme) + err = r.client.Create(context.TODO(), deployment) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + + service := ecs.MakeControllerService(p) + controllerutil.SetControllerReference(p, service, r.scheme) + err = r.client.Create(context.TODO(), service) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + + return nil +} + +func (r *ReconcileECSCluster) deployNode(p *ecsv1alpha1.ECSCluster) (err error) { + + headlessService := ecs.MakeNodeHeadlessService(p) + controllerutil.SetControllerReference(p, headlessService, r.scheme) + err = r.client.Create(context.TODO(), headlessService) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + + if p.Spec.ExternalAccess.Enabled { + services := ecs.MakeNodeExternalServices(p) + for _, service := range services { + controllerutil.SetControllerReference(p, service, r.scheme) + err = r.client.Create(context.TODO(), service) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + } + } + + pdb := ecs.MakeNodePodDisruptionBudget(p) + controllerutil.SetControllerReference(p, pdb, r.scheme) + err = r.client.Create(context.TODO(), pdb) + if err != nil && 
!errors.IsAlreadyExists(err) { + return err + } + + configMap := ecs.MakeNodeConfigMap(p) + controllerutil.SetControllerReference(p, configMap, r.scheme) + err = r.client.Create(context.TODO(), configMap) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + + statefulSet := ecs.MakeNodeStatefulSet(p) + controllerutil.SetControllerReference(p, statefulSet, r.scheme) + for i := range statefulSet.Spec.VolumeClaimTemplates { + controllerutil.SetControllerReference(p, &statefulSet.Spec.VolumeClaimTemplates[i], r.scheme) + } + err = r.client.Create(context.TODO(), statefulSet) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + + return nil +} + +func (r *ReconcileECSCluster) deployBookie(p *ecsv1alpha1.ECSCluster) (err error) { + headlessService := ecs.MakeBookieHeadlessService(p) + controllerutil.SetControllerReference(p, headlessService, r.scheme) + err = r.client.Create(context.TODO(), headlessService) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + + pdb := ecs.MakeBookiePodDisruptionBudget(p) + controllerutil.SetControllerReference(p, pdb, r.scheme) + err = r.client.Create(context.TODO(), pdb) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + + configMap := ecs.MakeBookieConfigMap(p) + controllerutil.SetControllerReference(p, configMap, r.scheme) + err = r.client.Create(context.TODO(), configMap) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + + statefulSet := ecs.MakeBookieStatefulSet(p) + controllerutil.SetControllerReference(p, statefulSet, r.scheme) + for i := range statefulSet.Spec.VolumeClaimTemplates { + controllerutil.SetControllerReference(p, &statefulSet.Spec.VolumeClaimTemplates[i], r.scheme) + } + err = r.client.Create(context.TODO(), statefulSet) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + + return nil +} + +func (r *ReconcileECSCluster) syncClusterSize(p *ecsv1alpha1.ECSCluster) (err error) { + err = r.syncBookieSize(p) + if err != nil { + return err + } + + err = r.syncNodeSize(p) + if err != nil { + return err + } + + err = r.syncControllerSize(p) + if err != nil { + return err + } + + return nil +} + +func (r *ReconcileECSCluster) syncBookieSize(p *ecsv1alpha1.ECSCluster) (err error) { + sts := &appsv1.StatefulSet{} + name := util.StatefulSetNameForBookie(p.Name) + err = r.client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: p.Namespace}, sts) + if err != nil { + return fmt.Errorf("failed to get stateful-set (%s): %v", sts.Name, err) + } + + if *sts.Spec.Replicas != p.Spec.Bookkeeper.Replicas { + sts.Spec.Replicas = &(p.Spec.Bookkeeper.Replicas) + err = r.client.Update(context.TODO(), sts) + if err != nil { + return fmt.Errorf("failed to update size of stateful-set (%s): %v", sts.Name, err) + } + + err = r.syncStatefulSetPvc(sts) + if err != nil { + return fmt.Errorf("failed to sync pvcs of stateful-set (%s): %v", sts.Name, err) + } + } + return nil +} + +func (r *ReconcileECSCluster) syncNodeSize(p *ecsv1alpha1.ECSCluster) (err error) { + sts := &appsv1.StatefulSet{} + name := util.StatefulSetNameForNode(p.Name) + err = r.client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: p.Namespace}, sts) + if err != nil { + return fmt.Errorf("failed to get stateful-set (%s): %v", sts.Name, err) + } + + if *sts.Spec.Replicas != p.Spec.ECS.NodeReplicas { + sts.Spec.Replicas = &(p.Spec.ECS.NodeReplicas) + err = r.client.Update(context.TODO(), sts) + if err != nil { + return fmt.Errorf("failed to update size of stateful-set (%s): %v", 
sts.Name, err) + } + + err = r.syncStatefulSetPvc(sts) + if err != nil { + return fmt.Errorf("failed to sync pvcs of stateful-set (%s): %v", sts.Name, err) + } + } + return nil +} + +func (r *ReconcileECSCluster) syncControllerSize(p *ecsv1alpha1.ECSCluster) (err error) { + deploy := &appsv1.Deployment{} + name := util.DeploymentNameForController(p.Name) + err = r.client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: p.Namespace}, deploy) + if err != nil { + return fmt.Errorf("failed to get deployment (%s): %v", deploy.Name, err) + } + + if *deploy.Spec.Replicas != p.Spec.ECS.ControllerReplicas { + deploy.Spec.Replicas = &(p.Spec.ECS.ControllerReplicas) + err = r.client.Update(context.TODO(), deploy) + if err != nil { + return fmt.Errorf("failed to update size of deployment (%s): %v", deploy.Name, err) + } + } + return nil +} + +func (r *ReconcileECSCluster) reconcileFinalizers(p *ecsv1alpha1.ECSCluster) (err error) { + if p.DeletionTimestamp.IsZero() { + if !util.ContainsString(p.ObjectMeta.Finalizers, util.ZkFinalizer) { + p.ObjectMeta.Finalizers = append(p.ObjectMeta.Finalizers, util.ZkFinalizer) + if err = r.client.Update(context.TODO(), p); err != nil { + return fmt.Errorf("failed to add the finalizer (%s): %v", p.Name, err) + } + } + } else { + if util.ContainsString(p.ObjectMeta.Finalizers, util.ZkFinalizer) { + p.ObjectMeta.Finalizers = util.RemoveString(p.ObjectMeta.Finalizers, util.ZkFinalizer) + if err = r.client.Update(context.TODO(), p); err != nil { + return fmt.Errorf("failed to update ECS object (%s): %v", p.Name, err) + } + if err = r.cleanUpZookeeperMeta(p); err != nil { + return fmt.Errorf("failed to clean up metadata (%s): %v", p.Name, err) + } + } + } + return nil +} + +func (r *ReconcileECSCluster) cleanUpZookeeperMeta(p *ecsv1alpha1.ECSCluster) (err error) { + if err = util.WaitForClusterToTerminate(r.client, p); err != nil { + return fmt.Errorf("failed to wait for cluster pods termination (%s): %v", p.Name, err) + } + + if err = util.DeleteAllZnodes(p); err != nil { + return fmt.Errorf("failed to delete zookeeper znodes for (%s): %v", p.Name, err) + } + return nil +} + +func (r *ReconcileECSCluster) syncStatefulSetPvc(sts *appsv1.StatefulSet) error { + selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ + MatchLabels: sts.Spec.Template.Labels, + }) + if err != nil { + return fmt.Errorf("failed to convert label selector: %v", err) + } + + pvcList := &corev1.PersistentVolumeClaimList{} + pvclistOps := &client.ListOptions{ + Namespace: sts.Namespace, + LabelSelector: selector, + } + err = r.client.List(context.TODO(), pvclistOps, pvcList) + if err != nil { + return err + } + + for _, pvcItem := range pvcList.Items { + if util.PvcIsOrphan(pvcItem.Name, *sts.Spec.Replicas) { + pvcDelete := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcItem.Name, + Namespace: pvcItem.Namespace, + }, + } + + err = r.client.Delete(context.TODO(), pvcDelete) + if err != nil { + return fmt.Errorf("failed to delete pvc: %v", err) + } + } + } + return nil +} + +func (r *ReconcileECSCluster) reconcileClusterStatus(p *ecsv1alpha1.ECSCluster) error { + expectedSize := util.GetClusterExpectedSize(p) + listOps := &client.ListOptions{ + Namespace: p.Namespace, + LabelSelector: labels.SelectorFromSet(util.LabelsForECSCluster(p)), + } + podList := &corev1.PodList{} + err := r.client.List(context.TODO(), listOps, podList) + if err != nil { + return err + } + + var ( + readyMembers []string + unreadyMembers []string + ) + + for _, p := 
range podList.Items { + if util.IsPodReady(&p) { + readyMembers = append(readyMembers, p.Name) + } else { + unreadyMembers = append(unreadyMembers, p.Name) + } + } + + if len(readyMembers) == expectedSize { + p.Status.SetPodsReadyConditionTrue() + } else { + p.Status.SetPodsReadyConditionFalse() + } + + p.Status.Replicas = int32(expectedSize) + p.Status.CurrentReplicas = int32(len(podList.Items)) + p.Status.ReadyReplicas = int32(len(readyMembers)) + p.Status.Members.Ready = readyMembers + p.Status.Members.Unready = unreadyMembers + + err = r.client.Status().Update(context.TODO(), p) + if err != nil { + return fmt.Errorf("failed to update cluster status: %v", err) + } + return nil +} diff --git a/ecs-operator/cluster-operator/pkg/controller/nautiluscluster/nautiluscluster_controller_test.go b/ecs-operator/cluster-operator/pkg/controller/nautiluscluster/nautiluscluster_controller_test.go new file mode 100644 index 0000000..f7444c4 --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/controller/nautiluscluster/nautiluscluster_controller_test.go @@ -0,0 +1,225 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package ecscluster + +import ( + "context" + "testing" + + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1" + "github.com/ecs/ecs-operator/pkg/util" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestBookie(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "ECS cluster") +} + +var _ = Describe("ECSCluster Controller", func() { + const ( + Name = "example" + Namespace = "default" + ) + + var ( + s = scheme.Scheme + r *ReconcileECSCluster + ) + + Context("Reconcile", func() { + var ( + req reconcile.Request + p *v1alpha1.ECSCluster + ) + + BeforeEach(func() { + req = reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: Name, + Namespace: Namespace, + }, + } + p = &v1alpha1.ECSCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: Name, + Namespace: Namespace, + }, + } + s.AddKnownTypes(v1alpha1.SchemeGroupVersion, p) + }) + + Context("Default spec", func() { + var ( + client client.Client + err error + ) + + BeforeEach(func() { + p.WithDefaults() + client = fake.NewFakeClient(p) + r = &ReconcileECSCluster{client: client, scheme: s} + _, err = r.Reconcile(req) + }) + + It("shouldn't error", func() { + Ω(err).Should(BeNil()) + }) + + Context("Default bookkeeper", func() { + It("should have a default bookie resource", func() { + foundBk := &appsv1.StatefulSet{} + nn := types.NamespacedName{ + Name: util.StatefulSetNameForBookie(p.Name), + Namespace: Namespace, + } + err = client.Get(context.TODO(), nn, foundBk) + Ω(err).Should(BeNil()) + Ω(foundBk.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().String()).Should(Equal("500m")) + Ω(foundBk.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String()).Should(Equal("1Gi")) + Ω(foundBk.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String()).Should(Equal("1")) + Ω(foundBk.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String()).Should(Equal("2Gi")) + }) + }) + + Context("Default ECS controller", func() { + It("should have a default controller resource", func() { + foundController := &appsv1.Deployment{} + nn := types.NamespacedName{ + Name: util.DeploymentNameForController(p.Name), + Namespace: Namespace, + } + err = client.Get(context.TODO(), nn, foundController) + Ω(err).Should(BeNil()) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().String()).Should(Equal("250m")) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String()).Should(Equal("512Mi")) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String()).Should(Equal("500m")) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String()).Should(Equal("1Gi")) + }) + }) + + Context("Default ECS node", func() { + It("should have a default controller resource", func() { + foundSS := &appsv1.StatefulSet{} + nn := types.NamespacedName{ + Name: util.StatefulSetNameForNode(p.Name), + Namespace: Namespace, + } + err = client.Get(context.TODO(), nn, foundSS) + Ω(err).Should(BeNil()) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().String()).Should(Equal("500m")) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String()).Should(Equal("1Gi")) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String()).Should(Equal("1")) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String()).Should(Equal("2Gi")) + }) + }) + }) + + Context("Custom spec", func() { + var ( + client client.Client + err error + customReq *corev1.ResourceRequirements + ) + + BeforeEach(func() { + customReq = &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: 
resource.MustParse("4Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("6Gi"), + }, + } + p.Spec = v1alpha1.ClusterSpec{ + Bookkeeper: &v1alpha1.BookkeeperSpec{ + Resources: customReq, + }, + ECS: &v1alpha1.ECSSpec{ + ControllerResources: customReq, + NodeResources: customReq, + }, + } + p.WithDefaults() + client = fake.NewFakeClient(p) + r = &ReconcileECSCluster{client: client, scheme: s} + _, err = r.Reconcile(req) + }) + + It("shouldn't error", func() { + Ω(err).Should(BeNil()) + }) + + Context("Custom bookkeeper", func() { + It("should have a custom bookie resource", func() { + foundBK := &appsv1.StatefulSet{} + nn := types.NamespacedName{ + Name: util.StatefulSetNameForBookie(p.Name), + Namespace: Namespace, + } + err = client.Get(context.TODO(), nn, foundBK) + Ω(err).Should(BeNil()) + Ω(foundBK.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().String()).Should(Equal("2")) + Ω(foundBK.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String()).Should(Equal("4Gi")) + Ω(foundBK.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String()).Should(Equal("4")) + Ω(foundBK.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String()).Should(Equal("6Gi")) + }) + }) + + Context("Custom ECS controller", func() { + It("should have a custom controller resource", func() { + foundController := &appsv1.Deployment{} + nn := types.NamespacedName{ + Name: util.DeploymentNameForController(p.Name), + Namespace: Namespace, + } + err = client.Get(context.TODO(), nn, foundController) + Ω(err).Should(BeNil()) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().String()).Should(Equal("2")) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String()).Should(Equal("4Gi")) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String()).Should(Equal("4")) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String()).Should(Equal("6Gi")) + }) + }) + + Context("Custom ECS node", func() { + It("should have a custom node resource", func() { + foundSS := &appsv1.StatefulSet{} + nn := types.NamespacedName{ + Name: util.StatefulSetNameForNode(p.Name), + Namespace: Namespace, + } + err = client.Get(context.TODO(), nn, foundSS) + Ω(err).Should(BeNil()) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().String()).Should(Equal("2")) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String()).Should(Equal("4Gi")) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String()).Should(Equal("4")) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String()).Should(Equal("6Gi")) + }) + }) + }) + }) +}) diff --git a/ecs-operator/cluster-operator/pkg/test/e2e/e2eutil/nautiluscluster_util.go b/ecs-operator/cluster-operator/pkg/test/e2e/e2eutil/nautiluscluster_util.go new file mode 100644 index 0000000..edbbd7b --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/test/e2e/e2eutil/nautiluscluster_util.go @@ -0,0 +1,279 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package e2eutil + +import ( + goctx "context" + "fmt" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + framework "github.com/operator-framework/operator-sdk/pkg/test" + api "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1" + "github.com/ecs/ecs-operator/pkg/util" + apierrors "k8s.io/apimachinery/pkg/api/errors" +) + +var ( + RetryInterval = time.Second * 5 + Timeout = time.Second * 60 + CleanupRetryInterval = time.Second * 1 + CleanupTimeout = time.Second * 5 +) + +// CreateCluster creates a ECSCluster CR with the desired spec +func CreateCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.ECSCluster) (*api.ECSCluster, error) { + t.Logf("creating ecs cluster: %s", p.Name) + err := f.Client.Create(goctx.TODO(), p, &framework.CleanupOptions{TestContext: ctx, Timeout: CleanupTimeout, RetryInterval: CleanupRetryInterval}) + if err != nil { + return nil, fmt.Errorf("failed to create CR: %v", err) + } + + ecs := &api.ECSCluster{} + err = f.Client.Get(goctx.TODO(), types.NamespacedName{Namespace: p.Namespace, Name: p.Name}, ecs) + if err != nil { + return nil, fmt.Errorf("failed to obtain created CR: %v", err) + } + t.Logf("created ecs cluster: %s", ecs.Name) + return ecs, nil +} + +// DeleteCluster deletes the ECSCluster CR specified by cluster spec +func DeleteCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.ECSCluster) error { + t.Logf("deleting ecs cluster: %s", p.Name) + err := f.Client.Delete(goctx.TODO(), p) + if err != nil { + return fmt.Errorf("failed to delete CR: %v", err) + } + + t.Logf("deleted ecs cluster: %s", p.Name) + return nil +} + +// UpdateCluster updates the ECSCluster CR +func UpdateCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.ECSCluster) error { + t.Logf("updating ecs cluster: %s", p.Name) + err := f.Client.Update(goctx.TODO(), p) + if err != nil { + return fmt.Errorf("failed to update CR: %v", err) + } + + t.Logf("updated ecs cluster: %s", p.Name) + return nil +} + +// GetCluster returns the latest ECSCluster CR +func GetCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.ECSCluster) (*api.ECSCluster, error) { + ecs := &api.ECSCluster{} + err := f.Client.Get(goctx.TODO(), types.NamespacedName{Namespace: p.Namespace, Name: p.Name}, ecs) + if err != nil { + return nil, fmt.Errorf("failed to obtain created CR: %v", err) + } + return ecs, nil +} + +// WaitForClusterToBecomeReady will wait until all cluster pods are ready +func WaitForClusterToBecomeReady(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.ECSCluster, size int) error { + t.Logf("waiting for cluster pods to become ready: %s", p.Name) + + err := wait.Poll(RetryInterval, 5*time.Minute, func() (done bool, err error) { + cluster, err := GetCluster(t, f, ctx, p) + if err != nil { + return false, err + } + + t.Logf("\twaiting for pods to become ready (%d/%d), pods (%v)", cluster.Status.ReadyReplicas, size, cluster.Status.Members.Ready) + + _, condition := cluster.Status.GetClusterCondition(api.ClusterConditionPodsReady) + if condition != nil && condition.Status == corev1.ConditionTrue && cluster.Status.ReadyReplicas == int32(size) { + return true, nil + } + return false, nil + }) + + if err != nil { + return err + } + + 
t.Logf("ecs cluster ready: %s", p.Name) + return nil +} + +// WaitForClusterToTerminate will wait until all cluster pods are terminated +func WaitForClusterToTerminate(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.ECSCluster) error { + t.Logf("waiting for ecs cluster to terminate: %s", p.Name) + + listOptions := metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(util.LabelsForECSCluster(p)).String(), + } + + // Wait for Pods to terminate + err := wait.Poll(RetryInterval, 2*time.Minute, func() (done bool, err error) { + podList, err := f.KubeClient.Core().Pods(p.Namespace).List(listOptions) + if err != nil { + return false, err + } + + var names []string + for i := range podList.Items { + pod := &podList.Items[i] + names = append(names, pod.Name) + } + t.Logf("waiting for pods to terminate, running pods (%v)", names) + if len(names) != 0 { + return false, nil + } + return true, nil + }) + + if err != nil { + return err + } + + // Wait for PVCs to terminate + err = wait.Poll(RetryInterval, 1*time.Minute, func() (done bool, err error) { + pvcList, err := f.KubeClient.Core().PersistentVolumeClaims(p.Namespace).List(listOptions) + if err != nil { + return false, err + } + + var names []string + for i := range pvcList.Items { + pvc := &pvcList.Items[i] + names = append(names, pvc.Name) + } + t.Logf("waiting for pvc to terminate (%v)", names) + if len(names) != 0 { + return false, nil + } + return true, nil + }) + + if err != nil { + return err + } + + t.Logf("ecs cluster terminated: %s", p.Name) + return nil +} + +// WriteAndReadData writes sample data and reads it back from the given ECS cluster +func WriteAndReadData(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.ECSCluster) error { + t.Logf("writing and reading data from ecs cluster: %s", p.Name) + testJob := NewTestWriteReadJob(p.Namespace, util.ServiceNameForController(p.Name)) + err := f.Client.Create(goctx.TODO(), testJob, &framework.CleanupOptions{TestContext: ctx, Timeout: CleanupTimeout, RetryInterval: CleanupRetryInterval}) + if err != nil { + return fmt.Errorf("failed to create job: %s", err) + } + + err = wait.Poll(RetryInterval, 3*time.Minute, func() (done bool, err error) { + job, err := f.KubeClient.BatchV1().Jobs(p.Namespace).Get(testJob.Name, metav1.GetOptions{IncludeUninitialized: false}) + if err != nil { + return false, err + } + if job.Status.CompletionTime.IsZero() { + return false, nil + } + if job.Status.Failed > 0 { + return false, fmt.Errorf("failed to write and read data from cluster") + } + return true, nil + }) + + if err != nil { + return err + } + + t.Logf("ecs cluster validated: %s", p.Name) + return nil +} + +func RestartTier2(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, namespace string) error { + t.Log("restarting tier2 storage") + tier2 := NewTier2(namespace) + + err := f.Client.Delete(goctx.TODO(), tier2) + if err != nil { + return fmt.Errorf("failed to delete tier2: %v", err) + } + + err = wait.Poll(RetryInterval, 3*time.Minute, func() (done bool, err error) { + _, err = f.KubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(tier2.Name, metav1.GetOptions{IncludeUninitialized: false}) + if err != nil { + if apierrors.IsNotFound(err) { + return true, nil + } + return false, err + } + return false, nil + }) + + if err != nil { + return fmt.Errorf("failed to wait for tier2 termination: %s", err) + } + + tier2 = NewTier2(namespace) + err = f.Client.Create(goctx.TODO(), tier2, &framework.CleanupOptions{TestContext: ctx, Timeout: 
CleanupTimeout, RetryInterval: CleanupRetryInterval}) + if err != nil { + return fmt.Errorf("failed to create tier2: %s", err) + } + + t.Logf("ecs cluster tier2 restarted") + return nil +} + +func CheckPvcSanity(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.ECSCluster) error { + t.Logf("checking pvc sanity: %s", p.Name) + listOptions := metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(util.LabelsForBookie(p)).String(), + } + pvcList, err := f.KubeClient.CoreV1().PersistentVolumeClaims(p.Namespace).List(listOptions) + if err != nil { + return err + } + + for _, pvc := range pvcList.Items { + if pvc.Status.Phase != corev1.ClaimBound { + continue + } + if util.PvcIsOrphan(pvc.Name, p.Spec.Bookkeeper.Replicas) { + return fmt.Errorf("bookie pvc is illegal") + } + + } + + listOptions = metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(util.LabelsForNode(p)).String(), + } + pvcList, err = f.KubeClient.CoreV1().PersistentVolumeClaims(p.Namespace).List(listOptions) + if err != nil { + return err + } + + for _, pvc := range pvcList.Items { + if pvc.Status.Phase != corev1.ClaimBound { + continue + } + if util.PvcIsOrphan(pvc.Name, p.Spec.ECS.NodeReplicas) { + return fmt.Errorf("segment store pvc is illegal") + } + + } + + t.Logf("pvc validated: %s", p.Name) + return nil +} diff --git a/ecs-operator/cluster-operator/pkg/test/e2e/e2eutil/spec_util.go b/ecs-operator/cluster-operator/pkg/test/e2e/e2eutil/spec_util.go new file mode 100644 index 0000000..a1db535 --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/test/e2e/e2eutil/spec_util.go @@ -0,0 +1,102 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package e2eutil + +import ( + "fmt" + "k8s.io/apimachinery/pkg/api/resource" + + api "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NewDefaultCluster returns a cluster with an empty spec, which will be filled +// with default values +func NewDefaultCluster(namespace string) *api.ECSCluster { + return &api.ECSCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "ECSCluster", + APIVersion: "ecs.ecs.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: namespace, + }, + } +} + +func newTestJob(namespace string, command string) *batchv1.Job { + deadline := int64(180) + retries := int32(1) + return &batchv1.Job{ + TypeMeta: metav1.TypeMeta{ + Kind: "Job", + APIVersion: "batch/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-job-", + Namespace: namespace, + }, + Spec: batchv1.JobSpec{ + ActiveDeadlineSeconds: &deadline, + BackoffLimit: &retries, + + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "adrianmo/ecs-samples", + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{"/bin/sh", "-c"}, + Args: []string{command}, + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + }, + } +} + +// NewTestWriteReadJob returns a Job that can test ecs cluster by running a sample +func NewTestWriteReadJob(namespace string, controllerUri string) *batchv1.Job { + command := fmt.Sprintf("cd /samples/ecs-client-examples "+ + "&& bin/helloWorldWriter -u tcp://%s:9090 "+ + "&& bin/helloWorldReader -u tcp://%s:9090", + controllerUri, controllerUri) + return newTestJob(namespace, command) +} + +func NewTier2(namespace string) *corev1.PersistentVolumeClaim { + storageName := "nfs" + return &corev1.PersistentVolumeClaim{ + TypeMeta: metav1.TypeMeta{ + Kind: "PersistentVolumeClaim", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "ecs-tier2", + Namespace: namespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: &storageName, + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.PersistentVolumeAccessMode(corev1.ReadWriteMany)}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("5Gi"), + }, + }, + }, + } +} diff --git a/ecs-operator/cluster-operator/pkg/util/k8sutil.go b/ecs-operator/cluster-operator/pkg/util/k8sutil.go new file mode 100644 index 0000000..671873b --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/util/k8sutil.go @@ -0,0 +1,114 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package util + +import ( + "context" + "time" + + "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func DownwardAPIEnv() []corev1.EnvVar { + return []corev1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.namespace", + }, + }, + }, + } +} + +func PodAntiAffinity(component string, clusterName string) *corev1.Affinity { + return &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ + { + Weight: 100, + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "component", + Operator: metav1.LabelSelectorOpIn, + Values: []string{component}, + }, + { + Key: "ecs_cluster", + Operator: metav1.LabelSelectorOpIn, + Values: []string{clusterName}, + }, + }, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + } +} + +// Wait for pods in cluster to be terminated +func WaitForClusterToTerminate(kubeClient client.Client, p *v1alpha1.ECSCluster) (err error) { + listOptions := &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(LabelsForECSCluster(p)), + } + + err = wait.Poll(5*time.Second, 2*time.Minute, func() (done bool, err error) { + podList := &corev1.PodList{} + err = kubeClient.List(context.TODO(), listOptions, podList) + if err != nil { + return false, err + } + + var names []string + for i := range podList.Items { + pod := &podList.Items[i] + names = append(names, pod.Name) + } + + if len(names) != 0 { + return false, nil + } + return true, nil + }) + + return err +} + +func IsPodReady(pod *corev1.Pod) bool { + for _, condition := range pod.Status.Conditions { + if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue { + return true + } + } + return false +} diff --git a/ecs-operator/cluster-operator/pkg/util/nautiluscluster.go b/ecs-operator/cluster-operator/pkg/util/nautiluscluster.go new file mode 100644 index 0000000..be65ff9 --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/util/nautiluscluster.go @@ -0,0 +1,149 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package util + +import ( + "fmt" + "strconv" + "strings" + + "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1" +) + +func PdbNameForBookie(clusterName string) string { + return fmt.Sprintf("%s-bookie", clusterName) +} + +func ConfigMapNameForBookie(clusterName string) string { + return fmt.Sprintf("%s-bookie", clusterName) +} + +func StatefulSetNameForBookie(clusterName string) string { + return fmt.Sprintf("%s-bookie", clusterName) +} + +func PdbNameForController(clusterName string) string { + return fmt.Sprintf("%s-ecs-controller", clusterName) +} + +func ConfigMapNameForController(clusterName string) string { + return fmt.Sprintf("%s-ecs-controller", clusterName) +} + +func ServiceNameForController(clusterName string) string { + return fmt.Sprintf("%s-ecs-controller", clusterName) +} + +func ServiceNameForNode(clusterName string, index int32) string { + return fmt.Sprintf("%s-ecs-node-%d", clusterName, index) +} + +func HeadlessServiceNameForNode(clusterName string) string { + return fmt.Sprintf("%s-ecs-node-headless", clusterName) +} + +func HeadlessServiceNameForBookie(clusterName string) string { + return fmt.Sprintf("%s-bookie-headless", clusterName) +} + +func DeploymentNameForController(clusterName string) string { + return fmt.Sprintf("%s-ecs-controller", clusterName) +} + +func PdbNameForNode(clusterName string) string { + return fmt.Sprintf("%s-node", clusterName) +} + +func ConfigMapNameForNode(clusterName string) string { + return fmt.Sprintf("%s-ecs-node", clusterName) +} + +func StatefulSetNameForNode(clusterName string) string { + return fmt.Sprintf("%s-ecs-node", clusterName) +} + +func LabelsForBookie(ecsCluster *v1alpha1.ECSCluster) map[string]string { + labels := LabelsForECSCluster(ecsCluster) + labels["component"] = "bookie" + return labels +} + +func LabelsForController(ecsCluster *v1alpha1.ECSCluster) map[string]string { + labels := LabelsForECSCluster(ecsCluster) + labels["component"] = "ecs-controller" + return labels +} + +func LabelsForNode(ecsCluster *v1alpha1.ECSCluster) map[string]string { + labels := LabelsForECSCluster(ecsCluster) + labels["component"] = "ecs-node" + return labels +} + +func LabelsForECSCluster(ecsCluster *v1alpha1.ECSCluster) map[string]string { + return map[string]string{ + "app": "ecs-cluster", + "ecs_cluster": ecsCluster.Name, + } +} + +func PvcIsOrphan(stsPvcName string, replicas int32) bool { + index := strings.LastIndexAny(stsPvcName, "-") + if index == -1 { + return false + } + + ordinal, err := strconv.Atoi(stsPvcName[index+1:]) + if err != nil { + return false + } + + return int32(ordinal) >= replicas +} + +func ECSControllerServiceURL(ecsCluster v1alpha1.ECSCluster) string { + return fmt.Sprintf("tcp://%v.%v:%v", ServiceNameForController(ecsCluster.Name), ecsCluster.Namespace, "9090") +} + +func HealthcheckCommand(port int32) []string { + return []string{"/bin/sh", "-c", fmt.Sprintf("netstat -ltn 2> /dev/null | grep %d || ss -ltn 2> /dev/null | grep %d", port, port)} +} + +// Min returns the smaller of x or y. 
+func Min(x, y int32) int32 { + if x > y { + return y + } + return x +} + +// ContainsString reports whether str is present in slice +func ContainsString(slice []string, str string) bool { + for _, item := range slice { + if item == str { + return true + } + } + return false +} + +// RemoveString returns a copy of slice with every occurrence of str removed +func RemoveString(slice []string, str string) (result []string) { + for _, item := range slice { + if item == str { + continue + } + result = append(result, item) + } + return result +} + +// GetClusterExpectedSize returns the total number of pods expected for the cluster +func GetClusterExpectedSize(p *v1alpha1.ECSCluster) (size int) { + return int(p.Spec.ECS.ControllerReplicas + p.Spec.ECS.NodeReplicas + p.Spec.Bookkeeper.Replicas) +} diff --git a/ecs-operator/cluster-operator/pkg/util/zookeeper_util.go b/ecs-operator/cluster-operator/pkg/util/zookeeper_util.go new file mode 100644 index 0000000..4901736 --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/util/zookeeper_util.go @@ -0,0 +1,86 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package util + +import ( + "container/list" + "fmt" + "time" + + "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1" + "github.com/samuel/go-zookeeper/zk" +) + +const ( + // Set in https://github.com/ecs/ecs/blob/master/docker/bookkeeper/entrypoint.sh#L21 + ECSPath = "ecs" + ZkFinalizer = "cleanUpZookeeper" +) + +// DeleteAllZnodes deletes all znodes related to a specific ECS cluster +func DeleteAllZnodes(p *v1alpha1.ECSCluster) (err error) { + host := []string{p.Spec.ZookeeperUri} + conn, _, err := zk.Connect(host, time.Second*5) + if err != nil { + return fmt.Errorf("failed to connect to zookeeper: %v", err) + } + defer conn.Close() + + root := fmt.Sprintf("/%s/%s", ECSPath, p.Name) + exist, _, err := conn.Exists(root) + if err != nil { + return fmt.Errorf("failed to check if zookeeper path exists: %v", err) + } + + if exist { + // Construct BFS tree to delete all znodes recursively + tree, err := ListSubTreeBFS(conn, root) + if err != nil { + return fmt.Errorf("failed to construct BFS tree: %v", err) + } + + // Delete from the back of the BFS list so children are removed before + // their parents; ZooKeeper can only delete empty znodes + for tree.Len() != 0 { + err := conn.Delete(tree.Back().Value.(string), -1) + if err != nil { + return fmt.Errorf("failed to delete znode (%s): %v", tree.Back().Value.(string), err) + } + tree.Remove(tree.Back()) + } + } + return nil +} + +// ListSubTreeBFS returns the znodes under root (including root) in BFS order +func ListSubTreeBFS(conn *zk.Conn, root string) (*list.List, error) { + queue := list.New() + tree := list.New() + queue.PushBack(root) + tree.PushBack(root) + + for { + if queue.Len() == 0 { + break + } + node := queue.Front() + children, _, err := conn.Children(node.Value.(string)) + if err != nil { + return tree, err + } + + for _, child := range children { + childPath := fmt.Sprintf("%s/%s", node.Value.(string), child) + queue.PushBack(childPath) + tree.PushBack(childPath) + } + queue.Remove(node) + } + return tree, nil +} diff --git a/ecs-operator/cluster-operator/pkg/version/version.go b/ecs-operator/cluster-operator/pkg/version/version.go new file mode 100644 index 0000000..7ffaec7 --- /dev/null +++ b/ecs-operator/cluster-operator/pkg/version/version.go @@ -0,0 +1,17 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package version + +// Version represents the software version of the ECS Operator +var Version string + +// GitSHA represents the Git commit hash in short format +var GitSHA string diff --git a/ecs-operator/cluster-operator/scripts/check_format.sh b/ecs-operator/cluster-operator/scripts/check_format.sh new file mode 100755 index 0000000..bba3706 --- /dev/null +++ b/ecs-operator/cluster-operator/scripts/check_format.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# exit immediately when a command fails +set -e +# only exit with zero if all commands of the pipeline exit successfully +set -o pipefail +# error on unset variables +set -u + +goFiles=$(find . -name \*.go -not -path "./vendor/*" -print) +invalidFiles=$(gofmt -l $goFiles) + +if [ "$invalidFiles" ]; then + echo -e "These files did not pass the 'go fmt' check, please run 'go fmt' on them:" + echo -e $invalidFiles + exit 1 +fi diff --git a/ecs-operator/cluster-operator/scripts/check_license.sh b/ecs-operator/cluster-operator/scripts/check_license.sh new file mode 100755 index 0000000..0ce0ef6 --- /dev/null +++ b/ecs-operator/cluster-operator/scripts/check_license.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# exit immediately when a command fails +set -e +# only exit with zero if all commands of the pipeline exit successfully +set -o pipefail +# error on unset variables +set -u + +licRes=$( + find . -type f -iname '*.go' ! -path '*/vendor/*' -exec \ + sh -c 'head -n3 $1 | grep -Eq "(Copyright|generated|GENERATED)" || echo -e $1' {} {} \; +) + +if [ -n "${licRes}" ]; then + echo -e "license header checking failed:\\n${licRes}" + exit 255 +fi diff --git a/ecs-operator/cluster-operator/test/e2e/basic_test.go b/ecs-operator/cluster-operator/test/e2e/basic_test.go new file mode 100644 index 0000000..5284323 --- /dev/null +++ b/ecs-operator/cluster-operator/test/e2e/basic_test.go @@ -0,0 +1,145 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package e2e + +import ( + "testing" + + framework "github.com/operator-framework/operator-sdk/pkg/test" + ecs_e2eutil "github.com/ecs/ecs-operator/pkg/test/e2e/e2eutil" +) + +func testCreateDefaultCluster(t *testing.T) { + doCleanup := true + ctx := framework.NewTestCtx(t) + defer func() { + if doCleanup { + ctx.Cleanup() + } + }() + + namespace, err := ctx.GetNamespace() + if err != nil { + t.Fatal(err) + } + f := framework.Global + + ecs, err := ecs_e2eutil.CreateCluster(t, f, ctx, ecs_e2eutil.NewDefaultCluster(namespace)) + if err != nil { + t.Fatal(err) + } + + podSize := 5 + err = ecs_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, ecs, podSize) + if err != nil { + t.Fatal(err) + } + + err = ecs_e2eutil.WriteAndReadData(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + err = ecs_e2eutil.DeleteCluster(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + // No need to do cleanup since the cluster CR has already been deleted + doCleanup = false + + err = ecs_e2eutil.WaitForClusterToTerminate(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + // A workaround for issue 93 + err = ecs_e2eutil.RestartTier2(t, f, ctx, namespace) + if err != nil { + t.Fatal(err) + } +} + +// Test recreate ECS cluster with the same name(issue 91) +func testRecreateDefaultCluster(t *testing.T) { + doCleanup := true + ctx := framework.NewTestCtx(t) + defer func() { + if doCleanup { + ctx.Cleanup() + } + }() + + namespace, err := ctx.GetNamespace() + if err != nil { + t.Fatal(err) + } + f := framework.Global + + defaultCluster := ecs_e2eutil.NewDefaultCluster(namespace) + + ecs, err := ecs_e2eutil.CreateCluster(t, f, ctx, defaultCluster) + if err != nil { + t.Fatal(err) + } + + podSize := 5 + err = ecs_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, ecs, podSize) + if err != nil { + t.Fatal(err) + } + + err = ecs_e2eutil.DeleteCluster(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + err = ecs_e2eutil.WaitForClusterToTerminate(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + defaultCluster = ecs_e2eutil.NewDefaultCluster(namespace) + + ecs, err = ecs_e2eutil.CreateCluster(t, f, ctx, defaultCluster) + if err != nil { + t.Fatal(err) + } + + err = ecs_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, ecs, podSize) + if err != nil { + t.Fatal(err) + } + + err = ecs_e2eutil.WriteAndReadData(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + err = ecs_e2eutil.DeleteCluster(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + // No need to do cleanup since the cluster CR has already been deleted + doCleanup = false + + err = ecs_e2eutil.WaitForClusterToTerminate(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + // A workaround for issue 93 + err = ecs_e2eutil.RestartTier2(t, f, ctx, namespace) + if err != nil { + t.Fatal(err) + } +} diff --git a/ecs-operator/cluster-operator/test/e2e/main_test.go b/ecs-operator/cluster-operator/test/e2e/main_test.go new file mode 100644 index 0000000..6b14301 --- /dev/null +++ b/ecs-operator/cluster-operator/test/e2e/main_test.go @@ -0,0 +1,23 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package e2e + +import ( + "testing" + + f "github.com/operator-framework/operator-sdk/pkg/test" + + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +func TestMain(m *testing.M) { + f.MainEntry(m) +} diff --git a/ecs-operator/cluster-operator/test/e2e/nautiluscluster_test.go b/ecs-operator/cluster-operator/test/e2e/nautiluscluster_test.go new file mode 100644 index 0000000..b5560ef --- /dev/null +++ b/ecs-operator/cluster-operator/test/e2e/nautiluscluster_test.go @@ -0,0 +1,68 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package e2e + +import ( + "testing" + + framework "github.com/operator-framework/operator-sdk/pkg/test" + "github.com/operator-framework/operator-sdk/pkg/test/e2eutil" + apis "github.com/ecs/ecs-operator/pkg/apis" + operator "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1" + ecs_e2eutil "github.com/ecs/ecs-operator/pkg/test/e2e/e2eutil" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestECSCluster(t *testing.T) { + ecsClusterList := &operator.ECSClusterList{ + TypeMeta: metav1.TypeMeta{ + Kind: "ECSCluster", + APIVersion: "ecs.ecs.io/v1alpha1", + }, + } + err := framework.AddToFrameworkScheme(apis.AddToScheme, ecsClusterList) + if err != nil { + t.Fatalf("failed to add custom resource scheme to framework: %v", err) + } + // run subtests + t.Run("x", testECSCluster) +} + +func testECSCluster(t *testing.T) { + ctx := framework.NewTestCtx(t) + defer ctx.Cleanup() + err := ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: ecs_e2eutil.CleanupTimeout, RetryInterval: ecs_e2eutil.CleanupRetryInterval}) + if err != nil { + t.Fatalf("failed to initialize cluster resources: %v", err) + } + t.Log("Initialized cluster resources") + namespace, err := ctx.GetNamespace() + if err != nil { + t.Fatal(err) + } + // get global framework variables + f := framework.Global + // wait for ecs-operator to be ready + err = e2eutil.WaitForOperatorDeployment(t, f.KubeClient, namespace, "ecs-operator", 1, ecs_e2eutil.RetryInterval, ecs_e2eutil.Timeout) + if err != nil { + t.Fatal(err) + } + + testFuncs := map[string]func(t *testing.T){ + "testCreateDefaultCluster": testCreateDefaultCluster, + "testRecreateDefaultCluster": testRecreateDefaultCluster, + "testScaleCluster": testScaleCluster, + } + + for name, f := range testFuncs { + t.Run(name, f) + } +} diff --git a/ecs-operator/cluster-operator/test/e2e/resources/tier2.yaml b/ecs-operator/cluster-operator/test/e2e/resources/tier2.yaml new file mode 100644 index 0000000..48732b8 --- /dev/null +++ b/ecs-operator/cluster-operator/test/e2e/resources/tier2.yaml @@ -0,0 +1,11 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: ecs-tier2 +spec: + storageClassName: "nfs" + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi diff --git a/ecs-operator/cluster-operator/test/e2e/resources/zookeeper.yaml b/ecs-operator/cluster-operator/test/e2e/resources/zookeeper.yaml new file mode 100644 index 0000000..b0aca36 --- /dev/null +++ b/ecs-operator/cluster-operator/test/e2e/resources/zookeeper.yaml @@ -0,0 +1,153 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + 
name: zookeeperclusters.zookeeper.ecs.io +spec: + group: zookeeper.ecs.io + names: + kind: ZookeeperCluster + listKind: ZookeeperClusterList + plural: zookeeperclusters + singular: zookeepercluster + shortNames: + - zk + additionalPrinterColumns: + - name: Replicas + type: integer + description: The number of ZooKeeper servers in the ensemble + JSONPath: .status.replicas + - name: Ready Replicas + type: integer + description: The number of ZooKeeper servers in the ensemble that are in a Ready state + JSONPath: .status.readyReplicas + - name: Internal Endpoint + type: string + description: Client endpoint internal to cluster network + JSONPath: .status.internalClientEndpoint + - name: External Endpoint + type: string + description: Client endpoint external to cluster network via LoadBalancer + JSONPath: .status.externalClientEndpoint + - name: Age + type: date + JSONPath: .metadata.creationTimestamp + scope: Namespaced + version: v1beta1 + subresources: + status: {} + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: zookeeper-operator + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: zookeeper-operator +rules: +- apiGroups: + - zookeeper.ecs.io + resources: + - "*" + verbs: + - "*" +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + verbs: + - "*" +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - "*" +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - "*" + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: zookeeper-operator-cluster-role-binding +subjects: +- kind: ServiceAccount + name: zookeeper-operator + namespace: default +roleRef: + kind: ClusterRole + name: zookeeper-operator + apiGroup: rbac.authorization.k8s.io + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper-operator +spec: + replicas: 1 + selector: + matchLabels: + name: zookeeper-operator + template: + metadata: + labels: + name: zookeeper-operator + spec: + serviceAccountName: zookeeper-operator + containers: + - name: zookeeper-operator + image: ecs/zookeeper-operator:0.2.1 + ports: + - containerPort: 60000 + name: metrics + command: + - zookeeper-operator + imagePullPolicy: Always + readinessProbe: + exec: + command: + - stat + - /tmp/operator-sdk-ready + initialDelaySeconds: 4 + periodSeconds: 10 + failureThreshold: 1 + env: + - name: WATCH_NAMESPACE + value: "" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "zookeeper-operator" + +--- + +apiVersion: "zookeeper.ecs.io/v1beta1" +kind: "ZookeeperCluster" +metadata: + name: "zk" +spec: + replicas: 1 diff --git a/ecs-operator/cluster-operator/test/e2e/scale_test.go b/ecs-operator/cluster-operator/test/e2e/scale_test.go new file mode 100644 index 0000000..1cac6a8 --- /dev/null +++ b/ecs-operator/cluster-operator/test/e2e/scale_test.go @@ -0,0 +1,111 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package e2e + +import ( + "testing" + + framework "github.com/operator-framework/operator-sdk/pkg/test" + ecs_e2eutil "github.com/ecs/ecs-operator/pkg/test/e2e/e2eutil" +) + +func testScaleCluster(t *testing.T) { + doCleanup := true + ctx := framework.NewTestCtx(t) + defer func() { + if doCleanup { + ctx.Cleanup() + } + }() + + namespace, err := ctx.GetNamespace() + if err != nil { + t.Fatal(err) + } + f := framework.Global + + ecs, err := ecs_e2eutil.CreateCluster(t, f, ctx, ecs_e2eutil.NewDefaultCluster(namespace)) + if err != nil { + t.Fatal(err) + } + + podSize := 5 + err = ecs_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, ecs, podSize) + if err != nil { + t.Fatal(err) + } + + // This is to get the latest ECS cluster object + ecs, err = ecs_e2eutil.GetCluster(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + ecs.Spec.Bookkeeper.Replicas = 4 + ecs.Spec.ECS.NodeReplicas = 2 + podSize = 7 + + err = ecs_e2eutil.UpdateCluster(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + err = ecs_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, ecs, podSize) + if err != nil { + t.Fatal(err) + } + + // This is to get the latest ECS cluster object + ecs, err = ecs_e2eutil.GetCluster(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + // Scale down ECS cluster back to default + ecs.Spec.Bookkeeper.Replicas = 3 + ecs.Spec.ECS.NodeReplicas = 1 + podSize = 5 + + err = ecs_e2eutil.UpdateCluster(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + err = ecs_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, ecs, podSize) + if err != nil { + t.Fatal(err) + } + + err = ecs_e2eutil.CheckPvcSanity(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + // Delete cluster + err = ecs_e2eutil.DeleteCluster(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + // No need to do cleanup since the cluster CR has already been deleted + doCleanup = false + + err = ecs_e2eutil.WaitForClusterToTerminate(t, f, ctx, ecs) + if err != nil { + t.Fatal(err) + } + + // A workaround for issue 93 + err = ecs_e2eutil.RestartTier2(t, f, ctx, namespace) + if err != nil { + t.Fatal(err) + } +} diff --git a/nautilus-operator/.gitignore b/nautilus-operator/.gitignore new file mode 100644 index 0000000..7c50470 --- /dev/null +++ b/nautilus-operator/.gitignore @@ -0,0 +1,77 @@ +# Temporary Build Files +build/_output +build/_test +# Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode +### Emacs ### +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* +# Org-mode +.org-id-locations +*_archive +# flymake-mode +*_flymake.* +# eshell files +/eshell/history +/eshell/lastdir +# elpa packages +/elpa/ +# reftex files +*.rel +# AUCTeX auto folder +/auto/ +# cask packages +.cask/ +dist/ +# Flycheck +flycheck_*.el +# server auth directory +/server/ +# projectiles files +.projectile +projectile-bookmarks.eld +# directory configuration +.dir-locals.el +# saveplace +places +# url cache +url/cache/ +# cedet +ede-projects.el +# smex +smex-items +# company-statistics +company-statistics-cache.el +# anaconda-mode +anaconda-mode/ +### Go ### +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +# Test binary, build with 'go test -c' +*.test +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +### Vim ### +# swap +.sw[a-p] +.*.sw[a-p] +# session +Session.vim +# temporary +.netrwhist +# auto-generated tag files +tags 
+### VisualStudioCode ### +.vscode/* +.history +# End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode diff --git a/nautilus-operator/README.md b/nautilus-operator/README.md new file mode 100644 index 0000000..0005613 --- /dev/null +++ b/nautilus-operator/README.md @@ -0,0 +1,38 @@ +# Overview +This is a Kubernetes operator for Dell EMC Nautilus Stream Storage +https://asd-nautilus-jenkins.isus.emc.com/view/Nautilus%20Kubernetes/job/nautilus-docs-master/Docs-Site/ + +A guide to writing a Kubernetes operator for Dell EMC Nautilus storage follows. +This Nautilus operator manages Nautilus storage deployed to Kubernetes and automates operational tasks related to Nautilus. + +# Introduction: +This document curates in one place the information needed to write an operator for hosting Nautilus on Kubernetes. Nautilus is a software-defined stream storage stack, and Kubernetes is a container orchestration technology. Kubernetes is well known for isolating applications and making them portable with a rich and extensible framework. This framework allows declaring all the resources and configuration that the application needs. This document is intended for a software developer audience. + +# Description: +A Kubernetes operator is a controller that takes the resource definitions for an application and reconciles the container configuration with those definitions. For example, an upgrade would be defined by the source and destination version numbers and associated locations, permitting the controller to take the necessary actions. Definitions for resources specific to the application are called custom resource definitions. They are written in YAML and maintained in a folder called deploy under the root project folder. The other folder holds the controller logic, which is written in Go. The operator logic has code for the APIs and the controller, plus code for the entry-point invocation. Moreover, there is a Dockerfile that packages the operator into a Docker image, which is deployed together with an associated service account to run it. +Writing the operator is facilitated by the operator software development kit, a command-line tool that generates the scaffolds necessary for the API and the controller. The custom resource definitions have to be edited by hand. Operators follow a naming convention of <application>-operator. Definitions are broken out into their own YAML files, and each definition in a file has a version, metadata, and a specification to help differentiate changes made to the definitions. +The generated controller code provides structs, the Go primitive for declarations, and functions for invocation. A Reconcile function is central to the controller. When invoked, the controller fetches the definition and matches it against what exists in the current deployment. Each time the resource changes, Kubernetes invokes the reconcile function corresponding to the operator for that resource definition. The logic is therefore state driven. The state, its handlers, and the ownership of activities are elaborated in as much detail as possible. The controllerutil.SetControllerReference function is used to declare the primary owner of a resource. +The operator SDK tool also helps build the Docker image and push it to Docker Hub. + +# Conclusion: + +Writing an operator for Kubernetes is made easy by the scaffolding that the operator SDK tool generates for the definitions and the controller.
+The next step involves determining the custom resource definitions for the application. +The application specifies a list of resources, APIs, and controllers based on the operations it wants to support. +The typical operations for the application deployment involve +1) upgrading +2) scaling +3) backups and other jobs +Each controller implements the reconcile function described earlier and is specific to the resource it reconciles; each of the operations above may be handled by its own controller. A cluster controller, for example, will read the state of a Nautilus cluster object and make changes according to that state. The reconcile function is executed periodically. The Nautilus cluster instance is fetched. If it is not found, the associated resources are cleaned up: by marking the cluster as reset, it is automatically garbage collected. If a cluster is found, it is treated as the current cluster. This prevents a cluster that is no longer current from being reconciled in case there is a race condition. A minimal sketch of such a reconcile function is shown below, after the contributing notes. +The reconcile function itself ensures that default values are applied for fields not set in the spec. It deploys the current cluster using the deploy interface on the cluster. However, it performs a few checks before doing this. For example, it checks that the operator is not paused, that no resource finalization is occurring, and that the resource versions match. After the deployment, the cluster is reconciled and then reset. It is easy to make web requests from the operator, so the actions themselves can be performed remotely by targeting the management IP address. +All the controllers are registered with the application manager; this is specifically called out per controller. The base controller merely calls all the controllers one by one and checks whether each executed successfully. +The upgrade controller reconciles the upgrade object, which defines the images to upgrade from and to. Resume and reset are part of the upgrade reconciler. The upgrade itself might have other actions, such as enabling, disabling, recording events, adding roles and service accounts, and performing new image pull jobs. +There can be a generic job controller that assigns individual jobs to their encapsulations; its reconcile function can provide consistency across the jobs. Validations, health checks, and logs are also maintained with the clusters. +The APIs are merely a registry where the different resource types are registered. +The deploy folder contains the declarations in YAML. +The cmd folder contains the invocations specific to the workflow. It holds the manager command, which registers the namespaces, the schemes for the resources, and all controllers, and then starts them. +Reference: +https://coreos.com/blog/introducing-operator-framework diff --git a/nautilus-operator/cluster-operator/CONTRIBUTING.md b/nautilus-operator/cluster-operator/CONTRIBUTING.md new file mode 100644 index 0000000..e5bfb10 --- /dev/null +++ b/nautilus-operator/cluster-operator/CONTRIBUTING.md @@ -0,0 +1,5 @@ +# Contributing to Nautilus Operator + + Please check the [Contributing](https://github.com/nautilus/nautilus-operator/wiki/Contributing) wiki page. + + Happy hacking!
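To make the reconcile flow described in the nautilus-operator README above concrete, here is a minimal sketch of a Reconcile function. It targets the controller-runtime v0.1.x API pinned in Gopkg.lock and reuses the ECSCluster type from this repository; the ReconcileECSCluster struct, its package name, and the defaulting step are illustrative assumptions, not the operator's actual controller code.

```go
package ecscluster

import (
	"context"

	api "github.com/ecs/ecs-operator/pkg/apis/ecs/v1alpha1"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// ReconcileECSCluster is a hypothetical reconciler for ECSCluster resources.
type ReconcileECSCluster struct {
	client client.Client
}

// Reconcile is invoked by the manager each time the watched ECSCluster (or a
// resource it owns) changes. It fetches the current object, fills in defaults
// for unset fields, and converges the deployment toward the declared spec.
func (r *ReconcileECSCluster) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	cluster := &api.ECSCluster{}
	if err := r.client.Get(context.TODO(), request.NamespacedName, cluster); err != nil {
		if apierrors.IsNotFound(err) {
			// The CR is gone; resources it owned are garbage collected via
			// their owner references, so there is nothing left to do.
			return reconcile.Result{}, nil
		}
		return reconcile.Result{}, err
	}

	// Apply a default when a field was left unset in the spec (illustrative).
	if cluster.Spec.ECS.NodeReplicas == 0 {
		cluster.Spec.ECS.NodeReplicas = 1
	}

	// ... create or update the owned StatefulSets, Services, and pod
	// disruption budgets here, marking ownership with
	// controllerutil.SetControllerReference so that deleting the
	// ECSCluster cascades to everything it created.

	return reconcile.Result{}, nil
}
```

In the real operator, the deployment step would presumably also honor the pause, finalization, and resource-version checks described above before touching any owned resources.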
diff --git a/nautilus-operator/cluster-operator/Dockerfile b/nautilus-operator/cluster-operator/Dockerfile new file mode 100644 index 0000000..e1848c0 --- /dev/null +++ b/nautilus-operator/cluster-operator/Dockerfile @@ -0,0 +1,39 @@ +# +# Copyright (c) 2017 Dell Inc., or its subsidiaries. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +FROM golang:1.10.1-alpine3.7 as go-builder + +ARG PROJECT_NAME=nautilus-operator +ARG REPO_PATH=github.com/nautilus/${PROJECT_NAME} +ARG BUILD_PATH=${REPO_PATH}/cmd/manager + +# Build version and commit SHA should be passed in when performing docker build +ARG VERSION=0.0.0-localdev +ARG GIT_SHA=0000000 + +COPY pkg /go/src/${REPO_PATH}/pkg +COPY cmd /go/src/${REPO_PATH}/cmd +COPY vendor /go/src/${REPO_PATH}/vendor + +RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o ${GOBIN}/${PROJECT_NAME} \ + -ldflags "-X ${REPO_PATH}/pkg/version.Version=${VERSION} -X ${REPO_PATH}/pkg/version.GitSHA=${GIT_SHA}" \ + $BUILD_PATH + +# ============================================================================= +FROM alpine:3.7 AS final + +ARG PROJECT_NAME=nautilus-operator +ARG REPO_PATH=github.com/nautilus/$PROJECT_NAME + +COPY --from=go-builder ${GOBIN}/${PROJECT_NAME} /usr/local/bin/${PROJECT_NAME} + +RUN adduser -D ${PROJECT_NAME} +USER ${PROJECT_NAME} + +ENTRYPOINT ["/usr/local/bin/nautilus-operator"] diff --git a/nautilus-operator/cluster-operator/Gopkg.lock b/nautilus-operator/cluster-operator/Gopkg.lock new file mode 100644 index 0000000..0b19831 --- /dev/null +++ b/nautilus-operator/cluster-operator/Gopkg.lock @@ -0,0 +1,1035 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:fd1a7ca82682444a45424f6af37b1e0373f632e5a303441b111558ae8656a9b7" + name = "cloud.google.com/go" + packages = ["compute/metadata"] + pruneopts = "NT" + revision = "0ebda48a7f143b1cce9eb37a8c1106ac762a3430" + version = "v0.34.0" + +[[projects]] + digest = "1:75d2b55b13298745ec068057251d05d65bbae0a668201fe45ad6986551a55601" + name = "github.com/BurntSushi/toml" + packages = ["."] + pruneopts = "NT" + revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005" + version = "v0.3.1" + +[[projects]] + digest = "1:d8ebbd207f3d3266d4423ce4860c9f3794956306ded6c7ba312ecc69cdfbf04c" + name = "github.com/PuerkitoBio/purell" + packages = ["."] + pruneopts = "NT" + revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:8098cd40cd09879efbf12e33bcd51ead4a66006ac802cd563a66c4f3373b9727" + name = "github.com/PuerkitoBio/urlesc" + packages = ["."] + pruneopts = "NT" + revision = "de5bf2ad457846296e2031421a34e2568e304e35" + +[[projects]] + branch = "master" + digest = "1:c819830f4f5ef85874a90ac3cbcc96cd322c715f5c96fbe4722eacd3dafbaa07" + name = "github.com/beorn7/perks" + packages = ["quantile"] + pruneopts = "NT" + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" + +[[projects]] + digest = "1:4b8b5811da6970495e04d1f4e98bb89518cc3cfc3b3f456bdb876ed7b6c74049" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "NT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:e6f888d4be8ec0f05c50e2aba83da4948b58045dee54d03be81fa74ea673302c" + name = "github.com/emicklei/go-restful" + packages = [ + ".", + "log", + ] + pruneopts = "NT" + revision = "3eb9738c1697594ea6e71a7156a9bb32ed216cf0" + version = "v2.8.0" + +[[projects]] + digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" + name = "github.com/ghodss/yaml" + packages = ["."] + pruneopts = "NT" + revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" + version = "v1.0.0" + +[[projects]] + branch = "master" + digest = "1:d421af4c4fe51d399667d573982d663fe1fa67020a88d3ae43466ebfe8e2b5c9" + name = "github.com/go-logr/logr" + packages = ["."] + pruneopts = "NT" + revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" + +[[projects]] + digest = "1:340497a512995aa69c0add901d79a2096b3449d35a44a6f1f1115091a9f8c687" + name = "github.com/go-logr/zapr" + packages = ["."] + pruneopts = "NT" + revision = "7536572e8d55209135cd5e7ccf7fce43dca217ab" + version = "v0.1.0" + +[[projects]] + digest = "1:260f7ebefc63024c8dfe2c9f1a2935a89fa4213637a1f522f592f80c001cc441" + name = "github.com/go-openapi/jsonpointer" + packages = ["."] + pruneopts = "NT" + revision = "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004" + version = "v0.17.2" + +[[projects]] + digest = "1:98abd61947ff5c7c6fcfec5473d02a4821ed3a2dd99a4fbfdb7925b0dd745546" + name = "github.com/go-openapi/jsonreference" + packages = ["."] + pruneopts = "NT" + revision = "8483a886a90412cd6858df4ea3483dce9c8e35a3" + version = "v0.17.2" + +[[projects]] + digest = "1:dfab391de021809e0041f0ab5648da6b74dd16a685472a1b8c3dc06b3dca1ee2" + name = "github.com/go-openapi/spec" + packages = ["."] + pruneopts = "NT" + revision = "5bae59e25b21498baea7f9d46e9c147ec106a42e" + version = "v0.17.2" + +[[projects]] + digest = "1:983f95b2fae6fe8fdd361738325ed6090f4f3bd15ce4db745e899fb5b0fdfc46" + name = "github.com/go-openapi/swag" + packages = ["."] + pruneopts = "NT" + revision = "5899d5c5e619fda5fa86e14795a835f473ca284c" + version = "v0.17.2" + +[[projects]] + 
digest = "1:756ec597ae63e724366f1b393e9477d3e4d980baf1790a029494a336386e89f1" + name = "github.com/gobuffalo/envy" + packages = ["."] + pruneopts = "NT" + revision = "7e7ddcbb431d1d9e32121d7eeee8d68bdecd7081" + version = "v1.6.10" + +[[projects]] + digest = "1:2a9d5e367df8c95e780975ca1dd4010bef8e39a3777066d3880ce274b39d4b5a" + name = "github.com/gogo/protobuf" + packages = [ + "proto", + "sortkeys", + ] + pruneopts = "NT" + revision = "636bf0302bc95575d69441b25a2603156ffdddf1" + version = "v1.1.1" + +[[projects]] + branch = "master" + digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a" + name = "github.com/golang/glog" + packages = ["."] + pruneopts = "NT" + revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" + +[[projects]] + branch = "master" + digest = "1:aaedc94233e56ed57cdb04e3abfacc85c90c14082b62e3cdbe8ea72fc06ee035" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "NT" + revision = "c65c006176ff7ff98bb916961c7abbc6b0afc0aa" + +[[projects]] + digest = "1:d7cb4458ea8782e6efacd8f4940796ec559c90833509c436f40c4085b98156dd" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + ] + pruneopts = "NT" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" + +[[projects]] + branch = "master" + digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107" + name = "github.com/google/btree" + packages = ["."] + pruneopts = "NT" + revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" + +[[projects]] + branch = "master" + digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc" + name = "github.com/google/gofuzz" + packages = ["."] + pruneopts = "NT" + revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" + +[[projects]] + digest = "1:56a1f3949ebb7fa22fa6b4e4ac0fe0f77cc4faee5b57413e6fa9199a8458faf1" + name = "github.com/google/uuid" + packages = ["."] + pruneopts = "NT" + revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8" + version = "v1.1.0" + +[[projects]] + digest = "1:289332c13b80edfefc88397cce5266c16845dcf204fa2f6ac7e464ee4c7f6e96" + name = "github.com/googleapis/gnostic" + packages = [ + "OpenAPIv2", + "compiler", + "extensions", + ] + pruneopts = "NT" + revision = "7c663266750e7d82587642f65e60bc4083f1f84e" + version = "v0.2.0" + +[[projects]] + branch = "master" + digest = "1:97972f03fbf34ec4247ddc78ddb681389c468c020492aa32b109744a54fc0c14" + name = "github.com/gregjones/httpcache" + packages = [ + ".", + "diskcache", + ] + pruneopts = "NT" + revision = "c63ab54fda8f77302f8d414e19933f2b6026a089" + +[[projects]] + digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6" + name = "github.com/hashicorp/golang-lru" + packages = [ + ".", + "simplelru", + ] + pruneopts = "NT" + revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768" + version = "v0.5.0" + +[[projects]] + digest = "1:76efa3b55850d9caa14f8c0b3a951797f9bc2ffc283526073dcad1b06b6e02d3" + name = "github.com/hpcloud/tail" + packages = [ + ".", + "ratelimiter", + "util", + "watch", + "winfile", + ] + pruneopts = "NT" + revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5" + version = "v1.0.0" + +[[projects]] + digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3" + name = "github.com/imdario/mergo" + packages = ["."] + pruneopts = "NT" + revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4" + version = "v0.3.6" + +[[projects]] + digest = 
"1:f5b9328966ccea0970b1d15075698eff0ddb3e75889560aad2e9f76b289b536a" + name = "github.com/joho/godotenv" + packages = ["."] + pruneopts = "NT" + revision = "23d116af351c84513e1946b527c88823e476be13" + version = "v1.3.0" + +[[projects]] + digest = "1:1d39c063244ad17c4b18e8da1551163b6ffb52bd1640a49a8ec5c3b7bf4dbd5d" + name = "github.com/json-iterator/go" + packages = ["."] + pruneopts = "NT" + revision = "1624edc4454b8682399def8740d46db5e4362ba4" + version = "v1.1.5" + +[[projects]] + digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed" + name = "github.com/konsorten/go-windows-terminal-sequences" + packages = ["."] + pruneopts = "NT" + revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242" + version = "v1.0.1" + +[[projects]] + branch = "master" + digest = "1:7d9fcac7f1228470c4ea0ee31cdfb662a758c44df691e39b3e76c11d3e12ba8f" + name = "github.com/mailru/easyjson" + packages = [ + "buffer", + "jlexer", + "jwriter", + ] + pruneopts = "NT" + revision = "60711f1a8329503b04e1c88535f419d0bb440bff" + +[[projects]] + digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde" + name = "github.com/markbates/inflect" + packages = ["."] + pruneopts = "NT" + revision = "24b83195037b3bc61fcda2d28b7b0518bce293b6" + version = "v1.0.4" + +[[projects]] + branch = "master" + digest = "1:0e9bfc47ab9941ecc3344e580baca5deb4091177e84dd9773b48b38ec26b93d5" + name = "github.com/mattbaird/jsonpatch" + packages = ["."] + pruneopts = "NT" + revision = "81af80346b1a01caae0cbc27fd3c1ba5b11e189f" + +[[projects]] + digest = "1:ea1db000388d88b31db7531c83016bef0d6db0d908a07794bfc36aca16fbf935" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + pruneopts = "NT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + +[[projects]] + digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" + name = "github.com/modern-go/concurrent" + packages = ["."] + pruneopts = "NT" + revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" + version = "1.0.3" + +[[projects]] + digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6" + name = "github.com/modern-go/reflect2" + packages = ["."] + pruneopts = "NT" + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[[projects]] + digest = "1:0204417af2b5c56719f7e2809902c9a56ebb1b539d73ba520eeac84e98f21b72" + name = "github.com/onsi/ginkgo" + packages = [ + ".", + "config", + "internal/codelocation", + "internal/containernode", + "internal/failer", + "internal/leafnodes", + "internal/remote", + "internal/spec", + "internal/spec_iterator", + "internal/specrunner", + "internal/suite", + "internal/testingtproxy", + "internal/writer", + "reporters", + "reporters/stenographer", + "reporters/stenographer/support/go-colorable", + "reporters/stenographer/support/go-isatty", + "types", + ] + pruneopts = "NT" + revision = "2e1be8f7d90e9d3e3e58b0ce470f2f14d075406f" + version = "v1.7.0" + +[[projects]] + digest = "1:7efa6868c0394e8567b411d9160f10376d6f28926c5786d520f3603bc3e18198" + name = "github.com/onsi/gomega" + packages = [ + ".", + "format", + "internal/assertion", + "internal/asyncassertion", + "internal/oraclematcher", + "internal/testingtsupport", + "matchers", + "matchers/support/goraph/bipartitegraph", + "matchers/support/goraph/edge", + "matchers/support/goraph/node", + "matchers/support/goraph/util", + "types", + ] + pruneopts = "NT" + revision = "65fb64232476ad9046e57c26cd0bff3d3a8dc6cd" + version = "v1.4.3" + +[[projects]] + 
digest = "1:0b2dd5813eba320fd99869c1a5c4b54eb6950544259f2389c5b137be2168429a" + name = "github.com/operator-framework/operator-sdk" + packages = [ + "internal/util/fileutil", + "internal/util/k8sutil", + "internal/util/yamlutil", + "pkg/k8sutil", + "pkg/scaffold", + "pkg/scaffold/input", + "pkg/test", + "pkg/test/e2eutil", + "version", + ] + pruneopts = "NT" + revision = "cc5fe885869c181d820557bd296f092637fa70af" + version = "v0.4.0" + +[[projects]] + digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf" + name = "github.com/pborman/uuid" + packages = ["."] + pruneopts = "NT" + revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1" + version = "v1.2" + +[[projects]] + branch = "master" + digest = "1:bf2ac97824a7221eb16b096aecc1c390d4c8a4e49524386aaa2e2dd215cbfb31" + name = "github.com/petar/GoLLRB" + packages = ["llrb"] + pruneopts = "NT" + revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" + +[[projects]] + digest = "1:e4e9e026b8e4c5630205cd0208efb491b40ad40552e57f7a646bb8a46896077b" + name = "github.com/peterbourgon/diskv" + packages = ["."] + pruneopts = "NT" + revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" + version = "v2.0.1" + +[[projects]] + digest = "1:ec2a29e3bd141038ae5c3d3a4f57db0c341fcc1d98055a607aedd683aed124ee" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/internal", + "prometheus/promhttp", + ] + pruneopts = "NT" + revision = "505eaef017263e299324067d40ca2c48f6a2cf50" + version = "v0.9.2" + +[[projects]] + branch = "master" + digest = "1:c2cc5049e927e2749c0d5163c9f8d924880d83e84befa732b9aad0b6be227bed" + name = "github.com/prometheus/client_model" + packages = ["go"] + pruneopts = "NT" + revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8" + +[[projects]] + digest = "1:30261b5e263b5c4fb40571b53a41a99c96016c6b1b2c45c1cefd226fc3f6304b" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "NT" + revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250" + version = "v0.2.0" + +[[projects]] + branch = "master" + digest = "1:753d988fc383cc61173d5afdf94a149b853c75d399dafacfd93ba5b734e06044" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs", + ] + pruneopts = "NT" + revision = "316cf8ccfec56d206735d46333ca162eb374da8b" + +[[projects]] + digest = "1:4e63570205b765959739e2ef37add1d229cab7dbf70d80341a0608816120493b" + name = "github.com/rogpeppe/go-internal" + packages = [ + "modfile", + "module", + "semver", + ] + pruneopts = "NT" + revision = "d87f08a7d80821c797ffc8eb8f4e01675f378736" + version = "v1.0.0" + +[[projects]] + branch = "master" + digest = "1:b3f691dbcabd091f701954bb37bd7e7f1dedaad22c64a9c4ff7ca52fc87e0650" + name = "github.com/samuel/go-zookeeper" + packages = ["zk"] + pruneopts = "NT" + revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47" + +[[projects]] + digest = "1:cd2f2cba5b7ffafd0412fb647ff4bcff170292de57270f05fbbf391e3eb9566b" + name = "github.com/sirupsen/logrus" + packages = ["."] + pruneopts = "NT" + revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95" + version = "v1.2.0" + +[[projects]] + digest = "1:2a7c79c506479dc73c0100982a40bacc89e06d96dc458eb41c9b6aa44d9e0b6d" + name = "github.com/spf13/afero" + packages = [ + ".", + "mem", + ] + pruneopts = "NT" + revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd" + version = "v1.1.2" + +[[projects]] + digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779" + name = 
"github.com/spf13/pflag" + packages = ["."] + pruneopts = "NT" + revision = "298182f68c66c05229eb03ac171abe6e309ee79a" + version = "v1.0.3" + +[[projects]] + digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7" + name = "go.uber.org/atomic" + packages = ["."] + pruneopts = "NT" + revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" + version = "v1.3.2" + +[[projects]] + digest = "1:58ca93bdf81bac106ded02226b5395a0595d5346cdc4caa8d9c1f3a5f8f9976e" + name = "go.uber.org/multierr" + packages = ["."] + pruneopts = "NT" + revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" + version = "v1.1.0" + +[[projects]] + digest = "1:572fa4496563920f3e3107a2294cf2621d6cc4ffd03403fb6397b1bab9fa082a" + name = "go.uber.org/zap" + packages = [ + ".", + "buffer", + "internal/bufferpool", + "internal/color", + "internal/exit", + "zapcore", + ] + pruneopts = "NT" + revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982" + version = "v1.9.1" + +[[projects]] + branch = "master" + digest = "1:d6d3b59b8c4ceb6a7db2f20169719e57a8dcfa2c055b4418feb3fcc7bbd1a936" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + pruneopts = "NT" + revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447" + +[[projects]] + branch = "master" + digest = "1:b39fe73cabf4ae7600e25b0d116bb884a52d475e019bf583d03c08d98a567350" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp", + "html", + "html/atom", + "html/charset", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + ] + pruneopts = "NT" + revision = "351d144fa1fc0bd934e2408202be0c29f25e35a0" + +[[projects]] + branch = "master" + digest = "1:bdb664c89389d18d2aa69fb3b61fe5e2effc09e55b333a56e3cb071026418e33" + name = "golang.org/x/oauth2" + packages = [ + ".", + "google", + "internal", + "jws", + "jwt", + ] + pruneopts = "NT" + revision = "d668ce993890a79bda886613ee587a69dd5da7a6" + +[[projects]] + branch = "master" + digest = "1:0461030328ef9d2e0e38a2bc5febc8ce585e03db950028a6ab3d8d8ca1df151c" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows", + ] + pruneopts = "NT" + revision = "a5c9d58dba9a56f97aaa86f55e638b718c5a6c42" + +[[projects]] + digest = "1:8c74f97396ed63cc2ef04ebb5fc37bb032871b8fd890a25991ed40974b00cd2a" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "encoding", + "encoding/charmap", + "encoding/htmlindex", + "encoding/internal", + "encoding/internal/identifier", + "encoding/japanese", + "encoding/korean", + "encoding/simplifiedchinese", + "encoding/traditionalchinese", + "encoding/unicode", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "internal/utf8internal", + "language", + "runes", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + "width", + ] + pruneopts = "NT" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" + name = "golang.org/x/time" + packages = ["rate"] + pruneopts = "NT" + revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd" + +[[projects]] + branch = "master" + digest = "1:9c4e8d4c649b62e4eee87faf8b9aee75545e064ab05591bfc9ebaa9412467cbc" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "imports", + "internal/fastwalk", + "internal/gopathwalk", + ] + pruneopts = "NT" + revision = "22934f0fdb6201c132a3dc6120150dcb1646d74c" + +[[projects]] + digest = 
"1:2a4972ee51c3b9dfafbb3451fa0552e7a198d9d12c721bfc492050fe2f72e0f6" + name = "google.golang.org/appengine" + packages = [ + ".", + "internal", + "internal/app_identity", + "internal/base", + "internal/datastore", + "internal/log", + "internal/modules", + "internal/remote_api", + "internal/urlfetch", + "urlfetch", + ] + pruneopts = "NT" + revision = "4a4468ece617fc8205e99368fa2200e9d1fad421" + version = "v1.3.0" + +[[projects]] + digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129" + name = "gopkg.in/fsnotify.v1" + packages = ["."] + pruneopts = "NT" + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz" + version = "v1.4.7" + +[[projects]] + digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" + name = "gopkg.in/inf.v0" + packages = ["."] + pruneopts = "NT" + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" + +[[projects]] + branch = "v1" + digest = "1:8fb1ccb16a6cfecbfdfeb84d8ea1cc7afa8f9ef16526bc2326f72d993e32cef1" + name = "gopkg.in/tomb.v1" + packages = ["."] + pruneopts = "NT" + revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" + +[[projects]] + digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "NT" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" + +[[projects]] + digest = "1:b3f8152a68d73095a40fdcf329a93fc42e8eadb3305171df23fdb6b4e41a6417" + name = "k8s.io/api" + packages = [ + "admission/v1beta1", + "admissionregistration/v1alpha1", + "admissionregistration/v1beta1", + "apps/v1", + "apps/v1beta1", + "apps/v1beta2", + "authentication/v1", + "authentication/v1beta1", + "authorization/v1", + "authorization/v1beta1", + "autoscaling/v1", + "autoscaling/v2beta1", + "autoscaling/v2beta2", + "batch/v1", + "batch/v1beta1", + "batch/v2alpha1", + "certificates/v1beta1", + "coordination/v1beta1", + "core/v1", + "events/v1beta1", + "extensions/v1beta1", + "networking/v1", + "policy/v1beta1", + "rbac/v1", + "rbac/v1alpha1", + "rbac/v1beta1", + "scheduling/v1alpha1", + "scheduling/v1beta1", + "settings/v1alpha1", + "storage/v1", + "storage/v1alpha1", + "storage/v1beta1", + ] + pruneopts = "NT" + revision = "b503174bad5991eb66f18247f52e41c3258f6348" + version = "kubernetes-1.12.3" + +[[projects]] + digest = "1:82b4765488fd2a8bcefb93e196fdbfe342d33b16ae073a6f51bb4fb13e81e102" + name = "k8s.io/apiextensions-apiserver" + packages = [ + "pkg/apis/apiextensions", + "pkg/apis/apiextensions/v1beta1", + "pkg/client/clientset/clientset/scheme", + ] + pruneopts = "NT" + revision = "0cd23ebeb6882bd1cdc2cb15fc7b2d72e8a86a5b" + version = "kubernetes-1.12.3" + +[[projects]] + digest = "1:868de7cbaa0ecde6dc231c1529a10ae01bb05916095c0c992186e2a5cac57e79" + name = "k8s.io/apimachinery" + packages = [ + "pkg/api/errors", + "pkg/api/meta", + "pkg/api/resource", + "pkg/apis/meta/internalversion", + "pkg/apis/meta/v1", + "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1beta1", + "pkg/conversion", + "pkg/conversion/queryparams", + "pkg/fields", + "pkg/labels", + "pkg/runtime", + "pkg/runtime/schema", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", + "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", + "pkg/selection", + "pkg/types", + "pkg/util/cache", + "pkg/util/clock", + "pkg/util/diff", + "pkg/util/errors", + "pkg/util/framer", + 
"pkg/util/intstr", + "pkg/util/json", + "pkg/util/mergepatch", + "pkg/util/naming", + "pkg/util/net", + "pkg/util/runtime", + "pkg/util/sets", + "pkg/util/strategicpatch", + "pkg/util/uuid", + "pkg/util/validation", + "pkg/util/validation/field", + "pkg/util/wait", + "pkg/util/yaml", + "pkg/version", + "pkg/watch", + "third_party/forked/golang/json", + "third_party/forked/golang/reflect", + ] + pruneopts = "NT" + revision = "eddba98df674a16931d2d4ba75edc3a389bf633a" + version = "kubernetes-1.12.3" + +[[projects]] + digest = "1:00089f60de414edb1a51e63efde2480ce87c95d2cb3536ea240afe483905d736" + name = "k8s.io/client-go" + packages = [ + "discovery", + "discovery/cached", + "dynamic", + "kubernetes", + "kubernetes/scheme", + "kubernetes/typed/admissionregistration/v1alpha1", + "kubernetes/typed/admissionregistration/v1beta1", + "kubernetes/typed/apps/v1", + "kubernetes/typed/apps/v1beta1", + "kubernetes/typed/apps/v1beta2", + "kubernetes/typed/authentication/v1", + "kubernetes/typed/authentication/v1beta1", + "kubernetes/typed/authorization/v1", + "kubernetes/typed/authorization/v1beta1", + "kubernetes/typed/autoscaling/v1", + "kubernetes/typed/autoscaling/v2beta1", + "kubernetes/typed/autoscaling/v2beta2", + "kubernetes/typed/batch/v1", + "kubernetes/typed/batch/v1beta1", + "kubernetes/typed/batch/v2alpha1", + "kubernetes/typed/certificates/v1beta1", + "kubernetes/typed/coordination/v1beta1", + "kubernetes/typed/core/v1", + "kubernetes/typed/events/v1beta1", + "kubernetes/typed/extensions/v1beta1", + "kubernetes/typed/networking/v1", + "kubernetes/typed/policy/v1beta1", + "kubernetes/typed/rbac/v1", + "kubernetes/typed/rbac/v1alpha1", + "kubernetes/typed/rbac/v1beta1", + "kubernetes/typed/scheduling/v1alpha1", + "kubernetes/typed/scheduling/v1beta1", + "kubernetes/typed/settings/v1alpha1", + "kubernetes/typed/storage/v1", + "kubernetes/typed/storage/v1alpha1", + "kubernetes/typed/storage/v1beta1", + "pkg/apis/clientauthentication", + "pkg/apis/clientauthentication/v1alpha1", + "pkg/apis/clientauthentication/v1beta1", + "pkg/version", + "plugin/pkg/client/auth/exec", + "plugin/pkg/client/auth/gcp", + "rest", + "rest/watch", + "restmapper", + "third_party/forked/golang/template", + "tools/auth", + "tools/cache", + "tools/clientcmd", + "tools/clientcmd/api", + "tools/clientcmd/api/latest", + "tools/clientcmd/api/v1", + "tools/leaderelection", + "tools/leaderelection/resourcelock", + "tools/metrics", + "tools/pager", + "tools/record", + "tools/reference", + "transport", + "util/buffer", + "util/cert", + "util/connrotation", + "util/flowcontrol", + "util/homedir", + "util/integer", + "util/jsonpath", + "util/retry", + "util/workqueue", + ] + pruneopts = "NT" + revision = "d082d5923d3cc0bfbb066ee5fbdea3d0ca79acf8" + version = "kubernetes-1.12.3" + +[[projects]] + digest = "1:4e2addcdbe0330f43800c1fcb905fc7a21b86415dfcca619e5c606c87257af1b" + name = "k8s.io/code-generator" + packages = [ + "cmd/client-gen", + "cmd/client-gen/args", + "cmd/client-gen/generators", + "cmd/client-gen/generators/fake", + "cmd/client-gen/generators/scheme", + "cmd/client-gen/generators/util", + "cmd/client-gen/path", + "cmd/client-gen/types", + "cmd/conversion-gen", + "cmd/conversion-gen/args", + "cmd/conversion-gen/generators", + "cmd/deepcopy-gen", + "cmd/deepcopy-gen/args", + "cmd/defaulter-gen", + "cmd/defaulter-gen/args", + "cmd/informer-gen", + "cmd/informer-gen/args", + "cmd/informer-gen/generators", + "cmd/lister-gen", + "cmd/lister-gen/args", + "cmd/lister-gen/generators", + "cmd/openapi-gen", + 
"cmd/openapi-gen/args", + "pkg/util", + ] + pruneopts = "T" + revision = "3dcf91f64f638563e5106f21f50c31fa361c918d" + version = "kubernetes-1.12.3" + +[[projects]] + branch = "master" + digest = "1:5edbd655d7ee65178fd5750bda9a3d3cd7fb96291937926f4969e6b2dfbc5743" + name = "k8s.io/gengo" + packages = [ + "args", + "examples/deepcopy-gen/generators", + "examples/defaulter-gen/generators", + "examples/set-gen/sets", + "generator", + "namer", + "parser", + "types", + ] + pruneopts = "NT" + revision = "fd15ee9cc2f77baa4f31e59e6acbf21146455073" + +[[projects]] + digest = "1:f3b42f307c7f49a1a7276c48d4b910db76e003220e88797f7acd41e3a9277ddf" + name = "k8s.io/klog" + packages = ["."] + pruneopts = "NT" + revision = "a5bc97fbc634d635061f3146511332c7e313a55a" + version = "v0.1.0" + +[[projects]] + branch = "master" + digest = "1:9ac2fdede4a8304e3b00ea3b36526536339f306d0306e320fc74f6cefeead18e" + name = "k8s.io/kube-openapi" + packages = [ + "cmd/openapi-gen/args", + "pkg/common", + "pkg/generators", + "pkg/generators/rules", + "pkg/util/proto", + "pkg/util/sets", + ] + pruneopts = "NT" + revision = "0317810137be915b9cf888946c6e115c1bfac693" + +[[projects]] + digest = "1:e03ddaf9f31bccbbb8c33eabad2c85025a95ca98905649fd744e0a54c630a064" + name = "sigs.k8s.io/controller-runtime" + packages = [ + "pkg/cache", + "pkg/cache/internal", + "pkg/client", + "pkg/client/apiutil", + "pkg/client/config", + "pkg/controller", + "pkg/controller/controllerutil", + "pkg/event", + "pkg/handler", + "pkg/internal/controller", + "pkg/internal/controller/metrics", + "pkg/internal/recorder", + "pkg/leaderelection", + "pkg/manager", + "pkg/metrics", + "pkg/patch", + "pkg/predicate", + "pkg/reconcile", + "pkg/recorder", + "pkg/runtime/inject", + "pkg/runtime/log", + "pkg/runtime/scheme", + "pkg/runtime/signals", + "pkg/source", + "pkg/source/internal", + "pkg/webhook/admission", + "pkg/webhook/admission/types", + "pkg/webhook/types", + ] + pruneopts = "NT" + revision = "c63ebda0bf4be5f0a8abd4003e4ea546032545ba" + version = "v0.1.8" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/onsi/ginkgo", + "github.com/onsi/gomega", + "github.com/operator-framework/operator-sdk/pkg/k8sutil", + "github.com/operator-framework/operator-sdk/pkg/test", + "github.com/operator-framework/operator-sdk/pkg/test/e2eutil", + "github.com/operator-framework/operator-sdk/version", + "github.com/samuel/go-zookeeper/zk", + "github.com/sirupsen/logrus", + "k8s.io/api/apps/v1", + "k8s.io/api/batch/v1", + "k8s.io/api/core/v1", + "k8s.io/api/policy/v1beta1", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/api/resource", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/labels", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/types", + "k8s.io/apimachinery/pkg/util/intstr", + "k8s.io/apimachinery/pkg/util/wait", + "k8s.io/client-go/plugin/pkg/client/auth/gcp", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/conversion-gen", + "k8s.io/code-generator/cmd/deepcopy-gen", + "k8s.io/code-generator/cmd/defaulter-gen", + "k8s.io/code-generator/cmd/informer-gen", + "k8s.io/code-generator/cmd/lister-gen", + "k8s.io/code-generator/cmd/openapi-gen", + "k8s.io/gengo/args", + "sigs.k8s.io/controller-runtime/pkg/client", + "sigs.k8s.io/controller-runtime/pkg/client/config", + "sigs.k8s.io/controller-runtime/pkg/controller", + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil", + 
"sigs.k8s.io/controller-runtime/pkg/handler", + "sigs.k8s.io/controller-runtime/pkg/manager", + "sigs.k8s.io/controller-runtime/pkg/reconcile", + "sigs.k8s.io/controller-runtime/pkg/runtime/scheme", + "sigs.k8s.io/controller-runtime/pkg/runtime/signals", + "sigs.k8s.io/controller-runtime/pkg/source", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/nautilus-operator/cluster-operator/Gopkg.toml b/nautilus-operator/cluster-operator/Gopkg.toml new file mode 100644 index 0000000..3075005 --- /dev/null +++ b/nautilus-operator/cluster-operator/Gopkg.toml @@ -0,0 +1,65 @@ +# Force dep to vendor the code generators, which aren't imported just used at dev time. +required = [ + "k8s.io/code-generator/cmd/defaulter-gen", + "k8s.io/code-generator/cmd/deepcopy-gen", + "k8s.io/code-generator/cmd/conversion-gen", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/lister-gen", + "k8s.io/code-generator/cmd/informer-gen", + "k8s.io/code-generator/cmd/openapi-gen", + "k8s.io/gengo/args", +] + +[[override]] + name = "github.com/sirupsen/logrus" + version = "v1.2.0" + +[[override]] + name = "k8s.io/code-generator" + version = "kubernetes-1.12.3" + +[[override]] + name = "k8s.io/api" + version = "kubernetes-1.12.3" + +[[override]] + name = "k8s.io/apiextensions-apiserver" + version = "kubernetes-1.12.3" + +[[override]] + name = "k8s.io/apiserver" + version = "kubernetes-1.12.3" + +[[override]] + name = "k8s.io/apimachinery" + version = "kubernetes-1.12.3" + +[[override]] + name = "k8s.io/client-go" + version = "kubernetes-1.12.3" + +[[override]] + name = "k8s.io/cli-runtime" + version = "kubernetes-1.12.3" + +[[override]] + name = "sigs.k8s.io/controller-runtime" + version = "=v0.1.8" + +[[override]] + source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz" + name = "gopkg.in/fsnotify.v1" + +[[constraint]] + name = "github.com/operator-framework/operator-sdk" + # The version rule is used for a specific release and the master branch for in between releases. + # branch = "v0.4.x" #osdk_branch_annotation + version = "=v0.4.0" #osdk_version_annotation + +[prune] + go-tests = true + non-go = true + + [[prune.project]] + name = "k8s.io/code-generator" + non-go = false diff --git a/nautilus-operator/cluster-operator/LICENSE b/nautilus-operator/cluster-operator/LICENSE new file mode 100644 index 0000000..5c304d1 --- /dev/null +++ b/nautilus-operator/cluster-operator/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/nautilus-operator/cluster-operator/Makefile b/nautilus-operator/cluster-operator/Makefile new file mode 100644 index 0000000..9808cfb --- /dev/null +++ b/nautilus-operator/cluster-operator/Makefile @@ -0,0 +1,68 @@ +# Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 + +SHELL=/bin/bash -o pipefail + +PROJECT_NAME=nautilus-operator +REPO=nautilus/$(PROJECT_NAME) +VERSION=$(shell git describe --always --tags --dirty | sed "s/\(.*\)-g`git rev-parse --short HEAD`/\1/") +GIT_SHA=$(shell git rev-parse --short HEAD) +TEST_IMAGE=$(REPO)-testimages:$(VERSION) +GOOS=linux +GOARCH=amd64 + +.PHONY: all dep build check clean test + +all: check test build + +dep: + dep ensure -v + +build: build-go build-image + +build-go: + CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) go build \ + -ldflags "-X github.com/$(REPO)/pkg/version.Version=$(VERSION) -X github.com/$(REPO)/pkg/version.GitSHA=$(GIT_SHA)" \ + -o bin/$(PROJECT_NAME) cmd/manager/main.go + +build-image: + docker build --build-arg VERSION=$(VERSION) --build-arg GIT_SHA=$(GIT_SHA) -t $(REPO):$(VERSION) . + docker tag $(REPO):$(VERSION) $(REPO):latest + +test: test-unit test-e2e + +test-unit: + go test $$(go list ./... | grep -v /vendor/ | grep -v /test/e2e ) + +test-e2e: test-e2e-remote + +test-e2e-remote: login + operator-sdk build $(TEST_IMAGE) --enable-tests + docker push $(TEST_IMAGE) + operator-sdk test local ./test/e2e --namespace default --image $(TEST_IMAGE) --go-test-flags "-v -timeout 0" + +test-e2e-local: + operator-sdk test local ./test/e2e --namespace default --up-local --go-test-flags "-v -timeout 0" + +login: + @docker login -u "$(DOCKER_USER)" -p "$(DOCKER_PASS)" + +push: build login + docker push $(REPO):$(VERSION) + docker push $(REPO):latest + +clean: + rm -f bin/$(PROJECT_NAME) + +check: check-format check-license + +check-format: + ./scripts/check_format.sh + +check-license: + ./scripts/check_license.sh diff --git a/nautilus-operator/cluster-operator/README.md b/nautilus-operator/cluster-operator/README.md new file mode 100644 index 0000000..cc5db9e --- /dev/null +++ b/nautilus-operator/cluster-operator/README.md @@ -0,0 +1,409 @@ +# Nautilus cluster-operator + +[![Build Status](https://travis-ci.org/nautilus/cluster-operator.svg?branch=master)](https://travis-ci.org/nautilus/cluster-operator) +[![CircleCI](https://circleci.com/gh/nautilus/cluster-operator.svg?style=svg)](https://circleci.com/gh/nautilus/cluster-operator) + +The Nautilus Cluster Operator deploys and configures a Nautilus cluster on +Kubernetes. + +For quick installation of the cluster operator, use the [cluster operator helm +chart](https://github.com/nautilus/charts/tree/master/stable/nautiluscluster-operator). + +## Pre-requisites + +* Kubernetes 1.9+ +* Kubernetes must be configured to allow (configured by default in 1.10+): + * Privileged mode containers (enabled by default) + * Feature gate: MountPropagation=true. This can be done by appending + `--feature-gates MountPropagation=true` to the kube-apiserver and kubelet + services. + +Refer to the [Nautilus prerequisites docs](https://www.dellemc.com/en-us/collaterals/unauth/data-sheets/products/storage/h13117-emc-nautilus-appliance-ss.pdf) +for more information. + +## Setup/Development + +1. Install [operator-sdk](https://github.com/operator-framework/operator-sdk/tree/master#quick-start). +2. Run `operator-sdk generate k8s` if there's a change in api type. +3. Build operator container with `operator-sdk build nautilus/cluster-operator:` +4. 
Apply the manifests in `deploy/` to install the operator:
+ * Apply `namespace.yaml` to create the `nautilus-operator` namespace.
+ * Apply `service_account.yaml`, `role.yaml` and `role_binding.yaml` to create
+ a service account and grant it the required permissions.
+ * Apply `crds/*_crd.yaml` to define the custom resources.
+ * Apply `operator.yaml` to install the operator. Change the container image
+ in this file when installing a new operator.
+ * Apply `crds/*_nautiluscluster_cr.yaml` to create a `NautilusCluster`
+ custom resource.
+
+**NOTE**: Installing Nautilus on Minikube is not currently supported due to
+missing [kernel prerequisites](https://www.dellemc.com/en-us/collaterals/unauth/data-sheets/products/storage/h13117-emc-nautilus-appliance-ss.pdf).
+
+For development, run the operator outside of the k8s cluster by running:
+
+```bash
+make local-run
+```
+
+Build the operator container image:
+
+```bash
+make image/cluster-operator OPERATOR_IMAGE=nautilus/cluster-operator:test
+```
+
+This builds all the components and copies the binaries into the same container.
+
+After creating a resource, query it:
+
+```bash
+$ kubectl get nautiluscluster
+NAME               READY     STATUS    AGE
+example-nautilus   3/3       Running   4m
+```
+
+## Inspect a NautilusCluster Resource
+
+Get all the details about the cluster:
+
+```bash
+$ kubectl describe nautiluscluster/example-nautilus
+Name:         example-nautilus
+Namespace:    default
+Labels:
+Annotations:  kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"dellemc.com/v1","kind":"NautilusCluster","metadata":{"annotations":{},"name":"example-nautilus","namespace":"default"},"spec":{"...
+API Version:  dellemc.com/v1
+Kind:         NautilusCluster
+Metadata:
+  Creation Timestamp:  2018-07-21T12:57:11Z
+  Generation:          1
+  Resource Version:    10939030
+  Self Link:           /apis/dellemc.com/v1/namespaces/default/nautilusclusters/example-nautilus
+  UID:                 955b24a4-8ce5-11e8-956a-1866da35eee2
+Spec:
+  Join:  test07
+Status:
+  Node Health Status:
+    ...
+    ...
+  Nodes:
+    test09
+    test08
+    test07
+  Phase:  Running
+  Ready:  3/3
+Events:
+```
+
+## NautilusCluster Resource Configuration
+
+Once the Nautilus operator is running, a Nautilus cluster can be deployed by
+creating a Cluster Configuration. The parameters specified in the configuration
+define how Nautilus is deployed; the rest of the installation details are
+handled by the operator.
+
+The following table lists the configurable spec
+parameters of the NautilusCluster custom resource and their default values.
+
+Parameter | Description | Default
+--------- | ----------- | -------
+`secretRefName` | Reference name of nautilus secret |
+`secretRefNamespace` | Namespace of nautilus secret |
+`namespace` | Namespace where nautilus cluster resources are created | `nautilus`
+`images.nodeContainer` | Nautilus node container image | `nautilus/node:1.1.0`
+`images.initContainer` | Nautilus init container image | `nautilus/init:0.1`
+`images.csiNodeDriverRegistrarContainer` | CSI Node Driver Registrar Container image | `quay.io/k8scsi/csi-node-driver-registrar:v1.0.1`
+`images.csiClusterDriverRegistrarContainer` | CSI Cluster Driver Registrar Container image | `quay.io/k8scsi/csi-cluster-driver-registrar:v1.0.1`
+`images.csiExternalProvisionerContainer` | CSI External Provisioner Container image | `nautilus/csi-provisioner:v1.0.1`
+`images.csiExternalAttacherContainer` | CSI External Attacher Container image | `quay.io/k8scsi/csi-attacher:v1.0.1`
+`csi.enable` | Enable CSI setup | `false`
+`csi.enableProvisionCreds` | Enable CSI provision credentials | `false`
+`csi.enableControllerPublishCreds` | Enable CSI controller publish credentials | `false`
+`csi.enableNodePublishCreds` | Enable CSI node publish credentials | `false`
+`service.name` | Name of the Service used by the cluster | `nautilus`
+`service.type` | Type of the Service used by the cluster | `ClusterIP`
+`service.externalPort` | External port of the Service used by the cluster | `5705`
+`service.internalPort` | Internal port of the Service used by the cluster | `5705`
+`service.annotations` | Annotations of the Service used by the cluster |
+`ingress.enable` | Enable ingress for the cluster | `false`
+`ingress.hostname` | Hostname to be used in cluster ingress | `nautilus.local`
+`ingress.tls` | Enable TLS for the ingress | `false`
+`ingress.annotations` | Annotations of the ingress used by the cluster |
+`sharedDir` | Path to be shared with the kubelet container when deployed as a pod | `/var/lib/kubelet/plugins/kubernetes.io~nautilus`
+`kvBackend.address` | Comma-separated list of addresses of the external key-value store (e.g. `1.2.3.4:2379,2.3.4.5:2379`) |
+`kvBackend.backend` | Name of the key-value store to use. Set to `etcd` for an external key-value store. | `embedded`
+`pause` | Pause the operator for cluster maintenance | `false`
+`debug` | Enable debug mode for all the cluster nodes | `false`
+`disableFencing` | Disable Pod fencing | `false`
+`disableTelemetry` | Disable telemetry reports | `false`
+`nodeSelectorTerms` | Set node selector for nautilus pod placement |
+`tolerations` | Set pod tolerations for nautilus pod placement |
+`resources` | Set resource requirements for the containers |
+
+## Upgrading a Nautilus Cluster
+
+An existing Nautilus cluster can be upgraded to a new version of Nautilus by
+creating an Upgrade Configuration. The cluster-operator takes care of
+downloading the new container image and updating all the nodes with the new
+version of Nautilus.
+An example `NautilusUpgrade` resource is [nautilus_v1_nautilusupgrade_cr.yaml](/deploy/crds/nautilus_v1_nautilusupgrade_cr.yaml).
+
+Only offline upgrades are currently supported by the cluster-operator. During
+the upgrade, Nautilus maintenance mode is enabled, the applications that use
+Nautilus volumes are scaled down, and the whole Nautilus cluster is restarted
+with the new version. Once the Nautilus cluster becomes usable, the applications
+are scaled up to their previous configuration.
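+
+As a minimal sketch, an upgrade resource might look like the following (the
+`dellemc.com/v1` API version, the resource name, and the `newImage` field are
+taken from the `kubectl describe` output and the parameter table below):
+
+```yaml
+apiVersion: "dellemc.com/v1"
+kind: "NautilusUpgrade"
+metadata:
+  name: "example-nautilusupgrade"
+spec:
+  newImage: nautilus/node:1.0.0
+```
+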
+Once the upgrade is complete, make sure to delete the upgrade resource to put
+the Nautilus cluster back into normal mode; this disables the maintenance mode.
+
+Once an upgrade resource is created, events related to the upgrade can be
+viewed in the upgrade object description. All status updates, and any errors
+encountered during the upgrade, are posted as events.
+
+```bash
+$ kubectl describe nautilusupgrades example-nautilusupgrade
+Name:         example-nautilusupgrade
+Namespace:    default
+Labels:
+Annotations:  kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"dellemc.com/v1","kind":"NautilusUpgrade","metadata":{"annotations":{},"name":"example-nautilusupgrade","namespace":"default"},...
+API Version:  dellemc.com/v1
+Kind:         NautilusUpgrade
+...
+Spec:
+  New Image:  nautilus/node:1.0.0
+Events:
+  Type    Reason            Age  From               Message
+  ----    ------            ---- ----               -------
+  Normal  PullImage         4m   nautilus-upgrader  Pulling the new container image
+  Normal  PauseClusterCtrl  2m   nautilus-upgrader  Pausing the cluster controller and enabling cluster maintenance mode
+  Normal  UpgradeInit       2m   nautilus-upgrader  Nautilus upgrade of cluster example-nautilus started
+  Normal  UpgradeComplete   0s   nautilus-upgrader  Nautilus upgraded to nautilus/node:1.0.0. Delete upgrade object to disable cluster maintenance mode
+```
+
+## NautilusUpgrade Resource Configuration
+
+The following table lists the configurable spec parameters of the
+NautilusUpgrade custom resource and their default values.
+
+Parameter | Description | Default
+--------- | ----------- | -------
+`newImage` | Nautilus node container image to upgrade to |
+
+## Cleanup Old Configurations
+
+Nautilus creates and saves its files at `/var/lib/nautilus` on the hosts. This
+directory also contains some of the cluster's configuration. To do a fresh
+install of Nautilus, these files need to be deleted.
+
+__WARNING__: This will delete any existing data, which won't be recoverable.
+
+__NOTE__: When using an external etcd, the data related to nautilus should also
+be removed.
+
+```bash
+ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints http://nautilus-etcd-server:2379 del --prefix nautilus
+```
+
+The cluster-operator provides a `Job` resource that can execute certain tasks
+on all nodes or on selected nodes. This can be used to easily perform cleanup
+tasks. An example would be to create a `Job` resource:
+
+```yaml
+apiVersion: dellemc.com/v1
+kind: Job
+metadata:
+  name: cleanup-job
+spec:
+  image: nautilus/cleanup:v0.0.2
+  args: ["/var/lib/nautilus"]
+  mountPath: "/var/lib"
+  hostPath: "/var/lib"
+  completionWord: "done"
+  nodeSelectorTerms:
+    - matchExpressions:
+      - key: node-role.kubernetes.io/worker
+        operator: In
+        values:
+          - "true"
+```
+
+When applied, this job will run the `nautilus/cleanup` container on the nodes
+that have the label `node-role.kubernetes.io/worker` with value `"true"`,
+mounting `/var/lib` and passing the argument `/var/lib/nautilus`. This will run
+`rm -rf /var/lib/nautilus` on the selected nodes and clean up all the nautilus
+files. To run it on all the nodes, remove the `nodeSelectorTerms` attribute.
+On completion, the resource description shows that the task is completed and
+the resource can be deleted.
+
+```bash
+$ kubectl describe jobs.dellemc.com cleanup-job
+Name:         cleanup-job
+Namespace:    default
+...
+...
+Spec:
+  Completion Word:
+  Args:
+    /var/lib/nautilus
+  Host Path:  /var/lib
+  Image:      nautilus/cleanup:v0.0.2
+  ...
+Status:
+  Completed:  true
+Events:
+  Type    Reason        Age  From                      Message
+  ----    ------        ---- ----                      -------
+  Normal  JobCompleted  39s  nautiluscluster-operator  Job Completed. Safe to delete.
+```
+
+Deleting the resource will terminate all the pods that were created to run the
+task.
+
+Internally, this `Job` is backed by a controller that creates pods using a
+DaemonSet. Job containers have to be built in a specific way to achieve this
+behavior.
+
+In the above example, the cleanup container runs a shell script (`script.sh`):
+
+```bash
+#!/bin/ash
+
+set -euo pipefail
+
+# Gracefully handle the TERM signal sent when deleting the daemonset
+trap 'exit' TERM
+
+# This is the main command that's run by this script on
+# all the nodes.
+rm -rf $1
+
+# Let the monitoring script know we're done.
+echo "done"
+
+# This is a workaround to prevent the container from exiting
+# and k8s restarting the daemonset pod.
+while true; do sleep 1; done
+```
+
+And the container image is built with this Dockerfile:
+
+```dockerfile
+FROM alpine:3.6
+COPY script.sh .
+RUN chmod u+x script.sh
+ENTRYPOINT ["./script.sh"]
+```
+
+After running the main command, the script enters a sleep state instead of
+exiting; if it exited, Kubernetes would keep restarting the DaemonSet pod.
+Once the command has completed, the script echoes "done", which the Job
+controller reads to determine when the task is completed. Once all the pods
+have completed the task, the Job status is set to completed and the Job can
+be deleted.
+
+This mechanism can be extended to other similar cluster management operations;
+it is also used internally in the cluster upgrade process.
+
+## Job (jobs.dellemc.com) Resource Configuration
+
+The following table lists the configurable spec parameters of the
+Job custom resource and their default values.
+
+Parameter | Description | Default
+--------- | ----------- | -------
+`image` | Container image that the job runs |
+`args` | Any arguments to be passed when the container is run |
+`hostPath` | Path on the host that is mounted on the job container |
+`mountPath` | Path on the job container where the hostPath is mounted |
+`completionWord` | The word that the job controller looks for in the pod logs to determine if the task is completed |
+`labelSelector` | Labels that are added to the job pods and are used to select them |
+`nodeSelectorTerms` | Selects the nodes where the job runs |
+
+## TLS Support
+
+To enable TLS, ensure that an ingress controller is installed in the cluster.
+Set `ingress.enable` and `ingress.tls` to `true`.
+Store the TLS cert and key as part of the nautilus secret as:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: "nautilus-api"
+...
+...
+data:
+  # echo -n '' | base64
+  ...
+  ...
+  # Add base64 encoded TLS cert and key.
+  tls.crt:
+  tls.key:
+```
+
+## CSI
+
+Nautilus also supports the [Container Storage Interface (CSI)](https://github.com/container-storage-interface/spec)
+to communicate with Kubernetes.
+
+Only Kubernetes versions 1.10+ are supported. CSI ensures forward compatibility
+with future releases of Kubernetes, as vendor-specific drivers will soon be
+deprecated from Kubernetes. However, some functionality is not yet supported.
+
+To enable CSI, set `csi.enable` to `true` in the `NautilusCluster` resource
+config.
+ +```yaml +apiVersion: "dellemc.com/v1" +kind: "NautilusCluster" +metadata: + name: "example-nautilus" + namespace: "default" +spec: + secretRefName: "nautilus-api" + secretRefNamespace: "default" + csi: + enable: true +``` + +### CSI Credentials + +To enable CSI Credentials, ensure that CSI is enabled by setting `csi.enable` to +`true`. Based on the type of credentials to enable, set the csi fields to +`true`: + +```yaml +apiVersion: "dellemc.com/v1" +kind: "NautilusCluster" +metadata: + name: "example-nautilus" + namespace: "default" +spec: + ... + ... + csi: + enable: true + enableProvisionCreds: true + enableControllerPublishCreds: true + enableNodePublishCreds: true + ... +``` + +Specify the CSI credentials as part of the nautilus secret object as: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: "nautilus-api" +... +... +data: + # echo -n '' | base64 + ... + ... + csiProvisionUsername: + csiProvisionPassword: + csiControllerPublishUsername: + csiControllerPublishPassword: + csiNodePublishUsername: + csiNodePublishPassword: +``` diff --git a/nautilus-operator/cluster-operator/charts/nautilus-operator/Chart.yaml b/nautilus-operator/cluster-operator/charts/nautilus-operator/Chart.yaml new file mode 100644 index 0000000..457f403 --- /dev/null +++ b/nautilus-operator/cluster-operator/charts/nautilus-operator/Chart.yaml @@ -0,0 +1,14 @@ +name: nautilus-operator +version: 0.1.0 +appVersion: 0.1.0 +description: | + nautilus operator deploys a custom resource for a nautilus cluster, and a + pod to provision and scale nautilus clusters. +keywords: +- nautilus +- storage +home: https://github.com/nautilus/nautilus-operator/blob/master/charts/nautilus-operator +icon: https://avatars3.githubusercontent.com/u/25698199 +sources: +- https://github.com/nautilus/charts/blob/master/nautilus-operator +engine: gotpl diff --git a/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/_helpers.tpl b/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/_helpers.tpl new file mode 100644 index 0000000..9b6259d --- /dev/null +++ b/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/_helpers.tpl @@ -0,0 +1,16 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nautilusOp.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nautilusOp.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} \ No newline at end of file diff --git a/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/cluster-rbac.yaml b/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/cluster-rbac.yaml new file mode 100644 index 0000000..a4fec9b --- /dev/null +++ b/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/cluster-rbac.yaml @@ -0,0 +1,49 @@ +{{if eq .Values.watch.namespace ""}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: {{ template "nautilusOp.fullname" . 
}} +rules: +- apiGroups: + - nautilus.nautilus.io + resources: + - "*" + verbs: + - "*" +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + verbs: + - "*" +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - "*" + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: default-account-{{ template "nautilusOp.fullname" . }} +subjects: +- kind: ServiceAccount + name: default + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ template "nautilusOp.fullname" . }} + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/crd.yaml b/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/crd.yaml new file mode 100644 index 0000000..c793e1f --- /dev/null +++ b/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/crd.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: nautilusclusters.nautilus.nautilus.io +spec: + group: nautilus.nautilus.io + names: + kind: NautilusCluster + listKind: NautilusClusterList + plural: nautilusclusters + singular: nautiluscluster + scope: Namespaced + version: v1alpha1 \ No newline at end of file diff --git a/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/operator.yaml b/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/operator.yaml new file mode 100644 index 0000000..1f04fd2 --- /dev/null +++ b/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/operator.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "nautilusOp.fullname" . }} +spec: + replicas: 1 + selector: + matchLabels: + name: {{ template "nautilusOp.fullname" . }} + template: + metadata: + labels: + name: {{ template "nautilusOp.fullname" . }} + spec: + containers: + - name: {{ template "nautilusOp.fullname" . }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: "WATCH_NAMESPACE" + value: "{{ .Values.watch.namespace }}" diff --git a/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/rbac.yaml b/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/rbac.yaml new file mode 100644 index 0000000..43f8d5b --- /dev/null +++ b/nautilus-operator/cluster-operator/charts/nautilus-operator/templates/rbac.yaml @@ -0,0 +1,48 @@ +{{if ne .Values.watch.namespace ""}} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: {{ template "nautilusOp.fullname" . }} +rules: +- apiGroups: + - nautilus.nautilus.io + resources: + - "*" + verbs: + - "*" +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + verbs: + - "*" +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - "*" + +--- + +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: default-account-{{ template "nautilusOp.fullname" . }} +subjects: +- kind: ServiceAccount + name: default +roleRef: + kind: Role + name: {{ template "nautilusOp.fullname" . 
}}
+  apiGroup: rbac.authorization.k8s.io
+{{end}}
\ No newline at end of file
diff --git a/nautilus-operator/cluster-operator/charts/nautilus-operator/values.yaml b/nautilus-operator/cluster-operator/charts/nautilus-operator/values.yaml
new file mode 100644
index 0000000..3235550
--- /dev/null
+++ b/nautilus-operator/cluster-operator/charts/nautilus-operator/values.yaml
@@ -0,0 +1,8 @@
+image:
+  repository: nautilus/nautilus-operator
+  tag: 0.1.0
+  pullPolicy: Always
+
+# Namespace to watch for NautilusCluster resources. An empty string ("") means ALL namespaces.
+watch:
+  namespace: ""
diff --git a/nautilus-operator/cluster-operator/cmd/manager/main.go b/nautilus-operator/cluster-operator/cmd/manager/main.go
new file mode 100644
index 0000000..2251081
--- /dev/null
+++ b/nautilus-operator/cluster-operator/cmd/manager/main.go
@@ -0,0 +1,111 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package main
+
+import (
+	"context"
+	"flag"
+	"os"
+	"runtime"
+
+	"github.com/nautilus/nautilus-operator/pkg/apis"
+	"github.com/nautilus/nautilus-operator/pkg/controller"
+	controllerconfig "github.com/nautilus/nautilus-operator/pkg/controller/config"
+	"github.com/nautilus/nautilus-operator/pkg/version"
+
+	"github.com/operator-framework/operator-sdk/pkg/k8sutil"
+	"github.com/operator-framework/operator-sdk/pkg/leader"
+	"github.com/operator-framework/operator-sdk/pkg/ready"
+	sdkVersion "github.com/operator-framework/operator-sdk/version"
+	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+	"sigs.k8s.io/controller-runtime/pkg/client/config"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
+
+	log "github.com/sirupsen/logrus"
+)
+
+var (
+	versionFlag bool
+)
+
+func init() {
+	flag.BoolVar(&versionFlag, "version", false, "Show version and quit")
+	flag.BoolVar(&controllerconfig.TestMode, "test", false, "Enable test mode. Do not use this flag in production")
+}
+
+func printVersion() {
+	log.Printf("nautilus-operator Version: %v", version.Version)
+	log.Printf("Git SHA: %s", version.GitSHA)
+	log.Printf("Go Version: %s", runtime.Version())
+	log.Printf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)
+	log.Printf("operator-sdk Version: %v", sdkVersion.Version)
+}
+
+func main() {
+	flag.Parse()
+
+	printVersion()
+
+	if versionFlag {
+		os.Exit(0)
+	}
+
+	if controllerconfig.TestMode {
+		log.Warn("----- Running in test mode. Make sure you are NOT in production -----")
+	}
+
+	namespace, err := k8sutil.GetWatchNamespace()
+	if err != nil {
+		log.Fatal(err, "failed to get watch namespace")
+	}
+
+	// Get a config to talk to the apiserver
+	cfg, err := config.GetConfig()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Become the leader before proceeding
+	if err := leader.Become(context.TODO(), "nautilus-operator-lock"); err != nil {
+		log.Fatal(err, "failed to become leader")
+	}
+
+	r := ready.NewFileReady()
+	err = r.Set()
+	if err != nil {
+		log.Fatal(err, "")
+	}
+	defer r.Unset()
+
+	// Create a new Cmd to provide shared dependencies and start components
+	mgr, err := manager.New(cfg, manager.Options{Namespace: namespace})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	log.Print("Registering Components")
+
+	// Setup Scheme for all resources
+	if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
+		log.Fatal(err)
+	}
+
+	// Setup all Controllers
+	if err := controller.AddToManager(mgr); err != nil {
+		log.Fatal(err)
+	}
+
+	log.Print("Starting the Cmd")
+
+	// Start the Cmd
+	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
+		log.Fatal(err, "manager exited non-zero")
+	}
+}
diff --git a/nautilus-operator/cluster-operator/deploy/crd.yaml b/nautilus-operator/cluster-operator/deploy/crd.yaml
new file mode 100644
index 0000000..734b923
--- /dev/null
+++ b/nautilus-operator/cluster-operator/deploy/crd.yaml
@@ -0,0 +1,27 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: nautilusclusters.nautilus.nautilus.io
+spec:
+  group: nautilus.nautilus.io
+  names:
+    kind: NautilusCluster
+    listKind: NautilusClusterList
+    plural: nautilusclusters
+    singular: nautiluscluster
+  additionalPrinterColumns:
+  - name: Desired Members
+    type: integer
+    description: The number of desired nautilus members
+    JSONPath: .status.replicas
+  - name: Ready Members
+    type: integer
+    description: The number of nautilus members ready
+    JSONPath: .status.readyReplicas
+  - name: Age
+    type: date
+    JSONPath: .metadata.creationTimestamp
+  scope: Namespaced
+  version: v1alpha1
+  subresources:
+    status: {}
diff --git a/nautilus-operator/cluster-operator/deploy/crds/nautilus_v1alpha1_nautiluscluster_cr.yaml b/nautilus-operator/cluster-operator/deploy/crds/nautilus_v1alpha1_nautiluscluster_cr.yaml
new file mode 100644
index 0000000..c129d0e
--- /dev/null
+++ b/nautilus-operator/cluster-operator/deploy/crds/nautilus_v1alpha1_nautiluscluster_cr.yaml
@@ -0,0 +1,68 @@
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: nautilus-tier2
+spec:
+  storageClassName: "nfs"
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 50Gi
+---
+apiVersion: "nautilus.nautilus.io/v1alpha1"
+kind: "NautilusCluster"
+metadata:
+  name: "nautilus"
+spec:
+  zookeeperUri: zk-client:2181
+
+  externalAccess:
+    enabled: true
+    type: LoadBalancer
+
+  bookkeeper:
+    image:
+      repository: nautilus/bookkeeper
+      tag: latest
+      pullPolicy: IfNotPresent
+
+    replicas: 3
+
+    storage:
+      ledgerVolumeClaimTemplate:
+        accessModes: [ "ReadWriteOnce" ]
+        storageClassName: "standard"
+        resources:
+          requests:
+            storage: 10Gi
+
+      journalVolumeClaimTemplate:
+        accessModes: [ "ReadWriteOnce" ]
+        storageClassName: "standard"
+        resources:
+          requests:
+            storage: 10Gi
+
+    autoRecovery: true
+
+  nautilus:
+    controllerReplicas: 1
+    nodeReplicas: 3
+
+    cacheVolumeClaimTemplate:
+      accessModes: [ "ReadWriteOnce" ]
+      storageClassName: "standard"
+      resources:
+        requests:
+          storage: 20Gi
+
+    image:
+      repository: nautilus/nautilus
+      tag: latest
+      pullPolicy: IfNotPresent
+
+  tier2:
+    filesystem:
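+      # Tier 2 long-term storage is mounted from the ReadWriteMany PVC
+      # ("nautilus-tier2") defined at the top of this file.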
+      persistentVolumeClaim:
+        claimName: nautilus-tier2
diff --git a/nautilus-operator/cluster-operator/deploy/crds/nautilus_v1alpha1_nautiluscluster_crd.yaml b/nautilus-operator/cluster-operator/deploy/crds/nautilus_v1alpha1_nautiluscluster_crd.yaml
new file mode 100644
index 0000000..e98b329
--- /dev/null
+++ b/nautilus-operator/cluster-operator/deploy/crds/nautilus_v1alpha1_nautiluscluster_crd.yaml
@@ -0,0 +1,27 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: nautilusclusters.nautilus.nautilus.io
+spec:
+  group: nautilus.nautilus.io
+  names:
+    kind: NautilusCluster
+    listKind: NautilusClusterList
+    plural: nautilusclusters
+    singular: nautiluscluster
+  additionalPrinterColumns:
+  - name: Members
+    type: integer
+    description: The number of nautilus members running
+    JSONPath: .status.replicas
+  - name: Ready Members
+    type: integer
+    description: The number of nautilus members ready
+    JSONPath: .status.readyReplicas
+  - name: Age
+    type: date
+    JSONPath: .metadata.creationTimestamp
+  scope: Namespaced
+  version: v1alpha1
+  subresources:
+    status: {}
\ No newline at end of file
diff --git a/nautilus-operator/cluster-operator/deploy/operator.yaml b/nautilus-operator/cluster-operator/deploy/operator.yaml
new file mode 100644
index 0000000..8d6e5c6
--- /dev/null
+++ b/nautilus-operator/cluster-operator/deploy/operator.yaml
@@ -0,0 +1,35 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nautilus-operator
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      name: nautilus-operator
+  template:
+    metadata:
+      labels:
+        name: nautilus-operator
+    spec:
+      serviceAccountName: nautilus-operator
+      containers:
+        - name: nautilus-operator
+          image: nautilus/nautilus-operator:latest
+          ports:
+          - containerPort: 60000
+            name: metrics
+          command:
+          - nautilus-operator
+          imagePullPolicy: Always
+          env:
+            - name: WATCH_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: OPERATOR_NAME
+              value: "nautilus-operator"
diff --git a/nautilus-operator/cluster-operator/deploy/role.yaml b/nautilus-operator/cluster-operator/deploy/role.yaml
new file mode 100644
index 0000000..c01aec7
--- /dev/null
+++ b/nautilus-operator/cluster-operator/deploy/role.yaml
@@ -0,0 +1,60 @@
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: nautilus-operator
+rules:
+- apiGroups:
+  - nautilus.nautilus.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - services
+  - endpoints
+  - persistentvolumeclaims
+  - events
+  - configmaps
+  - secrets
+  verbs:
+  - '*'
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  - daemonsets
+  - replicasets
+  - statefulsets
+  verbs:
+  - "*"
+- apiGroups:
+  - policy
+  resources:
+  - poddisruptionbudgets
+  verbs:
+  - "*"
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - '*'
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: nautilus-operator
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - watch
+  - list
diff --git a/nautilus-operator/cluster-operator/deploy/role_binding.yaml b/nautilus-operator/cluster-operator/deploy/role_binding.yaml
new file mode 100644
index 0000000..3d166f5
--- /dev/null
+++ b/nautilus-operator/cluster-operator/deploy/role_binding.yaml
@@ -0,0 +1,26 @@
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: nautilus-operator
+subjects:
+- kind: ServiceAccount
+  name: nautilus-operator
+roleRef:
+  kind:
Role + name: nautilus-operator + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: nautilus-operator +subjects: +- kind: ServiceAccount + name: nautilus-operator + namespace: default +roleRef: + kind: ClusterRole + name: nautilus-operator + apiGroup: rbac.authorization.k8s.io diff --git a/nautilus-operator/cluster-operator/deploy/service_account.yaml b/nautilus-operator/cluster-operator/deploy/service_account.yaml new file mode 100644 index 0000000..8f98f32 --- /dev/null +++ b/nautilus-operator/cluster-operator/deploy/service_account.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nautilus-operator diff --git a/nautilus-operator/cluster-operator/example/cr-detailed.yaml b/nautilus-operator/cluster-operator/example/cr-detailed.yaml new file mode 100644 index 0000000..901cde9 --- /dev/null +++ b/nautilus-operator/cluster-operator/example/cr-detailed.yaml @@ -0,0 +1,123 @@ +apiVersion: "nautilus.nautilus.io/v1alpha1" +kind: "NautilusCluster" +metadata: + name: "example" +spec: + zookeeperUri: zk-client:2181 + + bookkeeper: + image: + repository: nautilus/bookkeeper + tag: 0.4.0 + pullPolicy: IfNotPresent + + replicas: 3 + resources: + requests: + memory: "3Gi" + cpu: "1000m" + limits: + memory: "5Gi" + cpu: "2000m" + + storage: + ledgerVolumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 10Gi + + journalVolumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 10Gi + + indexVolumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 10Gi + + # Turns on automatic recovery + # see https://bookkeeper.apache.org/docs/latest/admin/autorecovery/ + autoRecovery: true + + # To enable bookkeeper metrics feature, take codahale for example here. 
+ # See http://bookkeeper.apache.org/docs/4.7.0/admin/metrics/ for more metrics provider + # See http://bookkeeper.apache.org/docs/4.7.0/reference/config/#statistics for metrics provider configuration details + options: + enableStatistics: "true" + statsProviderClass: "org.apache.bookkeeper.stats.codahale.CodahaleMetricsProvider" + codahaleStatsGraphiteEndpoint: "graphite.example.com:2003" + # Default is 60 + codahaleStatsOutputFrequencySeconds: 30 + + nautilus: + controllerReplicas: 1 + controllerResources: + requests: + memory: "1Gi" + cpu: "1000m" + limits: + memory: "3Gi" + cpu: "2000m" + + nodeReplicas: 3 + nodeResources: + requests: + memory: "3Gi" + cpu: "1000m" + limits: + memory: "5Gi" + cpu: "2000m" + + # Turn on Nautilus Debug Logging + debugLogging: false + + image: + repository: nautilus/nautilus + tag: 0.4.0 + pullPolicy: IfNotPresent + + cacheVolumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 20Gi + + tier2: + filesystem: + persistentVolumeClaim: + claimName: nautilus-tier2 + +# ecs: +# uri: http://10.247.10.52:9020 +# bucket: shared +# root: "nautilus/example" +# namespace: nautilus +# credentials: ecs-credentials + +# hdfs: +# uri: hdfs://10.240.10.52:8020/ +# root: /example +# replicationFactor: 3 + + # See https://github.com/nautilus/nautilus/blob/3f5b65084ae17e74c8ef8e6a40e78e61fa98737b/config/config.properties + # for available configuration properties + options: + nautilusservice.containerCount: "4" + nautilusservice.cacheMaxSize: "17179869184" + nautilusservice.zkSessionTimeoutMs: "10000" + attributeIndex.readBlockSize: "1048576" + readIndex.storageReadAlignment: "1048576" + durableLog.checkpointMinCommitCount: "300" + bookkeeper.bkAckQuorumSize: "3" + metrics.dynamicCacheSize: "100000" + metrics.enableStatistics: "true" + metrics.statsdHost: "telegraph.default" + metrics.statsdPort: "8125" diff --git a/nautilus-operator/cluster-operator/example/cr.yaml b/nautilus-operator/cluster-operator/example/cr.yaml new file mode 100644 index 0000000..e5ca102 --- /dev/null +++ b/nautilus-operator/cluster-operator/example/cr.yaml @@ -0,0 +1,27 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: nautilus-tier2 +spec: + storageClassName: "nfs" + accessModes: + - ReadWriteMany + resources: + requests: + storage: 50Gi +--- +apiVersion: "nautilus.nautilus.io/v1alpha1" +kind: "NautilusCluster" +metadata: + name: "example" +spec: + zookeeperUri: zk-client:2181 + bookkeeper: + replicas: 3 + nautilus: + controllerReplicas: 1 + nodeReplicas: 3 + tier2: + filesystem: + persistentVolumeClaim: + claimName: nautilus-tier2 diff --git a/nautilus-operator/cluster-operator/pkg/apis/addtoscheme_nautilus_v1alpha1.go b/nautilus-operator/cluster-operator/pkg/apis/addtoscheme_nautilus_v1alpha1.go new file mode 100644 index 0000000..c851481 --- /dev/null +++ b/nautilus-operator/cluster-operator/pkg/apis/addtoscheme_nautilus_v1alpha1.go @@ -0,0 +1,20 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package apis + +import ( + "github.com/nautilus/nautilus-operator/pkg/apis/nautilus/v1alpha1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme) +} diff --git a/nautilus-operator/cluster-operator/pkg/apis/apis.go b/nautilus-operator/cluster-operator/pkg/apis/apis.go new file mode 100644 index 0000000..54aac90 --- /dev/null +++ b/nautilus-operator/cluster-operator/pkg/apis/apis.go @@ -0,0 +1,23 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package apis + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// AddToSchemes may be used to add all resources defined in the project to a Scheme +var AddToSchemes runtime.SchemeBuilder + +// AddToScheme adds all Resources to the Scheme +func AddToScheme(s *runtime.Scheme) error { + return AddToSchemes.AddToScheme(s) +} diff --git a/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/bookkeeper.go b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/bookkeeper.go new file mode 100644 index 0000000..f241b28 --- /dev/null +++ b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/bookkeeper.go @@ -0,0 +1,227 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package v1alpha1
+
+import (
+	"fmt"
+
+	"github.com/nautilus/nautilus-operator/pkg/controller/config"
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+const (
+	// DefaultBookkeeperImageRepository is the default Docker repository for
+	// the BookKeeper image
+	DefaultBookkeeperImageRepository = "nautilus/bookkeeper"
+
+	// DefaultBookkeeperImageTag is the default tag used for the BookKeeper
+	// Docker image
+	DefaultBookkeeperImageTag = "latest"
+
+	// DefaultBookkeeperImagePullPolicy is the default image pull policy used
+	// for the BookKeeper Docker image
+	DefaultBookkeeperImagePullPolicy = v1.PullAlways
+
+	// DefaultBookkeeperLedgerVolumeSize is the default volume size for the
+	// Bookkeeper ledger volume
+	DefaultBookkeeperLedgerVolumeSize = "10Gi"
+
+	// DefaultBookkeeperJournalVolumeSize is the default volume size for the
+	// Bookkeeper journal volume
+	DefaultBookkeeperJournalVolumeSize = "10Gi"
+
+	// DefaultBookkeeperIndexVolumeSize is the default volume size for the
+	// Bookkeeper index volume
+	DefaultBookkeeperIndexVolumeSize = "10Gi"
+
+	// MinimumBookkeeperReplicas is the minimum number of Bookkeeper replicas
+	// accepted
+	MinimumBookkeeperReplicas = 3
+
+	// DefaultBookkeeperRequestCPU is the default CPU request for BookKeeper
+	DefaultBookkeeperRequestCPU = "500m"
+
+	// DefaultBookkeeperLimitCPU is the default CPU limit for BookKeeper
+	DefaultBookkeeperLimitCPU = "1"
+
+	// DefaultBookkeeperRequestMemory is the default memory request for BookKeeper
+	DefaultBookkeeperRequestMemory = "1Gi"
+
+	// DefaultBookkeeperLimitMemory is the default memory limit for BookKeeper
+	DefaultBookkeeperLimitMemory = "2Gi"
+)
+
+// BookkeeperSpec defines the configuration of BookKeeper
+type BookkeeperSpec struct {
+	// Image defines the BookKeeper Docker image to use.
+	// By default, "nautilus/bookkeeper:latest" will be used.
+	Image *BookkeeperImageSpec `json:"image"`
+
+	// Replicas defines the number of BookKeeper replicas.
+	// Minimum is 3. Defaults to 3.
+	Replicas int32 `json:"replicas"`
+
+	// Storage configures the storage for BookKeeper
+	Storage *BookkeeperStorageSpec `json:"storage"`
+
+	// AutoRecovery indicates whether or not BookKeeper auto recovery is enabled.
+	// Defaults to true.
+	AutoRecovery *bool `json:"autoRecovery"`
+
+	// ServiceAccountName configures the service account used on BookKeeper instances
+	ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
+	// Resources specifies the request and limit of resources (CPU and memory)
+	// that a bookie container can use
+	Resources *v1.ResourceRequirements `json:"resources,omitempty"`
+
+	// Options is the BookKeeper configuration used to override settings in
+	// bookkeeper's bk_server.conf.
Some examples can be found here + // https://github.com/apache/bookkeeper/blob/master/docker/README.md + Options map[string]string `json:"options"` +} + +func (s *BookkeeperSpec) withDefaults() (changed bool) { + if s.Image == nil { + changed = true + s.Image = &BookkeeperImageSpec{} + } + if s.Image.withDefaults() { + changed = true + } + + if !config.TestMode && s.Replicas < MinimumBookkeeperReplicas { + changed = true + s.Replicas = MinimumBookkeeperReplicas + } + + if s.Storage == nil { + changed = true + s.Storage = &BookkeeperStorageSpec{} + } + if s.Storage.withDefaults() { + changed = true + } + + if s.AutoRecovery == nil { + changed = true + boolTrue := true + s.AutoRecovery = &boolTrue + } + + if s.Resources == nil { + changed = true + s.Resources = &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(DefaultBookkeeperRequestCPU), + v1.ResourceMemory: resource.MustParse(DefaultBookkeeperRequestMemory), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(DefaultBookkeeperLimitCPU), + v1.ResourceMemory: resource.MustParse(DefaultBookkeeperLimitMemory), + }, + } + } + + if s.Options == nil { + s.Options = map[string]string{} + } + + return changed +} + +// BookkeeperImageSpec defines the fields needed for a BookKeeper Docker image +type BookkeeperImageSpec struct { + ImageSpec +} + +// String formats a container image struct as a Docker compatible repository string +func (s *BookkeeperImageSpec) String() string { + return fmt.Sprintf("%s:%s", s.Repository, s.Tag) +} + +func (s *BookkeeperImageSpec) withDefaults() (changed bool) { + if s.Repository == "" { + changed = true + s.Repository = DefaultBookkeeperImageRepository + } + + if s.Tag == "" { + changed = true + s.Tag = DefaultBookkeeperImageTag + } + + if s.PullPolicy == "" { + changed = true + s.PullPolicy = DefaultBookkeeperImagePullPolicy + } + + return changed +} + +// BookkeeperStorageSpec is the configuration of the volumes used in BookKeeper +type BookkeeperStorageSpec struct { + // LedgerVolumeClaimTemplate is the spec to describe PVC for the BookKeeper ledger + // This field is optional. If no PVC spec and there is no default storage class, + // stateful containers will use emptyDir as volume + LedgerVolumeClaimTemplate *v1.PersistentVolumeClaimSpec `json:"ledgerVolumeClaimTemplate"` + + // JournalVolumeClaimTemplate is the spec to describe PVC for the BookKeeper journal + // This field is optional. If no PVC spec and there is no default storage class, + // stateful containers will use emptyDir as volume + JournalVolumeClaimTemplate *v1.PersistentVolumeClaimSpec `json:"journalVolumeClaimTemplate"` + + // IndexVolumeClaimTemplate is the spec to describe PVC for the BookKeeper index + // This field is optional. 
If no PVC spec and there is no default storage class, + // stateful containers will use emptyDir as volume + IndexVolumeClaimTemplate *v1.PersistentVolumeClaimSpec `json:"indexVolumeClaimTemplate"` +} + +func (s *BookkeeperStorageSpec) withDefaults() (changed bool) { + if s.LedgerVolumeClaimTemplate == nil { + changed = true + s.LedgerVolumeClaimTemplate = &v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse(DefaultBookkeeperLedgerVolumeSize), + }, + }, + } + } + + if s.JournalVolumeClaimTemplate == nil { + changed = true + s.JournalVolumeClaimTemplate = &v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse(DefaultBookkeeperJournalVolumeSize), + }, + }, + } + } + + if s.IndexVolumeClaimTemplate == nil { + changed = true + s.IndexVolumeClaimTemplate = &v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse(DefaultBookkeeperIndexVolumeSize), + }, + }, + } + } + + return changed +} diff --git a/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/doc.go b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/doc.go new file mode 100644 index 0000000..a75d0ef --- /dev/null +++ b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/doc.go @@ -0,0 +1,14 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +// Package v1alpha1 contains API Schema definitions for the nautilus v1alpha1 API group +// +k8s:deepcopy-gen=package,register +// +groupName=nautilus.nautilus.io +package v1alpha1 diff --git a/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/nautilus.go b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/nautilus.go new file mode 100644 index 0000000..e946c81 --- /dev/null +++ b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/nautilus.go @@ -0,0 +1,278 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package v1alpha1
+
+import (
+	"fmt"
+
+	"github.com/nautilus/nautilus-operator/pkg/controller/config"
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+const (
+	// DefaultNautilusImageRepository is the default Docker repository for
+	// the Nautilus image
+	DefaultNautilusImageRepository = "nautilus/nautilus"
+
+	// DefaultNautilusImageTag is the default tag used for the Nautilus
+	// Docker image
+	DefaultNautilusImageTag = "latest"
+
+	// DefaultNautilusImagePullPolicy is the default image pull policy used
+	// for the Nautilus Docker image
+	DefaultNautilusImagePullPolicy = v1.PullAlways
+
+	// DefaultNautilusCacheVolumeSize is the default volume size for the
+	// Nautilus Node cache volume
+	DefaultNautilusCacheVolumeSize = "20Gi"
+
+	// DefaultNautilusTier2ClaimName is the default volume claim name used as Tier 2
+	DefaultNautilusTier2ClaimName = "nautilus-tier2"
+
+	// DefaultControllerReplicas is the default number of replicas for the Nautilus
+	// Controller component
+	DefaultControllerReplicas = 1
+
+	// DefaultNodeReplicas is the default number of replicas for the Nautilus
+	// Segment Store component
+	DefaultNodeReplicas = 1
+
+	// DefaultControllerRequestCPU is the default CPU request for the Nautilus Controller
+	DefaultControllerRequestCPU = "250m"
+
+	// DefaultControllerLimitCPU is the default CPU limit for the Nautilus Controller
+	DefaultControllerLimitCPU = "500m"
+
+	// DefaultControllerRequestMemory is the default memory request for the Nautilus Controller
+	DefaultControllerRequestMemory = "512Mi"
+
+	// DefaultControllerLimitMemory is the default memory limit for the Nautilus Controller
+	DefaultControllerLimitMemory = "1Gi"
+
+	// DefaultNodeRequestCPU is the default CPU request for Nautilus Segment Store nodes
+	DefaultNodeRequestCPU = "500m"
+
+	// DefaultNodeLimitCPU is the default CPU limit for Nautilus Segment Store nodes
+	DefaultNodeLimitCPU = "1"
+
+	// DefaultNodeRequestMemory is the default memory request for Nautilus Segment Store nodes
+	DefaultNodeRequestMemory = "1Gi"
+
+	// DefaultNodeLimitMemory is the default memory limit for Nautilus Segment Store nodes
+	DefaultNodeLimitMemory = "2Gi"
+)
+
+// NautilusSpec defines the configuration of Nautilus
+type NautilusSpec struct {
+	// ControllerReplicas defines the number of Controller replicas.
+	// Defaults to 1.
+	ControllerReplicas int32 `json:"controllerReplicas"`
+
+	// NodeReplicas defines the number of Segment Store replicas.
+	// Defaults to 1.
+	NodeReplicas int32 `json:"nodeReplicas"`
+
+	// DebugLogging indicates whether or not debug level logging is enabled.
+	// Defaults to false.
+	DebugLogging bool `json:"debugLogging"`
+
+	// Image defines the Nautilus Docker image to use.
+	// By default, "nautilus/nautilus:latest" will be used.
+	Image *NautilusImageSpec `json:"image"`
+
+	// Options is the Nautilus configuration that is passed to the Nautilus processes
+	// as JAVA_OPTS. See the following file for a complete list of options:
+	// https://github.com/nautilus/nautilus/blob/master/config/config.properties
+	Options map[string]string `json:"options"`
+
+	// CacheVolumeClaimTemplate is the spec to describe PVC for the Nautilus cache.
+	// This field is optional. If no PVC spec is provided, stateful containers
+	// will use emptyDir as the volume
+	CacheVolumeClaimTemplate *v1.PersistentVolumeClaimSpec `json:"cacheVolumeClaimTemplate"`
+
+	// Tier2 is the configuration of Nautilus's tier 2 storage.
If no configuration + // is provided, it will assume that a PersistentVolumeClaim called "nautilus-tier2" + // is present and it will use it as Tier 2 + Tier2 *Tier2Spec `json:"tier2"` + + // ControllerServiceAccountName configures the service account used on controller instances. + // If not specified, Kubernetes will automatically assign the default service account in the namespace + ControllerServiceAccountName string `json:"controllerServiceAccountName,omitempty"` + + // NodeServiceAccountName configures the service account used on segment store instances. + // If not specified, Kubernetes will automatically assign the default service account in the namespace + NodeServiceAccountName string `json:"nodeServiceAccountName,omitempty"` + + // ControllerResources specifies the request and limit of resources that controller can have. + // ControllerResources includes CPU and memory resources + ControllerResources *v1.ResourceRequirements `json:"controllerResources,omitempty"` + + // NodeResources specifies the request and limit of resources that node can have. + // NodeResources includes CPU and memory resources + NodeResources *v1.ResourceRequirements `json:"nodeResources,omitempty"` +} + +func (s *NautilusSpec) withDefaults() (changed bool) { + if !config.TestMode && s.ControllerReplicas < 1 { + changed = true + s.ControllerReplicas = 1 + } + + if !config.TestMode && s.NodeReplicas < 1 { + changed = true + s.NodeReplicas = 1 + } + + if s.Image == nil { + changed = true + s.Image = &NautilusImageSpec{} + } + if s.Image.withDefaults() { + changed = true + } + + if s.Options == nil { + changed = true + s.Options = map[string]string{} + } + + if s.CacheVolumeClaimTemplate == nil { + changed = true + s.CacheVolumeClaimTemplate = &v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse(DefaultNautilusCacheVolumeSize), + }, + }, + } + } + + if s.Tier2 == nil { + changed = true + s.Tier2 = &Tier2Spec{} + } + + if s.Tier2.withDefaults() { + changed = true + } + + if s.ControllerResources == nil { + changed = true + s.ControllerResources = &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(DefaultControllerRequestCPU), + v1.ResourceMemory: resource.MustParse(DefaultControllerRequestMemory), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(DefaultControllerLimitCPU), + v1.ResourceMemory: resource.MustParse(DefaultControllerLimitMemory), + }, + } + } + + if s.NodeResources == nil { + changed = true + s.NodeResources = &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(DefaultNodeRequestCPU), + v1.ResourceMemory: resource.MustParse(DefaultNodeRequestMemory), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(DefaultNodeLimitCPU), + v1.ResourceMemory: resource.MustParse(DefaultNodeLimitMemory), + }, + } + } + + return changed +} + +// NautilusImageSpec defines the fields needed for a Nautilus Docker image +type NautilusImageSpec struct { + ImageSpec +} + +// String formats a container image struct as a Docker compatible repository string +func (s *NautilusImageSpec) String() string { + return fmt.Sprintf("%s:%s", s.Repository, s.Tag) +} + +func (s *NautilusImageSpec) withDefaults() (changed bool) { + if s.Repository == "" { + changed = true + s.Repository = DefaultNautilusImageRepository + } + + if s.Tag == "" { + changed = true + s.Tag = 
DefaultNautilusImageTag + } + + if s.PullPolicy == "" { + changed = true + s.PullPolicy = DefaultNautilusImagePullPolicy + } + + return changed +} + +// Tier2Spec configures the Tier 2 storage type to use with Nautilus. +// If not specified, Tier 2 will be configured in filesystem mode and will try +// to use a PersistentVolumeClaim with the name "nautilus-tier2" +type Tier2Spec struct { + // FileSystem is used to configure a pre-created Persistent Volume Claim + // as Tier 2 backend. + // It is default Tier 2 mode. + FileSystem *FileSystemSpec `json:"filesystem,omitempty"` + + // Ecs is used to configure a Dell EMC ECS system as a Tier 2 backend + Ecs *ECSSpec `json:"ecs,omitempty"` + + // Hdfs is used to configure an HDFS system as a Tier 2 backend + Hdfs *HDFSSpec `json:"hdfs,omitempty"` +} + +func (s *Tier2Spec) withDefaults() (changed bool) { + if s.FileSystem == nil && s.Ecs == nil && s.Hdfs == nil { + changed = true + fs := &FileSystemSpec{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: DefaultNautilusTier2ClaimName, + }, + } + s.FileSystem = fs + } + + return changed +} + +// FileSystemSpec contains the reference to a PVC. +type FileSystemSpec struct { + PersistentVolumeClaim *v1.PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim"` +} + +// ECSSpec contains the connection details to a Dell EMC ECS system +type ECSSpec struct { + Uri string `json:"uri"` + Bucket string `json:"bucket"` + Root string `json:"root"` + Namespace string `json:"namespace"` + Credentials string `json:"credentials"` +} + +// HDFSSpec contains the connection details to an HDFS system +type HDFSSpec struct { + Uri string `json:"uri"` + Root string `json:"root"` + ReplicationFactor int32 `json:"replicationFactor"` +} diff --git a/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/nautiluscluster_types.go b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/nautiluscluster_types.go new file mode 100644 index 0000000..40289cf --- /dev/null +++ b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/nautiluscluster_types.go @@ -0,0 +1,141 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package v1alpha1
+
+import (
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	// DefaultZookeeperUri is the default ZooKeeper URI in the form of "hostname:port"
+	DefaultZookeeperUri = "zk-client:2181"
+
+	// DefaultServiceType is the default service type for external access
+	DefaultServiceType = v1.ServiceTypeLoadBalancer
+)
+
+func init() {
+	SchemeBuilder.Register(&NautilusCluster{}, &NautilusClusterList{})
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NautilusClusterList contains a list of NautilusCluster
+type NautilusClusterList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []NautilusCluster `json:"items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NautilusCluster is the Schema for the nautilusclusters API
+// +k8s:openapi-gen=true
+type NautilusCluster struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ClusterSpec   `json:"spec,omitempty"`
+	Status ClusterStatus `json:"status,omitempty"`
+}
+
+// WithDefaults sets default values when not defined in the spec.
+func (p *NautilusCluster) WithDefaults() (changed bool) {
+	changed = p.Spec.withDefaults()
+
+	return changed
+}
+
+// ClusterSpec defines the desired state of NautilusCluster
+type ClusterSpec struct {
+	// ZookeeperUri specifies the hostname/IP address and port in the format
+	// "hostname:port".
+	// By default, the value "zk-client:2181" is used, which corresponds to the
+	// default ZooKeeper service created by the Nautilus ZooKeeper operator
+	// available at: https://github.com/nautilus/zookeeper-operator
+	ZookeeperUri string `json:"zookeeperUri"`
+
+	// ExternalAccess specifies whether or not to allow external access
+	// to clients and the service type to use to achieve it.
+	// By default, external access is not enabled
+	ExternalAccess *ExternalAccess `json:"externalAccess"`
+
+	// Bookkeeper configuration
+	Bookkeeper *BookkeeperSpec `json:"bookkeeper"`
+
+	// Nautilus configuration
+	Nautilus *NautilusSpec `json:"nautilus"`
+}
+
+func (s *ClusterSpec) withDefaults() (changed bool) {
+	if s.ZookeeperUri == "" {
+		changed = true
+		s.ZookeeperUri = DefaultZookeeperUri
+	}
+
+	if s.ExternalAccess == nil {
+		changed = true
+		s.ExternalAccess = &ExternalAccess{}
+	}
+	if s.ExternalAccess.withDefaults() {
+		changed = true
+	}
+
+	if s.Bookkeeper == nil {
+		changed = true
+		s.Bookkeeper = &BookkeeperSpec{}
+	}
+	if s.Bookkeeper.withDefaults() {
+		changed = true
+	}
+
+	if s.Nautilus == nil {
+		changed = true
+		s.Nautilus = &NautilusSpec{}
+	}
+	if s.Nautilus.withDefaults() {
+		changed = true
+	}
+
+	return changed
+}
+
+// ExternalAccess defines the configuration of external access
+type ExternalAccess struct {
+	// Enabled specifies whether or not external access is enabled
+	// By default, external access is not enabled
+	Enabled bool `json:"enabled"`
+
+	// Type specifies the service type to achieve external access.
+	// Options are "LoadBalancer" and "NodePort".
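+	//
+	// For illustration only (a hypothetical manifest snippet based on the
+	// fields declared here), enabling NodePort access would look like:
+	//
+	//   externalAccess:
+	//     enabled: true
+	//     type: NodePort
+	//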
+	// By default, if external access is enabled, it will use "LoadBalancer"
+	Type v1.ServiceType `json:"type,omitempty"`
+}
+
+func (e *ExternalAccess) withDefaults() (changed bool) {
+	if e.Enabled == false && e.Type != "" {
+		changed = true
+		e.Type = ""
+	} else if e.Enabled == true && e.Type == "" {
+		changed = true
+		e.Type = DefaultServiceType
+	}
+
+	return changed
+}
+
+// ImageSpec defines the fields needed for a Docker repository image
+type ImageSpec struct {
+	Repository string        `json:"repository"`
+	Tag        string        `json:"tag"`
+	PullPolicy v1.PullPolicy `json:"pullPolicy"`
+}
diff --git a/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/register.go b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/register.go
new file mode 100644
index 0000000..f7c38a9
--- /dev/null
+++ b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/register.go
@@ -0,0 +1,27 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+// Package v1alpha1 contains API Schema definitions for the nautilus v1alpha1 API group
+// +k8s:deepcopy-gen=package,register
+// +groupName=nautilus.nautilus.io
+package v1alpha1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/runtime/scheme"
+)
+
+var (
+	// SchemeGroupVersion is group version used to register these objects
+	SchemeGroupVersion = schema.GroupVersion{Group: "nautilus.nautilus.io", Version: "v1alpha1"}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+)
diff --git a/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/status.go b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/status.go
new file mode 100644
index 0000000..3df6b6b
--- /dev/null
+++ b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/status.go
@@ -0,0 +1,131 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package v1alpha1
+
+import (
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+)
+
+type ClusterConditionType string
+
+const (
+	ClusterConditionPodsReady ClusterConditionType = "PodsReady"
+)
+
+// ClusterStatus defines the observed state of NautilusCluster
+type ClusterStatus struct {
+	// Conditions list all the applied conditions
+	Conditions []ClusterCondition `json:"conditions,omitempty"`
+
+	// CurrentVersion is the current cluster version
+	CurrentVersion string `json:"currentVersion,omitempty"`
+
+	// TargetVersion is the version the cluster is upgrading to.
+	// If the cluster is not upgrading, TargetVersion is empty.
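+	// For example (hypothetical version numbers), a cluster moving from
+	// "0.4.0" to "0.5.0" reports CurrentVersion "0.4.0" and TargetVersion
+	// "0.5.0" until the upgrade completes.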
+ TargetVersion string `json:"targetVersion,omitempty"` + + // Replicas is the number of desired replicas in the cluster + Replicas int32 `json:"replicas"` + + // CurrentReplicas is the number of current replicas in the cluster + CurrentReplicas int32 `json:"currentReplicas"` + + // ReadyReplicas is the number of ready replicas in the cluster + ReadyReplicas int32 `json:"readyReplicas"` + + // Members is the Nautilus members in the cluster + Members MembersStatus `json:"members"` +} + +// MembersStatus is the status of the members of the cluster with both +// ready and unready node membership lists +type MembersStatus struct { + Ready []string `json:"ready"` + Unready []string `json:"unready"` +} + +// ClusterCondition shows the current condition of a Nautilus cluster. +// Comply with k8s API conventions +type ClusterCondition struct { + // Type of Nautilus cluster condition. + Type ClusterConditionType `json:"type"` + + // Status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status"` + + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty"` + + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty"` + + // The last time this condition was updated. + LastUpdateTime string `json:"lastUpdateTime,omitempty"` + + // Last time the condition transitioned from one status to another. + LastTransitionTime string `json:"lastTransitionTime,omitempty"` +} + +func (ps *ClusterStatus) SetPodsReadyConditionTrue() { + c := newClusterCondition(ClusterConditionPodsReady, corev1.ConditionTrue, "", "") + ps.setClusterCondition(*c) +} + +func (ps *ClusterStatus) SetPodsReadyConditionFalse() { + c := newClusterCondition(ClusterConditionPodsReady, corev1.ConditionFalse, "", "") + ps.setClusterCondition(*c) +} + +func newClusterCondition(condType ClusterConditionType, status corev1.ConditionStatus, reason, message string) *ClusterCondition { + return &ClusterCondition{ + Type: condType, + Status: status, + Reason: reason, + Message: message, + LastUpdateTime: "", + LastTransitionTime: "", + } +} + +func (ps *ClusterStatus) GetClusterCondition(t ClusterConditionType) (int, *ClusterCondition) { + for i, c := range ps.Conditions { + if t == c.Type { + return i, &c + } + } + return -1, nil +} + +func (ps *ClusterStatus) setClusterCondition(newCondition ClusterCondition) { + now := time.Now().Format(time.RFC3339) + position, existingCondition := ps.GetClusterCondition(newCondition.Type) + + if existingCondition == nil { + ps.Conditions = append(ps.Conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = now + existingCondition.LastUpdateTime = now + } + + if existingCondition.Reason != newCondition.Reason || existingCondition.Message != newCondition.Message { + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message + existingCondition.LastUpdateTime = now + } + + ps.Conditions[position] = *existingCondition +} diff --git a/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/status_test.go b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/status_test.go new file mode 100644 index 0000000..aa57c76 --- /dev/null +++ b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/status_test.go @@ -0,0 +1,91 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package v1alpha1_test
+
+import (
+	"testing"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"github.com/nautilus/nautilus-operator/pkg/apis/nautilus/v1alpha1"
+)
+
+func TestV1alpha1(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "NautilusCluster Status")
+}
+
+var _ = Describe("NautilusCluster Status", func() {
+
+	var p v1alpha1.NautilusCluster
+
+	BeforeEach(func() {
+		p = v1alpha1.NautilusCluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "default",
+			},
+		}
+	})
+
+	Context("manually set pods ready condition to be true", func() {
+		BeforeEach(func() {
+			condition := v1alpha1.ClusterCondition{
+				Type:               v1alpha1.ClusterConditionPodsReady,
+				Status:             corev1.ConditionTrue,
+				Reason:             "",
+				Message:            "",
+				LastUpdateTime:     "",
+				LastTransitionTime: "",
+			}
+			p.Status.Conditions = append(p.Status.Conditions, condition)
+		})
+
+		It("should contain the pods ready condition with true status", func() {
+			_, condition := p.Status.GetClusterCondition(v1alpha1.ClusterConditionPodsReady)
+			Ω(condition.Status).To(Equal(corev1.ConditionTrue))
+		})
+	})
+
+	Context("set conditions", func() {
+		Context("set pods ready condition to be true", func() {
+			BeforeEach(func() {
+				p.Status.SetPodsReadyConditionFalse()
+				p.Status.SetPodsReadyConditionTrue()
+			})
+			It("should have the pods ready condition with true status", func() {
+				_, condition := p.Status.GetClusterCondition(v1alpha1.ClusterConditionPodsReady)
+				Ω(condition.Status).To(Equal(corev1.ConditionTrue))
+			})
+		})
+
+		Context("set pods ready condition to be false", func() {
+			BeforeEach(func() {
+				p.Status.SetPodsReadyConditionTrue()
+				p.Status.SetPodsReadyConditionFalse()
+			})
+
+			It("should have the pods ready condition with false status", func() {
+				_, condition := p.Status.GetClusterCondition(v1alpha1.ClusterConditionPodsReady)
+				Ω(condition.Status).To(Equal(corev1.ConditionFalse))
+			})
+
+			It("should have updated timestamps", func() {
+				_, condition := p.Status.GetClusterCondition(v1alpha1.ClusterConditionPodsReady)
+				// TODO: check the timestamps
+				Ω(condition.LastUpdateTime).NotTo(Equal(""))
+				Ω(condition.LastTransitionTime).NotTo(Equal(""))
+			})
+		})
+	})
+})
diff --git a/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/zz_generated.deepcopy.go b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..270a661
--- /dev/null
+++ b/nautilus-operator/cluster-operator/pkg/apis/nautilus/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,454 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+ +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BookkeeperImageSpec) DeepCopyInto(out *BookkeeperImageSpec) { + *out = *in + out.ImageSpec = in.ImageSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BookkeeperImageSpec. +func (in *BookkeeperImageSpec) DeepCopy() *BookkeeperImageSpec { + if in == nil { + return nil + } + out := new(BookkeeperImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BookkeeperSpec) DeepCopyInto(out *BookkeeperSpec) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(BookkeeperImageSpec) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(BookkeeperStorageSpec) + (*in).DeepCopyInto(*out) + } + if in.AutoRecovery != nil { + in, out := &in.AutoRecovery, &out.AutoRecovery + *out = new(bool) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BookkeeperSpec. +func (in *BookkeeperSpec) DeepCopy() *BookkeeperSpec { + if in == nil { + return nil + } + out := new(BookkeeperSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BookkeeperStorageSpec) DeepCopyInto(out *BookkeeperStorageSpec) { + *out = *in + if in.LedgerVolumeClaimTemplate != nil { + in, out := &in.LedgerVolumeClaimTemplate, &out.LedgerVolumeClaimTemplate + *out = new(v1.PersistentVolumeClaimSpec) + (*in).DeepCopyInto(*out) + } + if in.JournalVolumeClaimTemplate != nil { + in, out := &in.JournalVolumeClaimTemplate, &out.JournalVolumeClaimTemplate + *out = new(v1.PersistentVolumeClaimSpec) + (*in).DeepCopyInto(*out) + } + if in.IndexVolumeClaimTemplate != nil { + in, out := &in.IndexVolumeClaimTemplate, &out.IndexVolumeClaimTemplate + *out = new(v1.PersistentVolumeClaimSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BookkeeperStorageSpec. +func (in *BookkeeperStorageSpec) DeepCopy() *BookkeeperStorageSpec { + if in == nil { + return nil + } + out := new(BookkeeperStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition. +func (in *ClusterCondition) DeepCopy() *ClusterCondition { + if in == nil { + return nil + } + out := new(ClusterCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + if in.ExternalAccess != nil { + in, out := &in.ExternalAccess, &out.ExternalAccess + *out = new(ExternalAccess) + **out = **in + } + if in.Bookkeeper != nil { + in, out := &in.Bookkeeper, &out.Bookkeeper + *out = new(BookkeeperSpec) + (*in).DeepCopyInto(*out) + } + if in.Nautilus != nil { + in, out := &in.Nautilus, &out.Nautilus + *out = new(NautilusSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterCondition, len(*in)) + copy(*out, *in) + } + in.Members.DeepCopyInto(&out.Members) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ECSSpec) DeepCopyInto(out *ECSSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECSSpec. +func (in *ECSSpec) DeepCopy() *ECSSpec { + if in == nil { + return nil + } + out := new(ECSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalAccess) DeepCopyInto(out *ExternalAccess) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAccess. +func (in *ExternalAccess) DeepCopy() *ExternalAccess { + if in == nil { + return nil + } + out := new(ExternalAccess) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileSystemSpec) DeepCopyInto(out *FileSystemSpec) { + *out = *in + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(v1.PersistentVolumeClaimVolumeSource) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemSpec. +func (in *FileSystemSpec) DeepCopy() *FileSystemSpec { + if in == nil { + return nil + } + out := new(FileSystemSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HDFSSpec) DeepCopyInto(out *HDFSSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HDFSSpec. +func (in *HDFSSpec) DeepCopy() *HDFSSpec { + if in == nil { + return nil + } + out := new(HDFSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. +func (in *ImageSpec) DeepCopy() *ImageSpec { + if in == nil { + return nil + } + out := new(ImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MembersStatus) DeepCopyInto(out *MembersStatus) { + *out = *in + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Unready != nil { + in, out := &in.Unready, &out.Unready + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MembersStatus. +func (in *MembersStatus) DeepCopy() *MembersStatus { + if in == nil { + return nil + } + out := new(MembersStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NautilusCluster) DeepCopyInto(out *NautilusCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NautilusCluster. +func (in *NautilusCluster) DeepCopy() *NautilusCluster { + if in == nil { + return nil + } + out := new(NautilusCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NautilusCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NautilusClusterList) DeepCopyInto(out *NautilusClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NautilusCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NautilusClusterList. +func (in *NautilusClusterList) DeepCopy() *NautilusClusterList { + if in == nil { + return nil + } + out := new(NautilusClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NautilusClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NautilusImageSpec) DeepCopyInto(out *NautilusImageSpec) { + *out = *in + out.ImageSpec = in.ImageSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NautilusImageSpec. +func (in *NautilusImageSpec) DeepCopy() *NautilusImageSpec { + if in == nil { + return nil + } + out := new(NautilusImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NautilusSpec) DeepCopyInto(out *NautilusSpec) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(NautilusImageSpec) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.CacheVolumeClaimTemplate != nil { + in, out := &in.CacheVolumeClaimTemplate, &out.CacheVolumeClaimTemplate + *out = new(v1.PersistentVolumeClaimSpec) + (*in).DeepCopyInto(*out) + } + if in.Tier2 != nil { + in, out := &in.Tier2, &out.Tier2 + *out = new(Tier2Spec) + (*in).DeepCopyInto(*out) + } + if in.ControllerResources != nil { + in, out := &in.ControllerResources, &out.ControllerResources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeResources != nil { + in, out := &in.NodeResources, &out.NodeResources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NautilusSpec. +func (in *NautilusSpec) DeepCopy() *NautilusSpec { + if in == nil { + return nil + } + out := new(NautilusSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tier2Spec) DeepCopyInto(out *Tier2Spec) { + *out = *in + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(FileSystemSpec) + (*in).DeepCopyInto(*out) + } + if in.Ecs != nil { + in, out := &in.Ecs, &out.Ecs + *out = new(ECSSpec) + **out = **in + } + if in.Hdfs != nil { + in, out := &in.Hdfs, &out.Hdfs + *out = new(HDFSSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tier2Spec. +func (in *Tier2Spec) DeepCopy() *Tier2Spec { + if in == nil { + return nil + } + out := new(Tier2Spec) + in.DeepCopyInto(out) + return out +} diff --git a/nautilus-operator/cluster-operator/pkg/controller/add_nautiluscluster.go b/nautilus-operator/cluster-operator/pkg/controller/add_nautiluscluster.go new file mode 100644 index 0000000..0d39c91 --- /dev/null +++ b/nautilus-operator/cluster-operator/pkg/controller/add_nautiluscluster.go @@ -0,0 +1,20 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package controller + +import ( + "github.com/nautilus/nautilus-operator/pkg/controller/nautiluscluster" +) + +func init() { + // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. + AddToManagerFuncs = append(AddToManagerFuncs, nautiluscluster.Add) +} diff --git a/nautilus-operator/cluster-operator/pkg/controller/config/config.go b/nautilus-operator/cluster-operator/pkg/controller/config/config.go new file mode 100644 index 0000000..81aee8b --- /dev/null +++ b/nautilus-operator/cluster-operator/pkg/controller/config/config.go @@ -0,0 +1,18 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package config + +// TestMode enables test mode in the operator and applies +// the following changes: +// - Disables BookKeeper minimum number of replicas +// - Disables Nautilus Controller minimum number of replicas +// - Disables Segment Store minimum number of replicas +var TestMode bool diff --git a/nautilus-operator/cluster-operator/pkg/controller/controller.go b/nautilus-operator/cluster-operator/pkg/controller/controller.go new file mode 100644 index 0000000..53ce364 --- /dev/null +++ b/nautilus-operator/cluster-operator/pkg/controller/controller.go @@ -0,0 +1,28 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package controller + +import ( + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +// AddToManagerFuncs is a list of functions to add all Controllers to the Manager +var AddToManagerFuncs []func(manager.Manager) error + +// AddToManager adds all Controllers to the Manager +func AddToManager(m manager.Manager) error { + for _, f := range AddToManagerFuncs { + if err := f(m); err != nil { + return err + } + } + return nil +} diff --git a/nautilus-operator/cluster-operator/pkg/controller/nautilus/bookie.go b/nautilus-operator/cluster-operator/pkg/controller/nautilus/bookie.go new file mode 100644 index 0000000..a4528a3 --- /dev/null +++ b/nautilus-operator/cluster-operator/pkg/controller/nautilus/bookie.go @@ -0,0 +1,274 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package nautilus
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/nautilus/nautilus-operator/pkg/apis/nautilus/v1alpha1"
+	"github.com/nautilus/nautilus-operator/pkg/util"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	policyv1beta1 "k8s.io/api/policy/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+	LedgerDiskName  = "ledger"
+	JournalDiskName = "journal"
+	IndexDiskName   = "index"
+)
+
+func MakeBookieHeadlessService(nautilusCluster *v1alpha1.NautilusCluster) *corev1.Service {
+	return &corev1.Service{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Service",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      util.HeadlessServiceNameForBookie(nautilusCluster.Name),
+			Namespace: nautilusCluster.Namespace,
+			Labels:    util.LabelsForBookie(nautilusCluster),
+		},
+		Spec: corev1.ServiceSpec{
+			Ports: []corev1.ServicePort{
+				{
+					Name: "bookie",
+					Port: 3181,
+				},
+			},
+			Selector:  util.LabelsForBookie(nautilusCluster),
+			ClusterIP: corev1.ClusterIPNone,
+		},
+	}
+}
+
+func MakeBookieStatefulSet(nautilusCluster *v1alpha1.NautilusCluster) *appsv1.StatefulSet {
+	return &appsv1.StatefulSet{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "StatefulSet",
+			APIVersion: "apps/v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      util.StatefulSetNameForBookie(nautilusCluster.Name),
+			Namespace: nautilusCluster.Namespace,
+			Labels:    util.LabelsForBookie(nautilusCluster),
+		},
+		Spec: appsv1.StatefulSetSpec{
+			ServiceName:         util.HeadlessServiceNameForBookie(nautilusCluster.Name),
+			Replicas:            &nautilusCluster.Spec.Bookkeeper.Replicas,
+			PodManagementPolicy: appsv1.ParallelPodManagement,
+			Template:            makeBookieStatefulTemplate(nautilusCluster),
+			Selector: &metav1.LabelSelector{
+				MatchLabels: util.LabelsForBookie(nautilusCluster),
+			},
+			VolumeClaimTemplates: makeBookieVolumeClaimTemplates(nautilusCluster.Spec.Bookkeeper),
+		},
+	}
+}
+
+func makeBookieStatefulTemplate(nautilusCluster *v1alpha1.NautilusCluster) corev1.PodTemplateSpec {
+	return corev1.PodTemplateSpec{
+		ObjectMeta: metav1.ObjectMeta{
+			Labels: util.LabelsForBookie(nautilusCluster),
+		},
+		Spec: *makeBookiePodSpec(nautilusCluster.Name, nautilusCluster.Spec.Bookkeeper),
+	}
+}
+
+func makeBookiePodSpec(clusterName string, bookkeeperSpec *v1alpha1.BookkeeperSpec) *corev1.PodSpec {
+	podSpec := &corev1.PodSpec{
+		Containers: []corev1.Container{
+			{
+				Name:            "bookie",
+				Image:           bookkeeperSpec.Image.String(),
+				ImagePullPolicy: bookkeeperSpec.Image.PullPolicy,
+				Ports: []corev1.ContainerPort{
+					{
+						Name:          "bookie",
+						ContainerPort: 3181,
+					},
+				},
+				EnvFrom: []corev1.EnvFromSource{
+					{
+						ConfigMapRef: &corev1.ConfigMapEnvSource{
+							LocalObjectReference: corev1.LocalObjectReference{
+								Name: util.ConfigMapNameForBookie(clusterName),
+							},
+						},
+					},
+				},
+				// Mount each volume at the BookKeeper directory that matches its name.
+				VolumeMounts: []corev1.VolumeMount{
+					{
+						Name:      LedgerDiskName,
+						MountPath: "/bk/ledgers",
+					},
+					{
+						Name:      JournalDiskName,
+						MountPath: "/bk/journal",
+					},
+					{
+						Name:      IndexDiskName,
+						MountPath: "/bk/index",
+					},
+				},
+				Resources: *bookkeeperSpec.Resources,
+				ReadinessProbe: &corev1.Probe{
+					Handler: corev1.Handler{
+						Exec: &corev1.ExecAction{
+							Command: []string{"/bin/sh", "-c", "/opt/bookkeeper/bin/bookkeeper shell bookiesanity"},
+						},
+					},
+					// Bookie pods should start fast. We give them just under two minutes to become ready.
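+					// That budget is the 20s initial delay plus the probe
+					// window of 9 failures x 10s period, i.e. 110s in total.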
+ InitialDelaySeconds: 20, + PeriodSeconds: 10, + FailureThreshold: 9, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: util.HealthcheckCommand(3181), + }, + }, + // We start the liveness probe from the maximum time the pod can take + // before becoming ready. + // If the pod fails the health check during 1 minute, Kubernetes + // will restart it. + InitialDelaySeconds: 60, + PeriodSeconds: 15, + FailureThreshold: 4, + }, + }, + }, + Affinity: util.PodAntiAffinity("bookie", clusterName), + } + + if bookkeeperSpec.ServiceAccountName != "" { + podSpec.ServiceAccountName = bookkeeperSpec.ServiceAccountName + } + + return podSpec +} + +func makeBookieVolumeClaimTemplates(spec *v1alpha1.BookkeeperSpec) []corev1.PersistentVolumeClaim { + return []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: JournalDiskName, + }, + Spec: *spec.Storage.JournalVolumeClaimTemplate, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: LedgerDiskName, + }, + Spec: *spec.Storage.LedgerVolumeClaimTemplate, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: IndexDiskName, + }, + Spec: *spec.Storage.IndexVolumeClaimTemplate, + }, + } +} + +func MakeBookieConfigMap(nautilusCluster *v1alpha1.NautilusCluster) *corev1.ConfigMap { + memoryOpts := []string{ + "-Xms1g", + "-XX:+UnlockExperimentalVMOptions", + "-XX:+UseCGroupMemoryLimitForHeap", + "-XX:MaxRAMFraction=2", + "-XX:MaxDirectMemorySize=1g", + "-XX:+ExitOnOutOfMemoryError", + "-XX:+CrashOnOutOfMemoryError", + "-XX:+HeapDumpOnOutOfMemoryError", + } + + gcOpts := []string{ + "-XX:+UseG1GC", + "-XX:MaxGCPauseMillis=10", + "-XX:+ParallelRefProcEnabled", + "-XX:+AggressiveOpts", + "-XX:+DoEscapeAnalysis", + "-XX:ParallelGCThreads=32", + "-XX:ConcGCThreads=32", + "-XX:G1NewSizePercent=50", + "-XX:+DisableExplicitGC", + "-XX:-ResizePLAB", + } + + gcLoggingOpts := []string{ + "-XX:+PrintGCDetails", + "-XX:+PrintGCDateStamps", + "-XX:+PrintGCApplicationStoppedTime", + "-XX:+UseGCLogFileRotation", + "-XX:NumberOfGCLogFiles=5", + "-XX:GCLogFileSize=64m", + } + + configData := map[string]string{ + "BOOKIE_MEM_OPTS": strings.Join(memoryOpts, " "), + "BOOKIE_GC_OPTS": strings.Join(gcOpts, " "), + "BOOKIE_GC_LOGGING_OPTS": strings.Join(gcLoggingOpts, " "), + "ZK_URL": nautilusCluster.Spec.ZookeeperUri, + // Set useHostNameAsBookieID to false until BookKeeper Docker + // image is updated to 4.7 + // This value can be explicitly overridden when using the operator + // with images based on BookKeeper 4.7 or newer + "BK_useHostNameAsBookieID": "false", + "NAUTILUS_CLUSTER_NAME": nautilusCluster.ObjectMeta.Name, + "WAIT_FOR": nautilusCluster.Spec.ZookeeperUri, + } + + if *nautilusCluster.Spec.Bookkeeper.AutoRecovery { + configData["BK_AUTORECOVERY"] = "true" + } + + for k, v := range nautilusCluster.Spec.Bookkeeper.Options { + prefixKey := fmt.Sprintf("BK_%s", k) + configData[prefixKey] = v + } + + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: util.ConfigMapNameForBookie(nautilusCluster.Name), + Namespace: nautilusCluster.ObjectMeta.Namespace, + }, + Data: configData, + } +} + +func MakeBookiePodDisruptionBudget(nautilusCluster *v1alpha1.NautilusCluster) *policyv1beta1.PodDisruptionBudget { + maxUnavailable := intstr.FromInt(1) + return &policyv1beta1.PodDisruptionBudget{ + TypeMeta: metav1.TypeMeta{ + Kind: "PodDisruptionBudget", + APIVersion: "policy/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: 
util.PdbNameForBookie(nautilusCluster.Name), + Namespace: nautilusCluster.Namespace, + }, + Spec: policyv1beta1.PodDisruptionBudgetSpec{ + MaxUnavailable: &maxUnavailable, + Selector: &metav1.LabelSelector{ + MatchLabels: util.LabelsForBookie(nautilusCluster), + }, + }, + } +} diff --git a/nautilus-operator/cluster-operator/pkg/controller/nautilus/nautilus_controller.go b/nautilus-operator/cluster-operator/pkg/controller/nautilus/nautilus_controller.go new file mode 100644 index 0000000..15c4d29 --- /dev/null +++ b/nautilus-operator/cluster-operator/pkg/controller/nautilus/nautilus_controller.go @@ -0,0 +1,213 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +package nautilus + +import ( + "strings" + + "fmt" + + api "github.com/nautilus/nautilus-operator/pkg/apis/nautilus/v1alpha1" + "github.com/nautilus/nautilus-operator/pkg/util" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func MakeControllerDeployment(p *api.NautilusCluster) *appsv1.Deployment { + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: util.DeploymentNameForController(p.Name), + Namespace: p.Namespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &p.Spec.Nautilus.ControllerReplicas, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: util.LabelsForController(p), + }, + Spec: *makeControllerPodSpec(p.Name, p.Spec.Nautilus), + }, + Selector: &metav1.LabelSelector{ + MatchLabels: util.LabelsForController(p), + }, + }, + } +} + +func makeControllerPodSpec(name string, nautilusSpec *api.NautilusSpec) *corev1.PodSpec { + podSpec := &corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nautilus-controller", + Image: nautilusSpec.Image.String(), + ImagePullPolicy: nautilusSpec.Image.PullPolicy, + Args: []string{ + "controller", + }, + Ports: []corev1.ContainerPort{ + { + Name: "rest", + ContainerPort: 10080, + }, + { + Name: "grpc", + ContainerPort: 9090, + }, + }, + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: util.ConfigMapNameForController(name), + }, + }, + }, + }, + Resources: *nautilusSpec.ControllerResources, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: util.HealthcheckCommand(9090), + }, + }, + // Controller pods start fast. We give it up to 1 minute to become ready. + PeriodSeconds: 5, + FailureThreshold: 12, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: util.HealthcheckCommand(9090), + }, + }, + // We start the liveness probe from the maximum time the pod can take + // before becoming ready. + // If the pod fails the health check during 1 minute, Kubernetes + // will restart it. 
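+					// Restart window: 4 failures x 15s period = 60s of failed checks.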
+					InitialDelaySeconds: 60,
+					PeriodSeconds: 15,
+					FailureThreshold: 4,
+				},
+			},
+		},
+		Affinity: util.PodAntiAffinity("nautilus-controller", name),
+	}
+
+	if nautilusSpec.ControllerServiceAccountName != "" {
+		podSpec.ServiceAccountName = nautilusSpec.ControllerServiceAccountName
+	}
+
+	return podSpec
+}
+
+func MakeControllerConfigMap(p *api.NautilusCluster) *corev1.ConfigMap {
+	javaOpts := []string{
+		"-Xms512m",
+		"-XX:+UnlockExperimentalVMOptions",
+		"-XX:+UseCGroupMemoryLimitForHeap",
+		"-XX:MaxRAMFraction=2",
+		"-XX:+ExitOnOutOfMemoryError",
+		"-XX:+CrashOnOutOfMemoryError",
+		"-XX:+HeapDumpOnOutOfMemoryError",
+		"-Dnautilusservice.clusterName=" + p.Name,
+	}
+
+	for name, value := range p.Spec.Nautilus.Options {
+		javaOpts = append(javaOpts, fmt.Sprintf("-D%v=%v", name, value))
+	}
+
+	configData := map[string]string{
+		"CLUSTER_NAME": p.Name,
+		"ZK_URL": p.Spec.ZookeeperUri,
+		"JAVA_OPTS": strings.Join(javaOpts, " "),
+		"REST_SERVER_PORT": "10080",
+		"CONTROLLER_SERVER_PORT": "9090",
+		"AUTHORIZATION_ENABLED": "false",
+		"TOKEN_SIGNING_KEY": "secret",
+		"USER_PASSWORD_FILE": "/etc/nautilus/conf/passwd",
+		"TLS_ENABLED": "false",
+		"WAIT_FOR": p.Spec.ZookeeperUri,
+	}
+
+	configMap := &corev1.ConfigMap{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "ConfigMap",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: util.ConfigMapNameForController(p.Name),
+			Labels: util.LabelsForController(p),
+			Namespace: p.Namespace,
+		},
+		Data: configData,
+	}
+
+	return configMap
+}
+
+func MakeControllerService(p *api.NautilusCluster) *corev1.Service {
+	serviceType := corev1.ServiceTypeClusterIP
+	if p.Spec.ExternalAccess.Enabled {
+		serviceType = p.Spec.ExternalAccess.Type
+	}
+	return &corev1.Service{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "Service",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: util.ServiceNameForController(p.Name),
+			Namespace: p.Namespace,
+			Labels: util.LabelsForController(p),
+		},
+		Spec: corev1.ServiceSpec{
+			Type: serviceType,
+			Ports: []corev1.ServicePort{
+				{
+					Name: "rest",
+					Port: 10080,
+				},
+				{
+					Name: "grpc",
+					Port: 9090,
+				},
+			},
+			Selector: util.LabelsForController(p),
+		},
+	}
+}
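+
+// When external access is enabled, the controller Service adopts the type set
+// in spec.externalAccess.type (for instance LoadBalancer or NodePort; these
+// concrete types are assumptions about typical configurations). Otherwise it
+// remains a cluster-internal ClusterIP Service.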
+
+func MakeControllerPodDisruptionBudget(nautilusCluster *api.NautilusCluster) *policyv1beta1.PodDisruptionBudget {
+	minAvailable := intstr.FromInt(1)
+	return &policyv1beta1.PodDisruptionBudget{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "PodDisruptionBudget",
+			APIVersion: "policy/v1beta1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: util.PdbNameForController(nautilusCluster.Name),
+			Namespace: nautilusCluster.Namespace,
+		},
+		Spec: policyv1beta1.PodDisruptionBudgetSpec{
+			MinAvailable: &minAvailable,
+			Selector: &metav1.LabelSelector{
+				MatchLabels: util.LabelsForController(nautilusCluster),
+			},
+		},
+	}
+}
diff --git a/nautilus-operator/cluster-operator/pkg/controller/nautilus/nautilus_node.go b/nautilus-operator/cluster-operator/pkg/controller/nautilus/nautilus_node.go
new file mode 100644
index 0000000..5d76b60
--- /dev/null
+++ b/nautilus-operator/cluster-operator/pkg/controller/nautilus/nautilus_node.go
@@ -0,0 +1,365 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package nautilus
+
+import (
+	"fmt"
+	"strings"
+
+	api "github.com/nautilus/nautilus-operator/pkg/apis/nautilus/v1alpha1"
+	"github.com/nautilus/nautilus-operator/pkg/util"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	policyv1beta1 "k8s.io/api/policy/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+	cacheVolumeName = "cache"
+	cacheVolumeMountPoint = "/tmp/nautilus/cache"
+	tier2FileMountPoint = "/mnt/tier2"
+	tier2VolumeName = "tier2"
+	nodeKind = "nautilus-node"
+)
+
+func MakeNodeStatefulSet(nautilusCluster *api.NautilusCluster) *appsv1.StatefulSet {
+	return &appsv1.StatefulSet{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "StatefulSet",
+			APIVersion: "apps/v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: util.StatefulSetNameForNode(nautilusCluster.Name),
+			Namespace: nautilusCluster.Namespace,
+		},
+		Spec: appsv1.StatefulSetSpec{
+			ServiceName: "nautilus-node",
+			Replicas: &nautilusCluster.Spec.Nautilus.NodeReplicas,
+			PodManagementPolicy: appsv1.OrderedReadyPodManagement,
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: util.LabelsForNode(nautilusCluster),
+				},
+				Spec: makeNodePodSpec(nautilusCluster),
+			},
+			Selector: &metav1.LabelSelector{
+				MatchLabels: util.LabelsForNode(nautilusCluster),
+			},
+			VolumeClaimTemplates: makeCacheVolumeClaimTemplate(nautilusCluster.Spec.Nautilus),
+		},
+	}
+}
+
+func makeNodePodSpec(nautilusCluster *api.NautilusCluster) corev1.PodSpec {
+	environment := []corev1.EnvFromSource{
+		{
+			ConfigMapRef: &corev1.ConfigMapEnvSource{
+				LocalObjectReference: corev1.LocalObjectReference{
+					Name: util.ConfigMapNameForNode(nautilusCluster.Name),
+				},
+			},
+		},
+	}
+
+	nautilusSpec := nautilusCluster.Spec.Nautilus
+
+	environment = configureTier2Secrets(environment, nautilusSpec)
+
+	podSpec := corev1.PodSpec{
+		Containers: []corev1.Container{
+			{
+				Name: "nautilus-node",
+				Image: nautilusSpec.Image.String(),
+				ImagePullPolicy: nautilusSpec.Image.PullPolicy,
+				Args: []string{
+					"node",
+				},
+				Ports: []corev1.ContainerPort{
+					{
+						Name: "server",
+						ContainerPort: 12345,
+					},
+				},
+				EnvFrom: environment,
+				Env: util.DownwardAPIEnv(),
+				VolumeMounts: []corev1.VolumeMount{
+					{
+						Name: cacheVolumeName,
+						MountPath: cacheVolumeMountPoint,
+					},
+				},
+				Resources: *nautilusSpec.NodeResources,
+				ReadinessProbe: &corev1.Probe{
+					Handler: corev1.Handler{
+						Exec: &corev1.ExecAction{
+							Command: util.HealthcheckCommand(12345),
+						},
+					},
+					// Segment Stores can take a few minutes to become ready when the
+					// cluster is configured with external access enabled, as they need
+					// to wait for the allocation of the external IP address.
+					// This config gives them up to 5 minutes to become ready.
+					PeriodSeconds: 10,
+					FailureThreshold: 30,
+				},
+				LivenessProbe: &corev1.Probe{
+					Handler: corev1.Handler{
+						Exec: &corev1.ExecAction{
+							Command: util.HealthcheckCommand(12345),
+						},
+					},
+					// The readiness probe allows the pod up to 5 minutes to become
+					// ready, so the liveness probe grants the same 5-minute grace
+					// period before it starts monitoring the container.
+					// If the pod keeps failing the health check for one minute,
+					// Kubernetes will restart it.
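+					// Taken together, an unresponsive node is restarted no
+					// earlier than about 300s + 4 x 15s after startup.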
+					InitialDelaySeconds: 300,
+					PeriodSeconds: 15,
+					FailureThreshold: 4,
+				},
+			},
+		},
+		Affinity: util.PodAntiAffinity("nautilus-node", nautilusCluster.Name),
+	}
+
+	if nautilusSpec.NodeServiceAccountName != "" {
+		podSpec.ServiceAccountName = nautilusSpec.NodeServiceAccountName
+	}
+
+	configureTier2Filesystem(&podSpec, nautilusSpec)
+
+	return podSpec
+}
+
+func MakeNodeConfigMap(p *api.NautilusCluster) *corev1.ConfigMap {
+	javaOpts := []string{
+		"-Xms1g",
+		"-XX:+UnlockExperimentalVMOptions",
+		"-XX:+UseCGroupMemoryLimitForHeap",
+		"-XX:MaxRAMFraction=2",
+		"-XX:+ExitOnOutOfMemoryError",
+		"-XX:+CrashOnOutOfMemoryError",
+		"-XX:+HeapDumpOnOutOfMemoryError",
+		"-Dnautilusservice.clusterName=" + p.Name,
+	}
+
+	for name, value := range p.Spec.Nautilus.Options {
+		javaOpts = append(javaOpts, fmt.Sprintf("-D%v=%v", name, value))
+	}
+
+	configData := map[string]string{
+		"AUTHORIZATION_ENABLED": "false",
+		"CLUSTER_NAME": p.Name,
+		"ZK_URL": p.Spec.ZookeeperUri,
+		"JAVA_OPTS": strings.Join(javaOpts, " "),
+		"CONTROLLER_URL": util.NautilusControllerServiceURL(*p),
+	}
+
+	// Wait for the first bookies to come up (up to three, or all of them
+	// if fewer replicas are configured)
+	var waitFor []string
+	for i := int32(0); i < util.Min(3, p.Spec.Bookkeeper.Replicas); i++ {
+		waitFor = append(waitFor,
+			fmt.Sprintf("%s-%d.%s.%s:3181",
+				util.StatefulSetNameForBookie(p.Name),
+				i,
+				util.HeadlessServiceNameForBookie(p.Name),
+				p.Namespace))
+	}
+	configData["WAIT_FOR"] = strings.Join(waitFor, ",")
+
+	if p.Spec.ExternalAccess.Enabled {
+		configData["K8_EXTERNAL_ACCESS"] = "true"
+	}
+
+	if p.Spec.Nautilus.DebugLogging {
+		configData["log.level"] = "DEBUG"
+	}
+
+	for k, v := range getTier2StorageOptions(p.Spec.Nautilus) {
+		configData[k] = v
+	}
+
+	return &corev1.ConfigMap{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "ConfigMap",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: util.ConfigMapNameForNode(p.Name),
+			Namespace: p.Namespace,
+			Labels: util.LabelsForNode(p),
+		},
+		Data: configData,
+	}
+}
+
+func makeCacheVolumeClaimTemplate(nautilusSpec *api.NautilusSpec) []corev1.PersistentVolumeClaim {
+	return []corev1.PersistentVolumeClaim{
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: cacheVolumeName,
+			},
+			Spec: *nautilusSpec.CacheVolumeClaimTemplate,
+		},
+	}
+}
+
+func getTier2StorageOptions(nautilusSpec *api.NautilusSpec) map[string]string {
+	if nautilusSpec.Tier2.FileSystem != nil {
+		return map[string]string{
+			"TIER2_STORAGE": "FILESYSTEM",
+			"NFS_MOUNT": tier2FileMountPoint,
+		}
+	}
+
+	if nautilusSpec.Tier2.Ecs != nil {
+		// EXTENDEDS3_ACCESS_KEY_ID & EXTENDEDS3_SECRET_KEY will come from secret storage
+		return map[string]string{
+			"TIER2_STORAGE": "EXTENDEDS3",
+			"EXTENDEDS3_BUCKET": nautilusSpec.Tier2.Ecs.Bucket,
+			"EXTENDEDS3_URI": nautilusSpec.Tier2.Ecs.Uri,
+			"EXTENDEDS3_ROOT": nautilusSpec.Tier2.Ecs.Root,
+			"EXTENDEDS3_NAMESPACE": nautilusSpec.Tier2.Ecs.Namespace,
+		}
+	}
+
+	if nautilusSpec.Tier2.Hdfs != nil {
+		return map[string]string{
+			"TIER2_STORAGE": "HDFS",
+			"HDFS_URL": nautilusSpec.Tier2.Hdfs.Uri,
+			"HDFS_ROOT": nautilusSpec.Tier2.Hdfs.Root,
+		}
+	}
+
+	return make(map[string]string)
+}
+
+func configureTier2Secrets(environment []corev1.EnvFromSource, nautilusSpec *api.NautilusSpec) []corev1.EnvFromSource {
+	if nautilusSpec.Tier2.Ecs != nil {
+		return append(environment, corev1.EnvFromSource{
+			Prefix: "EXTENDEDS3_",
+			SecretRef: &corev1.SecretEnvSource{
+				LocalObjectReference: corev1.LocalObjectReference{
+					Name: nautilusSpec.Tier2.Ecs.Credentials,
+				},
+			},
+		})
+	}
+
+	return environment
+}
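+
+// Sketch of the resulting environment, assuming the referenced credentials
+// Secret stores keys named ACCESS_KEY_ID and SECRET_KEY (the exact key names
+// are an assumption): with the "EXTENDEDS3_" prefix above they surface in the
+// container as EXTENDEDS3_ACCESS_KEY_ID and EXTENDEDS3_SECRET_KEY, matching
+// the note in getTier2StorageOptions.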
+
+func configureTier2Filesystem(podSpec *corev1.PodSpec, nautilusSpec *api.NautilusSpec) {
+	if nautilusSpec.Tier2.FileSystem != nil {
+		podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, corev1.VolumeMount{
+			Name: tier2VolumeName,
+			MountPath: tier2FileMountPoint,
+		})
+
+		podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
+			Name: tier2VolumeName,
+			VolumeSource: corev1.VolumeSource{
+				PersistentVolumeClaim: nautilusSpec.Tier2.FileSystem.PersistentVolumeClaim,
+			},
+		})
+	}
+}
+
+func MakeNodeHeadlessService(nautilusCluster *api.NautilusCluster) *corev1.Service {
+	return &corev1.Service{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "Service",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: util.HeadlessServiceNameForNode(nautilusCluster.Name),
+			Namespace: nautilusCluster.Namespace,
+			Labels: util.LabelsForNode(nautilusCluster),
+		},
+		Spec: corev1.ServiceSpec{
+			Ports: []corev1.ServicePort{
+				{
+					Name: "server",
+					Port: 12345,
+					Protocol: "TCP",
+				},
+			},
+			Selector: util.LabelsForNode(nautilusCluster),
+			ClusterIP: corev1.ClusterIPNone,
+		},
+	}
+}
+
+func MakeNodeExternalServices(nautilusCluster *api.NautilusCluster) []*corev1.Service {
+	var service *corev1.Service
+	services := make([]*corev1.Service, nautilusCluster.Spec.Nautilus.NodeReplicas)
+
+	for i := int32(0); i < nautilusCluster.Spec.Nautilus.NodeReplicas; i++ {
+		service = &corev1.Service{
+			TypeMeta: metav1.TypeMeta{
+				Kind: "Service",
+				APIVersion: "v1",
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name: util.ServiceNameForNode(nautilusCluster.Name, i),
+				Namespace: nautilusCluster.Namespace,
+				Labels: util.LabelsForNode(nautilusCluster),
+			},
+			Spec: corev1.ServiceSpec{
+				Type: nautilusCluster.Spec.ExternalAccess.Type,
+				Ports: []corev1.ServicePort{
+					{
+						Name: "server",
+						Port: 12345,
+						Protocol: "TCP",
+						TargetPort: intstr.FromInt(12345),
+					},
+				},
+				ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeLocal,
+				Selector: map[string]string{
+					appsv1.StatefulSetPodNameLabel: fmt.Sprintf("%s-%d", util.StatefulSetNameForNode(nautilusCluster.Name), i),
+				},
+			},
+		}
+		services[i] = service
+	}
+	return services
+}
+
+func MakeNodePodDisruptionBudget(nautilusCluster *api.NautilusCluster) *policyv1beta1.PodDisruptionBudget {
+	var maxUnavailable intstr.IntOrString
+
+	if nautilusCluster.Spec.Nautilus.NodeReplicas == int32(1) {
+		maxUnavailable = intstr.FromInt(0)
+	} else {
+		maxUnavailable = intstr.FromInt(1)
+	}
+
+	return &policyv1beta1.PodDisruptionBudget{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "PodDisruptionBudget",
+			APIVersion: "policy/v1beta1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: util.PdbNameForNode(nautilusCluster.Name),
+			Namespace: nautilusCluster.Namespace,
+		},
+		Spec: policyv1beta1.PodDisruptionBudgetSpec{
+			MaxUnavailable: &maxUnavailable,
+			Selector: &metav1.LabelSelector{
+				MatchLabels: util.LabelsForNode(nautilusCluster),
+			},
+		},
+	}
+}
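+
+// Note on MakeNodePodDisruptionBudget above: with a single node replica the
+// budget pins maxUnavailable to 0, blocking voluntary evictions (for example
+// during a node drain) entirely; with more replicas, one pod may be evicted
+// at a time.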
diff --git a/nautilus-operator/cluster-operator/pkg/controller/nautiluscluster/nautiluscluster_controller.go b/nautilus-operator/cluster-operator/pkg/controller/nautiluscluster/nautiluscluster_controller.go
new file mode 100644
index 0000000..cd0222f
--- /dev/null
+++ b/nautilus-operator/cluster-operator/pkg/controller/nautiluscluster/nautiluscluster_controller.go
@@ -0,0 +1,482 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package nautiluscluster
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	nautilusv1alpha1 "github.com/nautilus/nautilus-operator/pkg/apis/nautilus/v1alpha1"
+	"github.com/nautilus/nautilus-operator/pkg/controller/nautilus"
+	"github.com/nautilus/nautilus-operator/pkg/util"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+
+	log "github.com/sirupsen/logrus"
+)
+
+// ReconcileTime is the delay between reconciliations
+const ReconcileTime = 30 * time.Second
+
+// Add creates a new NautilusCluster Controller and adds it to the Manager. The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
+func Add(mgr manager.Manager) error {
+	return add(mgr, newReconciler(mgr))
+}
+
+// newReconciler returns a new reconcile.Reconciler
+func newReconciler(mgr manager.Manager) reconcile.Reconciler {
+	return &ReconcileNautilusCluster{client: mgr.GetClient(), scheme: mgr.GetScheme()}
+}
+
+// add adds a new Controller to mgr with r as the reconcile.Reconciler
+func add(mgr manager.Manager, r reconcile.Reconciler) error {
+	// Create a new controller
+	c, err := controller.New("nautiluscluster-controller", mgr, controller.Options{Reconciler: r})
+	if err != nil {
+		return err
+	}
+
+	// Watch for changes to primary resource NautilusCluster
+	err = c.Watch(&source.Kind{Type: &nautilusv1alpha1.NautilusCluster{}}, &handler.EnqueueRequestForObject{})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+var _ reconcile.Reconciler = &ReconcileNautilusCluster{}
+
+// ReconcileNautilusCluster reconciles a NautilusCluster object
+type ReconcileNautilusCluster struct {
+	// This client, initialized using mgr.Client() above, is a split client
+	// that reads objects from the cache and writes to the apiserver
+	client client.Client
+	scheme *runtime.Scheme
+}
+
+// Reconcile reads the state of the cluster for a NautilusCluster object and makes changes based on the state read
+// and what is in the NautilusCluster.Spec.
+// Note:
+// The Controller will requeue the Request to be processed again if the returned error is non-nil or
+// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
+func (r *ReconcileNautilusCluster) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+	log.Printf("Reconciling NautilusCluster %s/%s\n", request.Namespace, request.Name)
+
+	// Fetch the NautilusCluster instance
+	nautilusCluster := &nautilusv1alpha1.NautilusCluster{}
+	err := r.client.Get(context.TODO(), request.NamespacedName, nautilusCluster)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			// Request object not found, could have been deleted after reconcile request.
+			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
+			// Return and don't requeue
+			log.Printf("NautilusCluster %s/%s not found. Ignoring since object must be deleted\n", request.Namespace, request.Name)
+			return reconcile.Result{}, nil
+		}
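+		// Any error returned from this point on is retried with backoff by
+		// controller-runtime; successful passes are requeued every ReconcileTime.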
+		// Error reading the object - requeue the request.
+		log.Printf("failed to get NautilusCluster: %v", err)
+		return reconcile.Result{}, err
+	}
+
+	// Set default configuration for unspecified values
+	changed := nautilusCluster.WithDefaults()
+	if changed {
+		log.Printf("Setting default settings for nautilus-cluster: %s", request.Name)
+		if err = r.client.Update(context.TODO(), nautilusCluster); err != nil {
+			return reconcile.Result{}, err
+		}
+		return reconcile.Result{Requeue: true}, nil
+	}
+
+	err = r.run(nautilusCluster)
+	if err != nil {
+		log.Printf("failed to reconcile nautilus cluster (%s): %v", nautilusCluster.Name, err)
+		return reconcile.Result{}, err
+	}
+
+	return reconcile.Result{RequeueAfter: ReconcileTime}, nil
+}
+
+func (r *ReconcileNautilusCluster) run(p *nautilusv1alpha1.NautilusCluster) (err error) {
+	// Clean up zookeeper metadata
+	err = r.reconcileFinalizers(p)
+	if err != nil {
+		log.Printf("failed to clean up zookeeper: %v", err)
+		return err
+	}
+
+	err = r.deployCluster(p)
+	if err != nil {
+		log.Printf("failed to deploy cluster: %v", err)
+		return err
+	}
+
+	err = r.syncClusterSize(p)
+	if err != nil {
+		log.Printf("failed to sync cluster size: %v", err)
+		return err
+	}
+
+	err = r.reconcileClusterStatus(p)
+	if err != nil {
+		log.Printf("failed to reconcile cluster status: %v", err)
+		return err
+	}
+	return nil
+}
+
+func (r *ReconcileNautilusCluster) deployCluster(p *nautilusv1alpha1.NautilusCluster) (err error) {
+	err = r.deployBookie(p)
+	if err != nil {
+		log.Printf("failed to deploy bookie: %v", err)
+		return err
+	}
+
+	err = r.deployController(p)
+	if err != nil {
+		log.Printf("failed to deploy controller: %v", err)
+		return err
+	}
+
+	err = r.deployNode(p)
+	if err != nil {
+		log.Printf("failed to deploy segment store: %v", err)
+		return err
+	}
+	return nil
+}
+
+func (r *ReconcileNautilusCluster) deployController(p *nautilusv1alpha1.NautilusCluster) (err error) {
+	pdb := nautilus.MakeControllerPodDisruptionBudget(p)
+	controllerutil.SetControllerReference(p, pdb, r.scheme)
+	err = r.client.Create(context.TODO(), pdb)
+	if err != nil && !errors.IsAlreadyExists(err) {
+		return err
+	}
+
+	configMap := nautilus.MakeControllerConfigMap(p)
+	controllerutil.SetControllerReference(p, configMap, r.scheme)
+	err = r.client.Create(context.TODO(), configMap)
+	if err != nil && !errors.IsAlreadyExists(err) {
+		return err
+	}
+
+	deployment := nautilus.MakeControllerDeployment(p)
+	controllerutil.SetControllerReference(p, deployment, r.scheme)
+	err = r.client.Create(context.TODO(), deployment)
+	if err != nil && !errors.IsAlreadyExists(err) {
+		return err
+	}
+
+	service := nautilus.MakeControllerService(p)
+	controllerutil.SetControllerReference(p, service, r.scheme)
+	err = r.client.Create(context.TODO(), service)
+	if err != nil && !errors.IsAlreadyExists(err) {
+		return err
+	}
+
+	return nil
+}
+
+func (r *ReconcileNautilusCluster) deployNode(p *nautilusv1alpha1.NautilusCluster) (err error) {
+	headlessService := nautilus.MakeNodeHeadlessService(p)
+	controllerutil.SetControllerReference(p, headlessService, r.scheme)
+	err = r.client.Create(context.TODO(), headlessService)
+	if err != nil && !errors.IsAlreadyExists(err) {
+		return err
+	}
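+
+	// Each create below tolerates IsAlreadyExists, which keeps deployNode
+	// idempotent across the periodic reconcile runs: existing objects are
+	// left untouched rather than patched.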
+	if p.Spec.ExternalAccess.Enabled {
+		services := nautilus.MakeNodeExternalServices(p)
+		for _, service := range services {
+			controllerutil.SetControllerReference(p, service, r.scheme)
+			err = r.client.Create(context.TODO(), service)
+			if err != nil && !errors.IsAlreadyExists(err) {
+				return err
+			}
+		}
+	}
+
+	pdb := nautilus.MakeNodePodDisruptionBudget(p)
+	controllerutil.SetControllerReference(p, pdb, r.scheme)
+	err = r.client.Create(context.TODO(), pdb)
+	if err != nil && !errors.IsAlreadyExists(err) {
+		return err
+	}
+
+	configMap := nautilus.MakeNodeConfigMap(p)
+	controllerutil.SetControllerReference(p, configMap, r.scheme)
+	err = r.client.Create(context.TODO(), configMap)
+	if err != nil && !errors.IsAlreadyExists(err) {
+		return err
+	}
+
+	statefulSet := nautilus.MakeNodeStatefulSet(p)
+	controllerutil.SetControllerReference(p, statefulSet, r.scheme)
+	for i := range statefulSet.Spec.VolumeClaimTemplates {
+		controllerutil.SetControllerReference(p, &statefulSet.Spec.VolumeClaimTemplates[i], r.scheme)
+	}
+	err = r.client.Create(context.TODO(), statefulSet)
+	if err != nil && !errors.IsAlreadyExists(err) {
+		return err
+	}
+
+	return nil
+}
+
+func (r *ReconcileNautilusCluster) deployBookie(p *nautilusv1alpha1.NautilusCluster) (err error) {
+	headlessService := nautilus.MakeBookieHeadlessService(p)
+	controllerutil.SetControllerReference(p, headlessService, r.scheme)
+	err = r.client.Create(context.TODO(), headlessService)
+	if err != nil && !errors.IsAlreadyExists(err) {
+		return err
+	}
+
+	pdb := nautilus.MakeBookiePodDisruptionBudget(p)
+	controllerutil.SetControllerReference(p, pdb, r.scheme)
+	err = r.client.Create(context.TODO(), pdb)
+	if err != nil && !errors.IsAlreadyExists(err) {
+		return err
+	}
+
+	configMap := nautilus.MakeBookieConfigMap(p)
+	controllerutil.SetControllerReference(p, configMap, r.scheme)
+	err = r.client.Create(context.TODO(), configMap)
+	if err != nil && !errors.IsAlreadyExists(err) {
+		return err
+	}
+
+	statefulSet := nautilus.MakeBookieStatefulSet(p)
+	controllerutil.SetControllerReference(p, statefulSet, r.scheme)
+	for i := range statefulSet.Spec.VolumeClaimTemplates {
+		controllerutil.SetControllerReference(p, &statefulSet.Spec.VolumeClaimTemplates[i], r.scheme)
+	}
+	err = r.client.Create(context.TODO(), statefulSet)
+	if err != nil && !errors.IsAlreadyExists(err) {
+		return err
+	}
+
+	return nil
+}
+
+func (r *ReconcileNautilusCluster) syncClusterSize(p *nautilusv1alpha1.NautilusCluster) (err error) {
+	err = r.syncBookieSize(p)
+	if err != nil {
+		return err
+	}
+
+	err = r.syncNodeSize(p)
+	if err != nil {
+		return err
+	}
+
+	err = r.syncControllerSize(p)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *ReconcileNautilusCluster) syncBookieSize(p *nautilusv1alpha1.NautilusCluster) (err error) {
+	sts := &appsv1.StatefulSet{}
+	name := util.StatefulSetNameForBookie(p.Name)
+	err = r.client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: p.Namespace}, sts)
+	if err != nil {
+		return fmt.Errorf("failed to get stateful-set (%s): %v", sts.Name, err)
+	}
+
+	if *sts.Spec.Replicas != p.Spec.Bookkeeper.Replicas {
+		sts.Spec.Replicas = &(p.Spec.Bookkeeper.Replicas)
+		err = r.client.Update(context.TODO(), sts)
+		if err != nil {
+			return fmt.Errorf("failed to update size of stateful-set (%s): %v", sts.Name, err)
+		}
+
+		err = r.syncStatefulSetPvc(sts)
+		if err != nil {
+			return fmt.Errorf("failed to sync pvcs of stateful-set (%s): %v", sts.Name, err)
+		}
+	}
+	return nil
+}
+
+func (r *ReconcileNautilusCluster) syncNodeSize(p *nautilusv1alpha1.NautilusCluster) (err error) {
+	sts := &appsv1.StatefulSet{}
+	name := util.StatefulSetNameForNode(p.Name)
+	err = r.client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: p.Namespace}, sts)
+	if err != nil {
+		return fmt.Errorf("failed to get stateful-set (%s): %v", sts.Name, err)
+	}
+
+	if *sts.Spec.Replicas != p.Spec.Nautilus.NodeReplicas {
+		sts.Spec.Replicas = &(p.Spec.Nautilus.NodeReplicas)
+		err = r.client.Update(context.TODO(), sts)
+		if err != nil {
+			return fmt.Errorf("failed to update size of stateful-set (%s): %v", sts.Name, err)
+		}
+
+		err = r.syncStatefulSetPvc(sts)
+		if err != nil {
+			return fmt.Errorf("failed to sync pvcs of stateful-set (%s): %v", sts.Name, err)
+		}
+	}
+	return nil
+}
+
+func (r *ReconcileNautilusCluster) syncControllerSize(p *nautilusv1alpha1.NautilusCluster) (err error) {
+	deploy := &appsv1.Deployment{}
+	name := util.DeploymentNameForController(p.Name)
+	err = r.client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: p.Namespace}, deploy)
+	if err != nil {
+		return fmt.Errorf("failed to get deployment (%s): %v", deploy.Name, err)
+	}
+
+	if *deploy.Spec.Replicas != p.Spec.Nautilus.ControllerReplicas {
+		deploy.Spec.Replicas = &(p.Spec.Nautilus.ControllerReplicas)
+		err = r.client.Update(context.TODO(), deploy)
+		if err != nil {
+			return fmt.Errorf("failed to update size of deployment (%s): %v", deploy.Name, err)
+		}
+	}
+	return nil
+}
+
+func (r *ReconcileNautilusCluster) reconcileFinalizers(p *nautilusv1alpha1.NautilusCluster) (err error) {
+	if p.DeletionTimestamp.IsZero() {
+		if !util.ContainsString(p.ObjectMeta.Finalizers, util.ZkFinalizer) {
+			p.ObjectMeta.Finalizers = append(p.ObjectMeta.Finalizers, util.ZkFinalizer)
+			if err = r.client.Update(context.TODO(), p); err != nil {
+				return fmt.Errorf("failed to add the finalizer (%s): %v", p.Name, err)
+			}
+		}
+	} else {
+		if util.ContainsString(p.ObjectMeta.Finalizers, util.ZkFinalizer) {
+			p.ObjectMeta.Finalizers = util.RemoveString(p.ObjectMeta.Finalizers, util.ZkFinalizer)
+			if err = r.client.Update(context.TODO(), p); err != nil {
+				return fmt.Errorf("failed to update Nautilus object (%s): %v", p.Name, err)
+			}
+			if err = r.cleanUpZookeeperMeta(p); err != nil {
+				return fmt.Errorf("failed to clean up metadata (%s): %v", p.Name, err)
+			}
+		}
+	}
+	return nil
+}
+
+func (r *ReconcileNautilusCluster) cleanUpZookeeperMeta(p *nautilusv1alpha1.NautilusCluster) (err error) {
+	if err = util.WaitForClusterToTerminate(r.client, p); err != nil {
+		return fmt.Errorf("failed to wait for cluster pods termination (%s): %v", p.Name, err)
+	}
+
+	if err = util.DeleteAllZnodes(p); err != nil {
+		return fmt.Errorf("failed to delete zookeeper znodes for (%s): %v", p.Name, err)
+	}
+	return nil
+}
+
+func (r *ReconcileNautilusCluster) syncStatefulSetPvc(sts *appsv1.StatefulSet) error {
+	selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
+		MatchLabels: sts.Spec.Template.Labels,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to convert label selector: %v", err)
+	}
+
+	pvcList := &corev1.PersistentVolumeClaimList{}
+	pvclistOps := &client.ListOptions{
+		Namespace: sts.Namespace,
+		LabelSelector: selector,
+	}
+	err = r.client.List(context.TODO(), pvclistOps, pvcList)
+	if err != nil {
+		return err
+	}
+
+	for _, pvcItem := range pvcList.Items {
+		if util.PvcIsOrphan(pvcItem.Name, *sts.Spec.Replicas) {
+			pvcDelete := &corev1.PersistentVolumeClaim{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: pvcItem.Name,
+					Namespace: pvcItem.Namespace,
+				},
+			}
+
+			err = r.client.Delete(context.TODO(), pvcDelete)
+			if err != nil {
+				return fmt.Errorf("failed to delete pvc: %v", err)
+			}
+		}
+	}
+	return nil
+}
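+
+// Worked example of the orphan rule used by syncStatefulSetPvc: after scaling
+// bookies from 5 down to 3, PVCs whose names end in ordinals 3 and 4 no longer
+// back any pod, so PvcIsOrphan reports them and they are deleted. (StatefulSet
+// PVC names follow "<claim-template>-<statefulset>-<ordinal>".)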
+
+func (r *ReconcileNautilusCluster) reconcileClusterStatus(p *nautilusv1alpha1.NautilusCluster) error {
+	expectedSize := util.GetClusterExpectedSize(p)
+	listOps := &client.ListOptions{
+		Namespace: p.Namespace,
+		LabelSelector: labels.SelectorFromSet(util.LabelsForNautilusCluster(p)),
+	}
+	podList := &corev1.PodList{}
+	err := r.client.List(context.TODO(), listOps, podList)
+	if err != nil {
+		return err
+	}
+
+	var (
+		readyMembers []string
+		unreadyMembers []string
+	)
+
+	for _, pod := range podList.Items {
+		if util.IsPodReady(&pod) {
+			readyMembers = append(readyMembers, pod.Name)
+		} else {
+			unreadyMembers = append(unreadyMembers, pod.Name)
+		}
+	}
+
+	if len(readyMembers) == expectedSize {
+		p.Status.SetPodsReadyConditionTrue()
+	} else {
+		p.Status.SetPodsReadyConditionFalse()
+	}
+
+	p.Status.Replicas = int32(expectedSize)
+	p.Status.CurrentReplicas = int32(len(podList.Items))
+	p.Status.ReadyReplicas = int32(len(readyMembers))
+	p.Status.Members.Ready = readyMembers
+	p.Status.Members.Unready = unreadyMembers
+
+	err = r.client.Status().Update(context.TODO(), p)
+	if err != nil {
+		return fmt.Errorf("failed to update cluster status: %v", err)
+	}
+	return nil
+}
diff --git a/nautilus-operator/cluster-operator/pkg/controller/nautiluscluster/nautiluscluster_controller_test.go b/nautilus-operator/cluster-operator/pkg/controller/nautiluscluster/nautiluscluster_controller_test.go
new file mode 100644
index 0000000..fed076c
--- /dev/null
+++ b/nautilus-operator/cluster-operator/pkg/controller/nautiluscluster/nautiluscluster_controller_test.go
@@ -0,0 +1,225 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package nautiluscluster
+
+import (
+	"context"
+	"testing"
+
+	"k8s.io/apimachinery/pkg/api/resource"
+
+	"github.com/nautilus/nautilus-operator/pkg/apis/nautilus/v1alpha1"
+	"github.com/nautilus/nautilus-operator/pkg/util"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/kubernetes/scheme"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	. "github.com/onsi/ginkgo"
"github.com/onsi/gomega" +) + +func TestBookie(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Nautilus cluster") +} + +var _ = Describe("NautilusCluster Controller", func() { + const ( + Name = "example" + Namespace = "default" + ) + + var ( + s = scheme.Scheme + r *ReconcileNautilusCluster + ) + + Context("Reconcile", func() { + var ( + req reconcile.Request + p *v1alpha1.NautilusCluster + ) + + BeforeEach(func() { + req = reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: Name, + Namespace: Namespace, + }, + } + p = &v1alpha1.NautilusCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: Name, + Namespace: Namespace, + }, + } + s.AddKnownTypes(v1alpha1.SchemeGroupVersion, p) + }) + + Context("Default spec", func() { + var ( + client client.Client + err error + ) + + BeforeEach(func() { + p.WithDefaults() + client = fake.NewFakeClient(p) + r = &ReconcileNautilusCluster{client: client, scheme: s} + _, err = r.Reconcile(req) + }) + + It("shouldn't error", func() { + Ω(err).Should(BeNil()) + }) + + Context("Default bookkeeper", func() { + It("should have a default bookie resource", func() { + foundBk := &appsv1.StatefulSet{} + nn := types.NamespacedName{ + Name: util.StatefulSetNameForBookie(p.Name), + Namespace: Namespace, + } + err = client.Get(context.TODO(), nn, foundBk) + Ω(err).Should(BeNil()) + Ω(foundBk.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().String()).Should(Equal("500m")) + Ω(foundBk.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String()).Should(Equal("1Gi")) + Ω(foundBk.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String()).Should(Equal("1")) + Ω(foundBk.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String()).Should(Equal("2Gi")) + }) + }) + + Context("Default Nautilus controller", func() { + It("should have a default controller resource", func() { + foundController := &appsv1.Deployment{} + nn := types.NamespacedName{ + Name: util.DeploymentNameForController(p.Name), + Namespace: Namespace, + } + err = client.Get(context.TODO(), nn, foundController) + Ω(err).Should(BeNil()) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().String()).Should(Equal("250m")) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String()).Should(Equal("512Mi")) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String()).Should(Equal("500m")) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String()).Should(Equal("1Gi")) + }) + }) + + Context("Default Nautilus node", func() { + It("should have a default controller resource", func() { + foundSS := &appsv1.StatefulSet{} + nn := types.NamespacedName{ + Name: util.StatefulSetNameForNode(p.Name), + Namespace: Namespace, + } + err = client.Get(context.TODO(), nn, foundSS) + Ω(err).Should(BeNil()) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().String()).Should(Equal("500m")) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String()).Should(Equal("1Gi")) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String()).Should(Equal("1")) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String()).Should(Equal("2Gi")) + }) + }) + }) + + Context("Custom spec", func() { + var ( + client client.Client + err error + customReq *corev1.ResourceRequirements + ) + + BeforeEach(func() { + customReq = &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: 
resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("6Gi"), + }, + } + p.Spec = v1alpha1.ClusterSpec{ + Bookkeeper: &v1alpha1.BookkeeperSpec{ + Resources: customReq, + }, + Nautilus: &v1alpha1.NautilusSpec{ + ControllerResources: customReq, + NodeResources: customReq, + }, + } + p.WithDefaults() + client = fake.NewFakeClient(p) + r = &ReconcileNautilusCluster{client: client, scheme: s} + _, err = r.Reconcile(req) + }) + + It("shouldn't error", func() { + Ω(err).Should(BeNil()) + }) + + Context("Custom bookkeeper", func() { + It("should have a custom bookie resource", func() { + foundBK := &appsv1.StatefulSet{} + nn := types.NamespacedName{ + Name: util.StatefulSetNameForBookie(p.Name), + Namespace: Namespace, + } + err = client.Get(context.TODO(), nn, foundBK) + Ω(err).Should(BeNil()) + Ω(foundBK.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().String()).Should(Equal("2")) + Ω(foundBK.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String()).Should(Equal("4Gi")) + Ω(foundBK.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String()).Should(Equal("4")) + Ω(foundBK.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String()).Should(Equal("6Gi")) + }) + }) + + Context("Custom Nautilus controller", func() { + It("should have a custom controller resource", func() { + foundController := &appsv1.Deployment{} + nn := types.NamespacedName{ + Name: util.DeploymentNameForController(p.Name), + Namespace: Namespace, + } + err = client.Get(context.TODO(), nn, foundController) + Ω(err).Should(BeNil()) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().String()).Should(Equal("2")) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String()).Should(Equal("4Gi")) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String()).Should(Equal("4")) + Ω(foundController.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String()).Should(Equal("6Gi")) + }) + }) + + Context("Custom Nautilus node", func() { + It("should have a custom node resource", func() { + foundSS := &appsv1.StatefulSet{} + nn := types.NamespacedName{ + Name: util.StatefulSetNameForNode(p.Name), + Namespace: Namespace, + } + err = client.Get(context.TODO(), nn, foundSS) + Ω(err).Should(BeNil()) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().String()).Should(Equal("2")) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String()).Should(Equal("4Gi")) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String()).Should(Equal("4")) + Ω(foundSS.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String()).Should(Equal("6Gi")) + }) + }) + }) + }) +}) diff --git a/nautilus-operator/cluster-operator/pkg/test/e2e/e2eutil/nautiluscluster_util.go b/nautilus-operator/cluster-operator/pkg/test/e2e/e2eutil/nautiluscluster_util.go new file mode 100644 index 0000000..48f9e83 --- /dev/null +++ b/nautilus-operator/cluster-operator/pkg/test/e2e/e2eutil/nautiluscluster_util.go @@ -0,0 +1,279 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package e2eutil
+
+import (
+	goctx "context"
+	"fmt"
+	"testing"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
+
+	framework "github.com/operator-framework/operator-sdk/pkg/test"
+	api "github.com/nautilus/nautilus-operator/pkg/apis/nautilus/v1alpha1"
+	"github.com/nautilus/nautilus-operator/pkg/util"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+)
+
+var (
+	RetryInterval = time.Second * 5
+	Timeout = time.Second * 60
+	CleanupRetryInterval = time.Second * 1
+	CleanupTimeout = time.Second * 5
+)
+
+// CreateCluster creates a NautilusCluster CR with the desired spec
+func CreateCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.NautilusCluster) (*api.NautilusCluster, error) {
+	t.Logf("creating nautilus cluster: %s", p.Name)
+	err := f.Client.Create(goctx.TODO(), p, &framework.CleanupOptions{TestContext: ctx, Timeout: CleanupTimeout, RetryInterval: CleanupRetryInterval})
+	if err != nil {
+		return nil, fmt.Errorf("failed to create CR: %v", err)
+	}
+
+	nautilus := &api.NautilusCluster{}
+	err = f.Client.Get(goctx.TODO(), types.NamespacedName{Namespace: p.Namespace, Name: p.Name}, nautilus)
+	if err != nil {
+		return nil, fmt.Errorf("failed to obtain created CR: %v", err)
+	}
+	t.Logf("created nautilus cluster: %s", nautilus.Name)
+	return nautilus, nil
+}
+
+// DeleteCluster deletes the NautilusCluster CR specified by cluster spec
+func DeleteCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.NautilusCluster) error {
+	t.Logf("deleting nautilus cluster: %s", p.Name)
+	err := f.Client.Delete(goctx.TODO(), p)
+	if err != nil {
+		return fmt.Errorf("failed to delete CR: %v", err)
+	}
+
+	t.Logf("deleted nautilus cluster: %s", p.Name)
+	return nil
+}
+
+// UpdateCluster updates the NautilusCluster CR
+func UpdateCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.NautilusCluster) error {
+	t.Logf("updating nautilus cluster: %s", p.Name)
+	err := f.Client.Update(goctx.TODO(), p)
+	if err != nil {
+		return fmt.Errorf("failed to update CR: %v", err)
+	}
+
+	t.Logf("updated nautilus cluster: %s", p.Name)
+	return nil
+}
+
+// GetCluster returns the latest NautilusCluster CR
+func GetCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.NautilusCluster) (*api.NautilusCluster, error) {
+	nautilus := &api.NautilusCluster{}
+	err := f.Client.Get(goctx.TODO(), types.NamespacedName{Namespace: p.Namespace, Name: p.Name}, nautilus)
+	if err != nil {
+		return nil, fmt.Errorf("failed to obtain created CR: %v", err)
+	}
+	return nautilus, nil
+}
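+
+// The polling helpers below retry every RetryInterval until the cluster
+// reaches the expected state or the per-helper deadline (between one and
+// five minutes) expires.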
+
+// WaitForClusterToBecomeReady will wait until all cluster pods are ready
+func WaitForClusterToBecomeReady(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.NautilusCluster, size int) error {
+	t.Logf("waiting for cluster pods to become ready: %s", p.Name)
+
+	err := wait.Poll(RetryInterval, 5*time.Minute, func() (done bool, err error) {
+		cluster, err := GetCluster(t, f, ctx, p)
+		if err != nil {
+			return false, err
+		}
+
+		t.Logf("\twaiting for pods to become ready (%d/%d), pods (%v)", cluster.Status.ReadyReplicas, size, cluster.Status.Members.Ready)
+
+		_, condition := cluster.Status.GetClusterCondition(api.ClusterConditionPodsReady)
+		if condition != nil && condition.Status == corev1.ConditionTrue && cluster.Status.ReadyReplicas == int32(size) {
+			return true, nil
+		}
+		return false, nil
+	})
+
+	if err != nil {
+		return err
+	}
+
+	t.Logf("nautilus cluster ready: %s", p.Name)
+	return nil
+}
+
+// WaitForClusterToTerminate will wait until all cluster pods are terminated
+func WaitForClusterToTerminate(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.NautilusCluster) error {
+	t.Logf("waiting for nautilus cluster to terminate: %s", p.Name)
+
+	listOptions := metav1.ListOptions{
+		LabelSelector: labels.SelectorFromSet(util.LabelsForNautilusCluster(p)).String(),
+	}
+
+	// Wait for Pods to terminate
+	err := wait.Poll(RetryInterval, 2*time.Minute, func() (done bool, err error) {
+		podList, err := f.KubeClient.CoreV1().Pods(p.Namespace).List(listOptions)
+		if err != nil {
+			return false, err
+		}
+
+		var names []string
+		for i := range podList.Items {
+			pod := &podList.Items[i]
+			names = append(names, pod.Name)
+		}
+		t.Logf("waiting for pods to terminate, running pods (%v)", names)
+		if len(names) != 0 {
+			return false, nil
+		}
+		return true, nil
+	})
+
+	if err != nil {
+		return err
+	}
+
+	// Wait for PVCs to terminate
+	err = wait.Poll(RetryInterval, 1*time.Minute, func() (done bool, err error) {
+		pvcList, err := f.KubeClient.CoreV1().PersistentVolumeClaims(p.Namespace).List(listOptions)
+		if err != nil {
+			return false, err
+		}
+
+		var names []string
+		for i := range pvcList.Items {
+			pvc := &pvcList.Items[i]
+			names = append(names, pvc.Name)
+		}
+		t.Logf("waiting for pvc to terminate (%v)", names)
+		if len(names) != 0 {
+			return false, nil
+		}
+		return true, nil
+	})
+
+	if err != nil {
+		return err
+	}
+
+	t.Logf("nautilus cluster terminated: %s", p.Name)
+	return nil
+}
+
+// WriteAndReadData writes sample data and reads it back from the given Nautilus cluster
+func WriteAndReadData(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.NautilusCluster) error {
+	t.Logf("writing and reading data from nautilus cluster: %s", p.Name)
+	testJob := NewTestWriteReadJob(p.Namespace, util.ServiceNameForController(p.Name))
+	err := f.Client.Create(goctx.TODO(), testJob, &framework.CleanupOptions{TestContext: ctx, Timeout: CleanupTimeout, RetryInterval: CleanupRetryInterval})
+	if err != nil {
+		return fmt.Errorf("failed to create job: %s", err)
+	}
+
+	err = wait.Poll(RetryInterval, 3*time.Minute, func() (done bool, err error) {
+		job, err := f.KubeClient.BatchV1().Jobs(p.Namespace).Get(testJob.Name, metav1.GetOptions{IncludeUninitialized: false})
+		if err != nil {
+			return false, err
+		}
+		if job.Status.CompletionTime.IsZero() {
+			return false, nil
+		}
+		if job.Status.Failed > 0 {
+			return false, fmt.Errorf("failed to write and read data from cluster")
+		}
+		return true, nil
+	})
+
+	if err != nil {
+		return err
+	}
+
+	t.Logf("nautilus cluster validated: %s", p.Name)
+	return nil
+}
fmt.Errorf("failed to wait for tier2 termination: %s", err) + } + + tier2 = NewTier2(namespace) + err = f.Client.Create(goctx.TODO(), tier2, &framework.CleanupOptions{TestContext: ctx, Timeout: CleanupTimeout, RetryInterval: CleanupRetryInterval}) + if err != nil { + return fmt.Errorf("failed to create tier2: %s", err) + } + + t.Logf("nautilus cluster tier2 restarted") + return nil +} + +func CheckPvcSanity(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, p *api.NautilusCluster) error { + t.Logf("checking pvc sanity: %s", p.Name) + listOptions := metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(util.LabelsForBookie(p)).String(), + } + pvcList, err := f.KubeClient.CoreV1().PersistentVolumeClaims(p.Namespace).List(listOptions) + if err != nil { + return err + } + + for _, pvc := range pvcList.Items { + if pvc.Status.Phase != corev1.ClaimBound { + continue + } + if util.PvcIsOrphan(pvc.Name, p.Spec.Bookkeeper.Replicas) { + return fmt.Errorf("bookie pvc is illegal") + } + + } + + listOptions = metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(util.LabelsForNode(p)).String(), + } + pvcList, err = f.KubeClient.CoreV1().PersistentVolumeClaims(p.Namespace).List(listOptions) + if err != nil { + return err + } + + for _, pvc := range pvcList.Items { + if pvc.Status.Phase != corev1.ClaimBound { + continue + } + if util.PvcIsOrphan(pvc.Name, p.Spec.Nautilus.NodeReplicas) { + return fmt.Errorf("segment store pvc is illegal") + } + + } + + t.Logf("pvc validated: %s", p.Name) + return nil +} diff --git a/nautilus-operator/cluster-operator/pkg/test/e2e/e2eutil/spec_util.go b/nautilus-operator/cluster-operator/pkg/test/e2e/e2eutil/spec_util.go new file mode 100644 index 0000000..b5733bd --- /dev/null +++ b/nautilus-operator/cluster-operator/pkg/test/e2e/e2eutil/spec_util.go @@ -0,0 +1,102 @@ +/** + * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package e2eutil
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/api/resource"
+
+	api "github.com/nautilus/nautilus-operator/pkg/apis/nautilus/v1alpha1"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// NewDefaultCluster returns a cluster with an empty spec, which will be filled
+// with default values
+func NewDefaultCluster(namespace string) *api.NautilusCluster {
+	return &api.NautilusCluster{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "NautilusCluster",
+			APIVersion: "nautilus.nautilus.io/v1alpha1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test",
+			Namespace: namespace,
+		},
+	}
+}
+
+func newTestJob(namespace string, command string) *batchv1.Job {
+	deadline := int64(180)
+	retries := int32(1)
+	return &batchv1.Job{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "Job",
+			APIVersion: "batch/v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: "test-job-",
+			Namespace: namespace,
+		},
+		Spec: batchv1.JobSpec{
+			ActiveDeadlineSeconds: &deadline,
+			BackoffLimit: &retries,
+			Template: corev1.PodTemplateSpec{
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
+						{
+							Name: "test-container",
+							Image: "adrianmo/nautilus-samples",
+							ImagePullPolicy: corev1.PullIfNotPresent,
+							Command: []string{"/bin/sh", "-c"},
+							Args: []string{command},
+						},
+					},
+					RestartPolicy: corev1.RestartPolicyNever,
+				},
+			},
+		},
+	}
+}
+
+// NewTestWriteReadJob returns a Job that can test nautilus cluster by running a sample
+func NewTestWriteReadJob(namespace string, controllerUri string) *batchv1.Job {
+	command := fmt.Sprintf("cd /samples/nautilus-client-examples "+
+		"&& bin/helloWorldWriter -u tcp://%s:9090 "+
+		"&& bin/helloWorldReader -u tcp://%s:9090",
+		controllerUri, controllerUri)
+	return newTestJob(namespace, command)
+}
+
+func NewTier2(namespace string) *corev1.PersistentVolumeClaim {
+	storageName := "nfs"
+	return &corev1.PersistentVolumeClaim{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "PersistentVolumeClaim",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "nautilus-tier2",
+			Namespace: namespace,
+		},
+		Spec: corev1.PersistentVolumeClaimSpec{
+			StorageClassName: &storageName,
+			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
+			Resources: corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{
+					corev1.ResourceStorage: resource.MustParse("5Gi"),
+				},
+			},
+		},
+	}
+}
diff --git a/nautilus-operator/cluster-operator/pkg/util/k8sutil.go b/nautilus-operator/cluster-operator/pkg/util/k8sutil.go
new file mode 100644
index 0000000..0a67ab9
--- /dev/null
+++ b/nautilus-operator/cluster-operator/pkg/util/k8sutil.go
@@ -0,0 +1,114 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package util
+
+import (
+	"context"
+	"time"
+
+	"github.com/nautilus/nautilus-operator/pkg/apis/nautilus/v1alpha1"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func DownwardAPIEnv() []corev1.EnvVar {
+	return []corev1.EnvVar{
+		{
+			Name: "POD_NAME",
+			ValueFrom: &corev1.EnvVarSource{
+				FieldRef: &corev1.ObjectFieldSelector{
+					APIVersion: "v1",
+					FieldPath: "metadata.name",
+				},
+			},
+		},
+		{
+			Name: "POD_NAMESPACE",
+			ValueFrom: &corev1.EnvVarSource{
+				FieldRef: &corev1.ObjectFieldSelector{
+					APIVersion: "v1",
+					FieldPath: "metadata.namespace",
+				},
+			},
+		},
+	}
+}
+
+func PodAntiAffinity(component string, clusterName string) *corev1.Affinity {
+	return &corev1.Affinity{
+		PodAntiAffinity: &corev1.PodAntiAffinity{
+			PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
+				{
+					Weight: 100,
+					PodAffinityTerm: corev1.PodAffinityTerm{
+						LabelSelector: &metav1.LabelSelector{
+							MatchExpressions: []metav1.LabelSelectorRequirement{
+								{
+									Key: "component",
+									Operator: metav1.LabelSelectorOpIn,
+									Values: []string{component},
+								},
+								{
+									Key: "nautilus_cluster",
+									Operator: metav1.LabelSelectorOpIn,
+									Values: []string{clusterName},
+								},
+							},
+						},
+						TopologyKey: "kubernetes.io/hostname",
+					},
+				},
+			},
+		},
+	}
+}
+
+// WaitForClusterToTerminate waits for all pods in the cluster to be terminated
+func WaitForClusterToTerminate(kubeClient client.Client, p *v1alpha1.NautilusCluster) (err error) {
+	listOptions := &client.ListOptions{
+		LabelSelector: labels.SelectorFromSet(LabelsForNautilusCluster(p)),
+	}
+
+	err = wait.Poll(5*time.Second, 2*time.Minute, func() (done bool, err error) {
+		podList := &corev1.PodList{}
+		err = kubeClient.List(context.TODO(), listOptions, podList)
+		if err != nil {
+			return false, err
+		}
+
+		var names []string
+		for i := range podList.Items {
+			pod := &podList.Items[i]
+			names = append(names, pod.Name)
+		}
+
+		if len(names) != 0 {
+			return false, nil
+		}
+		return true, nil
+	})
+
+	return err
+}
+
+func IsPodReady(pod *corev1.Pod) bool {
+	for _, condition := range pod.Status.Conditions {
+		if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue {
+			return true
+		}
+	}
+	return false
+}
diff --git a/nautilus-operator/cluster-operator/pkg/util/nautiluscluster.go b/nautilus-operator/cluster-operator/pkg/util/nautiluscluster.go
new file mode 100644
index 0000000..53c5a58
--- /dev/null
+++ b/nautilus-operator/cluster-operator/pkg/util/nautiluscluster.go
@@ -0,0 +1,149 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package util
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/nautilus/nautilus-operator/pkg/apis/nautilus/v1alpha1"
+)
+
+func PdbNameForBookie(clusterName string) string {
+	return fmt.Sprintf("%s-bookie", clusterName)
+}
+
+func ConfigMapNameForBookie(clusterName string) string {
+	return fmt.Sprintf("%s-bookie", clusterName)
+}
+
+func StatefulSetNameForBookie(clusterName string) string {
+	return fmt.Sprintf("%s-bookie", clusterName)
+}
+
+func PdbNameForController(clusterName string) string {
+	return fmt.Sprintf("%s-nautilus-controller", clusterName)
+}
+
+func ConfigMapNameForController(clusterName string) string {
+	return fmt.Sprintf("%s-nautilus-controller", clusterName)
+}
+
+func ServiceNameForController(clusterName string) string {
+	return fmt.Sprintf("%s-nautilus-controller", clusterName)
+}
+
+func ServiceNameForNode(clusterName string, index int32) string {
+	return fmt.Sprintf("%s-nautilus-node-%d", clusterName, index)
+}
+
+func HeadlessServiceNameForNode(clusterName string) string {
+	return fmt.Sprintf("%s-nautilus-node-headless", clusterName)
+}
+
+func HeadlessServiceNameForBookie(clusterName string) string {
+	return fmt.Sprintf("%s-bookie-headless", clusterName)
+}
+
+func DeploymentNameForController(clusterName string) string {
+	return fmt.Sprintf("%s-nautilus-controller", clusterName)
+}
+
+func PdbNameForNode(clusterName string) string {
+	return fmt.Sprintf("%s-node", clusterName)
+}
+
+func ConfigMapNameForNode(clusterName string) string {
+	return fmt.Sprintf("%s-nautilus-node", clusterName)
+}
+
+func StatefulSetNameForNode(clusterName string) string {
+	return fmt.Sprintf("%s-nautilus-node", clusterName)
+}
+
+func LabelsForBookie(nautilusCluster *v1alpha1.NautilusCluster) map[string]string {
+	labels := LabelsForNautilusCluster(nautilusCluster)
+	labels["component"] = "bookie"
+	return labels
+}
+
+func LabelsForController(nautilusCluster *v1alpha1.NautilusCluster) map[string]string {
+	labels := LabelsForNautilusCluster(nautilusCluster)
+	labels["component"] = "nautilus-controller"
+	return labels
+}
+
+func LabelsForNode(nautilusCluster *v1alpha1.NautilusCluster) map[string]string {
+	labels := LabelsForNautilusCluster(nautilusCluster)
+	labels["component"] = "nautilus-node"
+	return labels
+}
+
+func LabelsForNautilusCluster(nautilusCluster *v1alpha1.NautilusCluster) map[string]string {
+	return map[string]string{
+		"app": "nautilus-cluster",
+		"nautilus_cluster": nautilusCluster.Name,
+	}
+}
+
+func PvcIsOrphan(stsPvcName string, replicas int32) bool {
+	index := strings.LastIndexAny(stsPvcName, "-")
+	if index == -1 {
+		return false
+	}
+
+	ordinal, err := strconv.Atoi(stsPvcName[index+1:])
+	if err != nil {
+		return false
+	}
+
+	return int32(ordinal) >= replicas
+}
+
+func NautilusControllerServiceURL(nautilusCluster v1alpha1.NautilusCluster) string {
+	return fmt.Sprintf("tcp://%v.%v:%v", ServiceNameForController(nautilusCluster.Name), nautilusCluster.Namespace, "9090")
+}
+
+func HealthcheckCommand(port int32) []string {
+	return []string{"/bin/sh", "-c", fmt.Sprintf("netstat -ltn 2> /dev/null | grep %d || ss -ltn 2> /dev/null | grep %d", port, port)}
+}
+
+// Min returns the smaller of x or y.
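+// For example, Min(3, replicas) caps the bookie wait list built in
+// MakeNodeConfigMap at three addresses.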
+func Min(x, y int32) int32 {
+	if x > y {
+		return y
+	}
+	return x
+}
+
+func ContainsString(slice []string, str string) bool {
+	for _, item := range slice {
+		if item == str {
+			return true
+		}
+	}
+	return false
+}
+
+func RemoveString(slice []string, str string) (result []string) {
+	for _, item := range slice {
+		if item == str {
+			continue
+		}
+		result = append(result, item)
+	}
+	return result
+}
+
+func GetClusterExpectedSize(p *v1alpha1.NautilusCluster) (size int) {
+	return int(p.Spec.Nautilus.ControllerReplicas + p.Spec.Nautilus.NodeReplicas + p.Spec.Bookkeeper.Replicas)
+}
diff --git a/nautilus-operator/cluster-operator/pkg/util/zookeeper_util.go b/nautilus-operator/cluster-operator/pkg/util/zookeeper_util.go
new file mode 100644
index 0000000..d8d3197
--- /dev/null
+++ b/nautilus-operator/cluster-operator/pkg/util/zookeeper_util.go
@@ -0,0 +1,86 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package util
+
+import (
+	"container/list"
+	"fmt"
+	"time"
+
+	"github.com/nautilus/nautilus-operator/pkg/apis/nautilus/v1alpha1"
+	"github.com/samuel/go-zookeeper/zk"
+)
+
+const (
+	// Set in https://github.com/nautilus/nautilus/blob/master/docker/bookkeeper/entrypoint.sh#L21
+	NautilusPath = "nautilus"
+	ZkFinalizer = "cleanUpZookeeper"
+)
+
+// DeleteAllZnodes deletes all znodes related to a specific Nautilus cluster
+func DeleteAllZnodes(p *v1alpha1.NautilusCluster) (err error) {
+	host := []string{p.Spec.ZookeeperUri}
+	conn, _, err := zk.Connect(host, time.Second*5)
+	if err != nil {
+		return fmt.Errorf("failed to connect to zookeeper: %v", err)
+	}
+	defer conn.Close()
+
+	root := fmt.Sprintf("/%s/%s", NautilusPath, p.Name)
+	exist, _, err := conn.Exists(root)
+	if err != nil {
+		return fmt.Errorf("failed to check if zookeeper path exists: %v", err)
+	}
+
+	if exist {
+		// Build a BFS ordering of the subtree, then delete znodes from the
+		// back of the list so that children are removed before their parents
+		tree, err := ListSubTreeBFS(conn, root)
+		if err != nil {
+			return fmt.Errorf("failed to construct BFS tree: %v", err)
+		}
+
+		for tree.Len() != 0 {
+			err := conn.Delete(tree.Back().Value.(string), -1)
+			if err != nil {
+				return fmt.Errorf("failed to delete znode (%s): %v", tree.Back().Value.(string), err)
+			}
+			tree.Remove(tree.Back())
+		}
+	}
+	return nil
+}
+
+// ListSubTreeBFS walks the znode subtree under root in BFS order and returns the visited paths
+func ListSubTreeBFS(conn *zk.Conn, root string) (*list.List, error) {
+	queue := list.New()
+	tree := list.New()
+	queue.PushBack(root)
+	tree.PushBack(root)
+
+	for queue.Len() > 0 {
+		node := queue.Front()
+		children, _, err := conn.Children(node.Value.(string))
+		if err != nil {
+			return tree, err
+		}
+
+		for _, child := range children {
+			childPath := fmt.Sprintf("%s/%s", node.Value.(string), child)
+			queue.PushBack(childPath)
+			tree.PushBack(childPath)
+		}
+		queue.Remove(node)
+	}
+	return tree, nil
+}
diff --git a/nautilus-operator/cluster-operator/pkg/version/version.go b/nautilus-operator/cluster-operator/pkg/version/version.go
new file mode 100644
index 0000000..a620eed
--- /dev/null
+++ b/nautilus-operator/cluster-operator/pkg/version/version.go
@@ -0,0 +1,17 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package version
+
+// Version represents the software version of the Nautilus Operator
+var Version string
+
+// GitSHA represents the Git commit hash in short format
+var GitSHA string
diff --git a/nautilus-operator/cluster-operator/scripts/check_format.sh b/nautilus-operator/cluster-operator/scripts/check_format.sh
new file mode 100755
index 0000000..bba3706
--- /dev/null
+++ b/nautilus-operator/cluster-operator/scripts/check_format.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+# exit immediately when a command fails
+set -e
+# only exit with zero if all commands of the pipeline exit successfully
+set -o pipefail
+# error on unset variables
+set -u
+
+goFiles=$(find . -name \*.go -not -path "./vendor/*" -print)
+invalidFiles=$(gofmt -l $goFiles)
+
+if [ -n "$invalidFiles" ]; then
+    echo "These files did not pass the 'go fmt' check, please run 'go fmt' on them:"
+    echo "$invalidFiles"
+    exit 1
+fi
diff --git a/nautilus-operator/cluster-operator/scripts/check_license.sh b/nautilus-operator/cluster-operator/scripts/check_license.sh
new file mode 100755
index 0000000..0ce0ef6
--- /dev/null
+++ b/nautilus-operator/cluster-operator/scripts/check_license.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+# exit immediately when a command fails
+set -e
+# only exit with zero if all commands of the pipeline exit successfully
+set -o pipefail
+# error on unset variables
+set -u
+
+# Flag any Go file outside vendor/ whose first three lines carry no
+# copyright or code-generation marker.
+licRes=$(
+    find . -type f -iname '*.go' ! -path '*/vendor/*' -exec \
+        sh -c 'head -n3 "$1" | grep -Eq "(Copyright|generated|GENERATED)" || echo "$1"' sh {} \;
+)
+
+if [ -n "${licRes}" ]; then
+    echo -e "license header checking failed:\\n${licRes}"
+    exit 255
+fi
diff --git a/nautilus-operator/cluster-operator/test/e2e/basic_test.go b/nautilus-operator/cluster-operator/test/e2e/basic_test.go
new file mode 100644
index 0000000..83d2c02
--- /dev/null
+++ b/nautilus-operator/cluster-operator/test/e2e/basic_test.go
@@ -0,0 +1,145 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package e2e
+
+import (
+    "testing"
+
+    nautilus_e2eutil "github.com/nautilus/nautilus-operator/pkg/test/e2e/e2eutil"
+    framework "github.com/operator-framework/operator-sdk/pkg/test"
+)
+
+// testCreateDefaultCluster creates a default cluster, verifies that data can
+// be written and read, and tears the cluster down again.
+func testCreateDefaultCluster(t *testing.T) {
+    doCleanup := true
+    ctx := framework.NewTestCtx(t)
+    defer func() {
+        if doCleanup {
+            ctx.Cleanup()
+        }
+    }()
+
+    namespace, err := ctx.GetNamespace()
+    if err != nil {
+        t.Fatal(err)
+    }
+    f := framework.Global
+
+    nautilus, err := nautilus_e2eutil.CreateCluster(t, f, ctx, nautilus_e2eutil.NewDefaultCluster(namespace))
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    podSize := 5
+    err = nautilus_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, nautilus, podSize)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    err = nautilus_e2eutil.WriteAndReadData(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    err = nautilus_e2eutil.DeleteCluster(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // No need to do cleanup since the cluster CR has already been deleted
+    doCleanup = false
+
+    err = nautilus_e2eutil.WaitForClusterToTerminate(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // A workaround for issue 93
+    err = nautilus_e2eutil.RestartTier2(t, f, ctx, namespace)
+    if err != nil {
+        t.Fatal(err)
+    }
+}
+
+// testRecreateDefaultCluster re-creates a Nautilus cluster under the same
+// name after deleting it (issue 91)
+func testRecreateDefaultCluster(t *testing.T) {
+    doCleanup := true
+    ctx := framework.NewTestCtx(t)
+    defer func() {
+        if doCleanup {
+            ctx.Cleanup()
+        }
+    }()
+
+    namespace, err := ctx.GetNamespace()
+    if err != nil {
+        t.Fatal(err)
+    }
+    f := framework.Global
+
+    defaultCluster := nautilus_e2eutil.NewDefaultCluster(namespace)
+
+    nautilus, err := nautilus_e2eutil.CreateCluster(t, f, ctx, defaultCluster)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    podSize := 5
+    err = nautilus_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, nautilus, podSize)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    err = nautilus_e2eutil.DeleteCluster(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    err = nautilus_e2eutil.WaitForClusterToTerminate(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    defaultCluster = nautilus_e2eutil.NewDefaultCluster(namespace)
+
+    nautilus, err = nautilus_e2eutil.CreateCluster(t, f, ctx, defaultCluster)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    err = nautilus_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, nautilus, podSize)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    err = nautilus_e2eutil.WriteAndReadData(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    err = nautilus_e2eutil.DeleteCluster(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // No need to do cleanup since the cluster CR has already been deleted
+    doCleanup = false
+
+    err = nautilus_e2eutil.WaitForClusterToTerminate(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // A workaround for issue 93
+    err = nautilus_e2eutil.RestartTier2(t, f, ctx, namespace)
+    if err != nil {
+        t.Fatal(err)
+    }
+}
diff --git a/nautilus-operator/cluster-operator/test/e2e/main_test.go b/nautilus-operator/cluster-operator/test/e2e/main_test.go
new file mode 100644
index 0000000..6b14301
--- /dev/null
+++ b/nautilus-operator/cluster-operator/test/e2e/main_test.go
@@ -0,0 +1,23 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package e2e
+
+import (
+    "testing"
+
+    f "github.com/operator-framework/operator-sdk/pkg/test"
+
+    _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+)
+
+func TestMain(m *testing.M) {
+    f.MainEntry(m)
+}
diff --git a/nautilus-operator/cluster-operator/test/e2e/nautiluscluster_test.go b/nautilus-operator/cluster-operator/test/e2e/nautiluscluster_test.go
new file mode 100644
index 0000000..850e038
--- /dev/null
+++ b/nautilus-operator/cluster-operator/test/e2e/nautiluscluster_test.go
@@ -0,0 +1,68 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package e2e
+
+import (
+    "testing"
+
+    apis "github.com/nautilus/nautilus-operator/pkg/apis"
+    operator "github.com/nautilus/nautilus-operator/pkg/apis/nautilus/v1alpha1"
+    nautilus_e2eutil "github.com/nautilus/nautilus-operator/pkg/test/e2e/e2eutil"
+    framework "github.com/operator-framework/operator-sdk/pkg/test"
+    "github.com/operator-framework/operator-sdk/pkg/test/e2eutil"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestNautilusCluster(t *testing.T) {
+    nautilusClusterList := &operator.NautilusClusterList{
+        TypeMeta: metav1.TypeMeta{
+            Kind:       "NautilusCluster",
+            APIVersion: "nautilus.nautilus.io/v1alpha1",
+        },
+    }
+    err := framework.AddToFrameworkScheme(apis.AddToScheme, nautilusClusterList)
+    if err != nil {
+        t.Fatalf("failed to add custom resource scheme to framework: %v", err)
+    }
+    // run subtests
+    t.Run("cluster", testNautilusCluster)
+}
+
+func testNautilusCluster(t *testing.T) {
+    ctx := framework.NewTestCtx(t)
+    defer ctx.Cleanup()
+    err := ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: nautilus_e2eutil.CleanupTimeout, RetryInterval: nautilus_e2eutil.CleanupRetryInterval})
+    if err != nil {
+        t.Fatalf("failed to initialize cluster resources: %v", err)
+    }
+    t.Log("Initialized cluster resources")
+    namespace, err := ctx.GetNamespace()
+    if err != nil {
+        t.Fatal(err)
+    }
+    // get global framework variables
+    f := framework.Global
+    // wait for nautilus-operator to be ready
+    err = e2eutil.WaitForOperatorDeployment(t, f.KubeClient, namespace, "nautilus-operator", 1, nautilus_e2eutil.RetryInterval, nautilus_e2eutil.Timeout)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    testFuncs := map[string]func(t *testing.T){
+        "testCreateDefaultCluster":   testCreateDefaultCluster,
+        "testRecreateDefaultCluster": testRecreateDefaultCluster,
+        "testScaleCluster":           testScaleCluster,
+    }
+
+    for name, fn := range testFuncs {
+        t.Run(name, fn)
+    }
+}
diff --git a/nautilus-operator/cluster-operator/test/e2e/resources/tier2.yaml b/nautilus-operator/cluster-operator/test/e2e/resources/tier2.yaml
new file mode 100644
index 0000000..eb02f88
--- /dev/null
+++ b/nautilus-operator/cluster-operator/test/e2e/resources/tier2.yaml
@@ -0,0 +1,11 @@
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: nautilus-tier2
+spec:
+  storageClassName: "nfs"
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 5Gi
diff --git a/nautilus-operator/cluster-operator/test/e2e/resources/zookeeper.yaml b/nautilus-operator/cluster-operator/test/e2e/resources/zookeeper.yaml
new file mode 100644
index 0000000..bd13f14
--- /dev/null
+++ b/nautilus-operator/cluster-operator/test/e2e/resources/zookeeper.yaml
@@ -0,0 +1,153 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: zookeeperclusters.zookeeper.nautilus.io
+spec:
+  group: zookeeper.nautilus.io
+  names:
+    kind: ZookeeperCluster
+    listKind: ZookeeperClusterList
+    plural: zookeeperclusters
+    singular: zookeepercluster
+    shortNames:
+      - zk
+  additionalPrinterColumns:
+    - name: Replicas
+      type: integer
+      description: The number of ZooKeeper servers in the ensemble
+      JSONPath: .status.replicas
+    - name: Ready Replicas
+      type: integer
+      description: The number of ZooKeeper servers in the ensemble that are in a Ready state
+      JSONPath: .status.readyReplicas
+    - name: Internal Endpoint
+      type: string
+      description: Client endpoint internal to cluster network
+      JSONPath: .status.internalClientEndpoint
+    - name: External Endpoint
+      type: string
+      description: Client endpoint external to cluster network via LoadBalancer
+      JSONPath: .status.externalClientEndpoint
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+  scope: Namespaced
+  version: v1beta1
+  subresources:
+    status: {}
+
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: zookeeper-operator
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: zookeeper-operator
+rules:
+- apiGroups:
+  - zookeeper.nautilus.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - services
+  - endpoints
+  - persistentvolumeclaims
+  - events
+  - configmaps
+  - secrets
+  verbs:
+  - "*"
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  - daemonsets
+  - replicasets
+  - statefulsets
+  verbs:
+  - "*"
+- apiGroups:
+  - policy
+  resources:
+  - poddisruptionbudgets
+  verbs:
+  - "*"
+
+---
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: zookeeper-operator-cluster-role-binding
+subjects:
+- kind: ServiceAccount
+  name: zookeeper-operator
+  namespace: default
+roleRef:
+  kind: ClusterRole
+  name: zookeeper-operator
+  apiGroup: rbac.authorization.k8s.io
+
+---
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: zookeeper-operator
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      name: zookeeper-operator
+  template:
+    metadata:
+      labels:
+        name: zookeeper-operator
+    spec:
+      serviceAccountName: zookeeper-operator
+      containers:
+      - name: zookeeper-operator
+        image: nautilus/zookeeper-operator:0.2.1
+        ports:
+        - containerPort: 60000
+          name: metrics
+        command:
+        - zookeeper-operator
+        imagePullPolicy: Always
+        readinessProbe:
+          exec:
+            command:
+            - stat
+            - /tmp/operator-sdk-ready
+          initialDelaySeconds: 4
+          periodSeconds: 10
+          failureThreshold: 1
+        env:
+        - name: WATCH_NAMESPACE
+          value: ""
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: OPERATOR_NAME
+          value: "zookeeper-operator"
+
+---
+
+apiVersion: "zookeeper.nautilus.io/v1beta1"
+kind: "ZookeeperCluster"
+metadata:
+  name: "zk"
+spec:
+  replicas: 1
diff --git a/nautilus-operator/cluster-operator/test/e2e/scale_test.go b/nautilus-operator/cluster-operator/test/e2e/scale_test.go
new file mode 100644
index 0000000..ab32722
--- /dev/null
+++ b/nautilus-operator/cluster-operator/test/e2e/scale_test.go
@@ -0,0 +1,111 @@
+/**
+ * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package e2e
+
+import (
+    "testing"
+
+    nautilus_e2eutil "github.com/nautilus/nautilus-operator/pkg/test/e2e/e2eutil"
+    framework "github.com/operator-framework/operator-sdk/pkg/test"
+)
+
+// testScaleCluster scales a default cluster up (one extra bookie and one
+// extra node) and back down, then checks the remaining PVCs.
+func testScaleCluster(t *testing.T) {
+    doCleanup := true
+    ctx := framework.NewTestCtx(t)
+    defer func() {
+        if doCleanup {
+            ctx.Cleanup()
+        }
+    }()
+
+    namespace, err := ctx.GetNamespace()
+    if err != nil {
+        t.Fatal(err)
+    }
+    f := framework.Global
+
+    nautilus, err := nautilus_e2eutil.CreateCluster(t, f, ctx, nautilus_e2eutil.NewDefaultCluster(namespace))
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    podSize := 5
+    err = nautilus_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, nautilus, podSize)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // Re-fetch the Nautilus cluster object so the update below uses the
+    // latest resource version
+    nautilus, err = nautilus_e2eutil.GetCluster(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    nautilus.Spec.Bookkeeper.Replicas = 4
+    nautilus.Spec.Nautilus.NodeReplicas = 2
+    podSize = 7
+
+    err = nautilus_e2eutil.UpdateCluster(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    err = nautilus_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, nautilus, podSize)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // Re-fetch the Nautilus cluster object so the update below uses the
+    // latest resource version
+    nautilus, err = nautilus_e2eutil.GetCluster(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // Scale down Nautilus cluster back to default
+    nautilus.Spec.Bookkeeper.Replicas = 3
+    nautilus.Spec.Nautilus.NodeReplicas = 1
+    podSize = 5
+
+    err = nautilus_e2eutil.UpdateCluster(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    err = nautilus_e2eutil.WaitForClusterToBecomeReady(t, f, ctx, nautilus, podSize)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // Check that the PVCs are consistent with the scaled-down replica counts
+    err = nautilus_e2eutil.CheckPvcSanity(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // Delete cluster
+    err = nautilus_e2eutil.DeleteCluster(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // No need to do cleanup since the cluster CR has already been deleted
+    doCleanup = false
+
+    err = nautilus_e2eutil.WaitForClusterToTerminate(t, f, ctx, nautilus)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // A workaround for issue 93
+    err = nautilus_e2eutil.RestartTier2(t, f, ctx, namespace)
+    if err != nil {
+        t.Fatal(err)
+    }
+}
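
A note on the PVC check above: CheckPvcSanity plausibly builds on util.PvcIsOrphan, which keys off the ordinal suffix Kubernetes appends to StatefulSet PVC names. A minimal, self-contained sketch of that predicate's behavior; the PVC names below are hypothetical but follow the <claim>-<statefulset>-<ordinal> pattern:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// pvcIsOrphan mirrors util.PvcIsOrphan: a StatefulSet PVC named
// <claim>-<statefulset>-<ordinal> is orphaned once its trailing ordinal is at
// or beyond the current replica count.
func pvcIsOrphan(stsPvcName string, replicas int32) bool {
    index := strings.LastIndex(stsPvcName, "-")
    if index == -1 {
        return false
    }
    ordinal, err := strconv.Atoi(stsPvcName[index+1:])
    if err != nil {
        return false
    }
    return int32(ordinal) >= replicas
}

func main() {
    // Hypothetical PVCs left behind after scaling bookies from 4 back to 3:
    for _, name := range []string{"index-foo-bookie-0", "index-foo-bookie-2", "index-foo-bookie-3"} {
        fmt.Printf("%s orphan=%v\n", name, pvcIsOrphan(name, 3))
    }
    // index-foo-bookie-0 orphan=false
    // index-foo-bookie-2 orphan=false
    // index-foo-bookie-3 orphan=true
}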
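
The znode cleanup in pkg/util/zookeeper_util.go rests on an ordering argument worth making explicit: ListSubTreeBFS records parents before children, so DeleteAllZnodes walks the list back to front and always deletes a znode's children before the znode itself, which ZooKeeper requires. A self-contained sketch of the same traversal, with an in-memory map standing in for conn.Children; the znode paths are made up:

package main

import (
    "container/list"
    "fmt"
)

// children stands in for conn.Children against a live ZooKeeper ensemble.
var children = map[string][]string{
    "/nautilus/foo":          {"segments", "state"},
    "/nautilus/foo/segments": {"0"},
}

func main() {
    root := "/nautilus/foo"
    queue := list.New()
    tree := list.New()
    queue.PushBack(root)
    tree.PushBack(root)

    // Same shape as ListSubTreeBFS: parents are appended before children.
    for queue.Len() > 0 {
        node := queue.Front()
        for _, child := range children[node.Value.(string)] {
            path := node.Value.(string) + "/" + child
            queue.PushBack(path)
            tree.PushBack(path)
        }
        queue.Remove(node)
    }

    // Deleting from the back removes leaves first, children before parents.
    for tree.Len() > 0 {
        fmt.Println("delete", tree.Back().Value.(string))
        tree.Remove(tree.Back())
    }
    // delete /nautilus/foo/segments/0
    // delete /nautilus/foo/state
    // delete /nautilus/foo/segments
    // delete /nautilus/foo
}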
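
Finally, the hard-coded podSize values in the e2e tests encode the same arithmetic as util.GetClusterExpectedSize: controller replicas plus node replicas plus bookie replicas. Assuming the default cluster runs 1 controller, 1 node, and 3 bookies (consistent with testScaleCluster, where one extra node and one extra bookie move the expected count from 5 to 7), a sketch:

package main

import "fmt"

// expectedSize mirrors util.GetClusterExpectedSize: the expected pod count is
// the sum of controller, node, and bookie replicas.
func expectedSize(controllers, nodes, bookies int32) int {
    return int(controllers + nodes + bookies)
}

func main() {
    fmt.Println(expectedSize(1, 1, 3)) // 5 — the default cluster's podSize in the tests
    fmt.Println(expectedSize(1, 2, 4)) // 7 — after the scale-up in testScaleCluster
}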