diff --git a/collector/.mvn/wrapper/maven-wrapper.properties b/collector/.mvn/wrapper/maven-wrapper.properties
new file mode 100755
index 000000000..56bb0164e
--- /dev/null
+++ b/collector/.mvn/wrapper/maven-wrapper.properties
@@ -0,0 +1 @@
+distributionUrl=https://repo1.maven.org/maven2/org/apache/maven/apache-maven/3.5.0/apache-maven-3.5.0-bin.zip
\ No newline at end of file
diff --git a/collector/LICENSE b/collector/LICENSE
new file mode 100644
index 000000000..6c225e3b7
--- /dev/null
+++ b/collector/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+
+ Copyright 2017 Expedia, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/collector/Makefile b/collector/Makefile
new file mode 100644
index 000000000..1e6fe5074
--- /dev/null
+++ b/collector/Makefile
@@ -0,0 +1,32 @@
+.PHONY: all clean build report-coverage kinesis build_kinesis http build_http release
+
+PWD := $(shell pwd)
+
+clean:
+ mvn clean
+
+build: clean
+ mvn package
+
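+# runs the scoverage report inside a maven docker container (reusing the local ~/.m2 cache)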
+report-coverage:
+ docker run -it -v ~/.m2:/root/.m2 -w /src -v `pwd`:/src maven:3.5.0-jdk-8 /bin/sh -c 'mvn scoverage:report-only && mvn clean'
+
+all: clean kinesis http report-coverage
+
+kinesis: build_kinesis
+ cd kinesis && $(MAKE) integration_test
+
+build_kinesis:
+ mvn package -DfinalName=haystack-kinesis-span-collector -pl kinesis -am
+
+http: build_http
+ cd http && $(MAKE) integration_test
+
+build_http:
+ mvn package -DfinalName=haystack-http-span-collector -pl http -am
+
+# build all and release
+release: clean build_kinesis build_http
+ cd kinesis && $(MAKE) release
+ cd http && $(MAKE) release
+ ./.travis/deploy.sh
diff --git a/collector/README.md b/collector/README.md
new file mode 100644
index 000000000..3189d540d
--- /dev/null
+++ b/collector/README.md
@@ -0,0 +1,52 @@
+[![Build Status](https://travis-ci.org/ExpediaDotCom/haystack-collector.svg?branch=master)](https://travis-ci.org/ExpediaDotCom/haystack-collector)
+[![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](https://github.com/ExpediaDotCom/haystack/blob/master/LICENSE)
+
+# haystack-collector
+This haystack component collects spans from various sources and publishes them to Kafka. As of today, we support two sources:
+
+1. Kinesis: the Kinesis span collector reads proto-serialized spans from a Kinesis stream, validates them and writes the data to the configured Kafka topic.
+2. Http: the HTTP span collector listens on port 8080 for proto- or JSON-serialized spans, validates them and writes them to the configured Kafka topic. For more detail, read [this](./http/README.md).
+
+Spans are validated to ensure they don't contain an empty service name, operation name, span ID or trace ID; the start time must be a microsecond timestamp later than January 1, 1971, and the duration must be non-negative.
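+
+For illustration only, a minimal span that passes these checks could look like this in its JSON form (field names follow the haystack-idl proto; the values are made up):
+
+```
+{
+  "traceId": "a716c1c1-fd7a-4a54-bbf8-49a7d1eebef7",
+  "spanId": "de5ccbe6-69a4-4825-a769-7e9b9f25f4bf",
+  "serviceName": "example-service",
+  "operationName": "example-operation",
+  "startTime": 1512345678000000,
+  "duration": 42000
+}
+```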
+
+## Building
+
+#### Clone
+Since this repo contains haystack-idl as a submodule, clone it with:
+```
+git clone --recursive git@github.com:ExpediaDotCom/haystack-collector.git .
+```
+
+#### Prerequisites
+
+* Make sure you have Java 1.8
+* Make sure you have Maven 3.3.9 or higher
+* Make sure you have Docker 1.13 or higher
+
+Note: Mac users can install Docker for Mac to satisfy the last two prerequisites.
+
+#### Build
+
+For a full build, including unit tests, integration tests and the docker image build, run:
+```
+make all
+```
+
+#### Integration Test
+
+#### Prerequisites
+1. Install Docker using Docker Tools, or native Docker if on a Mac.
+2. Verify that docker-compose is installed by running the following command, and install it if it is missing.
+```
+docker-compose
+```
+
+Run the build and integration tests for individual components with
+```
+make kinesis
+```
+or
+```
+make http
+```
diff --git a/collector/checkstyles/scalastyle_config.xml b/collector/checkstyles/scalastyle_config.xml
new file mode 100644
index 000000000..0b5ba9469
--- /dev/null
+++ b/collector/checkstyles/scalastyle_config.xml
@@ -0,0 +1,134 @@
+<scalastyle>
+ <name>Scalastyle standard configuration</name>
+ <!-- individual check definitions elided -->
+</scalastyle>
\ No newline at end of file
diff --git a/collector/commons/pom.xml b/collector/commons/pom.xml
new file mode 100644
index 000000000..8dbc357ac
--- /dev/null
+++ b/collector/commons/pom.xml
@@ -0,0 +1,77 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <parent>
+        <artifactId>haystack-collector</artifactId>
+        <groupId>com.expedia.www</groupId>
+        <version>1.0-SNAPSHOT</version>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>haystack-collector-commons</artifactId>
+    <version>1.0-SNAPSHOT</version>
+    <packaging>jar</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.expedia.www</groupId>
+            <artifactId>haystack-span-decorators</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-clients</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka_${scala.major.minor.version}</artifactId>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-log4j12</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>com.codahale.metrics</groupId>
+            <artifactId>metrics-core</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java-util</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.scalatest</groupId>
+                <artifactId>scalatest-maven-plugin</artifactId>
+                <configuration>
+                    <membersOnlySuites>com.expedia.www.haystack.collector.commons.unit</membersOnlySuites>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>net.alchim31.maven</groupId>
+                <artifactId>scala-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.scalastyle</groupId>
+                <artifactId>scalastyle-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/MetricsSupport.scala b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/MetricsSupport.scala
new file mode 100644
index 000000000..258f6347c
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/MetricsSupport.scala
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons
+
+import com.codahale.metrics.MetricRegistry
+
+trait MetricsSupport {
+ val metricRegistry: MetricRegistry = MetricsRegistries.metricRegistry
+}
+
+object MetricsRegistries {
+ val metricRegistry = new MetricRegistry()
+}
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/ProtoSpanExtractor.scala b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/ProtoSpanExtractor.scala
new file mode 100644
index 000000000..d3688db60
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/ProtoSpanExtractor.scala
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons
+
+import java.nio.charset.Charset
+import java.time.Instant
+import java.time.temporal.ChronoUnit
+import java.util.concurrent.ConcurrentHashMap
+
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.collector.commons.ProtoSpanExtractor._
+import com.expedia.www.haystack.collector.commons.config.{ExtractorConfiguration, Format}
+import com.expedia.www.haystack.collector.commons.record.{KeyValueExtractor, KeyValuePair}
+import com.expedia.www.haystack.span.decorators.SpanDecorator
+import com.google.protobuf.util.JsonFormat
+import org.slf4j.Logger
+
+import scala.collection.JavaConverters._
+import scala.util.{Failure, Success, Try}
+
+object ProtoSpanExtractor {
+ private val DaysInYear1970 = 365
+ private val January_1_1971_00_00_00_GMT: Instant = Instant.EPOCH.plus(DaysInYear1970, ChronoUnit.DAYS)
+ // A common mistake clients often make is to pass in milliseconds instead of microseconds for start time.
+ // Insisting that all start times be > January 1 1971 GMT catches this error.
+ val SmallestAllowedStartTimeMicros: Long = January_1_1971_00_00_00_GMT.getEpochSecond * 1000000
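+ // For example, a 2017 start time mistakenly passed in milliseconds (~1.5e12) is below
+ // this threshold (365 * 86400 * 1e6 ≈ 3.15e13 microseconds) and is therefore rejected.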
+ val ServiceNameIsRequired = "Service Name is required: span=[%s]"
+ val OperationNameIsRequired = "Operation Name is required: serviceName=[%s]"
+ val SpanIdIsRequired = "Span ID is required: serviceName=[%s] operationName=[%s]"
+ val TraceIdIsRequired = "Trace ID is required: serviceName=[%s] operationName=[%s]"
+ val StartTimeIsInvalid = "Start time [%d] is invalid: serviceName=[%s] operationName=[%s]"
+ val DurationIsInvalid = "Duration [%d] is invalid: serviceName=[%s] operationName=[%s]"
+ val SpanSizeLimitExceeded = "Span Size Limit Exceeded: serviceName=[%s] operationName=[%s] traceId=[%s] spanSize=[%d] probableTags=[%s]"
+
+ val ServiceNameVsTtlAndOperationNames = new ConcurrentHashMap[String, TtlAndOperationNames]
+ val OperationNameCountExceededMeterName = "operation.name.count.exceeded"
+}
+
+class ProtoSpanExtractor(extractorConfiguration: ExtractorConfiguration,
+ val LOGGER: Logger, spanDecorators: List[SpanDecorator])
+ extends KeyValueExtractor with MetricsSupport {
+
+ private val printer = JsonFormat.printer().omittingInsignificantWhitespace()
+
+ private val invalidSpanMeter = metricRegistry.meter("invalid.span")
+ private val validSpanMeter = metricRegistry.meter("valid.span")
+ private val spanSizeLimitExceededMeter = metricRegistry.meter("sizeLimitExceeded.span")
+
+ override def configure(): Unit = ()
+
+ def validateServiceName(span: Span): Try[Span] = {
+ validate(span, span.getServiceName, ServiceNameIsRequired, span.toString)
+ }
+
+ def validateOperationName(span: Span): Try[Span] = {
+ validate(span, span.getOperationName, OperationNameIsRequired, span.getServiceName)
+ }
+
+ def validateSpanId(span: Span): Try[Span] = {
+ validate(span, span.getSpanId, SpanIdIsRequired, span.getServiceName, span.getOperationName)
+ }
+
+ def validateTraceId(span: Span): Try[Span] = {
+ validate(span, span.getTraceId, TraceIdIsRequired, span.getServiceName, span.getOperationName)
+ }
+
+ def validateStartTime(span: Span): Try[Span] = {
+ validate(span, span.getStartTime, StartTimeIsInvalid, SmallestAllowedStartTimeMicros, span.getServiceName, span.getOperationName)
+ }
+
+ def validateDuration(span: Span): Try[Span] = {
+ validate(span, span.getDuration, DurationIsInvalid, 0, span.getServiceName, span.getOperationName)
+ }
+
+ def validateSpanSize(span: Span): Try[Span] = {
+ if (extractorConfiguration.spanValidation.spanMaxSize.enable
+ && !extractorConfiguration.spanValidation.spanMaxSize.skipServices.contains(span.getServiceName.toLowerCase)) {
+ val spanSize = span.toByteArray.length
+ val maxSizeLimit = extractorConfiguration.spanValidation.spanMaxSize.maxSizeLimit
+ validate(span, spanSize, SpanSizeLimitExceeded, maxSizeLimit)
+ }
+ else
+ Success(span)
+ }
+
+ private def validate(span: Span,
+ valueToValidate: String,
+ msg: String,
+ serviceName: String): Try[Span] = {
+ if (Option(valueToValidate).getOrElse("").isEmpty) {
+ Failure(new IllegalArgumentException(msg.format(serviceName)))
+ } else {
+ Success(span)
+ }
+ }
+
+ private def validate(span: Span,
+ valueToValidate: String,
+ msg: String,
+ serviceName: String,
+ operationName: String): Try[Span] = {
+ if (Option(valueToValidate).getOrElse("").isEmpty) {
+ Failure(new IllegalArgumentException(msg.format(serviceName, operationName)))
+ } else {
+ Success(span)
+ }
+ }
+
+ private def validate(span: Span,
+ valueToValidate: Long,
+ msg: String,
+ smallestValidValue: Long,
+ serviceName: String,
+ operationName: String): Try[Span] = {
+ if (valueToValidate < smallestValidValue) {
+ Failure(new IllegalArgumentException(msg.format(valueToValidate, serviceName, operationName)))
+ } else {
+ Success(span)
+ }
+ }
+
+ private def validate(span: Span,
+ valueToValidate: Int,
+ msg: String,
+ highestValidValue: Int): Try[Span] = {
+
+ if (valueToValidate > highestValidValue) {
+ spanSizeLimitExceededMeter.mark()
+ LOGGER.debug(msg.format(span.getServiceName, span.getOperationName, span.getTraceId, valueToValidate, getProbableTagsExceedingSizeLimit(span)))
+ if (extractorConfiguration.spanValidation.spanMaxSize.logOnly) {
+ Success(span)
+ } else {
+ Success(truncateTags(span))
+ }
+ }
+ else {
+ Success(span)
+ }
+ }
+
+ private def getProbableTagsExceedingSizeLimit(span: Span): String = {
+ span.getTagsList.asScala
+ .filter(tag => tag.getVStrBytes.size > extractorConfiguration.spanValidation.spanMaxSize.maxSizeLimit)
+ .map(_.getKey)
+ .mkString(", ")
+ }
+
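+ // keeps only the tags whitelisted in skip.tags and appends a marker tag
+ // (infoTagKey/infoTagValue) so consumers can tell the span was truncated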
+ private def truncateTags(span: Span): Span = {
+ val skippedTags = span.getTagsList.asScala
+ .filter(tag => extractorConfiguration.spanValidation.spanMaxSize.skipTags.contains(tag.getKey.toLowerCase))
+
+ val spanBuilder = span.toBuilder
+ spanBuilder.clearTags()
+
+ skippedTags.foreach(spanBuilder.addTags)
+
+ val truncateTagKey = extractorConfiguration.spanValidation.spanMaxSize.infoTagKey
+ val truncateTagValue = extractorConfiguration.spanValidation.spanMaxSize.infoTagValue
+ spanBuilder.addTags(Tag.newBuilder().setKey(truncateTagKey).setVStr(truncateTagValue))
+
+ spanBuilder.build()
+ }
+
+
+ override def extractKeyValuePairs(recordBytes: Array[Byte]): List[KeyValuePair[Array[Byte], Array[Byte]]] = {
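+ // deserialize the proto bytes, run each validation step (any Failure short-circuits
+ // the chain), then decorate the span and serialize it in the configured output format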
+ Try(Span.parseFrom(recordBytes))
+ .flatMap(span => validateSpanSize(span))
+ .flatMap(span => validateServiceName(span))
+ .flatMap(span => validateOperationName(span))
+ .flatMap(span => validateSpanId(span))
+ .flatMap(span => validateTraceId(span))
+ .flatMap(span => validateStartTime(span))
+ .flatMap(span => validateDuration(span))
+ match {
+ case Success(span) =>
+ validSpanMeter.mark()
+
+ val updatedSpan = decorateSpan(span)
+ val kvPair = extractorConfiguration.outputFormat match {
+ case Format.JSON => KeyValuePair(updatedSpan.getTraceId.getBytes, printer.print(updatedSpan).getBytes(Charset.forName("UTF-8")))
+ case Format.PROTO => KeyValuePair(updatedSpan.getTraceId.getBytes, updatedSpan.toByteArray)
+ }
+ List(kvPair)
+
+ case Failure(ex) =>
+ invalidSpanMeter.mark()
+ ex match {
+ case ex: IllegalArgumentException => LOGGER.error(ex.getMessage)
+ case _: java.lang.Exception => LOGGER.error("Failed to deserialize the span proto bytes", ex)
+ }
+ Nil
+ }
+ }
+
+ private def decorateSpan(span: Span): Span = {
+ if (spanDecorators.isEmpty) {
+ return span
+ }
+
+ var spanBuilder = span.toBuilder
+ spanDecorators.foreach(decorator => {
+ spanBuilder = decorator.decorate(spanBuilder)
+ })
+ spanBuilder.build()
+ }
+}
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/SpanDecoratorFactory.scala b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/SpanDecoratorFactory.scala
new file mode 100644
index 000000000..e2bda4b75
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/SpanDecoratorFactory.scala
@@ -0,0 +1,25 @@
+package com.expedia.www.haystack.collector.commons
+
+import com.expedia.www.haystack.span.decorators.plugin.config.Plugin
+import com.expedia.www.haystack.span.decorators.plugin.loader.SpanDecoratorPluginLoader
+import com.expedia.www.haystack.span.decorators.{AdditionalTagsSpanDecorator, SpanDecorator}
+import com.typesafe.config.ConfigFactory
+import org.slf4j.Logger
+
+import scala.collection.JavaConverters._
+
+object SpanDecoratorFactory {
+ def get(pluginConfig: Plugin, additionalTagsConfig: Map[String, String], LOGGER: Logger): List[SpanDecorator] = {
+ var tempList = List[SpanDecorator]()
+ if (pluginConfig != null) {
+ val externalSpanDecorators: List[SpanDecorator] = SpanDecoratorPluginLoader.getInstance(LOGGER, pluginConfig).getSpanDecorators().asScala.toList
+ if (externalSpanDecorators != null) {
+ tempList = tempList ++: externalSpanDecorators
+ }
+ }
+
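+ // the additional-tags decorator is always applied; it is prepended to any plugin-provided decorators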
+ val additionalTagsSpanDecorator = new AdditionalTagsSpanDecorator()
+ additionalTagsSpanDecorator.init(ConfigFactory.parseMap(additionalTagsConfig.asJava))
+ tempList.::(additionalTagsSpanDecorator)
+ }
+}
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/TtlAndOperationNames.java b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/TtlAndOperationNames.java
new file mode 100644
index 000000000..5bf8b751f
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/TtlAndOperationNames.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons;
+
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * This class is used by ProtoSpanExtractor to keep track of the number of operation names for a particular service.
+ * It is written in Java because Java's Atomic classes are the preferred way of handling concurrent maps and sets
+ * in Scala, and the accesses to the objects that count operation names come from multiple threads.
+ */
+public class TtlAndOperationNames {
+ public final Set<String> operationNames = ConcurrentHashMap.newKeySet();
+ private final AtomicLong ttlMillis;
+
+ TtlAndOperationNames(long ttlMillis) {
+ this.ttlMillis = new AtomicLong(ttlMillis);
+ }
+
+ public long getTtlMillis() {
+ return ttlMillis.get();
+ }
+
+ public void setTtlMillis(long ttlMillis) {
+ this.ttlMillis.set(ttlMillis);
+ }
+}
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/config/ConfigurationLoader.scala b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/config/ConfigurationLoader.scala
new file mode 100644
index 000000000..7598b61b1
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/config/ConfigurationLoader.scala
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons.config
+
+import java.io.File
+import java.util
+import java.util.Properties
+
+import com.expedia.www.haystack.span.decorators.plugin.config.{Plugin, PluginConfiguration}
+import com.typesafe.config._
+import org.apache.kafka.clients.producer.ProducerConfig
+import org.apache.kafka.clients.producer.ProducerConfig.{KEY_SERIALIZER_CLASS_CONFIG, VALUE_SERIALIZER_CLASS_CONFIG}
+import org.apache.kafka.common.serialization.ByteArraySerializer
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConversions._
+import scala.collection.JavaConverters._
+
+object ConfigurationLoader {
+
+ private val LOGGER = LoggerFactory.getLogger(ConfigurationLoader.getClass)
+
+ private[haystack] val ENV_NAME_PREFIX = "HAYSTACK_PROP_"
+
+ /**
+ * Load and return the configuration.
+ * If the HAYSTACK_OVERRIDES_CONFIG_PATH env variable exists, we load that config file and use the base conf as fallback;
+ * otherwise we build the config from env variables (prefixed with `HAYSTACK_PROP_`) and use the base conf as fallback.
+ *
+ * @param resourceName name of the resource file to be loaded. Default value is `config/base.conf`
+ * @param envNamePrefix env variable prefix to override config values. Default is `HAYSTACK_PROP_`
+ * @return an instance of com.typesafe.config.Config
+ */
+ def loadConfigFileWithEnvOverrides(resourceName: String = "config/base.conf",
+ envNamePrefix: String = ENV_NAME_PREFIX): Config = {
+
+ require(resourceName != null && resourceName.length > 0, "resourceName is required")
+ require(envNamePrefix != null && envNamePrefix.length > 0, "envNamePrefix is required")
+
+ val baseConfig = ConfigFactory.load(resourceName)
+
+ val keysWithArrayValues = baseConfig.entrySet()
+ .asScala
+ .filter(_.getValue.valueType() == ConfigValueType.LIST)
+ .map(_.getKey)
+ .toSet
+
+ val config = sys.env.get("HAYSTACK_OVERRIDES_CONFIG_PATH") match {
+ case Some(overrideConfigPath) =>
+ val overrideConfig = ConfigFactory.parseFile(new File(overrideConfigPath))
+ ConfigFactory
+ .parseMap(parsePropertiesFromMap(sys.env, keysWithArrayValues, envNamePrefix).asJava)
+ .withFallback(overrideConfig)
+ .withFallback(baseConfig)
+ .resolve()
+ case _ => ConfigFactory
+ .parseMap(parsePropertiesFromMap(sys.env, keysWithArrayValues, envNamePrefix).asJava)
+ .withFallback(baseConfig)
+ .resolve()
+ }
+
+ // In key-value pairs that contain 'password' in the key, replace the value with asterisks
+ LOGGER.info(config.root()
+ .render(ConfigRenderOptions.defaults().setOriginComments(false))
+ .replaceAll("(?i)(\\\".*password\\\"\\s*:\\s*)\\\".+\\\"", "$1********"))
+
+ config
+ }
+
+ /**
+ * @return new config object with haystack specific environment variables
+ */
+ private[haystack] def parsePropertiesFromMap(envVars: Map[String, String],
+ keysWithArrayValues: Set[String],
+ envNamePrefix: String): Map[String, Object] = {
+ envVars.filter {
+ case (envName, _) => envName.startsWith(envNamePrefix)
+ } map {
+ case (envName, envValue) =>
+ val key = transformEnvVarName(envName, envNamePrefix)
+ if (keysWithArrayValues.contains(key)) (key, transformEnvVarArrayValue(envValue)) else (key, envValue)
+ }
+ }
+
+ /**
+ * converts the env variable to HOCON format
+ * e.g. the env variable HAYSTACK_PROP_KAFKA_STREAMS_NUM_STREAM_THREADS (with the default prefix) gets converted to kafka.streams.num.stream.threads
+ *
+ * @param env environment variable name
+ * @return variable name that complies with hocon key
+ */
+ private def transformEnvVarName(env: String, envNamePrefix: String): String = {
+ env.replaceFirst(envNamePrefix, "").toLowerCase.replace("_", ".")
+ }
+
+ /**
+ * converts the env variable value to iterable object if it starts and ends with '[' and ']' respectively.
+ *
+ * @param env environment variable value
+ * @return the values as a java list; throws a RuntimeException if the value is not wrapped in '[' and ']'
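+ *   e.g. the env value "[a,b,c]" yields the list ["a", "b", "c"]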
+ */
+ private def transformEnvVarArrayValue(env: String): java.util.List[String] = {
+ if (env.startsWith("[") && env.endsWith("]")) {
+ import scala.collection.JavaConverters._
+ env.substring(1, env.length - 1).split(',').filter(str => (str != null) && str.nonEmpty).toList.asJava
+ } else {
+ throw new RuntimeException("config key is of array type, so it should start and end with '[', ']' respectively")
+ }
+ }
+
+ def kafkaProducerConfig(config: Config): KafkaProduceConfiguration = {
+ val props = new Properties()
+
+ val kafka = config.getConfig("kafka.producer")
+
+ kafka.getConfig("props").entrySet() foreach {
+ kv => {
+ props.setProperty(kv.getKey, kv.getValue.unwrapped().toString)
+ }
+ }
+
+ props.put(KEY_SERIALIZER_CLASS_CONFIG, classOf[ByteArraySerializer].getCanonicalName)
+ props.put(VALUE_SERIALIZER_CLASS_CONFIG, classOf[ByteArraySerializer].getCanonicalName)
+
+ val produceTopic = kafka.getString("topic")
+
+ // verify if at least bootstrap server config is set
+ require(props.getProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG).nonEmpty)
+ require(produceTopic.nonEmpty)
+
+ KafkaProduceConfiguration(produceTopic, props)
+ }
+
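+ /**
+ * builds the extractor configuration. The expected HOCON shape is sketched below;
+ * the values shown are illustrative, not the shipped defaults:
+ *
+ *   extractor {
+ *     output.format = proto          // or json
+ *     spans.validation.max.size {
+ *       enable = false
+ *       log.only = false
+ *       max.size.limit = 65536
+ *       message.tag.key = "X-HAYSTACK-INFO"
+ *       message.tag.value = "tags truncated"
+ *       skip.tags = []
+ *       skip.services = []
+ *     }
+ *   }
+ */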
+ def extractorConfiguration(config: Config): ExtractorConfiguration = {
+ val extractor = config.getConfig("extractor")
+ val spanValidation = extractor.getConfig("spans.validation")
+ val maxSizeValidationConfig = spanValidation.getConfig("max.size")
+ ExtractorConfiguration(
+ outputFormat = if (extractor.hasPath("output.format")) Format.withName(extractor.getString("output.format")) else Format.PROTO,
+ spanValidation = SpanValidation(SpanMaxSize(
+ maxSizeValidationConfig.getBoolean("enable"),
+ maxSizeValidationConfig.getBoolean("log.only"),
+ maxSizeValidationConfig.getInt("max.size.limit"),
+ maxSizeValidationConfig.getString("message.tag.key"),
+ maxSizeValidationConfig.getString("message.tag.value"),
+ maxSizeValidationConfig.getStringList("skip.tags").map(_.toLowerCase),
+ maxSizeValidationConfig.getStringList("skip.services").map(_.toLowerCase))
+ ))
+ }
+
+ def externalKafkaConfiguration(config: Config): List[ExternalKafkaConfiguration] = {
+ if (!config.hasPath("external.kafka")) {
+ return List[ExternalKafkaConfiguration]()
+ }
+
+ val kafkaProducerConfig: ConfigObject = config.getObject("external.kafka")
+ kafkaProducerConfig.unwrapped().map(c => {
+ val props = new Properties()
+ val cfg = ConfigFactory.parseMap(c._2.asInstanceOf[util.HashMap[String, Object]])
+ val topic = cfg.getString("config.topic")
+ val tags = cfg.getConfig("tags").entrySet().foldRight(Map[String, String]())((t, tMap) => {
+ tMap + (t.getKey -> t.getValue.unwrapped().toString)
+ })
+ cfg.getConfig("config.props").entrySet() foreach {
+ kv => {
+ props.setProperty(kv.getKey, kv.getValue.unwrapped().toString)
+ }
+ }
+
+ props.put(KEY_SERIALIZER_CLASS_CONFIG, classOf[ByteArraySerializer].getCanonicalName)
+ props.put(VALUE_SERIALIZER_CLASS_CONFIG, classOf[ByteArraySerializer].getCanonicalName)
+
+ ExternalKafkaConfiguration(tags, KafkaProduceConfiguration(topic, props))
+ }).toList
+ }
+
+ def additionalTagsConfiguration(config: Config): Map[String, String] = {
+ if (!config.hasPath("additionaltags")) {
+ return Map[String, String]()
+ }
+ val additionalTagsConfig = config.getConfig("additionaltags")
+ val additionalTags = additionalTagsConfig.entrySet().foldRight(Map[String, String]())((t, tMap) => {
+ tMap + (t.getKey -> t.getValue.unwrapped().toString)
+ })
+ additionalTags
+ }
+
+ def pluginConfigurations(config: Config): Plugin = {
+ if (!config.hasPath("plugins")) {
+ return null
+ }
+ val directory = config.getString("plugins.directory")
+ val pluginConfigurationsList = config.getObject("plugins").unwrapped().filter(c => !"directory".equals(c._1)).map(c => {
+ val pluginConfig = ConfigFactory.parseMap(c._2.asInstanceOf[util.HashMap[String, Object]])
+ new PluginConfiguration(
+ pluginConfig.getString("name"),
+ pluginConfig.getConfig("config")
+ )
+ }).toList
+ new Plugin(directory, pluginConfigurationsList)
+ }
+}
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/config/ExtractorConfiguration.scala b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/config/ExtractorConfiguration.scala
new file mode 100644
index 000000000..e9db322ce
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/config/ExtractorConfiguration.scala
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons.config
+
+import com.expedia.www.haystack.collector.commons.config.Format.Format
+
+
+object Format extends Enumeration {
+ type Format = Value
+ val JSON = Value("json")
+ val PROTO = Value("proto")
+}
+
+case class SpanValidation(spanMaxSize: SpanMaxSize)
+
+case class SpanMaxSize(enable: Boolean,
+ logOnly: Boolean,
+ maxSizeLimit: Int,
+ infoTagKey: String,
+ infoTagValue: String,
+ skipTags: Seq[String],
+ skipServices: Seq[String])
+
+case class ExtractorConfiguration(outputFormat: Format,
+ spanValidation: SpanValidation)
+
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/config/KafkaProduceConfiguration.scala b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/config/KafkaProduceConfiguration.scala
new file mode 100644
index 000000000..444b9d85f
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/config/KafkaProduceConfiguration.scala
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons.config
+
+import java.util.Properties
+
+case class KafkaProduceConfiguration(topic: String, props: Properties)
+
+case class ExternalKafkaConfiguration(tags: Map[String, String], kafkaProduceConfiguration: KafkaProduceConfiguration)
\ No newline at end of file
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/health/HealthController.scala b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/health/HealthController.scala
new file mode 100644
index 000000000..ee194adb3
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/health/HealthController.scala
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons.health
+
+import java.util.concurrent.atomic.AtomicReference
+
+import com.expedia.www.haystack.collector.commons.health.HealthStatus.HealthStatus
+import org.slf4j.LoggerFactory
+
+import scala.collection.mutable
+
+/**
+ * provides the health check of app
+ */
+object HealthController {
+
+ private val LOGGER = LoggerFactory.getLogger(HealthController.getClass)
+
+ // sets the initial health state as 'not set'
+ private val status = new AtomicReference[HealthStatus](HealthStatus.NOT_SET)
+
+ private val listeners = mutable.ListBuffer[HealthStatusChangeListener]()
+
+ /**
+ * set the app status as healthy
+ */
+ def setHealthy(): Unit = {
+ LOGGER.info("Setting the app status as 'HEALTHY'")
+ if(status.getAndSet(HealthStatus.HEALTHY) != HealthStatus.HEALTHY) notifyChange(HealthStatus.HEALTHY)
+ }
+
+ /**
+ * set the app status as unhealthy
+ */
+ def setUnhealthy(): Unit = {
+ LOGGER.error("Setting the app status as 'UNHEALTHY'")
+ if(status.getAndSet(HealthStatus.UNHEALTHY) != HealthStatus.UNHEALTHY) notifyChange(HealthStatus.UNHEALTHY)
+ }
+
+ /**
+ * @return true if app is healthy else false
+ */
+ def isHealthy: Boolean = status.get() == HealthStatus.HEALTHY
+
+ /**
+ * add health change listener that will be called on any change in the health status
+ * @param l listener
+ */
+ def addListener(l: HealthStatusChangeListener): Unit = listeners += l
+
+ private def notifyChange(status: HealthStatus): Unit = {
+ listeners foreach {
+ l =>
+ l.onChange(status)
+ }
+ }
+}
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/health/HealthStatus.scala b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/health/HealthStatus.scala
new file mode 100644
index 000000000..c58e0c57c
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/health/HealthStatus.scala
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons.health
+
+object HealthStatus extends Enumeration {
+ type HealthStatus = Value
+ val HEALTHY, UNHEALTHY, NOT_SET = Value
+}
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/health/HealthStatusChangeListener.scala b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/health/HealthStatusChangeListener.scala
new file mode 100644
index 000000000..4bad6ac32
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/health/HealthStatusChangeListener.scala
@@ -0,0 +1,33 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons.health
+
+import com.expedia.www.haystack.collector.commons.health.HealthStatus.HealthStatus
+
+/**
+ * health status listener
+ */
+trait HealthStatusChangeListener {
+
+ /**
+ * called whenever there is a state change in health
+ * @param status current health status
+ */
+ def onChange(status: HealthStatus): Unit
+}
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/health/UpdateHealthStatusFile.scala b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/health/UpdateHealthStatusFile.scala
new file mode 100644
index 000000000..296c34aaf
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/health/UpdateHealthStatusFile.scala
@@ -0,0 +1,41 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons.health
+
+import java.nio.charset.StandardCharsets
+import java.nio.file.{Files, Paths}
+
+import com.expedia.www.haystack.collector.commons.health.HealthStatus.HealthStatus
+
+/**
+ * writes the current health status to a status file. This can be used to report health to external systems
+ * like container orchestration frameworks.
+ * @param statusFilePath: file path where health status will be recorded.
+ */
+class UpdateHealthStatusFile(statusFilePath: String) extends HealthStatusChangeListener {
+
+ /**
+ * called on any change in the app's health status
+ * @param status: current health status
+ */
+ override def onChange(status: HealthStatus): Unit = {
+ val isHealthy = if (status == HealthStatus.HEALTHY) "true" else "false"
+ Files.write(Paths.get(statusFilePath), isHealthy.getBytes(StandardCharsets.UTF_8))
+ }
+}
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/logger/LoggerUtils.scala b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/logger/LoggerUtils.scala
new file mode 100644
index 000000000..6ad2d4dc4
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/logger/LoggerUtils.scala
@@ -0,0 +1,50 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons.logger
+
+import org.slf4j.{ILoggerFactory, LoggerFactory}
+
+object LoggerUtils {
+
+ /**
+ * shutdown the logger using reflection.
+ * for logback, it calls stop() method on loggerContext
+ * for log4j, it calls close() method on log4j context
+ */
+ def shutdownLogger(): Unit = {
+ val factory = LoggerFactory.getILoggerFactory
+ shutdownLoggerWithFactory(factory)
+ }
+
+ // just visible for testing
+ def shutdownLoggerWithFactory(factory: ILoggerFactory): Unit = {
+ val clazz = factory.getClass
+ try {
+ clazz.getMethod("stop").invoke(factory) // logback
+ } catch {
+ case _: ReflectiveOperationException =>
+ try {
+ clazz.getMethod("close").invoke(factory) // log4j
+ } catch {
+ case _: Exception =>
+ }
+ case _: Exception =>
+ }
+ }
+}
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/record/KeyValueExtractor.scala b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/record/KeyValueExtractor.scala
new file mode 100644
index 000000000..c869c7a20
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/record/KeyValueExtractor.scala
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons.record
+
+case class KeyValuePair[K, V](key: K, value: V)
+
+trait KeyValueExtractor {
+ def configure(): Unit
+ def extractKeyValuePairs(recordBytes: Array[Byte]): List[KeyValuePair[Array[Byte], Array[Byte]]]
+}
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/sink/RecordSink.scala b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/sink/RecordSink.scala
new file mode 100644
index 000000000..b57eff92d
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/sink/RecordSink.scala
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons.sink
+
+import java.io.Closeable
+
+import com.expedia.www.haystack.collector.commons.record.KeyValuePair
+
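+/**
+ * a sink for extracted key-value records, e.g. a kafka producer. `toAsync` writes
+ * the record asynchronously; the optional callback is invoked on completion with
+ * the record and the exception (null on success).
+ */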
+trait RecordSink extends Closeable {
+ def toAsync(kvPair: KeyValuePair[Array[Byte], Array[Byte]],
+ callback: (KeyValuePair[Array[Byte], Array[Byte]], Exception) => Unit = null): Unit
+}
diff --git a/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/sink/kafka/KafkaRecordSink.scala b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/sink/kafka/KafkaRecordSink.scala
new file mode 100644
index 000000000..cd91d796a
--- /dev/null
+++ b/collector/commons/src/main/scala/com/expedia/www/haystack/collector/commons/sink/kafka/KafkaRecordSink.scala
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons.sink.kafka
+
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.collector.commons.MetricsSupport
+import com.expedia.www.haystack.collector.commons.config.{ExternalKafkaConfiguration, KafkaProduceConfiguration}
+import com.expedia.www.haystack.collector.commons.record.KeyValuePair
+import com.expedia.www.haystack.collector.commons.sink.RecordSink
+import org.apache.kafka.clients.producer.{ProducerRecord, _}
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+
+class KafkaRecordSink(config: KafkaProduceConfiguration,
+ additionalKafkaProducerConfigs: List[ExternalKafkaConfiguration]) extends RecordSink with MetricsSupport {
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[KafkaRecordSink])
+
+ private val defaultProducer: KafkaProducer[Array[Byte], Array[Byte]] = new KafkaProducer[Array[Byte], Array[Byte]](config.props)
+ private val additionalProducers: List[KafkaProducers] = additionalKafkaProducerConfigs
+ .map(cfg => {
+ KafkaProducers(cfg.tags, cfg.kafkaProduceConfiguration.topic, new KafkaProducer[Array[Byte], Array[Byte]](cfg.kafkaProduceConfiguration.props))
+ })
+
+ override def toAsync(kvPair: KeyValuePair[Array[Byte], Array[Byte]],
+ callback: (KeyValuePair[Array[Byte], Array[Byte]], Exception) => Unit = null): Unit = {
+ val kafkaMessage = new ProducerRecord(config.topic, kvPair.key, kvPair.value)
+
+ defaultProducer.send(kafkaMessage, new Callback {
+ override def onCompletion(recordMetadata: RecordMetadata, e: Exception): Unit = {
+ if (e != null) {
+ LOGGER.error(s"Fail to produce the message to kafka for topic=${config.topic} with reason", e)
+ }
+ if(callback != null) callback(kvPair, e)
+ }
+ })
+
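+ // fan out: besides the default topic, forward the record to every external
+ // producer whose configured tags all match the span's tags (skipped entirely
+ // when no external producers are configured)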
+ if (additionalProducers.nonEmpty) {
+ getMatchingProducers(additionalProducers, Span.parseFrom(kvPair.value)).foreach(p => {
+ val tempKafkaMessage = new ProducerRecord(p.topic, kvPair.key, kvPair.value)
+ p.producer.send(tempKafkaMessage, new Callback {
+ override def onCompletion(recordMetadata: RecordMetadata, e: Exception): Unit = {
+ if (e != null) {
+ LOGGER.error(s"Failed to produce the message to kafka topic=${p.topic}", e)
+ }
+ if(callback != null) callback(kvPair, e)
+ }
+ })
+ })
+ }
+ }
+
+ private def getMatchingProducers(producers: List[KafkaProducers], span: Span): List[KafkaProducers] = {
+ val tagList: List[Tag] = span.getTagsList.asScala.toList
+ producers.filter(producer => producer.isMatched(tagList))
+ }
+
+ override def close(): Unit = {
+ if(defaultProducer != null) {
+ defaultProducer.flush()
+ defaultProducer.close()
+ }
+ additionalProducers.foreach(p => p.close())
+ }
+
+ case class KafkaProducers(tags: Map[String, String], topic: String, producer: KafkaProducer[Array[Byte], Array[Byte]]) {
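+ // a producer matches a span when every configured tag key/value pair appears
+ // verbatim among the span's string tags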
+ def isMatched(spanTags: List[Tag]): Boolean = {
+ val filteredTags = spanTags.filter(t => t.getVStr.equals(tags.getOrElse(t.getKey, null)))
+ filteredTags.size.equals(tags.size)
+ }
+
+ def close(): Unit = {
+ producer.flush()
+ producer.close()
+ }
+ }
+}
diff --git a/collector/commons/src/test/resources/logback-test.xml b/collector/commons/src/test/resources/logback-test.xml
new file mode 100644
index 000000000..adfa02c68
--- /dev/null
+++ b/collector/commons/src/test/resources/logback-test.xml
@@ -0,0 +1 @@
+<configuration/>
\ No newline at end of file
diff --git a/collector/commons/src/test/scala/com/expedia/www/haystack/collector/commons/unit/HealthControllerSpec.scala b/collector/commons/src/test/scala/com/expedia/www/haystack/collector/commons/unit/HealthControllerSpec.scala
new file mode 100644
index 000000000..022cdc0ac
--- /dev/null
+++ b/collector/commons/src/test/scala/com/expedia/www/haystack/collector/commons/unit/HealthControllerSpec.scala
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons.unit
+
+import com.expedia.www.haystack.collector.commons.health.{HealthController, UpdateHealthStatusFile}
+import org.scalatest.{FunSpec, Matchers}
+
+class HealthControllerSpec extends FunSpec with Matchers {
+
+ private val statusFile = "/tmp/app-health.status"
+
+ describe("file based health checker") {
+ it("should set the state as healthy if previous state is not set or unhealthy") {
+ val healthChecker = HealthController
+ healthChecker.addListener(new UpdateHealthStatusFile(statusFile))
+ healthChecker.isHealthy shouldBe false
+ healthChecker.setHealthy()
+ healthChecker.isHealthy shouldBe true
+ readStatusLine shouldEqual "true"
+ }
+
+ it("should set the state as unhealthy if previous state is healthy") {
+ val healthChecker = HealthController
+ healthChecker.addListener(new UpdateHealthStatusFile(statusFile))
+
+ healthChecker.setHealthy()
+ healthChecker.isHealthy shouldBe true
+ readStatusLine shouldEqual "true"
+
+ healthChecker.setUnhealthy()
+ healthChecker.isHealthy shouldBe false
+ readStatusLine shouldEqual "false"
+ }
+ }
+
+ private def readStatusLine = scala.io.Source.fromFile(statusFile).getLines().toList.head
+}
\ No newline at end of file
diff --git a/collector/commons/src/test/scala/com/expedia/www/haystack/collector/commons/unit/KeyExtractorSpec.scala b/collector/commons/src/test/scala/com/expedia/www/haystack/collector/commons/unit/KeyExtractorSpec.scala
new file mode 100644
index 000000000..ab8419956
--- /dev/null
+++ b/collector/commons/src/test/scala/com/expedia/www/haystack/collector/commons/unit/KeyExtractorSpec.scala
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons.unit
+
+import java.nio.charset.Charset
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.collector.commons.config.{ExtractorConfiguration, Format, SpanMaxSize, SpanValidation}
+import com.expedia.www.haystack.collector.commons.{MetricsSupport, ProtoSpanExtractor}
+import com.google.protobuf.util.JsonFormat
+import org.scalatest.{FunSpec, Matchers}
+import org.slf4j.LoggerFactory
+
+class KeyExtractorSpec extends FunSpec with Matchers with MetricsSupport {
+ private val StartTimeMicros = System.currentTimeMillis() * 1000
+ private val DurationMicros = 42
+
+ describe("TransactionId Key Extractor with proto output type") {
+ it("should read the proto span object and set the right partition key and set value as the proto byte stream") {
+ val spanMap = Map(
+ "trace-id-1" -> createSpan("trace-id-1", "spanId_1", "service_1", "operation", StartTimeMicros, DurationMicros),
+ "trace-id-2" -> createSpan("trace-id-2", "spanId_2", "service_2", "operation", StartTimeMicros, DurationMicros))
+
+ val spanValidationConfig = SpanValidation(SpanMaxSize(enable = false, logOnly = false, 5000, "", "", Seq(), Seq()))
+
+ spanMap.foreach(sp => {
+ val kvPairs = new ProtoSpanExtractor(ExtractorConfiguration(Format.PROTO, spanValidationConfig), LoggerFactory.getLogger(classOf[ProtoSpanExtractor]), List()).extractKeyValuePairs(sp._2.toByteArray)
+ kvPairs.size shouldBe 1
+
+ kvPairs.head.key shouldBe sp._1.getBytes
+ kvPairs.head.value shouldBe sp._2.toByteArray
+ })
+ }
+ }
+
+ describe("TransactionId Key Extractor with json output type") {
+ it("should read the proto span object and set the right partition key and set value as the json byte stream") {
+ val spanMap = Map(
+ "trace-id-1" -> createSpan("trace-id-1", "spanId_1", "service_1", "operation", StartTimeMicros, 1),
+ "trace-id-2" -> createSpan("trace-id-2", "spanId_2", "service_2", "operation", StartTimeMicros, 1))
+
+ val spanValidationConfig = SpanValidation(SpanMaxSize(enable = false, logOnly = false, 5000, "", "", Seq(), Seq()))
+
+ spanMap.foreach(sp => {
+ val kvPairs = new ProtoSpanExtractor(ExtractorConfiguration(Format.JSON, spanValidationConfig), LoggerFactory.getLogger(classOf[ProtoSpanExtractor]), List()).extractKeyValuePairs(sp._2.toByteArray)
+ kvPairs.size shouldBe 1
+
+ kvPairs.head.key shouldBe sp._1.getBytes
+ kvPairs.head.value shouldBe JsonFormat.printer().omittingInsignificantWhitespace().print(sp._2).getBytes(Charset.forName("UTF-8"))
+ })
+ }
+ }
+
+ private def createSpan(traceId: String, spanId: String, serviceName: String, operationName: String,
+ startTime: Long, duration: Long) = {
+ Span.newBuilder()
+ .setServiceName(serviceName)
+ .setTraceId(traceId)
+ .setSpanId(spanId)
+ .setOperationName(operationName)
+ .setStartTime(startTime)
+ .setDuration(duration)
+ .build()
+ }
+}
diff --git a/collector/commons/src/test/scala/com/expedia/www/haystack/collector/commons/unit/LoggerUtilsSpec.scala b/collector/commons/src/test/scala/com/expedia/www/haystack/collector/commons/unit/LoggerUtilsSpec.scala
new file mode 100644
index 000000000..081d2a517
--- /dev/null
+++ b/collector/commons/src/test/scala/com/expedia/www/haystack/collector/commons/unit/LoggerUtilsSpec.scala
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.collector.commons.unit
+
+import com.expedia.www.haystack.collector.commons.logger.LoggerUtils
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FunSpec, Matchers}
+import org.slf4j.{ILoggerFactory, Logger}
+
+class LoggerUtilsSpec extends FunSpec with Matchers with EasyMockSugar {
+
+ describe("Logger Utils") {
+ it("should close the logger if it has stop method for e.g. logback") {
+ val logger = mock[Logger]
+ var isStopped = false
+
+ val loggerFactory = new ILoggerFactory {
+ override def getLogger(s: String): Logger = logger
+ def stop(): Unit = isStopped = true
+ }
+
+ whenExecuting(logger) {
+ LoggerUtils.shutdownLoggerWithFactory(loggerFactory)
+ isStopped shouldBe true
+ }
+ }
+
+ it("should close the logger if it has close method for e.g. log4j") {
+ val logger = mock[Logger]
+ var isStopped = false
+
+ val loggerFactory = new ILoggerFactory {
+ override def getLogger(s: String): Logger = logger
+ def close(): Unit = isStopped = true
+ }
+
+ whenExecuting(logger) {
+ LoggerUtils.shutdownLoggerWithFactory(loggerFactory)
+ isStopped shouldBe true
+ }
+ }
+
+ it("should not able to close the logger if it has neither stop/close method") {
+ val logger = mock[Logger]
+ var isStopped = false
+
+ val loggerFactory = new ILoggerFactory {
+ override def getLogger(s: String): Logger = logger
+ def shutdown(): Unit = isStopped = true
+ }
+
+ whenExecuting(logger) {
+ LoggerUtils.shutdownLoggerWithFactory(loggerFactory)
+ isStopped shouldBe false
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/collector/commons/src/test/scala/com/expedia/www/haystack/collector/commons/unit/ProtoSpanExtractorSpec.scala b/collector/commons/src/test/scala/com/expedia/www/haystack/collector/commons/unit/ProtoSpanExtractorSpec.scala
new file mode 100644
index 000000000..729d89752
--- /dev/null
+++ b/collector/commons/src/test/scala/com/expedia/www/haystack/collector/commons/unit/ProtoSpanExtractorSpec.scala
@@ -0,0 +1,145 @@
+package com.expedia.www.haystack.collector.commons.unit
+
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.collector.commons.ProtoSpanExtractor
+import com.expedia.www.haystack.collector.commons.ProtoSpanExtractor._
+import com.expedia.www.haystack.collector.commons.config.{ExtractorConfiguration, Format, SpanMaxSize, SpanValidation}
+import org.mockito.Mockito
+import org.mockito.Mockito.verify
+import org.scalatest.mockito.MockitoSugar
+import org.scalatest.{FunSpec, Matchers}
+import org.slf4j.Logger
+
+import scala.collection.JavaConverters._
+import scala.collection.immutable.ListMap
+import scala.collection.mutable.ArrayBuffer
+
+class ProtoSpanExtractorSpec extends FunSpec with Matchers with MockitoSugar {
+
+ private val EmptyString = ""
+ private val NullString = null
+ private val SpanId = "span ID"
+ private val TraceId = "trace ID"
+ private val ServiceName1 = "service name 1"
+ private val ServiceName2 = "service name 2"
+ private val OperationName1 = "operation name 1"
+ private val OperationName2 = "operation name 2"
+ private val StartTime = System.currentTimeMillis() * 1000
+ private val Duration = 42
+ private val Negative = -42
+ private val SampleErrorTag = Tag.newBuilder().setKey("error").setVBool(true).build()
+ private val SpanSizeLimit = 800
+ private val SkipTagTruncationServiceName = "skip_tag_truncation_service"
+
+ describe("Protobuf Span Extractor") {
+ val mockLogger = mock[Logger]
+
+ val spanSizeValidationConfig = SpanValidation(SpanMaxSize(enable = true, logOnly = false, SpanSizeLimit, "X-HAYSTACK-SPAN-INFO", "Tags Truncated", Seq("error"), Seq(SkipTagTruncationServiceName)))
+ val protoSpanExtractor = new ProtoSpanExtractor(ExtractorConfiguration(Format.PROTO, spanSizeValidationConfig), mockLogger, List())
+
+ val largestInvalidStartTime = SmallestAllowedStartTimeMicros - 1
+
+
+ // @formatter:off
+ val nullSpanIdSpan = createSpan(NullString, TraceId, ServiceName1, OperationName1, StartTime, Duration, createTags(1))
+ val emptySpanIdSpan = createSpan(EmptyString, TraceId, ServiceName2, OperationName1, StartTime, Duration, createTags(1))
+ val nullTraceIdSpan = createSpan(SpanId, NullString, ServiceName1, OperationName1, StartTime, Duration, createTags(1))
+ val emptyTraceIdSpan = createSpan(SpanId, EmptyString, ServiceName2, OperationName1, StartTime, Duration, createTags(1))
+ val nullServiceNameSpan = createSpan(SpanId, TraceId, NullString, OperationName1, StartTime, Duration, createTags(1))
+ val emptyServiceNameSpan = createSpan(SpanId, TraceId, EmptyString, OperationName2, StartTime, Duration, createTags(1))
+ val nullOperationNameSpan = createSpan(SpanId, TraceId, ServiceName1, NullString, StartTime, Duration, createTags(1))
+ val emptyOperationNameSpan = createSpan(SpanId, TraceId, ServiceName2, EmptyString, StartTime, Duration, createTags(1))
+ val tooSmallStartTimeSpan = createSpan(SpanId, TraceId, ServiceName1, OperationName1, largestInvalidStartTime, Duration, createTags(1))
+ val negativeStartTimeSpan = createSpan(SpanId, TraceId, ServiceName2, OperationName1, Negative, Duration, createTags(1))
+ val tooSmallDurationSpan = createSpan(SpanId, TraceId, ServiceName1, OperationName1, StartTime, Negative, createTags(1))
+ val largeSizeSpan = createSpan(SpanId, TraceId, ServiceName1, OperationName1, StartTime, Duration, createTags(50))
+ val largeSizeSpanWithSkippedService = createSpan(SpanId, TraceId, SkipTagTruncationServiceName, OperationName1, StartTime, Duration, createTags(50))
+ val spanMap = ListMap(
+ "NullSpanId" -> (nullSpanIdSpan, SpanIdIsRequired.format(ServiceName1, OperationName1)),
+ "EmptySpanId" -> (emptySpanIdSpan, SpanIdIsRequired.format(ServiceName2, OperationName1)),
+ "NullTraceId" -> (nullTraceIdSpan, TraceIdIsRequired.format(ServiceName1, OperationName1)),
+ "EmptyTraceId" -> (emptyTraceIdSpan, TraceIdIsRequired.format(ServiceName2, OperationName1)),
+ "NullServiceName" -> (nullServiceNameSpan, ServiceNameIsRequired.format(nullServiceNameSpan.toString)),
+ "EmptyServiceName" -> (emptyServiceNameSpan, ServiceNameIsRequired.format(emptyServiceNameSpan.toString)),
+ "NullOperationName" -> (nullOperationNameSpan, OperationNameIsRequired.format(ServiceName1)),
+ "EmptyOperationName" -> (emptyOperationNameSpan, OperationNameIsRequired.format(ServiceName2)),
+ "TooSmallStartTime" -> (tooSmallStartTimeSpan, StartTimeIsInvalid.format(largestInvalidStartTime, ServiceName1, OperationName1)),
+ "NegativeStartTime" -> (negativeStartTimeSpan, StartTimeIsInvalid.format(Negative, ServiceName2, OperationName1)),
+ "TooSmallDuration" -> (tooSmallDurationSpan, DurationIsInvalid.format(Negative, ServiceName1, OperationName1))
+ )
+ // @formatter:on
+ it("should fail validation for spans with invalid data") {
+ spanMap.foreach(sp => {
+ val kvPairs = protoSpanExtractor.extractKeyValuePairs(sp._2._1.toByteArray)
+ withClue(sp._1) {
+ kvPairs shouldBe Nil
+ verify(mockLogger).error(sp._2._2)
+ }
+ })
+ Mockito.verifyNoMoreInteractions(mockLogger)
+ }
+
+ it("should truncate tags to reduce span when span size exceeded") {
+ val kvPairs = protoSpanExtractor.extractKeyValuePairs(largeSizeSpan.toByteArray)
+ kvPairs.foreach { kv =>
+ val spanRecordBytes = kv.value
+ val span = Span.parseFrom(spanRecordBytes)
+ assert(span.getTagsList.asScala.exists(tag => tag.getKey.equalsIgnoreCase("error")))
+ span.getTagsCount shouldBe 2
+ assert(spanRecordBytes.length < SpanSizeLimit)
+ }
+ }
+
+ it("shouldn't truncate tags to for skipped service even when span size exceeds limit") {
+ val kvPairs = protoSpanExtractor.extractKeyValuePairs(largeSizeSpanWithSkippedService.toByteArray)
+ kvPairs.foreach { kv =>
+ val spanRecordBytes = kv.value
+ val span = Span.parseFrom(spanRecordBytes)
+
+ spanRecordBytes.length > SpanSizeLimit shouldEqual true
+ span.getTagsList.size shouldEqual 50
+ span.getTagsList.asScala.exists(tag => tag.getKey.equalsIgnoreCase("error")) shouldEqual true
+
+ }
+ }
+ }
+
+ private def createTags(maxTagsLimit: Int): Array[Tag] = {
+ val tags = ArrayBuffer[Tag]()
+ // adding Error Tag by default
+ tags += SampleErrorTag
+ for (i <- 0 until (maxTagsLimit - 1)) { // creating one less tag since error tag is already added above
+ tags += Tag.newBuilder().setKey("key" + i).setVStr("value" + i).build()
+ }
+ tags.toArray
+ }
+
+ private def createSpan(spanId: String,
+ traceId: String,
+ serviceName: String,
+ operationName: String,
+ startTimeMicros: Long,
+ durationMicros: Long,
+ tags: Seq[Tag]) = {
+ val builder = Span.newBuilder()
+ if (spanId != null) {
+ builder.setSpanId(spanId)
+ }
+ if (traceId != null) {
+ builder.setTraceId(traceId)
+ }
+ if (serviceName != null) {
+ builder.setServiceName(serviceName)
+ }
+ if (operationName != null) {
+ builder.setOperationName(operationName)
+ }
+ if (tags.nonEmpty) {
+ tags.foreach(tag => builder.addTags(tag))
+ }
+ builder.setStartTime(startTimeMicros)
+ builder.setDuration(durationMicros)
+ builder.build()
+ }
+}
diff --git a/collector/deployment/scripts/publish-to-docker-hub.sh b/collector/deployment/scripts/publish-to-docker-hub.sh
new file mode 100755
index 000000000..9f22ceaf2
--- /dev/null
+++ b/collector/deployment/scripts/publish-to-docker-hub.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+set -e
+
+QUALIFIED_DOCKER_IMAGE_NAME=$DOCKER_ORG/$DOCKER_IMAGE_NAME
+echo "DOCKER_ORG=$DOCKER_ORG, DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME, QUALIFIED_DOCKER_IMAGE_NAME=$QUALIFIED_DOCKER_IMAGE_NAME"
+echo "BRANCH=$BRANCH, TAG=$TAG, SHA=$SHA"
+
+# login
+docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD
+
+# Add tags
+if [[ $TAG =~ ([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
+ echo "releasing semantic versions"
+
+ unset MAJOR MINOR PATCH
+ MAJOR="${BASH_REMATCH[1]}"
+ MINOR="${BASH_REMATCH[2]}"
+ PATCH="${BASH_REMATCH[3]}"
+
+ # for tag, add MAJOR, MAJOR.MINOR, MAJOR.MINOR.PATCH and latest as tag
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR
+
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR
+
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR.$PATCH
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR.$PATCH
+
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:latest
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:latest
+
+ elif [[ "$BRANCH" == "master" ]]; then
+ echo "releasing master branch"
+
+ # for 'master' branch, add SHA as tags
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$SHA
+
+ # publish image with tags
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME
+fi
diff --git a/collector/deployment/terraform/http-span-collector/main.tf b/collector/deployment/terraform/http-span-collector/main.tf
new file mode 100644
index 000000000..cb136096c
--- /dev/null
+++ b/collector/deployment/terraform/http-span-collector/main.tf
@@ -0,0 +1,77 @@
+locals {
+ app_name = "${var.app_name}"
+ config_file_path = "${path.module}/templates/http-span-collector_conf.tpl"
+ deployment_yaml_file_path = "${path.module}/templates/deployment_yaml.tpl"
+ count = "${var.enabled?1:0}"
+ checksum = "${sha1("${data.template_file.config_data.rendered}")}"
+ configmap_name = "${local.app_name}-${local.checksum}"
+}
+
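+# The sha1 checksum of the rendered config is baked into the configmap name, so any config
+# change produces a new configmap and the deployment rendered below rolls its pods.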
+resource "kubernetes_config_map" "haystack-config" {
+ metadata {
+ name = "${local.configmap_name}"
+ namespace = "${var.namespace}"
+ }
+ data {
+ "http-span-collector.conf" = "${data.template_file.config_data.rendered}"
+ }
+ count = "${local.count}"
+}
+
+data "template_file" "config_data" {
+ template = "${file("${local.config_file_path}")}"
+
+ vars {
+ kafka_endpoint = "${var.kafka_endpoint}"
+ container_port = "${var.container_port}"
+ max_spansize_validation_enabled = "${var.max_spansize_validation_enabled}"
+ max_spansize_log_only = "${var.max_spansize_log_only}"
+ max_spansize_limit = "${var.max_spansize_limit}"
+ message_tag_key = "${var.message_tag_key}"
+ message_tag_value = "${var.message_tag_value}"
+ max_spansize_skip_tags = "${var.max_spansize_skip_tags}"
+ max_spansize_skip_services = "${var.max_spansize_skip_services}"
+ }
+}
+
+
+data "template_file" "deployment_yaml" {
+ template = "${file("${local.deployment_yaml_file_path}")}"
+ vars {
+ app_name = "${local.app_name}"
+ namespace = "${var.namespace}"
+ graphite_port = "${var.graphite_port}"
+ graphite_host = "${var.graphite_hostname}"
+ node_selecter_label = "${var.node_selecter_label}"
+ image = "${var.image}"
+ replicas = "${var.replicas}"
+ memory_limit = "${var.memory_limit}"
+ memory_request = "${var.memory_request}"
+ jvm_memory_limit = "${var.jvm_memory_limit}"
+ cpu_limit = "${var.cpu_limit}"
+ cpu_request = "${var.cpu_request}"
+ configmap_name = "${local.configmap_name}"
+ env_vars = "${indent(9,"${var.env_vars}")}"
+ container_port = "${var.container_port}"
+ service_port = "${var.service_port}"
+ }
+}
+
+resource "null_resource" "kubectl_apply" {
+ triggers {
+ template = "${data.template_file.deployment_yaml.rendered}"
+ }
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} apply -f - --context ${var.kubectl_context_name}"
+ }
+ count = "${local.count}"
+}
+
+
+resource "null_resource" "kubectl_destroy" {
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} delete -f - --context ${var.kubectl_context_name}"
+ when = "destroy"
+ }
+ count = "${local.count}"
+}
diff --git a/collector/deployment/terraform/http-span-collector/outputs.tf b/collector/deployment/terraform/http-span-collector/outputs.tf
new file mode 100644
index 000000000..e69de29bb
diff --git a/collector/deployment/terraform/http-span-collector/templates/deployment_yaml.tpl b/collector/deployment/terraform/http-span-collector/templates/deployment_yaml.tpl
new file mode 100644
index 000000000..2347a1e0c
--- /dev/null
+++ b/collector/deployment/terraform/http-span-collector/templates/deployment_yaml.tpl
@@ -0,0 +1,74 @@
+# ------------------- Deployment ------------------- #
+
+kind: Deployment
+apiVersion: apps/v1beta2
+metadata:
+  labels:
+    k8s-app: ${app_name}
+  name: ${app_name}
+  namespace: ${namespace}
+spec:
+  replicas: ${replicas}
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      k8s-app: ${app_name}
+  template:
+    metadata:
+      labels:
+        k8s-app: ${app_name}
+    spec:
+      containers:
+      - name: ${app_name}
+        image: ${image}
+        volumeMounts:
+        # Mount the rendered collector configuration from the ConfigMap
+        - mountPath: /config
+          name: config-volume
+        resources:
+          limits:
+            cpu: ${cpu_limit}
+            memory: ${memory_limit}Mi
+          requests:
+            cpu: ${cpu_request}
+            memory: ${memory_request}Mi
+        env:
+        - name: "HAYSTACK_OVERRIDES_CONFIG_PATH"
+          value: "/config/http-span-collector.conf"
+        - name: "HAYSTACK_GRAPHITE_HOST"
+          value: "${graphite_host}"
+        - name: "HAYSTACK_GRAPHITE_PORT"
+          value: "${graphite_port}"
+        - name: "JAVA_XMS"
+          value: "${jvm_memory_limit}m"
+        - name: "JAVA_XMX"
+          value: "${jvm_memory_limit}m"
+        ${env_vars}
+        livenessProbe:
+          httpGet:
+            path: /isActive
+            port: ${container_port}
+          initialDelaySeconds: 30
+          periodSeconds: 5
+          failureThreshold: 6
+      nodeSelector:
+        ${node_selecter_label}
+      volumes:
+      - name: config-volume
+        configMap:
+          name: ${configmap_name}
+# ------------------- Service ------------------- #
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    k8s-app: ${app_name}
+  name: ${app_name}
+  namespace: ${namespace}
+spec:
+  ports:
+  - port: ${service_port}
+    targetPort: ${container_port}
+  selector:
+    k8s-app: ${app_name}
diff --git a/collector/deployment/terraform/http-span-collector/templates/http-span-collector_conf.tpl b/collector/deployment/terraform/http-span-collector/templates/http-span-collector_conf.tpl
new file mode 100644
index 000000000..b0262c5a0
--- /dev/null
+++ b/collector/deployment/terraform/http-span-collector/templates/http-span-collector_conf.tpl
@@ -0,0 +1,33 @@
+kafka {
+ producer {
+ topic = "proto-spans"
+ props {
+ bootstrap.servers = "${kafka_endpoint}"
+ retries = 50
+ batch.size = 153600
+ linger.ms = 250
+ compression.type = "lz4"
+ }
+ }
+}
+
+extractor {
+ output.format = "proto"
+ spans.validation {
+ max.size {
+ enable = "${max_spansize_validation_enabled}"
+ log.only = "${max_spansize_log_only}"
+ max.size.limit = "${max_spansize_limit}"
+ message.tag.key = "${message_tag_key}"
+ message.tag.value = "${message_tag_value}"
+ skip.tags = "${max_spansize_skip_tags}"
+ skip.services = "${max_spansize_skip_services}"
+ }
+ }
+}
+
+http {
+ host = "0.0.0.0"
+ port = ${container_port}
+}
+
diff --git a/collector/deployment/terraform/http-span-collector/variables.tf b/collector/deployment/terraform/http-span-collector/variables.tf
new file mode 100644
index 000000000..80d59f58b
--- /dev/null
+++ b/collector/deployment/terraform/http-span-collector/variables.tf
@@ -0,0 +1,36 @@
+variable "image" {}
+variable "replicas" {}
+variable "enabled"{}
+variable "namespace" {}
+variable "graphite_hostname" {}
+variable "graphite_port" {}
+variable "kafka_endpoint" {}
+variable "kubectl_executable_name" {}
+variable "kubectl_context_name" {}
+variable "node_selecter_label"{}
+variable "memory_request"{}
+variable "memory_limit"{}
+variable "jvm_memory_limit"{}
+variable "cpu_request"{}
+variable "cpu_limit"{}
+variable "app_name"{ default = "http-span-collector" }
+variable "env_vars" {}
+variable "service_port" {
+ default = 80
+}
+variable "container_port" {
+ default = 8080
+}
+
+variable "termination_grace_period" {
+ default = 30
+}
+variable "haystack_cluster_name" {}
+
+variable "max_spansize_validation_enabled" {}
+variable "max_spansize_log_only" {}
+variable "max_spansize_limit" {}
+variable "message_tag_key" {}
+variable "message_tag_value" {}
+variable "max_spansize_skip_tags" {}
+variable "max_spansize_skip_services" {}
diff --git a/collector/deployment/terraform/kinesis-span-collector/main.tf b/collector/deployment/terraform/kinesis-span-collector/main.tf
new file mode 100644
index 000000000..5bdb9f7ac
--- /dev/null
+++ b/collector/deployment/terraform/kinesis-span-collector/main.tf
@@ -0,0 +1,80 @@
+locals {
+ app_name = "${var.app_name}"
+ config_file_path = "${path.module}/templates/kinesis-span-collector_conf.tpl"
+ deployment_yaml_file_path = "${path.module}/templates/deployment_yaml.tpl"
+ count = "${var.enabled?1:0}"
+ checksum = "${sha1("${data.template_file.config_data.rendered}")}"
+ configmap_name = "${local.app_name}-${local.checksum}"
+}
+
+resource "kubernetes_config_map" "haystack-config" {
+ metadata {
+ name = "${local.configmap_name}"
+ namespace = "${var.namespace}"
+ }
+ data {
+ "kinesis-span-collector.conf" = "${data.template_file.config_data.rendered}"
+ }
+ count = "${local.count}"
+}
+
+data "template_file" "config_data" {
+ template = "${file("${local.config_file_path}")}"
+
+ vars {
+ kinesis_stream_region = "${var.kinesis_stream_region}"
+ kinesis_stream_name = "${var.kinesis_stream_name}"
+ kafka_endpoint = "${var.kafka_endpoint}"
+ sts_role_arn = "${var.sts_role_arn}"
+ app_group_name = "${var.haystack_cluster_name}-${var.app_name}"
+ max_spansize_validation_enabled = "${var.max_spansize_validation_enabled}"
+ max_spansize_log_only = "${var.max_spansize_log_only}"
+ max_spansize_limit = "${var.max_spansize_limit}"
+ message_tag_key = "${var.message_tag_key}"
+ message_tag_value = "${var.message_tag_value}"
+ max_spansize_skip_tags = "${var.max_spansize_skip_tags}"
+ max_spansize_skip_services = "${var.max_spansize_skip_services}"
+ }
+}
+
+
+data "template_file" "deployment_yaml" {
+ template = "${file("${local.deployment_yaml_file_path}")}"
+ vars {
+ app_name = "${local.app_name}"
+ namespace = "${var.namespace}"
+ graphite_port = "${var.graphite_port}"
+ graphite_host = "${var.graphite_hostname}"
+ node_selecter_label = "${var.node_selecter_label}"
+ image = "${var.image}"
+ replicas = "${var.replicas}"
+ memory_limit = "${var.memory_limit}"
+ memory_request = "${var.memory_request}"
+ jvm_memory_limit = "${var.jvm_memory_limit}"
+ cpu_limit = "${var.cpu_limit}"
+ cpu_request = "${var.cpu_request}"
+ configmap_name = "${local.configmap_name}"
+ env_vars = "${indent(9,"${var.env_vars}")}"
+
+ }
+}
+
+resource "null_resource" "kubectl_apply" {
+ triggers {
+ template = "${data.template_file.deployment_yaml.rendered}"
+ }
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} apply -f - --context ${var.kubectl_context_name}"
+ }
+ count = "${local.count}"
+}
+
+
+resource "null_resource" "kubectl_destroy" {
+
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} delete -f - --context ${var.kubectl_context_name}"
+ when = "destroy"
+ }
+ count = "${local.count}"
+}
diff --git a/collector/deployment/terraform/kinesis-span-collector/outputs.tf b/collector/deployment/terraform/kinesis-span-collector/outputs.tf
new file mode 100644
index 000000000..e69de29bb
diff --git a/collector/deployment/terraform/kinesis-span-collector/templates/deployment_yaml.tpl b/collector/deployment/terraform/kinesis-span-collector/templates/deployment_yaml.tpl
new file mode 100644
index 000000000..3f1ac6072
--- /dev/null
+++ b/collector/deployment/terraform/kinesis-span-collector/templates/deployment_yaml.tpl
@@ -0,0 +1,62 @@
+# ------------------- Deployment ------------------- #
+
+kind: Deployment
+apiVersion: apps/v1beta2
+metadata:
+  labels:
+    k8s-app: ${app_name}
+  name: ${app_name}
+  namespace: ${namespace}
+spec:
+  replicas: ${replicas}
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      k8s-app: ${app_name}
+  template:
+    metadata:
+      labels:
+        k8s-app: ${app_name}
+    spec:
+      containers:
+      - name: ${app_name}
+        image: ${image}
+        volumeMounts:
+        # Mount the rendered collector configuration from the ConfigMap
+        - mountPath: /config
+          name: config-volume
+        resources:
+          limits:
+            cpu: ${cpu_limit}
+            memory: ${memory_limit}Mi
+          requests:
+            cpu: ${cpu_request}
+            memory: ${memory_request}Mi
+        env:
+        - name: "HAYSTACK_OVERRIDES_CONFIG_PATH"
+          value: "/config/kinesis-span-collector.conf"
+        - name: "HAYSTACK_GRAPHITE_HOST"
+          value: "${graphite_host}"
+        - name: "HAYSTACK_GRAPHITE_PORT"
+          value: "${graphite_port}"
+        - name: "JAVA_XMS"
+          value: "${jvm_memory_limit}m"
+        - name: "JAVA_XMX"
+          value: "${jvm_memory_limit}m"
+        ${env_vars}
+        livenessProbe:
+          exec:
+            command:
+            - grep
+            - "true"
+            - /app/isHealthy
+          initialDelaySeconds: 30
+          periodSeconds: 5
+          failureThreshold: 6
+      nodeSelector:
+        ${node_selecter_label}
+      volumes:
+      - name: config-volume
+        configMap:
+          name: ${configmap_name}
+
diff --git a/collector/deployment/terraform/kinesis-span-collector/templates/kinesis-span-collector_conf.tpl b/collector/deployment/terraform/kinesis-span-collector/templates/kinesis-span-collector_conf.tpl
new file mode 100644
index 000000000..e6dadc5f5
--- /dev/null
+++ b/collector/deployment/terraform/kinesis-span-collector/templates/kinesis-span-collector_conf.tpl
@@ -0,0 +1,54 @@
+kafka {
+ producer {
+ topic = "proto-spans"
+ props {
+ bootstrap.servers = "${kafka_endpoint}"
+ retries = 50
+ batch.size = 153600
+ linger.ms = 250
+ compression.type = "lz4"
+ }
+ }
+}
+
+extractor {
+ output.format = "proto"
+ spans.validation {
+ max.size {
+ enable = "${max_spansize_validation_enabled}"
+ log.only = "${max_spansize_log_only}"
+ max.size.limit = "${max_spansize_limit}"
+ message.tag.key = "${message_tag_key}"
+ message.tag.value = "${message_tag_value}"
+ skip.tags = "${max_spansize_skip_tags}"
+ skip.services = "${max_spansize_skip_services}"
+ }
+ }
+}
+
+kinesis {
+ sts.role.arn = "${sts_role_arn}"
+ aws.region = "${kinesis_stream_region}"
+ app.group.name = "${app_group_name}"
+
+ stream {
+ name = "${kinesis_stream_name}"
+ position = "LATEST"
+ }
+
+ checkpoint {
+ interval.ms = 15000
+ retries = 50
+ retry.interval.ms = 250
+ }
+
+ task.backoff.ms = 200
+ max.records.read = 2000
+ idle.time.between.reads.ms = 500
+ shard.sync.interval.ms = 30000
+
+ metrics {
+ level = "NONE"
+ buffer.time.ms = 15000
+ }
+}
diff --git a/collector/deployment/terraform/kinesis-span-collector/variables.tf b/collector/deployment/terraform/kinesis-span-collector/variables.tf
new file mode 100644
index 000000000..0ff57d567
--- /dev/null
+++ b/collector/deployment/terraform/kinesis-span-collector/variables.tf
@@ -0,0 +1,32 @@
+variable "image" {}
+variable "replicas" {}
+variable "enabled"{}
+variable "namespace" {}
+variable "kinesis_stream_region" {}
+variable "kinesis_stream_name" {}
+variable "sts_role_arn" {}
+variable "graphite_hostname" {}
+variable "graphite_port" {}
+variable "kafka_endpoint" {}
+variable "kubectl_executable_name" {}
+variable "kubectl_context_name" {}
+variable "node_selecter_label"{}
+variable "memory_request"{}
+variable "memory_limit"{}
+variable "jvm_memory_limit"{}
+variable "cpu_request"{}
+variable "cpu_limit"{}
+variable "app_name"{ default = "kinesis-span-collector" }
+variable "env_vars" {}
+variable "max_spansize_validation_enabled" {}
+variable "max_spansize_log_only" {}
+variable "max_spansize_limit" {}
+variable "message_tag_key" {}
+variable "message_tag_value" {}
+variable "max_spansize_skip_tags" {}
+variable "max_spansize_skip_services" {}
+
+variable "termination_grace_period" {
+ default = 30
+}
+variable "haystack_cluster_name" {}
diff --git a/collector/deployment/terraform/main.tf b/collector/deployment/terraform/main.tf
new file mode 100644
index 000000000..9d901c290
--- /dev/null
+++ b/collector/deployment/terraform/main.tf
@@ -0,0 +1,69 @@
+locals {
+ default_kinesis_stream_name = "${var.kinesis-stream_name}"
+ default_kinesis_stream_region = "${var.kinesis-stream_region}"
+}
+
+module "kinesis-span-collector" {
+ source = "kinesis-span-collector"
+ image = "expediadotcom/haystack-kinesis-span-collector:${var.collector["version"]}"
+ replicas = "${var.collector["kinesis_span_collector_instances"]}"
+ enabled = "${var.collector["kinesis_span_collector_enabled"]}"
+
+ kinesis_stream_name = "${var.collector["kinesis_stream_name"] == "" ? local.default_kinesis_stream_name : var.collector["kinesis_stream_name"]}"
+ kinesis_stream_region = "${var.collector["kinesis_stream_region"] == "" ? local.default_kinesis_stream_region : var.collector["kinesis_stream_region"]}"
+
+ sts_role_arn = "${var.collector["kinesis_span_collector_sts_role_arn"]}"
+ env_vars = "${var.collector["kinesis_span_collector_environment_overrides"]}"
+
+ namespace = "${var.app_namespace}"
+ kafka_endpoint = "${var.kafka_hostname}:${var.kafka_port}"
+ graphite_hostname = "${var.graphite_hostname}"
+ graphite_port = "${var.graphite_port}"
+ haystack_cluster_name = "${var.haystack_cluster_name}"
+ node_selecter_label = "${var.node_selector_label}"
+ kubectl_executable_name = "${var.kubectl_executable_name}"
+ kubectl_context_name = "${var.kubectl_context_name}"
+ cpu_limit = "${var.collector["kinesis_span_collector_cpu_limit"]}"
+ cpu_request = "${var.collector["kinesis_span_collector_cpu_request"]}"
+ memory_request = "${var.collector["kinesis_span_collector_memory_request"]}"
+ memory_limit = "${var.collector["kinesis_span_collector_memory_limit"]}"
+ jvm_memory_limit = "${var.collector["kinesis_span_collector_jvm_memory_limit"]}"
+ app_name = "${var.collector["kinesis_span_collector_app_name"]}"
+ max_spansize_validation_enabled = "${var.collector["kinesis_span_collector_max_spansize_validation_enabled"]}"
+ max_spansize_log_only = "${var.collector["kinesis_span_collector_max_spansize_log_only"]}"
+ max_spansize_limit = "${var.collector["kinesis_span_collector_max_spansize_limit"]}"
+ message_tag_key = "${var.collector["kinesis_span_collector_message_tag_key"]}"
+ message_tag_value = "${var.collector["kinesis_span_collector_message_tag_value"]}"
+ max_spansize_skip_tags = "${var.collector["kinesis_span_collector_max_spansize_skip_tags"]}"
+ max_spansize_skip_services = "${var.collector["kinesis_span_collector_max_spansize_skip_services"]}"
+}
+
+module "http-span-collector" {
+ source = "http-span-collector"
+ image = "expediadotcom/haystack-http-span-collector:${var.collector["version"]}"
+ replicas = "${var.collector["http_span_collector_instances"]}"
+ enabled = "${var.collector["http_span_collector_enabled"]}"
+ env_vars = "${var.collector["http_span_collector_environment_overrides"]}"
+
+ namespace = "${var.app_namespace}"
+ kafka_endpoint = "${var.kafka_hostname}:${var.kafka_port}"
+ graphite_hostname = "${var.graphite_hostname}"
+ graphite_port = "${var.graphite_port}"
+ haystack_cluster_name = "${var.haystack_cluster_name}"
+ node_selecter_label = "${var.node_selector_label}"
+ kubectl_executable_name = "${var.kubectl_executable_name}"
+ kubectl_context_name = "${var.kubectl_context_name}"
+ cpu_limit = "${var.collector["http_span_collector_cpu_limit"]}"
+ cpu_request = "${var.collector["http_span_collector_cpu_request"]}"
+ memory_request = "${var.collector["http_span_collector_memory_request"]}"
+ memory_limit = "${var.collector["http_span_collector_memory_limit"]}"
+ jvm_memory_limit = "${var.collector["http_span_collector_jvm_memory_limit"]}"
+ app_name = "${var.collector["http_span_collector_app_name"]}"
+ max_spansize_validation_enabled = "${var.collector["http_span_collector_max_spansize_validation_enabled"]}"
+ max_spansize_log_only = "${var.collector["http_span_collector_max_spansize_log_only"]}"
+ max_spansize_limit = "${var.collector["http_span_collector_max_spansize_limit"]}"
+ message_tag_key = "${var.collector["http_span_collector_message_tag_key"]}"
+ message_tag_value = "${var.collector["http_span_collector_message_tag_value"]}"
+ max_spansize_skip_tags = "${var.collector["http_span_collector_max_spansize_skip_tags"]}"
+ max_spansize_skip_services = "${var.collector["http_span_collector_max_spansize_skip_services"]}"
+}
diff --git a/collector/deployment/terraform/outputs.tf b/collector/deployment/terraform/outputs.tf
new file mode 100644
index 000000000..e69de29bb
diff --git a/collector/deployment/terraform/variables.tf b/collector/deployment/terraform/variables.tf
new file mode 100644
index 000000000..71620860c
--- /dev/null
+++ b/collector/deployment/terraform/variables.tf
@@ -0,0 +1,17 @@
+
+variable "kafka_hostname" {}
+variable "kafka_port" {}
+variable "graphite_hostname" {}
+variable "graphite_port" {}
+variable "haystack_cluster_name" {}
+variable "kubectl_context_name" {}
+variable "kubectl_executable_name" {}
+variable "app_namespace" {}
+variable "node_selector_label"{}
+variable "kinesis-stream_name" {}
+variable "kinesis-stream_region" {}
+
+# collectors config
+variable "collector" {
+ type = "map"
+}
diff --git a/collector/haystack-span-decorators/pom.xml b/collector/haystack-span-decorators/pom.xml
new file mode 100644
index 000000000..4ed0cbb9e
--- /dev/null
+++ b/collector/haystack-span-decorators/pom.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>haystack-collector</artifactId>
+        <groupId>com.expedia.www</groupId>
+        <version>1.0-SNAPSHOT</version>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>haystack-span-decorators</artifactId>
+    <version>1.0-SNAPSHOT</version>
+    <packaging>jar</packaging>
+
+    <build>
+        <sourceDirectory>src/main/java</sourceDirectory>
+        <plugins>
+            <plugin>
+                <groupId>net.alchim31.maven</groupId>
+                <artifactId>scala-maven-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
\ No newline at end of file
diff --git a/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/AdditionalTagsSpanDecorator.java b/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/AdditionalTagsSpanDecorator.java
new file mode 100644
index 000000000..a65602f01
--- /dev/null
+++ b/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/AdditionalTagsSpanDecorator.java
@@ -0,0 +1,49 @@
+package com.expedia.www.haystack.span.decorators;
+
+import com.expedia.open.tracing.Span;
+import com.expedia.open.tracing.Tag;
+import com.typesafe.config.Config;
+import org.apache.commons.lang3.StringUtils;
+
+import java.util.Map;
+import java.util.stream.Collectors;
+
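+/**
+ * SpanDecorator that appends statically configured tags to a span, skipping any tag key
+ * for which the span already carries a non-empty string value.
+ */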
+public class AdditionalTagsSpanDecorator implements SpanDecorator {
+
+ private Config tagConfig;
+
+ public AdditionalTagsSpanDecorator() {
+ }
+
+ @Override
+ public void init(Config config) {
+ tagConfig = config;
+ }
+
+ @Override
+ public Span.Builder decorate(Span.Builder span) {
+ return addHaystackMetadataTags(span);
+ }
+
+ @Override
+ public String name() {
+ return AdditionalTagsSpanDecorator.class.getName();
+ }
+
+ private Span.Builder addHaystackMetadataTags(Span.Builder spanBuilder) {
+ final Map<String, String> spanTags = spanBuilder.getTagsList().stream()
+ .collect(Collectors.toMap(Tag::getKey, Tag::getVStr));
+
+ tagConfig.entrySet().forEach(tag -> {
+ final String tagValue = spanTags.getOrDefault(tag.getKey(), null);
+ if (StringUtils.isEmpty(tagValue)) {
+ spanBuilder.addTags(Tag.newBuilder().setKey(tag.getKey()).setVStr(tag.getValue().unwrapped().toString()));
+ }
+ });
+
+ return spanBuilder;
+ }
+
+}
diff --git a/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/SpanDecorator.java b/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/SpanDecorator.java
new file mode 100644
index 000000000..b8f97bef0
--- /dev/null
+++ b/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/SpanDecorator.java
@@ -0,0 +1,10 @@
+package com.expedia.www.haystack.span.decorators;
+
+import com.expedia.open.tracing.Span;
+import com.typesafe.config.Config;
+
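+/**
+ * Contract for span decorator plugins. Implementations are discovered at runtime via
+ * java.util.ServiceLoader (see SpanDecoratorPluginLoader), initialized once with their
+ * plugin Config block, and may then enrich each span builder passed to decorate().
+ */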
+public interface SpanDecorator {
+ void init(Config config);
+ Span.Builder decorate(Span.Builder span);
+ String name();
+}
diff --git a/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/plugin/config/Plugin.java b/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/plugin/config/Plugin.java
new file mode 100644
index 000000000..929e83f31
--- /dev/null
+++ b/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/plugin/config/Plugin.java
@@ -0,0 +1,29 @@
+package com.expedia.www.haystack.span.decorators.plugin.config;
+
+import java.util.List;
+
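+/**
+ * Plugin loader configuration: the directory scanned for plugin jars and the list of
+ * per-plugin configuration entries.
+ */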
+public class Plugin {
+ private String directory;
+ private List<PluginConfiguration> pluginConfigurationList;
+
+ public Plugin(String directory, List<PluginConfiguration> pluginConfigurationList) {
+ this.directory = directory;
+ this.pluginConfigurationList = pluginConfigurationList;
+ }
+
+ public String getDirectory() {
+ return directory;
+ }
+
+ public List<PluginConfiguration> getPluginConfigurationList() {
+ return pluginConfigurationList;
+ }
+
+ public void setDirectory(String directory) {
+ this.directory = directory;
+ }
+
+ public void setPluginConfigurationList(List<PluginConfiguration> pluginConfigurationList) {
+ this.pluginConfigurationList = pluginConfigurationList;
+ }
+}
diff --git a/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/plugin/config/PluginConfiguration.java b/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/plugin/config/PluginConfiguration.java
new file mode 100644
index 000000000..e371c2d7f
--- /dev/null
+++ b/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/plugin/config/PluginConfiguration.java
@@ -0,0 +1,33 @@
+package com.expedia.www.haystack.span.decorators.plugin.config;
+
+import com.typesafe.config.Config;
+
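+/**
+ * Name plus Config block for a single decorator plugin; the name must match the value
+ * returned by SpanDecorator.name() for the plugin to be initialized.
+ */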
+public class PluginConfiguration {
+ private String name;
+ private Config config;
+
+ public PluginConfiguration(String name, Config config) {
+ this.name = name;
+ this.config = config;
+ }
+
+ public PluginConfiguration() {
+
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public Config getConfig() {
+ return config;
+ }
+
+ public void setConfig(Config config) {
+ this.config = config;
+ }
+}
diff --git a/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/plugin/loader/SpanDecoratorPluginLoader.java b/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/plugin/loader/SpanDecoratorPluginLoader.java
new file mode 100644
index 000000000..3e8caebc0
--- /dev/null
+++ b/collector/haystack-span-decorators/src/main/java/com/expedia/www/haystack/span/decorators/plugin/loader/SpanDecoratorPluginLoader.java
@@ -0,0 +1,74 @@
+package com.expedia.www.haystack.span.decorators.plugin.loader;
+
+import com.expedia.www.haystack.span.decorators.SpanDecorator;
+
+import com.expedia.www.haystack.span.decorators.plugin.config.Plugin;
+import com.expedia.www.haystack.span.decorators.plugin.config.PluginConfiguration;
+import org.slf4j.Logger;
+
+import java.io.File;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.ServiceLoader;
+
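+/**
+ * Discovers SpanDecorator implementations at runtime: every jar found in the configured
+ * plugin directory is placed on a URLClassLoader and implementations are located via
+ * java.util.ServiceLoader. A decorator is activated only if a PluginConfiguration with a
+ * matching name exists, in which case it is initialized with that plugin's Config block.
+ */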
+public class SpanDecoratorPluginLoader {
+ private Logger logger;
+ private Plugin pluginConfig;
+ private static SpanDecoratorPluginLoader spanDecoratorPluginLoader;
+ private ServiceLoader<SpanDecorator> loader;
+
+ private SpanDecoratorPluginLoader(Logger logger, Plugin pluginConfig) {
+ this.logger = logger;
+ this.pluginConfig = pluginConfig;
+ }
+
+ public static synchronized SpanDecoratorPluginLoader getInstance(Logger logger, Plugin pluginConfig) {
+ if (spanDecoratorPluginLoader == null) {
+ spanDecoratorPluginLoader = new SpanDecoratorPluginLoader(logger, pluginConfig);
+ }
+ spanDecoratorPluginLoader.createLoader();
+
+ return spanDecoratorPluginLoader;
+ }
+
+ private void createLoader() {
+ try {
+ final File[] pluginFiles = new File(pluginConfig.getDirectory()).listFiles();
+ if (pluginFiles != null) {
+ final List<URL> urls = new ArrayList<>();
+ for (final File file : pluginFiles) {
+ urls.add(file.toURI().toURL());
+ }
+ URLClassLoader urlClassLoader = new URLClassLoader(urls.toArray(new URL[0]), SpanDecorator.class.getClassLoader());
+ loader = ServiceLoader.load(SpanDecorator.class, urlClassLoader);
+ }
+ } catch (Exception ex) {
+ logger.error("Could not create the class loader for finding jar ", ex);
+ } catch (NoClassDefFoundError ex) {
+ logger.error("Could not find the class ", ex);
+ }
+ }
+
+ public List<SpanDecorator> getSpanDecorators() {
+ List<SpanDecorator> spanDecorators = new ArrayList<>();
+ try {
+ loader.forEach((spanDecorator) -> {
+ final PluginConfiguration validFirstConfig = pluginConfig.getPluginConfigurationList().stream().filter(pluginConfiguration ->
+ pluginConfiguration.getName().equals(spanDecorator.name())).findFirst().orElse(null);
+ if (validFirstConfig != null) {
+ spanDecorator.init(validFirstConfig.getConfig());
+ spanDecorators.add(spanDecorator);
+ logger.info("Successfully loaded the plugin {}", spanDecorator.name());
+ }
+ });
+ } catch (Exception ex) {
+ logger.error("Unable to load the external span decorators ", ex);
+ }
+
+ return spanDecorators;
+ }
+}
diff --git a/collector/haystack-span-decorators/src/test/java/com/expedia/www/haystack/span/decorators/AdditionalTagsSpanDecoratorTest.java b/collector/haystack-span-decorators/src/test/java/com/expedia/www/haystack/span/decorators/AdditionalTagsSpanDecoratorTest.java
new file mode 100644
index 000000000..217d73d10
--- /dev/null
+++ b/collector/haystack-span-decorators/src/test/java/com/expedia/www/haystack/span/decorators/AdditionalTagsSpanDecoratorTest.java
@@ -0,0 +1,75 @@
+package com.expedia.www.haystack.span.decorators;
+
+import com.expedia.open.tracing.Span;
+import com.expedia.open.tracing.Tag;
+import com.typesafe.config.ConfigFactory;
+import org.apache.commons.lang3.StringUtils;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.*;
+
+public class AdditionalTagsSpanDecoratorTest {
+ private final static Logger logger = LoggerFactory.getLogger(AdditionalTagsSpanDecorator.class);
+
+ @Before
+ public void setup() {
+
+ }
+
+ @Test
+ public void decorateWithNoDuplicateTags() {
+ final Map<String, String> tagConfig = new HashMap<String, String>() {{
+ put("X-HAYSTACK-TAG1", "VALUE1");
+ put("X-HAYSTACK-TAG2", "VALUE2");
+ }};
+ final AdditionalTagsSpanDecorator additionalTagsSpanDecorator = new AdditionalTagsSpanDecorator();
+ additionalTagsSpanDecorator.init(ConfigFactory.parseMap(tagConfig));
+ final Span resultSpan = additionalTagsSpanDecorator.decorate(Span.newBuilder()).build();
+
+ final boolean res = resultSpan.getTagsList().stream().allMatch(tag -> {
+ final String tagValue = tagConfig.getOrDefault(tag.getKey(), null);
+ if (StringUtils.isEmpty(tagValue)) {
+ return true;
+ } else if (tagValue.equals(tag.getVStr())) {
+ return true;
+ }
+ return false;
+ });
+
+ assertTrue(res);
+ }
+
+ @Test
+ public void decorateWithExistingDuplicateTags() {
+ final Map<String, String> tagConfig = new HashMap<String, String>() {{
+ put("X-HAYSTACK-TAG1", "VALUE1");
+ put("X-HAYSTACK-TAG2", "VALUE2");
+ }};
+ final AdditionalTagsSpanDecorator additionalTagsSpanDecorator = new AdditionalTagsSpanDecorator();
+ additionalTagsSpanDecorator.init(ConfigFactory.parseMap(tagConfig));
+ final Span.Builder spanBuilder = Span.newBuilder().addTags(Tag.newBuilder().setKey("X-HAYSTACK-TAG1").setVStr("VALUE3"));
+ final Span resultSpan = additionalTagsSpanDecorator.decorate(spanBuilder).build();
+
+ final boolean res = resultSpan.getTagsList().stream().allMatch(tag -> {
+ final String tagValue = tagConfig.getOrDefault(tag.getKey(), null);
+ if (StringUtils.isEmpty(tagValue)) {
+ return true;
+ } else if(tagValue.equals(tag.getVStr())) {
+ return true;
+ } else if(tag.getVStr().equals("VALUE3")) {
+ return true;
+ }
+ return false;
+ });
+
+ assertTrue(res);
+ }
+
+}
\ No newline at end of file
diff --git a/collector/haystack-span-decorators/src/test/resources/logback-test.xml b/collector/haystack-span-decorators/src/test/resources/logback-test.xml
new file mode 100644
index 000000000..adfa02c68
--- /dev/null
+++ b/collector/haystack-span-decorators/src/test/resources/logback-test.xml
@@ -0,0 +1 @@
+<configuration/>
\ No newline at end of file
diff --git a/collector/http/Makefile b/collector/http/Makefile
new file mode 100644
index 000000000..978675c20
--- /dev/null
+++ b/collector/http/Makefile
@@ -0,0 +1,33 @@
+.PHONY: docker_build release prepare_integration_test_env
+
+export DOCKER_ORG := expediadotcom
+export DOCKER_IMAGE_NAME := haystack-http-span-collector
+PWD := $(shell pwd)
+
+docker_build:
+	docker build -t $(DOCKER_IMAGE_NAME) -f build/docker/Dockerfile .
+
+prepare_integration_test_env: docker_build
+	docker-compose -f build/integration-tests/docker-compose.yml -p sandbox up -d
+
+	# kafka sometimes takes time to start
+	sleep 30
+
+integration_test: prepare_integration_test_env
+	# run tests in a container so that we can join the docker-compose network and talk to kafka
+	docker run \
+		-it \
+		--network=sandbox_default \
+		-v $(PWD)/..:/src \
+		-v ~/.m2:/root/.m2 \
+		-w /src \
+		maven:3.5.0-jdk-8 \
+		mvn scoverage:integration-check -pl http -am
+
+	# stop all the containers
+	docker-compose -f build/integration-tests/docker-compose.yml -p sandbox stop
+	docker rm $(shell docker ps -a -q)
+	docker volume rm $(shell docker volume ls -q)
+
+release: docker_build
+	../deployment/scripts/publish-to-docker-hub.sh
diff --git a/collector/http/README.md b/collector/http/README.md
new file mode 100644
index 000000000..35ca771ce
--- /dev/null
+++ b/collector/http/README.md
@@ -0,0 +1,40 @@
+# Http Span Collector
+
+The http collector is a web service built on akka-http. It accepts [proto](https://github.com/ExpediaDotCom/haystack-idl/tree/master/proto)-serialized and JSON-serialized spans on port 8080 (configurable).
+
+The collector exposes two endpoints:
+ 1. `/span`: accepts spans for ingestion. The `Content-Type` header determines the data format, so set it to one of:
+ * `application/json`: JSON-formatted spans
+ * `application/octet-stream`: proto-serialized binary spans
+
+ 2. `/isActive`: can be used as a health check by your load balancer (see the probe example below)
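+
+For example, a load balancer (or you) can probe it with:
+
+```
+curl "http://localhost:8080/isActive"
+```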
+
+### How to publish spans in json format
+
+The span's JSON schema should match the object model described [here](./src/main/scala/com/expedia/www/haystack/http/span/collector/json/Span.scala). For example:
+
+```
+curl -XPOST -H "Content-Type: application/json" -d ' \
+{
+ "traceId": "466848c0-a105-4867-8685-e3d00e3eb254",
+ "spanId": "8f79f97b-a317-4c8f-bbfd-5fd228550416",
+ "serviceName": "baz",
+ "operationName": "foo",
+ "startTime": 1521482680950000,
+ "duration": 2000,
+ "tags": [{
+ "key": "span.kind",
+ "value": "server"
+ }, {
+ "key": "error",
+ "value": false
+ }]
+}' \
+"http://localhost:8080/span"
+```
+
+### How to publish spans in proto format
+
+```
+curl -XPOST -H "Content-Type: application/octet-stream" --data-binary "@span.pb" "http://localhost:8080/span"
+```
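+
+Here `span.pb` is a placeholder name for a file holding a single proto-serialized `Span`. As a
+minimal sketch (assuming the generated `com.expedia.open.tracing.Span` classes from haystack-idl
+are on the classpath), such a file can be produced with:
+
+```
+import java.nio.file.{Files, Paths}
+
+import com.expedia.open.tracing.Span
+
+object WriteSpanFile extends App {
+  val span = Span.newBuilder()
+    .setTraceId("466848c0-a105-4867-8685-e3d00e3eb254")
+    .setSpanId("8f79f97b-a317-4c8f-bbfd-5fd228550416")
+    .setServiceName("baz")
+    .setOperationName("foo")
+    .setStartTime(System.currentTimeMillis() * 1000) // microseconds
+    .setDuration(2000)
+    .build()
+
+  // binary protobuf payload, matching Content-Type: application/octet-stream
+  Files.write(Paths.get("span.pb"), span.toByteArray)
+}
+```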
diff --git a/collector/http/build/docker/Dockerfile b/collector/http/build/docker/Dockerfile
new file mode 100644
index 000000000..d5a34f1fb
--- /dev/null
+++ b/collector/http/build/docker/Dockerfile
@@ -0,0 +1,19 @@
+FROM openjdk:8-jre
+MAINTAINER Haystack
+
+ENV APP_NAME haystack-http-span-collector
+ENV APP_HOME /app/bin
+ENV JMXTRANS_AGENT jmxtrans-agent-1.2.6
+
+RUN mkdir -p ${APP_HOME}
+
+COPY target/${APP_NAME}.jar ${APP_HOME}/
+COPY build/docker/start-app.sh ${APP_HOME}/
+COPY build/docker/jmxtrans-agent.xml ${APP_HOME}/
+
+RUN chmod +x ${APP_HOME}/start-app.sh
+ADD https://github.com/jmxtrans/jmxtrans-agent/releases/download/${JMXTRANS_AGENT}/${JMXTRANS_AGENT}.jar ${APP_HOME}/
+
+WORKDIR ${APP_HOME}
+
+ENTRYPOINT ["./start-app.sh"]
diff --git a/collector/http/build/docker/jmxtrans-agent.xml b/collector/http/build/docker/jmxtrans-agent.xml
new file mode 100644
index 000000000..41a320477
--- /dev/null
+++ b/collector/http/build/docker/jmxtrans-agent.xml
@@ -0,0 +1,28 @@
+<jmxtrans-agent>
+    <queries>
+        <!-- JVM metric queries omitted -->
+    </queries>
+    <outputWriter class="org.jmxtrans.agent.GraphitePlainTextTcpOutputWriter">
+        <host>${HAYSTACK_GRAPHITE_HOST:monitoring-influxdb-graphite.kube-system.svc}</host>
+        <port>${HAYSTACK_GRAPHITE_PORT:2003}</port>
+        <enabled>${HAYSTACK_GRAPHITE_ENABLED:true}</enabled>
+        <namePrefix>haystack.collector.http.#hostname#.</namePrefix>
+    </outputWriter>
+    <collectIntervalInSeconds>60</collectIntervalInSeconds>
+</jmxtrans-agent>
diff --git a/collector/http/build/docker/start-app.sh b/collector/http/build/docker/start-app.sh
new file mode 100755
index 000000000..9b7a9a415
--- /dev/null
+++ b/collector/http/build/docker/start-app.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+[ -z "$JAVA_XMS" ] && JAVA_XMS=1024m
+[ -z "$JAVA_XMX" ] && JAVA_XMX=1024m
+[ -z "$JAVA_GC_OPTS" ] && JAVA_GC_OPTS="-XX:+UseG1GC -XX:+ExitOnOutOfMemoryError"
+
+set -e
+JAVA_OPTS="${JAVA_OPTS} \
+-javaagent:${APP_HOME}/${JMXTRANS_AGENT}.jar=${APP_HOME}/jmxtrans-agent.xml \
+${JAVA_GC_OPTS} \
+-Xmx${JAVA_XMX} \
+-Xms${JAVA_XMS} \
+-Dcom.sun.management.jmxremote.authenticate=false \
+-Dcom.sun.management.jmxremote.ssl=false \
+-Dcom.sun.management.jmxremote.port=1098 \
+-Dcom.sun.management.jmxremote.rmi.port=1098 \
+-Dapplication.name=${APP_NAME} \
+-Dapplication.home=${APP_HOME}"
+
+exec java ${JAVA_OPTS} -jar "${APP_HOME}/${APP_NAME}.jar"
diff --git a/collector/http/build/integration-tests/app-integration-test.conf b/collector/http/build/integration-tests/app-integration-test.conf
new file mode 100644
index 000000000..f3f7e2070
--- /dev/null
+++ b/collector/http/build/integration-tests/app-integration-test.conf
@@ -0,0 +1,17 @@
+kafka {
+ producer {
+ topic = "proto-spans"
+ props {
+ bootstrap.servers = "kafkasvc:9092"
+ }
+ }
+}
+
+extractor {
+ output.format = "proto"
+}
+
+http {
+ host = "0.0.0.0"
+ port = 8080
+}
\ No newline at end of file
diff --git a/collector/http/build/integration-tests/docker-compose.yml b/collector/http/build/integration-tests/docker-compose.yml
new file mode 100644
index 000000000..d6cba5549
--- /dev/null
+++ b/collector/http/build/integration-tests/docker-compose.yml
@@ -0,0 +1,20 @@
+version: '3'
+services:
+  zookeeper:
+    image: wurstmeister/zookeeper
+    ports:
+      - "2181"
+  kafkasvc:
+    image: wurstmeister/kafka:0.11.0.1
+    ports:
+      - "9092"
+    depends_on:
+      - zookeeper
+    links:
+      - zookeeper:zk
+    environment:
+      KAFKA_ADVERTISED_HOST_NAME: kafkasvc
+      KAFKA_ADVERTISED_PORT: 9092
+      KAFKA_ZOOKEEPER_CONNECT: zk:2181
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
diff --git a/collector/http/pom.xml b/collector/http/pom.xml
new file mode 100644
index 000000000..207580e10
--- /dev/null
+++ b/collector/http/pom.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <parent>
+        <artifactId>haystack-collector</artifactId>
+        <groupId>com.expedia.www</groupId>
+        <version>1.0-SNAPSHOT</version>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>haystack-http-span-collector</artifactId>
+    <version>1.0-SNAPSHOT</version>
+    <packaging>jar</packaging>
+
+    <properties>
+        <mainClass>com.expedia.www.haystack.http.span.collector.WebServer</mainClass>
+        <finalName>${project.artifactId}-${project.version}</finalName>
+        <akka-http.version>10.1.5</akka-http.version>
+        <akka-stream.version>2.5.17</akka-stream.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.expedia.www</groupId>
+            <artifactId>haystack-collector-commons</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-http_${scala.major.minor.version}</artifactId>
+            <version>${akka-http.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-stream_${scala.major.minor.version}</artifactId>
+            <version>${akka-stream.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.json4s</groupId>
+            <artifactId>json4s-jackson_${scala.major.minor.version}</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <finalName>${finalName}</finalName>
+        <plugins>
+            <plugin>
+                <groupId>org.scalatest</groupId>
+                <artifactId>scalatest-maven-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>test</id>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                        <configuration>
+                            <wildcardSuites>com.expedia.www.haystack.http.span.collector.unit</wildcardSuites>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>integration-test</id>
+                        <phase>integration-test</phase>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                        <configuration>
+                            <wildcardSuites>com.expedia.www.haystack.http.span.collector.integration</wildcardSuites>
+                            <environmentVariables>
+                                <HAYSTACK_OVERRIDES_CONFIG_PATH>/src/http/build/integration-tests/app-integration-test.conf</HAYSTACK_OVERRIDES_CONFIG_PATH>
+                            </environmentVariables>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <transformers>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
+                                    <resource>reference.conf</resource>
+                                </transformer>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                                    <mainClass>${mainClass}</mainClass>
+                                </transformer>
+                            </transformers>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>net.alchim31.maven</groupId>
+                <artifactId>scala-maven-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.scalastyle</groupId>
+                <artifactId>scalastyle-maven-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/collector/http/src/main/resources/config/base.conf b/collector/http/src/main/resources/config/base.conf
new file mode 100644
index 000000000..354554fcf
--- /dev/null
+++ b/collector/http/src/main/resources/config/base.conf
@@ -0,0 +1,37 @@
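+# Base configuration for the http span collector. At runtime these values can be overridden
+# via the file referenced by the HAYSTACK_OVERRIDES_CONFIG_PATH environment variable
+# (see ConfigurationLoader.loadConfigFileWithEnvOverrides).
+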
+kafka {
+ producer {
+ topic = "proto-spans"
+ props {
+ bootstrap.servers = "localhost:9092"
+ }
+ }
+}
+
+extractor {
+ output.format = "proto"
+ spans.validation {
+
+ # Validate the size of a span. Truncate span tags when the size exceeds the specified limit.
+ # enable: true/false
+ # log.only: if enabled, only logs such spans but doesn't truncate the tags
+ # max.size.limit: maximum size allowed
+ # message.tag.key: this tag key will be added when tags are truncated
+ # message.tag.value: value of the above tag key indicating the truncation
+ # skip.tags: truncate all span tags except these
+ # skip.services: truncate span tags for all services except these
+ max.size {
+ enable = "false"
+ log.only = "false"
+ max.size.limit = 5000 // in bytes
+ message.tag.key = "X-HAYSTACK-SPAN-INFO"
+ message.tag.value = "Tags are truncated. REASON: Span Size Limit Exceeded. Please contact Haystack for more details"
+ skip.tags = ["error"]
+ skip.services = []
+ }
+ }
+}
+
+http {
+ host = "0.0.0.0"
+ port = 8080
+}
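
The `spans.validation.max.size` block above drives tag truncation in the extractor. A minimal sketch of the intended semantics, with a hypothetical helper name (the real logic lives in `ProtoSpanExtractor` in collector-commons):

```scala
import com.expedia.open.tracing.{Span, Tag}
import scala.collection.JavaConverters._

// Sketch only: parameters mirror the max.size config keys above; truncation keeps
// only the whitelisted tags and appends an informational tag explaining why.
def truncateIfOversized(span: Span, maxSizeLimit: Int, skipTags: Set[String],
                        skipServices: Set[String], infoKey: String, infoValue: String): Span = {
  if (span.getSerializedSize <= maxSizeLimit || skipServices.contains(span.getServiceName)) {
    span // within the limit, or the service is exempt: leave untouched
  } else {
    val keptTags = span.getTagsList.asScala.filter(t => skipTags.contains(t.getKey))
    span.toBuilder
      .clearTags()
      .addAllTags(keptTags.asJava)
      .addTags(Tag.newBuilder().setKey(infoKey).setVStr(infoValue).setType(Tag.TagType.STRING))
      .build()
  }
}
```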
diff --git a/collector/http/src/main/resources/logback.xml b/collector/http/src/main/resources/logback.xml
new file mode 100644
index 000000000..ab4e25a63
--- /dev/null
+++ b/collector/http/src/main/resources/logback.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
+            <immediateFlush>true</immediateFlush>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="async" class="ch.qos.logback.classic.AsyncAppender">
+        <queueSize>${HAYSTACK_LOG_QUEUE_SIZE:-500}</queueSize>
+        <discardingThreshold>${HAYSTACK_LOG_DISCARD_THRESHOLD:-0}</discardingThreshold>
+        <appender-ref ref="console"/>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="async"/>
+    </root>
+</configuration>
diff --git a/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/ProjectConfiguration.scala b/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/ProjectConfiguration.scala
new file mode 100644
index 000000000..a206940c4
--- /dev/null
+++ b/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/ProjectConfiguration.scala
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.http.span.collector
+
+import com.expedia.www.haystack.collector.commons.config.{ConfigurationLoader, ExternalKafkaConfiguration, ExtractorConfiguration, KafkaProduceConfiguration}
+import com.expedia.www.haystack.http.span.collector.authenticator.{Authenticator, NoopAuthenticator}
+import com.expedia.www.haystack.span.decorators.plugin.config.Plugin
+import com.typesafe.config.Config
+
+import scala.reflect.ClassTag
+
+case class HttpConfiguration(host: String = "127.0.0.1", port: Int = 8080, authenticator: Authenticator = NoopAuthenticator)
+
+object ProjectConfiguration {
+ val config: Config = ConfigurationLoader.loadConfigFileWithEnvOverrides()
+
+ def kafkaProducerConfig(): KafkaProduceConfiguration = ConfigurationLoader.kafkaProducerConfig(config)
+ def extractorConfig(): ExtractorConfiguration = ConfigurationLoader.extractorConfiguration(config)
+ def externalKafkaConfig(): List[ExternalKafkaConfiguration] = ConfigurationLoader.externalKafkaConfiguration(config)
+ def additionalTagConfig(): Map[String, String] = ConfigurationLoader.additionalTagsConfiguration(config)
+ def pluginConfiguration(): Plugin = ConfigurationLoader.pluginConfigurations(config)
+
+ lazy val httpConfig: HttpConfiguration = {
+ val authenticator = if(config.hasPath("http.authenticator")) {
+ toInstance[Authenticator](config.getString("http.authenticator"))
+ } else {
+ NoopAuthenticator
+ }
+
+ // initialize the authenticator with the loaded configuration
+ authenticator.init(config)
+
+ HttpConfiguration(config.getString("http.host"), config.getInt("http.port"), authenticator)
+ }
+
+ private def toInstance[T](className: String)(implicit ct: ClassTag[T]): T = {
+ val c = Class.forName(className)
+ if (c == null) {
+ throw new RuntimeException(s"No class found with name $className")
+ } else {
+ val o = c.newInstance()
+ val baseClass = ct.runtimeClass
+
+ if (!baseClass.isInstance(o)) {
+ throw new RuntimeException(s"${c.getName} is not an instance of ${baseClass.getName}")
+ }
+ o.asInstanceOf[T]
+ }
+ }
+}
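
Since `toInstance` resolves the authenticator by reflection, any implementation with a no-arg constructor can be plugged in through config. A hypothetical override (the class name is illustrative, not part of this change):

```hocon
http {
  host = "0.0.0.0"
  port = 8080
  # fully qualified class name, resolved via Class.forName at startup
  authenticator = "com.example.MyTokenAuthenticator"
}
```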
diff --git a/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/WebServer.scala b/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/WebServer.scala
new file mode 100644
index 000000000..04ea5d0d2
--- /dev/null
+++ b/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/WebServer.scala
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.http.span.collector
+
+import akka.actor.ActorSystem
+import akka.http.scaladsl.Http
+import akka.http.scaladsl.model._
+import akka.http.scaladsl.server.Directives._
+import akka.http.scaladsl.server.{AuthorizationFailedRejection, Route}
+import akka.stream.ActorMaterializer
+import akka.util.ByteString
+import com.codahale.metrics.JmxReporter
+import com.expedia.www.haystack.collector.commons.sink.kafka.KafkaRecordSink
+import com.expedia.www.haystack.collector.commons.{MetricsSupport, ProtoSpanExtractor, SpanDecoratorFactory}
+import com.expedia.www.haystack.http.span.collector.json.Span
+import org.json4s.DefaultFormats
+import org.json4s.jackson.Serialization
+import org.slf4j.LoggerFactory
+
+import scala.concurrent.duration._
+import scala.concurrent.{Await, ExecutionContextExecutor, Future}
+import scala.sys._
+import scala.util.Try
+
+object WebServer extends App with MetricsSupport {
+ val LOGGER = LoggerFactory.getLogger(WebServer.getClass)
+
+ // setup kafka sink
+ private val kafkaSink = new KafkaRecordSink(ProjectConfiguration.kafkaProducerConfig(), ProjectConfiguration.externalKafkaConfig())
+ private val kvExtractor = new ProtoSpanExtractor(ProjectConfiguration.extractorConfig(),
+ LoggerFactory.getLogger(classOf[ProtoSpanExtractor]),
+ SpanDecoratorFactory.get(ProjectConfiguration.pluginConfiguration(), ProjectConfiguration.additionalTagConfig(), LOGGER))
+
+ private val http = ProjectConfiguration.httpConfig
+
+ // setup actor system
+ implicit val system: ActorSystem = ActorSystem("span-collector", ProjectConfiguration.config)
+ implicit val materializer: ActorMaterializer = ActorMaterializer()
+ implicit val executionContext: ExecutionContextExecutor = system.dispatcher
+ implicit val formats: DefaultFormats.type = DefaultFormats
+
+ // start jmx reporter
+ private val jmxReporter = JmxReporter.forRegistry(metricRegistry).build()
+ jmxReporter.start()
+
+ // start http server on given host and port
+ val bindingFuture = Http(system).bindAndHandle(routes(), http.host, http.port)
+ LOGGER.info(s"Server is now listening at http://${http.host}:${http.port}")
+
+ addShutdownHook { shutdownHook() }
+
+ def routes(): Route = {
+ // build the routes
+ path("span") {
+ post {
+ extractRequest {
+ req =>
+ if (http.authenticator(req)) {
+ val spanBytes = req.entity
+ .dataBytes
+ .runFold(ByteString.empty) { case (acc, b) => acc ++ b }
+ .map(_.compact.toArray[Byte])
+
+ req.entity.contentType match {
+ case ContentTypes.`application/json` =>
+ complete {
+ processJsonSpan(spanBytes)
+ }
+ case _ =>
+ complete {
+ processProtoSpan(spanBytes)
+ }
+ }
+ } else {
+ reject(AuthorizationFailedRejection)
+ }
+ }
+ }
+ } ~
+ path("isActive") {
+ get {
+ complete(HttpEntity(ContentTypes.`text/plain(UTF-8)`, "ACTIVE"))
+ }
+ }
+ }
+
+ def processProtoSpan(spanBytes: Future[Array[Byte]]): Future[StatusCode] = {
+ spanBytes
+ .map(kvExtractor.extractKeyValuePairs)
+ .map(kvPairs => {
+ kvPairs foreach { kv => kafkaSink.toAsync(kv) }
+ StatusCodes.Accepted
+ })
+ }
+
+ def processJsonSpan(dataBytes: Future[Array[Byte]]): Future[StatusCode] = {
+ processProtoSpan(
+ dataBytes
+ .map(bytes => Serialization.read[Span](new String(bytes)))
+ .map(span => span.toProto))
+ }
+
+ def shutdownHook(): Unit = {
+ LOGGER.info("Terminating Server ...")
+ bindingFuture
+ .flatMap(_.unbind())
+ .onComplete { _ => close() }
+ Await.result(system.whenTerminated, 30.seconds)
+ }
+
+ def close(): Unit = {
+ Try(kafkaSink.close())
+ Try(http.authenticator.close())
+ materializer.shutdown()
+ system.terminate()
+ jmxReporter.close()
+ }
+}
diff --git a/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/authenticator/Authenticator.scala b/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/authenticator/Authenticator.scala
new file mode 100644
index 000000000..1f3ea6c02
--- /dev/null
+++ b/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/authenticator/Authenticator.scala
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.http.span.collector.authenticator
+
+import java.io.Closeable
+
+import akka.http.scaladsl.model.HttpRequest
+import com.typesafe.config.Config
+
+trait Authenticator extends Closeable {
+ def apply(req: HttpRequest): Boolean
+
+ def init(config: Config): Unit
+}
diff --git a/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/authenticator/NoopAuthenticator.scala b/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/authenticator/NoopAuthenticator.scala
new file mode 100644
index 000000000..11aca77a3
--- /dev/null
+++ b/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/authenticator/NoopAuthenticator.scala
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.http.span.collector.authenticator
+
+import akka.http.scaladsl.model.HttpRequest
+import com.typesafe.config.Config
+
+object NoopAuthenticator extends Authenticator {
+ override def apply(req: HttpRequest): Boolean = true
+
+ override def init(config: Config): Unit = ()
+
+ override def close(): Unit = ()
+}
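
To illustrate the contract, a pre-shared-token authenticator might look like the following sketch (hypothetical class and config key, not part of this change):

```scala
package com.example

import akka.http.scaladsl.model.HttpRequest
import com.expedia.www.haystack.http.span.collector.authenticator.Authenticator
import com.typesafe.config.Config

// Accepts only requests carrying the expected value in the x-auth-token header.
class MyTokenAuthenticator extends Authenticator {
  private var expectedToken: String = _

  // "auth.token" is an assumed config key for this example
  override def init(config: Config): Unit = expectedToken = config.getString("auth.token")

  override def apply(req: HttpRequest): Boolean =
    req.headers.exists(h => h.is("x-auth-token") && h.value() == expectedToken)

  override def close(): Unit = ()
}
```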
diff --git a/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/json/Span.scala b/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/json/Span.scala
new file mode 100644
index 000000000..0c6f9b0ef
--- /dev/null
+++ b/collector/http/src/main/scala/com/expedia/www/haystack/http/span/collector/json/Span.scala
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.http.span.collector.json
+
+import com.expedia.open.tracing.Tag.TagType
+import com.expedia.open.tracing.{Log => PLog, Span => PSpan, Tag => PTag}
+case class Tag(key: String, value: Any)
+case class Log(timestamp: Long, fields: List[Tag])
+
+case class Span(traceId: String,
+ spanId: String,
+ parentSpanId: Option[String],
+ serviceName: String,
+ operationName: String,
+ startTime: Long,
+ duration: Int,
+ tags: List[Tag],
+ logs: List[Log]) {
+ def toProto: Array[Byte] = {
+ val span = PSpan.newBuilder()
+ .setTraceId(traceId)
+ .setSpanId(spanId)
+ .setServiceName(serviceName)
+ .setOperationName(operationName)
+ .setParentSpanId(parentSpanId.getOrElse(""))
+ .setStartTime(startTime)
+ .setDuration(duration)
+
+ tags.foreach(tag => span.addTags(createProtoTag(tag)))
+ logs.foreach(log => {
+ val l = PLog.newBuilder().setTimestamp(log.timestamp)
+ log.fields.foreach(tag => l.addFields(createProtoTag(tag)))
+ span.addLogs(l)
+ })
+
+ span.build().toByteArray
+ }
+
+ private def createProtoTag(tag: Tag): PTag.Builder = {
+ tag.value match {
+ case _: Int =>
+ PTag.newBuilder().setKey(tag.key).setVLong(tag.value.asInstanceOf[Int]).setType(TagType.LONG)
+ case _: BigInt =>
+ PTag.newBuilder().setKey(tag.key).setVLong(tag.value.asInstanceOf[BigInt].longValue()).setType(TagType.LONG)
+ case _: Long =>
+ PTag.newBuilder().setKey(tag.key).setVLong(tag.value.asInstanceOf[Long]).setType(TagType.LONG)
+ case _: Double =>
+ PTag.newBuilder().setKey(tag.key).setVDouble(tag.value.asInstanceOf[Double]).setType(TagType.DOUBLE)
+ case _: Float =>
+ PTag.newBuilder().setKey(tag.key).setVDouble(tag.value.asInstanceOf[Float].toDouble).setType(TagType.DOUBLE)
+ case _: Boolean =>
+ PTag.newBuilder().setKey(tag.key).setVBool(tag.value.asInstanceOf[Boolean]).setType(TagType.BOOL)
+ case _ => PTag.newBuilder().setKey(tag.key).setVStr(tag.value.toString).setType(TagType.STRING)
+ }
+ }
+}
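
For reference, a JSON payload that deserializes into this case class looks like the following (field values are illustrative; json4s maps a missing `parentSpanId` to `None`):

```json
{
  "traceId": "trace-id-1",
  "spanId": "span-id-1",
  "serviceName": "checkout-service",
  "operationName": "get-cart",
  "startTime": 1539200000000000,
  "duration": 42,
  "tags": [{"key": "error", "value": false}],
  "logs": [{"timestamp": 1539200000000000, "fields": [{"key": "errorcode", "value": 0}]}]
}
```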
diff --git a/collector/http/src/test/resources/logback-test.xml b/collector/http/src/test/resources/logback-test.xml
new file mode 100644
index 000000000..8f39588cc
--- /dev/null
+++ b/collector/http/src/test/resources/logback-test.xml
@@ -0,0 +1 @@
+<configuration/>
\ No newline at end of file
diff --git a/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/HttpProducer.scala b/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/HttpProducer.scala
new file mode 100644
index 000000000..e2acad694
--- /dev/null
+++ b/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/HttpProducer.scala
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.http.span.collector.integration
+
+import akka.actor.ActorSystem
+import akka.http.scaladsl.Http
+import akka.http.scaladsl.model._
+import akka.stream.ActorMaterializer
+
+import scala.concurrent.Await
+import scala.concurrent.duration._
+import scala.util.{Failure, Success}
+
+trait HttpProducer {
+ protected implicit val system = ActorSystem()
+ protected implicit val materializer = ActorMaterializer()
+ protected implicit val executionContext = system.dispatcher
+ private val http = Http(system)
+
+ def postHttp(records: List[Array[Byte]], contentType: ContentType = ContentTypes.`application/octet-stream`): Unit = {
+ records foreach { record =>
+ val entity = HttpEntity(contentType, record)
+ val request = HttpRequest(method = HttpMethods.POST, uri = "http://localhost:8080/span", entity = entity)
+ http.singleRequest(request) onComplete {
+ case Failure(ex) => println(s"Failed to post, reason: $ex")
+ case Success(response) => println(s"Server responded with $response")
+ }
+ }
+ }
+
+ def isActiveHttpCall(): String = {
+ val responseFuture = http.singleRequest(HttpRequest(uri = "http://localhost:8080/isActive"))
+ .flatMap(response => response.entity.toStrict(5.seconds).map(_.data))
+ .map(p => new String(p.compact.toArray[Byte]))
+ Await.result(responseFuture, 5.seconds)
+ }
+}
diff --git a/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/IntegrationTestSpec.scala b/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/IntegrationTestSpec.scala
new file mode 100644
index 000000000..c63423f79
--- /dev/null
+++ b/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/IntegrationTestSpec.scala
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.http.span.collector.integration
+
+import java.util.concurrent.Executors
+
+import com.expedia.www.haystack.http.span.collector.WebServer
+import org.scalatest._
+
+class IntegrationTestSpec extends WordSpec with GivenWhenThen with Matchers with HttpProducer with LocalKafkaConsumer
+ with OptionValues with BeforeAndAfterAll {
+
+ private val executor = Executors.newSingleThreadExecutor()
+
+ override def beforeAll(): Unit = {
+ executor.submit(new Runnable {
+ override def run(): Unit = WebServer.main(null)
+ })
+ // wait a few seconds for the app to start
+ Thread.sleep(15000)
+ }
+
+ override def afterAll(): Unit = { }
+}
\ No newline at end of file
diff --git a/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/LocalKafkaConsumer.scala b/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/LocalKafkaConsumer.scala
new file mode 100644
index 000000000..0b37faf11
--- /dev/null
+++ b/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/LocalKafkaConsumer.scala
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.http.span.collector.integration
+
+import java.util.Properties
+
+import com.expedia.www.haystack.http.span.collector.integration.config.TestConfiguration
+import org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener
+import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
+import org.apache.kafka.common.serialization.ByteArrayDeserializer
+
+import scala.collection.JavaConversions._
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+import scala.concurrent.duration._
+
+trait LocalKafkaConsumer {
+
+ private val kafkaConsumer = {
+ val consumerProperties = new Properties()
+ consumerProperties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "http-to-kafka-test")
+ consumerProperties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, TestConfiguration.remoteKafkaHost + ":" + TestConfiguration.kafkaPort)
+ consumerProperties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getCanonicalName)
+ consumerProperties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getCanonicalName)
+ new KafkaConsumer[Array[Byte], Array[Byte]](consumerProperties)
+ }
+
+ kafkaConsumer.subscribe(List(TestConfiguration.kafkaStreamName).asJava, new NoOpConsumerRebalanceListener())
+
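+ /**
+   * polls the local kafka topic until at least `minExpectedCount` records arrive or `maxWait` elapses,
+   * and fails with a RuntimeException if too few records were read in time.
+   */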
+ def readRecordsFromKafka(minExpectedCount: Int, maxWait: FiniteDuration): List[Array[Byte]] = {
+ val records = mutable.ListBuffer[Array[Byte]]()
+ var received: Int = 0
+
+ var waitTimeLeft = maxWait.toMillis
+ var keepPolling = true
+ while (keepPolling) {
+ kafkaConsumer.poll(250).records(TestConfiguration.kafkaStreamName).map(rec => {
+ received += 1
+ records += rec.value()
+ })
+ if(received < minExpectedCount && waitTimeLeft > 0) {
+ Thread.sleep(1000)
+ waitTimeLeft -= 1000
+ } else {
+ keepPolling = false
+ }
+ }
+
+ if(records.size < minExpectedCount) throw new RuntimeException("Failed to read the expected number of records from kafka")
+
+ records.toList
+ }
+
+ def shutdownKafkaConsumer(): Unit = {
+ if(kafkaConsumer != null) kafkaConsumer.close()
+ }
+}
diff --git a/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/config/TestConfiguration.scala b/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/config/TestConfiguration.scala
new file mode 100644
index 000000000..099a6605d
--- /dev/null
+++ b/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/config/TestConfiguration.scala
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.http.span.collector.integration.config
+
+object TestConfiguration {
+ val remoteKafkaHost = "kafkasvc"
+ val kafkaPort = 9092
+ val kafkaStreamName = "proto-spans"
+}
diff --git a/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/tests/HttpSpanCollectorSpec.scala b/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/tests/HttpSpanCollectorSpec.scala
new file mode 100644
index 000000000..e39f59a3a
--- /dev/null
+++ b/collector/http/src/test/scala/com/expedia/www/haystack/http/span/collector/integration/tests/HttpSpanCollectorSpec.scala
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.http.span.collector.integration.tests
+
+import akka.http.scaladsl.model.ContentTypes
+import com.expedia.open.tracing.Tag.TagType
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.http.span.collector.integration.IntegrationTestSpec
+import com.expedia.www.haystack.http.span.collector.json.{Log => JLog, Span => JSpan, Tag => JTag}
+import org.json4s.DefaultFormats
+import org.json4s.jackson.Serialization
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration._
+
+class HttpSpanCollectorSpec extends IntegrationTestSpec {
+
+ implicit val formats = DefaultFormats
+ private val StartTimeMicros = System.currentTimeMillis() * 1000
+ private val DurationMicros = 42
+
+ "Http span collector" should {
+
+ // this test primarily works around an issue with the Kafka docker image:
+ // the first put fails for some reason
+ "connect with http and kafka" in {
+
+ Given("a valid span")
+ val spanBytes = Span.newBuilder().setTraceId("traceid").setSpanId("span-id-1").build().toByteArray
+
+ When("the span is sent over http")
+ postHttp(List(spanBytes, spanBytes))
+
+ Then("it should be pushed to kafka")
+ readRecordsFromKafka(0, 1.second).headOption
+ }
+
+ "read valid proto spans from kafka if produced proto spans on http" in {
+
+ Given("valid proto spans")
+ val span_1 = Span.newBuilder().setTraceId("trace-id-1").setSpanId("span-id-1").setOperationName("operation")
+ .setServiceName("service").setStartTime(StartTimeMicros).setDuration(DurationMicros).build().toByteArray
+ val span_2 = Span.newBuilder().setTraceId("trace-id-1").setSpanId("span-id-2").setOperationName("operation")
+ .setServiceName("service").setStartTime(StartTimeMicros).setDuration(DurationMicros).build().toByteArray
+ val span_3 = Span.newBuilder().setTraceId("trace-id-2").setSpanId("span-id-3").setOperationName("operation")
+ .setServiceName("service").setStartTime(StartTimeMicros).setDuration(DurationMicros).build().toByteArray
+ val span_4 = Span.newBuilder().setTraceId("trace-id-2").setSpanId("span-id-4").setOperationName("operation")
+ .setServiceName("service").setStartTime(StartTimeMicros).setDuration(DurationMicros).build().toByteArray
+
+ When("the span is sent to http span collector")
+ postHttp(List(span_1, span_2, span_3, span_4))
+
+ Then("it should be pushed to kafka with partition key as its trace id")
+ val records = readRecordsFromKafka(4, 5.seconds)
+ records should not be empty
+ val spans = records.map(Span.parseFrom)
+ spans.map(_.getTraceId).toSet should contain allOf("trace-id-1", "trace-id-2")
+ spans.map(_.getSpanId) should contain allOf("span-id-1", "span-id-2", "span-id-3", "span-id-4")
+ }
+
+
+ "read valid proto spans from kafka if produced json spans" in {
+ Given("valid json spans")
+ val tags = List(JTag("number", 100), JTag("some-string", "str"), JTag("some-boolean", true), JTag("some-double", 10.5))
+ val logs = List(JLog(StartTimeMicros, List(JTag("errorcode", 1))))
+ val spanJsonBytesList = List(
+ JSpan("trace-id-1", "span-id-1", None, "service1", "operation1", StartTimeMicros, DurationMicros, tags, Nil),
+ JSpan("trace-id-2", "span-id-2", Some("parent-span-id-2"), "service2", "operation2", StartTimeMicros, DurationMicros, tags, logs),
+ JSpan("trace-id-3", "span-id-3", None, "service3", "operation3", StartTimeMicros, DurationMicros, tags, Nil),
+ JSpan("trace-id-1", "span-id-4", None, "service1", "operation1", StartTimeMicros, DurationMicros, Nil, Nil)
+ ).map(jsonSpan => Serialization.write(jsonSpan).getBytes("utf-8"))
+
+ When("the span is sent to http span collector")
+ postHttp(spanJsonBytesList, ContentTypes.`application/json`)
+
+ Then("it should be pushed to kafka with partition key as its trace id")
+ val records = readRecordsFromKafka(4, 5.seconds)
+ records should not be empty
+ val spans = records.map(Span.parseFrom)
+ spans.map(_.getServiceName).toSet should contain allOf("service1", "service2", "service3")
+ spans.map(_.getOperationName).toSet should contain allOf("operation1", "operation2", "operation3")
+ spans.map(_.getStartTime).toSet.head shouldBe StartTimeMicros
+ spans.map(_.getDuration).toSet.head shouldBe DurationMicros
+ spans.map(_.getTraceId).toSet should contain allOf("trace-id-1", "trace-id-2", "trace-id-3")
+ spans.map(_.getSpanId) should contain allOf("span-id-1", "span-id-2", "span-id-3", "span-id-4")
+
+ val span2 = spans.find(_.getTraceId == "trace-id-2").get
+ span2.getParentSpanId shouldBe "parent-span-id-2"
+ span2.getTagsCount shouldBe tags.size
+
+ tags.foreach { jTag =>
+ val tag = span2.getTagsList.asScala.find(_.getKey.equalsIgnoreCase(jTag.key)).get
+ protoTagValue(tag) shouldBe jTag.value
+ }
+
+ span2.getLogsCount shouldBe logs.size
+ logs foreach { jLog =>
+ val logFields = span2.getLogsList.asScala.find(_.getTimestamp == jLog.timestamp).get.getFieldsList.asScala
+ jLog.fields.foreach { jTag =>
+ val tag = logFields.find(_.getKey.equalsIgnoreCase(jTag.key)).get
+ protoTagValue(tag) shouldBe jTag.value
+ }
+ }
+ }
+
+ "isActive endpoint should work" in {
+ Given("an empty http request")
+ When("/isActive endpoint is called")
+ val response = isActiveHttpCall()
+ Then("response should be active")
+ response shouldBe "ACTIVE"
+ }
+ }
+
+ private def protoTagValue(tag: Tag): Any = {
+ tag.getType match {
+ case TagType.STRING => tag.getVStr
+ case TagType.BOOL => tag.getVBool
+ case TagType.LONG => tag.getVLong
+ case TagType.DOUBLE => tag.getVDouble
+ case _ => fail("fail to find the proto tag value")
+ }
+ }
+}
diff --git a/collector/kinesis/Makefile b/collector/kinesis/Makefile
new file mode 100644
index 000000000..8083d14c6
--- /dev/null
+++ b/collector/kinesis/Makefile
@@ -0,0 +1,50 @@
+.PHONY: docker_build prepare_integration_test_env integration_test release
+
+export DOCKER_ORG := expediadotcom
+export DOCKER_IMAGE_NAME := haystack-kinesis-span-collector
+PWD := $(shell pwd)
+
+docker_build:
+ docker build -t $(DOCKER_IMAGE_NAME) -f build/docker/Dockerfile .
+
+prepare_integration_test_env: docker_build
+ # copy plugin jars used by the integration test
+ mvn -f ${PWD}/../ clean package -pl sample-span-decorator -am
+ mkdir -p ${PWD}/plugins/decorators
+ cp ${PWD}/../sample-span-decorator/target/sample-span-decorator.jar ${PWD}/plugins/decorators/.
+
+ docker-compose -f build/integration-tests/docker-compose.yml -p sandbox up -d
+
+ # kafka sometimes takes time to start
+ sleep 30
+
+ # create the stream and dynamodb table
+ docker run \
+ -it --network=sandbox_default \
+ -e "AWS_CBOR_DISABLE=1" \
+ -v $(PWD)/build/integration-tests/scripts:/scripts \
+ -w /scripts \
+ node:6.11.3 \
+ ./setup.sh
+
+integration_test: prepare_integration_test_env
+ # run tests in a container so that we can join the docker-compose network and talk to kafka and kinesis
+ docker run \
+ -it \
+ --network=sandbox_default \
+ -e "AWS_CBOR_DISABLE=1" \
+ -e "AWS_ACCESS_KEY=fake" \
+ -e "AWS_SECRET_KEY=fake" \
+ -v $(PWD)/..:/src \
+ -v ~/.m2:/root/.m2 \
+ -w /src \
+ maven:3.5.0-jdk-8 \
+ mvn scoverage:integration-check -pl kinesis -am
+
+ # stop all the containers
+ docker-compose -f build/integration-tests/docker-compose.yml -p sandbox stop
+ docker rm $(shell docker ps -a -q)
+ docker volume rm $(shell docker volume ls -q)
+
+release: docker_build
+ ../deployment/scripts/publish-to-docker-hub.sh
diff --git a/collector/kinesis/README.md b/collector/kinesis/README.md
new file mode 100644
index 000000000..e50780e67
--- /dev/null
+++ b/collector/kinesis/README.md
@@ -0,0 +1,13 @@
+# haystack-kinesis-span-collector
+This haystack component reads batches of spans from a Kinesis stream and publishes them to a Kafka topic.
+It expects [Batch](https://github.com/ExpediaDotCom/haystack-idl/blob/master/proto/span.proto) protobuf objects in the stream.
+It deserializes each proto object and uses every span's TraceId as the partition key when writing to the Kafka topic.
+This component uses the [KCL](http://docs.aws.amazon.com/streams/latest/dev/developing-consumers-with-kcl.html#kinesis-record-processor-overview-kcl)
+library to build the pipeline that reads from the Kinesis stream (via the KCL's high-level consumer API) and writes to Kafka.
+
+## Required Reading
+
+To understand Haystack, we recommend reading the details of the [haystack](https://github.com/ExpediaDotCom/haystack) project.
+
+## Technical Details
+Fill this in as we go along...
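
A condensed sketch of the read-from-Kinesis, write-to-Kafka flow described above, with hypothetical helper names (the actual wiring lives in `KinesisToKafkaPipeline` and `RecordProcessor`):

```scala
import com.expedia.open.tracing.Batch
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import scala.collection.JavaConverters._

// Deserialize one Kinesis record payload as a Batch of spans and publish each
// span keyed by its trace id, so all spans of a trace land in one partition.
def publishBatch(payload: Array[Byte],
                 producer: KafkaProducer[Array[Byte], Array[Byte]],
                 topic: String): Unit = {
  val batch = Batch.parseFrom(payload)
  batch.getSpansList.asScala.foreach { span =>
    producer.send(new ProducerRecord(topic, span.getTraceId.getBytes("UTF-8"), span.toByteArray))
  }
}
```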
diff --git a/collector/kinesis/build/docker/Dockerfile b/collector/kinesis/build/docker/Dockerfile
new file mode 100644
index 000000000..d918f54cb
--- /dev/null
+++ b/collector/kinesis/build/docker/Dockerfile
@@ -0,0 +1,19 @@
+FROM openjdk:8-jre
+MAINTAINER Haystack
+
+ENV APP_NAME haystack-kinesis-span-collector
+ENV APP_HOME /app/bin
+ENV JMXTRANS_AGENT jmxtrans-agent-1.2.6
+
+RUN mkdir -p ${APP_HOME}
+
+COPY target/${APP_NAME}.jar ${APP_HOME}/
+COPY build/docker/start-app.sh ${APP_HOME}/
+COPY build/docker/jmxtrans-agent.xml ${APP_HOME}/
+
+RUN chmod +x ${APP_HOME}/start-app.sh
+ADD https://github.com/jmxtrans/jmxtrans-agent/releases/download/${JMXTRANS_AGENT}/${JMXTRANS_AGENT}.jar ${APP_HOME}/
+
+WORKDIR ${APP_HOME}
+
+ENTRYPOINT ["./start-app.sh"]
diff --git a/collector/kinesis/build/docker/jmxtrans-agent.xml b/collector/kinesis/build/docker/jmxtrans-agent.xml
new file mode 100644
index 000000000..a7af20fce
--- /dev/null
+++ b/collector/kinesis/build/docker/jmxtrans-agent.xml
@@ -0,0 +1,33 @@
+<jmxtrans-agent>
+    <queries/>
+
+    <outputWriter class="org.jmxtrans.agent.GraphitePlainTextTcpOutputWriter">
+        <host>${HAYSTACK_GRAPHITE_HOST:monitoring-influxdb-graphite.kube-system.svc}</host>
+        <port>${HAYSTACK_GRAPHITE_PORT:2003}</port>
+        <enabled>${HAYSTACK_GRAPHITE_ENABLED:true}</enabled>
+        <namePrefix>${HAYSTACK_GRAPHITE_PREFIX:haystack.collector.kinesis}.#hostname#.</namePrefix>
+    </outputWriter>
+
+    <collectIntervalInSeconds>60</collectIntervalInSeconds>
+</jmxtrans-agent>
diff --git a/collector/kinesis/build/docker/start-app.sh b/collector/kinesis/build/docker/start-app.sh
new file mode 100755
index 000000000..9b7a9a415
--- /dev/null
+++ b/collector/kinesis/build/docker/start-app.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+[ -z "$JAVA_XMS" ] && JAVA_XMS=1024m
+[ -z "$JAVA_XMX" ] && JAVA_XMX=1024m
+[ -z "$JAVA_GC_OPTS" ] && JAVA_GC_OPTS="-XX:+UseG1GC -XX:+ExitOnOutOfMemoryError"
+
+set -e
+JAVA_OPTS="${JAVA_OPTS} \
+-javaagent:${APP_HOME}/${JMXTRANS_AGENT}.jar=${APP_HOME}/jmxtrans-agent.xml \
+${JAVA_GC_OPTS} \
+-Xmx${JAVA_XMX} \
+-Xms${JAVA_XMS} \
+-Dcom.sun.management.jmxremote.authenticate=false \
+-Dcom.sun.management.jmxremote.ssl=false \
+-Dcom.sun.management.jmxremote.port=1098 \
+-Dcom.sun.management.jmxremote.rmi.port=1098 \
+-Dapplication.name=${APP_NAME} \
+-Dapplication.home=${APP_HOME}"
+
+exec java ${JAVA_OPTS} -jar "${APP_HOME}/${APP_NAME}.jar"
diff --git a/collector/kinesis/build/integration-tests/app-integration-test.conf b/collector/kinesis/build/integration-tests/app-integration-test.conf
new file mode 100644
index 000000000..b910d0ba7
--- /dev/null
+++ b/collector/kinesis/build/integration-tests/app-integration-test.conf
@@ -0,0 +1,40 @@
+kafka {
+ producer {
+ topic = "proto-spans"
+ props {
+ bootstrap.servers = "kafkasvc:9092"
+ }
+ }
+}
+
+extractor {
+ output.format = "proto"
+}
+
+kinesis {
+ endpoint = "http://localstack:4568"
+ app.group.name = "haystack-kinesis-proto-span-collector"
+
+ dynamodb.endpoint = "http://localstack:4569"
+
+ stream {
+ name = "haystack-proto-spans"
+ position = "LATEST"
+ }
+
+ checkpoint {
+ interval.ms = 15000
+ retries = 50
+ retry.interval.ms = 250
+ }
+
+ task.backoff.ms = 200
+ max.records.read = 2000
+ idle.time.between.reads.ms = 500
+ shard.sync.interval.ms = 30000
+
+ metrics {
+ level = "NONE"
+ buffer.time.ms = 15000
+ }
+}
\ No newline at end of file
diff --git a/collector/kinesis/build/integration-tests/docker-compose.yml b/collector/kinesis/build/integration-tests/docker-compose.yml
new file mode 100644
index 000000000..d6f78d00b
--- /dev/null
+++ b/collector/kinesis/build/integration-tests/docker-compose.yml
@@ -0,0 +1,34 @@
+version: '3'
+services:
+ zookeeper:
+ image: wurstmeister/zookeeper
+ ports:
+ - "2181"
+ kafkasvc:
+ image: wurstmeister/kafka:0.10.2.1
+ ports:
+ - "9092"
+ depends_on:
+ - zookeeper
+ links:
+ - zookeeper:zk
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: kafkasvc
+ KAFKA_ADVERTISED_PORT: 9092
+ KAFKA_ZOOKEEPER_CONNECT: zk:2181
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ localstack:
+ image: localstack/localstack:0.11.0
+ ports:
+ - '4563-4599:4563-4599'
+ - '8055:8080'
+ environment:
+ - SERVICES=kinesis,dynamodb
+ - KINESIS_SHARD_LIMIT=1
+ - KINESIS_LATENCY=0
+ - DATA_DIR=/tmp/localstack/data
+ - START_WEB=0
+ volumes:
+ - './.localstack:/tmp/localstack'
+ - '/var/run/docker.sock:/var/run/docker.sock'
diff --git a/collector/kinesis/build/integration-tests/scripts/create-dynamo-table.js b/collector/kinesis/build/integration-tests/scripts/create-dynamo-table.js
new file mode 100644
index 000000000..80b937462
--- /dev/null
+++ b/collector/kinesis/build/integration-tests/scripts/create-dynamo-table.js
@@ -0,0 +1,37 @@
+var AWS = require('aws-sdk');
+
+var config = {
+ "accessKeyId": "FAKE",
+ "secretAccessKey": "FAKE",
+ "region": "us-east-1",
+ "dynamoEndpoint": "http://localstack:4569",
+ "tableName": "haystack-kinesis-proto-span-collector",
+ "ShardCount": 1
+};
+
+var dynamodb = new AWS.DynamoDB({ endpoint: new AWS.Endpoint(config.dynamoEndpoint),
+ accessKeyId: config.accessKeyId,
+ secretAccessKey: config.secretAccessKey,
+ region: config.region});
+
+var params = {
+ TableName : config.tableName,
+ KeySchema: [
+ { AttributeName: "leaseKey", KeyType: "HASH"} //Partition key
+ ],
+ AttributeDefinitions: [
+ { AttributeName: "leaseKey", AttributeType: "S" }
+ ],
+ ProvisionedThroughput: {
+ ReadCapacityUnits: 10,
+ WriteCapacityUnits: 10
+ }
+};
+
+dynamodb.createTable(params, function(err, data) {
+ if (err) {
+ console.error("Unable to create table. Error JSON:", JSON.stringify(err, null, 2));
+ } else {
+ console.log("Created table. Table description JSON:", JSON.stringify(data, null, 2));
+ }
+});
\ No newline at end of file
diff --git a/collector/kinesis/build/integration-tests/scripts/create-kinesis-stream.js b/collector/kinesis/build/integration-tests/scripts/create-kinesis-stream.js
new file mode 100644
index 000000000..779e4752e
--- /dev/null
+++ b/collector/kinesis/build/integration-tests/scripts/create-kinesis-stream.js
@@ -0,0 +1,41 @@
+var AWS = require('aws-sdk');
+
+var config = {
+ "accessKeyId": "FAKE",
+ "secretAccessKey": "FAKE",
+ "region": "us-east-1",
+ "kinesisEndpoint": "http://localstack:4568",
+ "kinesisPort": 4568,
+ "StreamName": "haystack-proto-spans",
+ "ShardCount": 1
+};
+
+var kinesis = new AWS.Kinesis({
+ endpoint: config.kinesisEndpoint,
+ accessKeyId: config.accessKeyId,
+ secretAccessKey: config.secretAccessKey,
+ region: config.region
+});
+
+AWS.config.update({});
+
+
+kinesis.listStreams({ }, function(err, data) {
+ if (err) throw err;
+
+ console.log('Existing streams: ', data);
+
+ if(data.StreamNames.includes(config.StreamName)) {
+ console.log('Stream already exists');
+ } else {
+ kinesis.createStream({ StreamName: config.StreamName, ShardCount: config.ShardCount }, function (err) {
+ if (err) throw err;
+
+ kinesis.describeStream({ StreamName: config.StreamName }, function(err, data) {
+ if (err) throw err;
+ console.log('Stream ready: ', data);
+ });
+ });
+ }
+});
+
diff --git a/collector/kinesis/build/integration-tests/scripts/package.json b/collector/kinesis/build/integration-tests/scripts/package.json
new file mode 100644
index 000000000..53856247d
--- /dev/null
+++ b/collector/kinesis/build/integration-tests/scripts/package.json
@@ -0,0 +1,8 @@
+{
+ "name": "kinesis-stream-bootstrap",
+ "version": "1.0.0",
+ "private": true,
+ "dependencies": {
+ "aws-sdk": "2.7.9"
+ }
+}
diff --git a/collector/kinesis/build/integration-tests/scripts/setup.sh b/collector/kinesis/build/integration-tests/scripts/setup.sh
new file mode 100755
index 000000000..71f39af14
--- /dev/null
+++ b/collector/kinesis/build/integration-tests/scripts/setup.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+npm install
+node create-kinesis-stream.js
+node create-dynamo-table.js
\ No newline at end of file
diff --git a/collector/kinesis/pom.xml b/collector/kinesis/pom.xml
new file mode 100644
index 000000000..ae1c213a9
--- /dev/null
+++ b/collector/kinesis/pom.xml
@@ -0,0 +1,176 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>haystack-collector</artifactId>
+        <groupId>com.expedia.www</groupId>
+        <version>1.0-SNAPSHOT</version>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>haystack-kinesis-span-collector</artifactId>
+    <version>1.0-SNAPSHOT</version>
+    <packaging>jar</packaging>
+
+    <properties>
+        <kinesis.client.version>1.13.2</kinesis.client.version>
+        <kinesis.producer.version>0.12.3</kinesis.producer.version>
+        <aws-sdk.version>1.11.670</aws-sdk.version>
+        <mainClass>com.expedia.www.haystack.kinesis.span.collector.App</mainClass>
+        <finalName>${project.artifactId}-${project.version}</finalName>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.expedia.www</groupId>
+            <artifactId>haystack-collector-commons</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.expedia.www</groupId>
+            <artifactId>haystack-span-decorators</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.httpcomponents</groupId>
+            <artifactId>httpclient</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>amazon-kinesis-client</artifactId>
+            <version>${kinesis.client.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>jcl-over-slf4j</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-sts</artifactId>
+            <version>${aws-sdk.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.expedia.www</groupId>
+            <artifactId>haystack-logback-metrics-appender</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>amazon-kinesis-producer</artifactId>
+            <version>${kinesis.producer.version}</version>
+            <scope>test</scope>
+            <exclusions>
+                <exclusion>
+                    <groupId>com.fasterxml.jackson.dataformat</groupId>
+                    <artifactId>jackson-dataformat-cbor</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>com.amazonaws</groupId>
+                    <artifactId>aws-java-sdk-core</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <finalName>${finalName}</finalName>
+        <plugins>
+            <plugin>
+                <groupId>org.scalatest</groupId>
+                <artifactId>scalatest-maven-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>test</id>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                        <configuration>
+                            <filereports>haystack-test</filereports>
+                            <membersOnlySuites>com.expedia.www.haystack.kinesis.span.collector.unit</membersOnlySuites>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>integration-test</id>
+                        <phase>integration-test</phase>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                        <configuration>
+                            <membersOnlySuites>com.expedia.www.haystack.kinesis.span.collector.integration</membersOnlySuites>
+                            <environmentVariables>
+                                <HAYSTACK_OVERRIDES_CONFIG_PATH>/src/kinesis/build/integration-tests/app-integration-test.conf</HAYSTACK_OVERRIDES_CONFIG_PATH>
+                                <AWS_ACCESS_KEY>fake</AWS_ACCESS_KEY>
+                                <AWS_CBOR_DISABLE>1</AWS_CBOR_DISABLE>
+                                <AWS_SECRET_KEY>fake</AWS_SECRET_KEY>
+                            </environmentVariables>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <transformers>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
+                                    <resource>reference.conf</resource>
+                                </transformer>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                                    <mainClass>${mainClass}</mainClass>
+                                </transformer>
+                            </transformers>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>net.alchim31.maven</groupId>
+                <artifactId>scala-maven-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.scalastyle</groupId>
+                <artifactId>scalastyle-maven-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/collector/kinesis/src/main/resources/config/base.conf b/collector/kinesis/src/main/resources/config/base.conf
new file mode 100644
index 000000000..5e8171e5e
--- /dev/null
+++ b/collector/kinesis/src/main/resources/config/base.conf
@@ -0,0 +1,70 @@
+health.status.path = "/app/isHealthy"
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
+
+kafka {
+ producer {
+ topic = "proto-spans"
+ props {
+ bootstrap.servers = "kafkasvc:9092"
+ }
+ }
+}
+
+extractor {
+ output.format = "proto"
+
+ spans.validation {
+
+ # Validate size of span. Truncate span tags when size exceeds specified limit.
+ # enable: true/false
+ # log.only: if enabled, only logs such spans but doesn't truncate the tags
+ # max.size.limit: maximum size allowed
+ # message.tag.key: this tag key will be added when tags are truncated
+ # message.tag.value: value of the above tag key indicating the truncation
+ # skip.tags: truncate all span tags except these
+ # skip.services: truncate span tags for all services except these
+ max.size {
+ enable = "false"
+ log.only = "false"
+ max.size.limit = 5000 // in bytes
+ message.tag.key = "X-HAYSTACK-SPAN-INFO"
+ message.tag.value = "Tags are truncated. REASON: Span Size Limit Exceeded. Please contact Haystack for more details"
+ skip.tags = ["error"]
+ skip.services = []
+ }
+ }
+}
+
+kinesis {
+ #optional, uncomment following if you want to connect to kinesis using sts role arn
+ #sts.role.arn = "provide the arn here"
+
+ aws.region = "us-west-2"
+ app.group.name = "haystack-kinesis-proto-span-collector"
+
+ # optional, use endpoint property along with aws.region for cross region reading of data. Otherwise, you might see
+ # latency issues for cross region access
+ #endpoint = "vpce-xxxxxxxxx-yyyyyy.kinesis.us-east-1.vpce.amazonaws.com"
+
+ stream {
+ name = "haystack-proto-spans"
+ position = "LATEST"
+ }
+
+ checkpoint {
+ interval.ms = 15000
+ retries = 50
+ retry.interval.ms = 250
+ }
+
+ task.backoff.ms = 200
+ max.records.read = 2000
+ idle.time.between.reads.ms = 500
+ shard.sync.interval.ms = 30000
+
+ metrics {
+ level = "NONE"
+ buffer.time.ms = 15000
+ }
+}
diff --git a/collector/kinesis/src/main/resources/logback.xml b/collector/kinesis/src/main/resources/logback.xml
new file mode 100644
index 000000000..971c590ab
--- /dev/null
+++ b/collector/kinesis/src/main/resources/logback.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
+            <immediateFlush>true</immediateFlush>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="async" class="ch.qos.logback.classic.AsyncAppender">
+        <queueSize>${HAYSTACK_LOG_QUEUE_SIZE:-500}</queueSize>
+        <discardingThreshold>${HAYSTACK_LOG_DISCARD_THRESHOLD:-0}</discardingThreshold>
+        <appender-ref ref="console"/>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="async"/>
+    </root>
+</configuration>
diff --git a/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/App.scala b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/App.scala
new file mode 100644
index 000000000..60e725bbc
--- /dev/null
+++ b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/App.scala
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector
+
+import com.codahale.metrics.JmxReporter
+import com.expedia.www.haystack.collector.commons.MetricsSupport
+import com.expedia.www.haystack.collector.commons.health.{HealthController, UpdateHealthStatusFile}
+import com.expedia.www.haystack.collector.commons.logger.LoggerUtils
+import com.expedia.www.haystack.kinesis.span.collector.config.ProjectConfiguration
+import com.expedia.www.haystack.kinesis.span.collector.pipeline.KinesisToKafkaPipeline
+import org.slf4j.LoggerFactory
+
+object App extends MetricsSupport {
+ private val LOGGER = LoggerFactory.getLogger(App.getClass)
+
+ private var pipeline: KinesisToKafkaPipeline = _
+ private var jmxReporter: JmxReporter = _
+
+ def main(args: Array[String]): Unit = {
+ startJmxReporter()
+
+ addShutdownHook()
+
+ import ProjectConfiguration._
+ try {
+
+ healthStatusFile().foreach(statusFile => HealthController.addListener(new UpdateHealthStatusFile(statusFile)))
+
+ pipeline = new KinesisToKafkaPipeline(kafkaProducerConfig(), externalKafkaConfig(), kinesisConsumerConfig(), extractorConfiguration(), additionalTagConfig(), pluginConfiguration())
+ pipeline.run()
+ } catch {
+ case ex: Exception =>
+ LOGGER.error("Observed fatal exception while running the app", ex)
+ shutdown()
+ System.exit(1)
+ }
+ }
+
+ private def addShutdownHook(): Unit = {
+ Runtime.getRuntime.addShutdownHook(new Thread(new Runnable {
+ override def run(): Unit = {
+ LOGGER.info("Shutdown hook is invoked, tearing down the application.")
+ shutdown()
+ }
+ }))
+ }
+
+ private def shutdown(): Unit = {
+ if (pipeline != null) pipeline.close()
+ if (jmxReporter != null) jmxReporter.stop()
+ LoggerUtils.shutdownLogger()
+ }
+
+ private def startJmxReporter() = {
+ jmxReporter = JmxReporter.forRegistry(metricRegistry).build()
+ jmxReporter.start()
+ }
+}
diff --git a/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/config/ProjectConfiguration.scala b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/config/ProjectConfiguration.scala
new file mode 100644
index 000000000..901d3e9ca
--- /dev/null
+++ b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/config/ProjectConfiguration.scala
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector.config
+
+import java.util.concurrent.TimeUnit
+
+import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream
+import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel
+import com.expedia.www.haystack.collector.commons.config.{ConfigurationLoader, ExternalKafkaConfiguration, ExtractorConfiguration, KafkaProduceConfiguration}
+import com.expedia.www.haystack.kinesis.span.collector.config.entities.KinesisConsumerConfiguration
+import com.expedia.www.haystack.span.decorators.plugin.config.Plugin
+
+import scala.concurrent.duration._
+
+object ProjectConfiguration {
+
+ private val config = ConfigurationLoader.loadConfigFileWithEnvOverrides()
+
+ def healthStatusFile(): Option[String] = if (config.hasPath("health.status.path")) Some(config.getString("health.status.path")) else None
+
+ def kafkaProducerConfig(): KafkaProduceConfiguration = ConfigurationLoader.kafkaProducerConfig(config)
+
+ def extractorConfiguration(): ExtractorConfiguration = ConfigurationLoader.extractorConfiguration(config)
+
+ def kinesisConsumerConfig(): KinesisConsumerConfiguration = {
+ val kinesis = config.getConfig("kinesis")
+ val stsRoleArn = if (kinesis.hasPath("sts.role.arn")) Some(kinesis.getString("sts.role.arn")) else None
+
+ KinesisConsumerConfiguration(
+ awsRegion = kinesis.getString("aws.region"),
+ stsRoleArn = stsRoleArn,
+ appGroupName = kinesis.getString("app.group.name"),
+ streamName = kinesis.getString("stream.name"),
+ streamPosition = InitialPositionInStream.valueOf(kinesis.getString("stream.position")),
+ kinesis.getDuration("checkpoint.interval.ms", TimeUnit.MILLISECONDS).millis,
+ kinesis.getInt("checkpoint.retries"),
+ kinesis.getDuration("checkpoint.retry.interval.ms", TimeUnit.MILLISECONDS).millis,
+ kinesisEndpoint = if (kinesis.hasPath("endpoint")) Some(kinesis.getString("endpoint")) else None,
+ dynamoEndpoint = if (kinesis.hasPath("dynamodb.endpoint")) Some(kinesis.getString("dynamodb.endpoint")) else None,
+ dynamoTableName = if (kinesis.hasPath("dynamodb.table")) Some(kinesis.getString("dynamodb.table")) else None,
+ maxRecordsToRead = kinesis.getInt("max.records.read"),
+ idleTimeBetweenReads = kinesis.getDuration("idle.time.between.reads.ms", TimeUnit.MILLISECONDS).millis,
+ shardSyncInterval = kinesis.getDuration("shard.sync.interval.ms", TimeUnit.MILLISECONDS).millis,
+ metricsLevel = MetricsLevel.fromName(kinesis.getString("metrics.level")),
+ metricsBufferTime = kinesis.getDuration("metrics.buffer.time.ms", TimeUnit.MILLISECONDS).millis,
+ taskBackoffTime = kinesis.getDuration("task.backoff.ms", TimeUnit.MILLISECONDS).millis)
+ }
+
+ def externalKafkaConfig(): List[ExternalKafkaConfiguration] = ConfigurationLoader.externalKafkaConfiguration(config)
+
+ def additionalTagConfig(): Map[String, String] = ConfigurationLoader.additionalTagsConfiguration(config)
+
+ def pluginConfiguration(): Plugin = ConfigurationLoader.pluginConfigurations(config)
+}
diff --git a/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/config/entities/KinesisConsumerConfiguration.scala b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/config/entities/KinesisConsumerConfiguration.scala
new file mode 100644
index 000000000..97fdc03ac
--- /dev/null
+++ b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/config/entities/KinesisConsumerConfiguration.scala
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector.config.entities
+
+import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream
+import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel
+
+import scala.concurrent.duration.FiniteDuration
+
+case class KinesisConsumerConfiguration(awsRegion: String,
+ stsRoleArn: Option[String],
+ appGroupName: String,
+ streamName: String,
+ streamPosition: InitialPositionInStream,
+ checkpointInterval: FiniteDuration,
+ checkpointRetries: Int,
+ checkpointRetryInterval: FiniteDuration,
+ kinesisEndpoint: Option[String],
+ dynamoEndpoint: Option[String],
+ dynamoTableName: Option[String],
+ maxRecordsToRead: Int,
+ idleTimeBetweenReads: FiniteDuration,
+ shardSyncInterval: FiniteDuration,
+ metricsLevel: MetricsLevel,
+ metricsBufferTime: FiniteDuration,
+ taskBackoffTime: FiniteDuration)
diff --git a/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/kinesis/RecordProcessor.scala b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/kinesis/RecordProcessor.scala
new file mode 100644
index 000000000..14e0e8c01
--- /dev/null
+++ b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/kinesis/RecordProcessor.scala
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector.kinesis
+
+import java.util.Date
+
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor
+import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason
+import com.amazonaws.services.kinesis.clientlibrary.types.{InitializationInput, ProcessRecordsInput, ShutdownInput}
+import com.expedia.www.haystack.collector.commons.MetricsSupport
+import com.expedia.www.haystack.collector.commons.health.HealthController
+import com.expedia.www.haystack.collector.commons.record.{KeyValueExtractor, KeyValuePair}
+import com.expedia.www.haystack.collector.commons.sink.RecordSink
+import com.expedia.www.haystack.kinesis.span.collector.config.entities.KinesisConsumerConfiguration
+import com.expedia.www.haystack.kinesis.span.collector.metrics.AppMetricNames
+import org.slf4j.LoggerFactory
+
+import scala.annotation.tailrec
+import scala.collection.JavaConversions._
+import scala.concurrent.duration.FiniteDuration
+import scala.util.{Failure, Success, Try}
+
+object RecordProcessor extends MetricsSupport {
+ private val LOGGER = LoggerFactory.getLogger(classOf[RecordProcessor])
+ private val ingestionSuccessMeter = metricRegistry.meter(AppMetricNames.KINESIS_INGESTION_SUCCESS)
+ private val processingLagHistogram = metricRegistry.histogram(AppMetricNames.KINESIS_PROCESSING_LAG)
+ private val checkpointFailureMeter = metricRegistry.meter(AppMetricNames.KINESIS_CHECKPOINT_FAILURE)
+}
+
+class RecordProcessor(config: KinesisConsumerConfiguration, keyValueExtractor: KeyValueExtractor, sink: RecordSink)
+ extends IRecordProcessor {
+
+ import RecordProcessor._
+
+ private var shardId: String = _
+ private var nextCheckpointTimeInMillis: Long = 0L
+
+ private def checkpoint(checkpointer: IRecordProcessorCheckpointer): Unit = {
+ LOGGER.debug(s"Performing the checkpointing for shardId=$shardId")
+
+ retryWithBackOff(config.checkpointRetries, config.checkpointRetryInterval)(() => {
+ checkpointer.checkpoint()
+ }) match {
+ case Failure(r) =>
+ checkpointFailureMeter.mark()
+ LOGGER.error(s"Fail to checkpoint after all retries for shardId=$shardId with reason", r)
+ case _ => LOGGER.info(s"Successfully checkpointing done for shardId=$shardId")
+ }
+ }
+
+  /**
+    * Process the incoming kinesis records. The processor extracts the traceId (used as the kafka
+    * partition key) and the span as a byte array from every record.
+    * @param records kinesis records
+    */
+ override def processRecords(records: ProcessRecordsInput): Unit = {
+    var lastRecordArrivalTimestamp: Date = null
+
+ records
+ .getRecords
+ .foreach(record => {
+ lastRecordArrivalTimestamp = record.getApproximateArrivalTimestamp
+ Try(keyValueExtractor.extractKeyValuePairs(record.getData.array())) match {
+ case Success(spans) => spans.foreach(sp => sink.toAsync(sp, sinkResponseHandler))
+        case _ => // skip logging, the extractor already logs the failure
+ }
+ })
+
+ // this is somewhat similar to the IteratorAgeMilliseconds metric reported by Cloudwatch for Kinesis stream
+    if (lastRecordArrivalTimestamp != null) {
+ processingLagHistogram.update(System.currentTimeMillis() - lastRecordArrivalTimestamp.getTime)
+ }
+
+ ingestionSuccessMeter.mark(records.getRecords.size())
+
+ if (System.currentTimeMillis > nextCheckpointTimeInMillis) {
+ checkpoint(records.getCheckpointer)
+ nextCheckpointTimeInMillis = System.currentTimeMillis + config.checkpointInterval.toMillis
+ }
+ }
+
+  /**
+    * Initialize the kinesis record processor.
+    * @param input initialization input that contains the shardId and sequence number
+    */
+ override def initialize(input: InitializationInput): Unit = {
+ LOGGER.info(s"Initializing the processor for shardId=${input.getShardId} and SeqNumber=${input.getExtendedSequenceNumber}")
+ this.shardId = input.getShardId
+ }
+
+  /**
+    * Shut down the processor. If the shutdown reason is TERMINATE, perform the pending checkpointing.
+    * @param shutdownInput shutdown input that contains the reason
+    */
+ override def shutdown(shutdownInput: ShutdownInput): Unit = {
+ LOGGER.info(s"Shutting down record processor for shardId=$shardId")
+
+ // Important to checkpoint after reaching end of shard, so we can start processing data from child shards.
+ if (shutdownInput.getShutdownReason == ShutdownReason.TERMINATE) {
+ checkpoint(shutdownInput.getCheckpointer)
+ }
+ }
+
+ @tailrec
+ final def retryWithBackOff[T](maxRetry: Int, backOff: FiniteDuration)(f: () => T): Try[T] = {
+ Try {
+ f()
+ } match {
+ case Failure(reason) if maxRetry > 0 && !reason.isInstanceOf[InterruptedException] && !reason.isInstanceOf[ShutdownException] =>
+ LOGGER.error(s"Fail to perform the checkpointing operation with retries left=$maxRetry ", reason)
+ Thread.sleep(backOff.toMillis)
+ retryWithBackOff(maxRetry - 1, backOff)(f)
+ case result@_ => result
+ }
+ }
+
+ private val sinkResponseHandler = (_: KeyValuePair[Array[Byte], Array[Byte]], ex: Exception) => {
+ if (ex != null) HealthController.setUnhealthy()
+ }
+}
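+
+// A hedged, self-contained sketch (illustration only, not invoked by the collector) of the
+// retry-with-backoff pattern used by retryWithBackOff above: the operation is retried with a
+// fixed pause until it succeeds or the retries are exhausted. `flakyCheckpoint` is hypothetical.
+object RetryWithBackOffSketch extends App {
+  import scala.concurrent.duration._
+
+  @tailrec
+  def retry[T](retriesLeft: Int, backOff: FiniteDuration)(f: () => T): Try[T] =
+    Try(f()) match {
+      case Failure(_) if retriesLeft > 0 =>
+        Thread.sleep(backOff.toMillis)
+        retry(retriesLeft - 1, backOff)(f)
+      case result => result
+    }
+
+  private var attempts = 0
+  private def flakyCheckpoint(): Unit = {
+    attempts += 1
+    if (attempts < 3) throw new RuntimeException(s"checkpoint attempt $attempts failed")
+  }
+
+  retry(5, 100.millis)(() => flakyCheckpoint()) match {
+    case Success(_) => println(s"checkpointed after $attempts attempts")
+    case Failure(ex) => println(s"gave up: ${ex.getMessage}")
+  }
+}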
diff --git a/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/kinesis/client/KinesisConsumer.scala b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/kinesis/client/KinesisConsumer.scala
new file mode 100644
index 000000000..36222aa13
--- /dev/null
+++ b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/kinesis/client/KinesisConsumer.scala
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector.kinesis.client
+
+import java.util.UUID
+
+import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
+import com.amazonaws.auth.profile.internal.securitytoken.{RoleInfo, STSProfileCredentialsServiceProvider}
+import com.amazonaws.regions.Regions
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory
+import com.amazonaws.services.kinesis.clientlibrary.lib.worker.{KinesisClientLibConfiguration, Worker}
+import com.expedia.www.haystack.collector.commons.health.HealthController
+import com.expedia.www.haystack.collector.commons.record.KeyValueExtractor
+import com.expedia.www.haystack.collector.commons.sink.RecordSink
+import com.expedia.www.haystack.kinesis.span.collector.config.entities.KinesisConsumerConfiguration
+import com.expedia.www.haystack.kinesis.span.collector.kinesis.RecordProcessor
+import org.apache.commons.lang3.StringUtils
+import org.slf4j.LoggerFactory
+
+class KinesisConsumer(config: KinesisConsumerConfiguration,
+ keyValueExtractor: KeyValueExtractor,
+ sink: RecordSink) extends AutoCloseable {
+ private val LOGGER = LoggerFactory.getLogger(classOf[KinesisConsumer])
+
+ private var worker: Worker = _
+
+ // this is a blocking call
+ def startWorker(): Unit = {
+ worker = buildWorker(createProcessorFactory())
+
+ LOGGER.info("Starting the kinesis worker now.")
+
+ // mark collector as healthy
+ HealthController.setHealthy()
+
+    // the run method blocks this thread; the processing loop starts now
+ worker.run()
+ }
+
+ private def createProcessorFactory() = {
+ new IRecordProcessorFactory {
+ override def createProcessor() = new RecordProcessor(config, keyValueExtractor, sink)
+ }
+ }
+
+  /**
+    * build a single kinesis consumer worker. The worker creates a record processor for every shard it leases
+    *
+    * @param processorFactory factory to create record processors
+    * @return the configured kinesis worker
+    */
+ private def buildWorker(processorFactory: IRecordProcessorFactory): Worker = {
+ val region = Regions.fromName(config.awsRegion)
+
+ val workerId = UUID.randomUUID.toString
+
+ val kinesisCredsProvider = config.stsRoleArn match {
+ case Some(arn) if StringUtils.isNotEmpty(arn) => new STSProfileCredentialsServiceProvider(new RoleInfo().withRoleArn(arn).withRoleSessionName(config.appGroupName))
+ case _ => DefaultAWSCredentialsProviderChain.getInstance
+ }
+
+ val kinesisClientConfig = new KinesisClientLibConfiguration(
+ config.appGroupName,
+ config.streamName,
+ kinesisCredsProvider,
+ DefaultAWSCredentialsProviderChain.getInstance,
+ DefaultAWSCredentialsProviderChain.getInstance,
+ workerId)
+
+ kinesisClientConfig
+ .withMaxRecords(config.maxRecordsToRead)
+ .withIdleTimeBetweenReadsInMillis(config.idleTimeBetweenReads.toMillis)
+ .withShardSyncIntervalMillis(config.shardSyncInterval.toMillis)
+ .withInitialPositionInStream(config.streamPosition)
+ .withMetricsLevel(config.metricsLevel)
+ .withMetricsBufferTimeMillis(config.metricsBufferTime.toMillis)
+ .withRegionName(region.getName)
+ .withTableName(config.dynamoTableName.getOrElse(config.appGroupName))
+ .withTaskBackoffTimeMillis(config.taskBackoffTime.toMillis)
+
+    config.dynamoEndpoint.foreach(kinesisClientConfig.withDynamoDBEndpoint)
+    config.kinesisEndpoint.foreach(kinesisClientConfig.withKinesisEndpoint)
+
+ new Worker.Builder()
+ .config(kinesisClientConfig)
+ .recordProcessorFactory(processorFactory)
+ .build()
+ }
+
+ /**
+    * close the kinesis worker. The shutdown also cleans up the resources allocated by the worker
+ */
+ override def close(): Unit = worker.shutdown()
+}
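+
+// Hedged usage sketch (illustration only, not part of this change): how a caller might run the
+// consumer and guarantee that close() releases the worker's resources. The extractor and sink
+// values are assumed to come from the surrounding application wiring.
+object KinesisConsumerUsageSketch {
+  def run(config: KinesisConsumerConfiguration, extractor: KeyValueExtractor, sink: RecordSink): Unit = {
+    val consumer = new KinesisConsumer(config, extractor, sink)
+    sys.addShutdownHook(consumer.close()) // worker.shutdown() releases leases and threads
+    consumer.startWorker()                // blocks: the KCL worker loop runs on this thread
+  }
+}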
diff --git a/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/kinesis/record/TtlAndOperationNames.java b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/kinesis/record/TtlAndOperationNames.java
new file mode 100644
index 000000000..4a22bdcf6
--- /dev/null
+++ b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/kinesis/record/TtlAndOperationNames.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector.kinesis.record;
+
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * This class is used by ProtoSpanExtractor to keep track of the number of operation names for a particular service.
+ * It is written in Java because Java's atomic classes are the preferred way of handling concurrent maps
+ * and sets, even from Scala, and the objects that count operation names are accessed from multiple threads.
+ */
+public class TtlAndOperationNames {
+    public final Set<String> operationNames = ConcurrentHashMap.newKeySet();
+ private final AtomicLong ttlMillis;
+
+ TtlAndOperationNames(long ttlMillis) {
+ this.ttlMillis = new AtomicLong(ttlMillis);
+ }
+
+ public long getTtlMillis() {
+ return ttlMillis.get();
+ }
+
+ void setTtlMillis(long ttlMillis) {
+ this.ttlMillis.set(ttlMillis);
+ }
+}
diff --git a/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/metrics/AppMetricNames.scala b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/metrics/AppMetricNames.scala
new file mode 100644
index 000000000..13fc763ca
--- /dev/null
+++ b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/metrics/AppMetricNames.scala
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector.metrics
+
+object AppMetricNames {
+
+ val KINESIS_PROCESSING_LAG = "kinesis.processing.lag"
+ val KINESIS_INGESTION_SUCCESS = "kinesis.ingestion-success"
+ val KINESIS_CHECKPOINT_FAILURE = "kinesis.checkpoint.failure"
+}
diff --git a/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/pipeline/KinesisToKafkaPipeline.scala b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/pipeline/KinesisToKafkaPipeline.scala
new file mode 100644
index 000000000..cb7fe64a7
--- /dev/null
+++ b/collector/kinesis/src/main/scala/com/expedia/www/haystack/kinesis/span/collector/pipeline/KinesisToKafkaPipeline.scala
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector.pipeline
+
+import com.expedia.www.haystack.collector.commons.config.{ExternalKafkaConfiguration, ExtractorConfiguration, KafkaProduceConfiguration}
+import com.expedia.www.haystack.collector.commons.sink.kafka.KafkaRecordSink
+import com.expedia.www.haystack.collector.commons.{MetricsSupport, ProtoSpanExtractor, SpanDecoratorFactory}
+import com.expedia.www.haystack.kinesis.span.collector.config.entities.KinesisConsumerConfiguration
+import com.expedia.www.haystack.kinesis.span.collector.kinesis.client.KinesisConsumer
+import com.expedia.www.haystack.span.decorators.SpanDecorator
+import com.expedia.www.haystack.span.decorators.plugin.config.Plugin
+import org.slf4j.LoggerFactory
+
+import scala.util.Try
+
+class KinesisToKafkaPipeline(kafkaProducerConfig: KafkaProduceConfiguration,
+ listExternalKafkaConfig: List[ExternalKafkaConfiguration],
+ kinesisConsumerConfig: KinesisConsumerConfiguration,
+ extractorConfiguration: ExtractorConfiguration,
+ additionalTagsConfig: Map[String, String],
+ pluginConfig: Plugin
+ )
+ extends AutoCloseable with MetricsSupport {
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[KinesisToKafkaPipeline])
+
+ private var kafkaSink: KafkaRecordSink = _
+ private var consumer: KinesisConsumer = _
+ private var listSpanDecorator: List[SpanDecorator] = List()
+
+ /**
+ * run the pipeline. start the kinesis consumer worker and produce the read spans to kafka
+ * the run is a blocking call. kinesis consumer blocks after spinning off the workers
+ */
+ def run(): Unit = {
+ listSpanDecorator = SpanDecoratorFactory.get(pluginConfig, additionalTagsConfig, LOGGER)
+ kafkaSink = new KafkaRecordSink(kafkaProducerConfig, listExternalKafkaConfig)
+ consumer = new KinesisConsumer(kinesisConsumerConfig, new ProtoSpanExtractor(extractorConfiguration, LoggerFactory.getLogger(classOf[ProtoSpanExtractor]), listSpanDecorator), kafkaSink)
+ consumer.startWorker()
+ }
+
+ override def close(): Unit = {
+ Try(consumer.close())
+ Try(kafkaSink.close())
+ }
+}
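+
+// Hedged usage sketch (illustration only): the pipeline is typically run on the main thread and
+// closed from a JVM shutdown hook, since run() blocks inside the KCL worker. The empty map for
+// the additional tags is an assumption; the real app reads them from configuration.
+object KinesisToKafkaPipelineUsageSketch {
+  import com.expedia.www.haystack.kinesis.span.collector.config.ProjectConfiguration
+
+  def main(args: Array[String]): Unit = {
+    val pipeline = new KinesisToKafkaPipeline(
+      ProjectConfiguration.kafkaProducerConfig(),
+      ProjectConfiguration.externalKafkaConfig(),
+      ProjectConfiguration.kinesisConsumerConfig(),
+      ProjectConfiguration.extractorConfiguration(),
+      Map.empty[String, String],
+      ProjectConfiguration.pluginConfiguration())
+
+    sys.addShutdownHook(pipeline.close())
+    pipeline.run() // blocks until the kinesis worker stops
+  }
+}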
diff --git a/collector/kinesis/src/test/resources/config/base.conf b/collector/kinesis/src/test/resources/config/base.conf
new file mode 100644
index 000000000..3037f1d95
--- /dev/null
+++ b/collector/kinesis/src/test/resources/config/base.conf
@@ -0,0 +1,86 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ producer {
+ topic = "proto-spans"
+ props {
+ bootstrap.servers = "kafkasvc:9092"
+ }
+ }
+}
+
+extractor {
+ output.format = "proto"
+ spans.validation {
+
+    # Validate the size of a span. Truncate span tags when the size exceeds the specified limit.
+ # enable: true/false
+ # log.only: if enabled, only logs such spans but doesn't truncate the tags
+ # max.size.limit: maximum size allowed
+ # message.tag.key: this tag key will be added when tags are truncated
+ # message.tag.value: value of the above tag key indicating the truncation
+ # skip.tags: truncate all span tags except these
+ # skip.services: truncate span tags for all services except these
+ max.size {
+ enable = "false"
+ log.only = "false"
+ max.size.limit = 5000 // in bytes
+ message.tag.key = "X-HAYSTACK-SPAN-INFO"
+ message.tag.value = "Tags are truncated. REASON: Span Size Limit Exceeded. Please contact Haystack for more details"
+ skip.tags = ["error"]
+ skip.services = []
+ }
+ }
+}
+
+kinesis {
+ aws.region = "us-west-2"
+
+ app.group.name = "haystack-kinesis-proto-span-collector"
+
+ stream {
+ name = "haystack-proto-spans"
+ position = "LATEST"
+ }
+
+ checkpoint {
+ interval.ms = 15000
+ retries = 50
+ retry.interval.ms = 250
+ }
+
+ task.backoff.ms = 200
+ max.records.read = 2000
+ idle.time.between.reads.ms = 500
+ shard.sync.interval.ms = 30000
+
+ metrics {
+ level = "NONE"
+ buffer.time.ms = 10000
+ }
+}
+
+additionaltags.X-HAYSTACK-SPAN-ADDITIONAL-TAG = ADDITIONAL-TAG
+
+external.kafka.kafka1 {
+ tags {
+ X-HAYSTACK-SPAN-OWNER = OWNER1
+ X-HAYSTACK-SPAN-SENDER = SENDER1
+ }
+ config {
+ topic = "external-proto-spans"
+ props {
+ bootstrap.servers = "kafkasvc:9092"
+ }
+ }
+}
+
+plugins {
+ directory = plugins/decorators
+ plugin1 {
+ name="SAMPLE_SPAN_DECORATOR"
+ config {
+ tag.key = "X-HAYSTACK-PLUGIN-SPAN-DECORATOR"
+ }
+ }
+}
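+
+# Hedged example (illustration only): span-size validation is disabled above; a configuration that
+# actually truncates oversized spans while sparing the "error" tag would flip the flags, e.g.
+#
+#   extractor.spans.validation.max.size {
+#     enable = "true"
+#     log.only = "false"
+#     max.size.limit = 5000
+#     skip.tags = ["error"]
+#   }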
diff --git a/collector/kinesis/src/test/resources/logback-test.xml b/collector/kinesis/src/test/resources/logback-test.xml
new file mode 100644
index 000000000..6d6308afa
--- /dev/null
+++ b/collector/kinesis/src/test/resources/logback-test.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+
+
+    <contextListener class="ch.qos.logback.classic.jul.LevelChangePropagator">
+        <resetJUL>true</resetJUL>
+    </contextListener>
+
+    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+
+            <pattern>%d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="async" class="ch.qos.logback.classic.AsyncAppender">
+
+        <queueSize>${HAYSTACK_LOG_QUEUE_SIZE:-500}</queueSize>
+        <discardingThreshold>${HAYSTACK_LOG_DISCARD_THRESHOLD:-0}</discardingThreshold>
+        <appender-ref ref="console"/>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="async"/>
+    </root>
+</configuration>
diff --git a/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/IntegrationTestSpec.scala b/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/IntegrationTestSpec.scala
new file mode 100644
index 000000000..ffe240bb0
--- /dev/null
+++ b/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/IntegrationTestSpec.scala
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector.integration
+
+import java.io.File
+import java.util.concurrent.Executors
+
+import com.expedia.www.haystack.kinesis.span.collector.App
+import org.scalatest._
+
+class IntegrationTestSpec extends WordSpec with GivenWhenThen with Matchers with LocalKinesisProducer with LocalKafkaConsumer
+ with OptionValues with BeforeAndAfterAll {
+
+ private val executor = Executors.newSingleThreadExecutor()
+
+ override def beforeAll(): Unit = {
+ // check if the stream exists, if not create one
+ createStreamIfNotExists()
+
+ new File("/app").mkdir()
+
+ executor.submit(new Runnable {
+ override def run(): Unit = App.main(null)
+ })
+    // wait a minute to let the app start
+ Thread.sleep(60000)
+ }
+
+ override def afterAll(): Unit = {
+    // shutdown the kinesis client
+ shutdownKinesisClient()
+ }
+}
\ No newline at end of file
diff --git a/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/LocalKafkaConsumer.scala b/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/LocalKafkaConsumer.scala
new file mode 100644
index 000000000..3f59a9569
--- /dev/null
+++ b/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/LocalKafkaConsumer.scala
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector.integration
+
+import java.util.Properties
+import java.util.stream.Collectors
+
+import com.expedia.www.haystack.collector.commons.config.ExternalKafkaConfiguration
+import com.expedia.www.haystack.kinesis.span.collector.config.ProjectConfiguration
+import com.expedia.www.haystack.kinesis.span.collector.integration.config.TestConfiguration
+import org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener
+import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
+import org.apache.kafka.common.serialization.ByteArrayDeserializer
+
+import scala.collection.JavaConversions._
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+import scala.concurrent.duration._
+
+trait LocalKafkaConsumer {
+
+ private val kafkaConsumer = {
+ val consumerProperties = new Properties()
+ consumerProperties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "kinesis-to-kafka-test")
+ consumerProperties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, TestConfiguration.remoteKafkaHost + ":" + TestConfiguration.kafkaPort)
+ consumerProperties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getCanonicalName)
+ consumerProperties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getCanonicalName)
+ new KafkaConsumer[Array[Byte], Array[Byte]](consumerProperties)
+ }
+
+ private val externalKafkaConsumerMap: Map[String, KafkaConsumer[Array[Byte], Array[Byte]]] = {
+ val externalKafkaList: List[ExternalKafkaConfiguration] = ProjectConfiguration.externalKafkaConfig()
+ externalKafkaList.zipWithIndex.map { case (c, i) => {
+ val consumerProperties = new Properties()
+ consumerProperties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, s"kinesis-to-kafka-test-${i}")
+ consumerProperties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, c.kafkaProduceConfiguration.props.getProperty("bootstrap.servers"))
+ consumerProperties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getCanonicalName)
+ consumerProperties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getCanonicalName)
+ val consumer = new KafkaConsumer[Array[Byte], Array[Byte]](consumerProperties)
+ consumer.subscribe(List(c.kafkaProduceConfiguration.topic).asJava, new NoOpConsumerRebalanceListener())
+ c.kafkaProduceConfiguration.topic -> consumer
+ }}.toMap
+ }
+
+ kafkaConsumer.subscribe(List(TestConfiguration.kafkaStreamName).asJava, new NoOpConsumerRebalanceListener())
+
+ def readRecordsFromKafka(minExpectedCount: Int, maxWait: FiniteDuration): List[Array[Byte]] = {
+ val records = mutable.ListBuffer[Array[Byte]]()
+ var received: Int = 0
+
+    var waitTimeLeft = maxWait.toMillis
+    var keepPolling = true
+    while (keepPolling) {
+      kafkaConsumer.poll(250).records(TestConfiguration.kafkaStreamName).foreach(rec => {
+        received += 1
+        records += rec.value()
+      })
+      if (received < minExpectedCount && waitTimeLeft > 0) {
+        Thread.sleep(1000)
+        waitTimeLeft -= 1000
+      } else {
+        keepPolling = false
+      }
+    }
+
+    if (records.size < minExpectedCount) throw new RuntimeException("Failed to read the expected number of records from kafka")
+
+ records.toList
+ }
+
+ def readRecordsFromExternalKafka(minExpectedCount: Int, maxWait: FiniteDuration): List[Array[Byte]] = {
+ val records = mutable.ListBuffer[Array[Byte]]()
+ var received: Int = 0
+
+    var waitTimeLeft = maxWait.toMillis
+
+    externalKafkaConsumerMap.foreach { case (topic, consumer) =>
+      var keepPolling = true
+      while (keepPolling) {
+        consumer.poll(250).records(topic).foreach(rec => {
+          received += 1
+          records += rec.value()
+        })
+        if (received < minExpectedCount && waitTimeLeft > 0) {
+          Thread.sleep(1000)
+          waitTimeLeft -= 1000
+        } else {
+          keepPolling = false
+        }
+      }
+    }
+
+    if (records.size < minExpectedCount) throw new RuntimeException("Failed to read the expected number of records from external kafka")
+
+ records.toList
+ }
+
+ def shutdownKafkaConsumer(): Unit = {
+    if (kafkaConsumer != null) kafkaConsumer.close()
+    externalKafkaConsumerMap.values.foreach(_.close())
+ }
+}
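+
+// Hedged sketch (illustration only, not used by the tests): the two read methods above share the
+// same poll-until-count-or-timeout loop; a deadline-based helper like this could consolidate them.
+object PollUntilSketch {
+  import org.apache.kafka.clients.consumer.KafkaConsumer
+  import scala.collection.mutable
+  import scala.concurrent.duration.FiniteDuration
+
+  def pollUntil(consumer: KafkaConsumer[Array[Byte], Array[Byte]],
+                topic: String,
+                minExpectedCount: Int,
+                maxWait: FiniteDuration): List[Array[Byte]] = {
+    val records = mutable.ListBuffer[Array[Byte]]()
+    val deadline = System.currentTimeMillis() + maxWait.toMillis
+    while (records.size < minExpectedCount && System.currentTimeMillis() < deadline) {
+      // JavaConversions (imported at the top of this file) turns the java Iterable into a scala one
+      consumer.poll(250).records(topic).foreach(rec => records += rec.value())
+    }
+    records.toList
+  }
+}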
diff --git a/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/LocalKinesisProducer.scala b/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/LocalKinesisProducer.scala
new file mode 100644
index 000000000..6e0dd8897
--- /dev/null
+++ b/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/LocalKinesisProducer.scala
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector.integration
+
+import java.nio.ByteBuffer
+import java.util.UUID
+
+import com.amazonaws.client.builder.AwsClientBuilder
+import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder
+import com.amazonaws.services.kinesis.model.ResourceNotFoundException
+import com.amazonaws.{ClientConfiguration, Protocol}
+import com.expedia.www.haystack.kinesis.span.collector.integration.config.TestConfiguration
+
+trait LocalKinesisProducer {
+
+ private val client = {
+ val endpointConfig = new AwsClientBuilder.EndpointConfiguration(s"http://${TestConfiguration.remoteKinesisHost}:${TestConfiguration.kinesisPort}", "us-west-2")
+ val clientConfig = new ClientConfiguration().withProtocol(Protocol.HTTP)
+
+ AmazonKinesisClientBuilder
+ .standard()
+ .withClientConfiguration(clientConfig)
+ .withEndpointConfiguration(endpointConfig)
+ .build()
+ }
+
+ protected def createStreamIfNotExists(): Unit = {
+ try {
+ client.describeStream(TestConfiguration.kinesisStreamName)
+ } catch {
+ case _: ResourceNotFoundException =>
+ println(s"Creating kinesis stream ${TestConfiguration.kinesisStreamName}")
+ client.createStream(TestConfiguration.kinesisStreamName, 1)
+ }
+ }
+
+ def produceRecordsToKinesis(records: List[Array[Byte]]): Unit = {
+ records.foreach(record => {
+ client.putRecord(TestConfiguration.kinesisStreamName, ByteBuffer.wrap(record), UUID.randomUUID().toString)
+ })
+ }
+
+  protected def shutdownKinesisClient(): Unit = if (client != null) client.shutdown()
+}
diff --git a/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/config/TestConfiguration.scala b/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/config/TestConfiguration.scala
new file mode 100644
index 000000000..7e95cec29
--- /dev/null
+++ b/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/config/TestConfiguration.scala
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector.integration.config
+
+object TestConfiguration {
+ val remoteKafkaHost = "kafkasvc"
+ val kafkaPort = 9092
+ val remoteKinesisHost = "localstack"
+ val kinesisPort = 4568
+ val kafkaStreamName = "proto-spans"
+ val kinesisStreamName = "haystack-proto-spans"
+}
diff --git a/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/tests/KinesisSpanCollectorSpec.scala b/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/tests/KinesisSpanCollectorSpec.scala
new file mode 100644
index 000000000..e9d3d9f36
--- /dev/null
+++ b/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/integration/tests/KinesisSpanCollectorSpec.scala
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector.integration.tests
+
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.kinesis.span.collector.config.ProjectConfiguration
+import com.expedia.www.haystack.kinesis.span.collector.integration._
+
+import scala.concurrent.duration._
+
+class KinesisSpanCollectorSpec extends IntegrationTestSpec {
+
+ private val StartTimeMicros = System.currentTimeMillis() * 1000
+ private val DurationMicros = 42
+
+ "Kinesis span collector" should {
+
+    // this test primarily works around an issue with the Kafka docker image:
+    // the first put fails for some reason
+ "connect with kinesis and kafka" in {
+
+ Given("a valid span")
+ val spanBytes = Span.newBuilder().setTraceId("traceid").setSpanId("span-id-1").build().toByteArray
+
+ When("the span is sent to kinesis")
+ produceRecordsToKinesis(List(spanBytes, spanBytes))
+
+ Then("it should be pushed to kafka")
+ readRecordsFromKafka(0, 1.second).headOption
+ }
+
+ "read valid spans from kinesis and store individual spans in kafka" in {
+
+ Given("valid spans")
+ val span_1 = Span.newBuilder().setTraceId("trace-id-1").setSpanId("span-id-1").setOperationName("operation")
+ .setServiceName("service").setStartTime(StartTimeMicros).setDuration(DurationMicros).build().toByteArray
+ val span_2 = Span.newBuilder().setTraceId("trace-id-1").setSpanId("span-id-2").setOperationName("operation")
+ .setServiceName("service").setStartTime(StartTimeMicros).setDuration(DurationMicros).build().toByteArray
+ val span_3 = Span.newBuilder().setTraceId("trace-id-2").setSpanId("span-id-3").setOperationName("operation")
+ .setServiceName("service").setStartTime(StartTimeMicros).setDuration(DurationMicros).build().toByteArray
+ val span_4 = Span.newBuilder().setTraceId("trace-id-2").setSpanId("span-id-4").setOperationName("operation")
+ .setServiceName("service").setStartTime(StartTimeMicros).setDuration(DurationMicros).build().toByteArray
+
+ When("the span is sent to kinesis")
+ produceRecordsToKinesis(List(span_1, span_2, span_3, span_4))
+
+ Then("it should be pushed to kafka with partition key as its trace id")
+ val records = readRecordsFromKafka(4, 5.seconds)
+      val externalRecords = readRecordsFromExternalKafka(0, 10.seconds)
+      externalRecords.size shouldEqual 0
+ records.size shouldEqual 4
+
+ val spans = records.map(Span.parseFrom)
+ spans.map(_.getTraceId).toSet should contain allOf("trace-id-1", "trace-id-2")
+ spans.map(_.getSpanId) should contain allOf("span-id-1", "span-id-2", "span-id-3", "span-id-4")
+ }
+
+ "read valid spans from kinesis and store individual spans in kafka and external kafka" in {
+
+ Given("valid spans")
+ val tags: List[Tag] = List(
+ Tag.newBuilder().setKey("X-HAYSTACK-SPAN-OWNER").setVStr("OWNER1").build(),
+ Tag.newBuilder().setKey("X-HAYSTACK-SPAN-SENDER").setVStr("SENDER1").build()
+ )
+ val span_1 = Span.newBuilder().setTraceId("trace-id-1").setSpanId("span-id-1").setOperationName("operation")
+ .setServiceName("service").setStartTime(StartTimeMicros).setDuration(DurationMicros)
+ .addTags(tags(0)).addTags(tags(1)).build().toByteArray
+ val span_2 = Span.newBuilder().setTraceId("trace-id-1").setSpanId("span-id-2").setOperationName("operation")
+ .setServiceName("service").setStartTime(StartTimeMicros).setDuration(DurationMicros)
+ .addTags(tags(0)).addTags(tags(1)).build().toByteArray
+ val span_3 = Span.newBuilder().setTraceId("trace-id-2").setSpanId("span-id-3").setOperationName("operation")
+ .setServiceName("service").setStartTime(StartTimeMicros).setDuration(DurationMicros)
+ .addTags(tags(0)).addTags(tags(1)).build().toByteArray
+ val span_4 = Span.newBuilder().setTraceId("trace-id-2").setSpanId("span-id-4").setOperationName("operation")
+ .setServiceName("service").setStartTime(StartTimeMicros).setDuration(DurationMicros)
+ .addTags(tags(0)).addTags(tags(1)).build().toByteArray
+
+ When("the span is sent to kinesis")
+ produceRecordsToKinesis(List(span_1, span_2, span_3, span_4))
+
+ Then("it should be pushed to default kafka and external kafka with partition key as its trace id")
+ val records = readRecordsFromKafka(4, 5.seconds)
+ val numConsumers = ProjectConfiguration.externalKafkaConfig().size
+      val externalRecords = readRecordsFromExternalKafka(4 * numConsumers, (10 * numConsumers).seconds)
+      externalRecords.size should equal(4)
+      records.size should equal(4)
+      val spans = records.map(Span.parseFrom)
+      val externalSpans = externalRecords.map(Span.parseFrom)
+ numConsumers should equal(1)
+ spans.map(_.getTraceId).toSet should contain allOf("trace-id-1", "trace-id-2")
+ externalSpans.map(_.getTraceId).toSet should contain allOf("trace-id-1", "trace-id-2")
+ spans.map(_.getSpanId) should contain allOf("span-id-1", "span-id-2", "span-id-3", "span-id-4")
+ externalSpans.map(_.getSpanId) should contain allOf("span-id-1", "span-id-2", "span-id-3", "span-id-4")
+ }
+
+ "load appropriate span decorator plugin using configuration provided " in {
+
+ Given("Jar file for SAMPLE_SPAN_DECORATOR plugin in plugins/decorators directory")
+ val span_1 = Span.newBuilder().setTraceId("trace-id-1").setSpanId("span-id-1").setOperationName("operation")
+ .setServiceName("service").setStartTime(StartTimeMicros).setDuration(DurationMicros).build().toByteArray
+
+ When("the app is initialised")
+ produceRecordsToKinesis(List(span_1))
+
+ Then("the appropriate span decorator plugin should be loaded using spi")
+ val records = readRecordsFromKafka(1, 5.seconds)
+ records should not be empty
+
+ val spans = records.map(Span.parseFrom)
+ spans.map(_.getTraceId).toSet should contain("trace-id-1")
+ spans.map(_.getSpanId) should contain("span-id-1")
+ spans(0).getTagsList should contain(Tag.newBuilder().setKey("X-HAYSTACK-PLUGIN-SPAN-DECORATOR").setVStr("SAMPLE-TAG").build())
+ }
+ }
+}
diff --git a/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/unit/tests/ConfigurationLoaderSpec.scala b/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/unit/tests/ConfigurationLoaderSpec.scala
new file mode 100644
index 000000000..5c486ee1f
--- /dev/null
+++ b/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/unit/tests/ConfigurationLoaderSpec.scala
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector.unit.tests
+
+import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream
+import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel
+import com.expedia.www.haystack.collector.commons.config.ExternalKafkaConfiguration
+import com.expedia.www.haystack.kinesis.span.collector.config.ProjectConfiguration
+import com.expedia.www.haystack.span.decorators.plugin.config.Plugin
+import org.apache.kafka.clients.producer.ProducerConfig
+import org.scalatest.{FunSpec, Matchers}
+
+class ConfigurationLoaderSpec extends FunSpec with Matchers {
+
+ val project = ProjectConfiguration
+
+ describe("Configuration com.expedia.www.haystack.span.loader") {
+ it("should load the kinesis config from base.conf") {
+ val kinesis = project.kinesisConsumerConfig()
+ kinesis.metricsLevel shouldEqual MetricsLevel.NONE
+ kinesis.awsRegion shouldEqual "us-west-2"
+ kinesis.appGroupName shouldEqual "haystack-kinesis-proto-span-collector"
+ kinesis.checkpointRetries shouldBe 50
+ kinesis.dynamoTableName shouldBe None
+ kinesis.checkpointInterval.toMillis shouldBe 15000L
+ kinesis.streamPosition shouldEqual InitialPositionInStream.LATEST
+ kinesis.streamName shouldEqual "haystack-proto-spans"
+ kinesis.maxRecordsToRead shouldBe 2000
+ kinesis.metricsBufferTime.toMillis shouldBe 10000
+ kinesis.shardSyncInterval.toMillis shouldBe 30000
+ kinesis.kinesisEndpoint.isEmpty shouldBe true
+ kinesis.dynamoEndpoint.isEmpty shouldBe true
+ kinesis.taskBackoffTime.toMillis shouldBe 200
+ }
+
+ it("should load the kafka config only from base.conf") {
+ val kafka = project.kafkaProducerConfig()
+ kafka.topic shouldEqual "proto-spans"
+ kafka.props.getProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG) shouldEqual "kafkasvc:9092"
+ }
+
+ it("should load the extractor config only from base.conf") {
+ val extractorConfig = project.extractorConfiguration()
+ extractorConfig.outputFormat.toString shouldEqual "proto"
+ extractorConfig.spanValidation.spanMaxSize.maxSizeLimit shouldEqual 5000
+ extractorConfig.spanValidation.spanMaxSize.enable shouldEqual false
+ extractorConfig.spanValidation.spanMaxSize.skipTags.contains("error") shouldEqual true
+ extractorConfig.spanValidation.spanMaxSize.skipServices.size shouldEqual 0
+    }
+
+ it("should load the external kafka config from the base.conf") {
+ val externalKafka: List[ExternalKafkaConfiguration] = project.externalKafkaConfig()
+ externalKafka.head.tags("X-HAYSTACK-SPAN-OWNER") shouldEqual "OWNER1"
+ externalKafka.head.tags("X-HAYSTACK-SPAN-SENDER") shouldEqual "SENDER1"
+ externalKafka.head.kafkaProduceConfiguration.topic shouldEqual "external-proto-spans"
+ externalKafka.head.kafkaProduceConfiguration.props.getProperty("bootstrap.servers") shouldEqual "kafkasvc:9092"
+ }
+
+ it("should load the plugins config from the base.conf") {
+ val plugin: Plugin = project.pluginConfiguration()
+ plugin.getDirectory shouldEqual "plugins/decorators"
+ plugin.getPluginConfigurationList.get(0).getName shouldEqual "SAMPLE_SPAN_DECORATOR"
+ plugin.getPluginConfigurationList.get(0).getConfig.getString("tag.key") shouldEqual "X-HAYSTACK-PLUGIN-SPAN-DECORATOR"
+ }
+
+ it("should load the health status file") {
+ project.healthStatusFile() shouldEqual Some("/app/isHealthy")
+ }
+ }
+}
diff --git a/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/unit/tests/RecordProcessorSpec.scala b/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/unit/tests/RecordProcessorSpec.scala
new file mode 100644
index 000000000..dddbbcc35
--- /dev/null
+++ b/collector/kinesis/src/test/scala/com/expedia/www/haystack/kinesis/span/collector/unit/tests/RecordProcessorSpec.scala
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.kinesis.span.collector.unit.tests
+
+import java.nio.ByteBuffer
+import java.util.Date
+
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer
+import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream
+import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput
+import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel
+import com.amazonaws.services.kinesis.model.Record
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.collector.commons.config.{ExtractorConfiguration, Format, SpanMaxSize, SpanValidation}
+import com.expedia.www.haystack.collector.commons.record.KeyValuePair
+import com.expedia.www.haystack.collector.commons.sink.RecordSink
+import com.expedia.www.haystack.collector.commons.{MetricsSupport, ProtoSpanExtractor}
+import com.expedia.www.haystack.kinesis.span.collector.config.entities.KinesisConsumerConfiguration
+import com.expedia.www.haystack.kinesis.span.collector.kinesis.RecordProcessor
+import org.easymock.EasyMock
+import org.easymock.EasyMock._
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FunSpec, Matchers}
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration._
+
+class RecordProcessorSpec extends FunSpec with Matchers with EasyMockSugar with MetricsSupport {
+ private val StartTimeMicros = System.currentTimeMillis() * 1000
+ private val DurationMicros = 42
+ describe("Record Processor") {
+
+ val kinesisConfig = KinesisConsumerConfiguration("us-west-2", None,
+ "app-group", "stream-1", InitialPositionInStream.LATEST, 10.seconds, 10, 10.seconds, None, None, None,
+ 10000, 500.millis, 10000.millis, MetricsLevel.NONE, 10000.millis, 200.millis)
+
+ it("should process the record, sends to sink and perform checkpointing") {
+ val sink = mock[RecordSink]
+ val checkpointer = mock[IRecordProcessorCheckpointer]
+
+ val span_1 = Span.newBuilder()
+ .setSpanId("span-id-1")
+ .setTraceId("trace-id")
+ .setServiceName("service")
+ .setOperationName("operation")
+ .setStartTime(StartTimeMicros)
+ .setDuration(DurationMicros)
+ .build()
+ val record = new Record()
+ .withApproximateArrivalTimestamp(new Date())
+ .withData(ByteBuffer.wrap(span_1.toByteArray))
+
+ val capturedKVPair = EasyMock.newCapture[KeyValuePair[Array[Byte], Array[Byte]]]()
+
+ expecting {
+ sink.toAsync(
+ capture(capturedKVPair),
+ anyObject(classOf[(KeyValuePair[Array[Byte], Array[Byte]], Exception) => Unit]))
+ }.times(2)
+
+ expecting {
+ checkpointer.checkpoint()
+ }.once()
+
+ whenExecuting(sink, checkpointer) {
+ val spanValidationConfig = SpanValidation(SpanMaxSize(enable = false, logOnly = false, 5000, "", "", Seq(), Seq()))
+ val processor = new RecordProcessor(kinesisConfig, new ProtoSpanExtractor(ExtractorConfiguration(Format.PROTO, spanValidationConfig), LoggerFactory.getLogger(classOf[ProtoSpanExtractor]), List()), sink)
+ val input = new ProcessRecordsInput().withRecords(List(record).asJava).withCheckpointer(checkpointer)
+ processor.processRecords(input)
+
+ capturedKVPair.getValue.key shouldEqual "trace-id".getBytes("UTF-8")
+ capturedKVPair.getValue.value shouldEqual span_1.toByteArray
+
+        // checkpointing should be performed only once; the second call falls within the checkpoint interval
+ processor.processRecords(input)
+ }
+ }
+
+ it("should process a span without transactionid, but send to sink and perform checkpointing") {
+ val sink = mock[RecordSink]
+ val checkpointer = mock[IRecordProcessorCheckpointer]
+
+ val span_1 = Span.newBuilder()
+ .setSpanId("span-id-1")
+ .setTraceId("trace-id-1")
+ .setServiceName("service")
+ .setOperationName("operation")
+ .setStartTime(StartTimeMicros)
+ .setDuration(DurationMicros)
+ .build()
+
+ val span_2 = Span.newBuilder()
+ .setSpanId("span-id-2")
+ .setTraceId("trace-id-2")
+ .setServiceName("service")
+ .setOperationName("operation")
+ .setStartTime(StartTimeMicros)
+ .setDuration(DurationMicros)
+ .build()
+
+ val record_1 = new Record()
+ .withPartitionKey(null)
+ .withApproximateArrivalTimestamp(new Date())
+ .withData(ByteBuffer.wrap(span_1.toByteArray))
+
+ val record_2 = new Record()
+ .withPartitionKey(null)
+ .withApproximateArrivalTimestamp(new Date())
+ .withData(ByteBuffer.wrap(span_2.toByteArray))
+
+ val captureKvPair = EasyMock.newCapture[KeyValuePair[Array[Byte], Array[Byte]]]()
+
+ expecting {
+ sink.toAsync(
+ capture(captureKvPair),
+ anyObject(classOf[(KeyValuePair[Array[Byte], Array[Byte]], Exception) => Unit]))
+ }.times(2)
+
+ expecting {
+ checkpointer.checkpoint()
+ }.once()
+
+ whenExecuting(sink, checkpointer) {
+ val spanValidationConfig = SpanValidation(SpanMaxSize(enable = false, logOnly = false, 5000, "", "", Seq(), Seq()))
+ val processor = new RecordProcessor(kinesisConfig, new ProtoSpanExtractor(ExtractorConfiguration(Format.PROTO, spanValidationConfig), LoggerFactory.getLogger(classOf[ProtoSpanExtractor]), List()), sink)
+ val input_1 = new ProcessRecordsInput().withRecords(List(record_1).asJava).withCheckpointer(checkpointer)
+ processor.processRecords(input_1)
+
+ captureKvPair.getValue.key shouldEqual "trace-id-1".getBytes("UTF-8")
+ captureKvPair.getValue.value shouldEqual span_1.toByteArray
+
+ val input = new ProcessRecordsInput().withRecords(List(record_2).asJava).withCheckpointer(checkpointer)
+ processor.processRecords(input)
+
+ captureKvPair.getValue.key shouldEqual "trace-id-2".getBytes("UTF-8")
+ captureKvPair.getValue.value shouldEqual span_2.toByteArray
+
+ }
+ }
+
+ it("should not emit an illegal json span to sink but perform checkpointing") {
+ val sink = mock[RecordSink]
+ val checkpointer = mock[IRecordProcessorCheckpointer]
+
+ val spanData = "random-span-proto-bytes".getBytes()
+ val record = new Record().withPartitionKey(null).withApproximateArrivalTimestamp(new Date()).withData(ByteBuffer.wrap(spanData))
+
+ expecting {
+ checkpointer.checkpoint()
+ }.once
+
+ whenExecuting(sink, checkpointer) {
+ val spanValidationConfig = SpanValidation(SpanMaxSize(enable = false, logOnly = false, 5000, "", "", Seq(), Seq()))
+ val processor = new RecordProcessor(kinesisConfig, new ProtoSpanExtractor(ExtractorConfiguration(Format.PROTO, spanValidationConfig), LoggerFactory.getLogger(classOf[ProtoSpanExtractor]), List()), sink)
+ val input = new ProcessRecordsInput().withRecords(List(record).asJava).withCheckpointer(checkpointer)
+ processor.processRecords(input)
+ }
+ }
+ }
+}
diff --git a/collector/mvnw b/collector/mvnw
new file mode 100755
index 000000000..e96ccd5fb
--- /dev/null
+++ b/collector/mvnw
@@ -0,0 +1,227 @@
+#!/bin/sh
+# ----------------------------------------------------------------------------
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ----------------------------------------------------------------------------
+
+# ----------------------------------------------------------------------------
+# Maven2 Start Up Batch script
+#
+# Required ENV vars:
+# ------------------
+# JAVA_HOME - location of a JDK home dir
+#
+# Optional ENV vars
+# -----------------
+# M2_HOME - location of maven2's installed home dir
+# MAVEN_OPTS - parameters passed to the Java VM when running Maven
+# e.g. to debug Maven itself, use
+# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+# ----------------------------------------------------------------------------
+
+if [ -z "$MAVEN_SKIP_RC" ] ; then
+
+ if [ -f /etc/mavenrc ] ; then
+ . /etc/mavenrc
+ fi
+
+ if [ -f "$HOME/.mavenrc" ] ; then
+ . "$HOME/.mavenrc"
+ fi
+
+fi
+
+# OS specific support. $var _must_ be set to either true or false.
+cygwin=false;
+darwin=false;
+mingw=false
+case "`uname`" in
+ CYGWIN*) cygwin=true ;;
+ MINGW*) mingw=true;;
+ Darwin*) darwin=true
+ # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
+ # See https://developer.apple.com/library/mac/qa/qa1170/_index.html
+ if [ -z "$JAVA_HOME" ]; then
+ if [ -x "/usr/libexec/java_home" ]; then
+ export JAVA_HOME="`/usr/libexec/java_home`"
+ else
+ export JAVA_HOME="/Library/Java/Home"
+ fi
+ fi
+ ;;
+esac
+
+if [ -z "$JAVA_HOME" ] ; then
+ if [ -r /etc/gentoo-release ] ; then
+ JAVA_HOME=`java-config --jre-home`
+ fi
+fi
+
+if [ -z "$M2_HOME" ] ; then
+ ## resolve links - $0 may be a link to maven's home
+ PRG="$0"
+
+ # need this for relative symlinks
+ while [ -h "$PRG" ] ; do
+ ls=`ls -ld "$PRG"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG="`dirname "$PRG"`/$link"
+ fi
+ done
+
+ saveddir=`pwd`
+
+ M2_HOME=`dirname "$PRG"`/..
+
+ # make it fully qualified
+ M2_HOME=`cd "$M2_HOME" && pwd`
+
+ cd "$saveddir"
+ # echo Using m2 at $M2_HOME
+fi
+
+# For Cygwin, ensure paths are in UNIX format before anything is touched
+if $cygwin ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --unix "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
+fi
+
+# For Mingw, ensure paths are in UNIX format before anything is touched
+if $mingw ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME="`(cd "$M2_HOME"; pwd)`"
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
+ # TODO classpath?
+fi
+
+if [ -z "$JAVA_HOME" ]; then
+ javaExecutable="`which javac`"
+ if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
+ # readlink(1) is not available as standard on Solaris 10.
+ readLink=`which readlink`
+ if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
+ if $darwin ; then
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
+ else
+ javaExecutable="`readlink -f \"$javaExecutable\"`"
+ fi
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaHome=`expr "$javaHome" : '\(.*\)/bin'`
+ JAVA_HOME="$javaHome"
+ export JAVA_HOME
+ fi
+ fi
+fi
+
+if [ -z "$JAVACMD" ] ; then
+ if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD="$JAVA_HOME/jre/sh/java"
+ else
+ JAVACMD="$JAVA_HOME/bin/java"
+ fi
+ else
+ JAVACMD="`which java`"
+ fi
+fi
+
+if [ ! -x "$JAVACMD" ] ; then
+ echo "Error: JAVA_HOME is not defined correctly." >&2
+ echo " We cannot execute $JAVACMD" >&2
+ exit 1
+fi
+
+if [ -z "$JAVA_HOME" ] ; then
+ echo "Warning: JAVA_HOME environment variable is not set."
+fi
+
+CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
+
+# traverses directory structure from process work directory to filesystem root
+# first directory with .mvn subdirectory is considered project base directory
+find_maven_basedir() {
+
+ if [ -z "$1" ]
+ then
+ echo "Path not specified to find_maven_basedir"
+ return 1
+ fi
+
+ basedir="$1"
+ wdir="$1"
+ while [ "$wdir" != '/' ] ; do
+ if [ -d "$wdir"/.mvn ] ; then
+ basedir=$wdir
+ break
+ fi
+ # workaround for JBEAP-8937 (on Solaris 10/Sparc)
+ if [ -d "${wdir}" ]; then
+ wdir=`cd "$wdir/.."; pwd`
+ fi
+ # end of workaround
+ done
+ echo "${basedir}"
+}
+
+# concatenates all lines of a file
+concat_lines() {
+ if [ -f "$1" ]; then
+ echo "$(tr -s '\n' ' ' < "$1")"
+ fi
+}
+
+BASE_DIR=`find_maven_basedir "$(pwd)"`
+if [ -z "$BASE_DIR" ]; then
+ exit 1;
+fi
+
+export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
+if [ "$MVNW_VERBOSE" = true ]; then
+ echo $MAVEN_PROJECTBASEDIR
+fi
+MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
+
+# For Cygwin, switch paths to Windows format before running java
+if $cygwin; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --path --windows "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
+ [ -n "$MAVEN_PROJECTBASEDIR" ] &&
+ MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
+fi
+
+WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+exec "$JAVACMD" \
+ $MAVEN_OPTS \
+ -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
+ "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
+ ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
diff --git a/collector/mvnw.cmd b/collector/mvnw.cmd
new file mode 100755
index 000000000..019bd74d7
--- /dev/null
+++ b/collector/mvnw.cmd
@@ -0,0 +1,143 @@
+@REM ----------------------------------------------------------------------------
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements. See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership. The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License. You may obtain a copy of the License at
+@REM
+@REM http://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied. See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM ----------------------------------------------------------------------------
+
+@REM ----------------------------------------------------------------------------
+@REM Maven2 Start Up Batch script
+@REM
+@REM Required ENV vars:
+@REM JAVA_HOME - location of a JDK home dir
+@REM
+@REM Optional ENV vars
+@REM M2_HOME - location of maven2's installed home dir
+@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
+@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
+@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
+@REM e.g. to debug Maven itself, use
+@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+@REM ----------------------------------------------------------------------------
+
+@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
+@echo off
+@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
+@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
+
+@REM set %HOME% to equivalent of $HOME
+if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
+
+@REM Execute a user defined script before this one
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
+@REM check for pre script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
+if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
+:skipRcPre
+
+@setlocal
+
+set ERROR_CODE=0
+
+@REM To isolate internal variables from possible post scripts, we use another setlocal
+@setlocal
+
+@REM ==== START VALIDATION ====
+if not "%JAVA_HOME%" == "" goto OkJHome
+
+echo.
+echo Error: JAVA_HOME not found in your environment. >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+:OkJHome
+if exist "%JAVA_HOME%\bin\java.exe" goto init
+
+echo.
+echo Error: JAVA_HOME is set to an invalid directory. >&2
+echo JAVA_HOME = "%JAVA_HOME%" >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+@REM ==== END VALIDATION ====
+
+:init
+
+@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
+@REM Fallback to current working directory if not found.
+
+set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
+IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
+
+set EXEC_DIR=%CD%
+set WDIR=%EXEC_DIR%
+:findBaseDir
+IF EXIST "%WDIR%"\.mvn goto baseDirFound
+cd ..
+IF "%WDIR%"=="%CD%" goto baseDirNotFound
+set WDIR=%CD%
+goto findBaseDir
+
+:baseDirFound
+set MAVEN_PROJECTBASEDIR=%WDIR%
+cd "%EXEC_DIR%"
+goto endDetectBaseDir
+
+:baseDirNotFound
+set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
+cd "%EXEC_DIR%"
+
+:endDetectBaseDir
+
+IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
+
+@setlocal EnableExtensions EnableDelayedExpansion
+for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
+@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
+
+:endReadAdditionalConfig
+
+SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
+
+set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
+set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
+if ERRORLEVEL 1 goto error
+goto end
+
+:error
+set ERROR_CODE=1
+
+:end
+@endlocal & set ERROR_CODE=%ERROR_CODE%
+
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
+@REM check for post script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
+if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
+:skipRcPost
+
+@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
+if "%MAVEN_BATCH_PAUSE%" == "on" pause
+
+if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
+
+exit /B %ERROR_CODE%
diff --git a/collector/pom.xml b/collector/pom.xml
new file mode 100644
index 000000000..8f24d96a7
--- /dev/null
+++ b/collector/pom.xml
@@ -0,0 +1,464 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>com.expedia.www</groupId>
+    <artifactId>haystack-collector</artifactId>
+    <version>1.0-SNAPSHOT</version>
+    <packaging>pom</packaging>
+
+    <modules>
+        <module>haystack-span-decorators</module>
+        <module>commons</module>
+        <module>kinesis</module>
+        <module>http</module>
+        <module>sample-span-decorator</module>
+    </modules>
+
+    <scm>
+        <connection>scm:git:git://github.com/ExpediaDotCom/haystack-collector.git</connection>
+        <developerConnection>scm:git:ssh://github.com/ExpediaDotCom/haystack-collector.git</developerConnection>
+        <url>http://github.com/ExpediaDotCom/haystack-collector</url>
+    </scm>
+
+    <name>${project.groupId}:${project.artifactId}</name>
+    <description>Haystack component that collects spans from various sources and publishes them to Kafka</description>
+    <url>https://github.com/ExpediaDotCom/haystack-collector/tree/master</url>
+
+    <licenses>
+        <license>
+            <name>Apache License, Version 2.0</name>
+            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+            <distribution>repo</distribution>
+        </license>
+    </licenses>
+
+    <developers>
+        <developer>
+            <id>haystack</id>
+            <name>Haystack Team</name>
+            <email>haystack@expedia.com</email>
+            <organizationUrl>https://github.com/ExpediaDotCom/haystack</organizationUrl>
+        </developer>
+    </developers>
+
+    <properties>
+        <project.jdk.version>1.8</project.jdk.version>
+        <protobuf.version>3.4.0</protobuf.version>
+
+        <logback.version>1.2.3</logback.version>
+        <slf4j-api.version>1.7.25</slf4j-api.version>
+        <commons-lang.version>3.4</commons-lang.version>
+        <json4s.version>3.5.3</json4s.version>
+        <typesafe-config.version>1.3.1</typesafe-config.version>
+
+        <kafka.version>0.11.0.0</kafka.version>
+
+        <httpclient.version>4.5.3</httpclient.version>
+        <jcl-slf4j.version>1.7.7</jcl-slf4j.version>
+
+        <haystack.logback.metrics.appender.version>0.1.12</haystack.logback.metrics.appender.version>
+        <scala.major.version>2</scala.major.version>
+        <scala.minor.version>11</scala.minor.version>
+        <scala.tiny.version>8</scala.tiny.version>
+        <scala.major.minor.version>${scala.major.version}.${scala.minor.version}</scala.major.minor.version>
+        <scala-library.version>${scala.major.minor.version}.${scala.tiny.version}</scala-library.version>
+
+        <!-- 6.8 -->
+        <pegdown.version>1.6.0</pegdown.version>
+        <scalatest.version>3.0.3</scalatest.version>
+        <scoverage.plugin.version>1.3.0</scoverage.plugin.version>
+        <skipGpg>true</skipGpg>
+        <maven-source-plugin.version>3.0.1</maven-source-plugin.version>
+        <nexus-staging-maven-plugin.version>1.6.8</nexus-staging-maven-plugin.version>
+        <maven-gpg-plugin.version>1.6</maven-gpg-plugin.version>
+    </properties>
+
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>com.expedia.www</groupId>
+                <artifactId>haystack-idl-java</artifactId>
+                <version>1.0.64</version>
+            </dependency>
+
+            <dependency>
+                <groupId>com.google.protobuf</groupId>
+                <artifactId>protobuf-java</artifactId>
+                <version>${protobuf.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>com.google.protobuf</groupId>
+                <artifactId>protobuf-java-util</artifactId>
+                <version>${protobuf.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>org.scala-lang</groupId>
+                <artifactId>scala-library</artifactId>
+                <version>${scala-library.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.scala-lang</groupId>
+                <artifactId>scala-reflect</artifactId>
+                <version>${scala-library.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>com.typesafe</groupId>
+                <artifactId>config</artifactId>
+                <version>${typesafe-config.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>com.codahale.metrics</groupId>
+                <artifactId>metrics-core</artifactId>
+                <version>3.0.2</version>
+            </dependency>
+
+            <dependency>
+                <groupId>ch.qos.logback</groupId>
+                <artifactId>logback-classic</artifactId>
+                <version>${logback.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>ch.qos.logback</groupId>
+                <artifactId>logback-core</artifactId>
+                <version>${logback.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.slf4j</groupId>
+                <artifactId>slf4j-api</artifactId>
+                <version>${slf4j-api.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>org.json4s</groupId>
+                <artifactId>json4s-jackson_${scala.major.minor.version}</artifactId>
+                <version>${json4s.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>org.apache.kafka</groupId>
+                <artifactId>kafka-clients</artifactId>
+                <version>${kafka.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>org.apache.kafka</groupId>
+                <artifactId>kafka_${scala.major.minor.version}</artifactId>
+                <version>${kafka.version}</version>
+                <exclusions>
+                    <exclusion>
+                        <groupId>org.slf4j</groupId>
+                        <artifactId>slf4j-log4j12</artifactId>
+                    </exclusion>
+                </exclusions>
+            </dependency>
+
+            <dependency>
+                <groupId>org.apache.commons</groupId>
+                <artifactId>commons-lang3</artifactId>
+                <version>${commons-lang.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>org.apache.httpcomponents</groupId>
+                <artifactId>httpclient</artifactId>
+                <version>${httpclient.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>org.slf4j</groupId>
+                <artifactId>jcl-over-slf4j</artifactId>
+                <version>${jcl-slf4j.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>com.expedia.www</groupId>
+                <artifactId>haystack-logback-metrics-appender</artifactId>
+                <version>${haystack.logback.metrics.appender.version}</version>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.expedia.www</groupId>
+            <artifactId>haystack-idl-java</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.typesafe</groupId>
+            <artifactId>config</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.scala-lang</groupId>
+            <artifactId>scala-library</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.scala-lang</groupId>
+            <artifactId>scala-reflect</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-classic</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-core</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.scalatest</groupId>
+            <artifactId>scalatest_${scala.major.minor.version}</artifactId>
+            <version>${scalatest.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.pegdown</groupId>
+            <artifactId>pegdown</artifactId>
+            <version>${pegdown.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.12</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.easymock</groupId>
+            <artifactId>easymock</artifactId>
+            <version>3.4</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-core</artifactId>
+            <version>2.23.0</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <sourceDirectory>${basedir}/src/main/scala</sourceDirectory>
+        <resources>
+            <resource>
+                <directory>${basedir}/src/main/resources</directory>
+                <filtering>true</filtering>
+            </resource>
+        </resources>
+        <pluginManagement>
+            <plugins>
+                <plugin>
+                    <groupId>org.scalatest</groupId>
+                    <artifactId>scalatest-maven-plugin</artifactId>
+                    <version>1.0</version>
+                    <executions>
+                        <execution>
+                            <id>test</id>
+                            <goals>
+                                <goal>test</goal>
+                            </goals>
+                        </execution>
+                    </executions>
+                </plugin>
+
+                <plugin>
+                    <groupId>com.github.os72</groupId>
+                    <artifactId>protoc-jar-maven-plugin</artifactId>
+                    <version>3.3.0.1</version>
+                </plugin>
+
+                <plugin>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-shade-plugin</artifactId>
+                    <version>1.6</version>
+                </plugin>
+
+                <plugin>
+                    <groupId>org.scalastyle</groupId>
+                    <artifactId>scalastyle-maven-plugin</artifactId>
+                    <version>0.8.0</version>
+                    <configuration>
+                        <failOnViolation>true</failOnViolation>
+                        <failOnWarning>false</failOnWarning>
+                        <configLocation>${basedir}/../checkstyles/scalastyle_config.xml</configLocation>
+                        <sourceDirectory>${basedir}/src/main/scala</sourceDirectory>
+                        <testSourceDirectory>${basedir}/src/test/scala</testSourceDirectory>
+                        <outputFile>${project.build.directory}/scalastyle-output.xml</outputFile>
+                        <outputEncoding>UTF-8</outputEncoding>
+                    </configuration>
+                    <executions>
+                        <execution>
+                            <id>compile-scalastyle</id>
+                            <goals>
+                                <goal>check</goal>
+                            </goals>
+                            <phase>compile</phase>
+                        </execution>
+                    </executions>
+                </plugin>
+
+                <plugin>
+                    <groupId>net.alchim31.maven</groupId>
+                    <artifactId>scala-maven-plugin</artifactId>
+                    <version>3.2.1</version>
+                    <executions>
+                        <execution>
+                            <id>scala-compile-first</id>
+                            <phase>process-resources</phase>
+                            <goals>
+                                <goal>add-source</goal>
+                                <goal>compile</goal>
+                            </goals>
+                        </execution>
+                        <execution>
+                            <id>scala-test-compile</id>
+                            <phase>process-test-resources</phase>
+                            <goals>
+                                <goal>testCompile</goal>
+                            </goals>
+                        </execution>
+                        <execution>
+                            <id>attach-javadocs</id>
+                            <goals>
+                                <goal>doc-jar</goal>
+                            </goals>
+                        </execution>
+                    </executions>
+                </plugin>
+
+                <plugin>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-compiler-plugin</artifactId>
+                    <version>3.6.1</version>
+                    <configuration>
+                        <source>${project.jdk.version}</source>
+                        <target>${project.jdk.version}</target>
+                    </configuration>
+                </plugin>
+
+                <plugin>
+                    <groupId>org.scoverage</groupId>
+                    <artifactId>scoverage-maven-plugin</artifactId>
+                    <version>${scoverage.plugin.version}</version>
+                    <configuration>
+                        <minimumCoverage>34</minimumCoverage>
+                        <failOnMinimumCoverage>true</failOnMinimumCoverage>
+                        <highlighting>true</highlighting>
+                        <scalaVersion>${scala-library.version}</scalaVersion>
+                        <aggregate>true</aggregate>
+                    </configuration>
+                </plugin>
+
+                <plugin>
+                    <groupId>org.sonatype.plugins</groupId>
+                    <artifactId>nexus-staging-maven-plugin</artifactId>
+                    <version>${nexus-staging-maven-plugin.version}</version>
+                </plugin>
+                <plugin>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-gpg-plugin</artifactId>
+                    <version>${maven-gpg-plugin.version}</version>
+                </plugin>
+            </plugins>
+        </pluginManagement>
+
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-gpg-plugin</artifactId>
+                <configuration>
+                    <useAgent>true</useAgent>
+                    <skip>${skipGpg}</skip>
+                </configuration>
+                <executions>
+                    <execution>
+                        <id>sign-artifacts</id>
+                        <phase>verify</phase>
+                        <goals>
+                            <goal>sign</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.sonatype.plugins</groupId>
+                <artifactId>nexus-staging-maven-plugin</artifactId>
+                <extensions>true</extensions>
+                <configuration>
+                    <serverId>ossrh</serverId>
+                    <nexusUrl>https://oss.sonatype.org/</nexusUrl>
+                    <autoReleaseAfterClose>true</autoReleaseAfterClose>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-source-plugin</artifactId>
+                <version>${maven-source-plugin.version}</version>
+                <executions>
+                    <execution>
+                        <id>attach-sources</id>
+                        <goals>
+                            <goal>jar-no-fork</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+    <distributionManagement>
+        <snapshotRepository>
+            <id>ossrh</id>
+            <url>https://oss.sonatype.org/content/repositories/snapshots</url>
+        </snapshotRepository>
+        <repository>
+            <id>ossrh</id>
+            <url>http://oss.sonatype.org/service/local/staging/deploy/maven2/</url>
+        </repository>
+    </distributionManagement>
+</project>
diff --git a/collector/sample-span-decorator/pom.xml b/collector/sample-span-decorator/pom.xml
new file mode 100644
index 000000000..d7074d2f6
--- /dev/null
+++ b/collector/sample-span-decorator/pom.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>haystack-collector</artifactId>
+        <groupId>com.expedia.www</groupId>
+        <version>1.0-SNAPSHOT</version>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>sample-span-decorator</artifactId>
+    <version>1.0-SNAPSHOT</version>
+    <packaging>jar</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.expedia.www</groupId>
+            <artifactId>haystack-span-decorators</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <sourceDirectory>src/main/java</sourceDirectory>
+        <finalName>${project.artifactId}</finalName>
+        <plugins>
+            <plugin>
+                <groupId>net.alchim31.maven</groupId>
+                <artifactId>scala-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/collector/sample-span-decorator/src/main/java/com/expedia/www/sample/span/decorator/SampleSpanDecorator.java b/collector/sample-span-decorator/src/main/java/com/expedia/www/sample/span/decorator/SampleSpanDecorator.java
new file mode 100644
index 000000000..8bb959ad7
--- /dev/null
+++ b/collector/sample-span-decorator/src/main/java/com/expedia/www/sample/span/decorator/SampleSpanDecorator.java
@@ -0,0 +1,32 @@
+package com.expedia.www.sample.span.decorator;
+
+import com.expedia.open.tracing.Span;
+import com.expedia.open.tracing.Tag;
+import com.expedia.www.haystack.span.decorators.SpanDecorator;
+import com.typesafe.config.Config;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SampleSpanDecorator implements SpanDecorator {
+ private static final Logger logger = LoggerFactory.getLogger(SampleSpanDecorator.class);
+ private Config config;
+
+ public void init(Config config) {
+ this.config = config;
+ }
+
+ public SampleSpanDecorator() {
+ }
+
+ @Override
+ public Span.Builder decorate(Span.Builder spanBuilder) {
+ spanBuilder.addTags(Tag.newBuilder().setKey(config.getString("tag.key"))
+ .setVStr("SAMPLE-TAG").build());
+ return spanBuilder;
+ }
+
+ @Override
+ public String name() {
+ return "SAMPLE_SPAN_DECORATOR";
+ }
+}
diff --git a/collector/sample-span-decorator/src/main/resources/META-INF/services/com.expedia.www.haystack.span.decorators.SpanDecorator b/collector/sample-span-decorator/src/main/resources/META-INF/services/com.expedia.www.haystack.span.decorators.SpanDecorator
new file mode 100644
index 000000000..4b596420b
--- /dev/null
+++ b/collector/sample-span-decorator/src/main/resources/META-INF/services/com.expedia.www.haystack.span.decorators.SpanDecorator
@@ -0,0 +1 @@
+com.expedia.www.sample.span.decorator.SampleSpanDecorator
\ No newline at end of file
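
The service file above is a standard Java `ServiceLoader` registration: any `SpanDecorator` implementation listed under `META-INF/services` on the classpath can be discovered at runtime. A minimal sketch of that discovery (the `DecoratorLoaderSketch` object is illustrative, not part of the repo):

```scala
import java.util.ServiceLoader
import com.expedia.www.haystack.span.decorators.SpanDecorator
import scala.collection.JavaConverters._

object DecoratorLoaderSketch extends App {
  // ServiceLoader reads META-INF/services/com.expedia.www.haystack.span.decorators.SpanDecorator
  // and instantiates every implementation listed there via its no-arg constructor.
  val decorators = ServiceLoader.load(classOf[SpanDecorator]).asScala.toList
  decorators.foreach(d => println(s"loaded decorator: ${d.name()}"))
}
```

This is why `SampleSpanDecorator` keeps an explicit no-arg constructor and defers configuration to `init(Config)`.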
diff --git a/collector/sample-span-decorator/src/main/resources/logback.xml b/collector/sample-span-decorator/src/main/resources/logback.xml
new file mode 100644
index 000000000..6d6308afa
--- /dev/null
+++ b/collector/sample-span-decorator/src/main/resources/logback.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+    <contextListener class="ch.qos.logback.classic.jul.LevelChangePropagator">
+        <resetJUL>true</resetJUL>
+    </contextListener>
+
+    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="async" class="ch.qos.logback.classic.AsyncAppender">
+        <queueSize>${HAYSTACK_LOG_QUEUE_SIZE:-500}</queueSize>
+        <discardingThreshold>${HAYSTACK_LOG_DISCARD_THRESHOLD:-0}</discardingThreshold>
+        <appender-ref ref="console"/>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="async"/>
+    </root>
+</configuration>
diff --git a/commons/.gitignore b/commons/.gitignore
new file mode 100644
index 000000000..3054462c2
--- /dev/null
+++ b/commons/.gitignore
@@ -0,0 +1,12 @@
+*.log
+*.ipr
+*.iws
+.classpath
+.project
+target/
+lib/
+logs/
+**/.idea/
+*.iml
+*.DS_Store
+**/target/
diff --git a/commons/.gitmodules b/commons/.gitmodules
new file mode 100644
index 000000000..3b40a897a
--- /dev/null
+++ b/commons/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "haystack-idl"]
+ path = haystack-idl
+ url = https://github.com/ExpediaDotCom/haystack-idl.git
diff --git a/commons/.mvn/wrapper/MavenWrapperDownloader.java b/commons/.mvn/wrapper/MavenWrapperDownloader.java
new file mode 100755
index 000000000..fa4f7b499
--- /dev/null
+++ b/commons/.mvn/wrapper/MavenWrapperDownloader.java
@@ -0,0 +1,110 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+import java.net.*;
+import java.io.*;
+import java.nio.channels.*;
+import java.util.Properties;
+
+public class MavenWrapperDownloader {
+
+ /**
+ * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
+ */
+ private static final String DEFAULT_DOWNLOAD_URL =
+ "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar";
+
+ /**
+ * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
+ * use instead of the default one.
+ */
+ private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
+ ".mvn/wrapper/maven-wrapper.properties";
+
+ /**
+ * Path where the maven-wrapper.jar will be saved to.
+ */
+ private static final String MAVEN_WRAPPER_JAR_PATH =
+ ".mvn/wrapper/maven-wrapper.jar";
+
+ /**
+ * Name of the property which should be used to override the default download url for the wrapper.
+ */
+ private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
+
+ public static void main(String args[]) {
+ System.out.println("- Downloader started");
+ File baseDirectory = new File(args[0]);
+ System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
+
+ // If the maven-wrapper.properties exists, read it and check if it contains a custom
+ // wrapperUrl parameter.
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
+ String url = DEFAULT_DOWNLOAD_URL;
+ if(mavenWrapperPropertyFile.exists()) {
+ FileInputStream mavenWrapperPropertyFileInputStream = null;
+ try {
+ mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
+ Properties mavenWrapperProperties = new Properties();
+ mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
+ url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
+ } catch (IOException e) {
+ System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
+ } finally {
+ try {
+ if(mavenWrapperPropertyFileInputStream != null) {
+ mavenWrapperPropertyFileInputStream.close();
+ }
+ } catch (IOException e) {
+ // Ignore ...
+ }
+ }
+ }
+ System.out.println("- Downloading from: " + url);
+
+ File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
+ if(!outputFile.getParentFile().exists()) {
+ if(!outputFile.getParentFile().mkdirs()) {
+ System.out.println(
+ "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
+ }
+ }
+ System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
+ try {
+ downloadFileFromURL(url, outputFile);
+ System.out.println("Done");
+ System.exit(0);
+ } catch (Throwable e) {
+ System.out.println("- Error downloading");
+ e.printStackTrace();
+ System.exit(1);
+ }
+ }
+
+ private static void downloadFileFromURL(String urlString, File destination) throws Exception {
+ URL website = new URL(urlString);
+ ReadableByteChannel rbc;
+ rbc = Channels.newChannel(website.openStream());
+ FileOutputStream fos = new FileOutputStream(destination);
+ fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
+ fos.close();
+ rbc.close();
+ }
+
+}
diff --git a/commons/.mvn/wrapper/maven-wrapper.jar b/commons/.mvn/wrapper/maven-wrapper.jar
new file mode 100755
index 000000000..01e679973
Binary files /dev/null and b/commons/.mvn/wrapper/maven-wrapper.jar differ
diff --git a/commons/.mvn/wrapper/maven-wrapper.properties b/commons/.mvn/wrapper/maven-wrapper.properties
new file mode 100755
index 000000000..00d32aab1
--- /dev/null
+++ b/commons/.mvn/wrapper/maven-wrapper.properties
@@ -0,0 +1 @@
+distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.5.4/apache-maven-3.5.4-bin.zip
\ No newline at end of file
diff --git a/commons/.travis.yml b/commons/.travis.yml
new file mode 100644
index 000000000..bc24fbc8e
--- /dev/null
+++ b/commons/.travis.yml
@@ -0,0 +1,20 @@
+language: java
+
+cache:
+ directories:
+ - $HOME/.m2
+
+install:
+ - java -XX:+PrintFlagsFinal -version
+ - ./mvnw --version
+
+script:
+ # build and deploy if master branch else just build
+ - if ([ "$TRAVIS_BRANCH" == "master" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ]) || [ -n "$TRAVIS_TAG" ]; then .travis/deploy.sh; else ./mvnw clean compile scoverage:report; fi
+
+
+jdk: openjdk8
+
+notifications:
+ email:
+ - haystack-notifications@expedia.com
diff --git a/commons/CONTRIBUTING.md b/commons/CONTRIBUTING.md
new file mode 100644
index 000000000..317757128
--- /dev/null
+++ b/commons/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+## Bugs
+We use GitHub Issues for our bug reporting. Please make sure the bug isn't already listed before opening a new issue.
+
+## Development
+All work on Haystack happens directly on GitHub. Core Haystack team members review opened pull requests.
+
+## Requests
+If you see a feature that you would like added, please open an issue in the respective repository or in the general Haystack repo.
+
+## Contributing to Documentation
+To contribute to documentation, modify the corresponding .md files in the docs directory under the base haystack repository and submit a pull request. Once your PR is merged, the documentation is automatically built and deployed to https://expediadotcom.github.io/haystack.
+
+## License
+By contributing to Haystack, you agree that your contributions will be licensed under its Apache License.
\ No newline at end of file
diff --git a/commons/LICENSE b/commons/LICENSE
new file mode 100644
index 000000000..9f133f5cd
--- /dev/null
+++ b/commons/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2017 Expedia, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/commons/README.md b/commons/README.md
new file mode 100644
index 000000000..67373394a
--- /dev/null
+++ b/commons/README.md
@@ -0,0 +1,40 @@
+[Build Status](https://travis-ci.org/ExpediaDotCom/haystack-commons)
+[License](https://github.com/ExpediaDotCom/haystack/blob/master/LICENSE)
+
+
+# haystack-commons
+Module with common code that is used by various haystack components
+
+## Building
+
+Since this repo contains haystack-idl as a submodule, use the following to clone it:
+
+```git clone --recursive git@github.com:ExpediaDotCom/haystack-commons.git```
+
+#### Prerequisites
+
+* Make sure you have Java 1.8
+* Make sure you have Maven 3.3.9 or higher
+
+
+#### Build
+
+For a full build, including unit tests, run:
+
+```
+mvn clean package
+```
+
+#### Updating haystack-idl
+
+* Run:
+
+```git submodule update --recursive --remote```
+
+* Update the Maven version
+
+* Raise a PR
+
+#### Releasing haystack-commons
+* https://github.com/ExpediaDotCom/haystack-commons/blob/master/Release.md
+
diff --git a/commons/Release.md b/commons/Release.md
new file mode 100644
index 000000000..ca79c4b93
--- /dev/null
+++ b/commons/Release.md
@@ -0,0 +1,10 @@
+# Releasing
+Currently we publish this repository to Docker Hub and the Nexus central repository.
+
+# How to release and publish
+
+* Git tagging:
+
+```git tag -a 1.x.x -m "Release description..."```
+
+Or you can tag through the GitHub UI: https://github.com/ExpediaDotCom/haystack-commons/releases
\ No newline at end of file
diff --git a/commons/commons/pom.xml b/commons/commons/pom.xml
new file mode 100644
index 000000000..18709ef8d
--- /dev/null
+++ b/commons/commons/pom.xml
@@ -0,0 +1,246 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>com.expedia.www</groupId>
+        <artifactId>haystack-commons-parent</artifactId>
+        <version>1.0.66-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>haystack-commons</artifactId>
+    <packaging>jar</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.expedia.www</groupId>
+            <artifactId>haystack-idl-java</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>javax.annotation</groupId>
+            <artifactId>javax.annotation-api</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>com.typesafe</groupId>
+            <artifactId>config</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.json4s</groupId>
+            <artifactId>json4s-jackson_${scala.major.minor.version}</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>build-helper-maven-plugin</artifactId>
+            <scope>provided</scope>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-api</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-nop</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-jdk14</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>jcl-over-slf4j</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
+        <dependency>
+            <groupId>org.scala-lang</groupId>
+            <artifactId>scala-library</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.expedia</groupId>
+            <artifactId>metrics-java</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-protobuf</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-stub</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>com.codahale.metrics</groupId>
+            <artifactId>metrics-core</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>commons-codec</groupId>
+            <artifactId>commons-codec</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka_${scala.major.minor.version}</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-streams</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.msgpack</groupId>
+            <artifactId>msgpack-core</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.scalatest</groupId>
+            <artifactId>scalatest_${scala.major.minor.version}</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.easymock</groupId>
+            <artifactId>easymock</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.pegdown</groupId>
+            <artifactId>pegdown</artifactId>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <sourceDirectory>${basedir}/src/main/scala</sourceDirectory>
+        <resources>
+            <resource>
+                <directory>${basedir}/src/main/resources</directory>
+                <filtering>true</filtering>
+            </resource>
+        </resources>
+        <plugins>
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>build-helper-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>net.alchim31.maven</groupId>
+                <artifactId>scala-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.scalatest</groupId>
+                <artifactId>scalatest-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.scalastyle</groupId>
+                <artifactId>scalastyle-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.scoverage</groupId>
+                <artifactId>scoverage-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-source-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-javadoc-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-gpg-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.sonatype.plugins</groupId>
+                <artifactId>nexus-staging-maven-plugin</artifactId>
+                <extensions>true</extensions>
+                <configuration>
+                    <serverId>ossrh</serverId>
+                    <nexusUrl>https://oss.sonatype.org/</nexusUrl>
+                    <autoReleaseAfterClose>true</autoReleaseAfterClose>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/config/ConfigurationLoader.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/config/ConfigurationLoader.scala
new file mode 100644
index 000000000..569c8acb5
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/config/ConfigurationLoader.scala
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.config
+
+import java.io.File
+
+import com.typesafe.config.{Config, ConfigFactory, ConfigRenderOptions, ConfigValueType}
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+
+object ConfigurationLoader {
+
+ private val LOGGER = LoggerFactory.getLogger(ConfigurationLoader.getClass)
+
+ private[haystack] val ENV_NAME_PREFIX = "HAYSTACK_PROP_"
+
+ /**
+ * Load and return the configuration.
+ * If the HAYSTACK_OVERRIDES_CONFIG_PATH env variable is set, we load that config file with the base conf
+ * as fallback; in both cases env variables prefixed with `HAYSTACK_PROP_` take precedence over file values.
+ *
+ * @param resourceName name of the resource file to be loaded. Default value is `config/base.conf`
+ * @param envNamePrefix env variable prefix to override config values. Default is `HAYSTACK_PROP_`
+ *
+ * @return an instance of com.typesafe.Config
+ */
+ def loadConfigFileWithEnvOverrides(resourceName : String = "config/base.conf",
+ envNamePrefix : String = ENV_NAME_PREFIX) : Config = {
+
+ require(resourceName != null && resourceName.nonEmpty, "resourceName is required")
+ require(envNamePrefix != null && envNamePrefix.nonEmpty, "envNamePrefix is required")
+
+ val baseConfig = ConfigFactory.load(resourceName)
+
+ val keysWithArrayValues = baseConfig.entrySet()
+ .asScala
+ .filter(_.getValue.valueType() == ConfigValueType.LIST)
+ .map(_.getKey)
+ .toSet
+
+ val config = sys.env.get("HAYSTACK_OVERRIDES_CONFIG_PATH") match {
+ case Some(overrideConfigPath) =>
+ val overrideConfig = ConfigFactory.parseFile(new File(overrideConfigPath))
+ ConfigFactory
+ .parseMap(parsePropertiesFromMap(sys.env, keysWithArrayValues, envNamePrefix).asJava)
+ .withFallback(overrideConfig)
+ .withFallback(baseConfig)
+ .resolve()
+ case _ => ConfigFactory
+ .parseMap(parsePropertiesFromMap(sys.env, keysWithArrayValues, envNamePrefix).asJava)
+ .withFallback(baseConfig)
+ .resolve()
+ }
+
+ // In key-value pairs that contain 'password' in the key, replace the value with asterisks
+ LOGGER.info(config.root()
+ .render(ConfigRenderOptions.defaults().setOriginComments(false))
+ .replaceAll("(?i)(\\\".*password\\\"\\s*:\\s*)\\\".+\\\"", "$1********"))
+
+ config
+ }
+
+ /**
+ * @return new config object with haystack specific environment variables
+ */
+ private[haystack] def parsePropertiesFromMap(envVars: Map[String, String],
+ keysWithArrayValues: Set[String],
+ envNamePrefix: String): Map[String, Object] = {
+ envVars.filter {
+ case (envName, _) => envName.startsWith(envNamePrefix)
+ } map {
+ case (envName, envValue) =>
+ val key = transformEnvVarName(envName, envNamePrefix)
+ if (keysWithArrayValues.contains(key)) (key, transformEnvVarArrayValue(envValue)) else (key, envValue)
+ }
+ }
+
+ /**
+ * converts the env variable to HOCON format
+ * for e.g. env variable HAYSTACK_KAFKA_STREAMS_NUM_STREAM_THREADS gets converted to kafka.streams.num.stream.threads
+ * @param env environment variable name
+ * @return variable name that complies with hocon key
+ */
+ private def transformEnvVarName(env: String, envNamePrefix: String): String = {
+ env.replaceFirst(envNamePrefix, "").toLowerCase.replace("_", ".")
+ }
+
+ /**
+ * converts the env variable value to iterable object if it starts and ends with '[' and ']' respectively.
+ * @param env environment variable value
+ * @return string or iterable object
+ */
+ private def transformEnvVarArrayValue(env: String): java.util.List[String] = {
+ if (env.startsWith("[") && env.endsWith("]")) {
+ env.substring(1, env.length - 1).split(',').filter(str => (str != null) && str.nonEmpty).toList.asJava
+ } else {
+ throw new RuntimeException("config key is of array type, so it should start and end with '[', ']' respectively")
+ }
+ }
+}
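
A sketch of how the loader is typically used, assuming a `config/base.conf` resource that defines `kafka.streams.num.stream.threads` (both the key and the value here are illustrative):

```scala
import com.expedia.www.haystack.commons.config.ConfigurationLoader

object ConfigLoadSketch extends App {
  // With HAYSTACK_PROP_KAFKA_STREAMS_NUM_STREAM_THREADS=4 exported, the loader strips the
  // HAYSTACK_PROP_ prefix, lowercases the rest and maps '_' to '.', so the env value
  // overrides the kafka.streams.num.stream.threads key from config/base.conf.
  val config = ConfigurationLoader.loadConfigFileWithEnvOverrides()
  println(config.getInt("kafka.streams.num.stream.threads"))
}
```

Keys whose base value is a list must be overridden with a bracketed value such as `[a,b,c]`, otherwise `transformEnvVarArrayValue` throws.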
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/GraphEdge.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/GraphEdge.scala
new file mode 100644
index 000000000..373772aaa
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/GraphEdge.scala
@@ -0,0 +1,28 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.entities
+
+
+/**
+ * Case class with enough information to build a relationship between two service graph nodes
+ * @param source identifier for the source graph node
+ * @param destination identifier for the destination graph node
+ * @param operation identifier for the graph edge
+ * @param sourceTimestamp timestamp of source in millis
+ */
+case class GraphEdge(source: GraphVertex, destination: GraphVertex, operation: String, sourceTimestamp: Long = System.currentTimeMillis())
\ No newline at end of file
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/GraphVertex.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/GraphVertex.scala
new file mode 100644
index 000000000..b9c3ce259
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/GraphVertex.scala
@@ -0,0 +1,27 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.entities
+
+
+/**
+ * Vertex of a graph that includes the name of the vertex and tags associated with the vertex
+ * @param name: Name of the service vertex
+ * @param tags: List of tag names associated with the service vertex
+ */
+case class GraphVertex(name: String, tags: Map[String, String] = Map.empty[String, String])
+
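Together the two case classes describe one edge of the service graph. A small illustrative construction (the service names and tags are made up):

```scala
import com.expedia.www.haystack.commons.entities.{GraphEdge, GraphVertex}

object GraphEdgeSketch extends App {
  // sourceTimestamp defaults to System.currentTimeMillis(); tags default to an empty map
  val edge = GraphEdge(
    source = GraphVertex("web-frontend", Map("tier" -> "edge")),
    destination = GraphVertex("checkout-service"),
    operation = "placeOrder")
  println(edge)
}
```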
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/Interval.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/Interval.scala
new file mode 100644
index 000000000..6b516aa6f
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/Interval.scala
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.entities
+
+/**
+ * This enum contains the metric intervals supported by the app
+ */
+object Interval extends Enumeration {
+ type Interval = IntervalVal
+ val ONE_MINUTE = IntervalVal("OneMinute", 60)
+ val FIVE_MINUTE = IntervalVal("FiveMinute", 300)
+ val FIFTEEN_MINUTE = IntervalVal("FifteenMinute", 900)
+ val ONE_HOUR = IntervalVal("OneHour", 3600)
+
+ def all: List[Interval] = {
+ List(ONE_MINUTE, FIVE_MINUTE, FIFTEEN_MINUTE, ONE_HOUR)
+ }
+
+ def fromName(name: String): IntervalVal = {
+ name match {
+ case "OneMinute" => ONE_MINUTE
+ case "FiveMinute" => FIVE_MINUTE
+ case "FifteenMinute" => FIFTEEN_MINUTE
+ case "OneHour" => ONE_HOUR
+ case _ => ONE_MINUTE
+ }
+ }
+
+ def fromVal(value: Long): IntervalVal = {
+ value match {
+ case 60 => ONE_MINUTE
+ case 300 => FIVE_MINUTE
+ case 900 => FIFTEEN_MINUTE
+ case 3600 => ONE_HOUR
+ case _ => ONE_MINUTE
+ }
+ }
+
+ sealed case class IntervalVal(name: String, timeInSeconds: Int) extends Val(name) {
+ }
+
+}
+
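Both lookup methods fall back to `ONE_MINUTE` instead of failing on unknown input, which the following illustrative snippet makes explicit:

```scala
import com.expedia.www.haystack.commons.entities.Interval

object IntervalSketch extends App {
  println(Interval.fromName("FiveMinute").timeInSeconds) // 300
  println(Interval.fromVal(900).name)                    // FifteenMinute
  println(Interval.fromName("TenMinute").name)           // OneMinute (the fallback)
}
```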
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/TagKeys.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/TagKeys.scala
new file mode 100644
index 000000000..8b415ef05
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/TagKeys.scala
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.entities
+
+/**
+ * The tag keys follow the metrics 2.0 specification; see http://metrics20.org/spec/#tag-keys
+ */
+object TagKeys {
+ /**
+ * OPERATION_NAME_KEY is an identifier for the operation name specified in haystack.
+ */
+ val OPERATION_NAME_KEY = "operationName"
+ /**
+ * SERVICE_NAME_KEY is an identifier for the service name specified in haystack.
+ */
+ val SERVICE_NAME_KEY = "serviceName"
+ /**
+ * RESULT_KEY is an identifier for result values such as ok or fail.
+ */
+ val RESULT_KEY = "result"
+ /**
+ * STATS_KEY is an identifier that clarifies the statistical view.
+ */
+ val STATS_KEY = "stat"
+ /**
+ * ERROR_KEY is an identifier that specifies whether a span is a success or failure. Useful when trending
+ * success or failure counts.
+ */
+ val ERROR_KEY = "error"
+ /**
+ * INTERVAL_KEY is an identifier that specifies the interval of a trend, e.g. OneMinute, FiveMinute.
+ */
+ val INTERVAL_KEY = "interval"
+ /**
+ * ORG_ID_KEY is an identifier that specifies the organization sending the span/trend.
+ */
+ val ORG_ID_KEY = "orgId"
+ /**
+ * PRODUCT_KEY is an identifier that specifies the namespace of the trend.
+ */
+ val PRODUCT_KEY = "product"
+}
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/Base64Encoder.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/Base64Encoder.scala
new file mode 100644
index 000000000..4276f44b9
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/Base64Encoder.scala
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.entities.encoders
+
+import java.nio.charset.StandardCharsets
+
+import com.google.common.base.Charsets
+import com.google.common.io.BaseEncoding
+
+class Base64Encoder extends Encoder {
+ def encode(value: String): String = {
+ BaseEncoding.base64().withPadChar('_').encode(value.getBytes(Charsets.UTF_8))
+ }
+
+ def decode(value: String): String = {
+ new String(BaseEncoding.base64().withPadChar('_').decode(value), StandardCharsets.UTF_8)
+ }
+}
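
Note the non-standard pad character: `_` replaces the usual `=`, presumably so encoded values stay safe in contexts such as metric names where `=` is problematic. An illustrative round trip:

```scala
import com.expedia.www.haystack.commons.entities.encoders.Base64Encoder

object Base64EncoderSketch extends App {
  val encoder = new Base64Encoder
  val encoded = encoder.encode("svc.name")
  println(encoded)                 // c3ZjLm5hbWU_ ('_' where standard base64 would pad with '=')
  println(encoder.decode(encoded)) // svc.name
}
```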
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/Encoder.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/Encoder.scala
new file mode 100644
index 000000000..88557970c
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/Encoder.scala
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.entities.encoders
+
+trait Encoder {
+
+ def encode(value: String): String
+
+ def decode(value: String): String
+}
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/EncoderFactory.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/EncoderFactory.scala
new file mode 100644
index 000000000..eab806a10
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/EncoderFactory.scala
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.entities.encoders
+
+object EncoderFactory {
+ final val BASE_64 = "base64"
+ final val PERIOD_REPLACEMENT = "periodReplacement"
+
+ def newInstance(key: String): Encoder = {
+ if (BASE_64.equalsIgnoreCase(key)) {
+ new Base64Encoder()
+ } else if (PERIOD_REPLACEMENT.equalsIgnoreCase(key)) {
+ new PeriodReplacementEncoder()
+ } else {
+ new NoopEncoder()
+ }
+ }
+}
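
Key matching is case-insensitive, and anything unrecognized falls back to the no-op encoder. Illustrative usage:

```scala
import com.expedia.www.haystack.commons.entities.encoders.EncoderFactory

object EncoderFactorySketch extends App {
  println(EncoderFactory.newInstance("periodReplacement").encode("svc.name")) // svc___name
  println(EncoderFactory.newInstance("BASE64").encode("svc.name"))            // base64-encoded, key case ignored
  println(EncoderFactory.newInstance("unknown").encode("svc.name"))           // svc.name (NoopEncoder)
}
```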
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/NoopEncoder.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/NoopEncoder.scala
new file mode 100644
index 000000000..156214481
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/NoopEncoder.scala
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.entities.encoders
+
+class NoopEncoder extends Encoder {
+ def encode(value: String): String = {
+ value
+ }
+
+ def decode(value: String): String = {
+ value
+ }
+}
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/PeriodReplacementEncoder.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/PeriodReplacementEncoder.scala
new file mode 100644
index 000000000..774cd5e40
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/entities/encoders/PeriodReplacementEncoder.scala
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.entities.encoders
+
+class PeriodReplacementEncoder extends Encoder {
+ def encode(value: String): String = {
+ value.replace(".", "___")
+ }
+
+ def decode(value: String): String = {
+ value.replace("___", ".")
+ }
+}
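
One caveat worth knowing: the encoding is not injective. A literal `___` in the input is indistinguishable from an encoded period, so decode is only a true inverse for values that never contain `___`:

```scala
import com.expedia.www.haystack.commons.entities.encoders.PeriodReplacementEncoder

object PeriodReplacementSketch extends App {
  val encoder = new PeriodReplacementEncoder
  println(encoder.decode(encoder.encode("svc.name"))) // svc.name (round-trips cleanly)

  // a pre-existing "___" collapses to '.' on decode
  println(encoder.decode(encoder.encode("a___b")))    // a.b, not a___b
}
```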
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/graph/GraphEdgeTagCollector.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/graph/GraphEdgeTagCollector.scala
new file mode 100644
index 000000000..fd4127eed
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/graph/GraphEdgeTagCollector.scala
@@ -0,0 +1,59 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.graph
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.Tag.TagType
+import com.expedia.www.haystack.commons.entities.TagKeys
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+
+/**
+ * Define tag names that should be collected when building a GraphEdge.
+ * @param tags: Set of tag names to be collected for building the graph edge
+ */
+class GraphEdgeTagCollector(tags: Set[String] = Set()) {
+
+ /**
+ * Default tags that will always be collected.
+ */
+ private val defaultTags: Set[String] = Set(TagKeys.ERROR_KEY)
+
+ private val filteredTags = defaultTags ++ tags
+
+ /**
+ * @param span: Span containing all the tags
+ * @return Filtered map of the span's tag keys and values that match the predefined tag names.
+ */
+ def collectTags(span: Span): Map[String, String] = {
+ val edgeTags = mutable.Map[String, String]()
+ span.getTagsList.asScala.filter(t => filteredTags.contains(t.getKey)).foreach { tag =>
+ tag.getType match {
+ case TagType.STRING => edgeTags += (tag.getKey -> tag.getVStr)
+ case TagType.BOOL => edgeTags += (tag.getKey -> tag.getVBool.toString)
+ case TagType.DOUBLE => edgeTags += (tag.getKey -> tag.getVDouble.toString)
+ case TagType.LONG => edgeTags += (tag.getKey -> tag.getVLong.toString)
+ case _ => throw new IllegalArgumentException("Invalid tag type detected.")
+ }
+ }
+ edgeTags.toMap
+ }
+}
+
+
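An illustrative run showing the filter in action (the span and tag values are made up):

```scala
import com.expedia.open.tracing.{Span, Tag}
import com.expedia.open.tracing.Tag.TagType
import com.expedia.www.haystack.commons.graph.GraphEdgeTagCollector

object TagCollectorSketch extends App {
  val span = Span.newBuilder()
    .addTags(Tag.newBuilder().setKey("error").setType(TagType.BOOL).setVBool(true))
    .addTags(Tag.newBuilder().setKey("tier").setType(TagType.STRING).setVStr("edge"))
    .addTags(Tag.newBuilder().setKey("http.url").setType(TagType.STRING).setVStr("/checkout"))
    .build()

  // "error" survives because it is a default tag, "tier" because we asked for it;
  // "http.url" is dropped since it is in neither set.
  val collector = new GraphEdgeTagCollector(Set("tier"))
  println(collector.collectTags(span)) // Map(error -> true, tier -> edge), ordering may vary
}
```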
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/health/HealthController.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/health/HealthController.scala
new file mode 100644
index 000000000..f3299709d
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/health/HealthController.scala
@@ -0,0 +1,90 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.health
+
+import java.util.concurrent.atomic.AtomicReference
+
+import com.expedia.www.haystack.commons.health.HealthStatus.HealthStatus
+import org.slf4j.LoggerFactory
+
+import scala.collection.mutable
+
+object HealthStatus extends Enumeration {
+ type HealthStatus = Value
+ val HEALTHY, UNHEALTHY, NOT_SET = Value
+}
+
+/**
+ * Maintains the app's health status and notifies registered listeners on change
+ */
+class HealthStatusController {
+ private val LOGGER = LoggerFactory.getLogger(classOf[HealthStatusController])
+ private val status = new AtomicReference[HealthStatus](HealthStatus.NOT_SET)
+ private val listeners = mutable.ListBuffer[HealthStatusChangeListener]()
+
+ def setHealthy(): Unit = {
+ LOGGER.info("Setting the app status as 'HEALTHY'")
+ if(status.getAndSet(HealthStatus.HEALTHY) != HealthStatus.HEALTHY) notifyChange(HealthStatus.HEALTHY)
+ }
+
+ def setUnhealthy(): Unit = {
+ LOGGER.error("Setting the app status as 'UNHEALTHY'")
+ if(status.getAndSet(HealthStatus.UNHEALTHY) != HealthStatus.UNHEALTHY) notifyChange(HealthStatus.UNHEALTHY)
+ }
+
+ def isHealthy: Boolean = status.get() == HealthStatus.HEALTHY
+
+ def addListener(l: HealthStatusChangeListener): Unit = listeners += l
+
+ private def notifyChange(status: HealthStatus): Unit = {
+ listeners foreach {
+ l =>
+ l.onChange(status)
+ }
+ }
+}
+
+object HealthController {
+ private val healthController = new HealthStatusController
+
+ /**
+ * set the app status as healthy
+ */
+ def setHealthy(): Unit = {
+ healthController.setHealthy()
+ }
+
+ /**
+ * set the app status as unhealthy
+ */
+ def setUnhealthy(): Unit = {
+ healthController.setUnhealthy()
+ }
+
+ /**
+ * @return true if app is healthy else false
+ */
+ def isHealthy: Boolean = healthController.isHealthy
+
+ /**
+ * add health change listener that will be called on any change in the health status
+ * @param l listener
+ */
+ def addListener(l: HealthStatusChangeListener): Unit = healthController.addListener(l)
+}
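
Listeners fire only on actual transitions, thanks to the `getAndSet` comparison. An illustrative wiring:

```scala
import com.expedia.www.haystack.commons.health.{HealthStatus, HealthStatusChangeListener, HealthStatusController}

object HealthSketch extends App {
  val controller = new HealthStatusController
  controller.addListener(new HealthStatusChangeListener {
    override def onChange(status: HealthStatus.HealthStatus): Unit = println(s"health changed to $status")
  })
  controller.setHealthy()       // NOT_SET -> HEALTHY, listener fires
  controller.setHealthy()       // no transition, listener stays quiet
  println(controller.isHealthy) // true
}
```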
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/health/HealthStatusChangeListener.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/health/HealthStatusChangeListener.scala
new file mode 100644
index 000000000..da71e2ce7
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/health/HealthStatusChangeListener.scala
@@ -0,0 +1,33 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.health
+
+import com.expedia.www.haystack.commons.health.HealthStatus.HealthStatus
+
+/**
+ * health status listener
+ */
+trait HealthStatusChangeListener {
+
+ /**
+ * called whenever there is a state change in health
+ * @param status the new health status
+ */
+ def onChange(status: HealthStatus): Unit
+}
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/health/UpdateHealthStatusFile.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/health/UpdateHealthStatusFile.scala
new file mode 100644
index 000000000..c8329322a
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/health/UpdateHealthStatusFile.scala
@@ -0,0 +1,41 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.health
+
+import java.nio.charset.StandardCharsets
+import java.nio.file.{Files, Paths}
+
+import com.expedia.www.haystack.commons.health.HealthStatus.HealthStatus
+
+/**
+ * writes the current health status to a status file. This can be used to expose the health status to external
+ * systems such as container orchestration frameworks
+ * @param statusFilePath: file path where the health status will be recorded.
+ */
+class UpdateHealthStatusFile(statusFilePath: String) extends HealthStatusChangeListener {
+
+ /**
+ * called on any change in the app's health status
+ * @param status: current health status
+ */
+ override def onChange(status: HealthStatus): Unit = {
+ val isHealthy = if(status == HealthStatus.HEALTHY) "true" else "false"
+ Files.write(Paths.get(statusFilePath), isHealthy.getBytes(StandardCharsets.UTF_8))
+ }
+}
\ No newline at end of file
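A minimal wiring sketch for the health classes above (the status-file path is an illustrative assumption, not part of this diff):

    import com.expedia.www.haystack.commons.health.{HealthController, UpdateHealthStatusFile}

    object HealthWiring {
      def main(args: Array[String]): Unit = {
        // keep an on-disk flag in sync with the app's health; the path is an assumed example
        HealthController.addListener(new UpdateHealthStatusFile("/app/isHealthy"))
        HealthController.setHealthy()
        assert(HealthController.isHealthy) // the listener has also written "true" to the file
      }
    }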
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/GraphEdgeTimestampExtractor.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/GraphEdgeTimestampExtractor.scala
new file mode 100644
index 000000000..ce6d9f43a
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/GraphEdgeTimestampExtractor.scala
@@ -0,0 +1,34 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.kstreams
+
+import com.expedia.www.haystack.commons.entities.GraphEdge
+import org.apache.kafka.clients.consumer.ConsumerRecord
+import org.apache.kafka.streams.processor.TimestampExtractor
+
+
+class GraphEdgeTimestampExtractor extends TimestampExtractor with IteratorAgeMetricSupport {
+ override def extract(consumerRecord: ConsumerRecord[AnyRef, AnyRef], previousTimestamp: Long): Long = {
+
+ // sourceTimestamp of GraphEdge in millis
+ val sourceTimestampMs = consumerRecord.value().asInstanceOf[GraphEdge].sourceTimestamp
+ updateIteratorAge(sourceTimestampMs)
+ sourceTimestampMs
+ }
+}
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/IteratorAgeMetricSupport.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/IteratorAgeMetricSupport.scala
new file mode 100644
index 000000000..ba2d59e7a
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/IteratorAgeMetricSupport.scala
@@ -0,0 +1,13 @@
+package com.expedia.www.haystack.commons.kstreams
+
+import com.codahale.metrics.Histogram
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+
+trait IteratorAgeMetricSupport extends MetricsSupport {
+
+ val iteratorAge: Histogram = metricRegistry.histogram("kafka.iterator.age.ms")
+
+ def updateIteratorAge(timeInMs: Long): Unit = {
+ iteratorAge.update(System.currentTimeMillis() - timeInMs)
+ }
+}
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/MetricDataTimestampExtractor.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/MetricDataTimestampExtractor.scala
new file mode 100644
index 000000000..81715988f
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/MetricDataTimestampExtractor.scala
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.kstreams
+
+import com.expedia.metrics.MetricData
+import org.apache.kafka.clients.consumer.ConsumerRecord
+import org.apache.kafka.streams.processor.TimestampExtractor
+
+class MetricDataTimestampExtractor extends TimestampExtractor with IteratorAgeMetricSupport {
+
+ override def extract(record: ConsumerRecord[AnyRef, AnyRef], previousTimestamp: Long): Long = {
+
+ // The timestamp of MetricData is in seconds, hence multiply by 1000 to get the epoch time in millis
+ val metricDataTimestampMs = record.value().asInstanceOf[MetricData].getTimestamp * 1000
+ updateIteratorAge(metricDataTimestampMs)
+ metricDataTimestampMs
+
+ }
+
+}
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/SpanTimestampExtractor.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/SpanTimestampExtractor.scala
new file mode 100644
index 000000000..8337ca4eb
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/SpanTimestampExtractor.scala
@@ -0,0 +1,34 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.kstreams
+
+import com.expedia.open.tracing.Span
+import org.apache.kafka.clients.consumer.ConsumerRecord
+import org.apache.kafka.streams.processor.TimestampExtractor
+
+class SpanTimestampExtractor extends TimestampExtractor with IteratorAgeMetricSupport {
+
+ override def extract(record: ConsumerRecord[AnyRef, AnyRef], previousTimestamp: Long): Long = {
+
+ // The startTime of a span is in microseconds, hence divide by 1000 to get the epoch time in millis
+ val spanStartTimeMs = record.value().asInstanceOf[Span].getStartTime / 1000
+ updateIteratorAge(spanStartTimeMs)
+ spanStartTimeMs
+ }
+}
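A sketch of wiring one of these extractors into a streams app, assuming a Kafka Streams version that exposes DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG (application id and bootstrap servers are illustrative):

    import java.util.Properties

    import com.expedia.www.haystack.commons.kstreams.SpanTimestampExtractor
    import org.apache.kafka.streams.StreamsConfig

    object ExtractorConfig {
      def streamsProps(): Properties = {
        val props = new Properties()
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "span-consumer") // illustrative
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka:9092") // illustrative
        // use the span's start time (and record iterator age) instead of the record's ingest time
        props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, classOf[SpanTimestampExtractor].getName)
        props
      }
    }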
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/Main.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/Main.scala
new file mode 100644
index 000000000..fc71402f5
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/Main.scala
@@ -0,0 +1,103 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.kstreams.app
+
+import java.util.concurrent.atomic.AtomicBoolean
+
+import com.codahale.metrics.JmxReporter
+import com.expedia.www.haystack.commons.logger.LoggerUtils
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import org.slf4j.LoggerFactory
+
+/**
+ * Starting point of a Kafka Streams application. One should extend this
+ * trait and provide a valid instance of `StreamsRunner` by overriding
+ * createStreamsRunner method to create and start a Kafka Streams application
+ */
+trait Main extends MetricsSupport {
+
+ def main(args: Array[String]): Unit = {
+ //create an instance of the application
+ val jmxReporter: JmxReporter = JmxReporter.forRegistry(metricRegistry).build()
+ val app = new Application(createStreamsRunner(), jmxReporter)
+
+ //start the application
+ app.start()
+
+ //add a shutdown hook
+ Runtime.getRuntime.addShutdownHook(new Thread() {
+ override def run(): Unit = app.stop()
+ })
+ }
+
+ /**
+ * This method should create and return a new instance of the `StreamsRunner` class
+ * That instance will be started and stopped as part of the application lifecycle
+ * @return Instance of `StreamsRunner` to be managed
+ */
+ def createStreamsRunner(): StreamsRunner
+}
+
+/**
+ * This is the main application class. This controls the application
+ * start and shutdown actions
+ *
+ * @param streamsRunner instance of StreamsRunner to start and stop the
+ * streams application
+ */
+class Application(streamsRunner: StreamsRunner, jmxReporter: JmxReporter) extends MetricsSupport {
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[Application])
+ private val running = new AtomicBoolean(false)
+
+ require(streamsRunner != null)
+ require(jmxReporter != null)
+
+ /**
+ * Starts the given `StreamsRunner` and `JmxReporter` instances
+ */
+ def start(): Unit = {
+ //start JMX reporter for metricRegistry
+ jmxReporter.start()
+
+ //start the topology
+ streamsRunner.start()
+
+ //initialized
+ running.set(true)
+ }
+
+ /**
+ * This method stops the given `StreamsRunner` and `JmxReporter` if they have been
+ * previously started. If not, this method does nothing
+ */
+ def stop(): Unit = {
+ if (running.getAndSet(false)) {
+ LOGGER.info("Shutting down topology")
+ streamsRunner.close()
+
+ LOGGER.info("Shutting down jmxReporter")
+ jmxReporter.close()
+
+ LOGGER.info("Shutting down logger. Bye!")
+ LoggerUtils.shutdownLogger()
+ }
+ }
+}
+
+
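A sketch of how an application might extend `Main` (the topology wiring is elided; `buildFactory` is a hypothetical helper, not part of this diff):

    import com.expedia.www.haystack.commons.health.HealthStatusController
    import com.expedia.www.haystack.commons.kstreams.app.{Main, StateChangeListener, StreamsFactory, StreamsRunner}

    object MyStreamsApp extends Main {
      override def createStreamsRunner(): StreamsRunner = {
        val healthController = new HealthStatusController
        new StreamsRunner(buildFactory(), new StateChangeListener(healthController))
      }

      // hypothetical helper: supply a Supplier[Topology] and a StreamsConfig here
      private def buildFactory(): StreamsFactory = ???
    }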
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/ManagedKafkaStreams.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/ManagedKafkaStreams.scala
new file mode 100644
index 000000000..ef8a38c4e
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/ManagedKafkaStreams.scala
@@ -0,0 +1,68 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.kstreams.app
+
+import java.util.concurrent.TimeUnit
+import java.util.concurrent.atomic.AtomicBoolean
+
+import org.apache.kafka.streams.KafkaStreams
+
+/**
+ * Simple service wrapper over `KafkaStreams` to manage the life cycle of the
+ * instance.
+ *
+ * @param kafkaStreams underlying KafkaStreams instance that needs to be
+ * managed
+ * @param closeWaitInSeconds time to wait in seconds while stopping KafkaStreams
+ */
+class ManagedKafkaStreams(kafkaStreams: KafkaStreams, closeWaitInSeconds: Int) extends ManagedService {
+ require(kafkaStreams != null)
+ private val isRunning: AtomicBoolean = new AtomicBoolean(false)
+
+ /**
+ * This creates a managed KafkaStreams that waits forever at
+ * stop. To provide a specific timeout use the other constructor
+ *
+ * @param kafkaStreams underlying KafkaStreams instance that needs to be
+ * managed
+ */
+ def this(kafkaStreams: KafkaStreams) = this(kafkaStreams, 0)
+
+ /**
+ * @see ManagedService.start
+ */
+ override def start(): Unit = {
+ kafkaStreams.start()
+ isRunning.set(true)
+ }
+
+ /**
+ * @see ManagedService.stop
+ */
+ override def stop(): Unit = {
+ if (isRunning.getAndSet(false)) {
+ kafkaStreams.close(closeWaitInSeconds, TimeUnit.SECONDS)
+ }
+ }
+
+ /**
+ * @see ManagedService.hasStarted
+ * @return
+ */
+ override def hasStarted: Boolean = isRunning.get()
+}
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/ManagedService.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/ManagedService.scala
new file mode 100644
index 000000000..8fd176fe2
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/ManagedService.scala
@@ -0,0 +1,44 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.kstreams.app
+
+/**
+ * A simple trait for managing a service
+ */
+trait ManagedService {
+ /**
+ * This method is called when the service needs to be started
+ * Any exception thrown by this method is propagated up the calling chain.
+ * After a successful start, `hasStarted` should return true
+ */
+ def start()
+
+ /**
+ * This method is called when the service needs to be stopped.
+ * If the service has not been started, this method should do nothing and
+ * have no side effects.
+ * After successfully stopping, `hasStarted` should return false
+ */
+ def stop()
+
+ /**
+ * Indicates the state of the service
+ * @return
+ */
+ def hasStarted : Boolean
+}
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/StateChangeListener.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/StateChangeListener.scala
new file mode 100644
index 000000000..b0edde1ae
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/StateChangeListener.scala
@@ -0,0 +1,53 @@
+package com.expedia.www.haystack.commons.kstreams.app
+
+import com.expedia.www.haystack.commons.health.HealthStatusController
+import org.apache.kafka.streams.KafkaStreams
+import org.apache.kafka.streams.KafkaStreams.StateListener
+import org.slf4j.LoggerFactory
+
+/**
+ * Watches the state of a KafkaStreams application and sets the health of the process
+ * using the provided `HealthStatusController` instance
+ * @param healthStatusController required instance of `HealthStatusController` that manages
+ * the state of the current process
+ */
+class StateChangeListener(healthStatusController: HealthStatusController) extends StateListener
+ with Thread.UncaughtExceptionHandler {
+
+ require(healthStatusController != null)
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[StateChangeListener])
+
+ /**
+ * Method to set the status of the application
+ * @param healthy sets the state as healthy if true, unhealthy if false
+ */
+ def state(healthy : Boolean) : Unit =
+ if (healthy) {
+ healthStatusController.setHealthy()
+ }
+ else {
+ healthStatusController.setUnhealthy()
+ }
+
+ /**
+ * This method is called when state of the KafkaStreams application changes.
+ *
+ * @param newState new state
+ * @param oldState previous state
+ */
+ override def onChange(newState: KafkaStreams.State, oldState: KafkaStreams.State): Unit = {
+ LOGGER.info(s"State change event called with newState=$newState and oldState=$oldState")
+ }
+
+ /**
+ * This method is invoked when the given thread terminates due to the
+ * given uncaught exception.
+ * @param t the thread that had an unhandled exception
+ * @param e the exception that caused the thread to terminate
+ */
+ override def uncaughtException(t: Thread, e: Throwable): Unit = {
+ LOGGER.error(s"uncaught exception occurred running kafka streams for thread=${t.getName}", e)
+ state(false)
+ }
+}
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/StreamsFactory.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/StreamsFactory.scala
new file mode 100644
index 000000000..4615e00f2
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/StreamsFactory.scala
@@ -0,0 +1,107 @@
+package com.expedia.www.haystack.commons.kstreams.app
+
+import java.util.Properties
+import java.util.concurrent.TimeUnit
+import java.util.function.Supplier
+
+import org.apache.kafka.clients.admin.AdminClient
+import org.apache.kafka.streams.{KafkaStreams, StreamsConfig, Topology}
+
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+import org.slf4j.LoggerFactory
+
+import scala.util.Try
+
+/**
+ * Factory class to create a KafkaStreams instance and wrap it as a simple service {@see ManagedKafkaStreams}
+ *
+ * Optionally this class can check the presence of consuming topic
+ *
+ * @param topologySupplier A supplier that creates and returns a Kafka Stream Topology
+ * @param streamsConfig Configuration instance for KafkaStreams
+ * @param consumerTopic Optional consuming topic name
+ */
+class StreamsFactory(topologySupplier: Supplier[Topology], streamsConfig: StreamsConfig, consumerTopic: String) {
+
+ require(topologySupplier != null, "topologySupplier is required")
+ require(streamsConfig != null, "streamsConfig is required")
+
+ val consumerTopicName: Option[String] = Option(consumerTopic)
+
+ def this(streamsSupplier: Supplier[Topology], streamsConfig: StreamsConfig) = this(streamsSupplier, streamsConfig, null)
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[StreamsFactory])
+
+ /**
+ * creates a new instance of KafkaStreams application wrapped as a {@link ManagedService} instance
+ * @param listener instance of StateChangeListener that observes KafkaStreams state changes
+ * @return instance of ManagedService
+ */
+ def create(listener: StateChangeListener): ManagedService = {
+ checkConsumerTopic()
+
+ val streams = new KafkaStreams(topologySupplier.get(), streamsConfig)
+ streams.setStateListener(listener)
+ streams.setUncaughtExceptionHandler(listener)
+ streams.cleanUp()
+
+ val timeOut = Option(streamsConfig.getInt(StreamsConfig.REQUEST_TIMEOUT_MS_CONFIG)) match {
+ case Some(v) if v > 0 => v / 1000
+ case _ => 5
+ }
+
+ new ManagedKafkaStreams(streams, timeOut)
+ }
+
+ private def checkConsumerTopic(): Unit = {
+ if (consumerTopicName.nonEmpty) {
+ val topicName = consumerTopicName.get
+ LOGGER.info(s"checking for the consumer topic $topicName")
+ val adminClient = AdminClient.create(getBootstrapProperties)
+ try {
+ val present = adminClient.listTopics().names().get().contains(topicName)
+ if (!present) {
+ throw new TopicNotPresentException(topicName,
+ s"Topic '$topicName' is configured as a consumer topic but is not present")
+ }
+ }
+ finally {
+ Try(adminClient.close(5, TimeUnit.SECONDS))
+ }
+ }
+ }
+
+ private def getBootstrapProperties: Properties = {
+ val properties = new Properties()
+ properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
+ streamsConfig.getList(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG))
+ properties
+ }
+
+ /**
+ * Custom RuntimeException that represents a required Kafka topic not being present
+ * @param topic Name of the topic that is missing
+ * @param message Message
+ */
+ class TopicNotPresentException(topic: String, message: String) extends RuntimeException(message) {
+ def getTopic : String = topic
+ }
+}
+
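A construction sketch for the factory, assuming a topology built with the standard StreamsBuilder (the consumer topic name is illustrative):

    import java.util.Properties
    import java.util.function.Supplier

    import com.expedia.www.haystack.commons.kstreams.app.StreamsFactory
    import org.apache.kafka.streams.{StreamsBuilder, StreamsConfig, Topology}

    object FactoryExample {
      private val topologySupplier: Supplier[Topology] = new Supplier[Topology] {
        override def get(): Topology = {
          val builder = new StreamsBuilder()
          // ... wire sources, processors and sinks here ...
          builder.build()
        }
      }

      def newFactory(props: Properties): StreamsFactory =
        new StreamsFactory(topologySupplier, new StreamsConfig(props), "proto-spans") // topic name illustrative
    }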
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/StreamsRunner.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/StreamsRunner.scala
new file mode 100644
index 000000000..7c5c5d303
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/app/StreamsRunner.scala
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.kstreams.app
+
+import org.slf4j.LoggerFactory
+
+import scala.util.{Failure, Success, Try}
+
+class StreamsRunner(streamsFactory: StreamsFactory, stateChangeListener: StateChangeListener) extends AutoCloseable {
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[StreamsRunner])
+ private var managedStreams : ManagedService = _
+
+ require(streamsFactory != null, "valid streamsFactory is required")
+ require(stateChangeListener != null, "valid stateChangeListener is required")
+
+ def start(): Unit = {
+ LOGGER.info("Starting the given topology.")
+
+ Try(streamsFactory.create(stateChangeListener)) match {
+ case Success(streams) =>
+ managedStreams = streams
+ managedStreams.start()
+ stateChangeListener.state(true)
+ LOGGER.info("KafkaStreams started successfully")
+ case Failure(e) =>
+ LOGGER.error(s"KafkaStreams failed to start : ${e.getMessage}", e)
+ stateChangeListener.state(false)
+ }
+ }
+
+ def close(): Unit = {
+ if (managedStreams != null) {
+ managedStreams.stop()
+ }
+ }
+}
+
+
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/SpanSerde.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/SpanSerde.scala
new file mode 100644
index 000000000..c0fd3a859
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/SpanSerde.scala
@@ -0,0 +1,75 @@
+package com.expedia.www.haystack.commons.kstreams.serde
+
+import java.util
+
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import org.apache.kafka.common.serialization.{Deserializer, Serde, Serializer}
+
+class SpanSerde extends Serde[Span] with MetricsSupport {
+
+ override def configure(configs: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+
+ def serializer: Serializer[Span] = {
+ new SpanSerializer
+ }
+
+ def deserializer: Deserializer[Span] = {
+ new SpanDeserializer
+ }
+}
+
+class SpanSerializer extends Serializer[Span] {
+ override def configure(configs: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+
+ override def serialize(topic: String, obj: Span): Array[Byte] = if (obj != null) obj.toByteArray else null
+}
+
+class SpanDeserializer extends Deserializer[Span] with MetricsSupport {
+ private val spanSerdeMeter = metricRegistry.meter("span.serde.failure")
+
+ override def configure(configs: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+
+ override def deserialize(topic: String, data: Array[Byte]): Span = performDeserialize(data)
+
+ /**
+ * converts the binary protobuf bytes into a Span object
+ *
+ * @param data serialized bytes of Span
+ * @return
+ */
+ private def performDeserialize(data: Array[Byte]): Span = {
+ try {
+ if (data == null || data.length == 0) null else Span.parseFrom(data)
+ } catch {
+ case _: Exception =>
+ // deserialization failed; mark the failure meter and return null
+ spanSerdeMeter.mark()
+ null
+ }
+ }
+}
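A round-trip sketch, assuming the usual protobuf builder setters on Span (the topic argument is unused by this serde; field values are illustrative):

    import com.expedia.open.tracing.Span
    import com.expedia.www.haystack.commons.kstreams.serde.SpanSerde

    object SpanSerdeExample {
      def main(args: Array[String]): Unit = {
        val serde = new SpanSerde
        val span = Span.newBuilder().setTraceId("trace-1").setSpanId("span-1").build()
        val bytes = serde.serializer.serialize("proto-spans", span)
        val back = serde.deserializer.deserialize("proto-spans", bytes)
        assert(back.getTraceId == "trace-1") // malformed bytes would instead come back as null
      }
    }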
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/graph/GraphEdgeKeySerde.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/graph/GraphEdgeKeySerde.scala
new file mode 100644
index 000000000..3754cb035
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/graph/GraphEdgeKeySerde.scala
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.kstreams.serde.graph
+
+import java.util
+
+import com.expedia.www.haystack.commons.entities.{GraphEdge, GraphVertex}
+import org.apache.kafka.common.serialization.{Deserializer, Serde, Serializer}
+import org.json4s.DefaultFormats
+import org.json4s.jackson.Serialization
+
+class GraphEdgeKeySerde extends Serde[GraphEdge] {
+ implicit val formats = DefaultFormats
+ override def deserializer(): Deserializer[GraphEdge] = new GraphEdgeKeyDeserializer()
+
+ override def serializer(): Serializer[GraphEdge] = new GraphEdgeKeySerializer()
+
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+
+
+ class GraphEdgeKeyDeserializer extends Deserializer[GraphEdge] {
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+
+ override def deserialize(topic: String, data: Array[Byte]): GraphEdge = {
+ Serialization.read[GraphEdge](new String(data))
+ }
+ }
+
+ class GraphEdgeKeySerializer extends Serializer[GraphEdge] {
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def serialize(topic: String, edge: GraphEdge): Array[Byte] = {
+ Serialization.write(normalizeKey(edge)).getBytes("utf-8")
+ }
+
+ override def close(): Unit = ()
+ }
+
+ private def normalizeKey(edge: GraphEdge): GraphEdge = {
+ GraphEdge(source = GraphVertex(edge.source.name), destination = GraphVertex(edge.destination.name), edge.operation, 0L)
+ }
+}
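Because the serializer normalizes the key, edges that differ only in their timestamp produce identical key bytes. A sketch (the GraphEdge/GraphVertex constructor shapes are inferred from normalizeKey above; the topic name is illustrative):

    import com.expedia.www.haystack.commons.entities.{GraphEdge, GraphVertex}
    import com.expedia.www.haystack.commons.kstreams.serde.graph.GraphEdgeKeySerde

    object KeyNormalizationExample {
      def main(args: Array[String]): Unit = {
        val serde = new GraphEdgeKeySerde
        val e1 = GraphEdge(GraphVertex("svc-a"), GraphVertex("svc-b"), "checkout", 1L)
        val e2 = GraphEdge(GraphVertex("svc-a"), GraphVertex("svc-b"), "checkout", 2L)
        val k1 = serde.serializer().serialize("graph-nodes", e1)
        val k2 = serde.serializer().serialize("graph-nodes", e2)
        assert(java.util.Arrays.equals(k1, k2)) // timestamps are zeroed before serializing the key
      }
    }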
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/graph/GraphEdgeValueSerde.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/graph/GraphEdgeValueSerde.scala
new file mode 100644
index 000000000..264f191e8
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/graph/GraphEdgeValueSerde.scala
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.kstreams.serde.graph
+
+import java.util
+
+import com.expedia.www.haystack.commons.entities.GraphEdge
+import org.apache.kafka.common.serialization.{Deserializer, Serde, Serializer}
+import org.json4s.DefaultFormats
+import org.json4s.jackson.Serialization
+
+class GraphEdgeValueSerde extends Serde[GraphEdge] {
+ implicit val formats = DefaultFormats
+
+ override def deserializer(): Deserializer[GraphEdge] = new GraphEdgeDeserializer
+
+ override def serializer(): Serializer[GraphEdge] = new GraphEdgeSerializer
+
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+
+ class GraphEdgeSerializer extends Serializer[GraphEdge] {
+ override def serialize(topic: String, graphEdge: GraphEdge): Array[Byte] = {
+ Serialization.write(graphEdge).getBytes("utf-8")
+ }
+
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+ }
+
+ class GraphEdgeDeserializer extends Deserializer[GraphEdge] {
+ override def deserialize(topic: String, data: Array[Byte]): GraphEdge = {
+ Serialization.read[GraphEdge](new String(data))
+ }
+
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+ }
+}
+
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/metricdata/MetricDataSerde.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/metricdata/MetricDataSerde.scala
new file mode 100644
index 000000000..963a0c7f8
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/metricdata/MetricDataSerde.scala
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2019 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.kstreams.serde.metricdata
+
+import java.nio.ByteBuffer
+import java.util
+
+import com.expedia.metrics.{MetricData, MetricDefinition, TagCollection}
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import org.apache.kafka.common.serialization.{Deserializer, Serde, Serializer}
+import org.msgpack.core.MessagePack.Code
+import org.msgpack.core.{MessagePack, MessagePacker}
+import org.msgpack.value.impl.ImmutableLongValueImpl
+import org.msgpack.value.{Value, ValueFactory}
+
+import scala.collection.JavaConverters._
+import scala.collection.immutable.ListMap
+
+/**
+ * This class takes a MetricData object and serializes it into a messagepack-encoded byte stream
+ * in the metrics 2.0 format. The serialized data is then streamed to Kafka
+ */
+class MetricDataSerde() extends Serde[MetricData] with MetricsSupport {
+
+ override def deserializer(): MetricDeserializer = {
+ new MetricDeserializer()
+ }
+
+ override def serializer(): MetricSerializer = {
+ new MetricSerializer()
+ }
+
+ override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+}
+
+class MetricDeserializer() extends Deserializer[MetricData] with MetricsSupport {
+
+ private val metricDataDeserFailureMeter = metricRegistry.meter("metricdata.deser.failure")
+ private val TAG_DELIMETER = "="
+ private val metricKey = "Metric"
+ private val valueKey = "Value"
+ private val timeKey = "Time"
+ private val tagsKey = "Tags"
+
+
+ override def configure(map: java.util.Map[String, _], b: Boolean): Unit = ()
+
+ /**
+ * converts the messagepack bytes into a MetricData object
+ *
+ * @param data serialized bytes of MetricData
+ * @return
+ */
+ override def deserialize(topic: String, data: Array[Byte]): MetricData = {
+ try {
+ val unpacker = MessagePack.newDefaultUnpacker(data)
+ val metricData = unpacker.unpackValue().asMapValue().map()
+ val key = metricData.get(ValueFactory.newString(metricKey)).asStringValue().toString
+ val tags = createTags(metricData)
+ val metricDefinition = new MetricDefinition(key, new TagCollection(tags.asJava), TagCollection.EMPTY)
+ new MetricData(metricDefinition, metricData.get(ValueFactory.newString(valueKey)).asFloatValue().toDouble,
+ metricData.get(ValueFactory.newString(timeKey)).asIntegerValue().toLong)
+ } catch {
+ case ex: Exception =>
+ // deserialization failed; mark the failure meter and return null
+ metricDataDeserFailureMeter.mark()
+ null
+ }
+ }
+
+
+ private def createTags(metricData: util.Map[Value, Value]): Map[String, String] = {
+ ListMap(metricData.get(ValueFactory.newString(tagsKey)).asArrayValue().list().asScala.map(tag => {
+ val kvPairs = tag.toString.split(TAG_DELIMETER)
+ (kvPairs(0), kvPairs(1))
+ }): _*)
+ }
+
+
+ override def close(): Unit = ()
+}
+
+class MetricSerializer() extends Serializer[MetricData] with MetricsSupport {
+ private val metricDataSerFailureMeter = metricRegistry.meter("metricdata.ser.failure")
+ private val metricDataSerSuccessMeter = metricRegistry.meter("metricdata.ser.success")
+ private val metricKey = "Metric"
+ private val valueKey = "Value"
+ private val timeKey = "Time"
+ private val tagsKey = "Tags"
+
+ override def configure(map: java.util.Map[String, _], b: Boolean): Unit = ()
+
+ override def serialize(topic: String, metricData: MetricData): Array[Byte] = {
+ try {
+ val packer = MessagePack.newDefaultBufferPacker()
+
+ val metricDataMap = Map[Value, Value](
+ ValueFactory.newString(metricKey) -> ValueFactory.newString(metricData.getMetricDefinition.getKey),
+ ValueFactory.newString(valueKey) -> ValueFactory.newFloat(metricData.getValue),
+ ValueFactory.newString(timeKey) -> new ImmutableSignedLongValueImpl(metricData.getTimestamp),
+ ValueFactory.newString(tagsKey) -> ValueFactory.newArray(retrieveTags(metricData).asJava)
+ )
+ packer.packValue(ValueFactory.newMap(metricDataMap.asJava))
+ val data = packer.toByteArray
+ metricDataSerSuccessMeter.mark()
+ data
+ } catch {
+ case ex: Exception =>
+ // serialization failed; mark the failure meter and return null
+ metricDataSerFailureMeter.mark()
+ null
+ }
+ }
+
+ private def retrieveTags(metricData: MetricData): List[Value] = {
+ getMetricTags(metricData).asScala.map(tuple => {
+ ValueFactory.newString(s"${tuple._1}=${tuple._2}")
+ }).toList
+ }
+
+ private def getMetricTags(metricData: MetricData): util.Map[String, String] = {
+ metricData.getMetricDefinition.getTags.getKv
+ }
+
+ override def close(): Unit = ()
+
+ /**
+ * This is a value extension class for the signed long type. The java client for messagepack packs positive longs
+ * as unsigned, and there is no way to force a signed long whose numerical value is positive.
+ * The Metric Tank schema requires a signed long type for the timestamp key.
+ *
+ * @param long the value to be written explicitly as a signed int64
+ */
+ class ImmutableSignedLongValueImpl(long: Long) extends ImmutableLongValueImpl(long) {
+
+ override def writeTo(pk: MessagePacker): Unit = {
+ val buffer = ByteBuffer.allocate(java.lang.Long.BYTES + 1)
+ buffer.put(Code.INT64)
+ buffer.putLong(long)
+ pk.addPayload(buffer.array())
+ }
+ }
+
+}
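A round-trip sketch that reuses only the constructors already exercised above (tag values are illustrative):

    import com.expedia.metrics.{MetricData, MetricDefinition, TagCollection}
    import com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricDataSerde

    import scala.collection.JavaConverters._

    object MetricDataSerdeExample {
      def main(args: Array[String]): Unit = {
        val serde = new MetricDataSerde
        val tags = new TagCollection(Map("serviceName" -> "svc-a", "mtype" -> "gauge").asJava)
        val metricData = new MetricData(new MetricDefinition("duration", tags, TagCollection.EMPTY), 42.0, 1530000000L)
        val bytes = serde.serializer().serialize("metrics", metricData)
        val back = serde.deserializer().deserialize("metrics", bytes)
        assert(back.getValue == 42.0) // values round-trip through msgpack floats
      }
    }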
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/metricdata/MetricTankSerde.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/metricdata/MetricTankSerde.scala
new file mode 100644
index 000000000..459b24238
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/kstreams/serde/metricdata/MetricTankSerde.scala
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.kstreams.serde.metricdata
+
+import java.nio.ByteBuffer
+import java.util
+
+import com.expedia.metrics.{MetricData, MetricDefinition, TagCollection}
+import com.expedia.www.haystack.commons.entities.TagKeys._
+import com.expedia.www.haystack.commons.entities.{Interval, TagKeys}
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import org.apache.commons.codec.digest.DigestUtils
+import org.apache.kafka.common.serialization.{Deserializer, Serde, Serializer}
+import org.msgpack.core.MessagePack.Code
+import org.msgpack.core.{MessagePack, MessagePacker}
+import org.msgpack.value.impl.ImmutableLongValueImpl
+import org.msgpack.value.{Value, ValueFactory}
+
+import scala.collection.JavaConverters._
+import scala.collection.immutable.ListMap
+
+/**
+ * This class takes a MetricData object and serializes it into a messagepack-encoded byte stream
+ * which can be directly consumed by metrictank. The serialized data is then streamed to Kafka
+ */
+class MetricTankSerde() extends Serde[MetricData] with MetricsSupport {
+
+ override def deserializer(): MetricDataDeserializer = {
+ new MetricDataDeserializer()
+ }
+
+ override def serializer(): MetricDataSerializer = {
+ new MetricDataSerializer()
+ }
+
+ override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+}
+
+class MetricDataDeserializer() extends Deserializer[MetricData] with MetricsSupport {
+
+ private val metricPointDeserFailureMeter = metricRegistry.meter("metricpoint.deser.failure")
+ private val TAG_DELIMETER = "="
+ private val metricKey = "Metric"
+ private val valueKey = "Value"
+ private val timeKey = "Time"
+ private val typeKey = "Mtype"
+ private val tagsKey = "Tags"
+ private val idKey = "Id"
+
+ override def configure(map: java.util.Map[String, _], b: Boolean): Unit = ()
+
+ /**
+ * converts the messagepack bytes into a MetricData object
+ *
+ * @param data serialized bytes of MetricData
+ * @return
+ */
+ override def deserialize(topic: String, data: Array[Byte]): MetricData = {
+ try {
+ val unpacker = MessagePack.newDefaultUnpacker(data)
+ val metricData = unpacker.unpackValue().asMapValue().map()
+ val key = metricData.get(ValueFactory.newString(metricKey)).asStringValue().toString
+ val tags = createTags(metricData)
+ val metricDefinition = new MetricDefinition(key, new TagCollection(tags.asJava), TagCollection.EMPTY)
+ new MetricData(metricDefinition, metricData.get(ValueFactory.newString(valueKey)).asFloatValue().toDouble,
+ metricData.get(ValueFactory.newString(timeKey)).asIntegerValue().toLong)
+ } catch {
+ case ex: Exception =>
+ // deserialization failed; mark the failure meter and return null
+ metricPointDeserFailureMeter.mark()
+ null
+ }
+ }
+
+ private def createMetricNameFromMetricKey(metricKey: String): String = {
+ metricKey.split("\\.").last
+ }
+
+
+ private def createTags(metricData: util.Map[Value, Value]): Map[String, String] = {
+ ListMap(metricData.get(ValueFactory.newString(tagsKey)).asArrayValue().list().asScala.map(tag => {
+ val kvPairs = tag.toString.split(TAG_DELIMETER)
+ (kvPairs(0), kvPairs(1))
+ }): _*)
+ }
+
+
+ override def close(): Unit = ()
+}
+
+class MetricDataSerializer() extends Serializer[MetricData] with MetricsSupport {
+ private val metricPointSerFailureMeter = metricRegistry.meter("metricpoint.ser.failure")
+ private val metricPointSerSuccessMeter = metricRegistry.meter("metricpoint.ser.success")
+ private val DEFAULT_ORG_ID = 1
+ private[commons] val DEFAULT_INTERVAL_IN_SEC = 60
+ private val idKey = "Id"
+ private val orgIdKey = "OrgId"
+ private val nameKey = "Name"
+ private val metricKey = "Metric"
+ private val valueKey = "Value"
+ private val timeKey = "Time"
+ private val typeKey = "Mtype"
+ private val tagsKey = "Tags"
+ private[commons] val intervalKey = "Interval"
+
+ override def configure(map: java.util.Map[String, _], b: Boolean): Unit = ()
+
+ override def serialize(topic: String, metricData: MetricData): Array[Byte] = {
+ try {
+ val packer = MessagePack.newDefaultBufferPacker()
+
+ val metricDataMap = Map[Value, Value](
+ ValueFactory.newString(idKey) -> ValueFactory.newString(s"${getId(metricData)}"),
+ ValueFactory.newString(nameKey) -> ValueFactory.newString(metricData.getMetricDefinition.getKey),
+ ValueFactory.newString(orgIdKey) -> ValueFactory.newInteger(getOrgId(metricData)),
+ ValueFactory.newString(intervalKey) -> new ImmutableSignedLongValueImpl(retrieveInterval(metricData)),
+ ValueFactory.newString(metricKey) -> ValueFactory.newString(metricData.getMetricDefinition.getKey),
+ ValueFactory.newString(valueKey) -> ValueFactory.newFloat(metricData.getValue),
+ ValueFactory.newString(timeKey) -> new ImmutableSignedLongValueImpl(metricData.getTimestamp),
+ ValueFactory.newString(typeKey) -> ValueFactory.newString(retrieveType(metricData)),
+ ValueFactory.newString(tagsKey) -> ValueFactory.newArray(retrieveTags(metricData).asJava)
+ )
+ packer.packValue(ValueFactory.newMap(metricDataMap.asJava))
+ val data = packer.toByteArray
+ metricPointSerSuccessMeter.mark()
+ data
+ } catch {
+ case ex: Exception =>
+ // serialization failed; mark the failure meter and return null
+ metricPointSerFailureMeter.mark()
+ null
+ }
+ }
+
+ // Retrieves the interval if it is present in the tags, else uses the default interval
+ private def retrieveInterval(metricData: MetricData): Int = {
+ getMetricTags(metricData).asScala.get(TagKeys.INTERVAL_KEY).map(stringInterval => Interval.fromName(stringInterval).timeInSeconds).getOrElse(DEFAULT_INTERVAL_IN_SEC)
+ }
+
+ private def retrieveType(metricData: MetricData): String = {
+ getMetricTags(metricData).get(MetricDefinition.MTYPE)
+ }
+
+ private def retrieveTags(metricData: MetricData): List[Value] = {
+ getMetricTags(metricData).asScala.map(tuple => {
+ ValueFactory.newString(s"${tuple._1}=${tuple._2}")
+ }).toList
+ }
+
+ private def getId(metricData: MetricData): String = {
+ s"${getOrgId(metricData)}.${DigestUtils.md5Hex(getKey(metricData))}"
+ }
+
+ private def getKey(metricData: MetricData): String = {
+ val metricTags = getMetricTags(metricData).asScala.foldLeft("")((tag, tuple) => {
+ tag + s"${tuple._1}.${tuple._2}."
+ })
+ s"$metricTags${metricData.getMetricDefinition.getKey}"
+ }
+
+ private def getOrgId(metricData: MetricData): Int = {
+ getMetricTags(metricData).getOrDefault(ORG_ID_KEY, DEFAULT_ORG_ID.toString).toInt
+ }
+
+ private def getMetricTags(metricData: MetricData) : util.Map[String, String] = {
+ metricData.getMetricDefinition.getTags.getKv
+ }
+
+ override def close(): Unit = ()
+
+ /**
+ * This is a value extension class for the signed long type. The java client for messagepack packs positive longs
+ * as unsigned, and there is no way to force a signed long whose numerical value is positive.
+ * The Metric Tank schema requires a signed long type for the timestamp key.
+ *
+ * @param long the value to be written explicitly as a signed int64
+ */
+ class ImmutableSignedLongValueImpl(long: Long) extends ImmutableLongValueImpl(long) {
+
+ override def writeTo(pk: MessagePacker): Unit = {
+ val buffer = ByteBuffer.allocate(java.lang.Long.BYTES + 1)
+ buffer.put(Code.INT64)
+ buffer.putLong(long)
+ pk.addPayload(buffer.array())
+ }
+ }
+
+}
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/logger/LoggerUtils.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/logger/LoggerUtils.scala
new file mode 100644
index 000000000..017b22a41
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/logger/LoggerUtils.scala
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.logger
+
+import org.slf4j.{ILoggerFactory, LoggerFactory}
+
+object LoggerUtils {
+
+ /**
+ * shuts down the logger using reflection.
+ * for logback, it calls the stop() method on the logger context;
+ * for log4j, it calls the close() method on the log4j context
+ */
+ def shutdownLogger(): Unit = {
+ val factory = LoggerFactory.getILoggerFactory
+ shutdownLoggerWithFactory(factory)
+ }
+
+ def shutdownLoggerWithFactory(factory: ILoggerFactory): Unit = {
+ val clazz = factory.getClass
+ try {
+ clazz.getMethod("stop").invoke(factory) // logback
+ } catch {
+ case _: ReflectiveOperationException =>
+ try {
+ clazz.getMethod("close").invoke(factory) // log4j
+ } catch {
+ case _: Exception =>
+ }
+ case _: Exception =>
+ }
+ }
+}
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/metrics/MetricsSupport.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/metrics/MetricsSupport.scala
new file mode 100644
index 000000000..756e39a56
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/metrics/MetricsSupport.scala
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.metrics
+
+import com.codahale.metrics.{Metric, MetricRegistry}
+
+trait MetricsSupport {
+ val metricRegistry: MetricRegistry = MetricsRegistries.metricRegistry
+}
+
+object MetricsRegistries {
+
+ val metricRegistry = new MetricRegistry()
+
+ implicit class MetricRegistryExtension(val metricRegistry: MetricRegistry) extends AnyVal {
+
+ def getOrAddGauge[T](expectedName: String, gauge: com.codahale.metrics.Gauge[T]): Boolean = {
+ val existingGauges = metricRegistry.getGauges((existingName: String, _: Metric) => {
+ existingName.equalsIgnoreCase(expectedName)
+ })
+
+ if (existingGauges == null || existingGauges.size() == 0) {
+ metricRegistry.register(expectedName, gauge)
+ true
+ } else {
+ false
+ }
+ }
+ }
+}
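A sketch of the gauge helper above (the metric name and reading are illustrative):

    import com.codahale.metrics.Gauge
    import com.expedia.www.haystack.commons.metrics.MetricsRegistries.MetricRegistryExtension
    import com.expedia.www.haystack.commons.metrics.MetricsSupport

    object GaugeExample extends MetricsSupport {
      def main(args: Array[String]): Unit = {
        val queueDepth: Gauge[Int] = new Gauge[Int] {
          override def getValue: Int = 7 // illustrative reading
        }
        assert(metricRegistry.getOrAddGauge("queue.depth", queueDepth))  // first registration succeeds
        assert(!metricRegistry.getOrAddGauge("queue.depth", queueDepth)) // duplicate name is ignored
      }
    }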
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/retries/MaxRetriesAttemptedException.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/retries/MaxRetriesAttemptedException.scala
new file mode 100644
index 000000000..8ca915a6d
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/retries/MaxRetriesAttemptedException.scala
@@ -0,0 +1,21 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.retries
+
+class MaxRetriesAttemptedException(message: String, reason: Throwable) extends RuntimeException(message, reason)
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/retries/RetryOperation.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/retries/RetryOperation.scala
new file mode 100644
index 000000000..7ad7bdb23
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/retries/RetryOperation.scala
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.commons.retries
+
+import scala.annotation.tailrec
+import scala.util.{Failure, Try}
+
+object RetryOperation {
+
+ /**
+ * retry configuration
+ * @param maxRetries maximum number of retry attempts
+ * @param backOffInMillis initial backoff in millis
+ * @param backoffFactor exponential backoff factor applied to the previous backoff value
+ */
+ case class Config(maxRetries: Int, backOffInMillis: Long, backoffFactor: Double) {
+ /**
+ * @return next backoff config after applying the exponential factor to backOffInMillis
+ */
+ def nextBackOffConfig: Config = this.copy(backOffInMillis = Math.ceil(backOffInMillis * backoffFactor).toLong)
+ }
+
+ trait Callback {
+ def onResult[T](result: T): Unit
+
+ def onError(ex: Throwable, retry: Boolean): Unit
+
+ def lastError(): Throwable
+ }
+
+ /**
+ * executes the given function with a retry on failures
+ *
+ * @param f main function to execute and retry if fail
+ * @param retryConfig retry configuration with max retry count, backoff values
+ * @tparam T result object from the main 'f' function
+ */
+ def executeWithRetryBackoff[T](f: () => T, retryConfig: Config): Try[T] = {
+ executeWithRetryBackoff(f, 0, retryConfig)
+ }
+
+ @tailrec
+ private def executeWithRetryBackoff[T](f: () => T, currentRetryCount: Int, retryConfig: Config): Try[T] = {
+ Try {
+ f()
+ } match {
+ case Failure(reason) if currentRetryCount < retryConfig.maxRetries && !reason.isInstanceOf[InterruptedException] =>
+ Thread.sleep(retryConfig.backOffInMillis)
+ executeWithRetryBackoff(f, currentRetryCount + 1, retryConfig.nextBackOffConfig)
+ case result@_ => result
+ }
+ }
+
+ /**
+ * executes the given async function with a retry on failures
+ *
+ * @param f main function to execute and retry if fail
+ * @param retryConfig retry configuration with max retry count, backoff values
+ * @param onSuccess this callback is called if the main 'f' function executes with success
+ * @param onFailure this callback is called if the main 'f' function fails after all reattempts
+ * @tparam T result object from the main 'f' function
+ */
+ def withRetryBackoff[T](f: (Callback) => Unit,
+ retryConfig: Config,
+ onSuccess: (T) => Unit,
+ onFailure: (Exception) => Unit): Unit = {
+ withRetryBackoff(f, 0, retryConfig, onSuccess, onFailure)
+ }
+
+ private def withRetryBackoff[T](f: (Callback) => Unit,
+ currentRetry: Int,
+ retryConfig: Config,
+ onSuccess: (T) => Unit,
+ onFailure: (Exception) => Unit,
+ lastSeenError: Throwable = null): Unit = {
+ try {
+ val retryResult = new Callback {
+ override def onResult[R](result: R): Unit = {
+ onSuccess(result.asInstanceOf[T])
+ }
+
+ override def onError(ex: Throwable, retry: Boolean): Unit = {
+ if (retry && currentRetry < retryConfig.maxRetries) {
+ Thread.sleep(retryConfig.backOffInMillis)
+ withRetryBackoff(f, currentRetry + 1, retryConfig.nextBackOffConfig, onSuccess, onFailure, ex)
+ } else {
+ onFailure(new MaxRetriesAttemptedException(s"max retries=${retryConfig.maxRetries} have been reached and all attempts have failed!", ex))
+ }
+ }
+
+ override def lastError(): Throwable = lastSeenError
+ }
+ f(retryResult)
+ } catch {
+ case ex: Exception => onFailure(ex)
+ }
+ }
+}
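A sketch of the synchronous variant (`fetchRemoteValue` is a hypothetical flaky call, not part of this diff):

    import com.expedia.www.haystack.commons.retries.RetryOperation

    import scala.util.{Failure, Success}

    object RetryExample {
      def main(args: Array[String]): Unit = {
        val config = RetryOperation.Config(maxRetries = 3, backOffInMillis = 100, backoffFactor = 2.0)
        // retries up to 3 times, sleeping 100ms, 200ms and 400ms between attempts
        RetryOperation.executeWithRetryBackoff(() => fetchRemoteValue(), config) match {
          case Success(value) => println(s"fetched $value")
          case Failure(ex) => println(s"all attempts failed: ${ex.getMessage}")
        }
      }

      private def fetchRemoteValue(): String = "ok" // hypothetical operation that may throw
    }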
diff --git a/commons/commons/src/main/scala/com/expedia/www/haystack/commons/util/MetricDefinitionKeyGenerator.scala b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/util/MetricDefinitionKeyGenerator.scala
new file mode 100644
index 000000000..e5ba4c09f
--- /dev/null
+++ b/commons/commons/src/main/scala/com/expedia/www/haystack/commons/util/MetricDefinitionKeyGenerator.scala
@@ -0,0 +1,19 @@
+package com.expedia.www.haystack.commons.util
+
+import com.expedia.metrics.{MetricDefinition, TagCollection}
+
+import scala.collection.JavaConverters._
+import scala.collection.immutable.ListMap
+
+object MetricDefinitionKeyGenerator {
+
+ def generateKey(metricDefinition: MetricDefinition): String = {
+ List(s"key=${metricDefinition.getKey}", getTagsAsString(metricDefinition.getTags),
+ getTagsAsString(metricDefinition.getMeta)).filter(!_.isEmpty).mkString(",")
+ }
+
+ def getTagsAsString(tags: TagCollection): String = {
+ ListMap(tags.getKv.asScala.toSeq.sortBy(_._1): _*).map(tag => s"${tag._1}=${tag._2}").mkString(",")
+ }
+
+}
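A sketch of the generated key shape (tag values are illustrative):

    import com.expedia.metrics.{MetricDefinition, TagCollection}
    import com.expedia.www.haystack.commons.util.MetricDefinitionKeyGenerator

    import scala.collection.JavaConverters._

    object KeyGeneratorExample {
      def main(args: Array[String]): Unit = {
        val tags = new TagCollection(Map("serviceName" -> "svc-a").asJava)
        val definition = new MetricDefinition("duration", tags, TagCollection.EMPTY)
        // prints "key=duration,serviceName=svc-a" -- the empty meta tags are filtered out
        println(MetricDefinitionKeyGenerator.generateKey(definition))
      }
    }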
diff --git a/commons/commons/src/test/resources/logback-test.xml b/commons/commons/src/test/resources/logback-test.xml
new file mode 100644
index 000000000..adfa02c68
--- /dev/null
+++ b/commons/commons/src/test/resources/logback-test.xml
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/commons/commons/src/test/resources/sample.conf b/commons/commons/src/test/resources/sample.conf
new file mode 100644
index 000000000..e7980d7a0
--- /dev/null
+++ b/commons/commons/src/test/resources/sample.conf
@@ -0,0 +1,7 @@
+haystack {
+ graphite {
+ host = "influxdb.kube-system.svc"
+ port = 2003
+ }
+}
+
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/config/ConfigurationLoaderSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/config/ConfigurationLoaderSpec.scala
new file mode 100644
index 000000000..49406056c
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/config/ConfigurationLoaderSpec.scala
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.config
+
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+import com.typesafe.config.ConfigFactory
+import scala.collection.JavaConverters._
+
+class ConfigurationLoaderSpec extends UnitTestSpec {
+ private val keyName = "traces.key.sequence"
+
+ "ConfigurationLoader.loadConfigFileWithEnvOverrides" should {
+
+ "load a given config file as expected when no environment overrides are present" in {
+ Given("a sample HOCON conf file")
+ val file = "sample.conf"
+ When("loadConfigFileWithEnvOverrides is invoked with no environment variables")
+ val config = ConfigurationLoader.loadConfigFileWithEnvOverrides(resourceName = file)
+ Then("it should load the configuration entries as expected")
+ "influxdb.kube-system.svc" should equal(config.getString("haystack.graphite.host"))
+ 2003 should equal(config.getInt("haystack.graphite.port"))
+ }
+ }
+
+ "ConfigurationLoader.parsePropertiesFromMap" should {
+ "parses a given map and returns transformed key-value that matches a given prefix" in {
+ Given("a sample map with a key-value")
+ val data = Map("FOO_HAYSTACK_GRAPHITE_HOST" -> "influxdb.kube-system.svc", "foo.bar" -> "baz")
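+ // e.g. with prefix "FOO_", FOO_HAYSTACK_GRAPHITE_HOST is stripped of the
+ // prefix, lower-cased, and its underscores become dots: haystack.graphite.host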
+ When("parsePropertiesFromMap is invoked with matching prefix")
+ val config = ConfigurationLoader.parsePropertiesFromMap(data, Set(), "FOO_")
+ Then("it should transform the entries that match the prefix as expected")
+ Some("influxdb.kube-system.svc") should equal(config.get("haystack.graphite.host"))
+ None should be(config.get("foo.bar"))
+ }
+
+ "parses a given map with empty array of values and return transformed key-value that matches a given prefix" in {
+ Given("a sample map with a key and empty array of values")
+ val envVars = Map[String, String](ConfigurationLoader.ENV_NAME_PREFIX + "TRACES_KEY_SEQUENCE" -> "[]")
+ When("parsePropertiesFromMap is invoked")
+ val config = ConfigFactory.parseMap(ConfigurationLoader.parsePropertiesFromMap(envVars, Set(keyName), ConfigurationLoader.ENV_NAME_PREFIX).asJava)
+ Then("it should return an empty list with given key")
+ config.getList(keyName).size() shouldBe 0
+ }
+
+ "parses a given map with non-empty array of values and return transformed key-value that matches a given prefix" in {
+ Given("a sample map with a key and empty array of values")
+ val envVars = Map[String, String](ConfigurationLoader.ENV_NAME_PREFIX + "TRACES_KEY_SEQUENCE" -> "[v1]")
+ When("parsePropertiesFromMap is invoked")
+ val config = ConfigFactory.parseMap(ConfigurationLoader.parsePropertiesFromMap(envVars, Set(keyName), ConfigurationLoader.ENV_NAME_PREFIX).asJava)
+ Then("it should return an empty list with given key")
+ config.getStringList(keyName).size() shouldBe 1
+ config.getStringList(keyName).get(0) shouldBe "v1"
+ }
+
+ "should throw runtime exception if env variable doesn't comply array value signature - [..]" in {
+ Given("a sample map with a key and non compliant array of values")
+ val envVars = Map[String, String](ConfigurationLoader.ENV_NAME_PREFIX + "TRACES_KEY_SEQUENCE" -> "v1")
+ When("parsePropertiesFromMap is invoked")
+ val exception = intercept[RuntimeException] {
+ ConfigurationLoader.parsePropertiesFromMap(envVars, Set(keyName), ConfigurationLoader.ENV_NAME_PREFIX)
+ }
+ Then("it should throw exception with excepted message")
+ exception.getMessage shouldEqual "config key is of array type, so it should start and end with '[', ']' respectively"
+ }
+
+ "should load config from env variable with non-empty value" in {
+ Given("a sample map with a key and empty array of values")
+ val envVars = Map[String, String](
+ ConfigurationLoader.ENV_NAME_PREFIX + "TRACES_KEY_SEQUENCE" -> "[v1]",
+ ConfigurationLoader.ENV_NAME_PREFIX + "TRACES_KEY2" -> "v2",
+ "NON_HAYSTACK_KEY" -> "not_interested")
+
+ When("parsePropertiesFromMap is invoked")
+ val config = ConfigFactory.parseMap(ConfigurationLoader.parsePropertiesFromMap(envVars, Set(keyName), ConfigurationLoader.ENV_NAME_PREFIX).asJava)
+ Then("it should return an empty list with given key")
+ config.getStringList(keyName).size() shouldBe 1
+ config.getStringList(keyName).get(0) shouldBe "v1"
+ config.getString("traces.key2") shouldBe "v2"
+ config.hasPath("non.haystack.key") shouldBe false
+ }
+ }
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/entities/encoders/EncoderFactorySpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/entities/encoders/EncoderFactorySpec.scala
new file mode 100644
index 000000000..d1dcec832
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/entities/encoders/EncoderFactorySpec.scala
@@ -0,0 +1,56 @@
+package com.expedia.www.haystack.commons.entities.encoders
+
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+
+class EncoderFactorySpec extends UnitTestSpec {
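+ // EncoderFactory.newInstance matches encoder names case-insensitively and
+ // falls back to a NoopEncoder for null or empty input, as the cases below show.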
+ "EncoderFactory" should {
+
+ "return a NoopEncoder by default for null" in {
+ When("encoder is null")
+ val encoder = EncoderFactory.newInstance(null)
+
+ Then("should be a NoopEncoder")
+ encoder shouldBe a[NoopEncoder]
+ }
+
+ "return a NoopEncoder by default for empty string" in {
+ When("encoder is empty string")
+ val encoder = EncoderFactory.newInstance("")
+
+ Then("should be a NoopEncoder")
+ encoder shouldBe a[NoopEncoder]
+ }
+
+ "return a Base64Encoder when value = base64" in {
+ When("encoder is empty string")
+ val encoder = EncoderFactory.newInstance(EncoderFactory.BASE_64)
+
+ Then("should be a Base64Encoder")
+ encoder shouldBe a[Base64Encoder]
+ }
+
+ "return a Base64Encoder when value = baSe64" in {
+ When("encoder is empty string")
+ val encoder = EncoderFactory.newInstance("baSe64")
+
+ Then("should be a Base64Encoder")
+ encoder shouldBe a[Base64Encoder]
+ }
+
+ "return a PeriodReplacementEncoder when value = periodreplacement" in {
+ When("encoder is empty string")
+ val encoder = EncoderFactory.newInstance("periodreplacement")
+
+ Then("should be a PeriodReplacementEncoder")
+ encoder shouldBe a[PeriodReplacementEncoder]
+ }
+
+ "return a PeriodReplacementEncoder when value = periodReplacement" in {
+ When("encoder is empty string")
+ val encoder = EncoderFactory.newInstance(EncoderFactory.PERIOD_REPLACEMENT)
+
+ Then("should be a PeriodReplacementEncoder")
+ encoder shouldBe a[PeriodReplacementEncoder]
+ }
+ }
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/graph/GraphEdgeCollectorSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/graph/GraphEdgeCollectorSpec.scala
new file mode 100644
index 000000000..ade664059
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/graph/GraphEdgeCollectorSpec.scala
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.graph
+
+import com.expedia.open.tracing.Tag.TagType
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.commons.entities.TagKeys
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+
+
+class GraphEdgeCollectorSpec extends UnitTestSpec {
+
+
+ "graph edge collector" should {
+
+ "should collect predefined collection tags" in {
+ Given("a graph edge collector with a list of tags to be collected")
+ val tags = Set("tag1", "tag2")
+ And("a span containing tags")
+ val span = Span.newBuilder().addTags(Tag.newBuilder().setKey("tag1").setVStr("val1")).build()
+
+ When("collecting the tags for a given span")
+ val edgeTagCollector = new GraphEdgeTagCollector(tags)
+ val collectedTags = edgeTagCollector.collectTags(span)
+
+ Then("only the predefined tags that are also part of the span should be collected")
+ collectedTags.get("tag1") should be (Some("val1"))
+ collectedTags should not contain key ("tag2")
+ }
+
+ "should always collect default tags" in {
+ Given("a graph edge collector and an empty tag list")
+ val tags = Set[String]()
+ And("a span containing only the default tag")
+ val span = Span.newBuilder().addTags(Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setVBool(true)
+ .setType(TagType.BOOL)).build()
+
+ When("collecting the tags for a given span")
+ val edgeTagCollector = new GraphEdgeTagCollector(tags)
+ val collectedTags = edgeTagCollector.collectTags(span)
+
+ Then("only the predefined tags that are also part of the span should be collected")
+ collectedTags.get(TagKeys.ERROR_KEY) should be (Some("true"))
+ }
+
+ "should throw an exception if tag type cannot be converted to string" in {
+ Given("a graph edge collector and an empty tag list")
+ val tags = Set("test")
+ And("a span containing a tag whose type is not supported")
+ val span = Span.newBuilder().addTags(Tag.newBuilder().setKey("test").setType(TagType.BINARY)).build()
+
+ When("collecting the tags for a given span")
+ Then("only the predefined tags that are also part of the span should be collected")
+ val edgeTagCollector = new GraphEdgeTagCollector(tags)
+ intercept[IllegalArgumentException] {
+ edgeTagCollector.collectTags(span)
+ }
+ }
+
+ }
+
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/health/HealthControllerSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/health/HealthControllerSpec.scala
new file mode 100644
index 000000000..147a0b9ed
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/health/HealthControllerSpec.scala
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.health
+
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+
+class HealthControllerSpec extends UnitTestSpec {
+ val statusFile = "/tmp/app-health.status"
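+ // UpdateHealthStatusFile mirrors every health-state change into this file
+ // as the literal strings "true"/"false".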
+
+
+ "file based health checker" should {
+
+ "set the value with the correct boolean value for the app's health status" in {
+ Given("a file path")
+
+ When("checked with default state")
+ val healthChecker = HealthController
+ healthChecker.addListener(new UpdateHealthStatusFile(statusFile))
+ val status = healthChecker.isHealthy
+
+ Then("default state should be unhealthy")
+ status shouldBe false
+
+ When("explicitly set as healthy")
+ healthChecker.setHealthy()
+
+ Then("The state should be updated to healthy")
+ healthChecker.isHealthy shouldBe true
+ readStatusLine shouldEqual "true"
+
+ When("explicitly set as unhealthy")
+ healthChecker.setUnhealthy()
+
+ Then("The state should be updated to unhealthy")
+ healthChecker.isHealthy shouldBe false
+ readStatusLine shouldBe "false"
+ }
+ }
+
+ private def readStatusLine = scala.io.Source.fromFile(statusFile).getLines().toList.head
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/GraphEdgeTimestampExtractorSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/GraphEdgeTimestampExtractorSpec.scala
new file mode 100644
index 000000000..c8c532cbc
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/GraphEdgeTimestampExtractorSpec.scala
@@ -0,0 +1,44 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.kstreams
+
+import com.expedia.www.haystack.commons.entities.{GraphEdge, GraphVertex}
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+import org.apache.kafka.clients.consumer.ConsumerRecord
+
+class GraphEdgeTimestampExtractorSpec extends UnitTestSpec {
+
+ "GraphEdgeTimestampExtractor" should {
+
+ "extract timestamp from GraphEdge" in {
+
+ Given("a GraphEdge with some timestamp")
+ val time = System.currentTimeMillis()
+ val graphEdge = GraphEdge(GraphVertex("svc1"), GraphVertex("svc2"), "oper1", time)
+ val extractor = new GraphEdgeTimestampExtractor
+ val record: ConsumerRecord[AnyRef, AnyRef] = new ConsumerRecord("dummy-topic", 1, 1, "dummy-key", graphEdge)
+
+ When("extract timestamp")
+ val epochTime = extractor.extract(record, System.currentTimeMillis())
+
+ Then("extracted time should equal GraphEdge time in milliseconds")
+ epochTime shouldEqual time
+ }
+ }
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/MetricDataTimestampExtractorSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/MetricDataTimestampExtractorSpec.scala
new file mode 100644
index 000000000..510ab0450
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/MetricDataTimestampExtractorSpec.scala
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.kstreams
+
+import com.expedia.metrics.{MetricData, MetricDefinition, TagCollection}
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+import org.apache.kafka.clients.consumer.ConsumerRecord
+
+import scala.util.Random
+
+class MetricDataTimestampExtractorSpec extends UnitTestSpec {
+
+ "MetricDataTimestampExtractor" should {
+
+ "extract timestamp from MetricData" in {
+
+ Given("a metric data with some timestamp")
+ val currentTimeInSecs = computeCurrentTimeInSecs
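+ // MetricData timestamps are in epoch seconds; the extractor is expected to
+ // scale them to epoch milliseconds for Kafka Streams.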
+ val metricData = getMetricData(currentTimeInSecs)
+ val metricDataTimestampExtractor = new MetricDataTimestampExtractor
+ val record: ConsumerRecord[AnyRef, AnyRef] = new ConsumerRecord("dummy-topic", 1, 1, "dummy-key", metricData)
+
+ When("extract timestamp")
+ val epochTime = metricDataTimestampExtractor.extract(record, System.currentTimeMillis())
+
+ Then("extracted time should equal metric point time in milliseconds")
+ epochTime shouldEqual currentTimeInSecs * 1000
+ }
+ }
+
+ private def getMetricData(timeStamp : Long): MetricData = {
+ val metricDefinition = new MetricDefinition("duration")
+ new MetricData(metricDefinition, Random.nextDouble(), timeStamp)
+ }
+
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/SpanTimestampExtractorSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/SpanTimestampExtractorSpec.scala
new file mode 100644
index 000000000..af992507a
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/SpanTimestampExtractorSpec.scala
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.kstreams
+
+import java.util.UUID
+
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+import org.apache.kafka.clients.consumer.ConsumerRecord
+
+class SpanTimestampExtractorSpec extends UnitTestSpec {
+
+ "SpanTimestampExtractor" should {
+
+ " should extract timestamp from Span" in {
+
+ Given("a span with some timestamp")
+ val currentTimeInMicroSeconds = System.currentTimeMillis() * 1000
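+ // Span start times are recorded in microseconds; the extractor is expected
+ // to hand Kafka Streams the equivalent epoch-millisecond value.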
+
+ val span = generateTestSpan(UUID.randomUUID().toString, currentTimeInMicroSeconds, "foo", "bar", 20, client = false, server = true)
+ val spanTimestampExtractor = new SpanTimestampExtractor
+ val record: ConsumerRecord[AnyRef, AnyRef] = new ConsumerRecord("dummy-topic", 1, 1, "dummy-key", span)
+
+ When("extract timestamp")
+ val epochTime = spanTimestampExtractor.extract(record, System.currentTimeMillis())
+
+ Then("extracted time should equal span startTime time")
+ epochTime shouldEqual currentTimeInMicroSeconds / 1000
+ }
+ }
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/app/ApplicationSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/app/ApplicationSpec.scala
new file mode 100644
index 000000000..c4fec1548
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/app/ApplicationSpec.scala
@@ -0,0 +1,66 @@
+package com.expedia.www.haystack.commons.kstreams.app
+
+import com.codahale.metrics.JmxReporter
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+import org.easymock.EasyMock._
+
+class ApplicationSpec extends UnitTestSpec {
+
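+ // These specs follow EasyMock's record/replay/verify cycle: declare the
+ // expected calls inside expecting { }, switch the mocks to replay mode,
+ // exercise the subject, then verify the interactions.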
+ "Application" should {
+
+ "require an instance of StreamsRunner" in {
+ Given("only a valid instance of jmxReporter")
+ val streamsRunner : StreamsRunner = null
+ val jmxReporter = mock[JmxReporter]
+ When("an instance of Application is created")
+ Then("it should throw an exception")
+ intercept[IllegalArgumentException] {
+ new Application(streamsRunner, jmxReporter)
+ }
+ }
+ "require an instance of JmxReporter" in {
+ Given("only a valid instance of StreamsRunner")
+ val streamsRunner : StreamsRunner = mock[StreamsRunner]
+ val jmxReporter = null
+ When("an instance of Application is created")
+ Then("it should throw an exception")
+ intercept[IllegalArgumentException] {
+ new Application(streamsRunner, jmxReporter)
+ }
+ }
+ "start both JmxReporter and StreamsRunner at start" in {
+ Given("a fully configured application")
+ val streamsRunner : StreamsRunner = mock[StreamsRunner]
+ val jmxReporter = mock[JmxReporter]
+ val application = new Application(streamsRunner, jmxReporter)
+ When("application is started")
+ expecting {
+ streamsRunner.start().once()
+ jmxReporter.start().once()
+ }
+ replay(streamsRunner, jmxReporter)
+ application.start()
+ Then("it should call start on both streamsRunner and jmxReporter")
+ verify(streamsRunner, jmxReporter)
+ }
+ "close both JmxReporter and StreamsRunner at stop" in {
+ Given("a fully configured and running application")
+ val streamsRunner : StreamsRunner = mock[StreamsRunner]
+ val jmxReporter = mock[JmxReporter]
+ val application = new Application(streamsRunner, jmxReporter)
+ When("application is stopped")
+ expecting {
+ streamsRunner.start().once()
+ jmxReporter.start().once()
+ streamsRunner.close().once()
+ jmxReporter.close().once()
+ }
+ replay(streamsRunner, jmxReporter)
+ application.start()
+ application.stop()
+ Then("it should call close on both streamsRunner and jmxReporter")
+ verify(streamsRunner, jmxReporter)
+ }
+
+ }
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/app/ManagedKafkaStreamsSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/app/ManagedKafkaStreamsSpec.scala
new file mode 100644
index 000000000..80749eb98
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/app/ManagedKafkaStreamsSpec.scala
@@ -0,0 +1,66 @@
+package com.expedia.www.haystack.commons.kstreams.app
+
+import java.util.concurrent.TimeUnit
+
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+import org.apache.kafka.streams.KafkaStreams
+import org.easymock.EasyMock._
+
+class ManagedKafkaStreamsSpec extends UnitTestSpec {
+ "ManagedKafkaStreams" should {
+ "start the underlying kafkaStreams when started" in {
+ Given("a fully configured ManagedKafkaStreams instance")
+ val kafkaStreams = mock[KafkaStreams]
+ val managedKafkaStreams = new ManagedKafkaStreams(kafkaStreams)
+ When("start is invoked")
+ expecting {
+ kafkaStreams.start().once()
+ }
+ replay(kafkaStreams)
+ managedKafkaStreams.start()
+ Then("it should start the KafkaStreams application")
+ verify(kafkaStreams)
+ }
+ "close the KafkaStreams when stopped" in {
+ Given("a fully configured ManagedKafkaStreams instance")
+ val kafkaStreams = mock[KafkaStreams]
+ val managedKafkaStreams = new ManagedKafkaStreams(kafkaStreams)
+ When("stop is invoked")
+ expecting {
+ kafkaStreams.start().once()
+ kafkaStreams.close(0, TimeUnit.SECONDS).andReturn(true).once()
+ }
+ replay(kafkaStreams)
+ managedKafkaStreams.start()
+ Then("it should close the KafkaStreams application")
+ managedKafkaStreams.stop()
+ verify(kafkaStreams)
+ }
+ "not do anything when stop is called without starting" in {
+ Given("a fully configured ManagedKafkaStreams instance")
+ val kafkaStreams = mock[KafkaStreams]
+ val managedKafkaStreams = new ManagedKafkaStreams(kafkaStreams)
+ When("stop is invoked without starting")
+ replay(kafkaStreams)
+ Then("it should do nothing")
+ managedKafkaStreams.stop()
+ verify(kafkaStreams)
+ }
+ "close the KafkaStreams with the given timeout when stopped" in {
+ Given("a fully configured ManagedKafkaStreams instance with a timeout")
+ val kafkaStreams = mock[KafkaStreams]
+ val managedKafkaStreams = new ManagedKafkaStreams(kafkaStreams, 5)
+ When("stop is invoked")
+ expecting {
+ kafkaStreams.start().once()
+ kafkaStreams.close(5, TimeUnit.SECONDS).andReturn(true).once()
+ }
+ replay(kafkaStreams)
+ managedKafkaStreams.start()
+ Then("it should close the KafkaStreams application with the given timeout")
+ managedKafkaStreams.stop()
+ verify(kafkaStreams)
+ }
+ }
+
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/app/StateChangeListenerSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/app/StateChangeListenerSpec.scala
new file mode 100644
index 000000000..f4b070c52
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/app/StateChangeListenerSpec.scala
@@ -0,0 +1,52 @@
+package com.expedia.www.haystack.commons.kstreams.app
+
+import com.expedia.www.haystack.commons.health.HealthStatusController
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+import org.easymock.EasyMock._
+
+class StateChangeListenerSpec extends UnitTestSpec {
+ "StateChangeListener" should {
+ "set the health status to healthy when requested" in {
+ Given("a valid instance of StateChangeListener")
+ val healthStatusController = mock[HealthStatusController]
+ val stateChangeListener = new StateChangeListener(healthStatusController)
+ When("set healthy is invoked")
+ expecting {
+ healthStatusController.setHealthy().once()
+ }
+ replay(healthStatusController)
+ stateChangeListener.state(true)
+ Then("it should set health status to healthy")
+ verify(healthStatusController)
+ }
+ "set the health status to unhealthy when requested" in {
+ Given("a valid instance of StateChangeListener")
+ val healthStatusController = mock[HealthStatusController]
+ val stateChangeListener = new StateChangeListener(healthStatusController)
+ When("set unhealthy is invoked")
+ expecting {
+ healthStatusController.setUnhealthy().once()
+ }
+ replay(healthStatusController)
+ stateChangeListener.state(false)
+ Then("it should set health status to healthy")
+ verify(healthStatusController)
+ }
+ "set application status to unhealthy when an un caught exception is raised" in {
+ Given("a valid instance of StateChangeListener")
+ val healthStatusController = mock[HealthStatusController]
+ val stateChangeListener = new StateChangeListener(healthStatusController)
+ val exception = new IllegalArgumentException
+ val thread = new Thread("Thread-1")
+ When("an uncaught exception is raised")
+ expecting {
+ healthStatusController.setUnhealthy().once()
+ }
+ replay(healthStatusController)
+ stateChangeListener.uncaughtException(thread, exception)
+ Then("it should set the status to unhealthy")
+ verify(healthStatusController)
+ }
+ }
+
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/app/StreamsRunnerSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/app/StreamsRunnerSpec.scala
new file mode 100644
index 000000000..6a61c157b
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/app/StreamsRunnerSpec.scala
@@ -0,0 +1,46 @@
+package com.expedia.www.haystack.commons.kstreams.app
+
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+import org.easymock.EasyMock._
+
+class StreamsRunnerSpec extends UnitTestSpec {
+ "StreamsRunner" should {
+ "start managed KStreams when the factory successfully creates one" in {
+ Given("a StreamsFactory")
+ val factory = mock[StreamsFactory]
+ And("a StateChangeListener")
+ val stateChangeListener = mock[StateChangeListener]
+ val managedService = mock[ManagedService]
+ val streamsRunner = new StreamsRunner(factory, stateChangeListener)
+ When("streamsRunner is asked to start the application")
+ expecting {
+ factory.create(stateChangeListener).andReturn(managedService).once()
+ managedService.start().once()
+ stateChangeListener.state(true).once()
+ }
+ replay(factory, managedService, stateChangeListener)
+ streamsRunner.start()
+ Then("it should create an instance of managed streams from the given factory and start it. " +
+ "It should also set the state to healthy")
+ verify(factory, managedService, stateChangeListener)
+ }
+ "set the state to unhealthy when the factory fails to create one" in {
+ Given("a StreamsFactory")
+ val factory = mock[StreamsFactory]
+ And("a StateChangeListener")
+ val stateChangeListener = mock[StateChangeListener]
+ val managedService = mock[ManagedService]
+ val streamsRunner = new StreamsRunner(factory, stateChangeListener)
+ When("streamsRunner is asked to start the application and factory fails")
+ expecting {
+ factory.create(stateChangeListener).andThrow(new RuntimeException).once()
+ stateChangeListener.state(false).once()
+ }
+ replay(factory, managedService, stateChangeListener)
+ streamsRunner.start()
+ Then("it should attempt tp create an instance of managed streams from the given factory. " +
+ "It should also set the state to unhealthy")
+ verify(factory, managedService, stateChangeListener)
+ }
+ }
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/serde/SpanSerdeSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/serde/SpanSerdeSpec.scala
new file mode 100644
index 000000000..bf1d13ca9
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/serde/SpanSerdeSpec.scala
@@ -0,0 +1,37 @@
+package com.expedia.www.haystack.commons.kstreams.serde
+
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+
+class SpanSerdeSpec extends UnitTestSpec {
+
+
+ "span serializer" should {
+ "should serialize a span" in {
+ Given("a span serializer")
+ val serializer = (new SpanSerde).serializer
+ And("a valid span is provided")
+ val span = generateTestSpan("foo", "bar", 100, client = true, server = false)
+ When("span serializer is used to serialize the span")
+ val bytes = serializer.serialize("proto-spans", span)
+ Then("it should serialize the object")
+ bytes.nonEmpty should be(true)
+ }
+ }
+ "span deserializer" should {
+ "should deserialize a span" in {
+ Given("a span deserializer")
+ val serializer = (new SpanSerde).serializer
+ val deserializer = (new SpanSerde).deserializer
+ And("a valid span is provided")
+ val span = generateTestSpan("foo", "bar", 100, client = true, server = false)
+ When("span deserializer is used on valid array of bytes")
+ val bytes = serializer.serialize("proto-spans", span)
+ val span2 = deserializer.deserialize("proto-spans", bytes)
+ Then("it should deserialize correctly")
+ span should be(span2)
+ }
+ }
+
+
+
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/serde/graph/GraphEdgeKeySerdeSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/serde/graph/GraphEdgeKeySerdeSpec.scala
new file mode 100644
index 000000000..e60517262
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/serde/graph/GraphEdgeKeySerdeSpec.scala
@@ -0,0 +1,68 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.kstreams.serde.graph
+
+import com.expedia.www.haystack.commons.entities.{GraphEdge, GraphVertex}
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+
+class GraphEdgeKeySerdeSpec extends UnitTestSpec {
+ "GraphEdge Key serializer" should {
+ "should serialize a GraphEdge" in {
+ Given("a GraphEdge serializer")
+ val serializer = (new GraphEdgeKeySerde).serializer()
+
+ And("a valid GraphEdge is provided")
+ val edge = GraphEdge(GraphVertex("sourceSvc"), GraphVertex("destinationSvc"),
+ "operation", 1)
+
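+ // Note: the edge above is built with sourceTimestamp = 1, yet the expected
+ // JSON below carries sourceTimestamp = 0; the key serde normalises the
+ // timestamp so edges differing only by time produce identical keys.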
+ When("GraphEdge serializer is used to serialize the GraphEdge")
+ val bytes = serializer.serialize("graph-nodes", edge)
+
+ Then("it should serialize the object")
+ new String(bytes) shouldEqual "{\"source\":{\"name\":\"sourceSvc\",\"tags\":{}},\"destination\":{\"name\":\"destinationSvc\",\"tags\":{}},\"operation\":\"operation\",\"sourceTimestamp\":0}"
+ }
+ }
+
+ "GraphEdge Key deserializer" should {
+ "should deserialize a GraphEdge" in {
+ Given("a GraphEdge deserializer")
+ val serializer = (new GraphEdgeKeySerde).serializer()
+ val deserializer = (new GraphEdgeKeySerde).deserializer()
+
+ And("a valid GraphEdge is provided")
+ val edge = GraphEdge(GraphVertex("sourceSvc"), GraphVertex("destinationSvc"),
+ "operation", System.currentTimeMillis())
+
+ When("GraphEdge deserializer is used on valid array of bytes")
+ val bytes = serializer.serialize("graph-nodes", edge)
+ val dataWithoutSourceTimestamp = "{\"source\":{\"name\":\"sourceSvc\",\"tags\":{}},\"destination\":{\"name\":\"destinationSvc\",\"tags\":{}},\"operation\":\"operation\"}"
+
+ val deserializedEdge_1 = deserializer.deserialize("graph-nodes", bytes)
+ val deserializedEdge_2 = deserializer.deserialize("graph-nodes", dataWithoutSourceTimestamp.getBytes("utf-8"))
+
+ Then("it should deserialize correctly")
+ deserializedEdge_1.source.name should be("sourceSvc")
+ deserializedEdge_1.destination.name should be("destinationSvc")
+ deserializedEdge_1.operation shouldEqual "operation"
+ deserializedEdge_1.source.tags.size shouldBe 0
+ deserializedEdge_1.destination.tags.size shouldBe 0
+ deserializedEdge_2.sourceTimestamp should not be 0L
+ }
+ }
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/serde/graph/GraphEdgeValueSerdeSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/serde/graph/GraphEdgeValueSerdeSpec.scala
new file mode 100644
index 000000000..34e0b8809
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/serde/graph/GraphEdgeValueSerdeSpec.scala
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.kstreams.serde.graph
+
+import com.expedia.www.haystack.commons.entities.{GraphEdge, GraphVertex}
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+
+class GraphEdgeValueSerdeSpec extends UnitTestSpec {
+ "GraphEdge Value serializer" should {
+ "should serialize a GraphEdge" in {
+ Given("a GraphEdge serializer")
+ val serializer = (new GraphEdgeValueSerde).serializer()
+
+ And("a valid GraphEdge is provided")
+ val edge = GraphEdge(GraphVertex("sourceSvc"), GraphVertex("destinationSvc"),
+ "operation", System.currentTimeMillis())
+
+ When("GraphEdge serializer is used to serialize the GraphEdge")
+ val bytes = serializer.serialize("graph-nodes", edge)
+
+ Then("it should serialize the object")
+ bytes.nonEmpty should be(true)
+ }
+ }
+
+ "GraphEdge Value deserializer" should {
+ "should deserialize a GraphEdge" in {
+ Given("a GraphEdge deserializer")
+ val serializer = (new GraphEdgeValueSerde).serializer()
+ val deserializer = (new GraphEdgeValueSerde).deserializer()
+
+ And("a valid GraphEdge is provided")
+ val edge = GraphEdge(GraphVertex("sourceSvc", Map("testtag" -> "true")), GraphVertex("destinationSvc"),
+ "operation", System.currentTimeMillis())
+
+ When("GraphEdge deserializer is used on valid array of bytes")
+ val bytes = serializer.serialize("graph-nodes", edge)
+ val deserializedEdge = deserializer.deserialize("graph-nodes", bytes)
+
+ Then("it should deserialize correctly")
+ edge should be(deserializedEdge)
+ edge.source.name should be("sourceSvc")
+ edge.source.tags.get("testtag") shouldBe Some("true")
+ }
+ }
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/serde/metricdata/MetricTankSerdeSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/serde/metricdata/MetricTankSerdeSpec.scala
new file mode 100644
index 000000000..3d7546094
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/kstreams/serde/metricdata/MetricTankSerdeSpec.scala
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.kstreams.serde.metricdata
+
+import com.expedia.metrics.{MetricData, MetricDefinition, TagCollection}
+import com.expedia.www.haystack.commons.entities.{Interval, TagKeys}
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+import org.msgpack.core.MessagePack
+import org.msgpack.value.ValueFactory
+
+import scala.util.Random
+
+class MetricTankSerdeSpec extends UnitTestSpec {
+ val statusFile = "/tmp/app-health.status"
+ val DURATION_METRIC_NAME = "duration"
+ val SERVICE_NAME = "dummy_service"
+ val OPERATION_NAME = "dummy_operation"
+ val TOPIC_NAME = "dummy"
+
+
+ val metricTags = Map(TagKeys.OPERATION_NAME_KEY -> OPERATION_NAME,
+ TagKeys.SERVICE_NAME_KEY -> SERVICE_NAME)
+
+ "MetricTank serde for metric data" should {
+
+ "serialize and deserialize metric data using messagepack" in {
+
+ Given("metric data")
+ val tags = new java.util.LinkedHashMap[String, String] {
+ put("serviceName", SERVICE_NAME)
+ put("operationName", OPERATION_NAME)
+ put(MetricDefinition.MTYPE, "gauge")
+ put(MetricDefinition.UNIT, "short")
+ }
+ val metricData = getMetricData(tags)
+ val metricTankSerde = new MetricTankSerde()
+
+ When("its serialized using the metricTank Serde")
+ val serializedBytes = metricTankSerde.serializer().serialize(TOPIC_NAME, metricData)
+
+ Then("it should be encoded as message pack")
+ val unpacker = MessagePack.newDefaultUnpacker(serializedBytes)
+ unpacker should not be null
+
+ metricTankSerde.close()
+ }
+
+ "serialize metric data with the right metric interval if present" in {
+
+ Given("metric data with a 5 minute interval")
+ val metricTankSerde = new MetricTankSerde()
+
+ val tags = new java.util.LinkedHashMap[String, String] {
+ put("serviceName", SERVICE_NAME)
+ put("operationName", OPERATION_NAME)
+ put(MetricDefinition.MTYPE, "gauge")
+ put(MetricDefinition.UNIT, "short")
+ put("interval", Interval.FIVE_MINUTE.name.toString)
+ }
+ val metricData = getMetricData(tags)
+
+ When("its serialized using the metricTank Serde")
+ val serializedBytes = metricTankSerde.serializer().serialize(TOPIC_NAME, metricData)
+ val unpacker = MessagePack.newDefaultUnpacker(serializedBytes)
+ Then("it should be able to unpack the content")
+ unpacker should not be null
+
+ Then("it unpacked content should be a valid map")
+ val deserializedMetricData = unpacker.unpackValue().asMapValue().map()
+ deserializedMetricData should not be null
+
+ Then("interval key should be set as 300 seconds")
+ deserializedMetricData.get(ValueFactory.newString(metricTankSerde.serializer().intervalKey)).asIntegerValue().asInt() shouldBe 300
+
+ metricTankSerde.close()
+ }
+
+ "serialize metricpoint with the default interval if not present" in {
+
+ Given("metric point without the interval tag")
+ val metricTankSerde = new MetricTankSerde()
+ val tags = new java.util.LinkedHashMap[String, String] {
+ put("serviceName", SERVICE_NAME)
+ put("operationName", OPERATION_NAME)
+ put(MetricDefinition.MTYPE, "gauge")
+ put(MetricDefinition.UNIT, "short")
+ }
+ val metricData = getMetricData(tags)
+
+ When("its serialized using the metricTank Serde")
+ val serializedBytes = metricTankSerde.serializer().serialize(TOPIC_NAME, metricData)
+ val unpacker = MessagePack.newDefaultUnpacker(serializedBytes)
+ Then("it should be able to unpack the content")
+ unpacker should not be null
+
+ Then("it unpacked content should be a valid map")
+ val deserializedMetricData = unpacker.unpackValue().asMapValue().map()
+ deserializedMetricData should not be null
+
+ Then("interval key should be set as default metric interval in seconds")
+ deserializedMetricData.get(ValueFactory.newString(metricTankSerde.serializer().intervalKey)).asIntegerValue().asInt() shouldBe metricTankSerde.serializer().DEFAULT_INTERVAL_IN_SEC
+
+ metricTankSerde.close()
+ }
+
+
+ "serialize and deserialize simple metric points without loosing data" in {
+
+ Given("metric point")
+ val metricTankSerde = new MetricTankSerde()
+ val tags = new java.util.LinkedHashMap[String, String] {
+ put("serviceName", SERVICE_NAME)
+ put("operationName", OPERATION_NAME)
+ put(MetricDefinition.MTYPE, "gauge")
+ put(MetricDefinition.UNIT, "short")
+ }
+ val metricData = getMetricData(tags)
+
+ When("its serialized in the metricTank Format")
+ val serializedBytes = metricTankSerde.serializer().serialize(TOPIC_NAME, metricData)
+ val deserializedMetricPoint = metricTankSerde.deserializer().deserialize(TOPIC_NAME, serializedBytes)
+
+ Then("it should be encoded as message pack")
+ metricData shouldEqual deserializedMetricPoint
+
+ metricTankSerde.close()
+ }
+ }
+
+ "serializer returns null for any exception" in {
+
+ Given("MetricTankSerde and a null metric data")
+ val metricTankSerde = new MetricTankSerde()
+ val metricData = null
+
+ When("its serialized using the metricTank Serde")
+ val serializedBytes = metricTankSerde.serializer().serialize(TOPIC_NAME, metricData)
+
+ Then("serializer should return null")
+ serializedBytes shouldBe null
+ metricTankSerde.close()
+ }
+
+
+ private def getMetricData(tags: java.util.LinkedHashMap[String, String]): MetricData = {
+ val metricDefinition = new MetricDefinition(DURATION_METRIC_NAME, new TagCollection(tags), TagCollection.EMPTY)
+ new MetricData(metricDefinition, Random.nextDouble(), System.currentTimeMillis() / 1000)
+ }
+
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/logger/LoggerUtilSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/logger/LoggerUtilSpec.scala
new file mode 100644
index 000000000..b0cbbb4dd
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/logger/LoggerUtilSpec.scala
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.logger
+
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FunSpec, Matchers}
+import org.slf4j.{ILoggerFactory, Logger}
+
+class LoggerUtilSpec extends FunSpec with Matchers with EasyMockSugar {
+
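+ // LoggerUtils.shutdownLoggerWithFactory looks for a stop() (logback-style) or
+ // close() (log4j-style) method on the factory and invokes whichever exists.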
+ describe("Logger Utils") {
+ it("should close the logger if it has stop method for e.g. logback") {
+ val logger = mock[Logger]
+ var isStopped = false
+
+ val loggerFactory = new ILoggerFactory {
+ override def getLogger(s: String): Logger = logger
+
+ def stop(): Unit = isStopped = true
+ }
+
+ whenExecuting(logger) {
+ LoggerUtils.shutdownLoggerWithFactory(loggerFactory)
+ isStopped shouldBe true
+ }
+ }
+
+ it("should close the logger if it has close method for e.g. log4j") {
+ val logger = mock[Logger]
+ var isStopped = false
+
+ val loggerFactory = new ILoggerFactory {
+ override def getLogger(s: String): Logger = logger
+
+ def close(): Unit = isStopped = true
+ }
+
+ whenExecuting(logger) {
+ LoggerUtils.shutdownLoggerWithFactory(loggerFactory)
+ isStopped shouldBe true
+ }
+ }
+
+ it("should not able to close the logger if it has neither stop/close method") {
+ val logger = mock[Logger]
+ var isStopped = false
+
+ val loggerFactory = new ILoggerFactory {
+ override def getLogger(s: String): Logger = logger
+
+ def shutdown(): Unit = isStopped = true
+ }
+
+ whenExecuting(logger) {
+ LoggerUtils.shutdownLoggerWithFactory(loggerFactory)
+ isStopped shouldBe false
+ }
+ }
+
+ it("should do nothing when stop method throws exception") {
+ val logger = mock[Logger]
+ var isStopped = true
+
+ val loggerFactory = new ILoggerFactory {
+ override def getLogger(s: String): Logger = logger
+
+ def stop(): Unit = {
+ isStopped = false
+ throw new Exception
+ }
+ }
+
+ whenExecuting(logger) {
+ LoggerUtils.shutdownLoggerWithFactory(loggerFactory)
+ isStopped shouldBe false
+ }
+ }
+ }
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/metrics/MetricRegistySpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/metrics/MetricRegistySpec.scala
new file mode 100644
index 000000000..74f217ab5
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/metrics/MetricRegistySpec.scala
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.metrics
+
+import com.codahale.metrics.Gauge
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+import com.expedia.www.haystack.commons.metrics.MetricsRegistries._
+
+class MetricRegistySpec extends UnitTestSpec with MetricsSupport {
+
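+ // getOrAddGauge is expected to return true when the gauge is newly registered
+ // and false when a gauge with the same name already exists.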
+ "MetricRegisty extension" should {
+
+ "return the same gauge metric if its created more than once" in {
+ Given("gauge metric")
+ val metricName = "testMetric"
+ val gaugeMetric = new Gauge[Long] {
+ override def getValue: Long = this.hashCode()
+ }
+
+ When("its registered more than once")
+ val firstAttempt = metricRegistry.getOrAddGauge(metricName, gaugeMetric)
+ val secondAttempt = metricRegistry.getOrAddGauge(metricName, gaugeMetric)
+
+
+ Then("the first time it should create a new metric and register in the metrics registry")
+ firstAttempt shouldBe true
+
+ Then("the second time it shouldn't create the same metric ")
+ secondAttempt shouldBe false
+ }
+ }
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/retries/RetryOperationSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/retries/RetryOperationSpec.scala
new file mode 100644
index 000000000..df0a23b3e
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/retries/RetryOperationSpec.scala
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.retries
+
+import java.util.concurrent.atomic.AtomicInteger
+
+import org.scalatest.{FunSpec, Matchers}
+
+import scala.concurrent.ExecutionContext.Implicits.global
+import scala.concurrent.Future
+
+class RetryOperationSpec extends FunSpec with Matchers {
+ describe("Retry Operation handler") {
+ it("should not retry if main async function runs successfully") {
+ @volatile var onSuccessCalled = 0
+ val mainFuncCalled = new AtomicInteger(0)
+
+ RetryOperation.withRetryBackoff((callback) => {
+ mainFuncCalled.incrementAndGet()
+ Future {
+ Thread.sleep(500)
+ callback.onResult("xxxx")
+ }
+ },
+ RetryOperation.Config(maxRetries = 3, backOffInMillis = 100, backoffFactor = 1.5),
+ onSuccess = (result: String) => {
+ result.toString shouldEqual "xxxx"
+ onSuccessCalled = onSuccessCalled + 1
+ }, onFailure = (_) => {
+ fail("onFailure callback should not be called")
+ })
+
+ Thread.sleep(3000)
+ mainFuncCalled.get() shouldBe 1
+ onSuccessCalled shouldBe 1
+ }
+
+ it("should retry for async function if callback says retry but should not fail as last attempt succeeds") {
+ @volatile var onSuccessCalled = 0
+ val retryConfig = RetryOperation.Config(maxRetries = 3, backOffInMillis = 100, backoffFactor = 1.5)
+ val mainFuncCalled = new AtomicInteger(0)
+
+ RetryOperation.withRetryBackoff((callback) => {
+ val count = mainFuncCalled.incrementAndGet()
+ if (count > 1) {
+ callback.lastError() should not be null
+ } else {
+ callback.lastError() shouldBe null
+ }
+ if (count <= retryConfig.maxRetries) {
+ Future {
+ Thread.sleep(200)
+ callback.onError(new RuntimeException("error"), retry = true)
+ }
+ } else {
+ Future {
+ Thread.sleep(200)
+ callback.onResult("xxxxx")
+ }
+ }
+ },
+ retryConfig,
+ onSuccess = (result: String) => {
+ result shouldEqual "xxxxx"
+ onSuccessCalled = onSuccessCalled + 1
+ }, onFailure = (_) => {
+ fail("onFailure should not be called")
+ })
+
+ Thread.sleep(4000)
+ mainFuncCalled.get() shouldBe retryConfig.maxRetries + 1
+ onSuccessCalled shouldBe 1
+ }
+
+ it("should retry for async function if callback asks for a retry and fail finally as all attempts fail") {
+ @volatile var onFailureCalled = 0
+ val retryConfig = RetryOperation.Config(maxRetries = 2, backOffInMillis = 100, backoffFactor = 1.5)
+ val mainFuncCalled = new AtomicInteger(0)
+
+ val error = new RuntimeException("error")
+ RetryOperation.withRetryBackoff((callback) => {
+ mainFuncCalled.incrementAndGet()
+ Future {
+ Thread.sleep(500)
+ callback.onError(error, retry = true)
+ }
+ },
+ retryConfig,
+ onSuccess = (_: Any) => {
+ fail("onSuccess should not be called")
+ }, onFailure = (ex) => {
+ assert(ex.isInstanceOf[MaxRetriesAttemptedException])
+ ex.getCause shouldBe error
+ onFailureCalled = onFailureCalled + 1
+ })
+
+ Thread.sleep(4000)
+ mainFuncCalled.get() shouldBe (retryConfig.maxRetries + 1)
+ onFailureCalled shouldBe 1
+ }
+
+ it("should not retry if main async function runs successfully") {
+ var mainFuncCalled = 0
+ val resp = RetryOperation.executeWithRetryBackoff(() => {
+ mainFuncCalled = mainFuncCalled + 1
+ "success"
+ }, RetryOperation.Config(3, 100, 2))
+
+ mainFuncCalled shouldBe 1
+ resp.get shouldEqual "success"
+ }
+
+ it("should retry for function if callback says retry but should not fail as last attempt succeeds") {
+ var mainFuncCalled = 0
+ val retryConfig = RetryOperation.Config(3, 100, 2)
+ val resp = RetryOperation.executeWithRetryBackoff(() => {
+ mainFuncCalled = mainFuncCalled + 1
+ if(mainFuncCalled - 1 < retryConfig.maxRetries) throw new RuntimeException else "success"
+ }, retryConfig)
+
+ mainFuncCalled shouldBe retryConfig.maxRetries + 1
+ resp.get shouldEqual "success"
+ }
+
+ it("should retry for function if callback asks for a retry and fail finally as all attempts fail") {
+ var mainFuncCalled = 0
+ val retryConfig = RetryOperation.Config(3, 100, 2)
+ val error = new RuntimeException("error")
+ val resp = RetryOperation.executeWithRetryBackoff(() => {
+ mainFuncCalled = mainFuncCalled + 1
+ throw error
+ }, retryConfig)
+
+ mainFuncCalled shouldBe retryConfig.maxRetries + 1
+ resp.isFailure shouldBe true
+ }
+
+ it("retry operation backoff config should return the next backoff config") {
+ val retry = RetryOperation.Config(3, 1000, 1.5)
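+ // Each nextBackOffConfig keeps maxRetries and backoffFactor fixed while
+ // multiplying backOffInMillis by the factor: 1000 -> 1500 -> 2250.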
+
+ var nextBackoffConfig = retry.nextBackOffConfig
+ nextBackoffConfig.maxRetries shouldBe 3
+ nextBackoffConfig.nextBackOffConfig.backoffFactor shouldBe 1.5
+ nextBackoffConfig.backOffInMillis shouldBe 1500
+
+ nextBackoffConfig = nextBackoffConfig.nextBackOffConfig
+ nextBackoffConfig.maxRetries shouldBe 3
+ nextBackoffConfig.nextBackOffConfig.backoffFactor shouldBe 1.5
+ nextBackoffConfig.backOffInMillis shouldBe 2250
+ }
+ }
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/unit/UnitTestSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/unit/UnitTestSpec.scala
new file mode 100644
index 000000000..4b0059124
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/unit/UnitTestSpec.scala
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.commons.unit
+
+import java.util.UUID
+
+import com.expedia.open.tracing.{Log, Span, Tag}
+import org.scalatest._
+import org.scalatest.easymock.EasyMockSugar
+
+trait UnitTestSpec extends WordSpec with GivenWhenThen with Matchers with BeforeAndAfterAll with BeforeAndAfterEach with EasyMockSugar {
+
+ val SERVER_SEND_EVENT = "ss"
+ val SERVER_RECV_EVENT = "sr"
+ val CLIENT_SEND_EVENT = "cs"
+ val CLIENT_RECV_EVENT = "cr"
+ protected def computeCurrentTimeInSecs: Long = {
+ System.currentTimeMillis() / 1000L
+ }
+
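+ // Builds a protobuf Span for tests; when the client/server flags are set,
+ // "cs"/"cr" or "sr"/"ss" event logs are added book-ending the span duration.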
+ private[commons] def generateTestSpan(serviceName: String, operation: String, duration: Long, client: Boolean, server: Boolean): Span = {
+ generateTestSpan(UUID.randomUUID().toString, serviceName, operation, duration, client, server)
+ }
+
+ private[commons] def generateTestSpan(spanId: String, serviceName: String, operation: String, duration: Long, client: Boolean, server: Boolean): Span = {
+ val ts = System.currentTimeMillis() - (10 * 1000)
+ generateTestSpan(spanId, ts, serviceName, operation, duration, client, server)
+ }
+
+ private[commons] def generateTestSpan(spanId: String, ts: Long, serviceName: String, operation: String, duration: Long, client: Boolean, server: Boolean): Span = {
+
+
+ val spanBuilder = Span.newBuilder()
+ spanBuilder.setTraceId(UUID.randomUUID().toString)
+ spanBuilder.setSpanId(spanId)
+ spanBuilder.setServiceName(serviceName)
+ spanBuilder.setOperationName(operation)
+ spanBuilder.setStartTime(ts)
+ spanBuilder.setDuration(duration)
+
+ val logBuilder = Log.newBuilder()
+ if (client) {
+ logBuilder.setTimestamp(ts)
+ logBuilder.addFields(Tag.newBuilder().setKey("event").setVStr(CLIENT_SEND_EVENT).build())
+ spanBuilder.addLogs(logBuilder.build())
+ logBuilder.clear()
+ logBuilder.setTimestamp(ts + duration)
+ logBuilder.addFields(Tag.newBuilder().setKey("event").setVStr(CLIENT_RECV_EVENT).build())
+ spanBuilder.addLogs(logBuilder.build())
+ }
+
+ if (server) {
+ logBuilder.setTimestamp(ts)
+ logBuilder.addFields(Tag.newBuilder().setKey("event").setVStr(SERVER_RECV_EVENT).build())
+ spanBuilder.addLogs(logBuilder.build())
+ logBuilder.clear()
+ logBuilder.setTimestamp(ts + duration)
+ logBuilder.addFields(Tag.newBuilder().setKey("event").setVStr(SERVER_SEND_EVENT).build())
+ spanBuilder.addLogs(logBuilder.build())
+ }
+
+ spanBuilder.build()
+ }
+
+}
diff --git a/commons/commons/src/test/scala/com/expedia/www/haystack/commons/util/MetricDefinitionKeyGeneratorSpec.scala b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/util/MetricDefinitionKeyGeneratorSpec.scala
new file mode 100644
index 000000000..b97ab1e39
--- /dev/null
+++ b/commons/commons/src/test/scala/com/expedia/www/haystack/commons/util/MetricDefinitionKeyGeneratorSpec.scala
@@ -0,0 +1,37 @@
+package com.expedia.www.haystack.commons.util
+
+import java.util
+
+import com.expedia.metrics.{MetricDefinition, TagCollection}
+import com.expedia.www.haystack.commons.entities.TagKeys.PRODUCT_KEY
+import com.expedia.www.haystack.commons.unit.UnitTestSpec
+
+class MetricDefinitionKeyGeneratorSpec extends UnitTestSpec {
+ "Metric Definition Key Generator" should {
+ "generate a unique key based on key and tags in MetricDefinition" in {
+ Given("a Metric Definition")
+ val metricDefinition = getMetricDefinition
+
+ When("MetricDefinitionKeyGenerator is called")
+ val key = MetricDefinitionKeyGenerator.generateKey(metricDefinition)
+
+ Then("a unique key is generated")
+ key should equal("key=duration,mtype=gauge,op=some-op,product=haystack,svc=some-svc,unit=short")
+ }
+ }
+
+ private def getMetricDefinition: MetricDefinition = {
+ val metricTags = new util.LinkedHashMap[String, String] {
+ put("svc", "some-svc")
+ put("op", "some-op")
+ }
+ val tags = new util.LinkedHashMap[String, String] {
+ putAll(metricTags)
+ put(MetricDefinition.MTYPE, "gauge")
+ put(MetricDefinition.UNIT, "short")
+ put(PRODUCT_KEY, "haystack")
+ }
+ val tc = new TagCollection(tags)
+ new MetricDefinition("duration", tc, TagCollection.EMPTY)
+ }
+}
diff --git a/commons/idl/pom.xml b/commons/idl/pom.xml
new file mode 100644
index 000000000..e0481496e
--- /dev/null
+++ b/commons/idl/pom.xml
@@ -0,0 +1,138 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>com.expedia.www</groupId>
+        <artifactId>haystack-commons-parent</artifactId>
+        <version>1.0.66-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>haystack-idl-java</artifactId>
+    <packaging>jar</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-protobuf</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-stub</artifactId>
+            <scope>provided</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <sourceDirectory>${basedir}/src/main/scala</sourceDirectory>
+
+        <resources>
+            <resource>
+                <directory>${basedir}/src/main/resources</directory>
+                <filtering>true</filtering>
+            </resource>
+        </resources>
+
+        <plugins>
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>build-helper-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>com.github.os72</groupId>
+                <artifactId>protoc-jar-maven-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <phase>generate-sources</phase>
+                        <goals>
+                            <goal>run</goal>
+                        </goals>
+                        <configuration>
+                            <protocArtifact>com.google.protobuf:protoc:3.0.0</protocArtifact>
+                            <includeDirectories>
+                                <include>${project.basedir}/../haystack-idl/proto</include>
+                                <include>${project.basedir}/../haystack-idl/proto/api</include>
+                                <include>${project.basedir}/../haystack-idl/proto/backend</include>
+                            </includeDirectories>
+                            <inputDirectories>
+                                <include>${project.basedir}/../haystack-idl/proto</include>
+                                <include>${project.basedir}/../haystack-idl/proto/api</include>
+                                <include>${project.basedir}/../haystack-idl/proto/backend</include>
+                            </inputDirectories>
+                            <outputDirectory>${project.basedir}/target/generated-sources</outputDirectory>
+                            <outputTargets>
+                                <outputTarget>
+                                    <type>java</type>
+                                </outputTarget>
+                                <outputTarget>
+                                    <type>grpc-java</type>
+                                    <pluginArtifact>io.grpc:protoc-gen-grpc-java:1.0.1</pluginArtifact>
+                                </outputTarget>
+                            </outputTargets>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-source-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-javadoc-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-gpg-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.sonatype.plugins</groupId>
+                <artifactId>nexus-staging-maven-plugin</artifactId>
+                <extensions>true</extensions>
+                <configuration>
+                    <serverId>ossrh</serverId>
+                    <nexusUrl>https://oss.sonatype.org/</nexusUrl>
+                    <autoReleaseAfterClose>true</autoReleaseAfterClose>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/commons/mvnw b/commons/mvnw
new file mode 100755
index 000000000..5551fde8e
--- /dev/null
+++ b/commons/mvnw
@@ -0,0 +1,286 @@
+#!/bin/sh
+# ----------------------------------------------------------------------------
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ----------------------------------------------------------------------------
+
+# ----------------------------------------------------------------------------
+# Maven2 Start Up Batch script
+#
+# Required ENV vars:
+# ------------------
+# JAVA_HOME - location of a JDK home dir
+#
+# Optional ENV vars
+# -----------------
+# M2_HOME - location of maven2's installed home dir
+# MAVEN_OPTS - parameters passed to the Java VM when running Maven
+# e.g. to debug Maven itself, use
+# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+# ----------------------------------------------------------------------------
+
+if [ -z "$MAVEN_SKIP_RC" ] ; then
+
+ if [ -f /etc/mavenrc ] ; then
+ . /etc/mavenrc
+ fi
+
+ if [ -f "$HOME/.mavenrc" ] ; then
+ . "$HOME/.mavenrc"
+ fi
+
+fi
+
+# OS specific support. $var _must_ be set to either true or false.
+cygwin=false;
+darwin=false;
+mingw=false
+case "`uname`" in
+ CYGWIN*) cygwin=true ;;
+ MINGW*) mingw=true;;
+ Darwin*) darwin=true
+ # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
+ # See https://developer.apple.com/library/mac/qa/qa1170/_index.html
+ if [ -z "$JAVA_HOME" ]; then
+ if [ -x "/usr/libexec/java_home" ]; then
+ export JAVA_HOME="`/usr/libexec/java_home`"
+ else
+ export JAVA_HOME="/Library/Java/Home"
+ fi
+ fi
+ ;;
+esac
+
+if [ -z "$JAVA_HOME" ] ; then
+ if [ -r /etc/gentoo-release ] ; then
+ JAVA_HOME=`java-config --jre-home`
+ fi
+fi
+
+if [ -z "$M2_HOME" ] ; then
+ ## resolve links - $0 may be a link to maven's home
+ PRG="$0"
+
+ # need this for relative symlinks
+ while [ -h "$PRG" ] ; do
+ ls=`ls -ld "$PRG"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG="`dirname "$PRG"`/$link"
+ fi
+ done
+
+ saveddir=`pwd`
+
+ M2_HOME=`dirname "$PRG"`/..
+
+ # make it fully qualified
+ M2_HOME=`cd "$M2_HOME" && pwd`
+
+ cd "$saveddir"
+ # echo Using m2 at $M2_HOME
+fi
+
+# For Cygwin, ensure paths are in UNIX format before anything is touched
+if $cygwin ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --unix "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
+fi
+
+# For Mingw, ensure paths are in UNIX format before anything is touched
+if $mingw ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME="`(cd "$M2_HOME"; pwd)`"
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
+ # TODO classpath?
+fi
+
+if [ -z "$JAVA_HOME" ]; then
+ javaExecutable="`which javac`"
+ if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
+ # readlink(1) is not available as standard on Solaris 10.
+ readLink=`which readlink`
+ if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
+ if $darwin ; then
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
+ else
+ javaExecutable="`readlink -f \"$javaExecutable\"`"
+ fi
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaHome=`expr "$javaHome" : '\(.*\)/bin'`
+ JAVA_HOME="$javaHome"
+ export JAVA_HOME
+ fi
+ fi
+fi
+
+if [ -z "$JAVACMD" ] ; then
+ if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD="$JAVA_HOME/jre/sh/java"
+ else
+ JAVACMD="$JAVA_HOME/bin/java"
+ fi
+ else
+ JAVACMD="`which java`"
+ fi
+fi
+
+if [ ! -x "$JAVACMD" ] ; then
+ echo "Error: JAVA_HOME is not defined correctly." >&2
+ echo " We cannot execute $JAVACMD" >&2
+ exit 1
+fi
+
+if [ -z "$JAVA_HOME" ] ; then
+ echo "Warning: JAVA_HOME environment variable is not set."
+fi
+
+CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
+
+# traverses directory structure from process work directory to filesystem root
+# first directory with .mvn subdirectory is considered project base directory
+find_maven_basedir() {
+
+ if [ -z "$1" ]
+ then
+ echo "Path not specified to find_maven_basedir"
+ return 1
+ fi
+
+ basedir="$1"
+ wdir="$1"
+ while [ "$wdir" != '/' ] ; do
+ if [ -d "$wdir"/.mvn ] ; then
+ basedir=$wdir
+ break
+ fi
+ # workaround for JBEAP-8937 (on Solaris 10/Sparc)
+ if [ -d "${wdir}" ]; then
+ wdir=`cd "$wdir/.."; pwd`
+ fi
+ # end of workaround
+ done
+ echo "${basedir}"
+}
+
+# concatenates all lines of a file
+concat_lines() {
+ if [ -f "$1" ]; then
+ echo "$(tr -s '\n' ' ' < "$1")"
+ fi
+}
+
+BASE_DIR=`find_maven_basedir "$(pwd)"`
+if [ -z "$BASE_DIR" ]; then
+ exit 1;
+fi
+
+##########################################################################################
+# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+# This allows using the maven wrapper in projects that prohibit checking in binary data.
+##########################################################################################
+if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found .mvn/wrapper/maven-wrapper.jar"
+ fi
+else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
+ fi
+ jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
+ while IFS="=" read key value; do
+ case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
+ esac
+ done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Downloading from: $jarUrl"
+ fi
+ wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
+
+ if command -v wget > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found wget ... using wget"
+ fi
+ wget "$jarUrl" -O "$wrapperJarPath"
+ elif command -v curl > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found curl ... using curl"
+ fi
+ curl -o "$wrapperJarPath" "$jarUrl"
+ else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Falling back to using Java to download"
+ fi
+ javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
+ if [ -e "$javaClass" ]; then
+ if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Compiling MavenWrapperDownloader.java ..."
+ fi
+ # Compiling the Java class
+ ("$JAVA_HOME/bin/javac" "$javaClass")
+ fi
+ if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ # Running the downloader
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Running MavenWrapperDownloader.java ..."
+ fi
+ ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
+ fi
+ fi
+ fi
+fi
+##########################################################################################
+# End of extension
+##########################################################################################
+
+export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
+if [ "$MVNW_VERBOSE" = true ]; then
+ echo $MAVEN_PROJECTBASEDIR
+fi
+MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
+
+# For Cygwin, switch paths to Windows format before running java
+if $cygwin; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --path --windows "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
+ [ -n "$MAVEN_PROJECTBASEDIR" ] &&
+ MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
+fi
+
+WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+exec "$JAVACMD" \
+ $MAVEN_OPTS \
+ -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
+ "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
+ ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
diff --git a/commons/mvnw.cmd b/commons/mvnw.cmd
new file mode 100755
index 000000000..e5cfb0ae9
--- /dev/null
+++ b/commons/mvnw.cmd
@@ -0,0 +1,161 @@
+@REM ----------------------------------------------------------------------------
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements. See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership. The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License. You may obtain a copy of the License at
+@REM
+@REM http://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied. See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM ----------------------------------------------------------------------------
+
+@REM ----------------------------------------------------------------------------
+@REM Maven2 Start Up Batch script
+@REM
+@REM Required ENV vars:
+@REM JAVA_HOME - location of a JDK home dir
+@REM
+@REM Optional ENV vars
+@REM M2_HOME - location of maven2's installed home dir
+@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
+@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
+@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
+@REM e.g. to debug Maven itself, use
+@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+@REM ----------------------------------------------------------------------------
+
+@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
+@echo off
+@REM set title of command window
+title %0
+@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
+@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
+
+@REM set %HOME% to equivalent of $HOME
+if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
+
+@REM Execute a user defined script before this one
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
+@REM check for pre script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
+if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
+:skipRcPre
+
+@setlocal
+
+set ERROR_CODE=0
+
+@REM To isolate internal variables from possible post scripts, we use another setlocal
+@setlocal
+
+@REM ==== START VALIDATION ====
+if not "%JAVA_HOME%" == "" goto OkJHome
+
+echo.
+echo Error: JAVA_HOME not found in your environment. >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+:OkJHome
+if exist "%JAVA_HOME%\bin\java.exe" goto init
+
+echo.
+echo Error: JAVA_HOME is set to an invalid directory. >&2
+echo JAVA_HOME = "%JAVA_HOME%" >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+@REM ==== END VALIDATION ====
+
+:init
+
+@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
+@REM Fallback to current working directory if not found.
+
+set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
+IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
+
+set EXEC_DIR=%CD%
+set WDIR=%EXEC_DIR%
+:findBaseDir
+IF EXIST "%WDIR%"\.mvn goto baseDirFound
+cd ..
+IF "%WDIR%"=="%CD%" goto baseDirNotFound
+set WDIR=%CD%
+goto findBaseDir
+
+:baseDirFound
+set MAVEN_PROJECTBASEDIR=%WDIR%
+cd "%EXEC_DIR%"
+goto endDetectBaseDir
+
+:baseDirNotFound
+set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
+cd "%EXEC_DIR%"
+
+:endDetectBaseDir
+
+IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
+
+@setlocal EnableExtensions EnableDelayedExpansion
+for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
+@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
+
+:endReadAdditionalConfig
+
+SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
+set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
+set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
+FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO (
+ IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
+)
+
+@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
+if exist %WRAPPER_JAR% (
+ echo Found %WRAPPER_JAR%
+) else (
+ echo Couldn't find %WRAPPER_JAR%, downloading it ...
+ echo Downloading from: %DOWNLOAD_URL%
+ powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"
+ echo Finished downloading %WRAPPER_JAR%
+)
+@REM End of extension
+
+%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
+if ERRORLEVEL 1 goto error
+goto end
+
+:error
+set ERROR_CODE=1
+
+:end
+@endlocal & set ERROR_CODE=%ERROR_CODE%
+
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
+@REM check for post script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
+if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
+:skipRcPost
+
+@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
+if "%MAVEN_BATCH_PAUSE%" == "on" pause
+
+if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
+
+exit /B %ERROR_CODE%
diff --git a/commons/pom.xml b/commons/pom.xml
new file mode 100644
index 000000000..c77ec5c44
--- /dev/null
+++ b/commons/pom.xml
@@ -0,0 +1,522 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>com.expedia.www</groupId>
+    <artifactId>haystack-commons-parent</artifactId>
+    <version>1.0.66-SNAPSHOT</version>
+    <packaging>pom</packaging>
+
+    <scm>
+        <connection>scm:git:git://github.com/ExpediaDotCom/haystack-commons.git</connection>
+        <developerConnection>scm:git:ssh://github.com/ExpediaDotCom/haystack-commons.git</developerConnection>
+        <url>http://github.com/ExpediaDotCom/haystack-commons</url>
+    </scm>
+
+    <name>haystack-commons</name>
+    <description>This module contains some of the common code for haystack modules</description>
+    <url>https://github.com/ExpediaDotCom/haystack-commons/tree/master</url>
+
+    <licenses>
+        <license>
+            <name>Apache License, Version 2.0</name>
+            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+            <distribution>repo</distribution>
+        </license>
+    </licenses>
+
+    <developers>
+        <developer>
+            <id>haystack</id>
+            <name>Haystack Team</name>
+            <email>haystack@expedia.com</email>
+            <organizationUrl>https://github.com/ExpediaDotCom/haystack</organizationUrl>
+        </developer>
+    </developers>
+
+    <modules>
+        <module>idl</module>
+        <module>commons</module>
+    </modules>
+
+    <properties>
+        <typesafe-config.version>1.3.1</typesafe-config.version>
+        <protobuf.version>3.0.2</protobuf.version>
+        <metrics-core.version>3.3.1</metrics-core.version>
+        <slf4j.version>1.7.25</slf4j.version>
+        <commons-codec.version>1.4</commons-codec.version>
+        <guava.version>19.0</guava.version>
+
+        <msgpack.version>0.8.13</msgpack.version>
+        <kafka.version>1.1.1</kafka.version>
+        <grpc.version>1.7.0</grpc.version>
+
+        <json4s.version>3.6.0</json4s.version>
+        <metrics-java.version>0.4.0</metrics-java.version>
+
+        <project.jdk.version>1.8</project.jdk.version>
+        <scala.major.version>2</scala.major.version>
+        <scala.minor.version>12</scala.minor.version>
+
+        <scala.maintenance.version>7</scala.maintenance.version>
+        <scala.major.minor.version>${scala.major.version}.${scala.minor.version}</scala.major.minor.version>
+        <scala-library.version>${scala.major.minor.version}.${scala.maintenance.version}</scala-library.version>
+
+        <pegdown.version>1.6.0</pegdown.version>
+        <scalatest.version>3.0.3</scalatest.version>
+        <easymock.version>3.4</easymock.version>
+
+        <build-helper-maven-plugin.version>3.0.0</build-helper-maven-plugin.version>
+        <maven-protobuf-plugin.version>3.3.0.1</maven-protobuf-plugin.version>
+        <scala-maven-plugin.version>3.4.2</scala-maven-plugin.version>
+        <maven-scalatest-plugin.version>1.0</maven-scalatest-plugin.version>
+        <maven-compiler-plugin.version>3.8.0</maven-compiler-plugin.version>
+        <maven-jar-plugin-version>3.1.0</maven-jar-plugin-version>
+        <scalastyle.config.location>${project.basedir}/../scalastyle/scalastyle_config.xml</scalastyle.config.location>
+        <maven-scalastyle-plugin.version>0.9.0</maven-scalastyle-plugin.version>
+        <scoverage.plugin.version>1.3.0</scoverage.plugin.version>
+
+        <maven-gpg-plugin.version>1.6</maven-gpg-plugin.version>
+        <maven-source-plugin.version>3.0.1</maven-source-plugin.version>
+        <maven-javadoc-plugin.version>3.1.0</maven-javadoc-plugin.version>
+        <nexus-staging-maven-plugin.version>1.6.8</nexus-staging-maven-plugin.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>${guava.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>javax.annotation</groupId>
+            <artifactId>javax.annotation-api</artifactId>
+            <version>1.3.2</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>com.typesafe</groupId>
+            <artifactId>config</artifactId>
+            <version>${typesafe-config.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.json4s</groupId>
+            <artifactId>json4s-jackson_${scala.major.minor.version}</artifactId>
+            <version>${json4s.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>build-helper-maven-plugin</artifactId>
+            <version>${build-helper-maven-plugin.version}</version>
+            <scope>provided</scope>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-api</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-nop</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-jdk14</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>jcl-over-slf4j</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
+        <dependency>
+            <groupId>org.scala-lang</groupId>
+            <artifactId>scala-library</artifactId>
+            <version>${scala-library.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <version>${slf4j.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java</artifactId>
+            <version>${protobuf.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.expedia</groupId>
+            <artifactId>metrics-java</artifactId>
+            <version>${metrics-java.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-protobuf</artifactId>
+            <version>${grpc.version}</version>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-stub</artifactId>
+            <version>${grpc.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>com.codahale.metrics</groupId>
+            <artifactId>metrics-core</artifactId>
+            <version>${metrics-core.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>commons-codec</groupId>
+            <artifactId>commons-codec</artifactId>
+            <version>${commons-codec.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka_${scala.major.minor.version}</artifactId>
+            <version>${kafka.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-streams</artifactId>
+            <version>${kafka.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.msgpack</groupId>
+            <artifactId>msgpack-core</artifactId>
+            <version>${msgpack.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <!-- test dependencies -->
+        <dependency>
+            <groupId>org.scalatest</groupId>
+            <artifactId>scalatest_${scala.major.minor.version}</artifactId>
+            <version>${scalatest.version}</version>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.easymock</groupId>
+            <artifactId>easymock</artifactId>
+            <version>${easymock.version}</version>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.pegdown</groupId>
+            <artifactId>pegdown</artifactId>
+            <version>${pegdown.version}</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <sourceDirectory>${basedir}/src/main/scala</sourceDirectory>
+        <resources>
+            <resource>
+                <directory>${basedir}/src/main/resources</directory>
+                <filtering>true</filtering>
+            </resource>
+        </resources>
+
+        <pluginManagement>
+            <plugins>
+                <plugin>
+                    <groupId>io.takari</groupId>
+                    <artifactId>maven</artifactId>
+                    <version>0.6.1</version>
+                </plugin>
+                <plugin>
+                    <groupId>org.codehaus.mojo</groupId>
+                    <artifactId>build-helper-maven-plugin</artifactId>
+                    <version>${build-helper-maven-plugin.version}</version>
+                    <executions>
+                        <execution>
+                            <phase>generate-sources</phase>
+                            <goals>
+                                <goal>add-source</goal>
+                            </goals>
+                            <configuration>
+                                <sources>
+                                    <source>${basedir}/src/main/java</source>
+                                </sources>
+                            </configuration>
+                        </execution>
+                    </executions>
+                </plugin>
+                <plugin>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-compiler-plugin</artifactId>
+                    <configuration>
+                        <source>${project.jdk.version}</source>
+                        <target>${project.jdk.version}</target>
+                        <encoding>UTF-8</encoding>
+                    </configuration>
+                    <version>${maven-compiler-plugin.version}</version>
+                </plugin>
+                <plugin>
+                    <groupId>net.alchim31.maven</groupId>
+                    <artifactId>scala-maven-plugin</artifactId>
+                    <version>${scala-maven-plugin.version}</version>
+                    <executions>
+                        <execution>
+                            <id>scala-compile-first</id>
+                            <phase>process-resources</phase>
+                            <goals>
+                                <goal>add-source</goal>
+                                <goal>compile</goal>
+                            </goals>
+                        </execution>
+                        <execution>
+                            <id>scala-test-compile</id>
+                            <phase>process-test-resources</phase>
+                            <goals>
+                                <goal>testCompile</goal>
+                            </goals>
+                        </execution>
+                        <execution>
+                            <id>attach-javadocs</id>
+                            <goals>
+                                <goal>doc-jar</goal>
+                            </goals>
+                        </execution>
+                    </executions>
+                </plugin>
+                <plugin>
+                    <groupId>com.github.os72</groupId>
+                    <artifactId>protoc-jar-maven-plugin</artifactId>
+                    <version>${maven-protobuf-plugin.version}</version>
+                </plugin>
+                <plugin>
+                    <groupId>org.scalatest</groupId>
+                    <artifactId>scalatest-maven-plugin</artifactId>
+                    <version>${maven-scalatest-plugin.version}</version>
+                    <executions>
+                        <execution>
+                            <id>test</id>
+                            <goals>
+                                <goal>test</goal>
+                            </goals>
+                        </execution>
+                    </executions>
+                </plugin>
+                <plugin>
+                    <groupId>org.scalastyle</groupId>
+                    <artifactId>scalastyle-maven-plugin</artifactId>
+                    <version>${maven-scalastyle-plugin.version}</version>
+                    <configuration>
+                        <verbose>false</verbose>
+                        <failOnViolation>true</failOnViolation>
+                        <includeTestSourceDirectory>true</includeTestSourceDirectory>
+                        <failOnWarning>false</failOnWarning>
+                        <sourceDirectory>${project.basedir}/src/main/scala</sourceDirectory>
+                        <testSourceDirectory>${project.basedir}/src/test/scala</testSourceDirectory>
+                        <configLocation>${scalastyle.config.location}</configLocation>
+                        <outputFile>${project.build.directory}/scalastyle-output.xml</outputFile>
+                        <outputEncoding>UTF-8</outputEncoding>
+                    </configuration>
+                    <executions>
+                        <execution>
+                            <id>compile-scalastyle</id>
+                            <goals>
+                                <goal>check</goal>
+                            </goals>
+                            <phase>compile</phase>
+                        </execution>
+                    </executions>
+                </plugin>
+                <plugin>
+                    <groupId>org.scoverage</groupId>
+                    <artifactId>scoverage-maven-plugin</artifactId>
+                    <version>${scoverage.plugin.version}</version>
+                    <configuration>
+                        <minimumCoverage>80</minimumCoverage>
+                        <failOnMinimumCoverage>true</failOnMinimumCoverage>
+                        <highlighting>true</highlighting>
+                        <scalaVersion>${scala-library.version}</scalaVersion>
+                        <aggregate>true</aggregate>
+                    </configuration>
+                </plugin>
+                <plugin>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-source-plugin</artifactId>
+                    <version>${maven-source-plugin.version}</version>
+                    <executions>
+                        <execution>
+                            <id>attach-sources</id>
+                            <goals>
+                                <goal>jar-no-fork</goal>
+                            </goals>
+                        </execution>
+                    </executions>
+                </plugin>
+                <plugin>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-javadoc-plugin</artifactId>
+                    <version>${maven-javadoc-plugin.version}</version>
+                    <executions>
+                        <execution>
+                            <id>attach-javadocs</id>
+                            <goals>
+                                <goal>jar</goal>
+                            </goals>
+                        </execution>
+                    </executions>
+                </plugin>
+                <plugin>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-jar-plugin</artifactId>
+                    <version>${maven-jar-plugin-version}</version>
+                </plugin>
+                <plugin>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-gpg-plugin</artifactId>
+                    <version>${maven-gpg-plugin.version}</version>
+                    <executions>
+                        <execution>
+                            <id>sign-artifacts</id>
+                            <phase>verify</phase>
+                            <goals>
+                                <goal>sign</goal>
+                            </goals>
+                        </execution>
+                    </executions>
+                </plugin>
+                <plugin>
+                    <groupId>org.sonatype.plugins</groupId>
+                    <artifactId>nexus-staging-maven-plugin</artifactId>
+                    <version>${nexus-staging-maven-plugin.version}</version>
+                    <extensions>true</extensions>
+                    <configuration>
+                        <serverId>ossrh</serverId>
+                        <nexusUrl>https://oss.sonatype.org/</nexusUrl>
+                        <autoReleaseAfterClose>true</autoReleaseAfterClose>
+                    </configuration>
+                </plugin>
+            </plugins>
+        </pluginManagement>
+
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-source-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-javadoc-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <version>${maven-jar-plugin-version}</version>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-gpg-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.sonatype.plugins</groupId>
+                <artifactId>nexus-staging-maven-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+
+    <distributionManagement>
+        <snapshotRepository>
+            <id>ossrh</id>
+            <url>https://oss.sonatype.org/content/repositories/snapshots</url>
+        </snapshotRepository>
+        <repository>
+            <id>ossrh</id>
+            <url>http://oss.sonatype.org/service/local/staging/deploy/maven2/</url>
+        </repository>
+    </distributionManagement>
+</project>
diff --git a/commons/scalastyle/scalastyle_config.xml b/commons/scalastyle/scalastyle_config.xml
new file mode 100644
index 000000000..e0cd28086
--- /dev/null
+++ b/commons/scalastyle/scalastyle_config.xml
@@ -0,0 +1,136 @@
+<scalastyle>
+    <name>Scalastyle standard configuration</name>
+</scalastyle>
diff --git a/docker/.gitignore b/docker/.gitignore
new file mode 100644
index 000000000..1062418c4
--- /dev/null
+++ b/docker/.gitignore
@@ -0,0 +1,2 @@
+.idea/
+*.iml
diff --git a/docker/LICENSE b/docker/LICENSE
new file mode 100644
index 000000000..9f133f5cd
--- /dev/null
+++ b/docker/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2017 Expedia, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 000000000..4717ea79d
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,103 @@
+- [Running Haystack using docker-compose](#running-haystack-using-docker-compose)
+  * [Allocate memory to docker](#allocate-memory-to-docker)
+  * [To start Haystack's traces, trends, service graph and adaptive-alerting](#to-start-haystacks-traces-trends-service-graph-and-adaptive-alerting)
+  * [To start Haystack's traces, blobs, trends, service graph and adaptive-alerting](#to-start-haystacks-traces-blobs-trends-service-graph-and-adaptive-alerting)
+  * [To start Zipkin (tracing) with Haystack's trends, service graph and adaptive-alerting](#to-start-zipkin-tracing-with-haystacks-trends-service-graph-and-adaptive-alerting)
+  * [Note on composing components](#note-on-composing-components)
+  * [Note on Adaptive Alerting](#note-on-adaptive-alerting)
+
+## Running Haystack using docker-compose
+
+### Allocate memory to docker
+
+Please check this [Stackoverflow answer](https://stackoverflow.com/questions/44533319/how-to-assign-more-memory-to-docker-container)
+
+To run all of Haystack and its components, __it is suggested that you raise the default memory allocation in the Docker settings from `2GiB` to `6GiB`__
+
+### To start Haystack's traces, trends, service graph and adaptive-alerting
+
+```bash
+docker-compose -f docker-compose.yml \
+ -f traces/docker-compose.yml \
+ -f trends/docker-compose.yml \
+ -f service-graph/docker-compose.yml \
+ -f adaptive-alerting/docker-compose.yml \
+ -f agent/docker-compose.yml \
+ -f example/traces/docker-compose.yml up
+```
+
+The command above starts the haystack components and two sample web applications instrumented with the haystack-agent. It may take a minute or two for the containers to come up and connect to each other.
+
+Haystack's UI will be available at http://localhost:8080
+
+Haystack's agent will be available on host port 35000 (i.e., localhost:35000).
+
+The [sample application](https://github.com/ExpediaDotCom/opentracing-spring-haystack-example) has a 'frontend' and a 'backend'. The 'frontend' app will be available at http://localhost:9090/hello. Sending a request to the frontend triggers a call to the backend before the request is fulfilled.
+
+Send some sample requests to the 'frontend' application by running
+
+```bash
+run.sh
+```
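+
+If you prefer not to use the script, a few ad-hoc requests against the frontend URL shown above work just as well, e.g.:
+
+```bash
+# hit the sample frontend a few times to generate traces
+for i in $(seq 1 20); do curl -s http://localhost:9090/hello > /dev/null; done
+```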
+
+One can then see the traces, trends and a service-graph showing the relationship between the two applications in the UI.
+
+### To start Haystack's traces, blobs, trends, service graph and adaptive-alerting
+
+```bash
+docker-compose -f docker-compose.yml \
+ -f traces/docker-compose.yml \
+ -f trends/docker-compose.yml \
+ -f service-graph/docker-compose.yml \
+ -f adaptive-alerting/docker-compose.yml \
+ -f agent/docker-compose.yml \
+ -f example/blobs/docker-compose.yml up
+```
+
+The command above starts the haystack components and two sample web applications (a client and a server) instrumented with the haystack-agent. It may take a minute or two for the containers to come up and connect to each other.
+
+Haystack's UI will be available at http://localhost:8080
+
+Haystack's agent will be available on host port 35000 (i.e., localhost:35000).
+
+The [sample application](https://github.com/ExpediaDotCom/haystack-blob-example) has a 'client' and a 'server'. The client interacts with the server listening on port `9090`. The client app will be available at `http://localhost:9091/displayMessage`. Sending a request to the client triggers a call to the server before the request is fulfilled.
+
+Call the client using the link above; you will then see the traces, trends and a service-graph showing the relationship between the two applications in the UI.
+
+Alternatively, send some sample requests to the 'server' application by running
+
+```bash
+run.sh
+```
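+
+A single manual request against the client URL shown above works too:
+
+```bash
+curl http://localhost:9091/displayMessage
+```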
+
+### To start Zipkin (tracing) with Haystack's trends, service graph and adaptive-alerting
+
+```bash
+docker-compose -f docker-compose.yml \
+ -f zipkin/docker-compose.yml \
+ -f trends/docker-compose.yml \
+ -f adaptive-alerting/docker-compose.yml \
+ -f service-graph/docker-compose.yml up
+```
+
+The command above starts [Pitchfork](https://github.com/HotelsDotCom/pitchfork) to proxy data to [Zipkin](https://github.com/openzipkin/) and Haystack.
+
+Give the containers a minute or two to come up and connect to each other. Once the stack is up, one can use the sample application at https://github.com/openzipkin/brave-webmvc-example to send some sample data and see traces (from Zipkin), trends and a service-graph in haystack-ui at http://localhost:8080
+
+### Note on composing components
+
+Note that the commands above combine a series of `docker-compose.yml` files.
+
+- Haystack needs at least one trace provider (`traces/docker-compose.yml` or `zipkin/docker-compose.yml`) and one trends provider (`trends/docker-compose.yml`)
+- One can remove `adaptive-alerting/docker-compose.yml` and `service-graph/docker-compose.yml` if those components are not required
+- One can remove `example/traces/docker-compose.yml` or `example/blobs/docker-compose.yml` and keep just `agent/docker-compose.yml` to start your own haystack-integrated application and send data
+- If one is using a Zipkin-instrumented app, use `zipkin/docker-compose.yml` to send data to the stack, and add trends, service-graph and adaptive-alerting as needed
+- Starting the stack with only the base docker-compose.yml starts core services like kafka, cassandra and elasticsearch, along with haystack-ui backed by a mock:
+
+```bash
+docker-compose -f docker-compose.yml up
+```
+
+### Note on Adaptive Alerting
+
+- The Model Service, which fetches the anomaly detection model for a specific metric, has been replaced with a mock (using WireMock) so that the stack falls back to a default model. The default detection model is [EWMA](https://en.wikipedia.org/wiki/EWMA_chart)
+- The Model Service is being refactored to allow better model selection, and we will be releasing it in the next month or two
+- The Alert-Notification service, which dispatches alerts to either email or slack, is [commented out in the docker-compose](adaptive-alerting/docker-compose.yml#L100) file for local testing. You can uncomment it and provide a slack_token or smtp credentials via the environment.
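+
+If you enable the notifier, its settings can be supplied roughly as sketched below; the env var names come from the commented block in the compose file, and the values shown are placeholders:
+
+```yaml
+# excerpt from adaptive-alerting/docker-compose.yml, uncommented
+notifier:
+  environment:
+    SLACK_TOKEN: "your-slack-token"   # or SMTP_HOST / SMTP_USERNAME / SMTP_PASSWORD for email
+```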
diff --git a/docker/adaptive-alerting/configs/aa-manager/docker.conf b/docker/adaptive-alerting/configs/aa-manager/docker.conf
new file mode 100644
index 000000000..150ea4d20
--- /dev/null
+++ b/docker/adaptive-alerting/configs/aa-manager/docker.conf
@@ -0,0 +1,9 @@
+ad-manager {
+ streams {
+ bootstrap.servers = "kafkasvc:9092"
+ application.id = "ad-manager"
+ timestamp.extractor = "com.expedia.adaptivealerting.kafka.serde.MappedMetricDataTimestampExtractor"
+ JsonPojoClass = "com.expedia.adaptivealerting.core.data.MappedMetricData"
+ }
+ model-service-uri-template = "http://modelservice:8080/api/models/search/findByMetricHash?hash=%s"
+}
diff --git a/docker/adaptive-alerting/configs/aa-mapper/docker.conf b/docker/adaptive-alerting/configs/aa-mapper/docker.conf
new file mode 100644
index 000000000..6c04a393b
--- /dev/null
+++ b/docker/adaptive-alerting/configs/aa-mapper/docker.conf
@@ -0,0 +1,11 @@
+ad-mapper {
+ streams {
+ bootstrap.servers = "kafkasvc:9092"
+ application.id = "ad-mapper"
+ default.value.serde = "com.expedia.adaptivealerting.kafka.serde.MetricDataSerde"
+ timestamp.extractor = "com.expedia.adaptivealerting.kafka.serde.MetricDataTimestampExtractor"
+ JsonPojoClass = "com.expedia.metrics.MetricData"
+ }
+ inbound-topic = "mdm"
+ model-service-uri-template = "http://modelservice:8080/api/detectors/search/findByMetricHash?hash=%s"
+}
diff --git a/docker/adaptive-alerting/db_init/init_db.sh b/docker/adaptive-alerting/db_init/init_db.sh
new file mode 100644
index 000000000..7ef8e8203
--- /dev/null
+++ b/docker/adaptive-alerting/db_init/init_db.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env sh
+
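+# seeds the aa_model_service schema from the SQL files mounted at /home;
+# intended to run from /docker-entrypoint-initdb.d (see the commented 'database' service)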
+mysql -u root -proot < /home/build-db.sql
+mysql -u root -proot < /home/stored-procs.sql
+mysql -u root -proot < /home/sample-data.sql
diff --git a/docker/adaptive-alerting/docker-compose.yml b/docker/adaptive-alerting/docker-compose.yml
new file mode 100644
index 000000000..f154d19c5
--- /dev/null
+++ b/docker/adaptive-alerting/docker-compose.yml
@@ -0,0 +1,163 @@
+version: "3"
+services:
+
+# database:
+# image: mysql:5.7
+# ports:
+# - "3306:3306"
+# restart: always
+# volumes:
+# - ./adaptive-alerting/sql:/home/
+# - ./adaptive-alerting/db_init:/docker-entrypoint-initdb.d
+# command: --default-authentication-plugin=mysql_native_password
+# environment:
+# MYSQL_ROOT_PASSWORD: root
+# MYSQL_DATABASE: aa_model_service
+#
+#
+# modelservice:
+# image: expediadotcom/adaptive-alerting-modelservice:e90821e5ca0e0d895e01d9cb87612c463dcf0dc6
+# ports:
+# - "8008:8008"
+# environment:
+# AA_GRAPHITE_ENABLED : "false"
+# SPRING_CONFIG_LOCATION : "classpath:/application.yml"
+# PASSWORD : "root"
+# JAVA_XMS: 128m
+# depends_on:
+# - database
+# links:
+# - database
+# restart: always
+
+ modelservice:
+ image: rodolpheche/wiremock
+# ports:
+# - "8500:8080"
+ volumes:
+ - ./adaptive-alerting/stubs:/home/wiremock/
+
+
+ aa-mapper:
+ image: expediadotcom/adaptive-alerting-ad-mapper:e90821e5ca0e0d895e01d9cb87612c463dcf0dc6
+ environment:
+ AA_GRAPHITE_ENABLED : "false"
+ AA_OVERRIDES_CONFIG_PATH : "/home/docker.conf"
+ JAVA_XMS: 128m
+ volumes:
+ - ./adaptive-alerting/configs/aa-mapper:/home
+ depends_on:
+ - kafkasvc
+ - zookeeper
+ - modelservice
+ links:
+ - kafkasvc
+ - modelservice
+ restart: always
+
+ aa-manager:
+ image: expediadotcom/adaptive-alerting-ad-manager:e90821e5ca0e0d895e01d9cb87612c463dcf0dc6
+ environment:
+ AA_GRAPHITE_ENABLED : "false"
+ AA_OVERRIDES_CONFIG_PATH : "/home/docker.conf"
+ JAVA_XMS: 128m
+ volumes:
+ - ./adaptive-alerting/configs/aa-manager:/home
+ depends_on:
+ - kafkasvc
+ - zookeeper
+ - modelservice
+ links:
+ - kafkasvc
+ - modelservice
+ restart: always
+
+ anomaly-to-alert:
+ image: expediadotcom/adaptive-alerting-a2a-mapper:3482d04bcee0c62b1d64e5cf9b289f243d03a77b
+ environment:
+ AA_GRAPHITE_ENABLED : "false"
+ JAVA_XMS: 128m
+ depends_on:
+ - kafkasvc
+ - zookeeper
+ links:
+ - kafkasvc
+ restart: always
+
+ subscription:
+ image: expediadotcom/alert-manager-service:67a10b9e28dfc51e806b9ee629ad91a7dfc1d505
+ environment:
+ AM_GRAPHITE_ENABLED : "false"
+ SPRING_CONFIG_LOCATION : "classpath:/application.yml"
+ JAVA_XMS: 128m
+ depends_on:
+ - elasticsearch
+ links:
+ - elasticsearch
+ restart: always
+
+
+# The notifier is a kafka client that reads alerts from a kafka topic and dispatches them;
+# uncomment the block below and supply credentials via the environment to enable it.
+# notifier:
+# image: expediadotcom/alert-manager-notifier:3117ece498d619d560939b5acc6bc948b23899da
+# environment:
+# AM_GRAPHITE_ENABLED : "false"
+# SPRING_CONFIG_LOCATION : "classpath:/application.yml"
+## SMTP_PASSWORD: "password"
+## SMTP_USERNAME: "user"
+## SMTP_HOST: "localhost"
+## SLACK_TOKEN: "token"
+# JAVA_XMS: 128m
+# depends_on:
+# - elasticsearch
+# - subscription
+# - zookeeper
+# - kafkasvc
+# links:
+# - elasticsearch
+# - kafkasvc
+# - subscription
+# restart: always
+
+
+ alert-api:
+ image: expediadotcom/haystack-alert-api:f63141bbc1c1e671766b5e83b0e8efb605ab0402
+ ports:
+ - "4500:8088"
+ environment:
+ HAYSTACK_GRAPHITE_ENABLED: "false"
+ HAYSTACK_LOG_LEVEL: "INFO"
+ JAVA_XMS: 128m
+ depends_on:
+ - elasticsearch
+ - subscription
+ links:
+ - elasticsearch
+ - subscription
+ restart: always
+
+
+ anomaly-store:
+ image: expediadotcom/haystack-anomaly-store:f63141bbc1c1e671766b5e83b0e8efb605ab0402
+ environment:
+ HAYSTACK_GRAPHITE_ENABLED: "false"
+ HAYSTACK_LOG_LEVEL: "INFO"
+ JAVA_XMS: 128m
+ depends_on:
+ - elasticsearch
+ - kafkasvc
+ links:
+ - kafkasvc
+ - elasticsearch
+ restart: always
+
+
+ ui:
+ environment:
+ HAYSTACK_OVERRIDES_CONFIG_PATH: /data/connectors.json
+ HAYSTACK_PROP_CONNECTORS_ALERTS_CONNECTOR__NAME: "haystack"
+ HAYSTACK_PROP_CONNECTORS_ALERTS_SUBSCRIPTIONS_CONNECTOR__NAME: "haystack"
+ HAYSTACK_PROP_CONNECTORS_ALERTS_HAYSTACK__HOST: "alert-api"
+ HAYSTACK_PROP_CONNECTORS_ALERTS_HAYSTACK__PORT: "8088"
+
+
diff --git a/docker/adaptive-alerting/sql/build-db.sql b/docker/adaptive-alerting/sql/build-db.sql
new file mode 100644
index 000000000..53bc7ba7a
--- /dev/null
+++ b/docker/adaptive-alerting/sql/build-db.sql
@@ -0,0 +1,75 @@
+DROP DATABASE IF EXISTS aa_model_service;
+
+CREATE DATABASE aa_model_service;
+
+USE aa_model_service;
+
+create table metric (
+ id int unsigned primary key not null auto_increment,
+ ukey varchar(255) unique not null,
+ hash char(36) unique not null,
+ description varchar(255),
+ tags json,
+ date_created timestamp default CURRENT_TIMESTAMP
+);
+
+create table model_type (
+ id smallint unsigned primary key not null auto_increment,
+ ukey varchar(100) unique not null,
+ date_created timestamp default CURRENT_TIMESTAMP
+);
+
+CREATE TABLE detector (
+ id int unsigned primary key NOT NULL AUTO_INCREMENT,
+ uuid char(36) unique not null,
+ model_type_id smallint unsigned not null,
+ hyperparams json,
+ training_meta json,
+ seyren_flag boolean default false,
+ date_created timestamp NULL DEFAULT CURRENT_TIMESTAMP,
+ created_by varchar(100),
+ constraint model_type_id_fk foreign key (model_type_id) references model_type (id)
+);
+
+create table model (
+ id int unsigned primary key not null auto_increment,
+ params json,
+ detector_id int unsigned not null,
+ weak_sigmas decimal(3, 3),
+ strong_sigmas decimal(3, 3),
+ other_stuff json,
+ date_created timestamp default CURRENT_TIMESTAMP,
+ constraint detector_id_fk foreign key (detector_id) references detector (id)
+);
+
+create table metric_detector_mapping (
+ id int unsigned primary key not null auto_increment,
+ metric_id int unsigned not null,
+ detector_id int unsigned not null,
+ date_created timestamp default CURRENT_TIMESTAMP,
+ constraint metric_id_fk foreign key (metric_id) references metric (id),
+ constraint detector_id_mapping_fk foreign key (detector_id) references detector (id),
+ unique index (metric_id, detector_id)
+);
+
+create table user (
+ id int unsigned primary key not null auto_increment,
+ username varchar(100) unique not null,
+ password varchar(100) not null,
+ role varchar(100),
+ enabled boolean
+);
+
+create table oauth_client_details (
+ client_id VARCHAR(256) PRIMARY KEY,
+ resource_ids VARCHAR(256),
+ client_secret VARCHAR(256),
+ scope VARCHAR(256),
+ authorized_grant_types VARCHAR(256),
+ web_server_redirect_uri VARCHAR(256),
+ authorities VARCHAR(256),
+ access_token_validity INTEGER,
+ refresh_token_validity INTEGER,
+ additional_information VARCHAR(4096),
+ autoapprove VARCHAR(256)
+);
diff --git a/docker/adaptive-alerting/sql/sample-data.sql b/docker/adaptive-alerting/sql/sample-data.sql
new file mode 100644
index 000000000..41b8c2c79
--- /dev/null
+++ b/docker/adaptive-alerting/sql/sample-data.sql
@@ -0,0 +1,29 @@
+USE `aa_model_service`;
+
+INSERT INTO `metric` (`ukey`, `hash`, `tags`) VALUES
+ ('karmalab.stats.gauges.AirBoss.chelappabo003_karmalab_net.java.lang.Threading.ThreadCount', '1.71828d68a2938ff1ef96c340f12e2dd6', '{"unit": "unknown", "mtype": "gauge", "org_id": "1", "interval": "30"}')
+;
+INSERT INTO `metric` (`ukey`, `hash`, `tags`) VALUES
+ ('dummy.metric', '1.25345234523452352253452f12e2dd6', '{"unit": "unknown", "mtype": "gauge", "org_id": "1", "interval": "30"}')
+;
+
+
+INSERT INTO `model_type` (`ukey`) VALUES
+ ('constant-detector'),
+ ('cusum-detector'),
+ ('ewma-detector'),
+ ('individuals-detector'),
+ ('pewma-detector'),
+ ('rcf-detector')
+;
+
+CALL insert_detector('3ec81aa2-2cdc-415e-b4f3-c1beb223ae60','cusum-detector');
+CALL insert_detector('2cdc1aa2-2cdc-355e-b4f3-d2beb223ae60','constant-detector');
+
+CALL insert_model('3ec81aa2-2cdc-415e-b4f3-c1beb223ae60','{"alpha":40,"beta":30}', '2018-10-10 10:02:04');
+CALL insert_model('3ec81aa2-2cdc-415e-b4f3-c1beb223ae60','{"alpha":100,"beta":455}','2018-10-12 17:01:04');
+CALL insert_model('2cdc1aa2-2cdc-355e-b4f3-d2beb223ae60','{"low":100,"high":455}','2018-10-10 10:01:04');
+CALL insert_model('2cdc1aa2-2cdc-355e-b4f3-d2beb223ae60','{"low":100,"high":455}', '2018-10-12 17:01:04');
+
+CALL insert_mapping('1.71828d68a2938ff1ef96c340f12e2dd6', '3ec81aa2-2cdc-415e-b4f3-c1beb223ae60');
+CALL insert_mapping('1.25345234523452352253452f12e2dd6', '2cdc1aa2-2cdc-355e-b4f3-d2beb223ae60');
diff --git a/docker/adaptive-alerting/sql/stored-procs.sql b/docker/adaptive-alerting/sql/stored-procs.sql
new file mode 100644
index 000000000..90e7cc4a2
--- /dev/null
+++ b/docker/adaptive-alerting/sql/stored-procs.sql
@@ -0,0 +1,73 @@
+USE `aa_model_service`;
+
+DROP PROCEDURE IF EXISTS insert_mapping;
+DROP PROCEDURE IF EXISTS insert_model;
+DROP PROCEDURE IF EXISTS insert_detector;
+
+DROP PROCEDURE IF EXISTS insert_mapping_wildcard_metric_targets_to_detector;
+DELIMITER //
+
+CREATE PROCEDURE insert_detector (
+ IN uuid CHAR(36),
+ IN type_ukey VARCHAR(100)
+)
+ BEGIN
+ DECLARE type_id INT(5) UNSIGNED;
+
+ SELECT t.id INTO type_id FROM model_type t WHERE t.ukey = type_ukey;
+ INSERT INTO detector (uuid, model_type_id) VALUES (uuid, type_id);
+ END //
+
+CREATE PROCEDURE insert_model (
+ IN uuid CHAR(36),
+ IN params json,
+ IN date_created timestamp
+)
+ BEGIN
+ DECLARE detector_id INT(5) UNSIGNED;
+
+ SELECT d.id INTO detector_id FROM detector d WHERE d.uuid = uuid;
+ INSERT INTO model (detector_id, params , date_created) VALUES (detector_id, params, date_created);
+ END //
+
+CREATE PROCEDURE insert_mapping (
+ IN metric_hash CHAR(36),
+ IN detector_uuid CHAR(36)
+)
+ BEGIN
+ DECLARE metric_id INT(10) UNSIGNED;
+ DECLARE detector_id INT(10) UNSIGNED;
+
+ SELECT m.id INTO metric_id FROM metric m WHERE m.hash = metric_hash;
+ SELECT m.id INTO detector_id FROM detector m WHERE m.uuid = detector_uuid;
+ INSERT INTO metric_detector_mapping (metric_id, detector_id) VALUES (metric_id, detector_id);
+ END //
+
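+-- maps every metric whose ukey matches the given wildcard pattern to the detector,
+-- skipping metric/detector pairs that are already mapped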
+CREATE PROCEDURE insert_mapping_wildcard_metric_targets_to_detector (
+ IN detector_uuid CHAR(36),
+ IN metric_ukey CHAR(100)
+)
+ BEGIN
+ DECLARE metric_id INT(10) UNSIGNED;
+ DECLARE detector_id INT(10) UNSIGNED;
+ DECLARE done INT DEFAULT 0;
+ DECLARE present INT DEFAULT 0;
+ DECLARE cur1 cursor for SELECT m.id FROM metric m WHERE m.ukey LIKE metric_ukey;
+ DECLARE continue handler for not found set done=1;
+
+ open cur1;
+
+ REPEAT
+ FETCH cur1 into metric_id;
+ if NOT done then
+ SELECT id INTO detector_id FROM detector WHERE uuid = detector_uuid;
+ IF NOT EXISTS (SELECT 1 FROM metric_detector_mapping m3 WHERE m3.metric_id = metric_id and m3.detector_id = detector_id)
+ THEN
+ INSERT INTO metric_detector_mapping (metric_id, detector_id) VALUES (metric_id, detector_id);
+ END IF;
+ END IF;
+ UNTIL done END REPEAT;
+ close cur1;
+ END //
+
+DELIMITER ;
diff --git a/docker/adaptive-alerting/stubs/__files/get_detectors.json b/docker/adaptive-alerting/stubs/__files/get_detectors.json
new file mode 100644
index 000000000..9a73fce0a
--- /dev/null
+++ b/docker/adaptive-alerting/stubs/__files/get_detectors.json
@@ -0,0 +1,10 @@
+{
+ "_embedded": {
+ "models": []
+ },
+ "_links": {
+ "self": {
+ "href": "http://modelservice:8080/api/detectors/search/findByMetricHash?hash=1234"
+ }
+ }
+}
diff --git a/docker/adaptive-alerting/stubs/__files/get_models.json b/docker/adaptive-alerting/stubs/__files/get_models.json
new file mode 100644
index 000000000..bff3f9d94
--- /dev/null
+++ b/docker/adaptive-alerting/stubs/__files/get_models.json
@@ -0,0 +1,10 @@
+{
+ "_embedded": {
+ "detectors": []
+ },
+ "_links": {
+ "self": {
+ "href": "http://modelservice:8080/api/models/search/findByMetricHash?hash=1234"
+ }
+ }
+}
diff --git a/docker/adaptive-alerting/stubs/mappings/get-detectors.json b/docker/adaptive-alerting/stubs/mappings/get-detectors.json
new file mode 100644
index 000000000..7e7795a36
--- /dev/null
+++ b/docker/adaptive-alerting/stubs/mappings/get-detectors.json
@@ -0,0 +1,15 @@
+{
+ "request": {
+ "method": "GET",
+ "urlPath": "/api/detectors/search/findByMetricHash",
+ "queryParameters": {
+ "hash" : {
+ "matches" : ".*"
+ }
+ }
+ },
+ "response": {
+ "status": 200,
+ "bodyFileName": "get_detectors.json"
+ }
+}
diff --git a/docker/adaptive-alerting/stubs/mappings/get-models.json b/docker/adaptive-alerting/stubs/mappings/get-models.json
new file mode 100644
index 000000000..b5bddf483
--- /dev/null
+++ b/docker/adaptive-alerting/stubs/mappings/get-models.json
@@ -0,0 +1,16 @@
+{
+ "request": {
+ "method": "GET",
+ "urlPath": "/api/models/search/findByMetricHash",
+ "queryParameters": {
+ "hash" : {
+ "matches" : ".*"
+ }
+ }
+
+ },
+ "response": {
+ "status": 200,
+ "bodyFileName": "get_models.json"
+ }
+}
diff --git a/docker/agent/default.conf b/docker/agent/default.conf
new file mode 100644
index 000000000..fe1125af4
--- /dev/null
+++ b/docker/agent/default.conf
@@ -0,0 +1,55 @@
+agents {
+ spans {
+ enabled = true
+ port = 35000
+ dispatchers {
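+      # dispatch received spans to the proto-spans kafka topic (created by the base docker-compose stack)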
+ kafka {
+ bootstrap.servers = "kafkasvc:9092"
+ producer.topic = "proto-spans"
+ buffer.memory = 1048576
+ retries = 2
+ }
+ }
+ }
+ ossblobs {
+ enabled = false
+ port = 35001
+ max.blob.size.in.kb = 512
+ dispatchers {
+ s3 {
+ keep.alive = true
+ max.outstanding.requests = 150
+ should.wait.for.upload = false
+ max.connections = 50
+ retry.count = 1
+ bucket.name = "haystack-blobs"
+ region = "us-east-1"
+ aws.access.key = "accessKey"
+ aws.secret.key = "secretKey"
+ }
+ }
+ }
+
+ pitchfork {
+ enabled = false
+ port = 9411
+ http.threads {
+ max = 16
+ min = 2
+ }
+ gzip.enabled = true
+ idle.timeout.ms = 60000
+ stop.timeout.ms = 30000
+ accept.null.timestamps = false
+ max.timestamp.drift.sec = -1
+
+ dispatchers {
+ kafka {
+ bootstrap.servers = "kafkasvc:9092"
+ producer.topic = "proto-spans"
+ buffer.memory = 1048576
+ retries = 2
+ }
+ }
+ }
+}
diff --git a/docker/agent/docker-compose.yml b/docker/agent/docker-compose.yml
new file mode 100644
index 000000000..a123fbb7a
--- /dev/null
+++ b/docker/agent/docker-compose.yml
@@ -0,0 +1,13 @@
+version: "3"
+services:
+ haystack-agent:
+ image: expediadotcom/haystack-agent:latest
+ volumes:
+ # make sure you run docker-compose from the
+ # root path
+ - ./agent/default.conf:/app/bin/default.conf
+ environment:
+ JAVA_XMS: 128m
+ haystack_env_agents_spans_port: 35000
+ ports:
+ - "35000:35000"
diff --git a/docker/connectors.json b/docker/connectors.json
new file mode 100644
index 000000000..0d0e9a22a
--- /dev/null
+++ b/docker/connectors.json
@@ -0,0 +1,3 @@
+{
+ "connectors": { }
+}
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
new file mode 100644
index 000000000..761fa4f08
--- /dev/null
+++ b/docker/docker-compose.yml
@@ -0,0 +1,64 @@
+#
+# Copyright 2018 Expedia, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+version: "3"
+services:
+ elasticsearch:
+ image: elastic/elasticsearch:6.0.1
+ environment:
+ ES_JAVA_OPTS: "-Xms512m -Xmx512m"
+ xpack.security.enabled: "false"
+ ports:
+ - "9200:9200"
+ restart: always
+
+ cassandra:
+ image: cassandra:3.11.0
+ environment:
+ MAX_HEAP_SIZE: 256m
+ HEAP_NEWSIZE: 256m
+  # uncomment the port mapping below to expose this application outside the local docker network
+# ports:
+# - "9042:9042"
+
+ zookeeper:
+ image: wurstmeister/zookeeper
+ ports:
+ - "2181:2181"
+
+ kafkasvc:
+ image: wurstmeister/kafka:2.11-1.1.1
+ depends_on:
+ - zookeeper
+ environment:
+ KAFKA_BROKER_ID: 1
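+      # the INSIDE listener serves containers on the compose network; OUTSIDE maps to localhost:19092 for host clients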
+ KAFKA_ADVERTISED_LISTENERS: INSIDE://kafkasvc:9092,OUTSIDE://localhost:19092
+ KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:19092
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
+ KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
+ KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+ KAFKA_CREATE_TOPICS: "proto-spans:1:1,metricpoints:1:1,metric-data-points:1:1,mdm:1:1,metrics:1:1,graph-nodes:1:1,service-graph:1:1,mapped-metrics:1:1,anomalies:1:1,alerts:1:1"
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ ports:
+ - "9092:9092"
+ - "19092:19092"
+
+ ui:
+ image: expediadotcom/haystack-ui:1.1.7
+ volumes:
+ - ./:/data
+ ports:
+ - "8080:8080"
diff --git a/docker/example/blobs/docker-compose.yml b/docker/example/blobs/docker-compose.yml
new file mode 100644
index 000000000..a70dc379f
--- /dev/null
+++ b/docker/example/blobs/docker-compose.yml
@@ -0,0 +1,56 @@
+version: '3'
+services:
+ s3rver:
+ image: vaibhavsawhney1511/s3rver
+ ports:
+ - "5000:5000"
+
+ haystack-agent:
+ # it is expected that this example is run along with the
+ # agent/docker-compose.yml file
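+ # e.g. (illustrative; adjust the -f paths to your working directory):
+ #   docker-compose -f ../../agent/docker-compose.yml -f docker-compose.yml up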
+
+ depends_on:
+ - s3rver
+ environment:
+ JAVA_XMS: 128m
+ haystack_env_agents_spans_port: 35000
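+ # each HAYSTACK_PROP_* variable below appears to override the matching dotted
+ # key in the agent's default.conf (mapping inferred from the agent config
+ # above; e.g. ..._S3_BUCKET_NAME -> dispatchers.s3.bucket.name)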
+ HAYSTACK_PROP_AGENTS_OSSBLOBS_ENABLED: "true"
+ HAYSTACK_PROP_AGENTS_OSSBLOBS_PORT: 35001
+ HAYSTACK_PROP_AGENTS_OSSBLOBS_DISPATCHERS_S3_SERVICE_ENDPOINT: http://s3rver:5000
+ HAYSTACK_PROP_AGENTS_OSSBLOBS_DISPATCHERS_S3_PATH_STYLE_ACCESS_ENABLED: "true"
+ HAYSTACK_PROP_AGENTS_OSSBLOBS_DISPATCHERS_S3_DISABLE_CHUNKED_ENCODING: "true"
+ HAYSTACK_PROP_AGENTS_OSSBLOBS_DISPATCHERS_S3_aws_access_key: "S3RVER"
+ HAYSTACK_PROP_AGENTS_OSSBLOBS_DISPATCHERS_S3_aws_secret_key: "S3RVER"
+ HAYSTACK_PROP_AGENTS_OSSBLOBS_DISPATCHERS_S3_BUCKET_NAME: "s3rver"
+ HAYSTACK_PROP_AGENTS_OSSBLOBS_DISPATCHERS_S3_SHOULD_WAIT_FOR_UPLOAD: "false"
+ ports:
+ - "35000:35000"
+ - "35001:35001"
+
+ reverse-proxy:
+ image: expediadotcom/blobs-http-reverse-proxy:latest
+ depends_on:
+ - haystack-agent
+ environment:
+ grpc-server-endpoint: haystack-agent:35001
+ http-port: ":35002"
+ ports:
+ - "35002:35002"
+
+ haystack-blob-example-client:
+ image: expediadotcom/haystack-blob-example-client:4b43b0858d8be7455a830df430e7f0a4a0a8afbf
+ depends_on:
+ - haystack-blob-example-server
+ expose:
+ - "9091"
+ ports:
+ - "9091:9091"
+
+ haystack-blob-example-server:
+ image: expediadotcom/haystack-blob-example-server:4b43b0858d8be7455a830df430e7f0a4a0a8afbf
+ depends_on:
+ - reverse-proxy
+ expose:
+ - "9090"
+ ports:
+ - "9090:9090"
\ No newline at end of file
diff --git a/docker/example/opentelemetry/docker-compose.yml b/docker/example/opentelemetry/docker-compose.yml
new file mode 100644
index 000000000..1ad64a7d9
--- /dev/null
+++ b/docker/example/opentelemetry/docker-compose.yml
@@ -0,0 +1,35 @@
+#
+# Copyright 2018 Expedia, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+version: "3"
+
+services:
+ haystack-agent:
+ # it is expected that this example is run along with the
+ # agent/docker-compose.yml file
+
+ environment:
+ HAYSTACK_PROP_AGENTS_PITCHFORK_ENABLED: "true"
+ ports:
+ - "9411:9411"
+ expose:
+ - "9411"
+
+ haystack-opentelemetry-example:
+ image: expediadotcom/haystack-opentelemetry-example:latest
+ depends_on:
+ - haystack-agent
+ ports:
+ - "9090:9090"
diff --git a/docker/example/traces/docker-compose.yml b/docker/example/traces/docker-compose.yml
new file mode 100644
index 000000000..8aa875047
--- /dev/null
+++ b/docker/example/traces/docker-compose.yml
@@ -0,0 +1,28 @@
+version: "3"
+services:
+ haystack-agent:
+ # it is expected that this example is run along with the
+ # agent/docker-compose.yml file
+
+ frontend:
+ image: expediadotcom/opentracing-spring-haystack-example
+ environment:
+ APP_MODE: frontend
+ SPRING_PROFILE: remote
+ HAYSTACK_AGENT_HOST: haystack-agent
+ BACKEND_URL: http://backend:9091
+ HAYSTACK_BLOBS_ENABLED: "false"
+ depends_on:
+ - haystack-agent
+ ports:
+ - "9090:9090"
+
+ backend:
+ image: expediadotcom/opentracing-spring-haystack-example
+ environment:
+ APP_MODE: backend
+ SPRING_PROFILE: remote
+ HAYSTACK_AGENT_HOST: haystack-agent
+ HAYSTACK_BLOBS_ENABLED: "false"
+ depends_on:
+ - haystack-agent
diff --git a/docker/run.sh b/docker/run.sh
new file mode 100755
index 000000000..d697c6c93
--- /dev/null
+++ b/docker/run.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
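+# seed Elasticsearch's whitelist-index-fields document so the "error" and
+# "http-status" span fields are indexed and searchable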
+curl -XPOST -H "Content-Type: application/json" -d '{
+ "fields": [{
+ "name": "error",
+ "type": "string",
+ "enabled": true
+ }, {
+ "name": "http-status",
+ "type": "string",
+ "aliases": [ "http.status_code" ],
+ "enabled": true
+ }]
+}' "http://localhost:9200/reload-configs/whitelist-index-fields/1" 2>1 1>/dev/null
+
+COUNT=0
+URL=$1
+
+[[ -z ${URL} ]] && URL=http://localhost:9090/hello
+
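+# hit the target endpoint once per second to generate a steady stream of traces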
+while true
+do
+ COUNT=$((COUNT+1))
+ curl ${URL}
+ echo " ${COUNT}"
+ sleep 1
+done
diff --git a/docker/service-graph/docker-compose.yml b/docker/service-graph/docker-compose.yml
new file mode 100644
index 000000000..ae6cb6c9a
--- /dev/null
+++ b/docker/service-graph/docker-compose.yml
@@ -0,0 +1,49 @@
+#
+# Copyright 2018 Expedia, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+version: "3"
+services:
+ node-finder:
+ image: expediadotcom/haystack-service-graph-node-finder:1.0.17
+ environment:
+ JAVA_XMS: 128m
+ HAYSTACK_GRAPHITE_ENABLED: "false"
+ HAYSTACK_PROP_KAFKA_NODE_METADATA_TOPIC_PARTITION_COUNT: "1"
+ HAYSTACK_PROP_KAFKA_NODE_METADATA_TOPIC_REPLICATION_FACTOR: "1"
+ depends_on:
+ - "kafkasvc"
+ entrypoint: ["/dockerize","-wait=tcp://kafkasvc:9092","-timeout=200s","-wait-retry-interval=40s","--","./start-app.sh"]
+ restart: always
+
+ graph-builder:
+ image: expediadotcom/haystack-service-graph-graph-builder:1.0.17
+ environment:
+ JAVA_XMS: 128m
+ HAYSTACK_GRAPHITE_ENABLED: "false"
+ HAYSTACK_PROP_SERVICE_HTTP_PORT: "8091"
+ HAYSTACK_PROP_KAFKA_STREAMS_REPLICATION_FACTOR: 1
+ depends_on:
+ - "kafkasvc"
+ entrypoint: ["/dockerize","-wait=tcp://kafkasvc:9092","-timeout=200s","-wait-retry-interval=40s","--","./start-app.sh"]
+ restart: always
+ ports:
+ - "8091:8091"
+
+ ui:
+ environment:
+ HAYSTACK_OVERRIDES_CONFIG_PATH: /data/connectors.json
+ HAYSTACK_PROP_CONNECTORS_SERVICE__GRAPH_CONNECTOR__NAME: "haystack"
+ HAYSTACK_PROP_CONNECTORS_SERVICE__GRAPH_WINDOW__SIZE__IN__SECS: 3600
+ HAYSTACK_PROP_CONNECTORS_SERVICE__GRAPH_SERVICE__GRAPH__URL: "http://graph-builder:8091/servicegraph"
diff --git a/docker/traces/docker-compose.yml b/docker/traces/docker-compose.yml
new file mode 100644
index 000000000..ef12d16f3
--- /dev/null
+++ b/docker/traces/docker-compose.yml
@@ -0,0 +1,63 @@
+#
+# Copyright 2018 Expedia, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+version: "3"
+services:
+ storage-backend:
+ image: expediadotcom/haystack-trace-backend-cassandra
+ environment:
+ HAYSTACK_GRAPHITE_ENABLED: "false"
+ HAYSTACK_LOG_LEVEL: "DEBUG"
+ JAVA_XMS: 128m
+ depends_on:
+ - "cassandra"
+ entrypoint: ["/dockerize","-wait=tcp://cassandra:9042","-timeout=200s","-wait-retry-interval=20s","--","./start-app.sh"]
+ restart: always
+ # uncomment the port mapping below to expose this application outside the local Docker network
+# ports:
+# - "8090:8090"
+
+ trace-reader:
+ image: expediadotcom/haystack-trace-reader:1.0.10
+ environment:
+ HAYSTACK_GRAPHITE_ENABLED: "false"
+ HAYSTACK_PROP_BACKEND_CLIENT_HOST: "storage-backend"
+ JAVA_XMS: 128m
+ entrypoint: ["/dockerize","-wait=tcp://storage-backend:8090","-timeout=200s","-wait-retry-interval=40s","--","./start-app.sh"]
+ restart: always
+
+ trace-indexer:
+ image: expediadotcom/haystack-trace-indexer:1.0.10
+ environment:
+ HAYSTACK_GRAPHITE_ENABLED: "false"
+ HAYSTACK_PROP_BACKEND_CLIENT_HOST: "storage-backend"
+ HAYSTACK_PROP_SERVICE_METADATA_ENABLED: "true"
+ HAYSTACK_PROP_KAFKA_MAX_WAKEUPS: "100"
+ HAYSTACK_PROP_SERVICE_METADATA_FLUSH_INTERVAL_SEC: "0"
+ JAVA_XMS: 128m
+ entrypoint: ["/dockerize","-wait=tcp://kafkasvc:9092","-timeout=200s","-wait-retry-interval=40s","--","./start-app.sh"]
+ depends_on:
+ - "elasticsearch"
+ restart: always
+
+ ui:
+ environment:
+ HAYSTACK_OVERRIDES_CONFIG_PATH: /data/connectors.json
+ HAYSTACK_PROP_CONNECTORS_TRACES_CONNECTOR__NAME: "haystack"
+ HAYSTACK_PROP_CONNECTORS_TRACES_SERVICE__REFRESH__INTERVAL__IN__SECS: "0"
+ HAYSTACK_PROP_CONNECTORS_TRACES_HAYSTACK__HOST: "trace-reader"
+ HAYSTACK_PROP_CONNECTORS_TRACES_HAYSTACK__PORT: "8088"
+ HAYSTACK_PROP_CONNECTORS_BLOBS_ENABLE__BLOBS: "true"
+ HAYSTACK_PROP_CONNECTORS_BLOBS_BLOBS__URL : "http://localhost:35002"
diff --git a/docker/trends/docker-compose.yml b/docker/trends/docker-compose.yml
new file mode 100644
index 000000000..bdf796fb3
--- /dev/null
+++ b/docker/trends/docker-compose.yml
@@ -0,0 +1,64 @@
+#
+# Copyright 2018 Expedia, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+version: "3"
+services:
+ metrictank:
+ image: grafana/metrictank:0.10.1
+ environment:
+ MT_HTTP_MULTI_TENANT: "false"
+ MT_CARBON_IN_ENABLED: "false"
+ MT_KAFKA_MDM_IN_ENABLED: "true"
+ MT_CASSANDRA_ADDRS: "cassandra:9042"
+ MT_KAFKA_MDM_IN_BROKERS: "kafkasvc:9092"
+ MT_CASSANDRA_IDX_HOSTS: "cassandra:9042"
+ MT_STATS_ENABLED: "false"
+ MT_MEMORY_IDX_TAG_SUPPORT: "true"
+ depends_on:
+ - "kafkasvc"
+ restart: always
+ # uncomment the port mapping below to expose this application outside the local Docker network
+# ports:
+# - "6060:6060"
+
+ trends-transformer:
+ image: expediadotcom/haystack-span-timeseries-transformer:1.1.3
+ environment:
+ HAYSTACK_GRAPHITE_ENABLED: "false"
+ HAYSTACK_PROP_KAFKA_STREAMS_BOOTSTRAP_SERVERS: "kafkasvc:9092"
+ HAYSTACK_PROP_KAFKA_PRODUCER_TOPIC: "metric-data-points"
+ JAVA_XMS: 128m
+ entrypoint: ["/dockerize","-wait=tcp://kafkasvc:9092","-timeout=200s","-wait-retry-interval=40s","--","./start-app.sh"]
+ depends_on:
+ - "kafkasvc"
+ restart: always
+
+ trends-aggregator:
+ image: expediadotcom/haystack-timeseries-aggregator:1.1.3
+ environment:
+ HAYSTACK_GRAPHITE_ENABLED: "false"
+ HAYSTACK_PROP_KAFKA_STREAMS_BOOTSTRAP_SERVERS: "kafkasvc:9092"
+ HAYSTACK_PROP_KAFKA_CONSUMER_TOPIC: "metric-data-points"
+ JAVA_XMS: 128m
+ entrypoint: ["/dockerize","-wait=tcp://kafkasvc:9092","-timeout=200s","-wait-retry-interval=40s","--","./start-app.sh"]
+ depends_on:
+ - "kafkasvc"
+ restart: always
+
+ ui:
+ environment:
+ HAYSTACK_OVERRIDES_CONFIG_PATH: /data/connectors.json
+ HAYSTACK_PROP_CONNECTORS_TRENDS_CONNECTOR__NAME: "haystack"
+ HAYSTACK_PROP_CONNECTORS_TRENDS_METRIC__TANK__URL: "http://metrictank:6060"
diff --git a/docker/zipkin/docker-compose.yml b/docker/zipkin/docker-compose.yml
new file mode 100644
index 000000000..bcda5a90a
--- /dev/null
+++ b/docker/zipkin/docker-compose.yml
@@ -0,0 +1,48 @@
+#
+# Copyright 2018 Expedia, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+version: "3"
+
+services:
+ pitchfork:
+ image: hotelsdotcom/pitchfork:latest
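+ # Pitchfork accepts spans over the Zipkin API and, per the forwarder settings
+ # below, relays them both to Zipkin over HTTP and to Haystack's Kafka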
+ ports:
+ - "9411:9411"
+ environment:
+ PITCHFORK_FORWARDERS_LOGGING_ENABLED: "true"
+ PITCHFORK_FORWARDERS_LOGGING_LOG_FULL_SPAN: "true"
+ PITCHFORK_FORWARDERS_ZIPKIN_HTTP_ENABLED: "true"
+ PITCHFORK_FORWARDERS_ZIPKIN_HTTP_ENDPOINT: "http://zipkin:9411/api/v2/spans"
+ PITCHFORK_FORWARDERS_HAYSTACK_KAFKA_ENABLED: "true"
+ PITCHFORK_FORWARDERS_HAYSTACK_KAFKA_BOOTSTRAP_SERVERS: "kafkasvc:9092"
+
+ zipkin:
+ image: openzipkin/zipkin
+ container_name: zipkin
+ environment:
+ STORAGE_TYPE: elasticsearch
+ ES_HOSTS: elasticsearch
+ KAFKA_BOOTSTRAP_SERVERS: kafkasvc:9092
+ ports:
+ - "9412:9411"
+ depends_on:
+ - elasticsearch
+ - kafkasvc
+
+ ui:
+ environment:
+ HAYSTACK_OVERRIDES_CONFIG_PATH: /data/connectors.json
+ HAYSTACK_PROP_CONNECTORS_TRACES_CONNECTOR__NAME: "zipkin"
+ HAYSTACK_PROP_CONNECTORS_TRACES_ZIPKIN__URL: 'http://zipkin:9411/api/v2'
diff --git a/idl/.gitignore b/idl/.gitignore
new file mode 100644
index 000000000..d82a0da01
--- /dev/null
+++ b/idl/.gitignore
@@ -0,0 +1,29 @@
+# Compiled class file
+*.class
+
+# Log file
+*.log
+
+# BlueJ files
+*.ctxt
+
+# Mobile Tools for Java (J2ME)
+.mtj.tmp/
+
+# Package Files #
+*.jar
+*.war
+*.ear
+*.zip
+*.tar.gz
+*.rar
+.idea
+*/.idea
+*.iml
+*/*.ipr
+./build/*
+
+# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
+hs_err_pid*
+java/target/
+fakespans/fakespans
diff --git a/idl/LICENSE b/idl/LICENSE
new file mode 100644
index 000000000..9f133f5cd
--- /dev/null
+++ b/idl/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2017 Expedia, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/idl/README.md b/idl/README.md
new file mode 100644
index 000000000..bcf5008d7
--- /dev/null
+++ b/idl/README.md
@@ -0,0 +1,12 @@
+# haystack-idl
+Span and the other data models used by Haystack are defined as [Protocol Buffer](https://developers.google.com/protocol-buffers/) files in the [proto](./proto) folder.
+
+## Generating Java source for Haystack Spans
+A simple Maven pom file is available in the [java](./java) folder to compile the Haystack proto files into a jar.
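+
+For example, a standard Maven invocation should work (illustrative; exact goals depend on the pom):
+
+```bash
+cd java
+mvn clean package   # compiles the proto files and packages the generated classes into a jar
+```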
+
+## Creating test data in kafka
+A simple Go utility to generate and send sample Spans to Kafka is in the [fakespans](./fakespans) folder.
+
+## Building fakespans
+```docker run --rm -it -v "$PWD":/usr/src/app -w /usr/src/app golang:1.8 /usr/src/app/build.sh```
+
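+## Running fakespans
+build.sh drops cross-compiled binaries into the `build/` folder, named `fakespans-<os>-<arch>`. For example, on 64-bit Linux (run the binary directly; see the fakespans source for its options):
+
+```bash
+./build/fakespans-linux-amd64
+```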
diff --git a/idl/build.sh b/idl/build.sh
new file mode 100755
index 000000000..99b2cdbee
--- /dev/null
+++ b/idl/build.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+mkdir -p build
+cd fakespans
+go get github.com/Shopify/sarama
+go get github.com/codeskyblue/go-uuid
+go get github.com/golang/protobuf/proto
+
+
+for GOOS in darwin linux windows; do
+ for GOARCH in 386 amd64; do
+ echo "Building for ${GOOS} - ${GOARCH}"
+ export GOOS=${GOOS}
+ export GOARCH=${GOARCH}
+ go build -v -o ../build/fakespans-$GOOS-$GOARCH
+ done
+done
\ No newline at end of file
diff --git a/idl/proto/agent/spanAgent.proto b/idl/proto/agent/spanAgent.proto
new file mode 100644
index 000000000..edca7b03e
--- /dev/null
+++ b/idl/proto/agent/spanAgent.proto
@@ -0,0 +1,39 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+syntax = "proto3";
+import "span.proto";
+
+option java_package = "com.expedia.open.tracing.agent.api";
+option java_multiple_files = true;
+option go_package = "haystack";
+
+message DispatchResult {
+ ResultCode code = 1; // result code is 0 only for a successful dispatch
+ string error_message = 2; // error message if result code is non-zero
+
+ enum ResultCode {
+ SUCCESS = 0;
+ UNKNOWN_ERROR = 1;
+ RATE_LIMIT_ERROR = 2;
+ }
+}
+
+// service interface to push spans to haystack agent
+service SpanAgent {
+ rpc dispatch (Span) returns (DispatchResult); // dispatch span to haystack agent
+}
diff --git a/idl/proto/api/anomaly/anomalyReader.proto b/idl/proto/api/anomaly/anomalyReader.proto
new file mode 100644
index 000000000..7255c3905
--- /dev/null
+++ b/idl/proto/api/anomaly/anomalyReader.proto
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+syntax = "proto3";
+
+option java_package = "com.expedia.open.tracing.api.anomaly";
+option java_multiple_files = true;
+option go_package = "haystack";
+
+
+message SearchAnamoliesRequest {
+ map<string, string> labels = 1;
+ int64 startTime = 2;
+ int64 endTime = 3;
+ int32 size = 4;
+}
+
+message Anomaly {
+ double expectedValue = 1;
+ double observedValue = 2;
+ int64 timestamp = 3;
+}
+
+message SearchAnamolyResponse {
+ string name = 1;
+ map<string, string> labels = 2;
+ repeated Anomaly anomalies = 3;
+}
+
+message SearchAnomaliesResponse {
+ repeated SearchAnamolyResponse searchAnomalyResponse = 1;
+}
+
+
+service AnomalyReader {
+ rpc getAnomalies(SearchAnamoliesRequest) returns (SearchAnomaliesResponse); // fetches the anomalies
+}
diff --git a/idl/proto/api/subscription/subscriptionManagement.proto b/idl/proto/api/subscription/subscriptionManagement.proto
new file mode 100644
index 000000000..3a5a0a743
--- /dev/null
+++ b/idl/proto/api/subscription/subscriptionManagement.proto
@@ -0,0 +1,117 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+syntax = "proto3";
+
+option java_package = "com.expedia.open.tracing.api.subscription";
+option java_multiple_files = true;
+option go_package = "haystack";
+
+message Empty {
+
+}
+
+enum DispatchType {
+ EMAIL = 0;
+ SLACK = 1;
+}
+
+message Field {
+ string name = 1; // name of the field
+ string value = 2; // value of the field
+}
+
+message Operand {
+ oneof operand {
+ Field field = 1; // leaf field value
+ ExpressionTree expression = 2; // a nested expression tree
+ }
+}
+
+message ExpressionTree {
+ enum Operator {
+ AND = 0;
+ OR = 1;
+ }
+
+ Operator operator = 1; // operator to apply to the operand results
+ repeated Operand operands = 2; // list of operands
+}
+
+message Dispatcher {
+ DispatchType type = 1;
+ string endpoint = 2;
+}
+
+message SubscriptionRequest {
+ ExpressionTree expressionTree = 1;
+ repeated Dispatcher dispatchers = 2;
+}
+
+message User {
+ string username = 1;
+}
+
+message CreateSubscriptionRequest {
+ User user = 1;
+ SubscriptionRequest subscriptionRequest = 2;
+}
+
+message CreateSubscriptionResponse {
+ string subscriptionId = 1;
+}
+
+message SubscriptionResponse {
+ string subscriptionId = 1;
+ User user = 2;
+ repeated Dispatcher dispatchers = 3;
+ ExpressionTree expressionTree = 4;
+ int64 lastModifiedTime = 5;
+ int64 createdTime = 6;
+}
+
+message SearchSubscriptionResponse {
+ repeated SubscriptionResponse subscriptionResponse = 1;
+}
+
+message UpdateSubscriptionRequest {
+ string subscriptionId = 1;
+ SubscriptionRequest subscriptionRequest = 2;
+}
+
+message DeleteSubscriptionRequest {
+ string subscriptionId = 1;
+}
+
+message SearchSubscriptionRequest {
+ User user = 1;
+ map<string, string> labels = 2;
+}
+
+message GetSubscriptionRequest {
+ string subscriptionId = 1;
+}
+
+
+service SubscriptionManagement {
+ rpc createSubscription(CreateSubscriptionRequest) returns (CreateSubscriptionResponse); // create a new subscription. Returns a subscription Id
+ rpc updateSubscription(UpdateSubscriptionRequest) returns (Empty); // update a subscription; updates are idempotent.
+ rpc deleteSubscription(DeleteSubscriptionRequest) returns (Empty); // delete a subscription.
+ rpc getSubscription(GetSubscriptionRequest) returns (SubscriptionResponse); // Fetch a subscription given the id of the subscription.
+ rpc searchSubscription(SearchSubscriptionRequest) returns (SearchSubscriptionResponse); // search subscription given a set of labels.
+}
\ No newline at end of file
diff --git a/idl/proto/api/traceReader.proto b/idl/proto/api/traceReader.proto
new file mode 100644
index 000000000..9aba6af36
--- /dev/null
+++ b/idl/proto/api/traceReader.proto
@@ -0,0 +1,178 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+syntax = "proto3";
+import "span.proto";
+
+option java_package = "com.expedia.open.tracing.api";
+option java_multiple_files = true;
+option go_package = "haystack";
+
+// collection of spans belonging to a single Trace
+message Trace {
+ string traceId = 1;
+ repeated Span childSpans = 2;
+}
+
+// request for fetching Trace for traceId
+message TraceRequest {
+ string traceId = 1;
+}
+
+// request for raw traces for a given list of traceIds
+message RawTracesRequest {
+ repeated string traceId = 1;
+}
+
+// list of filtered traces
+message RawTracesResult {
+ repeated Trace traces = 1;
+}
+
+// request for fetching the span for a given traceId and spanId
+message SpanRequest {
+ string traceId = 1;
+ string spanId = 2;
+}
+
+message SpanResponse {
+ repeated Span spans = 1; // list of spans with a given traceId and spanId
+}
+
+// a single operand in the expression tree
+message Operand {
+ oneof operand {
+ Field field = 1; // leaf field value
+ ExpressionTree expression = 2; // a nested expression tree
+ }
+}
+
+// nested n-ary expression tree specifying the filter expression;
+// each node applies an operator to its list of operands
+message ExpressionTree {
+ enum Operator {
+ AND = 0;
+ OR = 1;
+ }
+
+ Operator operator = 1; // operator to apply to the operand results
+ repeated Operand operands = 2; // list of operands
+ bool isSpanLevelExpression = 3; // if this expression is a span level or trace level filter
+}
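+
+// An illustrative ExpressionTree instance in proto text format, encoding
+// the filter (serviceName == "foo" AND error == "true"):
+//   operator: AND
+//   operands { field { name: "serviceName" value: "foo" } }
+//   operands { field { name: "error" value: "true" } }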
+
+// criteria for searching traces
+message TracesSearchRequest {
+ repeated Field fields = 1 [deprecated=true]; // fields to filter traces
+ int64 startTime = 2; // search window start time in microseconds since epoch
+ int64 endTime = 3; // search window end time in microseconds since epoch
+ int32 limit = 4; // limit on number of results to return
+ ExpressionTree filterExpression = 5; // expression tree for trace search filters
+}
+
+// list of filtered traces
+message TracesSearchResult {
+ repeated Trace traces = 1;
+}
+
+// request for fetching per-interval trace counts for a search
+message TraceCountsRequest {
+ repeated Field fields = 1 [deprecated=true]; // fields to filter traces
+ int64 startTime = 2; // search window start time in microseconds since epoch
+ int64 endTime = 3; // search window end time in microseconds since epoch
+ int64 interval = 4; // interval in microseconds
+ ExpressionTree filterExpression = 5; // expression tree for trace search filters
+}
+
+// trace count list
+message TraceCounts {
+ repeated TraceCount traceCount = 1;
+}
+
+// count of traces for an interval
+message TraceCount {
+ int64 timestamp = 1; // end time of the trace search result in microseconds since epoch
+ int64 count = 2; // count of traces
+}
+
+// Field is a general abstraction over data associated with a span.
+// It can represent any indexed span attribute such as a tag, log, spanName, or operationName.
+message Field {
+ enum Operator { // define the operator between name and its value
+ EQUAL = 0;
+ GREATER_THAN = 1;
+ LESS_THAN = 2;
+ NOT_EQUAL = 3;
+ }
+
+ string name = 1; // name of the field
+ string value = 2; // value of the field
+ Operator operator = 3; // operation between name and value, default is EQUAL
+}
+
+// An empty message type for rq/rs
+message Empty {}
+
+// query for fetching values for a given field
+message FieldValuesRequest {
+ string fieldName = 1; // name of field to query for
+ repeated Field filters = 2; // provided fields to be used for filtering
+}
+
+// whitelisted field metadata to accompany field name
+message FieldMetadata {
+ bool isRangeQuery = 1;
+}
+
+message FieldNames {
+ repeated string names = 1;
+ repeated FieldMetadata fieldMetadata = 2;
+}
+
+message FieldValues {
+ repeated string values = 1;
+}
+
+message CallNode {
+ string serviceName = 1;
+ string operationName = 2;
+ string infrastructureProvider = 3; // infrastructure provider hosting the service
+ string infrastructureLocation = 4; // infrastructure location hosting the service
+ string duration = 5; // duration of the call perceived by the service
+}
+
+message Call {
+ CallNode from = 1; // service node from which call was started
+ CallNode to = 2; // service node to which call was terminated
+ int64 networkDelta = 3; // time delta spent in network transit between the two nodes
+}
+
+message TraceCallGraph {
+ repeated Call calls = 1; // list of service calls
+}
+
+// service interface to search and get traces
+service TraceReader {
+ rpc searchTraces (TracesSearchRequest) returns (TracesSearchResult); // search for traces based on filter fields and other criteria
+ rpc getTraceCounts (TraceCountsRequest) returns (TraceCounts); // fetch per interval count of traces search
+ rpc getTrace (TraceRequest) returns (Trace); // fetch a trace using traceId
+ rpc getRawTrace (TraceRequest) returns (Trace); // fetch a trace in raw un-transformed format using traceId
+ rpc getRawSpan (SpanRequest) returns (SpanResponse); // fetch a span of a trace in raw un-transformed format using traceId and spanId
+ rpc getFieldNames (Empty) returns (FieldNames); // get all searchable Fields available in haystack system
+ rpc getFieldValues (FieldValuesRequest) returns (FieldValues); // get values for a given Field
+ rpc getTraceCallGraph (TraceRequest) returns (TraceCallGraph); // get graph of service calls made in the given traceId
+ rpc getRawTraces (RawTracesRequest) returns (RawTracesResult); // get raw traces for given list of traceIds
+}
diff --git a/idl/proto/backend/storageBackend.proto b/idl/proto/backend/storageBackend.proto
new file mode 100644
index 000000000..1bdd37a70
--- /dev/null
+++ b/idl/proto/backend/storageBackend.proto
@@ -0,0 +1,68 @@
+/*
+ *
+ * Copyright 2018 Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+syntax = "proto3";
+
+option java_package = "com.expedia.open.tracing.backend";
+option java_multiple_files = true;
+option go_package = "haystack";
+
+
+message Field {
+ string name = 1; // name of the field
+ string value = 2; // value of the field
+}
+message Metadata {
+ repeated Field fields = 1;
+}
+
+message TraceRecord {
+ string traceId = 1;
+ bytes spans = 2; // serialized span data for this trace
+ Metadata metadata = 3;
+ int64 timestamp = 4;
+}
+
+// query for writing trace records to persistent store
+message WriteSpansRequest {
+ repeated TraceRecord records = 1;
+}
+
+// query for reading trace records from persistent store
+message ReadSpansRequest {
+ repeated string traceIds = 1; // trace ids to read
+}
+
+message ReadSpansResponse {
+ repeated TraceRecord records = 1; // collection of span buffers
+}
+
+message WriteSpansResponse {
+ ResultCode code = 1; // result code is 0 only for a successful dispatch
+ string error_message = 2; // error message if result code is non-zero
+
+ enum ResultCode {
+ SUCCESS = 0;
+ UNKNOWN_ERROR = 1;
+ }
+}
+
+// service interface to write and read traces
+service StorageBackend {
+ rpc writeSpans (WriteSpansRequest) returns (WriteSpansResponse); // write buffered spans to backend
+ rpc readSpans (ReadSpansRequest) returns (ReadSpansResponse); // read buffered spans from backend
+}
diff --git a/idl/proto/blobs/blob.proto b/idl/proto/blobs/blob.proto
new file mode 100644
index 000000000..002d7490f
--- /dev/null
+++ b/idl/proto/blobs/blob.proto
@@ -0,0 +1,30 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+syntax = "proto3";
+
+option java_package = "com.expedia.blobs.model";
+option java_multiple_files = true;
+option go_package = "blob";
+
+// Blob represents the data that needs to be saved for a specific service call.
+message Blob {
+
+ string key = 1; // unique key
+ map<string, string> metadata = 2;
+ bytes content = 3;
+}
diff --git a/idl/proto/blobs/blobAgent.proto b/idl/proto/blobs/blobAgent.proto
new file mode 100644
index 000000000..1469fd0f6
--- /dev/null
+++ b/idl/proto/blobs/blobAgent.proto
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+syntax = "proto3";
+import "blob.proto";
+
+option java_package = "com.expedia.haystack.agent.blobs.api";
+option java_multiple_files = true;
+option go_package = "blob";
+
+message DispatchResult {
+ ResultCode code = 1; // result code is 0 only for a successful dispatch
+ string error_message = 2; // error message if result code is non-zero
+
+ enum ResultCode {
+ SUCCESS = 0;
+ UNKNOWN_ERROR = 1;
+ RATE_LIMIT_ERROR = 2;
+ MAX_SIZE_EXCEEDED_ERROR = 3;
+ }
+}
+
+message BlobReadResponse {
+ Blob blob = 1;
+ ResultCode code = 2;
+ string error_message = 3;
+
+ enum ResultCode {
+ SUCCESS = 0;
+ UNKNOWN_ERROR = 1;
+ }
+}
+
+message FormattedBlobReadResponse {
+ string data = 1;
+}
+
+message BlobSearch {
+ string key = 1;
+}
+
+// service interface to push blobs to haystack agent
+service BlobAgent {
+ rpc dispatch (Blob) returns (DispatchResult); // dispatch blob to haystack agent
+ rpc read (BlobSearch) returns (BlobReadResponse);
+ rpc readBlobAsString(BlobSearch) returns (FormattedBlobReadResponse);
+}
diff --git a/idl/proto/span.proto b/idl/proto/span.proto
new file mode 100644
index 000000000..0f6b3941f
--- /dev/null
+++ b/idl/proto/span.proto
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+syntax = "proto3";
+
+option java_package = "com.expedia.open.tracing";
+option java_multiple_files = true;
+option go_package = "haystack";
+
+// Span represents a unit of work performed by a service.
+message Span {
+
+ string traceId = 1; // unique trace id
+ string spanId = 2; // unique span id
+ string parentSpanId = 3; // optional, a span can have its parent spanId
+ string serviceName = 4; // name of service
+ string operationName = 5; // name of operation
+
+ int64 startTime = 6; // creation time of this span in microseconds since epoch
+ int64 duration = 7; // span duration in microseconds
+
+ repeated Log logs = 8; // arbitrary set of timestamp-aware key-value pairs
+ repeated Tag tags = 9; // arbitrary set of key-value pairs
+}
+
+
+// Log is a timestamped event with a set of tags.
+message Log {
+ int64 timestamp = 1; // timestamp in microseconds since epoch
+ repeated Tag fields = 2;
+}
+
+
+// Tag is a strongly typed key/value pair. We use 'oneof' protobuf attribute to represent the possible tagTypes
+message Tag {
+
+ // TagType denotes the type of a Tag's value.
+ enum TagType {
+ STRING = 0;
+ DOUBLE = 1;
+ BOOL = 2;
+ LONG = 3;
+ BINARY = 4;
+ }
+ string key = 1; // name of the tag key
+ TagType type = 2; // type of tag, namely string, double, bool, long and binary
+ oneof myvalue {
+ string vStr = 3; // string value type
+ int64 vLong = 4; // long value type
+ double vDouble = 5; // double value type
+ bool vBool = 6; // bool value type
+ bytes vBytes = 7; // byte array value type
+ }
+}
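+
+// An illustrative Tag in proto text format (an HTTP status code):
+//   key: "http.status_code"
+//   type: LONG
+//   vLong: 200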
+
+
+// You can optionally use Batch to send a collection of spans. The spans need not belong to a single traceId.
+message Batch {
+ repeated Span spans = 1; // a collection of spans emitted from the process/service
+}
diff --git a/idl/proto/spanBuffer.proto b/idl/proto/spanBuffer.proto
new file mode 100644
index 000000000..41d3632d6
--- /dev/null
+++ b/idl/proto/spanBuffer.proto
@@ -0,0 +1,29 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+syntax = "proto3";
+import "span.proto";
+
+option java_package = "com.expedia.open.tracing.buffer";
+option java_multiple_files = true;
+option go_package = "haystack";
+
+// This entity represents a collection of spans that belong to one traceId
+message SpanBuffer {
+ string traceId = 1; // unique trace id
+ repeated Span childSpans = 2; // list of child spans
+}
diff --git a/service-graph/.gitignore b/service-graph/.gitignore
new file mode 100644
index 000000000..137a11e38
--- /dev/null
+++ b/service-graph/.gitignore
@@ -0,0 +1,22 @@
+target/
+pom.xml.tag
+pom.xml.releaseBackup
+pom.xml.versionsBackup
+pom.xml.next
+release.properties
+dependency-reduced-pom.xml
+buildNumber.properties
+.mvn/timing.properties
+
+# Avoid ignoring Maven wrapper jar file (.jar files are usually ignored)
+!/.mvn/wrapper/maven-wrapper.jar
+
+#intellij
+.idea/
+*.ipr
+*.iws
+*.iml
+
+#app
+*/logs/
+*/local/
diff --git a/service-graph/CONTRIBUTING.md b/service-graph/CONTRIBUTING.md
new file mode 100644
index 000000000..317757128
--- /dev/null
+++ b/service-graph/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+## Bugs
+We use GitHub Issues for our bug reporting. Please make sure the bug isn't already listed before opening a new issue.
+
+## Development
+All work on Haystack happens directly on GitHub. Core Haystack team members will review opened pull requests.
+
+## Requests
+If you see a feature that you would like to be added, please open an issue in the respective repository or in the general Haystack repo.
+
+## Contributing to Documentation
+To contribute to documentation, you can directly modify the corresponding .md files in the docs directory under the base haystack repository, and submit a pull request. Once your PR is merged, the documentation is automatically built and deployed to https://expediadotcom.github.io/haystack.
+
+## License
+By contributing to Haystack, you agree that your contributions will be licensed under its Apache License.
\ No newline at end of file
diff --git a/service-graph/LICENSE b/service-graph/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/service-graph/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/service-graph/Makefile b/service-graph/Makefile
new file mode 100644
index 000000000..afd0e07b8
--- /dev/null
+++ b/service-graph/Makefile
@@ -0,0 +1,32 @@
+.PHONY: all clean build report-coverage node-finder graph-builder snapshotter release
+
+PWD := $(shell pwd)
+
+clean:
+ mvn clean
+
+build: clean
+ mvn package
+
+node-finder:
+ mvn verify -DfinalName=haystack-service-graph-node-finder -pl node-finder -am
+
+graph-builder:
+ mvn verify -DfinalName=haystack-service-graph-graph-builder -pl graph-builder -am
+
+snapshotter:
+ mvn verify -DfinalName=haystack-service-graph-snapshotter -pl snapshotter -am
+
+all: clean node-finder graph-builder snapshotter
+
+# build all and release
+release: clean node-finder graph-builder snapshotter
+ cd node-finder && $(MAKE) release
+ cd graph-builder && $(MAKE) release
+ cd snapshotter && $(MAKE) release
+ ./.travis/deploy.sh
+
+# run coverage tests
+report-coverage:
+ mvn clean scoverage:test scoverage:report-only
+ open target/site/scoverage/index.html
diff --git a/service-graph/README.md b/service-graph/README.md
new file mode 100644
index 000000000..a36d03e95
--- /dev/null
+++ b/service-graph/README.md
@@ -0,0 +1,140 @@
+[![Build Status](https://travis-ci.org/ExpediaDotCom/haystack-service-graph.svg?branch=master)](https://travis-ci.org/ExpediaDotCom/haystack-service-graph)
+[![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](https://github.com/ExpediaDotCom/haystack/blob/master/LICENSE)
+
+# Haystack-service-graph
+
+This repository has two components that focus on
+
+* building a service dependency graph from incoming spans, and
+* computing the network latency between services, which allows
+[haystack-trends](https://github.com/ExpediaDotCom/haystack-trends) to produce latency trends between services.
+
+## Required Reading
+
+In order to understand Haystack, we recommend reading the details of the
+[Haystack](https://expediadotcom.github.io/haystack) project. Haystack is built on
+[Kafka Streams](http://docs.confluent.io/current/streams/index.html),
+so some prior knowledge of Kafka Streams is helpful.
+
+## Component: node-finder
+
+This component discovers the relationships between services. Those relationships are eventually expressed as a graph
+in which the services are the nodes and the operations are the edges. Since client spans do not carry the name of the
+service being called, and server spans do not carry the name of the service calling them, this component accumulates
+incoming spans and uses the `span-id` to discover the dependent services and the operations between them.
+
+Discovered "span pairs" are then used to produce two different outputs:
+
+1. A simple object that has
+ * the calling service name
+ * the called service name
+ * the operation name
+2. A `MetricPoint` object with the `latency` between the service pair, discovered by examining timestamps in the spans.
+
+Like many other components of Haystack, this component is also a `Kafka Streams` application. The picture below shows
+the topology / architecture of this component.
+
+ +---------------+
+ | |
+ | proto-spans |
+ | |
+ +-------+-------+
+ |
+ +---------V----------+
+ | |
+ +----+ span-accumulator +----+
+ | | | |
+ | +--------------------+ |
+ | |
+ +---------V---------+ +------------V------------+
+ | | | |
+ | latency-producer | | nodes-n-edges-producer |
+ | | | |
+ +---------+---------+ +------------+------------+
+ | |
+ +--------V--------+ +---------V---------+
+ | | | |
+ | metric-sink | | graph-nodes-sink |
+ | | | |
+ +-----------------+ +-------------------+
+
+The starting point for the application is the
+[Streams](node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/Streams.scala) class, which
+builds the topology shown in the picture above. This `node-finder` topology consists of one source, three processors
+and two sinks.
+
+* Source: The topology contains a source called `proto-spans`. This source reads a Kafka topic with the same name.
+It uses `SpanDeserializer` as the value deserializer to read incoming spans in the topic.
+
+* Processors:
+  * span-accumulator: This processor accumulates all incoming spans in a PriorityQueue ordered by each span's
+  timestamp, preserving arrival order. Periodically, it traverses the priority queue to find spans with matching
+  span-ids and combines them into client-server span pairs, which are then forwarded to the downstream
+  processors. The accumulation time is configurable via the `accumulator.interval` key. A minor optimization during
+  queue traversal matches recently arrived spans against spans in the next batch; a minimal sketch of the pairing
+  logic appears after this list.
+  * latency-producer: This simple processor, downstream of span-accumulator, produces a `MetricPoint` instance
+  recording the network latency of the current span pair.
+  A sample JSON representation of the metric point looks like:
+ ```json
+ {
+ "metric" : "latency",
+ "type" : "gauge",
+ "value" : 40.0,
+ "epochTimeInSeconds" : 1523637898,
+ "tags" : {
+ "serviceName" : "foo-service",
+ "operationName" : "bar-operation"
+ }
+ }
+ ```
+  * nodes-n-edges-producer: Another simple processor downstream of span-accumulator.
+  For every span pair received, it emits a simple JSON representation of a graph edge, as shown below:
+
+ ```json
+ {
+ "source" : "foo-service",
+ "destination" : "baz-service",
+ "operation" : "bar-operation"
+ }
+ ```
+* Sinks:
+  * metric-sink: This sink is downstream of latency-producer. It serializes each MetricPoint instance with a
+  MessagePack serializer and writes the serialized output to a configured Kafka topic.
+ * graph-nodes-sink: This sink is downstream of nodes-n-edges-producer. It serializes the JSON as a string and writes
+ that string to a configured Kafka topic for the `graph-builder` component to consume and build a service dependency
+ graph.
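+
+To make the pairing step concrete, here is a minimal sketch of the matching logic.
+The `LightSpan` and `SpanPair` shapes, the field names and the latency arithmetic are
+illustrative assumptions, not the actual `node-finder` source:
+
+```scala
+// illustrative shapes only; the real processor works on protobuf spans and a
+// timestamp-ordered PriorityQueue, which this sketch simplifies to a batch
+case class LightSpan(spanId: String, serviceName: String, operationName: String,
+                     startTimeMicros: Long, isClient: Boolean)
+
+case class SpanPair(client: LightSpan, server: LightSpan) {
+  // assumption: network latency approximated as the gap between the client
+  // send timestamp and the server receive timestamp
+  def networkLatencyMicros: Long = server.startTimeMicros - client.startTimeMicros
+}
+
+object SpanMatcher {
+  // group buffered spans by span-id and emit a pair whenever both the client
+  // and the server side of the same call have arrived
+  def matchPairs(buffered: Seq[LightSpan]): (Seq[SpanPair], Seq[LightSpan]) = {
+    val (complete, incomplete) = buffered.groupBy(_.spanId).values.partition { spans =>
+      spans.exists(_.isClient) && spans.exists(!_.isClient)
+    }
+    val pairs = complete.map { spans =>
+      SpanPair(spans.find(_.isClient).get, spans.find(!_.isClient).get)
+    }.toSeq
+    // unmatched spans carry over and are retried against the next batch
+    (pairs, incomplete.flatten.toSeq)
+  }
+}
+```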
+
+## Component: graph-builder
+
+This component takes the graph edges emitted by `node-finder` and merges them to form the full service graph.
+It also exposes an HTTP endpoint that returns the accumulated service graph.
+
+#### Streaming
+`graph-builder` accumulates incoming edges in a
+[KTable](https://kafka.apache.org/0102/javadoc/org/apache/kafka/streams/kstream/KTable.html), using the
+[stream-table duality concept](https://docs.confluent.io/current/streams/concepts.html#duality-of-streams-and-tables).
+Each row in the KTable represents one graph edge. Each edge is supplemented with stats such as a running count and
+the last-seen timestamp.
+
+Kafka takes care of persisting and replicating the graph KTable across brokers for fault tolerance.
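+
+As a sketch of that duality (assuming a hypothetical `EdgeStats` value type and Scala 2.12
+SAM conversion, with serde wiring omitted; this is not the component's actual code), the
+accumulation boils down to a grouped aggregation:
+
+```scala
+import org.apache.kafka.common.utils.Bytes
+import org.apache.kafka.streams.StreamsBuilder
+import org.apache.kafka.streams.kstream.Materialized
+import org.apache.kafka.streams.state.KeyValueStore
+
+// hypothetical stats kept per edge
+case class EdgeStats(count: Long, lastSeenMillis: Long)
+
+object ServiceGraphTableSketch {
+  // one row per edge: every new occurrence of an edge updates its row in place
+  def build(builder: StreamsBuilder): Unit = {
+    builder
+      .stream[String, String]("graph-nodes") // key: "source|destination|operation"
+      .groupByKey()
+      .aggregate(
+        () => EdgeStats(0L, 0L),
+        (_: String, _: String, stats: EdgeStats) =>
+          EdgeStats(stats.count + 1, System.currentTimeMillis()),
+        Materialized.as[String, EdgeStats, KeyValueStore[Bytes, Array[Byte]]]("service-graph")
+      )
+  }
+}
+```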
+
+#### HTTP API
+`graph-builder` also acts as an HTTP API for querying the graph KTable, using servlets over embedded Jetty to
+implement the endpoints.
+[Kafka interactive queries](https://kafka.apache.org/10/documentation/streams/developer-guide/interactive-queries.html)
+are used to fetch the service graph from the local state store.
+
+An interactive query against a single stream node returns only the graph edges sharded to that node, hence a partial
+view of the world. The servlet takes care of fetching the partial graphs from all nodes hosting the KTable to form
+the full service graph.
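+
+A sketch of that fan-out (with a hypothetical `fetchRemote` helper and simplified store
+handling; the actual logic lives in the service resources) looks like:
+
+```scala
+import scala.collection.JavaConverters._
+
+import org.apache.kafka.streams.KafkaStreams
+import org.apache.kafka.streams.state.QueryableStoreTypes
+
+object GlobalGraphSketch {
+  // hypothetical helper: GETs /servicegraph/local on a peer instance
+  def fetchRemote(host: String, port: Int): Seq[String] = ???
+
+  def fullGraph(streams: KafkaStreams, store: String, localHost: String): Seq[String] =
+    streams.allMetadataForStore(store).asScala.toSeq.flatMap { meta =>
+      if (meta.host() == localHost) {
+        // local shard: read edges straight out of this instance's state store
+        val local = streams.store(store, QueryableStoreTypes.keyValueStore[String, String]())
+        val it = local.all()
+        try it.asScala.map(_.value).toList finally it.close()
+      } else {
+        // remote shard: ask the owning instance for its partial graph over HTTP
+        fetchRemote(meta.host(), meta.port())
+      }
+    }
+}
+```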
+
+###### Endpoints
+1. `/servicegraph`: returns the full service graph, including edges from all known services; each edge also carries its operations.
+
+## Building
+
+To build all the components in this repository at once, one can run
+```
+make all
+```
+To build the components separately, one can check the README in the individual component folders.
\ No newline at end of file
diff --git a/service-graph/Release.md b/service-graph/Release.md
new file mode 100644
index 000000000..03675aa41
--- /dev/null
+++ b/service-graph/Release.md
@@ -0,0 +1,10 @@
+# Releasing
+Currently we publish this repo's artifacts to Docker Hub and the Nexus central repository.
+
+# How to release and publish
+
+* Git tagging:
+
+```git tag -a 1.x.x -m "Release description..."```
+
+Or you can tag using the GitHub UI: https://github.com/ExpediaDotCom/haystack-service-graph/releases
\ No newline at end of file
diff --git a/service-graph/ReleaseNotes.md b/service-graph/ReleaseNotes.md
new file mode 100644
index 000000000..66c1850a2
--- /dev/null
+++ b/service-graph/ReleaseNotes.md
@@ -0,0 +1,31 @@
+# Release Notes
+
+## 2019-01-29 1.0.15
+ * Make S3 item name use / instead of _, to take advantage of S3 "folders"
+
+## 2019-01-29 1.0.14
+ * Handle command line args properly in the S3 store
+
+## 2019-01-28 1.0.13
+ * Fix Docker image name for snapshotter (was haystack-service-snapshotter, is now haystack-service-graph-snapshotter)
+
+## 2019-01-25 1.0.12
+ * Fix typo in Docker image name for snapshotter
+
+## 2019-01-25 1.0.11
+ * Publish snapshotter to Docker
+
+## 2019-01-23 1.0.10
+ * Names of S3 service graph snapshot items should terminate in ".csv"
+
+## 2019-01-23 1.0.9
+ * Make the parameter for listObjectsBatchSize in S3SnapshotStore optional, as it's only needed when calling write
+
+## 2019-01-23 1.0.8
+ * Remove Main companion object (it wasn't really needed)
+ * Allow URL to be specified as a parameter instead of being hard coded
+ * More unit tests
+
+## 2019-01-23 1.0.7
+ * Add Main companion class to Main object so that it can be instantiated by the Java JVM
+ * Add this ReleaseNotes.md file
diff --git a/service-graph/checkstyles/scalastyle_config.xml b/service-graph/checkstyles/scalastyle_config.xml
new file mode 100644
index 000000000..d364af665
--- /dev/null
+++ b/service-graph/checkstyles/scalastyle_config.xml
@@ -0,0 +1,134 @@
+<!-- Scalastyle standard configuration: the individual check definitions did not
+     survive extraction and are not reproduced here. -->
\ No newline at end of file
diff --git a/service-graph/deployment/scripts/publish-to-docker-hub.sh b/service-graph/deployment/scripts/publish-to-docker-hub.sh
new file mode 100755
index 000000000..f075c6160
--- /dev/null
+++ b/service-graph/deployment/scripts/publish-to-docker-hub.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+set -e
+
+QUALIFIED_DOCKER_IMAGE_NAME=$DOCKER_ORG/$DOCKER_IMAGE_NAME
+echo "DOCKER_ORG=$DOCKER_ORG, DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME, QUALIFIED_DOCKER_IMAGE_NAME=$QUALIFIED_DOCKER_IMAGE_NAME"
+echo "BRANCH=$BRANCH, TAG=$TAG, SHA=$SHA"
+
+# login
+docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD
+
+# Add tags
+if [[ $TAG =~ ([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
+ echo "releasing semantic versions"
+
+ unset MAJOR MINOR PATCH
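+  # BASH_REMATCH holds the capture groups (major, minor, patch) from the =~ match above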
+ MAJOR="${BASH_REMATCH[1]}"
+ MINOR="${BASH_REMATCH[2]}"
+ PATCH="${BASH_REMATCH[3]}"
+
+ # for tag, add MAJOR, MAJOR.MINOR, MAJOR.MINOR.PATCH and latest as tag
+ # publish image with tags
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR
+
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR
+
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR.$PATCH
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR.$PATCH
+
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:latest
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:latest
+
+elif [[ "$BRANCH" == "master" ]]; then
+ echo "releasing master branch"
+
+ # for 'master' branch, add SHA as tags
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$SHA
+
+ # publish image with tags
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME
+fi
diff --git a/service-graph/deployment/terraform/graph-builder/main.tf b/service-graph/deployment/terraform/graph-builder/main.tf
new file mode 100644
index 000000000..898c29c49
--- /dev/null
+++ b/service-graph/deployment/terraform/graph-builder/main.tf
@@ -0,0 +1,70 @@
+locals {
+ app_name = "graph-builder"
+ config_file_path = "${path.module}/templates/graph-builder_conf.tpl"
+ deployment_yaml_file_path = "${path.module}/templates/deployment_yaml.tpl"
+ count = "${var.enabled?1:0}"
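+  # the checksum suffix gives the configmap a new name whenever the rendered
+  # config changes, so the dependent deployment picks up the new config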
+ checksum = "${sha1("${data.template_file.config_data.rendered}")}"
+ configmap_name = "graph-builder-${local.checksum}"
+}
+
+resource "kubernetes_config_map" "haystack-config" {
+ metadata {
+ name = "${local.configmap_name}"
+ namespace = "${var.namespace}"
+ }
+ data {
+ "graph-builder.conf" = "${data.template_file.config_data.rendered}"
+ }
+ count = "${local.count}"
+}
+
+data "template_file" "config_data" {
+ template = "${file("${local.config_file_path}")}"
+
+ vars {
+ kafka_endpoint = "${var.kafka_endpoint}"
+ }
+}
+
+data "template_file" "deployment_yaml" {
+ template = "${file("${local.deployment_yaml_file_path}")}"
+ vars {
+ app_name = "${local.app_name}"
+ namespace = "${var.namespace}"
+ graphite_port = "${var.graphite_port}"
+ graphite_host = "${var.graphite_hostname}"
+ graphite_enabled = "${var.graphite_enabled}"
+ node_selecter_label = "${var.node_selecter_label}"
+ image = "${var.image}"
+ replicas = "${var.replicas}"
+ memory_limit = "${var.memory_limit}"
+ memory_request = "${var.memory_request}"
+ jvm_memory_limit = "${var.jvm_memory_limit}"
+ cpu_limit = "${var.cpu_limit}"
+ cpu_request = "${var.cpu_request}"
+ configmap_name = "${local.configmap_name}"
+ env_vars= "${indent(9,"${var.env_vars}")}"
+ service_port = "${var.service_port}"
+ container_port = "${var.container_port}"
+ }
+}
+
+resource "null_resource" "kubectl_apply" {
+ triggers {
+ template = "${data.template_file.deployment_yaml.rendered}"
+ }
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} apply -f - --context ${var.kubectl_context_name}"
+ }
+ count = "${local.count}"
+}
+
+
+resource "null_resource" "kubectl_destroy" {
+
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} delete -f - --context ${var.kubectl_context_name}"
+ when = "destroy"
+ }
+ count = "${local.count}"
+}
diff --git a/service-graph/deployment/terraform/graph-builder/outputs.tf b/service-graph/deployment/terraform/graph-builder/outputs.tf
new file mode 100644
index 000000000..562aba81f
--- /dev/null
+++ b/service-graph/deployment/terraform/graph-builder/outputs.tf
@@ -0,0 +1,7 @@
+output "hostname" {
+ value = "${local.app_name}"
+}
+
+output "service_port" {
+ value = "${var.service_port}"
+}
\ No newline at end of file
diff --git a/service-graph/deployment/terraform/graph-builder/templates/deployment_yaml.tpl b/service-graph/deployment/terraform/graph-builder/templates/deployment_yaml.tpl
new file mode 100644
index 000000000..38b5fb909
--- /dev/null
+++ b/service-graph/deployment/terraform/graph-builder/templates/deployment_yaml.tpl
@@ -0,0 +1,84 @@
+# ------------------- Deployment ------------------- #
+
+kind: Deployment
+apiVersion: apps/v1beta2
+metadata:
+ labels:
+ k8s-app: ${app_name}
+ name: ${app_name}
+ namespace: ${namespace}
+spec:
+ replicas: ${replicas}
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: ${app_name}
+ template:
+ metadata:
+ labels:
+ k8s-app: ${app_name}
+ spec:
+ containers:
+ - name: ${app_name}
+ image: ${image}
+ volumeMounts:
+ # Create on-disk volume to store exec logs
+ - mountPath: /config
+ name: config-volume
+ resources:
+ limits:
+ cpu: ${cpu_limit}
+ memory: ${memory_limit}Mi
+ requests:
+ cpu: ${cpu_request}
+ memory: ${memory_request}Mi
+ env:
+ - name: "HAYSTACK_OVERRIDES_CONFIG_PATH"
+ value: "/config/graph-builder.conf"
+ - name: "HAYSTACK_GRAPHITE_HOST"
+ value: "${graphite_host}"
+ - name: "HAYSTACK_GRAPHITE_PORT"
+ value: "${graphite_port}"
+ - name: "HAYSTACK_GRAPHITE_ENABLED"
+ value: "${graphite_enabled}"
+ - name: "HAYSTACK_PROP_SERVICE_HOST"
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: "JAVA_XMS"
+ value: "${jvm_memory_limit}m"
+ - name: "JAVA_XMX"
+ value: "${jvm_memory_limit}m"
+ ${env_vars}
+ livenessProbe:
+ exec:
+ command:
+ - grep
+ - "true"
+ - /app/isHealthy
+ initialDelaySeconds: 30
+ periodSeconds: 5
+ failureThreshold: 2
+ nodeSelector:
+ ${node_selecter_label}
+ volumes:
+ - name: config-volume
+ configMap:
+ name: ${configmap_name}
+
+
+# ------------------- Service ------------------- #
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: ${app_name}
+ name: ${app_name}
+ namespace: ${namespace}
+spec:
+ ports:
+ - port: ${service_port}
+ targetPort: ${container_port}
+ selector:
+ k8s-app: ${app_name}
diff --git a/service-graph/deployment/terraform/graph-builder/templates/graph-builder_conf.tpl b/service-graph/deployment/terraform/graph-builder/templates/graph-builder_conf.tpl
new file mode 100644
index 000000000..2e3ce7a01
--- /dev/null
+++ b/service-graph/deployment/terraform/graph-builder/templates/graph-builder_conf.tpl
@@ -0,0 +1,47 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-graph-builder"
+ bootstrap.servers = "${kafka_endpoint}"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ replication.factor = 1
+ }
+
+ consumer {
+ topic = "graph-nodes"
+ }
+
+ producer {
+ topic = "service-graph"
+ }
+
+ aggregate {
+ window.sec = 1800
+ retention.days = 1
+ }
+}
+
+service {
+ threads {
+ min = 1
+ max = 5
+ idle.timeout = 12000
+ }
+
+ http {
+ port = 8080
+ idle.timeout = 12000
+ }
+
+ client {
+ connection.timeout = 10000
+ socket.timeout = 10000
+ }
+}
diff --git a/service-graph/deployment/terraform/graph-builder/variables.tf b/service-graph/deployment/terraform/graph-builder/variables.tf
new file mode 100644
index 000000000..1de153833
--- /dev/null
+++ b/service-graph/deployment/terraform/graph-builder/variables.tf
@@ -0,0 +1,27 @@
+variable "image" {}
+variable "replicas" {}
+variable "namespace" {}
+variable "kafka_endpoint" {}
+variable "graphite_hostname" {}
+variable "graphite_port" {}
+variable "graphite_enabled" {}
+variable "enabled"{}
+variable "kubectl_executable_name" {}
+variable "kubectl_context_name" {}
+variable "node_selecter_label"{}
+variable "memory_request"{}
+variable "memory_limit"{}
+variable "jvm_memory_limit"{}
+variable "cpu_request"{}
+variable "cpu_limit"{}
+variable "env_vars" {}
+variable "termination_grace_period" {
+ default = 30
+}
+
+variable "service_port" {
+ default = 8080
+}
+variable "container_port" {
+ default = 8080
+}
diff --git a/service-graph/deployment/terraform/main.tf b/service-graph/deployment/terraform/main.tf
new file mode 100644
index 000000000..63de8e9c2
--- /dev/null
+++ b/service-graph/deployment/terraform/main.tf
@@ -0,0 +1,64 @@
+module "node-finder" {
+ source = "node-finder"
+ image = "expediadotcom/haystack-service-graph-node-finder:${var.service-graph["version"]}"
+ replicas = "${var.service-graph["node_finder_instances"]}"
+ namespace = "${var.namespace}"
+ kafka_endpoint = "${var.kafka_hostname}:${var.kafka_port}"
+
+ graphite_hostname = "${var.graphite_hostname}"
+ graphite_port = "${var.graphite_port}"
+ graphite_enabled = "${var.graphite_enabled}"
+ node_selecter_label = "${var.node_selector_label}"
+ enabled = "${var.service-graph["enabled"]}"
+ kubectl_executable_name = "${var.kubectl_executable_name}"
+ kubectl_context_name = "${var.kubectl_context_name}"
+ cpu_limit = "${var.service-graph["node_finder_cpu_limit"]}"
+ cpu_request = "${var.service-graph["node_finder_cpu_request"]}"
+ memory_limit = "${var.service-graph["node_finder_memory_limit"]}"
+ memory_request = "${var.service-graph["node_finder_memory_request"]}"
+ jvm_memory_limit = "${var.service-graph["node_finder_jvm_memory_limit"]}"
+ env_vars = "${var.service-graph["node_finder_environment_overrides"]}"
+ metricpoint_encoder_type = "${var.service-graph["metricpoint_encoder_type"]}"
+ collect_tags = "${var.service-graph["collect_tags"]}"
+}
+
+module "graph-builder" {
+ source = "graph-builder"
+ image = "expediadotcom/haystack-service-graph-graph-builder:${var.service-graph["version"]}"
+ replicas = "${var.service-graph["graph_builder_instances"]}"
+ namespace = "${var.namespace}"
+ kafka_endpoint = "${var.kafka_hostname}:${var.kafka_port}"
+ graphite_hostname = "${var.graphite_hostname}"
+ graphite_port = "${var.graphite_port}"
+ graphite_enabled = "${var.graphite_enabled}"
+ node_selecter_label = "${var.node_selector_label}"
+ enabled = "${var.service-graph["enabled"]}"
+ kubectl_executable_name = "${var.kubectl_executable_name}"
+ kubectl_context_name = "${var.kubectl_context_name}"
+ cpu_limit = "${var.service-graph["graph_builder_cpu_limit"]}"
+ cpu_request = "${var.service-graph["graph_builder_cpu_request"]}"
+ memory_limit = "${var.service-graph["graph_builder_memory_limit"]}"
+ memory_request = "${var.service-graph["graph_builder_memory_request"]}"
+ jvm_memory_limit = "${var.service-graph["graph_builder_jvm_memory_limit"]}"
+ env_vars = "${var.service-graph["graph_builder_environment_overrides"]}"
+}
+/*
+module "snapshotter" {
+ source = "snapshotter"
+ image = "expediadotcom/haystack-service-graph-snapshotter:${var.service-graph["version"]}"
+ namespace = "${var.namespace}"
+ graphite_hostname = "${var.graphite_hostname}"
+ graphite_port = "${var.graphite_port}"
+ enabled = "${var.service-graph["enabled"]}"
+ kubectl_executable_name = "${var.kubectl_executable_name}"
+ kubectl_context_name = "${var.kubectl_context_name}"
+ cpu_limit = "${var.service-graph["snapshotter_cpu_limit"]}"
+ snapshotter_purge_age_ms = "${var.service-graph["snapshotter_purge_age_ms"]}"
+ cpu_request = "${var.service-graph["snapshotter_cpu_request"]}"
+ memory_limit = "${var.service-graph["snapshotter_memory_limit"]}"
+ memory_request = "${var.service-graph["snapshotter_memory_request"]}"
+ jvm_memory_limit = "${var.service-graph["snapshotter_jvm_memory_limit"]}"
+ env_vars = "${var.service-graph["snapshotter_environment_overrides"]}"
+ main_args = "${var.service-graph["main_args"]}"
+}
+*/
diff --git a/service-graph/deployment/terraform/node-finder/main.tf b/service-graph/deployment/terraform/node-finder/main.tf
new file mode 100644
index 000000000..c74e247f3
--- /dev/null
+++ b/service-graph/deployment/terraform/node-finder/main.tf
@@ -0,0 +1,70 @@
+locals {
+ app_name = "node-finder"
+ config_file_path = "${path.module}/templates/node-finder_conf.tpl"
+ deployment_yaml_file_path = "${path.module}/templates/deployment_yaml.tpl"
+ count = "${var.enabled?1:0}"
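+  # the checksum suffix gives the configmap a new name whenever the rendered
+  # config changes, so the dependent deployment picks up the new config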
+ checksum = "${sha1("${data.template_file.config_data.rendered}")}"
+ configmap_name = "node-finder-${local.checksum}"
+}
+
+resource "kubernetes_config_map" "haystack-config" {
+ metadata {
+ name = "${local.configmap_name}"
+ namespace = "${var.namespace}"
+ }
+ data {
+ "node-finder.conf" = "${data.template_file.config_data.rendered}"
+ }
+ count = "${local.count}"
+}
+
+data "template_file" "config_data" {
+ template = "${file("${local.config_file_path}")}"
+
+ vars {
+ kafka_endpoint = "${var.kafka_endpoint}"
+ metricpoint_encoder_type = "${var.metricpoint_encoder_type}"
+ collect_tags = "${var.collect_tags}"
+ }
+}
+
+data "template_file" "deployment_yaml" {
+ template = "${file("${local.deployment_yaml_file_path}")}"
+ vars {
+ app_name = "${local.app_name}"
+ namespace = "${var.namespace}"
+ graphite_port = "${var.graphite_port}"
+ graphite_host = "${var.graphite_hostname}"
+ graphite_enabled = "${var.graphite_enabled}"
+ node_selecter_label = "${var.node_selecter_label}"
+ image = "${var.image}"
+ replicas = "${var.replicas}"
+ memory_limit = "${var.memory_limit}"
+ memory_request = "${var.memory_request}"
+ jvm_memory_limit = "${var.jvm_memory_limit}"
+ cpu_limit = "${var.cpu_limit}"
+ cpu_request = "${var.cpu_request}"
+ configmap_name = "${local.configmap_name}"
+ env_vars= "${indent(9,"${var.env_vars}")}"
+ }
+}
+
+resource "null_resource" "kubectl_apply" {
+ triggers {
+ template = "${data.template_file.deployment_yaml.rendered}"
+ }
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} apply -f - --context ${var.kubectl_context_name}"
+ }
+ count = "${local.count}"
+}
+
+
+resource "null_resource" "kubectl_destroy" {
+
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} delete -f - --context ${var.kubectl_context_name}"
+ when = "destroy"
+ }
+ count = "${local.count}"
+}
diff --git a/service-graph/deployment/terraform/node-finder/outputs.tf b/service-graph/deployment/terraform/node-finder/outputs.tf
new file mode 100644
index 000000000..e69de29bb
diff --git a/service-graph/deployment/terraform/node-finder/templates/deployment_yaml.tpl b/service-graph/deployment/terraform/node-finder/templates/deployment_yaml.tpl
new file mode 100644
index 000000000..bf9d8ff30
--- /dev/null
+++ b/service-graph/deployment/terraform/node-finder/templates/deployment_yaml.tpl
@@ -0,0 +1,64 @@
+# ------------------- Deployment ------------------- #
+
+kind: Deployment
+apiVersion: apps/v1beta2
+metadata:
+ labels:
+ k8s-app: ${app_name}
+ name: ${app_name}
+ namespace: ${namespace}
+spec:
+ replicas: ${replicas}
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: ${app_name}
+ template:
+ metadata:
+ labels:
+ k8s-app: ${app_name}
+ spec:
+ containers:
+ - name: ${app_name}
+ image: ${image}
+ volumeMounts:
+ # Create on-disk volume to store exec logs
+ - mountPath: /config
+ name: config-volume
+ resources:
+ limits:
+ cpu: ${cpu_limit}
+ memory: ${memory_limit}Mi
+ requests:
+ cpu: ${cpu_request}
+ memory: ${memory_request}Mi
+ env:
+ - name: "HAYSTACK_OVERRIDES_CONFIG_PATH"
+ value: "/config/node-finder.conf"
+ - name: "HAYSTACK_GRAPHITE_HOST"
+ value: "${graphite_host}"
+ - name: "HAYSTACK_GRAPHITE_ENABLED"
+ value: "${graphite_enabled}"
+ - name: "HAYSTACK_GRAPHITE_PORT"
+ value: "${graphite_port}"
+ - name: "JAVA_XMS"
+ value: "${jvm_memory_limit}m"
+ - name: "JAVA_XMX"
+ value: "${jvm_memory_limit}m"
+ ${env_vars}
+ livenessProbe:
+ exec:
+ command:
+ - grep
+ - "true"
+ - /app/isHealthy
+ initialDelaySeconds: 30
+ periodSeconds: 5
+ failureThreshold: 2
+ nodeSelector:
+ ${node_selecter_label}
+ volumes:
+ - name: config-volume
+ configMap:
+ name: ${configmap_name}
+
diff --git a/service-graph/deployment/terraform/node-finder/templates/node-finder_conf.tpl b/service-graph/deployment/terraform/node-finder/templates/node-finder_conf.tpl
new file mode 100644
index 000000000..9a09cc1e5
--- /dev/null
+++ b/service-graph/deployment/terraform/node-finder/templates/node-finder_conf.tpl
@@ -0,0 +1,51 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-node-finder"
+ bootstrap.servers = "${kafka_endpoint}"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ timestamp.extractor = "com.expedia.www.haystack.commons.kstreams.SpanTimestampExtractor"
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ }
+
+ producer {
+ metrics {
+ topic = "metric-data-points"
+ // there are three types of encoders that are used on service and operation names:
+ // 1) periodreplacement: replaces all periods with 3 underscores
+ // 2) base64: base64 encodes the full name with a padding of _
+ // 3) noop: does not perform any encoding
+ key.encoder = "${metricpoint_encoder_type}"
+
+ }
+ service.call {
+ topic = "graph-nodes"
+ }
+ }
+
+ consumer {
+ topic = "proto-spans"
+ }
+
+ accumulator {
+ interval = 2500
+ }
+  // collector tags allow the service graph to collect tags from spans and make them available when querying the
+  // service graph. Example: collect the "tier" and "infraprovider" tags using the value "[tier,infraprovider]"
+ collectorTags = ${collect_tags}
+
+ node.metadata {
+ topic {
+ autocreate = true
+ name = "haystack-node-finder-metadata"
+ partition.count = 6
+ replication.factor = 2
+ }
+ }
+}
+
diff --git a/service-graph/deployment/terraform/node-finder/variables.tf b/service-graph/deployment/terraform/node-finder/variables.tf
new file mode 100644
index 000000000..5b45ecd8c
--- /dev/null
+++ b/service-graph/deployment/terraform/node-finder/variables.tf
@@ -0,0 +1,24 @@
+variable "image" {}
+variable "replicas" {}
+variable "namespace" {}
+variable "kafka_endpoint" {}
+variable "metricpoint_encoder_type" {}
+variable "graphite_hostname" {}
+variable "graphite_port" {}
+variable "graphite_enabled" {}
+variable "enabled"{}
+variable "kubectl_executable_name" {}
+variable "kubectl_context_name" {}
+variable "node_selecter_label"{}
+variable "memory_request"{}
+variable "memory_limit"{}
+variable "jvm_memory_limit"{}
+variable "cpu_request"{}
+variable "cpu_limit"{}
+variable "env_vars" {}
+variable "termination_grace_period" {
+ default = 30
+}
+variable "collect_tags" {
+ default = "[]"
+}
diff --git a/service-graph/deployment/terraform/outputs.tf b/service-graph/deployment/terraform/outputs.tf
new file mode 100644
index 000000000..f7c451c2f
--- /dev/null
+++ b/service-graph/deployment/terraform/outputs.tf
@@ -0,0 +1,7 @@
+output "graph_builder_hostname" {
+ value = "${module.graph-builder.hostname}"
+}
+
+output "graph_builder_port" {
+ value = "${module.graph-builder.service_port}"
+}
\ No newline at end of file
diff --git a/service-graph/deployment/terraform/snapshotter/templates/deployment_yaml.tpl b/service-graph/deployment/terraform/snapshotter/templates/deployment_yaml.tpl
new file mode 100644
index 000000000..8d1dd19ae
--- /dev/null
+++ b/service-graph/deployment/terraform/snapshotter/templates/deployment_yaml.tpl
@@ -0,0 +1,50 @@
+# ------------------- Deployment ------------------- #
+
+kind: Deployment
+apiVersion: apps/v1beta2
+metadata:
+ labels:
+ k8s-app: ${app_name}
+ name: ${app_name}
+ namespace: ${namespace}
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: ${app_name}
+ template:
+ metadata:
+ labels:
+ k8s-app: ${app_name}
+ spec:
+ containers:
+ - name: ${app_name}
+ image: ${image}
+ volumeMounts:
+ # Create on-disk volume to store exec logs
+ - mountPath: /config
+ name: config-volume
+ resources:
+ limits:
+ cpu: ${cpu_limit}
+ memory: ${memory_limit}Mi
+ requests:
+ cpu: ${cpu_request}
+ memory: ${memory_request}Mi
+ env:
+ - name: "HAYSTACK_GRAPHITE_HOST"
+ value: "${graphite_host}"
+ - name: "HAYSTACK_GRAPHITE_PORT"
+ value: "${graphite_port}"
+ - name: "JAVA_XMS"
+ value: "${jvm_memory_limit}m"
+ - name: "JAVA_XMX"
+ value: "${jvm_memory_limit}m"
+ ${env_vars}
+ nodeSelector:
+ ${node_selecter_label}
+ volumes:
+ - name: config-volume
+ configMap:
+ name: ${configmap_name}
\ No newline at end of file
diff --git a/service-graph/deployment/terraform/snapshotter/templates/snapshotter_conf.tpl b/service-graph/deployment/terraform/snapshotter/templates/snapshotter_conf.tpl
new file mode 100644
index 000000000..dd9e3c620
--- /dev/null
+++ b/service-graph/deployment/terraform/snapshotter/templates/snapshotter_conf.tpl
@@ -0,0 +1,3 @@
+snapshotter {
+ purge.age.ms = ${snapshotter_purge_age_ms}
+}
\ No newline at end of file
diff --git a/service-graph/deployment/terraform/variables.tf b/service-graph/deployment/terraform/variables.tf
new file mode 100644
index 000000000..66cb9df87
--- /dev/null
+++ b/service-graph/deployment/terraform/variables.tf
@@ -0,0 +1,14 @@
+variable "kafka_hostname" {}
+variable "kafka_port" {}
+variable "graphite_hostname" {}
+variable "graphite_port" {}
+variable "graphite_enabled" {}
+variable "kubectl_context_name" {}
+variable "kubectl_executable_name" {}
+variable "namespace" {}
+variable "node_selector_label"{}
+
+# service-graph config
+variable "service-graph" {
+ type = "map"
+}
diff --git a/service-graph/graph-builder/Makefile b/service-graph/graph-builder/Makefile
new file mode 100644
index 000000000..3d1deb4df
--- /dev/null
+++ b/service-graph/graph-builder/Makefile
@@ -0,0 +1,11 @@
+.PHONY: integration_test release
+
+export DOCKER_ORG := expediadotcom
+export DOCKER_IMAGE_NAME := haystack-service-graph-graph-builder
+PWD := $(shell pwd)
+
+docker-image:
+ docker build -t $(DOCKER_IMAGE_NAME) -f build/docker/Dockerfile .
+
+release: docker-image
+ ../deployment/scripts/publish-to-docker-hub.sh
diff --git a/service-graph/graph-builder/README.md b/service-graph/graph-builder/README.md
new file mode 100644
index 000000000..c40bbaa25
--- /dev/null
+++ b/service-graph/graph-builder/README.md
@@ -0,0 +1,65 @@
+# Haystack: graph-builder
+
+Information on what this component is all about is documented in the [README](../README.md) of the repository
+
+## Building
+
+```
+mvn clean verify
+```
+
+or
+
+```
+make docker-image
+```
+
+## Testing Locally
+
+* Download Kafka 0.11.0.x
+* Start Zookeeper locally (from kafka home)
+```
+bin/zookeeper-server-start.sh config/zookeeper.properties
+```
+* Start Kafka locally (from kafka home)
+```
+bin/kafka-server-start.sh config/server.properties
+```
+* Create proto-spans topic (from kafka home)
+```
+bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic proto-spans
+```
+* Create a local.conf override file
+```
+cat local.conf
+
+health.status.path = "logs/isHealthy"
+
+kafka {
+ streams {
+ bootstrap.servers = "localhost:9092"
+ }
+}
+```
+* Build graph-builder application locally (graph-builder app root)
+```
+mvn clean package
+```
+* Start the node-finder (node-finder app root)
+```
+export HAYSTACK_OVERRIDES_CONFIG_PATH=/local.conf
+java -jar target/haystack-service-graph-node-finder.jar
+```
+
+* Start application (graph-builder app root)
+```
+java -jar target/haystack-service-graph-graph-builder.jar
+```
+* Send data to Kafka (refer to fakespans tool README)
+```
+$GOBIN/fakespans --from-file fakespans.json --kafka-broker localhost:9092
+```
+* Query the service graph endpoint
+```
+curl http://localhost:8080/servicegraph
+```
diff --git a/service-graph/graph-builder/build/docker/Dockerfile b/service-graph/graph-builder/build/docker/Dockerfile
new file mode 100644
index 000000000..f2aac8f71
--- /dev/null
+++ b/service-graph/graph-builder/build/docker/Dockerfile
@@ -0,0 +1,25 @@
+FROM openjdk:8-jre
+MAINTAINER Haystack
+
+ENV APP_NAME haystack-service-graph-graph-builder
+ENV APP_HOME /app/bin
+ENV JMXTRANS_AGENT jmxtrans-agent-1.2.6
+ENV DOCKERIZE_VERSION v0.6.1
+
+ADD https://github.com/jwilder/dockerize/releases/download/${DOCKERIZE_VERSION}/dockerize-alpine-linux-amd64-${DOCKERIZE_VERSION}.tar.gz dockerize.tar.gz
+RUN tar xzf dockerize.tar.gz
+RUN chmod +x dockerize
+
+RUN mkdir -p ${APP_HOME}
+RUN chmod a+w /app
+
+COPY target/${APP_NAME}.jar ${APP_HOME}/
+COPY build/docker/start-app.sh ${APP_HOME}/
+COPY build/docker/jmxtrans-agent.xml ${APP_HOME}/
+
+RUN chmod +x ${APP_HOME}/start-app.sh
+ADD https://github.com/jmxtrans/jmxtrans-agent/releases/download/${JMXTRANS_AGENT}/${JMXTRANS_AGENT}.jar ${APP_HOME}/
+
+WORKDIR ${APP_HOME}
+
+ENTRYPOINT ["./start-app.sh"]
diff --git a/service-graph/graph-builder/build/docker/jmxtrans-agent.xml b/service-graph/graph-builder/build/docker/jmxtrans-agent.xml
new file mode 100644
index 000000000..6f53416f0
--- /dev/null
+++ b/service-graph/graph-builder/build/docker/jmxtrans-agent.xml
@@ -0,0 +1,61 @@
+<jmxtrans-agent>
+    <!-- the JMX query definitions originally listed here were lost in extraction -->
+    <queries/>
+
+    <outputWriter class="org.jmxtrans.agent.GraphitePlainTextTcpOutputWriter">
+        <host>${HAYSTACK_GRAPHITE_HOST:monitoring-influxdb-graphite.kube-system.svc}</host>
+        <port>${HAYSTACK_GRAPHITE_PORT:2003}</port>
+        <enabled>${HAYSTACK_GRAPHITE_ENABLED:true}</enabled>
+        <namePrefix>haystack.service-graph.graph-builder.#hostname#.</namePrefix>
+    </outputWriter>
+
+    <collectIntervalInSeconds>60</collectIntervalInSeconds>
+</jmxtrans-agent>
diff --git a/service-graph/graph-builder/build/docker/start-app.sh b/service-graph/graph-builder/build/docker/start-app.sh
new file mode 100755
index 000000000..694d74941
--- /dev/null
+++ b/service-graph/graph-builder/build/docker/start-app.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+[ -z "$JAVA_XMS" ] && JAVA_XMS=1024m
+[ -z "$JAVA_XMX" ] && JAVA_XMX=1024m
+
+set -e
+JAVA_OPTS="${JAVA_OPTS} \
+-javaagent:${APP_HOME}/${JMXTRANS_AGENT}.jar=${APP_HOME}/jmxtrans-agent.xml \
+-XX:+UseG1GC \
+-Xmx${JAVA_XMX} \
+-Xms${JAVA_XMS} \
+-Dapplication.name=${APP_NAME} \
+-Dapplication.home=${APP_HOME}"
+
+if [[ -n "$SERVICE_DEBUG_ON" ]] && [[ "$SERVICE_DEBUG_ON" == true ]]; then
+ JAVA_OPTS="$JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y"
+fi
+
+exec java ${JAVA_OPTS} -jar "${APP_HOME}/${APP_NAME}.jar"
diff --git a/service-graph/graph-builder/pom.xml b/service-graph/graph-builder/pom.xml
new file mode 100644
index 000000000..d1c8df374
--- /dev/null
+++ b/service-graph/graph-builder/pom.xml
@@ -0,0 +1,222 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <parent>
+        <artifactId>haystack-service-graph</artifactId>
+        <groupId>com.expedia.www</groupId>
+        <version>1.0.15-SNAPSHOT</version>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>haystack-service-graph-graph-builder</artifactId>
+    <packaging>jar</packaging>
+
+    <licenses>
+        <license>
+            <name>Apache License, Version 2.0</name>
+            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+            <distribution>repo</distribution>
+        </license>
+    </licenses>
+
+    <properties>
+        <kafka-version>1.1.0</kafka-version>
+        <mainClass>com.expedia.www.haystack.service.graph.graph.builder.App</mainClass>
+        <finalName>${project.artifactId}-${project.version}</finalName>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.httpcomponents</groupId>
+            <artifactId>httpclient</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka_${scala.major.minor.version}</artifactId>
+            <version>${kafka-version}</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-log4j12</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-clients</artifactId>
+            <version>${kafka-version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.expedia.www</groupId>
+            <artifactId>haystack-logback-metrics-appender</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.eclipse.jetty</groupId>
+            <artifactId>jetty-server</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.eclipse.jetty</groupId>
+            <artifactId>jetty-servlet</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.httpcomponents</groupId>
+            <artifactId>fluent-hc</artifactId>
+        </dependency>
+
+        <!-- test dependencies -->
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-streams</artifactId>
+            <version>${kafka-version}</version>
+            <classifier>test</classifier>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-streams</artifactId>
+            <version>${kafka-version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka_${scala.major.minor.version}</artifactId>
+            <version>${kafka-version}</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-log4j12</artifactId>
+                </exclusion>
+            </exclusions>
+            <classifier>test</classifier>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-clients</artifactId>
+            <version>${kafka-version}</version>
+            <classifier>test</classifier>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-all</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <finalName>${finalName}</finalName>
+        <plugins>
+            <plugin>
+                <groupId>org.scalatest</groupId>
+                <artifactId>scalatest-maven-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>test</id>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                        <configuration>
+                            <tagsToExclude>org.expedia.www.haystack.commons.scalatest.IntegrationSuite</tagsToExclude>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>integration-test</id>
+                        <phase>integration-test</phase>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                        <configuration>
+                            <tagsToInclude>org.expedia.www.haystack.commons.scalatest.IntegrationSuite</tagsToInclude>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <transformers>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
+                                    <resource>reference.conf</resource>
+                                </transformer>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                                    <mainClass>${mainClass}</mainClass>
+                                </transformer>
+                            </transformers>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>net.alchim31.maven</groupId>
+                <artifactId>scala-maven-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.scalastyle</groupId>
+                <artifactId>scalastyle-maven-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/service-graph/graph-builder/src/main/resources/app.conf b/service-graph/graph-builder/src/main/resources/app.conf
new file mode 100644
index 000000000..c4120541b
--- /dev/null
+++ b/service-graph/graph-builder/src/main/resources/app.conf
@@ -0,0 +1,55 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-graph-builder"
+ bootstrap.servers = "kafkasvc:9092"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ replication.factor = 1
+ }
+
+ rocksdb {
+ block.cache.size = 16777216
+ block.size = 16384
+ cache.index.and.filter.blocks = true
+ max.write.buffer.number = 2
+ }
+
+ consumer {
+ topic = "graph-nodes"
+ }
+
+ producer {
+ topic = "service-graph"
+ }
+
+ aggregate {
+ window.sec = 300
+ retention.days = 7
+ }
+}
+
+service {
+ host = "localhost"
+ threads {
+ min = 1
+ max = 5
+ idle.timeout = 12000
+ }
+
+ http {
+ port = 8080
+ idle.timeout = 12000
+ }
+
+ client {
+ connection.timeout = 1000
+ socket.timeout = 1000
+ }
+}
diff --git a/service-graph/graph-builder/src/main/resources/logback.xml b/service-graph/graph-builder/src/main/resources/logback.xml
new file mode 100644
index 000000000..c45f62d7b
--- /dev/null
+++ b/service-graph/graph-builder/src/main/resources/logback.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+        <withJansi>true</withJansi>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">
+        <queueSize>${HAYSTACK_LOG_QUEUE_SIZE:-500}</queueSize>
+        <discardingThreshold>${HAYSTACK_LOG_DISCARD_THRESHOLD:-0}</discardingThreshold>
+        <appender-ref ref="console"/>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="ASYNC"/>
+    </root>
+
+</configuration>
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/App.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/App.scala
new file mode 100644
index 000000000..d942f5edd
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/App.scala
@@ -0,0 +1,144 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder
+
+import com.codahale.metrics.JmxReporter
+import com.expedia.www.haystack.commons.health.{HealthStatusController, UpdateHealthStatusFile}
+import com.expedia.www.haystack.commons.kstreams.app.ManagedKafkaStreams
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.service.graph.graph.builder.config.AppConfiguration
+import com.expedia.www.haystack.service.graph.graph.builder.config.entities.{KafkaConfiguration, ServiceConfiguration}
+import com.expedia.www.haystack.service.graph.graph.builder.service.fetchers.{LocalOperationEdgesFetcher, LocalServiceEdgesFetcher, RemoteOperationEdgesFetcher, RemoteServiceEdgesFetcher}
+import com.expedia.www.haystack.service.graph.graph.builder.service.resources._
+import com.expedia.www.haystack.service.graph.graph.builder.service.utils.QueryTimestampReader
+import com.expedia.www.haystack.service.graph.graph.builder.service.{HttpService, ManagedHttpService}
+import com.expedia.www.haystack.service.graph.graph.builder.stream.{ServiceGraphStreamSupplier, StreamSupplier}
+import com.netflix.servo.util.VisibleForTesting
+import org.apache.kafka.streams.KafkaStreams
+import org.slf4j.LoggerFactory
+
+/**
+ * Starting point for graph-builder application
+ */
+object App extends MetricsSupport {
+ private val LOGGER = LoggerFactory.getLogger(App.getClass)
+
+ def main(args: Array[String]): Unit = {
+ val appConfiguration = new AppConfiguration()
+
+ // instantiate the application;
+ // if any exception occurs during instantiation,
+ // runApp tears down gracefully and returns null, and main exits
+ val app = runApp(appConfiguration)
+
+ if (app == null) {
+ System.exit(1)
+ } else {
+ // add a shutdown hook
+ Runtime.getRuntime.addShutdownHook(new Thread() {
+ override def run(): Unit = {
+ LOGGER.info("Shutdown hook is invoked, tearing down the application.")
+ if (app != null) app.stop()
+ }
+ })
+ }
+ }
+
+ @VisibleForTesting
+ def runApp(appConfiguration: AppConfiguration): ManagedApplication = {
+ val jmxReporter: JmxReporter = JmxReporter.forRegistry(metricRegistry).build()
+ val healthStatusController = new HealthStatusController
+ healthStatusController.addListener(new UpdateHealthStatusFile(appConfiguration.healthStatusFilePath))
+
+ var stream: KafkaStreams = null
+ var service: HttpService = null
+ try {
+
+ // build kafka stream to create service graph
+ // it ingests graph edges and creates the service graph out of them
+ // graphs are stored as a materialized ktable in the stream state store
+ stream = createStream(appConfiguration.kafkaConfig, healthStatusController)
+
+ // build http service to query current service graph
+ // it performs interactive queries on the ktable
+ service = createService(appConfiguration.serviceConfig, stream, appConfiguration.kafkaConfig)
+
+ // wrap service and stream in a managed application instance
+ // ManagedApplication makes sure that the startup/shutdown sequence is right
+ // and that startup/shutdown errors are handled appropriately
+ val app = new ManagedApplication(
+ new ManagedHttpService(service),
+ new ManagedKafkaStreams(stream),
+ jmxReporter,
+ LoggerFactory.getLogger(classOf[ManagedApplication])
+ )
+
+ // start the application
+ // if any exception occurs during startup
+ // gracefully handles teardown and does system exit
+ app.start()
+
+ // mark the app as healthy
+ healthStatusController.setHealthy()
+
+ app
+ } catch {
+ case ex: Exception =>
+ LOGGER.error("Observed fatal exception instantiating the app", ex)
+ if(stream != null) stream.close()
+ if(service != null) service.close()
+ null
+ }
+ }
+
+ @VisibleForTesting
+ def createStream(kafkaConfig: KafkaConfiguration, healthController: HealthStatusController): KafkaStreams = {
+ // service graph kafka stream supplier
+ val serviceGraphStreamSupplier = new ServiceGraphStreamSupplier(kafkaConfig)
+
+ // create kstream using application topology
+ val streamsSupplier = new StreamSupplier(
+ serviceGraphStreamSupplier,
+ healthController,
+ kafkaConfig.streamsConfig,
+ kafkaConfig.consumerTopic)
+
+ // build kstream app
+ streamsSupplier.get()
+ }
+
+ @VisibleForTesting
+ def createService(serviceConfig: ServiceConfiguration, stream: KafkaStreams, kafkaConfig: KafkaConfiguration): HttpService = {
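+ // the ktable's queryable state store shares its name with the producer (output) topic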
+ val storeName = kafkaConfig.producerTopic
+ val localOperationEdgesFetcher = new LocalOperationEdgesFetcher(stream, storeName)
+ val remoteOperationEdgesFetcher = new RemoteOperationEdgesFetcher(serviceConfig.client)
+ val localServiceEdgesFetcher = new LocalServiceEdgesFetcher(stream, storeName)
+ val remoteServiceEdgesFetcher = new RemoteServiceEdgesFetcher(serviceConfig.client)
+
+ implicit val timestampReader: QueryTimestampReader = new QueryTimestampReader(kafkaConfig.aggregationWindowSec)
+ val servlets = Map(
+ "/operationgraph/local" -> new LocalOperationGraphResource(localOperationEdgesFetcher),
+ "/operationgraph" -> new GlobalOperationGraphResource(stream, storeName, serviceConfig, localOperationEdgesFetcher, remoteOperationEdgesFetcher),
+ "/servicegraph/local" -> new LocalServiceGraphResource(localServiceEdgesFetcher),
+ "/servicegraph" -> new GlobalServiceGraphResource(stream, storeName, serviceConfig, localServiceEdgesFetcher, remoteServiceEdgesFetcher),
+ "/isWorking" -> new IsWorkingResource
+ )
+
+ new HttpService(serviceConfig, servlets)
+ }
+}
\ No newline at end of file
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/ManagedApplication.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/ManagedApplication.scala
new file mode 100644
index 000000000..85e37b019
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/ManagedApplication.scala
@@ -0,0 +1,63 @@
+package com.expedia.www.haystack.service.graph.graph.builder
+
+import com.codahale.metrics.JmxReporter
+import com.expedia.www.haystack.commons.kstreams.app.ManagedService
+import com.expedia.www.haystack.commons.logger.LoggerUtils
+import com.expedia.www.haystack.service.graph.graph.builder.ManagedApplication._
+import org.slf4j.Logger
+
+import scala.util.Try
+
+object ManagedApplication {
+ val StartMessage = "Starting the given topology and service"
+ val HttpStartMessage = "HTTP service started successfully"
+ val StreamStartMessage = "Kafka stream started successfully"
+ val HttpStopMessage = "Shutting down HTTP service"
+ val StreamStopMessage = "Shutting down Kafka stream"
+ val JmxReporterStopMessage = "Shutting down JMX Reporter"
+ val LoggerStopMessage = "Shutting down logger. Bye!"
+}
+
+class ManagedApplication(service: ManagedService, stream: ManagedService, jmxReporter: JmxReporter, logger: Logger) {
+
+ require(service != null)
+ require(stream != null)
+ require(jmxReporter != null)
+ require(logger != null)
+
+ def start(): Unit = {
+ try {
+ jmxReporter.start()
+ logger.info(StartMessage)
+
+ service.start()
+ logger.info(HttpStartMessage)
+
+ stream.start()
+ logger.info(StreamStartMessage)
+ } catch {
+ case ex: Exception =>
+ logger.error("Observed fatal exception while starting the app", ex)
+ stop()
+ System.exit(1)
+ }
+ }
+
+ /**
+ * This method stops the managed service, the stream and the `JmxReporter` if they have been
+ * previously started. If not, this method does nothing.
+ */
+ def stop(): Unit = {
+ logger.info(HttpStopMessage)
+ Try(service.stop())
+
+ logger.info(StreamStopMessage)
+ Try(stream.stop())
+
+ logger.info(JmxReporterStopMessage)
+ Try(jmxReporter.close())
+
+ logger.info(LoggerStopMessage)
+ Try(LoggerUtils.shutdownLogger())
+ }
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/AppConfiguration.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/AppConfiguration.scala
new file mode 100644
index 000000000..d925fd5e8
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/AppConfiguration.scala
@@ -0,0 +1,140 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.config
+
+import java.util.Properties
+
+import com.expedia.www.haystack.commons.config.ConfigurationLoader
+import com.expedia.www.haystack.service.graph.graph.builder.config.entities._
+import com.typesafe.config.Config
+import org.apache.commons.lang3.StringUtils
+import org.apache.kafka.streams.StreamsConfig
+import org.apache.kafka.streams.Topology.AutoOffsetReset
+
+import scala.collection.JavaConverters._
+
+/**
+ * This class reads the configuration from the given resource name using {@link ConfigurationLoader ConfigurationLoader}
+ *
+ * @param resourceName name of the resource file to load
+ */
+class AppConfiguration(resourceName: String) {
+
+ require(StringUtils.isNotBlank(resourceName))
+
+ private val config = ConfigurationLoader.loadConfigFileWithEnvOverrides(resourceName = this.resourceName)
+
+ /**
+ * Default constructor; loads the config from the "app.conf" resource
+ */
+ def this() = this("app.conf")
+
+ /**
+ * Location of the health status file
+ */
+ val healthStatusFilePath: String = config.getString("health.status.path")
+
+ /**
+ * Instance of {@link KafkaConfiguration KafkaConfiguration} to be used by the kstreams application
+ */
+ lazy val kafkaConfig: KafkaConfiguration = {
+
+ // verify that the application id, bootstrap servers and application server configs are non-empty
+ def verifyRequiredProps(props: Properties): Unit = {
+ require(StringUtils.isNotBlank(props.getProperty(StreamsConfig.APPLICATION_ID_CONFIG)))
+ require(StringUtils.isNotBlank(props.getProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG)))
+ require(StringUtils.isNotBlank(props.getProperty(StreamsConfig.APPLICATION_SERVER_CONFIG)))
+ }
+
+ def addProps(config: Config, props: Properties, prefix: (String) => String = identity): Unit = {
+ config.entrySet().asScala.foreach(kv => {
+ val propKeyName = prefix(kv.getKey)
+ props.setProperty(propKeyName, kv.getValue.unwrapped().toString)
+ })
+ }
+
+ val kafka = config.getConfig("kafka")
+ val streamsConfig = kafka.getConfig("streams")
+ val consumerConfig = kafka.getConfig("consumer")
+ val producerConfig = kafka.getConfig("producer")
+
+ // add stream specific properties
+ val streamProps = new Properties
+ addProps(streamsConfig, streamProps)
+ // default application.server to this service's host:port so peers can route interactive queries here
+ if (StringUtils.isBlank(streamProps.getProperty(StreamsConfig.APPLICATION_SERVER_CONFIG))) {
+ streamProps.setProperty(StreamsConfig.APPLICATION_SERVER_CONFIG, s"${config.getString("service.host")}:${config.getInt("service.http.port")}")
+ }
+
+ if (kafka.hasPath("rocksdb")) {
+ CustomRocksDBConfig.setRocksDbConfig(kafka.getConfig("rocksdb"))
+ streamProps.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, classOf[CustomRocksDBConfig])
+ }
+
+ // validate props
+ verifyRequiredProps(streamProps)
+
+ // offset reset for kstream
+ val autoOffsetReset =
+ if (streamsConfig.hasPath("auto.offset.reset")) {
+ AutoOffsetReset.valueOf(streamsConfig.getString("auto.offset.reset").toUpperCase)
+ } else {
+ AutoOffsetReset.LATEST
+ }
+
+ val aggregation = kafka.getConfig("aggregate")
+ val aggregationWindowSec = aggregation.getInt("window.sec")
+ val aggregationRetentionDays = aggregation.getInt("retention.days")
+
+ KafkaConfiguration(new StreamsConfig(streamProps),
+ consumerConfig.getString("topic"),
+ producerConfig.getString("topic"),
+ autoOffsetReset,
+ kafka.getLong("close.timeout.ms"),
+ aggregationWindowSec,
+ aggregationRetentionDays
+ )
+ }
+
+ /**
+ * Instance of {@link ServiceConfiguration} to be used by servlet container
+ */
+ lazy val serviceConfig: ServiceConfiguration = {
+ val service = config.getConfig("service")
+ val threads = service.getConfig("threads")
+ val http = service.getConfig("http")
+ val client = service.getConfig("client")
+
+ ServiceConfiguration(
+ service.getString("host"),
+ ServiceThreadsConfiguration(
+ threads.getInt("min"),
+ threads.getInt("max"),
+ threads.getInt("idle.timeout")
+ ),
+ ServiceHttpConfiguration(
+ http.getInt("port"),
+ http.getLong("idle.timeout")
+ ),
+ ServiceClientConfiguration(
+ client.getInt("connection.timeout"),
+ client.getInt("socket.timeout")
+ )
+ )
+ }
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/CustomRocksDBConfig.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/CustomRocksDBConfig.scala
new file mode 100644
index 000000000..0ee350334
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/CustomRocksDBConfig.scala
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.service.graph.graph.builder.config.entities
+
+import java.util
+
+import com.expedia.www.haystack.service.graph.graph.builder.config.entities.CustomRocksDBConfig._
+import com.google.common.annotations.VisibleForTesting
+import com.typesafe.config.{Config, ConfigRenderOptions}
+import org.apache.kafka.streams.state.RocksDBConfigSetter
+import org.rocksdb.{BlockBasedTableConfig, Options}
+import org.slf4j.{Logger, LoggerFactory}
+
+object CustomRocksDBConfig {
+ protected val LOGGER: Logger = LoggerFactory.getLogger(classOf[CustomRocksDBConfig])
+
+ @VisibleForTesting var rocksDBConfig: Config = _
+ def setRocksDbConfig(cfg: Config): Unit = rocksDBConfig = cfg
+}
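+
+// A sketch of the `kafka.rocksdb` HOCON block consumed by setConfig below;
+// the key names mirror the lookups in setConfig, the values are illustrative
+// assumptions, not recommended defaults:
+//
+//   rocksdb {
+//     block.cache.size = 33554432
+//     block.size = 4096
+//     cache.index.and.filter.blocks = true
+//     max.write.buffer.number = 2
+//   }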
+
+class CustomRocksDBConfig extends RocksDBConfigSetter {
+
+ override def setConfig(storeName: String, options: Options, configs: util.Map[String, AnyRef]): Unit = {
+ require(rocksDBConfig != null, "rocksdb config should not be empty or null")
+
+ LOGGER.info("setting rocksdb configuration '{}'",
+ rocksDBConfig.root().render(ConfigRenderOptions.defaults().setOriginComments(false)))
+
+ val tableConfig = new BlockBasedTableConfig
+ tableConfig.setBlockCacheSize(rocksDBConfig.getLong("block.cache.size"))
+ tableConfig.setBlockSize(rocksDBConfig.getLong("block.size"))
+ tableConfig.setCacheIndexAndFilterBlocks(rocksDBConfig.getBoolean("cache.index.and.filter.blocks"))
+ options.setTableFormatConfig(tableConfig)
+ options.setMaxWriteBufferNumber(rocksDBConfig.getInt("max.write.buffer.number"))
+ }
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/KafkaConfiguration.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/KafkaConfiguration.scala
new file mode 100644
index 000000000..5201cbf08
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/KafkaConfiguration.scala
@@ -0,0 +1,45 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.config.entities
+
+import org.apache.commons.lang3.StringUtils
+import org.apache.kafka.streams.StreamsConfig
+import org.apache.kafka.streams.Topology.AutoOffsetReset
+
+/**
+ * Case class holding the required configuration for the graph-builder kstreams app
+ * @param streamsConfig valid instance of StreamsConfig
+ * @param consumerTopic topic name for the incoming graph edges
+ * @param producerTopic topic name for the materialized ktable changelogs
+ * @param autoOffsetReset offset reset policy for the kstreams app to start with
+ * @param closeTimeoutInMs timeout, in milliseconds, for closing the kstreams app
+ * @param aggregationWindowSec size of the aggregation window, in seconds
+ * @param aggregationRetentionDays retention of aggregated windows, in days
+ */
+case class KafkaConfiguration(streamsConfig: StreamsConfig,
+ consumerTopic: String,
+ producerTopic: String,
+ autoOffsetReset: AutoOffsetReset,
+ closeTimeoutInMs: Long,
+ aggregationWindowSec: Int,
+ aggregationRetentionDays: Int
+ ) {
+ require(streamsConfig != null)
+ require(StringUtils.isNotBlank(consumerTopic))
+ require(StringUtils.isNotBlank(producerTopic))
+ require(autoOffsetReset != null)
+ require(closeTimeoutInMs > 0)
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/ServiceClientConfiguration.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/ServiceClientConfiguration.scala
new file mode 100644
index 000000000..71d0f26f5
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/ServiceClientConfiguration.scala
@@ -0,0 +1,28 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.config.entities
+
+/**
+ * Configuration for the http client used to query peer service instances
+ *
+ * @param connectionTimeout connection timeout of the http client, in milliseconds
+ * @param socketTimeout socket timeout of the http client, in milliseconds
+ */
+case class ServiceClientConfiguration(connectionTimeout: Int, socketTimeout: Int) {
+ require(connectionTimeout > 0)
+ require(socketTimeout > 0)
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/ServiceConfiguration.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/ServiceConfiguration.scala
new file mode 100644
index 000000000..74964d0d8
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/ServiceConfiguration.scala
@@ -0,0 +1,34 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.config.entities
+
+import org.apache.commons.lang3.StringUtils
+
+/**
+ * Configuration for servlets and the servlet container
+ *
+ * @param host hostname this service instance advertises to its peers
+ * @param threads threads configuration of the servlet container
+ * @param http http configuration of the servlet container
+ * @param client configuration of the http client
+ */
+case class ServiceConfiguration(host: String, threads: ServiceThreadsConfiguration, http: ServiceHttpConfiguration, client: ServiceClientConfiguration) {
+ require(StringUtils.isNotEmpty(host))
+ require(threads != null)
+ require(http != null)
+ require(client != null)
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/ServiceHttpConfiguration.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/ServiceHttpConfiguration.scala
new file mode 100644
index 000000000..13773c0be
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/ServiceHttpConfiguration.scala
@@ -0,0 +1,28 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.config.entities
+
+/**
+ * Http configuration for servlet container
+ * @param port port to use by servlet container
+ * @param idleTimeout idle timeout for http connections, in milliseconds
+ */
+case class ServiceHttpConfiguration(port: Int, idleTimeout: Long) {
+ require(port > 0)
+ require(idleTimeout > 0)
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/ServiceThreadsConfiguration.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/ServiceThreadsConfiguration.scala
new file mode 100644
index 000000000..add238673
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/config/entities/ServiceThreadsConfiguration.scala
@@ -0,0 +1,30 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.config.entities
+
+/**
+ * Threads configuration for servlet container
+ * @param min minimum number of threads to use for running servlets
+ * @param max maximum number of threads to use for running servlets
+ * @param idleTimeout idle timeout for a pooled thread, in milliseconds
+ */
+case class ServiceThreadsConfiguration(min: Int, max: Int, idleTimeout: Int) {
+ require(min > 0)
+ require(max > min)
+ require(idleTimeout > 0)
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/EdgeStats.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/EdgeStats.scala
new file mode 100644
index 000000000..b65b38ddb
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/EdgeStats.scala
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.model
+
+import com.expedia.www.haystack.commons.entities.{GraphEdge, TagKeys}
+
+import scala.collection.mutable
+
+/**
+ * Object to hold stats for graph edges
+ *
+ * @param count edge count seen so far
+ * @param lastSeen timestamp the edge was last seen, in ms
+ * @param errorCount number of errors seen on this edge so far
+ * @param sourceTags accumulated tags of the source vertex
+ * @param destinationTags accumulated tags of the destination vertex
+ */
+case class EdgeStats(count: Long,
+ lastSeen: Long,
+ errorCount: Long,
+ sourceTags: mutable.Map[String, String] = mutable.HashMap[String, String](),
+ destinationTags: mutable.Map[String, String] = mutable.HashMap[String, String]()) {
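+ // A minimal usage sketch for update below (hypothetical values): starting
+ // from an empty EdgeStats(0, 0, 0), each incoming GraphEdge bumps count,
+ // refreshes lastSeen, accumulates the vertex tags and increments errorCount
+ // only when the source carries the error tag:
+ //   val updated = EdgeStats(0, 0, 0).update(edge) // count == 1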
+ def update(e: GraphEdge): EdgeStats = {
+ this.sourceTags ++= e.source.tags
+ this.sourceTags.remove(TagKeys.ERROR_KEY)
+ this.destinationTags ++= e.destination.tags
+ this.destinationTags.remove(TagKeys.ERROR_KEY)
+
+ val incrErrorCountBy = if (e.source.tags.getOrElse(TagKeys.ERROR_KEY, "false") == "true") 1 else 0
+ EdgeStats(
+ count + 1,
+ lastSeen(e),
+ errorCount + incrErrorCountBy,
+ sourceTags,
+ destinationTags)
+ }
+
+ private def lastSeen(e: GraphEdge): Long = {
+ if (e.sourceTimestamp == 0) System.currentTimeMillis() else Math.max(e.sourceTimestamp, this.lastSeen)
+ }
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/EdgeStatsSerde.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/EdgeStatsSerde.scala
new file mode 100644
index 000000000..0d4ca054f
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/EdgeStatsSerde.scala
@@ -0,0 +1,60 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.model
+
+import java.util
+
+import org.apache.kafka.common.serialization.{Deserializer, Serde, Serializer}
+import org.json4s.DefaultFormats
+import org.json4s.jackson.Serialization
+
+class EdgeStatsSerde extends Serde[EdgeStats] {
+ implicit val formats = DefaultFormats
+
+ override def deserializer(): Deserializer[EdgeStats] = new EdgeStatsDeserializer
+
+ override def serializer(): Serializer[EdgeStats] = new EdgeStatsSerializer
+
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+}
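+
+// A round-trip sketch (assuming json4s default field serialization; the json
+// payload shape is illustrative, not a pinned wire format):
+//   val serde = new EdgeStatsSerde
+//   val bytes = serde.serializer().serialize("topic", EdgeStats(1, 1L, 0))
+//   val stats = serde.deserializer().deserialize("topic", bytes) // EdgeStats(1, 1, 0)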
+
+class EdgeStatsSerializer extends Serializer[EdgeStats] {
+ implicit val formats = DefaultFormats
+
+ override def serialize(topic: String, edgeStats: EdgeStats): Array[Byte] = {
+ Serialization.write(edgeStats).getBytes("utf-8")
+ }
+
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+}
+
+class EdgeStatsDeserializer extends Deserializer[EdgeStats] {
+ implicit val formats = DefaultFormats
+
+ override def deserialize(topic: String, data: Array[Byte]): EdgeStats = {
+ if (data == null) EdgeStats(0, 0, 0) else Serialization.read[EdgeStats](new String(data, "utf-8"))
+ }
+
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/OperationGraph.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/OperationGraph.scala
new file mode 100644
index 000000000..b5cc01963
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/OperationGraph.scala
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.model
+
+/**
+ * Operation graph
+ * @param edges list of edges in the graph
+ */
+case class OperationGraph(edges: Seq[OperationGraphEdge]) {
+ require(edges != null)
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/OperationGraphEdge.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/OperationGraphEdge.scala
new file mode 100644
index 000000000..58af7e180
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/OperationGraphEdge.scala
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.model
+
+import org.apache.commons.lang3.StringUtils
+
+/**
+ * A graph edge representing the relationship between two services over an operation
+ * @param source source service
+ * @param destination destination service
+ * @param operation operation over which the two services interact
+ * @param stats stats around the edge
+ * @param effectiveFrom start timestamp from which stats are collected
+ * @param effectiveTo end timestamp till which stats are collected
+ */
+case class OperationGraphEdge(source: String,
+ destination: String,
+ operation: String,
+ stats: EdgeStats,
+ effectiveFrom: Long,
+ effectiveTo: Long) {
+ require(StringUtils.isNotEmpty(source))
+ require(StringUtils.isNotEmpty(destination))
+ require(StringUtils.isNotEmpty(operation))
+ require(stats != null)
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/ServiceGraph.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/ServiceGraph.scala
new file mode 100644
index 000000000..cf327cfb7
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/ServiceGraph.scala
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.model
+
+/**
+ * Service graph
+ * @param edges list of edges in the graph
+ */
+case class ServiceGraph(edges: Seq[ServiceGraphEdge]) {
+ require(edges != null)
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/ServiceGraphEdge.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/ServiceGraphEdge.scala
new file mode 100644
index 000000000..2c5905a25
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/model/ServiceGraphEdge.scala
@@ -0,0 +1,80 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.model
+
+import scala.collection.mutable
+
+/**
+ * A graph edge representing the relationship between two services
+ *
+ * @param source source service
+ * @param destination destination service
+ * @param stats stats around the edge
+ * @param effectiveFrom start timestamp from which stats are collected
+ * @param effectiveTo end timestamp till which stats are collected
+ *
+ */
+case class ServiceGraphEdge(source: ServiceGraphVertex,
+ destination: ServiceGraphVertex,
+ stats: ServiceEdgeStats,
+ effectiveFrom: Long,
+ effectiveTo: Long) {
+ require(source != null)
+ require(destination != null)
+ require(stats != null)
+
+ def mergeTags(first: Map[String, String], second: Map[String, String]): Map[String, String] = {
+ val merged = new mutable.HashMap[String, mutable.HashSet[String]]()
+
+ def merge(tags: Map[String, String]): Unit = {
+ tags.foreach {
+ case (key, value) =>
+ val valueSet = merged.getOrElseUpdate(key, new mutable.HashSet[String]())
+ valueSet ++= value.split(",")
+ }
+ }
+
+ merge(first)
+ merge(second)
+
+ merged.mapValues(_.mkString(",")).toMap
+ }
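+ // Worked example for mergeTags above (hypothetical tags): merging
+ // Map("dc" -> "us-west,us-east") with Map("dc" -> "us-east,eu-west") yields
+ // Map("dc" -> "us-west,us-east,eu-west"), modulo ordering, since each value
+ // is split on commas and collected into a HashSet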
+
+ def +(other: ServiceGraphEdge): ServiceGraphEdge = {
+ val sourceVertex = this.source.copy(tags = mergeTags(other.source.tags, this.source.tags))
+ val destinationVertex = this.destination.copy(tags = mergeTags(other.destination.tags, this.destination.tags))
+ ServiceGraphEdge(
+ sourceVertex,
+ destinationVertex,
+ this.stats + other.stats,
+ Math.min(this.effectiveFrom, other.effectiveFrom),
+ Math.max(this.effectiveTo, other.effectiveTo))
+ }
+}
+
+case class ServiceGraphVertex(name: String, tags: Map[String, String] = Map())
+
+case class ServiceEdgeStats(count: Long,
+ lastSeen: Long,
+ errorCount: Long) {
+ def +(other: ServiceEdgeStats): ServiceEdgeStats = {
+ ServiceEdgeStats(
+ this.count + other.count,
+ Math.max(this.lastSeen, other.lastSeen),
+ this.errorCount + other.errorCount)
+ }
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/HttpService.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/HttpService.scala
new file mode 100644
index 000000000..ef2220036
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/HttpService.scala
@@ -0,0 +1,68 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.service
+
+import javax.servlet.Servlet
+
+import com.expedia.www.haystack.service.graph.graph.builder.config.entities.ServiceConfiguration
+import org.eclipse.jetty.server.{HttpConfiguration, HttpConnectionFactory, Server, ServerConnector}
+import org.eclipse.jetty.servlet.{ServletContextHandler, ServletHolder}
+import org.eclipse.jetty.util.thread.QueuedThreadPool
+import org.slf4j.LoggerFactory
+
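+// A minimal wiring sketch (hypothetical endpoint path; the real map is
+// assembled by the application bootstrap):
+//   val http = new HttpService(serviceConfig, Map(
+//     "/isworking" -> new IsWorkingResource()))
+//   http.start()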
+class HttpService(config: ServiceConfiguration, resources: Map[String, Servlet]) extends AutoCloseable {
+ private val LOGGER = LoggerFactory.getLogger(classOf[HttpService])
+
+ // TODO move server creation to a supplier
+ private val server = {
+ // threadpool to run servlets
+ val threadPool = new QueuedThreadPool(config.threads.max, config.threads.min, config.threads.idleTimeout)
+
+ // building jetty server
+ val server = new Server(threadPool)
+
+ // configuring jetty's http parameters
+ val httpConnector = new ServerConnector(server, new HttpConnectionFactory(new HttpConfiguration))
+ httpConnector.setPort(config.http.port)
+ httpConnector.setIdleTimeout(config.http.idleTimeout)
+ server.addConnector(httpConnector)
+
+ // adding servlets
+ val context = new ServletContextHandler(server, "/")
+ resources.foreach {
+ case (path, servlet) =>
+ LOGGER.info(s"adding servlet $servlet at $path")
+ context.addServlet(new ServletHolder(servlet), path)
+ }
+
+ // jetty server is fully configured; return it
+ LOGGER.info("jetty server constructed")
+ server
+ }
+
+ def start(): Unit = {
+ server.start()
+ }
+
+ def close(): Unit = {
+ server.stop()
+ server.destroy()
+ }
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/ManagedHttpService.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/ManagedHttpService.scala
new file mode 100644
index 000000000..1c704a33f
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/ManagedHttpService.scala
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.service
+
+import java.util.concurrent.atomic.AtomicBoolean
+
+import com.expedia.www.haystack.commons.kstreams.app.ManagedService
+
+class ManagedHttpService(service: HttpService) extends ManagedService {
+ require(service != null)
+ private val isRunning: AtomicBoolean = new AtomicBoolean(false)
+
+ override def start(): Unit = {
+ service.start()
+ isRunning.set(true)
+ }
+
+ override def stop(): Unit = {
+ if (isRunning.getAndSet(false)) {
+ service.close()
+ }
+ }
+
+ override def hasStarted: Boolean = isRunning.get()
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/fetchers/LocalOperationEdgesFetcher.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/fetchers/LocalOperationEdgesFetcher.scala
new file mode 100644
index 000000000..ca6fca32c
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/fetchers/LocalOperationEdgesFetcher.scala
@@ -0,0 +1,50 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.service.fetchers
+
+import com.expedia.www.haystack.commons.entities.GraphEdge
+import com.expedia.www.haystack.service.graph.graph.builder.model.{EdgeStats, OperationGraphEdge}
+import com.expedia.www.haystack.service.graph.graph.builder.service.utils.IOUtils
+import org.apache.kafka.streams.kstream.Windowed
+import org.apache.kafka.streams.state.{KeyValueIterator, QueryableStoreTypes, ReadOnlyWindowStore}
+import org.apache.kafka.streams.{KafkaStreams, KeyValue}
+
+import scala.collection.JavaConverters._
+
+class LocalOperationEdgesFetcher(streams: KafkaStreams, storeName: String) {
+ private lazy val store: ReadOnlyWindowStore[GraphEdge, EdgeStats] =
+ streams.store(storeName, QueryableStoreTypes.windowStore[GraphEdge, EdgeStats]())
+
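+ // fetchAll scans all keys across every window overlapping [from, to] in this
+ // instance's local state store; each (windowed key, stats) pair is mapped to
+ // one OperationGraphEdge, with the window end clipped to the current time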
+ def fetchEdges(from: Long, to: Long): List[OperationGraphEdge] = {
+ var iterator: KeyValueIterator[Windowed[GraphEdge], EdgeStats] = null
+ try {
+ iterator = store.fetchAll(from, to)
+ val edges = for (kv: KeyValue[Windowed[GraphEdge], EdgeStats] <- iterator.asScala)
+ yield OperationGraphEdge(
+ kv.key.key.source.name,
+ kv.key.key.destination.name,
+ kv.key.key.operation,
+ kv.value,
+ kv.key.window().start(),
+ Math.min(System.currentTimeMillis(), kv.key.window().end()))
+ edges.toList
+ } finally {
+ IOUtils.closeSafely(iterator)
+ }
+ }
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/fetchers/LocalServiceEdgesFetcher.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/fetchers/LocalServiceEdgesFetcher.scala
new file mode 100644
index 000000000..d8b8c34cf
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/fetchers/LocalServiceEdgesFetcher.scala
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.service.fetchers
+
+import com.expedia.www.haystack.commons.entities.GraphEdge
+import com.expedia.www.haystack.service.graph.graph.builder.model.{EdgeStats, ServiceEdgeStats, ServiceGraphEdge, ServiceGraphVertex}
+import com.expedia.www.haystack.service.graph.graph.builder.service.utils.EdgesMerger._
+import com.expedia.www.haystack.service.graph.graph.builder.service.utils.IOUtils
+import org.apache.kafka.streams.kstream.Windowed
+import org.apache.kafka.streams.state.{KeyValueIterator, QueryableStoreTypes, ReadOnlyWindowStore}
+import org.apache.kafka.streams.{KafkaStreams, KeyValue}
+
+import scala.collection.JavaConverters._
+
+class LocalServiceEdgesFetcher(streams: KafkaStreams, storeName: String) {
+ private lazy val store: ReadOnlyWindowStore[GraphEdge, EdgeStats] =
+ streams.store(storeName, QueryableStoreTypes.windowStore[GraphEdge, EdgeStats]())
+
+ def fetchEdges(from: Long, to: Long): Seq[ServiceGraphEdge] = {
+ var iterator: KeyValueIterator[Windowed[GraphEdge], EdgeStats] = null
+ try {
+ iterator = store.fetchAll(from, to)
+ val serviceGraphEdges =
+ for (kv: KeyValue[Windowed[GraphEdge], EdgeStats] <- iterator.asScala)
+ yield ServiceGraphEdge(
+ ServiceGraphVertex(kv.key.key.source.name, kv.value.sourceTags.toMap),
+ ServiceGraphVertex(kv.key.key.destination.name, kv.value.destinationTags.toMap),
+ ServiceEdgeStats(kv.value.count, kv.value.lastSeen, kv.value.errorCount),
+ kv.key.window().start(), Math.min(System.currentTimeMillis(), kv.key.window().end()))
+
+ getMergedServiceEdges(serviceGraphEdges.toSeq)
+ } finally {
+ IOUtils.closeSafely(iterator)
+ }
+ }
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/fetchers/RemoteOperationEdgesFetcher.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/fetchers/RemoteOperationEdgesFetcher.scala
new file mode 100644
index 000000000..ce6a92caa
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/fetchers/RemoteOperationEdgesFetcher.scala
@@ -0,0 +1,64 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.service.fetchers
+
+import java.util.concurrent.Executors
+
+import com.expedia.www.haystack.service.graph.graph.builder.config.entities.ServiceClientConfiguration
+import com.expedia.www.haystack.service.graph.graph.builder.model.{OperationGraph, OperationGraphEdge}
+import org.apache.http.client.fluent.Request
+import org.apache.http.client.utils.URIBuilder
+import org.json4s.DefaultFormats
+import org.json4s.jackson.Serialization
+
+import scala.concurrent.{ExecutionContext, Future}
+import scala.util.Try
+
+class RemoteOperationEdgesFetcher(clientConfig: ServiceClientConfiguration) extends AutoCloseable {
+
+ private val dispatcher = ExecutionContext.fromExecutorService(
+ Executors.newFixedThreadPool(Math.min(Runtime.getRuntime.availableProcessors(), 2)))
+
+ implicit val formats = DefaultFormats
+
+ def fetchEdges(host: String, port: Int, from: Long, to: Long): Future[Seq[OperationGraphEdge]] = {
+ val request = new URIBuilder()
+ .setScheme("http")
+ .setPath("/operationgraph/local")
+ .setParameter("from", from.toString)
+ .setParameter("to", to.toString)
+ .setHost(host)
+ .setPort(port)
+ .build()
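+ // resulting URI, e.g. http://peer-host:8080/operationgraph/local?from=1&to=2
+ // (host and port are whatever the kstreams metadata advertises for the peer)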
+
+ Future {
+ val response = Request.Get(request)
+ .connectTimeout(clientConfig.connectionTimeout)
+ .socketTimeout(clientConfig.socketTimeout)
+ .execute()
+ .returnContent()
+ .asString()
+
+ Serialization.read[OperationGraph](response).edges
+ }(dispatcher)
+ }
+
+ override def close(): Unit = {
+ Try(this.dispatcher.shutdown())
+ }
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/fetchers/RemoteServiceEdgesFetcher.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/fetchers/RemoteServiceEdgesFetcher.scala
new file mode 100644
index 000000000..0ac7c1a84
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/fetchers/RemoteServiceEdgesFetcher.scala
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.service.fetchers
+
+import java.util.concurrent.Executors
+
+import com.expedia.www.haystack.service.graph.graph.builder.config.entities.ServiceClientConfiguration
+import com.expedia.www.haystack.service.graph.graph.builder.model.{ServiceGraph, ServiceGraphEdge}
+import org.apache.http.client.fluent.Request
+import org.apache.http.client.utils.URIBuilder
+import org.json4s.DefaultFormats
+import org.json4s.jackson.Serialization
+
+import scala.concurrent.{ExecutionContext, Future}
+import scala.util.Try
+
+class RemoteServiceEdgesFetcher(clientConfig: ServiceClientConfiguration) extends AutoCloseable {
+ implicit val formats = DefaultFormats
+
+ private val dispatcher = ExecutionContext.fromExecutorService(
+ Executors.newFixedThreadPool(Math.min(Runtime.getRuntime.availableProcessors(), 2)))
+
+ def fetchEdges(host: String, port: Int, from: Long, to: Long): Future[Seq[ServiceGraphEdge]] = {
+ val uri = new URIBuilder()
+ .setScheme("http")
+ .setPath("/servicegraph/local")
+ .setParameter("from", from.toString)
+ .setParameter("to", to.toString)
+ .setHost(host)
+ .setPort(port)
+ .build()
+
+ Future {
+ val response = Request.Get(uri)
+ .connectTimeout(clientConfig.connectionTimeout)
+ .socketTimeout(clientConfig.socketTimeout)
+ .execute()
+ .returnContent()
+ .asString()
+ Serialization.read[ServiceGraph](response).edges
+ }(dispatcher)
+ }
+
+ override def close(): Unit = {
+ Try(dispatcher.shutdown())
+ }
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/GlobalOperationGraphResource.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/GlobalOperationGraphResource.scala
new file mode 100644
index 000000000..b5cac4c64
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/GlobalOperationGraphResource.scala
@@ -0,0 +1,72 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.service.resources
+
+import javax.servlet.http.HttpServletRequest
+
+import com.expedia.www.haystack.service.graph.graph.builder.config.entities.ServiceConfiguration
+import com.expedia.www.haystack.service.graph.graph.builder.model.{OperationGraph, OperationGraphEdge}
+import com.expedia.www.haystack.service.graph.graph.builder.service.fetchers.{LocalOperationEdgesFetcher, RemoteOperationEdgesFetcher}
+import com.expedia.www.haystack.service.graph.graph.builder.service.utils.QueryTimestampReader
+import org.apache.kafka.streams.KafkaStreams
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+import scala.concurrent.ExecutionContext.Implicits.global
+import scala.concurrent.duration._
+import scala.concurrent.{Await, Future}
+
+class GlobalOperationGraphResource(streams: KafkaStreams,
+ storeName: String,
+ serviceConfig: ServiceConfiguration,
+ localEdgesFetcher: LocalOperationEdgesFetcher,
+ remoteEdgesFetcher: RemoteOperationEdgesFetcher)(implicit val timestampReader: QueryTimestampReader)
+extends Resource("operationgraph") {
+ private val LOGGER = LoggerFactory.getLogger(classOf[GlobalOperationGraphResource])
+ private val globalEdgeCount = metricRegistry.histogram("operationgraph.global.edges")
+
+ protected override def get(request: HttpServletRequest): OperationGraph = {
+ val from = timestampReader.fromTimestamp(request)
+ val to = timestampReader.toTimestamp(request)
+
+ // get the list of all hosts containing the operation-graph store,
+ // fetch the local operation graph from each host,
+ // and merge the local graphs to create the global graph
+ val edgesListFuture: Iterable[Future[Seq[OperationGraphEdge]]] = streams
+ .allMetadataForStore(storeName)
+ .asScala
+ .map(host => {
+ if (host.host() == serviceConfig.host) {
+ LOGGER.info(s"operation graph from local returned is ivnoked")
+ Future(localEdgesFetcher.fetchEdges(from, to))
+ } else {
+ LOGGER.info(s"operation graph from ${host.host()} is invoked")
+ remoteEdgesFetcher.fetchEdges(host.host(), host.port(), from, to)
+ }
+ })
+
+ val singleResultFuture = Future.sequence(edgesListFuture)
+ val edgesList = Await
+ .result(singleResultFuture, serviceConfig.client.socketTimeout.millis)
+ .foldLeft(mutable.ListBuffer[OperationGraphEdge]())((buffer, coll) => buffer ++= coll)
+
+ globalEdgeCount.update(edgesList.length)
+ OperationGraph(edgesList)
+ }
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/GlobalServiceGraphResource.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/GlobalServiceGraphResource.scala
new file mode 100644
index 000000000..dfd988a52
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/GlobalServiceGraphResource.scala
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.service.resources
+
+import javax.servlet.http.HttpServletRequest
+
+import com.expedia.www.haystack.service.graph.graph.builder.config.entities.ServiceConfiguration
+import com.expedia.www.haystack.service.graph.graph.builder.model.{ServiceGraph, ServiceGraphEdge}
+import com.expedia.www.haystack.service.graph.graph.builder.service.fetchers.{LocalServiceEdgesFetcher, RemoteServiceEdgesFetcher}
+import com.expedia.www.haystack.service.graph.graph.builder.service.utils.EdgesMerger._
+import com.expedia.www.haystack.service.graph.graph.builder.service.utils.QueryTimestampReader
+import org.apache.kafka.streams.KafkaStreams
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+import scala.concurrent.ExecutionContext.Implicits.global
+import scala.concurrent.duration._
+import scala.concurrent.{Await, Future}
+
+class GlobalServiceGraphResource(streams: KafkaStreams,
+ storeName: String,
+ serviceConfig: ServiceConfiguration,
+ localEdgesFetcher: LocalServiceEdgesFetcher,
+ remoteEdgesFetcher: RemoteServiceEdgesFetcher)(implicit val timestampReader: QueryTimestampReader)
+ extends Resource("servicegraph") {
+ private val LOGGER = LoggerFactory.getLogger(classOf[GlobalServiceGraphResource])
+ private val globalEdgeCount = metricRegistry.histogram("servicegraph.global.edges")
+
+ protected override def get(request: HttpServletRequest): ServiceGraph = {
+ val from = timestampReader.fromTimestamp(request)
+ val to = timestampReader.toTimestamp(request)
+
+ // get the list of all hosts containing the service-graph store,
+ // fetch the local service graph from each host,
+ // and merge the local graphs to create the global graph
+ val edgesListFuture: Iterable[Future[Seq[ServiceGraphEdge]]] = streams
+ .allMetadataForStore(storeName)
+ .asScala
+ .map(host => {
+ if (host.host() == serviceConfig.host) {
+ LOGGER.info(s"service graph from local invoked")
+ Future(localEdgesFetcher.fetchEdges(from, to))
+ } else {
+ LOGGER.info(s"service graph fetch from ${host.host()} is invoked")
+ remoteEdgesFetcher.fetchEdges(host.host(), host.port(), from, to)
+ }
+ })
+
+ val singleResultFuture = Future.sequence(edgesListFuture)
+ val edges = Await
+ .result(singleResultFuture, serviceConfig.client.socketTimeout.millis)
+ .foldLeft(mutable.ListBuffer[ServiceGraphEdge]())((buffer, coll) => buffer ++= coll)
+
+ val mergedEdgeList = getMergedServiceEdges(edges)
+ globalEdgeCount.update(mergedEdgeList.length)
+ ServiceGraph(mergedEdgeList)
+ }
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/IsWorkingResource.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/IsWorkingResource.scala
new file mode 100644
index 000000000..5086e253a
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/IsWorkingResource.scala
@@ -0,0 +1,27 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.service.resources
+
+import javax.servlet.http.HttpServletRequest
+
+class IsWorkingResource() extends Resource("isworking") {
+
+ protected override def get(request: HttpServletRequest): IsWorking = new IsWorking()
+
+ // case class so that json4s serializes the isWorking field; a plain class
+ // with a non-val constructor param would serialize to an empty json object
+ case class IsWorking(isWorking: Boolean = true)
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/LocalOperationGraphResource.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/LocalOperationGraphResource.scala
new file mode 100644
index 000000000..304cfebf9
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/LocalOperationGraphResource.scala
@@ -0,0 +1,38 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.service.resources
+
+import javax.servlet.http.HttpServletRequest
+
+import com.expedia.www.haystack.service.graph.graph.builder.model.OperationGraph
+import com.expedia.www.haystack.service.graph.graph.builder.service.fetchers.{LocalOperationEdgesFetcher, LocalServiceEdgesFetcher}
+import com.expedia.www.haystack.service.graph.graph.builder.service.utils.QueryTimestampReader
+
+class LocalOperationGraphResource(localEdgesFetcher: LocalOperationEdgesFetcher)
+ (implicit val timestampReader: QueryTimestampReader) extends Resource("operationgraph.local") {
+ private val edgeCount = metricRegistry.histogram("operationgraph.local.edges")
+
+ protected override def get(request: HttpServletRequest): OperationGraph = {
+ val from = timestampReader.fromTimestamp(request)
+ val to = timestampReader.toTimestamp(request)
+
+ val localGraph = OperationGraph(localEdgesFetcher.fetchEdges(from, to))
+ edgeCount.update(localGraph.edges.length)
+ localGraph
+ }
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/LocalServiceGraphResource.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/LocalServiceGraphResource.scala
new file mode 100644
index 000000000..56c5c68c1
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/LocalServiceGraphResource.scala
@@ -0,0 +1,39 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.service.resources
+
+import javax.servlet.http.HttpServletRequest
+
+import com.expedia.www.haystack.service.graph.graph.builder.model.ServiceGraph
+import com.expedia.www.haystack.service.graph.graph.builder.service.fetchers.LocalServiceEdgesFetcher
+import com.expedia.www.haystack.service.graph.graph.builder.service.utils.QueryTimestampReader
+
+class LocalServiceGraphResource(localEdgesFetcher: LocalServiceEdgesFetcher)(implicit val timestampReader: QueryTimestampReader)
+ extends Resource("servicegraph.local") {
+
+ private val edgeCount = metricRegistry.histogram("servicegraph.local.edges")
+
+ protected override def get(request: HttpServletRequest): ServiceGraph = {
+ val from = timestampReader.fromTimestamp(request)
+ val to = timestampReader.toTimestamp(request)
+
+ val localGraph = ServiceGraph(localEdgesFetcher.fetchEdges(from, to))
+ edgeCount.update(localGraph.edges.length)
+ localGraph
+ }
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/Resource.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/Resource.scala
new file mode 100644
index 000000000..d7327c5c7
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/resources/Resource.scala
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.service.resources
+
+import javax.servlet.http.{HttpServlet, HttpServletRequest, HttpServletResponse}
+
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import org.apache.http.entity.ContentType
+import org.json4s.DefaultFormats
+import org.json4s.jackson.Serialization
+import org.slf4j.LoggerFactory
+
+import scala.util.{Failure, Success, Try}
+
+abstract class Resource(endpointName: String) extends HttpServlet with MetricsSupport {
+ private val LOGGER = LoggerFactory.getLogger(classOf[Resource])
+ private val timer = metricRegistry.timer(endpointName)
+ private val failureCount = metricRegistry.meter(s"$endpointName.failure")
+
+ implicit val formats = DefaultFormats
+
+ protected override def doGet(request: HttpServletRequest, response: HttpServletResponse): Unit = {
+ val time = timer.time()
+
+ Try(get(request)) match {
+ case Success(getResponse) =>
+ response.setContentType(ContentType.APPLICATION_JSON.getMimeType)
+ response.setStatus(HttpServletResponse.SC_OK)
+ response.getWriter.print(Serialization.write(getResponse))
+ LOGGER.info(s"accesslog: ${request.getRequestURI} completed successfully")
+
+ case Failure(ex) =>
+ response.setContentType(ContentType.APPLICATION_JSON.getMimeType)
+ response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR)
+ response.getWriter.print(Serialization.write(new Error(ex.getMessage)))
+ failureCount.mark()
+ LOGGER.error(s"accesslog: ${request.getRequestURI} failed", ex)
+ }
+
+ response.getWriter.flush()
+ time.stop()
+ }
+
+ // endpoint method for child resources to implement
+ protected def get(request: HttpServletRequest): Object
+
+ // case class so that json4s serializes the message and error fields
+ case class Error(message: String, error: Boolean = true)
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/utils/EdgesMerger.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/utils/EdgesMerger.scala
new file mode 100644
index 000000000..d3bf59f0e
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/utils/EdgesMerger.scala
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.service.graph.graph.builder.service.utils
+
+import com.expedia.www.haystack.service.graph.graph.builder.model.{EdgeStats, OperationGraphEdge, ServiceGraphEdge}
+
+object EdgesMerger {
+ def getMergedServiceEdges(serviceGraphEdges: Seq[ServiceGraphEdge]): Seq[ServiceGraphEdge] = {
+ // group by source and destination service
+ val groupedEdges = serviceGraphEdges.groupBy(edge => ServicePair(edge.source.name, edge.destination.name))
+
+ // go through the edges grouped by source and destination:
+ // add the counts of all edges in a group to get the total count for the
+ // source-destination pair, and take the latest last-seen across the group
+ // as the pair's last-seen
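+ // Worked example (hypothetical stats): two edges for the pair (svc-a, svc-b)
+ // with counts 3 and 4 merge into a single edge with count 7, the later of
+ // the two last-seen values, the summed error count and the widest effective
+ // range (see ServiceGraphEdge.+ and ServiceEdgeStats.+)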
+ groupedEdges.map {
+ case (_, edges) => edges.reduce(_ + _)
+ }.toSeq
+ }
+
+ def getMergedOperationEdges(operationGraphEdges: Seq[OperationGraphEdge]): Seq[OperationGraphEdge] = {
+ // group by source service, destination service and operation
+ val groupedEdges = operationGraphEdges.groupBy(edge => OperationTrio(edge.source, edge.destination, edge.operation))
+
+ // go through the edges grouped by operation trio:
+ // add the counts of all edges in a group to get the total count for the
+ // trio, and take the latest last-seen across the group as the trio's last-seen
+ groupedEdges.map {
+ case (trio, edges) =>
+ edges.reduce((e1, e2) =>
+ OperationGraphEdge(
+ trio.source,
+ trio.destination,
+ trio.operation,
+ EdgeStats(
+ e1.stats.count + e2.stats.count,
+ Math.max(e1.stats.lastSeen, e2.stats.lastSeen),
+ e1.stats.errorCount + e2.stats.errorCount),
+ Math.min(e1.effectiveFrom, e2.effectiveFrom),
+ Math.max(e1.effectiveTo, e2.effectiveTo)))
+ }
+ .toSeq
+ }
+
+ private case class ServicePair(source: String, destination: String)
+
+ private case class OperationTrio(source: String, destination: String, operation: String)
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/utils/IOUtils.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/utils/IOUtils.scala
new file mode 100644
index 000000000..2c1fe19c3
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/utils/IOUtils.scala
@@ -0,0 +1,35 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.service.graph.graph.builder.service.utils
+
+import java.io.Closeable
+
+import org.slf4j.{Logger, LoggerFactory}
+
+object IOUtils {
+  private val LOGGER: Logger = LoggerFactory.getLogger(IOUtils.getClass)
+
+ def closeSafely(resource: Closeable): Unit = {
+ try {
+ if (resource != null) resource.close()
+ } catch {
+      case ex: Exception => LOGGER.error("Failed to close the resource", ex)
+ }
+ }
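+
+  // usage: IOUtils.closeSafely(stream) in a finally block releases the resource
+  // without letting a failing close() mask the original exception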
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/utils/QueryTimestampReader.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/utils/QueryTimestampReader.scala
new file mode 100644
index 000000000..4f779afb3
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/service/utils/QueryTimestampReader.scala
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.service.utils
+
+import java.time.Instant
+import java.time.temporal.ChronoUnit
+import javax.servlet.http.HttpServletRequest
+
+import org.apache.commons.lang3.StringUtils
+
+class QueryTimestampReader(aggregateWindowSec: Long) {
+
+ def toTimestamp(request: HttpServletRequest): Long = {
+ if (StringUtils.isEmpty(request.getParameter("to"))) {
+ Instant.now().toEpochMilli
+ } else {
+ extractTime(request, "to")
+ }
+ }
+
+ def fromTimestamp(request: HttpServletRequest): Long = {
+ val timestamp = if (StringUtils.isEmpty(request.getParameter("from"))) {
+ Instant.now().minus(24, ChronoUnit.HOURS).toEpochMilli
+ } else {
+ extractTime(request, "from")
+ }
+ adjustTimeWithAggregateWindow(timestamp)
+ }
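+
+  // e.g. a query such as ?from=1529000000000&to=1529003600000 (epoch millis);
+  // an omitted "from" defaults to now - 24h and an omitted "to" defaults to now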
+
+ private def extractTime(request: HttpServletRequest, key: String): Long = {
+ request.getParameter(key).toLong
+ }
+
+ private def adjustTimeWithAggregateWindow(epochMillis: Long): Long = {
+    // integer division truncates to the containing window, avoiding the double
+    // precision loss of Math.floor on large epoch values
+    (epochMillis / (aggregateWindowSec * 1000)) * aggregateWindowSec * 1000
+ }
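+
+  // worked example: with aggregateWindowSec = 300 the window is 300,000 ms, so an
+  // epoch of 1,530,000,123,456 ms aligns down to 1,530,000,000,000 ms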
+}
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/stream/ServiceGraphStreamSupplier.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/stream/ServiceGraphStreamSupplier.scala
new file mode 100644
index 000000000..ce5105556
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/stream/ServiceGraphStreamSupplier.scala
@@ -0,0 +1,84 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.stream
+
+import java.util.concurrent.TimeUnit
+import java.util.function.Supplier
+
+import com.expedia.www.haystack.commons.entities.GraphEdge
+import com.expedia.www.haystack.commons.kstreams.GraphEdgeTimestampExtractor
+import com.expedia.www.haystack.commons.kstreams.serde.graph.{GraphEdgeKeySerde, GraphEdgeValueSerde}
+import com.expedia.www.haystack.service.graph.graph.builder.config.entities.KafkaConfiguration
+import com.expedia.www.haystack.service.graph.graph.builder.model.{EdgeStats, EdgeStatsSerde}
+import org.apache.kafka.streams.kstream._
+import org.apache.kafka.streams.{Consumed, StreamsBuilder, Topology}
+
+class ServiceGraphStreamSupplier(kafkaConfiguration: KafkaConfiguration) extends Supplier[Topology] {
+ override def get(): Topology = initialize(new StreamsBuilder)
+
+ private def tumblingWindow(): TimeWindows = {
+ TimeWindows
+ .of(TimeUnit.SECONDS.toMillis(kafkaConfiguration.aggregationWindowSec))
+ .until(TimeUnit.DAYS.toMillis(kafkaConfiguration.aggregationRetentionDays))
+ }
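+
+  // with the integration config (aggregate.window.sec = 300, retention.days = 3) this
+  // yields non-overlapping 5-minute windows whose state is retained for 3 days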
+
+ private def initialize(builder: StreamsBuilder): Topology = {
+
+ val initializer: Initializer[EdgeStats] = () => EdgeStats(0, 0, 0)
+
+ val aggregator: Aggregator[GraphEdge, GraphEdge, EdgeStats] = {
+ (_: GraphEdge, v: GraphEdge, stats: EdgeStats) => stats.update(v)
+ }
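+
+    // the aggregator folds each incoming GraphEdge into the running EdgeStats for its
+    // window; per EdgeStatsSpec, update() bumps the count, refreshes lastSeen and
+    // tallies edges whose source tags carry an error key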
+
+ builder
+ //
+ // read edges from graph-nodes topic
+ // graphEdge is both the key and value
+ // use graph edge timestamp
+ .stream(
+ kafkaConfiguration.consumerTopic,
+ Consumed.`with`(
+ new GraphEdgeKeySerde,
+ new GraphEdgeValueSerde,
+ new GraphEdgeTimestampExtractor,
+ kafkaConfiguration.autoOffsetReset
+ )
+ )
+ //
+ // group by key for doing aggregations on edges
+ // this will not cause any repartition
+ .groupByKey(
+ Serialized.`with`(new GraphEdgeKeySerde, new GraphEdgeValueSerde)
+ )
+ //
+ // create tumbling windows for edges
+      .windowedBy(tumblingWindow())
+      //
+      // calculate stats for edges in each window
+      // keep the resulting ktable as a materialized view in memory
+      // enable logging so the ktable changelog topic is persisted and replicated to multiple brokers
+      .aggregate(
+        initializer,
+        aggregator,
+        Materialized.as(kafkaConfiguration.producerTopic)
+          .withKeySerde(new GraphEdgeKeySerde)
+          .withValueSerde(new EdgeStatsSerde)
+          .withCachingEnabled())
+
+ // build stream topology and return
+ builder.build()
+ }
+}
\ No newline at end of file
diff --git a/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/stream/StreamSupplier.scala b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/stream/StreamSupplier.scala
new file mode 100644
index 000000000..966e0e22e
--- /dev/null
+++ b/service-graph/graph-builder/src/main/scala/com.expedia.www.haystack.service.graph.graph.builder/stream/StreamSupplier.scala
@@ -0,0 +1,107 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.service.graph.graph.builder.stream
+
+import java.util.Properties
+import java.util.concurrent.TimeUnit
+import java.util.function.Supplier
+
+import com.expedia.www.haystack.commons.health.HealthStatusController
+import com.expedia.www.haystack.commons.kstreams.app.StateChangeListener
+import org.apache.kafka.clients.admin.AdminClient
+import org.apache.kafka.streams.{KafkaStreams, StreamsConfig, Topology}
+import org.slf4j.LoggerFactory
+
+import scala.util.Try
+
+/**
+  * Factory class to create a KafkaStreams instance and wrap it as a simple service {@see ManagedKafkaStreams}
+  *
+  * Before building the stream, this class verifies that the consuming topic exists in the cluster
+  *
+  * @param topologySupplier A supplier that creates and returns a Kafka Stream Topology
+  * @param healthController health controller
+  * @param streamsConfig    Configuration instance for KafkaStreams
+  * @param consumerTopic    Name of the topic the stream consumes from
+  */
+//noinspection ScalaDocInlinedTag,ScalaDocParserErrorInspection
+class StreamSupplier(topologySupplier: Supplier[Topology],
+ healthController: HealthStatusController,
+ streamsConfig: StreamsConfig,
+ consumerTopic: String,
+ var adminClient: AdminClient = null) extends Supplier[KafkaStreams] {
+
+ require(topologySupplier != null, "streamsBuilder is required")
+ require(healthController != null, "healthStatusController is required")
+ require(streamsConfig != null, "streamsConfig is required")
+ require(consumerTopic != null && !consumerTopic.isEmpty, "consumerTopic is required")
+ if(adminClient == null) {
+ adminClient = AdminClient.create(getBootstrapProperties)
+ }
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[StreamSupplier])
+
+  /**
+    * creates a new KafkaStreams instance wired with a health-reporting state listener
+    * and uncaught exception handler
+    *
+    * @return the KafkaStreams instance
+    */
+ override def get(): KafkaStreams = {
+ checkConsumerTopic()
+
+ val listener = new StateChangeListener(healthController)
+ val streams = new KafkaStreams(topologySupplier.get(), streamsConfig)
+ streams.setStateListener(listener)
+ streams.setUncaughtExceptionHandler(listener)
+ streams.cleanUp()
+
+ streams
+ }
+
+ private def checkConsumerTopic(): Unit = {
+ LOGGER.info(s"checking for the consumer topic $consumerTopic")
+ try {
+ val present = adminClient.listTopics().names().get().contains(consumerTopic)
+ if (!present) {
+ throw new TopicNotPresentException(consumerTopic,
+ s"Topic '$consumerTopic' is configured as a consumer and it is not present")
+ }
+ }
+ finally {
+ Try(adminClient.close(5, TimeUnit.SECONDS))
+ }
+ }
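+
+  // a minimal usage sketch, assuming streamsConfig already carries bootstrap.servers:
+  //
+  //   val streams = new StreamSupplier(topologySupplier, healthController,
+  //                                    streamsConfig, "graph-nodes").get()
+  //   streams.start()   // get() has already failed fast if "graph-nodes" is absent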
+
+ private def getBootstrapProperties: Properties = {
+ val properties = new Properties()
+ properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, streamsConfig.getList(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG))
+ properties
+ }
+
+ /**
+ * Custom RuntimeException that represents a required Kafka topic not being present
+ *
+ * @param topic Name of the topic that is missing
+ * @param message Message
+ */
+ class TopicNotPresentException(topic: String, message: String) extends RuntimeException(message) {
+ def getTopic: String = topic
+ }
+}
+
diff --git a/service-graph/graph-builder/src/test/java/org/expedia/www/haystack/commons/scalatest/IntegrationSuite.java b/service-graph/graph-builder/src/test/java/org/expedia/www/haystack/commons/scalatest/IntegrationSuite.java
new file mode 100644
index 000000000..fe13e09e4
--- /dev/null
+++ b/service-graph/graph-builder/src/test/java/org/expedia/www/haystack/commons/scalatest/IntegrationSuite.java
@@ -0,0 +1,29 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.expedia.www.haystack.commons.scalatest;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@org.scalatest.TagAnnotation
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.METHOD, ElementType.TYPE})
+public @interface IntegrationSuite {
+}
diff --git a/service-graph/graph-builder/src/test/resources/integration/kafka-server.properties b/service-graph/graph-builder/src/test/resources/integration/kafka-server.properties
new file mode 100644
index 000000000..860ae817c
--- /dev/null
+++ b/service-graph/graph-builder/src/test/resources/integration/kafka-server.properties
@@ -0,0 +1,51 @@
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id=0
+
+# The port the socket server listens on
+port=9092
+
+# The number of threads handling network requests
+num.network.threads=2
+
+# The number of threads doing disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=1048576
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=1048576
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+# A comma separated list of directories under which to store log files
+log.dirs=target/kafka-logs
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The minimum age of a log file to be eligible for deletion
+log.retention.hours=168
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=536870912
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=60000
+
+# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
+# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
+log.cleaner.enable=false
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=1000000
+
+#auto create topics
+auto.create.topics.enable=true
+
+default.replication.factor=1
+offsets.topic.replication.factor=1
\ No newline at end of file
diff --git a/service-graph/graph-builder/src/test/resources/integration/local.conf b/service-graph/graph-builder/src/test/resources/integration/local.conf
new file mode 100644
index 000000000..057e156fb
--- /dev/null
+++ b/service-graph/graph-builder/src/test/resources/integration/local.conf
@@ -0,0 +1,60 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-graph-builder"
+ bootstrap.servers = "localhost:9092"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ }
+
+ rocksdb {
+ block.cache.size = 16777216
+ block.size = 16384
+ cache.index.and.filter.blocks = true
+ max.write.buffer.number = 2
+ }
+
+ consumer {
+ topic = "graph-nodes"
+ }
+
+ producer {
+ topic = "service-graph"
+ config = {
+ cleanup.policy = "compact"
+ }
+ }
+
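+  # 300-second (5 minute) tumbling aggregation windows, retained for 3 days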
+ aggregate {
+ window.sec = 300
+ retention.days = 3
+ }
+}
+
+service {
+ host = "localhost"
+
+ threads {
+ min = 5
+ max = 10
+ idle.timeout = 12000
+ }
+
+ http {
+ port = 8080
+ idle.timeout = 12000
+ }
+
+ client {
+ connection.timeout = 1000
+ socket.timeout = 1000
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
diff --git a/service-graph/graph-builder/src/test/resources/integration/zookeeper.properties b/service-graph/graph-builder/src/test/resources/integration/zookeeper.properties
new file mode 100644
index 000000000..75e8c6506
--- /dev/null
+++ b/service-graph/graph-builder/src/test/resources/integration/zookeeper.properties
@@ -0,0 +1,6 @@
+# the directory where the snapshot is stored.
+dataDir=target/zookeeper
+# the port at which the clients will connect
+clientPort=2181
+# disable the per-ip limit on the number of connections since this is a non-production config
+maxClientCnxns=0
\ No newline at end of file
diff --git a/service-graph/graph-builder/src/test/resources/log4j.properties b/service-graph/graph-builder/src/test/resources/log4j.properties
new file mode 100644
index 000000000..fa7f75bf8
--- /dev/null
+++ b/service-graph/graph-builder/src/test/resources/log4j.properties
@@ -0,0 +1,5 @@
+log4j.rootLogger=OFF, stdout
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
\ No newline at end of file
diff --git a/service-graph/graph-builder/src/test/resources/logback-test.xml b/service-graph/graph-builder/src/test/resources/logback-test.xml
new file mode 100644
index 000000000..f7171463c
--- /dev/null
+++ b/service-graph/graph-builder/src/test/resources/logback-test.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- reconstructed: the XML element tags of this logback config were lost in
+     extraction; the console appender layout below is the standard logback
+     structure implied by the surviving values (withJansi flag and pattern),
+     and the root level is assumed -->
+<configuration>
+
+    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+        <withJansi>true</withJansi>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n</pattern>
+        </encoder>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="console"/>
+    </root>
+
+</configuration>
diff --git a/service-graph/graph-builder/src/test/resources/logback.xml b/service-graph/graph-builder/src/test/resources/logback.xml
new file mode 100644
index 000000000..c7d7bf222
--- /dev/null
+++ b/service-graph/graph-builder/src/test/resources/logback.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- reconstructed: the XML element tags of this logback config were lost in
+     extraction; the console and async appender structure below is standard
+     logback implied by the surviving values, the root level is assumed, and
+     the graphite appender is kept as a comment because its class and element
+     names are not recoverable -->
+<configuration>
+
+    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+        <withJansi>true</withJansi>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="async" class="ch.qos.logback.classic.AsyncAppender">
+        <queueSize>${HAYSTACK_LOG_QUEUE_SIZE:-500}</queueSize>
+        <discardingThreshold>${HAYSTACK_LOG_DISCARD_THRESHOLD:-0}</discardingThreshold>
+        <appender-ref ref="console"/>
+    </appender>
+
+    <!-- graphite/metrics appender (class not recoverable):
+         host   = ${HAYSTACK_GRAPHITE_HOST:-monitoring-influxdb-graphite.kube-system.svc}
+         port   = ${HAYSTACK_GRAPHITE_PORT:-2003}
+         prefix = service-graph-node-finder -->
+
+    <root level="INFO">
+        <appender-ref ref="async"/>
+    </root>
+
+</configuration>
diff --git a/service-graph/graph-builder/src/test/resources/test/test.conf b/service-graph/graph-builder/src/test/resources/test/test.conf
new file mode 100644
index 000000000..b2312f0f4
--- /dev/null
+++ b/service-graph/graph-builder/src/test/resources/test/test.conf
@@ -0,0 +1,56 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-graph-builder"
+ bootstrap.servers = "localhost:9092"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ }
+
+ rocksdb {
+ block.cache.size = 16777216
+ block.size = 16384
+ cache.index.and.filter.blocks = true
+ max.write.buffer.number = 2
+ }
+
+ consumer {
+ topic = "graph-nodes"
+ }
+
+ producer {
+ topic = "service-graph"
+ }
+
+ aggregate {
+ window.sec = 3600
+ retention.days = 3
+ }
+}
+
+service {
+ host = "localhost"
+ threads {
+ min = 1
+ max = 5
+ idle.timeout = 12000
+ }
+
+ http {
+ port = 8080
+ idle.timeout = 12000
+ }
+
+ client {
+ connection.timeout = 1000
+ socket.timeout = 1000
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
diff --git a/service-graph/graph-builder/src/test/resources/test/test_application_server_set.conf b/service-graph/graph-builder/src/test/resources/test/test_application_server_set.conf
new file mode 100644
index 000000000..25bd919c5
--- /dev/null
+++ b/service-graph/graph-builder/src/test/resources/test/test_application_server_set.conf
@@ -0,0 +1,57 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-graph-builder"
+ application.server = "127.0.0.1:1002"
+ bootstrap.servers = "localhost:9092"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ }
+
+ rocksdb {
+ block.cache.size = 16777216
+ block.size = 16384
+ cache.index.and.filter.blocks = true
+ max.write.buffer.number = 2
+ }
+
+ consumer {
+ topic = "graph-nodes"
+ }
+
+ producer {
+ topic = "service-graph"
+ }
+
+ aggregate {
+ window.sec = 3600
+ retention.days = 3
+ }
+}
+
+service {
+ host = "localhost"
+ threads {
+ min = 1
+ max = 5
+ idle.timeout = 12000
+ }
+
+ http {
+ port = 8080
+ idle.timeout = 12000
+ }
+
+ client {
+ connection.timeout = 1000
+ socket.timeout = 1000
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
diff --git a/service-graph/graph-builder/src/test/resources/test/test_no_app_id.conf b/service-graph/graph-builder/src/test/resources/test/test_no_app_id.conf
new file mode 100644
index 000000000..0968c2011
--- /dev/null
+++ b/service-graph/graph-builder/src/test/resources/test/test_no_app_id.conf
@@ -0,0 +1,55 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ bootstrap.servers = "localhost:9092"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ }
+
+ rocksdb {
+ block.cache.size = 16777216
+ block.size = 16384
+ cache.index.and.filter.blocks = true
+ max.write.buffer.number = 2
+ }
+
+ consumer {
+ topic = "graph-nodes"
+ }
+
+ producer {
+ topic = "service-graph"
+ }
+
+ aggregate {
+ window.sec = 3600
+ retention.days = 3
+ }
+}
+
+service {
+ host = "localhost"
+ threads {
+ min = 1
+ max = 5
+ idle.timeout = 12000
+ }
+
+ http {
+ port = 8080
+ idle.timeout = 12000
+ }
+
+ client {
+ connection.timeout = 1000
+ socket.timeout = 1000
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
diff --git a/service-graph/graph-builder/src/test/resources/test/test_no_bootstrap.conf b/service-graph/graph-builder/src/test/resources/test/test_no_bootstrap.conf
new file mode 100644
index 000000000..e25cb39f8
--- /dev/null
+++ b/service-graph/graph-builder/src/test/resources/test/test_no_bootstrap.conf
@@ -0,0 +1,55 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-graph-builder"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ }
+
+ rocksdb {
+ block.cache.size = 16777216
+ block.size = 16384
+ cache.index.and.filter.blocks = true
+ max.write.buffer.number = 2
+ }
+
+ consumer {
+ topic = "graph-nodes"
+ }
+
+ producer {
+ topic = "service-graph"
+ }
+
+ aggregate {
+ window.sec = 3600
+ retention.days = 3
+ }
+}
+
+service {
+ host = "localhost"
+ threads {
+ min = 1
+ max = 5
+ idle.timeout = 12000
+ }
+
+ http {
+ port = 8080
+ idle.timeout = 12000
+ }
+
+ client {
+ connection.timeout = 1000
+ socket.timeout = 1000
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
diff --git a/service-graph/graph-builder/src/test/resources/test/test_no_consumer.conf b/service-graph/graph-builder/src/test/resources/test/test_no_consumer.conf
new file mode 100644
index 000000000..09344d81f
--- /dev/null
+++ b/service-graph/graph-builder/src/test/resources/test/test_no_consumer.conf
@@ -0,0 +1,52 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-graph-builder"
+ bootstrap.servers = "localhost:9092"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ }
+
+ rocksdb {
+ block.cache.size = 16777216
+ block.size = 16384
+ cache.index.and.filter.blocks = true
+ max.write.buffer.number = 2
+ }
+
+ producer {
+ topic = "service-graph"
+ }
+
+ aggregate {
+ window.sec = 3600
+ retention.days = 3
+ }
+}
+
+service {
+ host = "localhost"
+ threads {
+ min = 1
+ max = 5
+ idle.timeout = 12000
+ }
+
+ http {
+ port = 8080
+ idle.timeout = 12000
+ }
+
+ client {
+ connection.timeout = 1000
+ socket.timeout = 1000
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
diff --git a/service-graph/graph-builder/src/test/resources/test/test_no_producer.conf b/service-graph/graph-builder/src/test/resources/test/test_no_producer.conf
new file mode 100644
index 000000000..9389fb361
--- /dev/null
+++ b/service-graph/graph-builder/src/test/resources/test/test_no_producer.conf
@@ -0,0 +1,53 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-graph-builder"
+ bootstrap.servers = "localhost:9092"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ }
+
+ rocksdb {
+ block.cache.size = 16777216
+ block.size = 16384
+ cache.index.and.filter.blocks = true
+ max.write.buffer.number = 2
+ }
+
+ consumer {
+ topic = "graph-nodes"
+ }
+
+ aggregate {
+ window.sec = 3600
+ retention.days = 3
+ }
+}
+
+service {
+ host = "localhost"
+
+ threads {
+ min = 1
+ max = 5
+ idle.timeout = 12000
+ }
+
+ http {
+ port = 8080
+ idle.timeout = 12000
+ }
+
+ client {
+ connection.timeout = 1000
+ socket.timeout = 1000
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
diff --git a/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/AppSpec.scala b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/AppSpec.scala
new file mode 100644
index 000000000..bd5d9b2e0
--- /dev/null
+++ b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/AppSpec.scala
@@ -0,0 +1,249 @@
+package com.expedia.www.haystack.service.graph.graph.builder
+
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+import java.util.Properties
+
+import com.expedia.www.haystack.commons.entities.{GraphEdge, GraphVertex, TagKeys}
+import com.expedia.www.haystack.commons.health.HealthStatusController
+import com.expedia.www.haystack.commons.kstreams.serde.graph.{GraphEdgeKeySerde, GraphEdgeValueSerde}
+import com.expedia.www.haystack.service.graph.graph.builder.config.AppConfiguration
+import com.expedia.www.haystack.service.graph.graph.builder.kafka.KafkaController
+import com.expedia.www.haystack.service.graph.graph.builder.model.{EdgeStats, OperationGraph, ServiceGraph}
+import com.expedia.www.haystack.service.graph.graph.builder.service.HttpService
+import org.apache.http.client.fluent.Request
+import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
+import org.apache.kafka.streams.KafkaStreams
+import org.apache.kafka.streams.state.{QueryableStoreTypes, ReadOnlyWindowStore}
+import org.expedia.www.haystack.commons.scalatest.IntegrationSuite
+import org.json4s.DefaultFormats
+import org.json4s.jackson.Serialization
+import org.scalatest.BeforeAndAfterAll
+
+import scala.collection.JavaConverters._
+import scala.util.Random
+
+@IntegrationSuite
+class AppSpec extends TestSpec with BeforeAndAfterAll {
+
+ val kafkaController: KafkaController = createKafkaController()
+ private val appConfig = new AppConfiguration("integration/local.conf")
+ var stream: KafkaStreams = _
+ var service: HttpService = _
+
+ implicit val formats = DefaultFormats
+
+ override def beforeAll {
+ //start kafka and zk
+ kafkaController.startService()
+
+ //ensure test topics are present
+ kafkaController.createTopics(List(appConfig.kafkaConfig.consumerTopic))
+
+ //start topology
+ stream = App.createStream(appConfig.kafkaConfig, new HealthStatusController)
+ stream.start()
+
+ //start service
+ service = App.createService(appConfig.serviceConfig, stream, appConfig.kafkaConfig)
+ service.start()
+
+ //time for kstreams to initialize completely
+ Thread.sleep(20000)
+ }
+
+ describe("graph-builder application") {
+ it("should add new edges in ktable") {
+ Given("running stream topology")
+
+ When("getting new edges")
+ //send test data to source topic
+ val producer = kafkaController.createProducer(
+ appConfig.kafkaConfig.consumerTopic,
+ new GraphEdgeKeySerde().serializer(), new GraphEdgeValueSerde().serializer()
+ )
+
+ val random = new Random
+ val source = random.nextString(4)
+ val destination = random.nextString(4)
+ val operation = random.nextString(4)
+ val time = System.currentTimeMillis()
+
+ //send sample data
+ produceRecord(producer, source, destination, operation, time)
+
+ Then("edges should be added to edges ktable")
+ //read data from ktable to validate
+ val store: ReadOnlyWindowStore[GraphEdge, EdgeStats] =
+ stream.store(appConfig.kafkaConfig.producerTopic, QueryableStoreTypes.windowStore[GraphEdge, EdgeStats]())
+
+ val storeIterator = store.all()
+ val filteredEdges = storeIterator.asScala.toList.filter(
+ edge => {
+ val gEdge = edge.key.key
+ gEdge.source == GraphVertex(source) && gEdge.destination == GraphVertex(destination) && gEdge.operation == operation && gEdge.sourceTimestamp == 0
+ })
+
+ filteredEdges.length should be(1)
+ filteredEdges.head.value.count should be(1)
+ }
+
+ it("should add only one row for duplicate edges in ktable") {
+ Given("running stream topology")
+
+ When("getting duplicate edges")
+ //send test data to source topic
+ val producer = kafkaController.createProducer(
+ appConfig.kafkaConfig.consumerTopic,
+ new GraphEdgeKeySerde().serializer(), new GraphEdgeValueSerde().serializer())
+
+ val random = new Random
+ val source = random.nextString(4)
+ val destination = random.nextString(4)
+ val operation = random.nextString(4)
+ val time = System.currentTimeMillis()
+
+ //send sample data
+ produceDuplicateRecord(producer, 3, source, destination, operation, time)
+
+ Then("only one edge should be added to edges ktable")
+ //read data from ktable to validate
+ val store: ReadOnlyWindowStore[GraphEdge, EdgeStats] =
+ stream.store(appConfig.kafkaConfig.producerTopic, QueryableStoreTypes.windowStore[GraphEdge, EdgeStats]())
+
+ val storeIterator = store.all()
+ val filteredEdges = storeIterator.asScala.toList.filter(
+ edge => {
+ val gEdge = edge.key.key
+ gEdge.source == GraphVertex(source) && gEdge.destination == GraphVertex(destination) && gEdge.operation == operation && gEdge.sourceTimestamp == 0
+ })
+
+ filteredEdges.length should be(1)
+ filteredEdges.head.value.count should be(3)
+ }
+
+ it("should make servicegraph queriable through http") {
+ Given("running stream topology")
+
+ When("getting new edge")
+ //send test data to source topic
+ val producer = kafkaController.createProducer(
+ appConfig.kafkaConfig.consumerTopic,
+ new GraphEdgeKeySerde().serializer(), new GraphEdgeValueSerde().serializer())
+ val random = new Random
+ val source = random.nextInt().toString
+ val destination = random.nextInt().toString
+ val operation = random.nextString(4)
+ val time = System.currentTimeMillis()
+
+ //send sample data
+ produceRecord(producer, source, destination, operation, time, Map("tag1" -> "testtagval1", TagKeys.ERROR_KEY -> "true"))
+
+ Then("servicegraph endpoint should return the new edge")
+ val edgeJson = Request
+ .Get(s"http://localhost:${appConfig.serviceConfig.http.port}/servicegraph")
+ .execute()
+ .returnContent()
+ .asString()
+
+ val serviceGraph = Serialization.read[ServiceGraph](edgeJson)
+ val filteredEdges = serviceGraph.edges.filter(
+ edge => edge.source.name == source && edge.destination.name == destination)
+
+ filteredEdges.length should be(1)
+ filteredEdges.head.stats.count shouldBe 1
+ filteredEdges.head.stats.errorCount shouldBe 1
+ filteredEdges.head.source.tags.size should be(1)
+ filteredEdges.head.source.tags.get("tag1") should be (Some("testtagval1"))
+ }
+
+ it("should make operationgraph queriable through http") {
+ Given("running stream topology")
+
+ When("getting new edge")
+ //send test data to source topic
+ val producer = kafkaController.createProducer(
+ appConfig.kafkaConfig.consumerTopic,
+ new GraphEdgeKeySerde().serializer(), new GraphEdgeValueSerde().serializer())
+ val random = new Random
+ val source = random.nextInt().toString
+ val destination = random.nextInt().toString
+ val operation = random.nextInt().toString
+ val time = System.currentTimeMillis()
+
+ //send sample data
+ produceRecord(producer, source, destination, operation, time)
+
+ Then("operationgraph endpoint should return the new edge")
+ val edgeJson = Request
+ .Get(s"http://localhost:${appConfig.serviceConfig.http.port}/operationgraph")
+ .execute()
+ .returnContent()
+ .asString()
+
+ val operationGraph = Serialization.read[OperationGraph](edgeJson)
+ val filteredEdges = operationGraph.edges.filter(
+ edge => edge.source == source && edge.destination == destination && edge.operation == operation)
+
+ filteredEdges.length should be(1)
+ }
+ }
+
+ override def afterAll {
+ //stop service & topology
+ service.close()
+ stream.close()
+
+ //stop kafka and zk
+ kafkaController.stopService()
+ }
+
+ private def createKafkaController(): KafkaController = {
+ val zkProperties = new Properties
+ zkProperties.load(classOf[AppSpec].getClassLoader.getResourceAsStream("integration/zookeeper.properties"))
+
+ val kafkaProperties = new Properties
+ kafkaProperties.load(classOf[AppSpec].getClassLoader.getResourceAsStream("integration/kafka-server.properties"))
+
+ new KafkaController(kafkaProperties, zkProperties)
+ }
+
+ private def produceRecord(producer: KafkaProducer[GraphEdge, GraphEdge], source: String, destination: String,
+ operation: String, time: Long, sourceEdgetags: Map[String, String] = Map()): Unit = {
+ sendRecord(producer, source, destination, operation, time, sourceEdgetags)
+
+    // flush and sleep for a couple of seconds so the streams app can process the records
+ producer.flush()
+ Thread.sleep(2000)
+ }
+
+ private def produceDuplicateRecord(producer: KafkaProducer[GraphEdge, GraphEdge], count: Int, source: String, destination: String, operation: String, time: Long): Unit = {
+    for (_ <- 0 until count) sendRecord(producer, source, destination, operation, time)
+
+    // flush and sleep for a couple of seconds so the streams app can process the records
+ producer.flush()
+ Thread.sleep(2000)
+ }
+
+ private def sendRecord(producer: KafkaProducer[GraphEdge, GraphEdge], source: String, destination: String,
+ operation: String, time: Long, sourceEdgeTags: Map[String, String] = Map()): Unit = {
+ val edge = GraphEdge(GraphVertex(source, sourceEdgeTags), GraphVertex(destination), operation, time)
+ producer.send(new ProducerRecord[GraphEdge, GraphEdge](appConfig.kafkaConfig.consumerTopic, edge, edge))
+ }
+}
diff --git a/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/ManagedApplicationSpec.scala b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/ManagedApplicationSpec.scala
new file mode 100644
index 000000000..e7cdb4280
--- /dev/null
+++ b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/ManagedApplicationSpec.scala
@@ -0,0 +1,125 @@
+package com.expedia.www.haystack.service.graph.graph.builder
+
+import java.security.Permission
+
+import com.codahale.metrics.JmxReporter
+import com.expedia.www.haystack.commons.kstreams.app.ManagedService
+import com.expedia.www.haystack.service.graph.graph.builder.ManagedApplication._
+import org.mockito.Mockito.{times, verify, verifyNoMoreInteractions, when}
+import org.scalatest.mockito.MockitoSugar
+import org.scalatest.{BeforeAndAfterAll, FunSpec, SequentialNestedSuiteExecution}
+import org.slf4j.Logger
+
+sealed case class ExitException(status: Int) extends SecurityException("System.exit() was called")
+
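+// installing a custom SecurityManager turns System.exit() calls made by
+// ManagedApplication into catchable ExitExceptions instead of killing the test JVM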
+sealed class NoExitSecurityManager extends SecurityManager {
+ override def checkPermission(perm: Permission): Unit = {}
+
+ override def checkPermission(perm: Permission, context: Object): Unit = {}
+
+ override def checkExit(status: Int): Unit = {
+ super.checkExit(status)
+ throw ExitException(status)
+ }
+}
+
+class ManagedApplicationSpec extends FunSpec with MockitoSugar with BeforeAndAfterAll with SequentialNestedSuiteExecution {
+
+ override def beforeAll(): Unit = System.setSecurityManager(new NoExitSecurityManager())
+ override def afterAll(): Unit = System.setSecurityManager(null)
+
+ describe("ManagedApplication constructor") {
+ val (mocks: List[AnyRef], service: ManagedService, stream: ManagedService, jmxReporter: JmxReporter, logger: Logger) = createAndBundleMocks
+ it ("should throw an IllegalArgumentException if passed a null service") {
+ assertThrows[IllegalArgumentException] {
+ new ManagedApplication(null, stream, jmxReporter, logger)
+ }
+ }
+ it ("should throw an IllegalArgumentException if passed a null stream") {
+ assertThrows[IllegalArgumentException] {
+ new ManagedApplication(service, null, jmxReporter, logger)
+ }
+ }
+ it ("should throw an IllegalArgumentException if passed a null jmxReporter") {
+ assertThrows[IllegalArgumentException] {
+ new ManagedApplication(service, stream, null, logger)
+ }
+ }
+ it ("should throw an IllegalArgumentException if passed a null logger") {
+ assertThrows[IllegalArgumentException] {
+ new ManagedApplication(service, stream, jmxReporter, null)
+ }
+ }
+ verifyNoMoreInteractionsForAllMocks(mocks)
+ }
+
+ describe("ManagedApplication start") {
+ val (mocks: List[AnyRef], service: ManagedService, stream: ManagedService, jmxReporter: JmxReporter, logger: Logger) = createAndBundleMocks
+ val managedApplication = new ManagedApplication(service, stream, jmxReporter, logger)
+ it ("should start all dependencies when called") {
+ managedApplication.start()
+ verify(service).start()
+ verify(logger).info(StartMessage)
+ verify(stream).start()
+ verify(logger).info(HttpStartMessage)
+ verify(jmxReporter).start()
+ verify(logger).info(StreamStartMessage)
+ }
+ it ("should call System.exit() when an exception is thrown") {
+ when(service.start()).thenThrow(new NullPointerException)
+ assertThrows[ExitException] {
+ managedApplication.start()
+ }
+ verify(service, times(2)).start()
+ }
+ verifyNoMoreInteractionsForAllMocks(mocks)
+ }
+
+ describe("ManagedApplication stop") {
+ val (mocks: List[AnyRef], service: ManagedService, stream: ManagedService, jmxReporter: JmxReporter, logger: Logger) = createAndBundleMocks
+ it ("should stop all dependencies when called") {
+ val managedApplication = new ManagedApplication(service, stream, jmxReporter, logger)
+ managedApplication.stop()
+ verify(logger).info(HttpStopMessage)
+ verify(service).stop()
+ verify(logger).info(StreamStopMessage)
+ verify(stream).stop()
+ verify(logger).info(JmxReporterStopMessage)
+ verify(jmxReporter).close()
+ verify(logger).info(LoggerStopMessage)
+ }
+ verifyNoMoreInteractionsForAllMocks(mocks)
+ }
+
+ private def createMocks(): List[AnyRef] =
+ {
+ val service = mock[ManagedService]
+ val stream = mock[ManagedService]
+ val jmxReporter = mock[JmxReporter]
+ val logger = mock[Logger]
+ List(service, stream, jmxReporter, logger)
+ }
+
+ private def createAndBundleMocks = {
+ val mocks = createMocks()
+ val (service: ManagedService, stream: ManagedService, jmxReporter: JmxReporter, logger: Logger) = bundleMocks(mocks)
+ (mocks, service, stream, jmxReporter, logger)
+ }
+
+ private def bundleMocks(mocks: List[AnyRef]) = {
+ val service = mocks.head.asInstanceOf[ManagedService]
+ val stream = mocks(1).asInstanceOf[ManagedService]
+ val jmxReporter = mocks(2).asInstanceOf[JmxReporter]
+ val logger = mocks(3).asInstanceOf[Logger]
+ (service, stream, jmxReporter, logger)
+ }
+
+ private def verifyNoMoreInteractionsForAllMocks(mocks: List[AnyRef]): Unit = {
+ verifyNoMoreInteractions(mocks.head)
+ verifyNoMoreInteractions(mocks(1))
+ verifyNoMoreInteractions(mocks(2))
+ verifyNoMoreInteractions(mocks(3))
+ }
+
+}
diff --git a/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/TestSpec.scala b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/TestSpec.scala
new file mode 100644
index 000000000..8f29d6ae0
--- /dev/null
+++ b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/TestSpec.scala
@@ -0,0 +1,23 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder
+
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FunSpec, GivenWhenThen, Matchers}
+
+trait TestSpec extends FunSpec with GivenWhenThen with Matchers with EasyMockSugar
\ No newline at end of file
diff --git a/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/config/AppConfigurationSpec.scala b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/config/AppConfigurationSpec.scala
new file mode 100644
index 000000000..9e23caacf
--- /dev/null
+++ b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/config/AppConfigurationSpec.scala
@@ -0,0 +1,113 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.config
+
+import com.expedia.www.haystack.service.graph.graph.builder.TestSpec
+import com.expedia.www.haystack.service.graph.graph.builder.config.entities.CustomRocksDBConfig
+import com.typesafe.config.ConfigException
+import org.apache.kafka.streams.StreamsConfig
+import org.apache.kafka.streams.processor.WallclockTimestampExtractor
+import org.rocksdb.{BlockBasedTableConfig, Options}
+
+import scala.collection.JavaConverters._
+
+class AppConfigurationSpec extends TestSpec {
+ describe("loading application configuration") {
+ it("should fail creating KafkaConfiguration if no application id is specified") {
+ Given("a test configuration file")
+ val file = "test/test_no_app_id.conf"
+
+ When("Application configuration is loaded")
+
+ Then("it should throw an exception")
+ intercept[IllegalArgumentException] {
+ new AppConfiguration(file).kafkaConfig
+ }
+ }
+
+ it("should fail creating KafkaConfiguration if no bootstrap is specified") {
+ Given("a test configuration file")
+ val file = "test/test_no_bootstrap.conf"
+
+ When("Application configuration is loaded")
+
+ Then("it should throw an exception")
+ intercept[IllegalArgumentException] {
+ new AppConfiguration(file).kafkaConfig
+ }
+ }
+
+ it("should fail creating KafkaConfiguration if no consumer is specified") {
+ Given("a test configuration file")
+ val file = "test/test_no_consumer.conf"
+
+ When("Application configuration is loaded")
+
+ Then("it should throw an exception")
+ intercept[ConfigException] {
+ new AppConfiguration(file).kafkaConfig
+ }
+ }
+
+ it("should fail creating KafkaConfiguration if no producer is specified") {
+ Given("a test configuration file")
+ val file = "test/test_no_producer.conf"
+
+ When("Application configuration is loaded")
+
+ Then("it should throw an exception")
+ intercept[ConfigException] {
+ new AppConfiguration(file).kafkaConfig
+ }
+ }
+
+ it("should create KafkaConfiguration and ServiceConfiguration as specified") {
+ Given("a test configuration file")
+ val file = "test/test.conf"
+
+ When("Application configuration is loaded and KafkaConfiguration is obtained")
+ val config = new AppConfiguration(file)
+
+ Then("it should load as expected")
+ config.kafkaConfig.streamsConfig.defaultTimestampExtractor() shouldBe a [WallclockTimestampExtractor]
+ config.kafkaConfig.consumerTopic should be ("graph-nodes")
+ config.serviceConfig.http.port should be (8080)
+ config.serviceConfig.threads.max should be(5)
+ config.serviceConfig.client.connectionTimeout should be(1000)
+ val rocksDbOptions = new Options()
+ new CustomRocksDBConfig().setConfig("", rocksDbOptions, Map[String, AnyRef]().asJava)
+ val blockConfig = rocksDbOptions.tableFormatConfig().asInstanceOf[BlockBasedTableConfig]
+      blockConfig.blockCacheSize() shouldBe 16777216L
+      blockConfig.blockSize() shouldBe 16384L
+ blockConfig.cacheIndexAndFilterBlocks() shouldBe true
+ rocksDbOptions.maxWriteBufferNumber() shouldBe 2
+ config.kafkaConfig.streamsConfig.values().get(StreamsConfig.APPLICATION_SERVER_CONFIG).toString shouldBe "localhost:8080"
+ }
+
+ it("should allow for the application server to be set in the config file") {
+ Given("a test configuration file")
+ val file = "test/test_application_server_set.conf"
+
+ When("Application configuration is loaded and KafkaConfiguration is obtained")
+ val config = new AppConfiguration(file)
+
+ Then("it should load the application.server expected")
+ config.kafkaConfig.streamsConfig.values().get(StreamsConfig.APPLICATION_SERVER_CONFIG).toString shouldBe "127.0.0.1:1002"
+ }
+ }
+}
diff --git a/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/kafka/KafkaController.scala b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/kafka/KafkaController.scala
new file mode 100644
index 000000000..8ebb2e7b3
--- /dev/null
+++ b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/kafka/KafkaController.scala
@@ -0,0 +1,89 @@
+package com.expedia.www.haystack.service.graph.graph.builder.kafka
+
+import java.util.Properties
+import java.util.concurrent.TimeUnit
+import java.util.concurrent.atomic.AtomicBoolean
+
+import kafka.server.RunningAsBroker
+import org.apache.kafka.clients.CommonClientConfigs
+import org.apache.kafka.clients.admin.{AdminClient, NewTopic}
+import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
+import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig}
+import org.apache.kafka.common.serialization.{Deserializer, Serializer}
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+import scala.util.Try
+
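+// drives an in-process single-broker Kafka and ZooKeeper pair for the integration
+// tests: starts both, creates topics via an AdminClient and hands out test producers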
+class KafkaController(kafkaProperties: Properties, zooKeeperProperties: Properties) {
+ require(kafkaProperties != null)
+ require(zooKeeperProperties != null)
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[KafkaController])
+
+ private val zkPort = zooKeeperProperties.getProperty("clientPort").toInt
+ private val kafkaPort = kafkaProperties.getProperty("port").toInt
+
+ lazy val zkUrl: String = "localhost:" + zkPort
+ lazy val kafkaUrl: String = "localhost:" + kafkaPort
+
+ private val kafkaPropertiesWithZk = new Properties
+ kafkaPropertiesWithZk.putAll(kafkaProperties)
+ kafkaPropertiesWithZk.put("zookeeper.connect", zkUrl)
+ private val kafkaServer = new KafkaLocal(kafkaPropertiesWithZk)
+
+ def startService(): Unit = {
+ //start zk
+ val zookeeper = new ZooKeeperLocal(zooKeeperProperties)
+ new Thread(zookeeper).start()
+ Thread.sleep(2000)
+
+ //start kafka
+ kafkaServer.start()
+ Thread.sleep(2000)
+
+ // check kafka status
+ if (kafkaServer.state().currentState != RunningAsBroker.state) {
+ throw new IllegalStateException("Kafka server is not in a running state")
+ }
+
+ //lifecycle message
+ LOGGER.info("Kafka started and listening : {}", kafkaUrl)
+ }
+
+ def stopService(): Unit = {
+ //stop kafka
+ kafkaServer.stop()
+
+ //lifecycle message
+ LOGGER.info("Kafka stopped")
+ }
+
+ def createTopics(topics: List[String]): Unit = {
+ if (topics.nonEmpty) {
+ val adminClient = AdminClient.create(getBootstrapProperties)
+ try {
+ adminClient.createTopics(topics.map(topic => new NewTopic(topic, 1, 1)).asJava)
+ adminClient.listTopics().names().get().forEach(s => LOGGER.info("Available topic : {}", s))
+ }
+ finally {
+ Try(adminClient.close(5, TimeUnit.SECONDS))
+ }
+ }
+ }
+
+ def createProducer[K, V] (topic: String, keySerializer: Serializer[K],
+ valueSerializer: Serializer[V]) : KafkaProducer[K, V] = {
+ val properties = getBootstrapProperties
+ properties.put(ProducerConfig.CLIENT_ID_CONFIG, topic + "Producer")
+ new KafkaProducer[K, V](properties, keySerializer, valueSerializer)
+ }
+
+ private def getBootstrapProperties: Properties = {
+ val properties = new Properties()
+ properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, List(kafkaUrl).asJava)
+ properties
+ }
+}
+
+class InvalidStateException(message: String) extends RuntimeException(message)
diff --git a/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/kafka/KafkaLocal.scala b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/kafka/KafkaLocal.scala
new file mode 100644
index 000000000..7775c4407
--- /dev/null
+++ b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/kafka/KafkaLocal.scala
@@ -0,0 +1,23 @@
+package com.expedia.www.haystack.service.graph.graph.builder.kafka
+
+import java.util.Properties
+
+import kafka.metrics.KafkaMetricsReporter
+import kafka.server.{BrokerState, KafkaConfig, KafkaServer}
+
+class KafkaLocal(val kafkaProperties: Properties) {
+ val kafkaConfig: KafkaConfig = KafkaConfig.fromProps(kafkaProperties)
+ val kafka: KafkaServer = new KafkaServer(kafkaConfig, kafkaMetricsReporters = List[KafkaMetricsReporter]())
+
+ def start(): Unit = {
+ kafka.startup()
+ }
+
+ def stop(): Unit = {
+ kafka.shutdown()
+ }
+
+ def state(): BrokerState = {
+ kafka.brokerState
+ }
+}
diff --git a/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/kafka/ZooKeeperLocal.scala b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/kafka/ZooKeeperLocal.scala
new file mode 100644
index 000000000..a5a8dafa3
--- /dev/null
+++ b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/kafka/ZooKeeperLocal.scala
@@ -0,0 +1,31 @@
+package com.expedia.www.haystack.service.graph.graph.builder.kafka
+
+import java.io.IOException
+import java.util.Properties
+
+import org.apache.zookeeper.server.quorum.QuorumPeerConfig
+import org.apache.zookeeper.server.{ServerConfig, ZooKeeperServerMain}
+import org.slf4j.LoggerFactory
+
+
+object ZooKeeperLocal {
+ private val LOGGER = LoggerFactory.getLogger(classOf[ZooKeeperLocal])
+}
+
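+// ZooKeeperServerMain.runFromConfig blocks the calling thread, so KafkaController
+// runs this Runnable on a dedicated thread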
+class ZooKeeperLocal(val zkProperties: Properties) extends Runnable {
+ private val quorumConfiguration = new QuorumPeerConfig
+ quorumConfiguration.parseProperties(zkProperties)
+ private val configuration = new ServerConfig
+ configuration.readFrom(quorumConfiguration)
+ private val zooKeeperServer = new ZooKeeperServerMain
+
+ override def run(): Unit = {
+ try {
+ zooKeeperServer.runFromConfig(configuration)
+ }
+ catch {
+ case e: IOException =>
+ ZooKeeperLocal.LOGGER.error("Zookeeper startup failed.", e)
+ }
+ }
+}
diff --git a/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/model/EdgeStatsSerdeSpec.scala b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/model/EdgeStatsSerdeSpec.scala
new file mode 100644
index 000000000..0a1753fc7
--- /dev/null
+++ b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/model/EdgeStatsSerdeSpec.scala
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.service.graph.graph.builder.model
+
+import com.expedia.www.haystack.service.graph.graph.builder.TestSpec
+
+class EdgeStatsSerdeSpec extends TestSpec {
+
+ describe("EdgeStateSerde ") {
+ it("should serialize EdgeState") {
+ Given("a valid EdgeState object")
+ val edgeStats = EdgeStats(0, 0, 0)
+
+ And("EdgeState serializer")
+ val serializer = new EdgeStatsSerde().serializer()
+
+ When("EdgeState is serialized")
+ val bytes = serializer.serialize("", edgeStats)
+
+ Then("it should generate valid byte stream")
+ bytes.nonEmpty should be(true)
+ }
+
+ it("should deserialize serialized EdgeState") {
+ Given("a valid EdgeState object")
+ val edgeStats = EdgeStats(1, 1, 1)
+
+ And("serialized EdgeState")
+ val serializer = new EdgeStatsSerde().serializer()
+ val bytes = serializer.serialize("", edgeStats)
+
+ And("EdgeState deserializer")
+ val deserializer = new EdgeStatsSerde().deserializer()
+
+ When("EdgeState byte is deserialized")
+ val deserializedEdgeStats = deserializer.deserialize("", bytes)
+
+ Then("it should generate valid byte stream")
+ deserializedEdgeStats should not be null
+ deserializedEdgeStats.count should be(1)
+ deserializedEdgeStats.lastSeen should be(1)
+ deserializedEdgeStats.errorCount should be(1)
+ }
+ }
+}
diff --git a/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/model/EdgeStatsSpec.scala b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/model/EdgeStatsSpec.scala
new file mode 100644
index 000000000..9273e25f7
--- /dev/null
+++ b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/model/EdgeStatsSpec.scala
@@ -0,0 +1,90 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.service.graph.graph.builder.model
+
+import com.expedia.www.haystack.commons.entities.TagKeys.ERROR_KEY
+import com.expedia.www.haystack.commons.entities.{GraphEdge, GraphVertex}
+import org.scalatest.mockito.MockitoSugar
+import org.scalatest.{FunSpec, GivenWhenThen, Matchers}
+
+import scala.collection.mutable
+
+class EdgeStatsSpec extends FunSpec with GivenWhenThen with Matchers with MockitoSugar {
+
+ private val vertexName1 = "vertexName1"
+ private val vertexName2 = "vertexName2"
+ private val mutableTagsa: mutable.Map[String, String] = mutable.Map("mutableKeya" -> "mutableValuea", ERROR_KEY -> "true")
+ private val mutableTagsb: mutable.Map[String, String] = mutable.Map("mutableKeyb" -> "mutableValueb", ERROR_KEY -> "true")
+ private val immutableTagsA: Map[String, String] = Map("immutableKeyA" -> "immutableValueA", ERROR_KEY -> "true")
+ private val immutableTagsB: Map[String, String] = Map("immutableKeyB" -> "immutableValueB", ERROR_KEY -> "true")
+ private val graphVertex1A: GraphVertex = GraphVertex(vertexName1, immutableTagsA)
+ private val graphVertex2B: GraphVertex = GraphVertex(vertexName2, immutableTagsB)
+ private val graphVertexWithNoTags: GraphVertex = GraphVertex(vertexName2, Map.empty)
+ private val operationX = "operationX"
+ private val sourceTimestamp12 = 12
+ private val currentTimeAtStartOfTest = System.currentTimeMillis()
+ private val edgeStatsWithNoTags = EdgeStats(count = 0, lastSeen = 0, errorCount = 0)
+ private val graphEdgeWithNoTags = GraphEdge(graphVertexWithNoTags, graphVertexWithNoTags, operationX, 0)
+
+ describe("EdgeStats constructor") {
+ it("should use empty Maps for tags if no tags were specified") {
+ assert(edgeStatsWithNoTags.sourceTags.isEmpty)
+ assert(edgeStatsWithNoTags.destinationTags.isEmpty)
+ }
+ }
+
+ describe("EdgeStats update") {
+ {
+ val graphEdge = GraphEdge(graphVertex1A, graphVertex2B, operationX, sourceTimestamp12)
+ val edgeStats = EdgeStats(count = 0, lastSeen = 0, errorCount = 0,
+ sourceTags = mutableTagsa, destinationTags = mutableTagsb)
+ val updatedEdgeStats = edgeStats.update(graphEdge)
+ it("should collect tags") {
+ updatedEdgeStats.sourceTags.contains("mutableKeyb")
+ updatedEdgeStats.sourceTags.contains("immutableKeyB")
+ updatedEdgeStats.destinationTags.contains("mutableKeya")
+ updatedEdgeStats.destinationTags.contains("immutableKeyA")
+ }
+ it("should clear error keys from the tags") {
+ updatedEdgeStats.sourceTags.size shouldEqual 2
+ updatedEdgeStats.destinationTags.size shouldEqual 2
+ }
+ it("should count errors passed in from the graph edge source tags") {
+ updatedEdgeStats.errorCount shouldEqual 1
+ }
+ it("should assume no errors if the source tags map does not contain an error key") {
+ edgeStatsWithNoTags.update(graphEdgeWithNoTags).errorCount shouldEqual 0
+ }
+ }
+ it("should calculate last seen from System.currentTimeMillis if source timestamp is 0") {
+ val graphEdge = GraphEdge(graphVertex1A, graphVertex2B, operationX, 0)
+ val edgeStats = EdgeStats(count = 0, lastSeen = 0, errorCount = 0,
+ sourceTags = mutableTagsa, destinationTags = mutableTagsb)
+ val updatedEdgeStats = edgeStats.update(graphEdge)
+ assert(updatedEdgeStats.lastSeen >= currentTimeAtStartOfTest)
+ }
+ it("should calculate last seen from source timestamp if source timestamp is not 0") {
+ val graphEdge = GraphEdge(graphVertex1A, graphVertex2B, operationX, sourceTimestamp12)
+ val edgeStats = EdgeStats(count = 0, lastSeen = 0, errorCount = 0,
+ sourceTags = mutableTagsa, destinationTags = mutableTagsb)
+ val updatedEdgeStats = edgeStats.update(graphEdge)
+ updatedEdgeStats.lastSeen shouldEqual sourceTimestamp12
+ }
+ }
+}
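
The last two tests pin down the lastSeen rule precisely. A hypothetical one-liner capturing it (the real `EdgeStats.update`, which is not part of this diff, may inline this differently):

```scala
// fall back to the wall clock only when the edge carries no source timestamp
def lastSeenOf(sourceTimestamp: Long): Long =
  if (sourceTimestamp == 0) System.currentTimeMillis() else sourceTimestamp
```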
diff --git a/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/model/ServiceGraphEdgeSpec.scala b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/model/ServiceGraphEdgeSpec.scala
new file mode 100644
index 000000000..5b846726f
--- /dev/null
+++ b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/model/ServiceGraphEdgeSpec.scala
@@ -0,0 +1,55 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.service.graph.graph.builder.model
+
+import com.expedia.www.haystack.service.graph.graph.builder.TestSpec
+
+class ServiceGraphEdgeSpec extends TestSpec {
+
+ describe("ServiceGraphEdge") {
+ it("should merge Service graph objects accurately") {
+ Given("valid ServiceGraphEdge objects")
+ val serviceGraph1 = ServiceGraphEdge(
+ ServiceGraphVertex("src", Map("X-HAYSTACK-INFRASTRUCTURE-PROVIDER" -> "aws")),
+ ServiceGraphVertex("dest", Map("X-HAYSTACK-INFRASTRUCTURE-PROVIDER" -> "dc")),
+ ServiceEdgeStats(10, 15000, 3), 0, 10000)
+
+ val serviceGraph2 = ServiceGraphEdge(
+ ServiceGraphVertex("src", Map("X-HAYSTACK-INFRASTRUCTURE-PROVIDER" -> "dc")),
+ ServiceGraphVertex("dest", Map("X-HAYSTACK-INFRASTRUCTURE-PROVIDER" -> "dc")),
+ ServiceEdgeStats(15, 16000, 5), 0, 10000)
+
+ val serviceGraph3 = ServiceGraphEdge(
+ ServiceGraphVertex("src", Map("X-HAYSTACK-INFRASTRUCTURE-PROVIDER" -> "aws")),
+ ServiceGraphVertex("dest", Map("X-HAYSTACK-INFRASTRUCTURE-PROVIDER" -> "dc")),
+ ServiceEdgeStats(20, 17000, 8), 0, 10000)
+
+ When("Merging service graph objects")
+ val serviceGraph4 = serviceGraph1 + serviceGraph2
+ val serviceGraph5 = serviceGraph3 + serviceGraph4
+
+ Then("the merged edge should combine tags and stats")
+ serviceGraph5.source.tags("X-HAYSTACK-INFRASTRUCTURE-PROVIDER") should be ("aws,dc")
+ serviceGraph5.destination.tags("X-HAYSTACK-INFRASTRUCTURE-PROVIDER") should be ("dc")
+ serviceGraph5.stats.count should be (45)
+ serviceGraph5.stats.errorCount should be (16)
+ serviceGraph5.stats.lastSeen should be (17000)
+ }
+
+ }
+}
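
The `+` operator exercised here is implemented outside this diff. A sketch consistent with the assertions above, assuming `ServiceEdgeStats` carries the (count, lastSeen, errorCount) fields used in the test; `mergeTags` and `mergeStats` are hypothetical helper names:

```scala
import java.lang.Math.max

// union tag values per key, keeping each distinct value once, comma-joined,
// so re-merging an already merged edge stays stable ("aws,dc" + "aws" => "aws,dc")
def mergeTags(a: Map[String, String], b: Map[String, String]): Map[String, String] =
  (a.keySet ++ b.keySet).map { key =>
    val values = (a.get(key).toSeq ++ b.get(key).toSeq).flatMap(_.split(",")).distinct
    key -> values.mkString(",")
  }.toMap

// counts and error counts add up; lastSeen keeps the most recent timestamp
def mergeStats(a: ServiceEdgeStats, b: ServiceEdgeStats): ServiceEdgeStats =
  ServiceEdgeStats(a.count + b.count, max(a.lastSeen, b.lastSeen), a.errorCount + b.errorCount)
```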
diff --git a/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/service/ManagedHttpServiceSpec.scala b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/service/ManagedHttpServiceSpec.scala
new file mode 100644
index 000000000..52751468f
--- /dev/null
+++ b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/service/ManagedHttpServiceSpec.scala
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.graph.builder.service
+
+import org.mockito.Mockito.{verify, verifyNoMoreInteractions}
+import org.scalatest.mockito.MockitoSugar
+import org.scalatest.{FunSpec, Matchers}
+
+class ManagedHttpServiceSpec extends FunSpec with Matchers with MockitoSugar {
+ describe("ManagedHttpService constructor") {
+ it ("should require the service argument to be non-null") {
+ an [IllegalArgumentException] should be thrownBy new ManagedHttpService(null)
+ }
+ }
+
+ describe("ManagedHttpService.start()") {
+ val httpService = mock[HttpService]
+ val managedHttpService = new ManagedHttpService(httpService)
+ it("should call the service's start() method and set isRunning to true") {
+ managedHttpService.start()
+ verify(httpService).start()
+ assert(managedHttpService.hasStarted)
+ }
+ verifyNoMoreInteractions(httpService)
+ }
+
+ describe("ManagedHttpService.stop()") {
+ val httpService = mock[HttpService]
+ val managedHttpService = new ManagedHttpService(httpService)
+ it("should not call the service's stop() method if the service is not running") {
+ managedHttpService.stop()
+ verifyNoMoreInteractions(httpService)
+ }
+ it("should call the service's close() method and set isRunning to false if the service is running") {
+ managedHttpService.start()
+ managedHttpService.stop()
+ verify(httpService).start()
+ verify(httpService).close()
+ assert(!managedHttpService.hasStarted)
+ verifyNoMoreInteractions(httpService)
+ }
+ }
+}
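
For context, the lifecycle contract this spec verifies can be sketched as follows. This is a hypothetical reconstruction (the shipped ManagedHttpService lives elsewhere in the module), assuming `HttpService` exposes `start()` and `close()`:

```scala
class ManagedHttpServiceSketch(service: HttpService) {
  require(service != null)

  @volatile private var isRunning = false

  def hasStarted: Boolean = isRunning

  def start(): Unit = {
    service.start()
    isRunning = true
  }

  // stop() is a no-op unless the service was actually started
  def stop(): Unit =
    if (isRunning) {
      service.close()
      isRunning = false
    }
}
```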
diff --git a/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/service/resources/LocalOperationGraphResourceSpec.scala b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/service/resources/LocalOperationGraphResourceSpec.scala
new file mode 100644
index 000000000..9c1a8ce34
--- /dev/null
+++ b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/service/resources/LocalOperationGraphResourceSpec.scala
@@ -0,0 +1,49 @@
+package com.expedia.www.haystack.service.graph.graph.builder.service.resources
+
+import com.expedia.www.haystack.service.graph.graph.builder.model.{OperationGraph, OperationGraphEdge}
+import com.expedia.www.haystack.service.graph.graph.builder.service.fetchers.LocalOperationEdgesFetcher
+import com.expedia.www.haystack.service.graph.graph.builder.service.utils.QueryTimestampReader
+import javax.servlet.http.HttpServletRequest
+import org.mockito.Mockito
+import org.mockito.Mockito.{verify, when}
+import org.scalatest.mockito.MockitoSugar
+import org.scalatest.{FunSpec, Matchers}
+
+class LocalOperationGraphResourceSpec extends FunSpec with Matchers with MockitoSugar {
+ private implicit val timestampReader: QueryTimestampReader = mock[QueryTimestampReader]
+ private class LocalOperationGraphResourceChild(localEdgesFetcher: LocalOperationEdgesFetcher)
+ extends LocalOperationGraphResource(localEdgesFetcher: LocalOperationEdgesFetcher) {
+ override def get(request: HttpServletRequest): OperationGraph = {
+ super.get(request)
+ }
+ }
+
+ describe("LocalOperationGraphResource.get()") {
+ val localEdgesFetcher = mock[LocalOperationEdgesFetcher]
+ val request = mock[HttpServletRequest]
+ val operationGraphEdges = mock[List[OperationGraphEdge]]
+
+ val OperationGraphEdgesLength = 42
+ val From: Long = 271828
+ val To: Long = 371415
+
+ val localOperationGraphResource = new LocalOperationGraphResourceChild(localEdgesFetcher)
+
+ it ("should read an OperationGraph with the correct timestamps") {
+ when(timestampReader.fromTimestamp(request)).thenReturn(From)
+ when(timestampReader.toTimestamp(request)).thenReturn(To)
+ when(localEdgesFetcher.fetchEdges(From, To)).thenReturn(operationGraphEdges)
+ when(operationGraphEdges.length).thenReturn(OperationGraphEdgesLength)
+
+ val localGraph = localOperationGraphResource.get(request)
+
+ assert(localGraph.edges == operationGraphEdges)
+
+ verify(timestampReader).fromTimestamp(request)
+ verify(timestampReader).toTimestamp(request)
+ verify(localEdgesFetcher).fetchEdges(From, To)
+ verify(operationGraphEdges).length
+ Mockito.verifyNoMoreInteractions(localEdgesFetcher, request, operationGraphEdges)
+ }
+ }
+}
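
A hypothetical shape of the `get()` under test, consistent with the interactions verified above (timestamps read from the request, edges fetched for that window, `length` read once); the real resource class adds servlet and serialization plumbing, and the `OperationGraph(edges)` constructor is assumed from the `localGraph.edges` assertion:

```scala
def get(request: HttpServletRequest): OperationGraph = {
  val from = timestampReader.fromTimestamp(request)
  val to = timestampReader.toTimestamp(request)
  val edges = localEdgesFetcher.fetchEdges(from, to)
  val edgeCount = edges.length // consumed by a log line or metric in the real class
  OperationGraph(edges)
}
```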
diff --git a/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/service/utils/EdgesMergerSpec.scala b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/service/utils/EdgesMergerSpec.scala
new file mode 100644
index 000000000..e2797476d
--- /dev/null
+++ b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/service/utils/EdgesMergerSpec.scala
@@ -0,0 +1,103 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.service.graph.graph.builder.service.utils
+
+import java.lang.Math.{max, min}
+
+import com.expedia.www.haystack.service.graph.graph.builder.model._
+import org.scalatest.{FunSpec, Matchers}
+
+class EdgesMergerSpec extends FunSpec with Matchers {
+
+ describe("EdgesMerger.getMergedServiceEdges()") {
+ val stats1 = ServiceEdgeStats(1, 10, 100)
+ val stats3 = ServiceEdgeStats(3, 30, 300)
+ val stats5 = ServiceEdgeStats(5, 50, 500)
+ val stats7 = ServiceEdgeStats(7, 70, 700)
+ val vertexA: ServiceGraphVertex = ServiceGraphVertex("serviceGraphVertexA", Map.empty)
+ val vertexB: ServiceGraphVertex = ServiceGraphVertex("serviceGraphVertexB", Map.empty)
+ val vertexC: ServiceGraphVertex = ServiceGraphVertex("serviceGraphVertexC", Map.empty)
+ val edgeAB1 = ServiceGraphEdge(vertexA, vertexB, stats1, 1000, 10000)
+ val edgeAB3 = ServiceGraphEdge(vertexA, vertexB, stats3, 3000, 30000)
+ val edgeAC5 = ServiceGraphEdge(vertexA, vertexC, stats5, 7000, 70000)
+ val edgeBC7 = ServiceGraphEdge(vertexB, vertexC, stats7, 15000, 150000)
+ it("should create two edges when source matches but destination does not") {
+ val mergedEdges = EdgesMerger.getMergedServiceEdges(Seq(edgeAB1, edgeAC5))
+ mergedEdges.size should equal(2)
+ mergedEdges should contain(edgeAB1)
+ mergedEdges should contain(edgeAC5)
+ }
+ it("should create two edges when destination matches but source does not") {
+ val mergedEdges = EdgesMerger.getMergedServiceEdges(Seq(edgeAC5, edgeBC7))
+ mergedEdges.size should equal(2)
+ mergedEdges should contain(edgeAC5)
+ mergedEdges should contain(edgeBC7)
+ }
+ it("should merge two edges when source and destination match") {
+ val mergedEdges = EdgesMerger.getMergedServiceEdges(Seq(edgeAB1, edgeAB3))
+ mergedEdges.size should equal(1)
+ val mergedEdge: ServiceGraphEdge = mergedEdges.head
+ mergedEdge.source should equal(vertexA)
+ mergedEdge.destination should equal(vertexB)
+ mergedEdge.effectiveFrom should equal(min(edgeAB1.effectiveFrom, edgeAB3.effectiveFrom))
+ mergedEdge.effectiveTo should equal(max(edgeAB1.effectiveTo, edgeAB3.effectiveTo))
+ mergedEdge.stats.count should equal(edgeAB1.stats.count + edgeAB3.stats.count)
+ mergedEdge.stats.lastSeen should equal(max(edgeAB1.stats.lastSeen, edgeAB3.stats.lastSeen))
+ mergedEdge.stats.errorCount should equal(edgeAB1.stats.errorCount + edgeAB3.stats.errorCount)
+ }
+ }
+
+ describe("EdgesMerger.getMergedOperationEdge()") {
+ val stats1 = EdgeStats(1, 10, 100)
+ val stats3 = EdgeStats(3, 30, 300)
+ val stats5 = EdgeStats(5, 50, 500)
+ val stats7 = EdgeStats(7, 70, 700)
+ val stats9 = EdgeStats(9, 90, 900)
+ val edgeAX1: OperationGraphEdge = OperationGraphEdge("sourceA", "destinationX", "operation1", stats1, 1000, 10000)
+ val edgeAX3: OperationGraphEdge = OperationGraphEdge("sourceA", "destinationX", "operation3", stats3, 3000, 30000)
+ val edgeAY3: OperationGraphEdge = OperationGraphEdge("sourceA", "destinationY", "operation3", stats5, 7000, 70000)
+ val edgeBY3a: OperationGraphEdge = OperationGraphEdge("sourceB", "destinationY", "operation3", stats7, 15000, 150000)
+ val edgeBY3b: OperationGraphEdge = OperationGraphEdge("sourceB", "destinationY", "operation3", stats7, 31000, 310000)
+ it ("should create two edges when source and destination match but operation does not") {
+ val mergedEdges = EdgesMerger.getMergedOperationEdges(Seq(edgeAX1, edgeAX3))
+ mergedEdges.size should equal(2)
+ mergedEdges should contain(edgeAX1)
+ mergedEdges should contain(edgeAX3)
+ }
+ it ("should create two edges when source and operation match but destination does not") {
+ val mergedEdges = EdgesMerger.getMergedOperationEdges(Seq(edgeAX3, edgeAY3))
+ mergedEdges.size should equal(2)
+ mergedEdges should contain(edgeAX3)
+ mergedEdges should contain(edgeAY3)
+ }
+ it ("should merge two edges when source, destination and operation match") {
+ val mergedEdges = EdgesMerger.getMergedOperationEdges(Seq(edgeBY3a, edgeBY3b))
+ mergedEdges.size should equal(1)
+ val mergedEdge = mergedEdges.head
+ mergedEdge.source should equal(edgeBY3a.source)
+ mergedEdge.destination should equal(edgeBY3b.destination)
+ mergedEdge.operation should equal(edgeBY3a.operation)
+ mergedEdge.stats.count should equal(edgeBY3a.stats.count + edgeBY3b.stats.count)
+ mergedEdge.stats.lastSeen should equal(max(edgeBY3a.stats.lastSeen, edgeBY3b.stats.lastSeen))
+ mergedEdge.stats.errorCount should equal(edgeBY3a.stats.errorCount + edgeBY3b.stats.errorCount)
+ mergedEdge.effectiveFrom should equal(min(edgeBY3a.effectiveFrom, edgeBY3b.effectiveFrom))
+ mergedEdge.effectiveTo should equal(max(edgeBY3a.effectiveTo, edgeBY3b.effectiveTo))
+ }
+ }
+}
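
A sketch of the grouping strategy these tests imply: edges are keyed on the fields that must match, then each group is folded with a pairwise merge. This assumes vertices are identified by a `name` field and that both edge types define a `+` like the one seen in ServiceGraphEdgeSpec; `EdgesMergerSketch` is an illustrative name:

```scala
object EdgesMergerSketch {
  // service edges merge when (source, destination) vertex names both match
  def getMergedServiceEdges(edges: Seq[ServiceGraphEdge]): Seq[ServiceGraphEdge] =
    edges.groupBy(e => (e.source.name, e.destination.name)).values.map(_.reduce(_ + _)).toSeq

  // operation edges must also agree on the operation to be merged
  def getMergedOperationEdges(edges: Seq[OperationGraphEdge]): Seq[OperationGraphEdge] =
    edges.groupBy(e => (e.source, e.destination, e.operation)).values.map(_.reduce(_ + _)).toSeq
}
```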
diff --git a/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/stream/StreamSupplierSpec.scala b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/stream/StreamSupplierSpec.scala
new file mode 100644
index 000000000..5d9c861b3
--- /dev/null
+++ b/service-graph/graph-builder/src/test/scala/com/expedia/www/haystack/service/graph/graph/builder/stream/StreamSupplierSpec.scala
@@ -0,0 +1,66 @@
+package com.expedia.www.haystack.service.graph.graph.builder.stream
+
+import java.util
+import java.util.Collections
+import java.util.concurrent.TimeUnit
+import java.util.function.Supplier
+
+import com.expedia.www.haystack.commons.health.HealthStatusController
+import org.apache.kafka.clients.admin.{AdminClient, ListTopicsResult}
+import org.apache.kafka.common.KafkaFuture
+import org.apache.kafka.streams.{StreamsConfig, Topology}
+import org.mockito.Mockito.{reset, verify, verifyNoMoreInteractions, when}
+import org.scalatest.mockito.MockitoSugar
+import org.scalatest.{FunSpec, Matchers}
+
+class StreamSupplierSpec extends FunSpec with Matchers with MockitoSugar {
+ private val ConsumerTopic = "ConsumerTopic"
+
+ private val topologySupplier = mock[Supplier[Topology]]
+ private val healthController = mock[HealthStatusController]
+ private val streamsConfig = mock[StreamsConfig]
+
+ describe("StreamSupplier constructor") {
+ it ("should require the topologySupplier argument to be non-null") {
+ an [IllegalArgumentException] should be thrownBy
+ new StreamSupplier(null, healthController, streamsConfig, ConsumerTopic)
+ }
+ it ("should require the healthController argument to be non-null") {
+ an [IllegalArgumentException] should be thrownBy
+ new StreamSupplier(topologySupplier, null, streamsConfig, ConsumerTopic)
+ }
+ it ("should require the streamsConfig argument to be non-null") {
+ an [IllegalArgumentException] should be thrownBy
+ new StreamSupplier(topologySupplier, healthController, null, ConsumerTopic)
+ }
+ it ("should require the consumerTopic argument to be non-null") {
+ an [IllegalArgumentException] should be thrownBy
+ new StreamSupplier(topologySupplier, healthController, streamsConfig, null)
+ }
+ verifyNoMoreInteractions(topologySupplier, healthController, streamsConfig)
+ }
+
+ private val adminClient = mock[AdminClient]
+ private val listTopicsResult: ListTopicsResult = mock[ListTopicsResult]
+ private val kafkaFuture: KafkaFuture[util.Set[String]] = mock[KafkaFuture[util.Set[String]]]
+
+ describe("StreamSupplier.get()") {
+ it("should throw an exception if the consumer topic does exist") {
+ when(adminClient.listTopics()).thenReturn(listTopicsResult)
+ when(listTopicsResult.names()).thenReturn(kafkaFuture)
+ val nonExistentTopic = "NonExistent" + ConsumerTopic
+ when(kafkaFuture.get()).thenReturn(Collections.singleton(ConsumerTopic))
+
+ val streamSupplier = new StreamSupplier(topologySupplier, healthController, streamsConfig, nonExistentTopic, adminClient)
+ val thrown = the [streamSupplier.TopicNotPresentException] thrownBy streamSupplier.get
+ thrown.getTopic shouldEqual nonExistentTopic
+
+ verify(adminClient).listTopics()
+ verify(listTopicsResult).names()
+ verify(kafkaFuture).get()
+ verify(adminClient).close(5, TimeUnit.SECONDS)
+ verifyNoMoreInteractions(topologySupplier, healthController, streamsConfig, adminClient, listTopicsResult, kafkaFuture)
+ reset(topologySupplier, healthController, streamsConfig, adminClient, listTopicsResult, kafkaFuture)
+ }
+ }
+}
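
A hypothetical standalone version of the topic-existence check this spec drives; the real logic lives inside `StreamSupplier.get`, where `TopicNotPresentException` is defined as an inner class:

```scala
import java.util.concurrent.TimeUnit

import org.apache.kafka.clients.admin.AdminClient

class TopicNotPresentException(topic: String)
  extends RuntimeException(s"Topic $topic is not present in the Kafka cluster") {
  def getTopic: String = topic
}

def requireTopicExists(adminClient: AdminClient, consumerTopic: String): Unit = {
  try {
    // listTopics().names() resolves to the set of topic names known to the brokers
    val topicNames = adminClient.listTopics().names().get()
    if (!topicNames.contains(consumerTopic)) throw new TopicNotPresentException(consumerTopic)
  } finally {
    // bounded close, mirroring the verify(adminClient).close(5, TimeUnit.SECONDS) above
    adminClient.close(5, TimeUnit.SECONDS)
  }
}
```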
diff --git a/service-graph/node-finder/Makefile b/service-graph/node-finder/Makefile
new file mode 100644
index 000000000..d8c273917
--- /dev/null
+++ b/service-graph/node-finder/Makefile
@@ -0,0 +1,11 @@
+.PHONY: docker-image release
+
+export DOCKER_ORG := expediadotcom
+export DOCKER_IMAGE_NAME := haystack-service-graph-node-finder
+PWD := $(shell pwd)
+
+docker-image:
+ docker build -t $(DOCKER_IMAGE_NAME) -f build/docker/Dockerfile .
+
+release: docker-image
+ ../deployment/scripts/publish-to-docker-hub.sh
diff --git a/service-graph/node-finder/README.md b/service-graph/node-finder/README.md
new file mode 100644
index 000000000..6d494b026
--- /dev/null
+++ b/service-graph/node-finder/README.md
@@ -0,0 +1,65 @@
+# Haystack : node-finder
+
+Information about what this component does is documented in the [README](../README.md) of the repository.
+
+## Building
+
+```
+mvn clean verify
+```
+
+or
+
+```
+make docker-image
+```
+
+## Testing Locally
+
+* Download Kafka 0.11.0.x
+* Start Zookeeper locally (from kafka home)
+```
+bin/zookeeper-server-start.sh config/zookeeper.properties
+```
+* Start Kafka locally (from kafka home)
+```
+bin/kafka-server-start.sh config/server.properties
+```
+* Create proto-spans topic (from kafka home)
+```
+bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic proto-spans
+```
+* Create a local.conf override file
+```
+cat local.conf
+
+health.status.path = "logs/isHealthy"
+
+kafka {
+ streams {
+ bootstrap.servers = "localhost:9092"
+ }
+ accumulator {
+ interval = 1000
+ }
+}
+```
+* Build node-finder application locally (node-finder app root)
+```
+mvn clean package
+```
+* Start the application (node-finder app root)
+```
+export HAYSTACK_OVERRIDES_CONFIG_PATH=/local.conf
+java -jar target/haystack-service-graph-node-finder.jar
+```
+* Send data to Kafka (refer to fakespans tool README)
+```
+$GOBIN/fakespans --from-file fakespans.json --kafka-broker localhost:9092
+```
+* Check the output topics (from kafka home)
+```
+bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic graph-nodes --from-beginning
+
+bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic metric-data-points --from-beginning
+```
diff --git a/service-graph/node-finder/build/docker/Dockerfile b/service-graph/node-finder/build/docker/Dockerfile
new file mode 100644
index 000000000..16201a90e
--- /dev/null
+++ b/service-graph/node-finder/build/docker/Dockerfile
@@ -0,0 +1,24 @@
+FROM openjdk:8-jre
+MAINTAINER Haystack
+
+ENV APP_NAME haystack-service-graph-node-finder
+ENV APP_HOME /app/bin
+ENV JMXTRANS_AGENT jmxtrans-agent-1.2.6
+ENV DOCKERIZE_VERSION v0.6.1
+
+ADD https://github.com/jwilder/dockerize/releases/download/${DOCKERIZE_VERSION}/dockerize-alpine-linux-amd64-${DOCKERIZE_VERSION}.tar.gz dockerize.tar.gz
+RUN tar xzf dockerize.tar.gz
+RUN chmod +x dockerize
+
+RUN mkdir -p ${APP_HOME}
+
+COPY target/${APP_NAME}.jar ${APP_HOME}/
+COPY build/docker/start-app.sh ${APP_HOME}/
+COPY build/docker/jmxtrans-agent.xml ${APP_HOME}/
+
+RUN chmod +x ${APP_HOME}/start-app.sh
+ADD https://github.com/jmxtrans/jmxtrans-agent/releases/download/${JMXTRANS_AGENT}/${JMXTRANS_AGENT}.jar ${APP_HOME}/
+
+WORKDIR ${APP_HOME}
+
+ENTRYPOINT ["./start-app.sh"]
diff --git a/service-graph/node-finder/build/docker/jmxtrans-agent.xml b/service-graph/node-finder/build/docker/jmxtrans-agent.xml
new file mode 100644
index 000000000..0ae96bfef
--- /dev/null
+++ b/service-graph/node-finder/build/docker/jmxtrans-agent.xml
@@ -0,0 +1,86 @@
+<jmxtrans-agent>
+    <queries>
+        <!-- JMX query definitions not recoverable from this extract -->
+    </queries>
+    <outputWriter class="org.jmxtrans.agent.GraphitePlainTextTcpOutputWriter">
+        <host>${HAYSTACK_GRAPHITE_HOST:monitoring-influxdb-graphite.kube-system.svc}</host>
+        <port>${HAYSTACK_GRAPHITE_PORT:2003}</port>
+        <enabled>${HAYSTACK_GRAPHITE_ENABLED:true}</enabled>
+        <namePrefix>haystack.service-graph.node-finder.#hostname#.</namePrefix>
+    </outputWriter>
+    <collectIntervalInSeconds>60</collectIntervalInSeconds>
+</jmxtrans-agent>
diff --git a/service-graph/node-finder/build/docker/start-app.sh b/service-graph/node-finder/build/docker/start-app.sh
new file mode 100755
index 000000000..719b69531
--- /dev/null
+++ b/service-graph/node-finder/build/docker/start-app.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+[ -z "$JAVA_XMS" ] && JAVA_XMS=1024m
+[ -z "$JAVA_XMX" ] && JAVA_XMX=1024m
+
+set -e
+JAVA_OPTS="${JAVA_OPTS} \
+-javaagent:${APP_HOME}/${JMXTRANS_AGENT}.jar=${APP_HOME}/jmxtrans-agent.xml \
+-XX:+UseG1GC \
+-Xloggc:/var/log/gc.log \
+-XX:+PrintGCDetails \
+-XX:+PrintGCDateStamps \
+-XX:+UseGCLogFileRotation \
+-XX:NumberOfGCLogFiles=5 \
+-XX:GCLogFileSize=2M \
+-Xmx${JAVA_XMX} \
+-Xms${JAVA_XMS} \
+-Dapplication.name=${APP_NAME} \
+-Dapplication.home=${APP_HOME}"
+
+if [[ -n "$SERVICE_DEBUG_ON" ]] && [[ "$SERVICE_DEBUG_ON" == true ]]; then
+ JAVA_OPTS="$JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y"
+fi
+
+exec java ${JAVA_OPTS} -jar "${APP_HOME}/${APP_NAME}.jar"
diff --git a/service-graph/node-finder/pom.xml b/service-graph/node-finder/pom.xml
new file mode 100644
index 000000000..3d2c1bed1
--- /dev/null
+++ b/service-graph/node-finder/pom.xml
@@ -0,0 +1,191 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>haystack-service-graph</artifactId>
+        <groupId>com.expedia.www</groupId>
+        <version>1.0.15-SNAPSHOT</version>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>haystack-service-graph-node-finder</artifactId>
+    <packaging>jar</packaging>
+
+    <licenses>
+        <license>
+            <name>Apache License, Version 2.0</name>
+            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+            <distribution>repo</distribution>
+        </license>
+    </licenses>
+
+    <properties>
+        <kafka-version>1.1.0</kafka-version>
+        <mainClass>com.expedia.www.haystack.service.graph.node.finder.App</mainClass>
+        <finalName>${project.artifactId}-${project.version}</finalName>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka_${scala.major.minor.version}</artifactId>
+            <version>${kafka-version}</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-log4j12</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-clients</artifactId>
+            <version>${kafka-version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-streams</artifactId>
+            <version>${kafka-version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.expedia.www</groupId>
+            <artifactId>haystack-logback-metrics-appender</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.msgpack</groupId>
+            <artifactId>msgpack-core</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.json4s</groupId>
+            <artifactId>json4s-ext_${scala.major.minor.version}</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <finalName>${finalName}</finalName>
+        <plugins>
+            <plugin>
+                <groupId>org.scalatest</groupId>
+                <artifactId>scalatest-maven-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>test</id>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                        <configuration>
+                            <tagsToExclude>org.expedia.www.haystack.commons.scalatest.IntegrationSuite</tagsToExclude>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>integration-test</id>
+                        <phase>integration-test</phase>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                        <configuration>
+                            <tagsToInclude>org.expedia.www.haystack.commons.scalatest.IntegrationSuite</tagsToInclude>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>org.scoverage</groupId>
+                <artifactId>scoverage-maven-plugin</artifactId>
+                <configuration>
+                    <aggregate>true</aggregate>
+                    <excludedPackages>com.expedia.www.haystack.service.graph.node.finder;com.expedia.www.haystack.commons.kstreams.app.Main;com.expedia.www.haystack.commons.kstreams.app.StreamsFactory</excludedPackages>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <transformers>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
+                                    <resource>reference.conf</resource>
+                                </transformer>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                                    <mainClass>${mainClass}</mainClass>
+                                </transformer>
+                            </transformers>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>net.alchim31.maven</groupId>
+                <artifactId>scala-maven-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.scalastyle</groupId>
+                <artifactId>scalastyle-maven-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/service-graph/node-finder/src/main/resources/app.conf b/service-graph/node-finder/src/main/resources/app.conf
new file mode 100644
index 000000000..e1d885c52
--- /dev/null
+++ b/service-graph/node-finder/src/main/resources/app.conf
@@ -0,0 +1,43 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-node-finder"
+ bootstrap.servers = "kafkasvc:9092"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ timestamp.extractor = "com.expedia.www.haystack.commons.kstreams.SpanTimestampExtractor"
+ producer.linger.ms = 500
+ state.dir = "/app"
+ replication.factor = 2
+ }
+
+ producer {
+ metrics.topic = "metric-data-points"
+ service.call.topic = "graph-nodes"
+ }
+
+ consumer {
+ topic = "proto-spans"
+ }
+
+ //how long the span accumulator waits for a matching span before dropping the span
+ accumulator {
+ interval = 2500 //in milliseconds
+ }
+
+ collectorTags = []
+
+ node.metadata {
+ topic {
+ autocreate = true
+ name = "haystack-node-finder-metadata"
+ partition.count = 1
+ replication.factor = 1
+ }
+ }
+}
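
For orientation, a minimal sketch of how keys from this file might be read with Typesafe Config. The actual loading lives in `AppConfiguration` (not shown here); the README shows overrides being supplied via `HAYSTACK_OVERRIDES_CONFIG_PATH`.

```scala
import com.typesafe.config.{Config, ConfigFactory}

// parse the bundled app.conf from the classpath and resolve substitutions
val config: Config = ConfigFactory.parseResources("app.conf").resolve()
val consumerTopic: String = config.getString("kafka.consumer.topic")          // "proto-spans"
val accumulatorIntervalMs: Int = config.getInt("kafka.accumulator.interval")  // 2500
```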
diff --git a/service-graph/node-finder/src/main/resources/logback.xml b/service-graph/node-finder/src/main/resources/logback.xml
new file mode 100644
index 000000000..c45f62d7b
--- /dev/null
+++ b/service-graph/node-finder/src/main/resources/logback.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+        <withJansi>true</withJansi>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="async" class="ch.qos.logback.classic.AsyncAppender">
+        <queueSize>${HAYSTACK_LOG_QUEUE_SIZE:-500}</queueSize>
+        <discardingThreshold>${HAYSTACK_LOG_DISCARD_THRESHOLD:-0}</discardingThreshold>
+        <appender-ref ref="console"/>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="async"/>
+    </root>
+</configuration>
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/App.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/App.scala
new file mode 100644
index 000000000..e6e7ff038
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/App.scala
@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder
+
+import com.expedia.www.haystack.commons.health.{HealthStatusController, UpdateHealthStatusFile}
+import com.expedia.www.haystack.commons.kstreams.app.{Main, StateChangeListener, StreamsFactory, StreamsRunner}
+import com.expedia.www.haystack.service.graph.node.finder.app.Streams
+import com.expedia.www.haystack.service.graph.node.finder.app.metadata.TopicCreator
+import com.expedia.www.haystack.service.graph.node.finder.config.AppConfiguration
+import com.netflix.servo.util.VisibleForTesting
+
+/**
+ * Starting point for node-finder application
+ */
+object App extends Main {
+ /**
+ * Creates a valid instance of StreamsRunner.
+ *
+ * StreamsRunner is created with a valid StreamsFactory instance and a listener that observes
+ * state changes of the kstreams application.
+ *
+ * StreamsFactory in turn is created with a Topology Supplier and kafka.StreamsConfig. Any failure in
+ * StreamsFactory is gracefully handled by StreamsRunner, which shuts the application down
+ *
+ * Core logic of this application is in the `app.Streams` instance - which is a topology supplier. The
+ * topology of this application is built in this class.
+ *
+ * @return A valid instance of `StreamsRunner`
+ */
+ override def createStreamsRunner(): StreamsRunner = {
+ val appConfiguration = new AppConfiguration()
+
+ val healthStatusController = new HealthStatusController
+ healthStatusController.addListener(new UpdateHealthStatusFile(appConfiguration.healthStatusFilePath))
+
+ val stateChangeListener = new StateChangeListener(healthStatusController)
+
+ createStreamsRunner(appConfiguration, stateChangeListener)
+ }
+
+ @VisibleForTesting
+ def createStreamsRunner(appConfiguration: AppConfiguration,
+ stateChangeListener: StateChangeListener): StreamsRunner = {
+ //create the topology provider
+ val kafkaConfig = appConfiguration.kafkaConfig
+
+ TopicCreator.makeMetadataTopicReady(kafkaConfig)
+
+ val streams = new Streams(kafkaConfig)
+
+ val streamsFactory = new StreamsFactory(streams, kafkaConfig.streamsConfig, kafkaConfig.protoSpanTopic)
+
+ new StreamsRunner(streamsFactory, stateChangeListener)
+ }
+}
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/GraphNodeProducer.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/GraphNodeProducer.scala
new file mode 100644
index 000000000..611c00de3
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/GraphNodeProducer.scala
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.app
+
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.service.graph.node.finder.model.SpanPair
+import org.apache.kafka.streams.processor.{Processor, ProcessorContext, ProcessorSupplier}
+import org.slf4j.LoggerFactory
+
+class GraphNodeProducerSupplier extends ProcessorSupplier[String, SpanPair] {
+ override def get(): Processor[String, SpanPair] = new GraphNodeProducer
+}
+
+class GraphNodeProducer extends Processor[String, SpanPair] with MetricsSupport {
+ private var context: ProcessorContext = _
+ private val processMeter = metricRegistry.meter("graph.node.producer.process")
+ private val forwardMeter = metricRegistry.meter("graph.node.producer.emit")
+ private val LOGGER = LoggerFactory.getLogger(classOf[GraphNodeProducer])
+
+ override def init(context: ProcessorContext): Unit = {
+ this.context = context
+ }
+
+ override def process(key: String, spanPair: SpanPair): Unit = {
+ processMeter.mark()
+
+ if (LOGGER.isDebugEnabled) {
+ LOGGER.debug(s"Received message ($key, $spanPair)")
+ }
+
+ spanPair.getGraphEdge match {
+ case Some(graphEdge) =>
+ context.forward(graphEdge, graphEdge)
+ forwardMeter.mark()
+ if (LOGGER.isDebugEnabled) {
+ LOGGER.debug(s"Graph edge : (${spanPair.getId}, $graphEdge")
+ }
+ case None =>
+ }
+
+ context.commit()
+ }
+
+ override def punctuate(timestamp: Long): Unit = {}
+
+ override def close(): Unit = {}
+}
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/LatencyProducer.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/LatencyProducer.scala
new file mode 100644
index 000000000..83b67e4a4
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/LatencyProducer.scala
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.app
+
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.service.graph.node.finder.model.SpanPair
+import org.apache.kafka.streams.processor.{Processor, ProcessorContext, ProcessorSupplier}
+import org.slf4j.LoggerFactory
+
+class LatencyProducerSupplier() extends ProcessorSupplier[String, SpanPair] {
+ override def get(): Processor[String, SpanPair] = new LatencyProducer()
+}
+
+class LatencyProducer() extends Processor[String, SpanPair] with MetricsSupport {
+ private var context: ProcessorContext = _
+ private val processMeter = metricRegistry.meter("latency.producer.process")
+ private val forwardMeter = metricRegistry.meter("latency.producer.emit")
+ private val LOGGER = LoggerFactory.getLogger(classOf[LatencyProducer])
+
+ override def init(context: ProcessorContext): Unit = {
+ this.context = context
+ }
+
+ override def process(key: String, spanPair: SpanPair): Unit = {
+ processMeter.mark()
+
+ if (LOGGER.isDebugEnabled) {
+ LOGGER.debug(s"Received message ($key, $spanPair)")
+ }
+
+ spanPair.getLatency match {
+ case Some(metricData) =>
+ context.forward(metricData.getMetricDefinition.getKey, metricData)
+ forwardMeter.mark()
+ if (LOGGER.isInfoEnabled()) {
+ LOGGER.info(s"Latency Metric: $metricData")
+ }
+ case None =>
+ }
+
+ context.commit()
+ }
+
+ override def punctuate(timestamp: Long): Unit = {}
+
+ override def close(): Unit = {}
+}
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/SpanAccumulator.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/SpanAccumulator.scala
new file mode 100644
index 000000000..9b1276d1b
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/SpanAccumulator.scala
@@ -0,0 +1,191 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.app
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.commons.graph.GraphEdgeTagCollector
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.service.graph.node.finder.model.{LightSpan, ServiceNodeMetadata, SpanPair, SpanPairBuilder}
+import com.expedia.www.haystack.service.graph.node.finder.utils.SpanUtils
+import com.netflix.servo.util.VisibleForTesting
+import org.apache.commons.lang3.StringUtils
+import org.apache.kafka.streams.processor._
+import org.apache.kafka.streams.state.KeyValueStore
+import org.slf4j.LoggerFactory
+
+import scala.collection.mutable
+
+class SpanAccumulatorSupplier(storeName: String,
+ accumulatorInterval: Int,
+ tagCollector: GraphEdgeTagCollector) extends
+ ProcessorSupplier[String, Span] {
+ override def get(): Processor[String, Span] = new SpanAccumulator(storeName, accumulatorInterval, tagCollector)
+}
+
+class SpanAccumulator(storeName: String,
+ accumulatorInterval: Int,
+ tagCollector: GraphEdgeTagCollector)
+ extends Processor[String, Span] with MetricsSupport {
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[SpanAccumulator])
+ private val processMeter = metricRegistry.meter("span.accumulator.process")
+ private val aggregateMeter = metricRegistry.meter("span.accumulator.aggregate")
+ private val forwardMeter = metricRegistry.meter("span.accumulator.emit")
+
+ // map to store spanId -> span data. Used for checking child-parent relationship
+ private var spanMap = mutable.HashMap[String, mutable.HashSet[LightSpan]]()
+
+ // map to store parentSpanId -> span data. Used for checking child-parent relationship
+ private var parentSpanMap = mutable.HashMap[String, mutable.HashSet[LightSpan]]()
+
+ private var processorContext: ProcessorContext = _
+ private var metadataStore: KeyValueStore[String, ServiceNodeMetadata] = _
+
+ override def init(context: ProcessorContext): Unit = {
+ processorContext = context
+ context.schedule(accumulatorInterval, PunctuationType.STREAM_TIME, getPunctuator(context))
+ metadataStore = context.getStateStore(storeName).asInstanceOf[KeyValueStore[String, ServiceNodeMetadata]]
+ LOGGER.info(s"${this.getClass.getSimpleName} initialized")
+ }
+
+ override def process(key: String, span: Span): Unit = {
+ processMeter.mark()
+
+ //find the span type
+ val spanType = SpanUtils.getSpanType(span)
+
+ if (SpanUtils.isAccumulableSpan(span)) {
+
+ val lightSpan = LightSpan(span.getSpanId,
+ span.getParentSpanId,
+ span.getStartTime / 1000, //startTime is in microseconds, so divide it by 1000 to send MS
+ span.getServiceName,
+ span.getOperationName,
+ span.getDuration,
+ spanType,
+ tagCollector.collectTags(span))
+
+ //add new light span to the span map and parent map
+ spanMap.getOrElseUpdate(span.getSpanId, mutable.HashSet[LightSpan]()).add(lightSpan)
+ if (StringUtils.isNotEmpty(span.getParentSpanId)) {
+ parentSpanMap.getOrElseUpdate(span.getParentSpanId, mutable.HashSet[LightSpan]()).add(lightSpan)
+ }
+
+ processSpan(lightSpan) foreach {
+ spanPair =>
+ if (isValidMerge(spanPair)) forward(processorContext, spanPair)
+ cleanupSpanMap(spanPair)
+ aggregateMeter.mark()
+ }
+ }
+ }
+
+ override def punctuate(timestamp: Long): Unit = {}
+
+ override def close(): Unit = {}
+
+ //forward all complete spans
+ private def forward(context: ProcessorContext, spanPair: SpanPair): Unit = {
+ LOGGER.debug("Forwarding complete SpanPair: {}", spanPair)
+ context.forward(spanPair.getId, spanPair)
+ forwardMeter.mark()
+ }
+
+ /**
+ * process the given light span to check whether it can form a span pair
+ *
+ * @param span incoming span to be processed
+ * @return sequence of span pair whether complete or incomplete
+ */
+ private def processSpan(span: LightSpan): Seq[SpanPair] = {
+ val possibleSpanPairs = spanMap(span.spanId)
+
+ //matched span pairs, complete or incomplete depending on their services
+ val spanPairs = mutable.ListBuffer[SpanPair]()
+
+ //same spanId is present in spanMap
+ if (possibleSpanPairs.size > 1) {
+ spanPairs += SpanPairBuilder.createSpanPair(possibleSpanPairs.head, possibleSpanPairs.tail.head)
+ } else {
+ //look for its parent, i.e. whether its parentId is in the span map
+ spanMap.get(span.parentSpanId) match {
+ case Some(parentSpan) => spanPairs += SpanPairBuilder.createSpanPair(parentSpan.head, span)
+ case _ =>
+ }
+ //look for its children, i.e. whether its spanId is in the parent map
+ parentSpanMap.get(span.spanId) match {
+ case Some(childSpans) => spanPairs ++= childSpans.map(childSpan => SpanPairBuilder.createSpanPair(childSpan, span))
+ case _ =>
+ }
+ }
+ spanPairs
+ }
+
+ @VisibleForTesting
+ def getPunctuator(context: ProcessorContext): Punctuator = {
+ (timestamp: Long) => {
+ //keep a span only until timeToKeep; anything older is discarded, the rest
+ //stay in place in case their matching span pair arrives before timeToKeep
+ val timeToKeep = timestamp - accumulatorInterval //in milliseconds
+ LOGGER.debug(s"Punctuate called with $timestamp. TimeToKeep is $timeToKeep. Map sizes are ${spanMap.values.flatten[LightSpan].size} & ${parentSpanMap.size}")
+
+ //spans within the time limit are kept; the rest are discarded
+ spanMap = spanMap.filter {
+ case (_, ls) => ls.exists(sp => sp.isLaterThan(timeToKeep))
+ }
+ parentSpanMap = parentSpanMap.filter {
+ case (_, ls) => ls.exists(sp => sp.isLaterThan(timeToKeep))
+ }
+
+ // commit the current processing progress
+ context.commit()
+ }
+ }
+
+ @VisibleForTesting
+ def spanCount: Int = spanMap.values.flatten[LightSpan].size
+
+ @VisibleForTesting
+ def internalSpanMap = spanMap.toMap
+
+ /**
+ * Removes the spans of a span pair from the parent span map.
+ * They are not removed from spanMap because, when both spans belong to the same service, a span can have multiple children.
+ *
+ * @param spanPair span pair with client / server spans
+ */
+ private def cleanupSpanMap(spanPair: SpanPair): Unit = {
+ spanPair.getBackingSpans.foreach(ls => {
+ parentSpanMap.remove(ls.spanId)
+ })
+ }
+
+ private def isValidMerge(spanPair: SpanPair): Boolean = {
+ if (spanPair.isComplete) {
+ val metadata = metadataStore.get(spanPair.getServerSpan.serviceName)
+ if (metadata == null) {
+ true
+ } else {
+ // if current merge matches with the recorded style, or it is shared span merge style, then accept it
+ (metadata.useSharedSpan == spanPair.IsSharedSpan) || spanPair.IsSharedSpan
+ }
+ } else {
+ false
+ }
+ }
+}
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/Streams.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/Streams.scala
new file mode 100644
index 000000000..3816304b8
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/Streams.scala
@@ -0,0 +1,234 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.app
+
+import java.util.function.Supplier
+
+import com.expedia.www.haystack.commons.graph.GraphEdgeTagCollector
+import com.expedia.www.haystack.commons.kstreams.serde.SpanSerde
+import com.expedia.www.haystack.commons.kstreams.serde.graph.GraphEdgeKeySerde
+import com.expedia.www.haystack.commons.kstreams.serde.graph.GraphEdgeValueSerde
+import com.expedia.www.haystack.commons.kstreams.serde.metricdata.{MetricDataSerializer, MetricTankSerde}
+import com.expedia.www.haystack.service.graph.node.finder.app.metadata.MetadataProducerSupplier
+import com.expedia.www.haystack.service.graph.node.finder.app.metadata.MetadataStoreUpdateProcessorSupplier
+import com.expedia.www.haystack.service.graph.node.finder.config.KafkaConfiguration
+import com.expedia.www.haystack.service.graph.node.finder.model.MetadataStoreBuilder
+import com.expedia.www.haystack.service.graph.node.finder.model.ServiceNodeMetadataSerde
+import com.netflix.servo.util.VisibleForTesting
+import org.apache.kafka.common.serialization.Serdes
+import org.apache.kafka.common.serialization.StringDeserializer
+import org.apache.kafka.common.serialization.StringSerializer
+import org.apache.kafka.streams.Topology
+
+class Streams(kafkaConfiguration: KafkaConfiguration) extends Supplier[Topology] {
+
+ private val PROTO_SPANS = "proto-spans"
+ private val SPAN_ACCUMULATOR = "span-accumulator"
+ private val LATENCY_PRODUCER = "latency-producer"
+ private val GRAPH_NODE_PRODUCER = "nodes-n-edges-producer"
+ private val METRIC_SINK = "metric-sink"
+ private val GRAPH_NODE_SINK = "graph-nodes-sink"
+
+ private val METADATA_STORE_PROCESSOR = "metadata-store-processor"
+ private val METADATA_SOURCE_NODE = "metadata-source-node"
+ private val METADATA_PRODUCER = "metadata-producer"
+ private val METADATA_SINK = "metadata-sink"
+
+ override def get(): Topology = initialize(new Topology)
+
+ /**
+ * This provides a topology that is shown in the flow chart below
+ *
+ * +---------------+
+ * | |
+ * | proto-spans |
+ * | |
+ * +-------+-------+
+ * |
+ * +---------V----------+
+ * | |
+ * +----+ span-accumulator +----+
+ * | | | |
+ * | +--------------------+ |
+ * | |
+ * +---------V---------+ +------------V------------+
+ * | | | |
+ * | latency-producer | | nodes-n-edges-producer |
+ * | | | |
+ * +---------+---------+ +------------+------------+
+ * | |
+ * +--------V--------+ +---------V---------+
+ * | | | |
+ * | metric-sink | | graph-nodes-sink |
+ * | | | |
+ * +-----------------+ +-------------------+
+ *
+ * Source:
+ *
+ * proto-spans : Reads a Kafka topic of spans serialized in protobuf format
+ *
+ * Processors:
+ *
+ * span-accumulator : Aggregates incoming spans for a specified time to find matching client-server spans
+ * latency-producer : Computes and emits network latency from the span pairs produced by span-accumulator
+ * nodes-n-edges-producer : This processor produces, from the span pairs produced by span-accumulator, a simple
+ * graph relationship between the services in the form: service --(operation)--> service
+ *
+ * Sinks:
+ *
+ * metric-sink : Output of latency-producer (MetricPoint) is serialized using MessagePack and sent to a Kafka topic
+ * graph-nodes-sink : Output of nodes-n-edges-producer is serialized as a JSON string and sent to a Kafka topic
+ *
+ * @return the Topology
+ */
+ @VisibleForTesting
+ def initialize(topology: Topology): Topology = {
+ //add source
+ addSource(PROTO_SPANS, topology)
+
+ //add span accumulator. This step will aggregate spans
+ //by message id. This will emit spans with client-server
+ //relationship after specified number of seconds
+ addAccumulator(SPAN_ACCUMULATOR, topology, PROTO_SPANS)
+
+ //add latency producer. This is downstream of accumulator
+ //this will parse a span with client-server relationship and
+ //emit a metric point on the latency for that service-operation pair
+ addLatencyProducer(LATENCY_PRODUCER, topology, SPAN_ACCUMULATOR)
+
+ //add graph node producer. This is downstream of accumulator
+ //for each client-server span emitted by the accumulator, this will
+ //produce a service - operation - service data point for building
+ //the edges between the nodes in a graph
+ addGraphNodeProducer(GRAPH_NODE_PRODUCER, topology, SPAN_ACCUMULATOR)
+
+ //add sink for latency producer
+ addMetricSink(METRIC_SINK, kafkaConfiguration.metricsTopic, topology, LATENCY_PRODUCER)
+
+ //add sink for graph node producer
+ addGraphNodeSink(GRAPH_NODE_SINK, kafkaConfiguration.serviceCallTopic, topology, GRAPH_NODE_PRODUCER)
+
+ //add metadata processor and a sink for metadata store
+ addMetadataProducer(METADATA_PRODUCER, topology, SPAN_ACCUMULATOR)
+ addMetadataStoreSink(METADATA_SINK, topology, METADATA_PRODUCER)
+
+ //return the topology built
+ topology
+ }
+
+ private def addSource(stepName: String, topology: Topology) : Unit = {
+ //add a source
+ topology.addSource(
+ kafkaConfiguration.autoOffsetReset,
+ stepName,
+ kafkaConfiguration.timestampExtractor,
+ new StringDeserializer,
+ (new SpanSerde).deserializer(),
+ kafkaConfiguration.protoSpanTopic)
+ }
+
+ private def addAccumulator(accumulatorName: String, topology: Topology, sourceName: String) : Unit = {
+ val tags =
+ if (kafkaConfiguration.collectorTags != null)
+ kafkaConfiguration.collectorTags.toSet[String]
+ else
+ Set[String]()
+
+ topology.addProcessor(
+ accumulatorName,
+ new SpanAccumulatorSupplier(kafkaConfiguration.metadataConfig.topic, kafkaConfiguration.accumulatorInterval,
+ new GraphEdgeTagCollector(tags)),
+ sourceName
+ )
+
+ topology.addGlobalStore(MetadataStoreBuilder.storeBuilder(kafkaConfiguration.metadataConfig),
+ METADATA_SOURCE_NODE,
+ Serdes.String().deserializer(),
+ new ServiceNodeMetadataSerde().deserializer(),
+ kafkaConfiguration.metadataConfig.topic,
+ METADATA_STORE_PROCESSOR,
+ new MetadataStoreUpdateProcessorSupplier(kafkaConfiguration.metadataConfig.topic))
+ }
+
+ private def addLatencyProducer(latencyProducerName: String,
+ topology: Topology,
+ accumulatorName: String) : Unit = {
+ topology.addProcessor(
+ latencyProducerName,
+ new LatencyProducerSupplier(),
+ accumulatorName
+ )
+ }
+
+ private def addGraphNodeProducer(graphNodeProducerName: String,
+ topology: Topology,
+ accumulatorName: String) = {
+ topology.addProcessor(
+ graphNodeProducerName,
+ new GraphNodeProducerSupplier(),
+ accumulatorName
+ )
+ }
+
+ private def addMetricSink(metricSinkName: String,
+ metricsTopic: String,
+ topology: Topology,
+ latencyProducerName: String): Unit = {
+ topology.addSink(
+ metricSinkName,
+ metricsTopic,
+ new StringSerializer,
+ new MetricDataSerializer,
+ latencyProducerName
+ )
+ }
+
+ private def addGraphNodeSink(graphNodeSinkName: String,
+ serviceCallTopic: String,
+ topology: Topology,
+ graphNodeProducerName: String): Unit = {
+ topology.addSink(
+ graphNodeSinkName,
+ serviceCallTopic,
+ new GraphEdgeKeySerde().serializer(),
+ new GraphEdgeValueSerde().serializer(),
+ graphNodeProducerName
+ )
+ }
+
+ private def addMetadataStoreSink(sinkName: String,
+ topology: Topology,
+ producerName: String): Unit = {
+ topology.addSink(
+ sinkName,
+ kafkaConfiguration.metadataConfig.topic,
+ Serdes.String().serializer(),
+ new ServiceNodeMetadataSerde().serializer(),
+ producerName
+ )
+ }
+
+ private def addMetadataProducer(processorName: String,
+ topology: Topology,
+ producerName: String): Unit = {
+ topology.addProcessor(
+ processorName,
+ new MetadataProducerSupplier(kafkaConfiguration.metadataConfig.topic),
+ producerName
+ )
+ }
+}
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/metadata/MetadataProducerSupplier.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/metadata/MetadataProducerSupplier.scala
new file mode 100644
index 000000000..c2bca5dd8
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/metadata/MetadataProducerSupplier.scala
@@ -0,0 +1,48 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.service.graph.node.finder.app.metadata
+
+import com.expedia.www.haystack.service.graph.node.finder.model.{ServiceNodeMetadata, SpanPair}
+import org.apache.kafka.streams.processor.{Processor, ProcessorContext, ProcessorSupplier}
+import org.apache.kafka.streams.state.KeyValueStore
+
+class MetadataProducerSupplier(metadataStoreName: String) extends ProcessorSupplier[String, SpanPair] {
+ override def get(): Processor[String, SpanPair] = new MetadataProducer(metadataStoreName)
+}
+
+class MetadataProducer(metadataStoreName: String) extends Processor[String, SpanPair] {
+ private var context: ProcessorContext = _
+ private var store: KeyValueStore[String, ServiceNodeMetadata] = _
+
+ override def init(context: ProcessorContext): Unit = {
+ this.context = context
+ this.store = context.getStateStore(metadataStoreName).asInstanceOf[KeyValueStore[String, ServiceNodeMetadata]]
+ }
+
+ override def process(key: String, spanPair: SpanPair): Unit = {
+ // emit the metadata only if service uses SharedSpan merge style
+ if (this.store.get(spanPair.getServerSpan.serviceName) == null && spanPair.IsSharedSpan) {
+ context.forward(spanPair.getServerSpan.serviceName, ServiceNodeMetadata(spanPair.IsSharedSpan))
+ }
+ }
+
+ override def punctuate(timestamp: Long): Unit = ()
+
+ override def close(): Unit = ()
+}
\ No newline at end of file
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/metadata/MetadataStoreUpdateProcessorSupplier.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/metadata/MetadataStoreUpdateProcessorSupplier.scala
new file mode 100644
index 000000000..9438b4d79
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/metadata/MetadataStoreUpdateProcessorSupplier.scala
@@ -0,0 +1,47 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.service.graph.node.finder.app.metadata
+
+import com.expedia.www.haystack.service.graph.node.finder.model.ServiceNodeMetadata
+import org.apache.kafka.streams.processor.{Processor, ProcessorContext, ProcessorSupplier}
+import org.apache.kafka.streams.state.KeyValueStore
+
+import scala.util.Try
+
+class MetadataStoreUpdateProcessorSupplier(storeName: String) extends ProcessorSupplier[String, ServiceNodeMetadata] {
+ override def get(): Processor[String, ServiceNodeMetadata] = new MetadataStoreUpdateProcessor(storeName)
+}
+
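+/**
+ * Processor that keeps the local metadata store in sync by writing every
+ * (service name -> ServiceNodeMetadata) record it receives into the store.
+ */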
+class MetadataStoreUpdateProcessor(storeName: String) extends Processor[String, ServiceNodeMetadata] {
+ private var store: KeyValueStore[String, ServiceNodeMetadata] = _
+
+ override def init(context: ProcessorContext): Unit = {
+ store = context.getStateStore(storeName).asInstanceOf[KeyValueStore[String, ServiceNodeMetadata]]
+ }
+
+ override def process(key: String, value: ServiceNodeMetadata): Unit = {
+ store.put(key, value)
+ }
+
+ override def punctuate(timestamp: Long): Unit = ()
+
+ override def close(): Unit = {
+ Try(store.close())
+ }
+}
\ No newline at end of file
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/metadata/TopicCreator.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/metadata/TopicCreator.scala
new file mode 100644
index 000000000..248c62817
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/app/metadata/TopicCreator.scala
@@ -0,0 +1,69 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.service.graph.node.finder.app.metadata
+
+import java.util.Properties
+import java.util.concurrent.{ExecutionException, TimeUnit}
+
+import com.expedia.www.haystack.service.graph.node.finder.config.KafkaConfiguration
+import org.apache.kafka.clients.CommonClientConfigs
+import org.apache.kafka.clients.admin.{AdminClient, NewTopic}
+import org.apache.kafka.common.config.TopicConfig
+import org.apache.kafka.common.errors.TopicExistsException
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+import scala.util.Try
+
+object TopicCreator {
+ private val LOGGER = LoggerFactory.getLogger(TopicCreator.getClass)
+
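+ /**
+ * Creates the compacted metadata topic when auto-create is enabled. A topic that already
+ * exists is logged and tolerated; any other failure aborts startup with a RuntimeException.
+ *
+ * @param config kafka configuration carrying the metadata topic settings
+ */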
+ def makeMetadataTopicReady(config: KafkaConfiguration): Unit = {
+ if (!config.metadataConfig.autoCreate)
+ return
+
+ val properties = new Properties()
+ properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, config.streamsConfig.getList(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG))
+ val adminClient = AdminClient.create(properties)
+ try {
+ val overridesConfig = new java.util.HashMap[String, String]()
+ overridesConfig.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT)
+ val topic = new NewTopic(
+ config.metadataConfig.topic,
+ config.metadataConfig.partitionCount,
+ config.metadataConfig.replicationFactor.toShort).configs(overridesConfig)
+ adminClient.createTopics(List(topic).asJava).values().entrySet().asScala.foreach(entry => {
+ try {
+ entry.getValue.get()
+ } catch {
+ case ex: ExecutionException =>
+ if (ex.getCause.isInstanceOf[TopicExistsException]) {
+ LOGGER.info(s"metadata topic '${config.metadataConfig.topic}' already exists!")
+ } else {
+ throw new RuntimeException(s"Failed to create the metadata topic '${config.metadataConfig.topic}'", ex)
+ }
+ case ex: Exception => throw new RuntimeException(s"Failed to create the metadata topic '${config.metadataConfig.topic}'", ex)
+ }
+ })
+ }
+ finally {
+ Try(adminClient.close(5, TimeUnit.SECONDS))
+ }
+ }
+}
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/config/AppConfiguration.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/config/AppConfiguration.scala
new file mode 100644
index 000000000..81b71d34d
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/config/AppConfiguration.scala
@@ -0,0 +1,117 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.config
+
+import java.util.Properties
+
+import com.expedia.www.haystack.commons.config.ConfigurationLoader
+import com.expedia.www.haystack.commons.kstreams.SpanTimestampExtractor
+import com.typesafe.config.Config
+import org.apache.commons.lang3.StringUtils
+import org.apache.kafka.streams.StreamsConfig
+import org.apache.kafka.streams.Topology.AutoOffsetReset
+import org.apache.kafka.streams.processor.TimestampExtractor
+
+import scala.collection.JavaConverters._
+
+/**
+ * This class reads the configuration from the given resource name using {@link ConfigurationLoader ConfigurationLoader}
+ *
+ * @param resourceName name of the resource file to load
+ */
+class AppConfiguration(resourceName: String) {
+
+ require(StringUtils.isNotBlank(resourceName))
+
+ private val config = ConfigurationLoader.loadConfigFileWithEnvOverrides(resourceName = this.resourceName)
+
+ /**
+ * default constructor; loads config from the resource named "app.conf"
+ */
+ def this() = this("app.conf")
+
+ /**
+ * Location of the health status file
+ */
+ val healthStatusFilePath: String = config.getString("health.status.path")
+
+ /**
+ * Instance of {@link KafkaConfiguration KafkaConfiguration} to be used by the kstreams application
+ */
+ lazy val kafkaConfig: KafkaConfiguration = {
+
+ // verify that the applicationId and bootstrap server configs are non-empty
+ def verifyRequiredProps(props: Properties): Unit = {
+ require(StringUtils.isNotBlank(props.getProperty(StreamsConfig.APPLICATION_ID_CONFIG)))
+ require(StringUtils.isNotBlank(props.getProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG)))
+ }
+
+ def addProps(config: Config, props: Properties, prefix: (String) => String = identity): Unit = {
+ config.entrySet().asScala.foreach(kv => {
+ val propKeyName = prefix(kv.getKey)
+ props.setProperty(propKeyName, kv.getValue.unwrapped().toString)
+ })
+ }
+
+ val kafka = config.getConfig("kafka")
+ val producerConfig = kafka.getConfig("producer")
+ val consumerConfig = kafka.getConfig("consumer")
+ val streamsConfig = kafka.getConfig("streams")
+
+ val props = new Properties
+
+ // add stream specific properties
+ addProps(streamsConfig, props)
+
+ // validate props
+ verifyRequiredProps(props)
+
+ val timestampExtractor = Option(props.getProperty("timestamp.extractor")) match {
+ case Some(timeStampExtractorClass) =>
+ Class.forName(timeStampExtractorClass).newInstance().asInstanceOf[TimestampExtractor]
+ case None =>
+ new SpanTimestampExtractor
+ }
+
+ // set the timestamp extractor
+ props.setProperty("timestamp.extractor", timestampExtractor.getClass.getName)
+
+ val collectorTags: List[String] =
+ if (kafka.hasPath("collectorTags")) kafka.getStringList("collectorTags").asScala.toList
+ else List()
+
+ val metadataTopicConfig = kafka.getConfig("node.metadata.topic")
+
+ KafkaConfiguration(new StreamsConfig(props),
+ producerConfig.getString("metrics.topic"),
+ producerConfig.getString("service.call.topic"),
+ consumerConfig.getString("topic"),
+ if (streamsConfig.hasPath("auto.offset.reset")) {
+ AutoOffsetReset.valueOf(streamsConfig.getString("auto.offset.reset").toUpperCase)
+ }
+ else {
+ AutoOffsetReset.LATEST
+ },
+ timestampExtractor,
+ kafka.getInt("accumulator.interval"),
+ kafka.getLong("close.timeout.ms"),
+ NodeMetadataConfiguration(metadataTopicConfig.getBoolean("autocreate"), metadataTopicConfig.getString("name"), metadataTopicConfig.getInt("partition.count"), metadataTopicConfig.getInt("replication.factor")),
+ collectorTags)
+ }
+}
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/config/KafkaConfiguration.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/config/KafkaConfiguration.scala
new file mode 100644
index 000000000..bf4de3e25
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/config/KafkaConfiguration.scala
@@ -0,0 +1,56 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.config
+
+import org.apache.commons.lang3.StringUtils
+import org.apache.kafka.streams.StreamsConfig
+import org.apache.kafka.streams.Topology.AutoOffsetReset
+import org.apache.kafka.streams.processor.TimestampExtractor
+
+/**
+ * Case class holding required configuration for the node finder kstreams app
+ * @param streamsConfig valid instance of StreamsConfig
+ * @param metricsTopic topic name for latency metrics
+ * @param serviceCallTopic topic name for service call relationship information
+ * @param protoSpanTopic topic from which protobuf-serialized Spans are consumed
+ * @param autoOffsetReset offset reset policy for the kstreams app to start with
+ * @param timestampExtractor instance of timestamp extractor
+ * @param accumulatorInterval interval at which accumulated spans are scanned to pair client and server spans
+ * @param closeTimeoutInMs timeout in milliseconds used when closing kafka clients
+ * @param metadataConfig configuration for the metadata kafka topic
+ * @param collectorTags tags to be collected when generating graph edges
+ */
+case class KafkaConfiguration(streamsConfig: StreamsConfig,
+ metricsTopic: String,
+ serviceCallTopic: String,
+ protoSpanTopic: String,
+ autoOffsetReset: AutoOffsetReset,
+ timestampExtractor: TimestampExtractor,
+ accumulatorInterval: Int,
+ closeTimeoutInMs: Long,
+ metadataConfig: NodeMetadataConfiguration,
+ collectorTags: List[String]) {
+ require(streamsConfig != null)
+ require(StringUtils.isNotBlank(metricsTopic))
+ require(StringUtils.isNotBlank(serviceCallTopic))
+ require(StringUtils.isNotBlank(protoSpanTopic))
+ require(autoOffsetReset != null)
+ require(timestampExtractor != null)
+ require(closeTimeoutInMs > 0)
+ require(collectorTags != null)
+}
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/config/NodeMetadataConfiguration.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/config/NodeMetadataConfiguration.scala
new file mode 100644
index 000000000..0850a8e6c
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/config/NodeMetadataConfiguration.scala
@@ -0,0 +1,21 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.service.graph.node.finder.config
+
+case class NodeMetadataConfiguration(autoCreate: Boolean, topic: String, partitionCount: Int, replicationFactor: Int)
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/model/LightSpan.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/model/LightSpan.scala
new file mode 100644
index 000000000..9fea6f5f7
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/model/LightSpan.scala
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.service.graph.node.finder.model
+
+import com.expedia.www.haystack.service.graph.node.finder.utils.SpanType
+import com.expedia.www.haystack.service.graph.node.finder.utils.SpanType.SpanType
+import org.apache.commons.lang3.StringUtils
+
+/**
+ * Lightweight representation of a Span with the minimal information required
+ *
+ * @param spanId unique identity of the Span
+ * @param parentSpanId spanId of its parent span
+ * @param time timestamp of the span in epoch milliseconds (i.e., its start time)
+ * @param serviceName service name of the span
+ * @param operationName operation name of the span
+ * @param duration duration of the Span in microseconds
+ * @param spanType type of the span
+ * @param tags tags collected for the span
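+ *
+ * A minimal construction sketch (illustrative values):
+ * {{{
+ * val span = LightSpan("span-1", "parent-1", System.currentTimeMillis(),
+ * "foo-service", "bar", 1000L, SpanType.CLIENT, Map.empty)
+ * span.isLaterThan(System.currentTimeMillis() - 100) // true: the span's end time is after the cut-off
+ * }}}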
+ */
+case class LightSpan(spanId: String,
+ parentSpanId: String,
+ time: Long, // in epoch millis
+ serviceName: String,
+ operationName: String,
+ duration: Long, // in micros
+ spanType: SpanType,
+ tags: Map[String, String]) extends Equals {
+ require(StringUtils.isNotBlank(spanId))
+ require(time > 0)
+ require(StringUtils.isNotBlank(serviceName))
+ require(StringUtils.isNotBlank(operationName))
+ require(spanType != null)
+
+ private val durationInMillis = duration / 1000L
+ /**
+ * check whether this span ends after the given cutOffTime
+ *
+ * @param cutOffTime time in epoch millis to compare against
+ * @return true if the span's end time (start time plus duration) is after the given cutOffTime, else false
+ */
+ def isLaterThan(cutOffTime: Long): Boolean = (time + durationInMillis - cutOffTime) > 0
+
+ override def canEqual(that: Any): Boolean = {
+ that.isInstanceOf[LightSpan]
+ }
+
+ override def equals(that: Any): Boolean = {
+ that match {
+ case that: LightSpan =>
+ that.canEqual(this) &&
+ this.spanId == that.spanId &&
+ this.parentSpanId == that.parentSpanId &&
+ this.serviceName == that.serviceName
+ case _ => false
+ }
+ }
+
+ override def hashCode(): Int = {
+ 41 * (
+ 41 * (
+ 41 + spanId.hashCode
+ ) + parentSpanId.hashCode
+ ) + serviceName.hashCode
+ }
+}
+
+/**
+ * Builder class for LightSpan
+ */
+object LightSpanBuilder {
+
+ /**
+ * returns a copy of the given span with the given span type applied if the span's
+ * type is not yet known (i.e., it is OTHER); otherwise returns the span unchanged
+ *
+ * @param span span to be updated
+ * @param spanType span type to apply when the span's type is absent
+ * @return the updated span, or the original span if its type was already set
+ */
+ def updateSpanTypeIfAbsent(span: LightSpan, spanType: SpanType): LightSpan = {
+ if (span.spanType == SpanType.OTHER) span.copy(spanType = spanType) else span
+ }
+}
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/model/ServiceNodeMetadata.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/model/ServiceNodeMetadata.scala
new file mode 100644
index 000000000..c28e24c5a
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/model/ServiceNodeMetadata.scala
@@ -0,0 +1,76 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.service.graph.node.finder.model
+
+import java.util
+
+import com.expedia.www.haystack.service.graph.node.finder.config.NodeMetadataConfiguration
+import org.apache.kafka.common.serialization.{Deserializer, Serde, Serdes, Serializer}
+import org.apache.kafka.streams.state.{KeyValueStore, StoreBuilder, Stores}
+import org.json4s.DefaultFormats
+import org.json4s.jackson.Serialization
+
+case class ServiceNodeMetadata(useSharedSpan: Boolean)
+
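+/**
+ * JSON serde for ServiceNodeMetadata backed by json4s. As an illustrative round trip:
+ * serializing ServiceNodeMetadata(useSharedSpan = true) produces the UTF-8 bytes of
+ * {"useSharedSpan":true}, and deserializing those bytes yields an equal instance;
+ * a null payload deserializes to null.
+ */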
+class ServiceNodeMetadataSerde extends Serde[ServiceNodeMetadata] {
+ implicit val formats: DefaultFormats.type = DefaultFormats
+
+ override def deserializer(): Deserializer[ServiceNodeMetadata] = {
+ new Deserializer[ServiceNodeMetadata] {
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+
+ override def deserialize(key: String, payload: Array[Byte]): ServiceNodeMetadata = {
+ if (payload == null) {
+ null
+ } else {
+ Serialization.read[ServiceNodeMetadata](new String(payload))
+ }
+ }
+ }
+ }
+
+ override def serializer(): Serializer[ServiceNodeMetadata] = {
+ new Serializer[ServiceNodeMetadata] {
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def serialize(key: String, data: ServiceNodeMetadata): Array[Byte] = {
+ Serialization.write(data).getBytes("utf-8")
+ }
+
+ override def close(): Unit = ()
+ }
+ }
+
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+}
+
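+/**
+ * Builds the in-memory key-value store that backs the metadata processors, keyed by
+ * service name. Changelogging is disabled, presumably because the store is repopulated
+ * from the compacted metadata topic on restart rather than from a changelog.
+ */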
+object MetadataStoreBuilder {
+ def storeBuilder(config: NodeMetadataConfiguration): StoreBuilder[KeyValueStore[String, ServiceNodeMetadata]] = {
+ Stores.keyValueStoreBuilder(
+ Stores.inMemoryKeyValueStore(config.topic),
+ Serdes.String(),
+ new ServiceNodeMetadataSerde())
+ .withCachingEnabled()
+ .withLoggingDisabled()
+ }
+}
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/model/SpanPair.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/model/SpanPair.scala
new file mode 100644
index 000000000..df1a05a74
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/model/SpanPair.scala
@@ -0,0 +1,162 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.model
+
+import com.expedia.metrics.{MetricData, MetricDefinition, TagCollection}
+import com.expedia.www.haystack.commons.entities._
+import com.expedia.www.haystack.service.graph.node.finder.utils.SpanType
+import org.apache.commons.lang3.StringUtils
+import org.slf4j.LoggerFactory
+import scala.collection.JavaConverters._
+
+/**
+ * An instance of SpanPair can contain data from both server and client spans.
+ * A SpanPair is considered "complete" when it holds both the client and the server span of the same span id.
+ */
+class SpanPair {
+ private val LOGGER = LoggerFactory.getLogger(classOf[SpanPair])
+
+ private var clientSpan: LightSpan = _
+ private var serverSpan: LightSpan = _
+ private var isSharedSpan: Boolean = false
+
+ /**
+ * Returns true if the current instance has data for both server and client spans
+ * and their services are different
+ *
+ * @return true or false
+ */
+ def isComplete: Boolean = {
+ clientSpan != null &&
+ serverSpan != null &&
+ clientSpan.serviceName != serverSpan.serviceName &&
+ StringUtils.isNotEmpty(serverSpan.serviceName) &&
+ StringUtils.isNotEmpty(clientSpan.serviceName)
+ }
+
+ /**
+ * Returns the backing LightSpan objects
+ *
+ * @return list of LightSpan objects or an empty list
+ */
+ def getBackingSpans: List[LightSpan] = {
+ List(clientSpan, serverSpan).filter(w => w != null)
+ }
+
+ /**
+ * Merges the given spans into the current SpanPair instance using their span types.
+ * When one span is the parent of the other, the parent is treated as the client span and
+ * the child as the server span; otherwise the pair is marked as a shared span. A complete
+ * pair can then produce {@link #getGraphEdge} and {@link #getLatency} data.
+ *
+ * @param spanOne lightSpan to be merged with the current SpanPair
+ * @param spanTwo lightSpan to be merged with the current SpanPair
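+ *
+ * For example, if spanOne.spanId equals spanTwo.parentSpanId, spanOne becomes the
+ * client span and spanTwo the server span; if neither span is the other's parent,
+ * the pair is marked as a shared span.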
+ */
+ def merge(spanOne: LightSpan, spanTwo: LightSpan): Unit = {
+ if (spanOne.spanId.equalsIgnoreCase(spanTwo.parentSpanId)) {
+ setSpans(LightSpanBuilder.updateSpanTypeIfAbsent(spanOne, SpanType.CLIENT), LightSpanBuilder.updateSpanTypeIfAbsent(spanTwo, SpanType.SERVER))
+ isSharedSpan = false
+ } else if (spanOne.parentSpanId.equalsIgnoreCase(spanTwo.spanId)) {
+ setSpans(LightSpanBuilder.updateSpanTypeIfAbsent(spanOne, SpanType.SERVER), LightSpanBuilder.updateSpanTypeIfAbsent(spanTwo, SpanType.CLIENT))
+ isSharedSpan = false
+ } else {
+ setSpans(spanOne, spanTwo)
+ isSharedSpan = true
+ }
+
+ LOGGER.debug("created a span pair: client: {}, server: {}", List(clientSpan, serverSpan):_*)
+ }
+
+ /**
+ * set clientSpan or serverSpan depending upon the value of spanType in LightSpan
+ *
+ * @param spanOne span which needs to be set to clientSpan or serverSpan
+ * @param spanTwo span which needs to be set to clientSpan or serverSpan
+ */
+ private def setSpans(spanOne: LightSpan, spanTwo: LightSpan) = {
+ Seq(spanOne, spanTwo).foreach(span =>
+ span.spanType match {
+ case SpanType.CLIENT =>
+ this.clientSpan = span
+ case SpanType.SERVER =>
+ this.serverSpan = span
+ case SpanType.OTHER =>
+ }
+ )
+ }
+
+ /**
+ * Returns an instance of GraphEdge if the current SpanPair is complete. A GraphEdge
+ * contains the client span's ServiceName, its OperationName and the corresponding server
+ * span's ServiceName. These three data points act as the two nodes and the edge of a graph relationship
+ *
+ * @return an instance of GraphEdge, or None if the current SpanPair is incomplete
+ */
+ def getGraphEdge: Option[GraphEdge] = {
+ if (isComplete) {
+ val clientVertex = GraphVertex(clientSpan.serviceName, clientSpan.tags)
+ val serverVertex = GraphVertex(serverSpan.serviceName, serverSpan.tags)
+ Some(GraphEdge(clientVertex, serverVertex, clientSpan.operationName, clientSpan.time))
+ } else {
+ None
+ }
+ }
+
+ /**
+ * Returns an instance of MetricData that measures the latency of the current span pair. Latency
+ * is computed as the client span's duration minus its corresponding server span's duration. The
+ * MetricData instance returned is a gauge tagged with the client span's service name and operation name.
+ *
+ * @return an instance of MetricData, or None if the current SpanPair instance is incomplete
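+ *
+ * For example, a client span of 1500 micros paired with a server span of 500 micros yields
+ * a latency value of (1500 - 500) / 1000 = 1 ms.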
+ */
+ def getLatency: Option[MetricData] = {
+ if (isComplete) {
+ val tags = new TagCollection(Map(
+ TagKeys.SERVICE_NAME_KEY -> clientSpan.serviceName,
+ TagKeys.OPERATION_NAME_KEY -> clientSpan.operationName,
+ MetricDefinition.UNIT -> "ms",
+ MetricDefinition.MTYPE -> "gauge"
+ ).asJava)
+ val metricDefinition = new MetricDefinition("latency", tags, TagCollection.EMPTY)
+ val metricData = new MetricData(metricDefinition,
+ (clientSpan.duration - serverSpan.duration)/1000,
+ clientSpan.time / 1000)
+
+ Some(metricData)
+ } else {
+ None
+ }
+ }
+
+ def getId: String = clientSpan.spanId
+ def getServerSpan: LightSpan = serverSpan
+ def getClientSpan: LightSpan = clientSpan
+ def IsSharedSpan: Boolean = isSharedSpan
+
+ override def toString = s"SpanPair($isComplete, $clientSpan, $serverSpan)"
+}
+
+object SpanPairBuilder {
+ def createSpanPair(spanOne: LightSpan, spanTwo: LightSpan): SpanPair = {
+ require(spanOne != null)
+ require(spanTwo != null)
+
+ val newSpanPair = new SpanPair
+ newSpanPair.merge(spanOne, spanTwo)
+ newSpanPair
+ }
+}
\ No newline at end of file
diff --git a/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/utils/SpanUtils.scala b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/utils/SpanUtils.scala
new file mode 100644
index 000000000..76da43820
--- /dev/null
+++ b/service-graph/node-finder/src/main/scala/com/expedia/www/haystack/service/graph/node/finder/utils/SpanUtils.scala
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.utils
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.service.graph.node.finder.utils.SpanType.SpanType
+import org.apache.commons.lang3.StringUtils
+
+import scala.collection.JavaConverters._
+
+/**
+ * Object with utility methods to process a Span
+ */
+object SpanUtils {
+
+ val SERVER_SEND_EVENT = "ss"
+ val SERVER_RECV_EVENT = "sr"
+ val CLIENT_SEND_EVENT = "cs"
+ val CLIENT_RECV_EVENT = "cr"
+ val CLIENT = "client"
+ val SERVER = "server"
+
+ private val ONE = 1
+ private val TWO = 1 << 1
+ private val FOUR = 1 << 2
+ private val EIGHT = 1 << 3
+
+ private val THREE = ONE | TWO
+ private val TWELVE = FOUR | EIGHT
+
+ private val SPAN_MARKERS = Map(
+ CLIENT_SEND_EVENT -> Flag(ONE),
+ CLIENT_RECV_EVENT -> Flag(TWO),
+ SERVER_SEND_EVENT -> Flag(FOUR),
+ SERVER_RECV_EVENT -> Flag(EIGHT))
+
+ private val SPAN_TYPE_MAP = Map(Flag(THREE) -> SpanType.CLIENT, Flag(TWELVE) -> SpanType.SERVER)
+
+ /**
+ * Given a span, check whether it is eligible for accumulation and can become a light span
+ * @param span span to validate
+ * @return true if the span has a non-blank span id, service name and operation name, and a positive start time
+ */
+ def isAccumulableSpan(span: Span): Boolean =
+ StringUtils.isNotBlank(span.getSpanId) &&
+ StringUtils.isNotBlank(span.getServiceName) &&
+ StringUtils.isNotBlank(span.getOperationName) &&
+ span.getStartTime > 0
+
+ /**
+ * Given a span, this method looks for ('cs', 'cr') and ('sr', 'ss') pairs in log fields keyed "event"
+ * to identify the span type. Presence of the ('cs', 'cr') events yields SpanType.CLIENT and presence of
+ * the ('sr', 'ss') events yields SpanType.SERVER; all other spans are identified as OTHER. When no event
+ * logs are present, the "span.kind" tag is used as a fallback.
+ * @param span Span to identify
+ * @return the SpanType of the given span (CLIENT, SERVER or OTHER)
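+ *
+ * For example, a span whose logs carry events 'cs' and 'cr' resolves to SpanType.CLIENT,
+ * while a span with no event logs but a tag ("span.kind" -> "server") resolves to SpanType.SERVER.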
+ */
+ def getSpanType(span: Span): SpanType = {
+ var flag = Flag(0)
+ span.getLogsList.forEach(log => {
+ log.getFieldsList.asScala.foreach(tag => {
+ if (tag.getKey.equalsIgnoreCase("event") && StringUtils.isNotEmpty(tag.getVStr)) {
+ flag = flag | SPAN_MARKERS.getOrElse(tag.getVStr.toLowerCase, Flag(0))
+ }
+ })
+ })
+
+ // if event log tag is absent in the span object, decide the span type using `span.kind` tag key
+ // possible values for span.kind are `client` and `server`
+ // see OpenTracing conventions
+ SPAN_TYPE_MAP.getOrElse(flag, {
+ span.getTagsList.asScala.find(_.getKey == "span.kind").map(_.getVStr.toLowerCase) match {
+ case Some("client") => SpanType.CLIENT
+ case Some("server") => SpanType.SERVER
+ case _ => SpanType.OTHER
+ }
+ })
+ }
+
+ /**
+ * Finds the timestamp of the log entry in the given span that has a key named "event" with value that matches
+ * the given eventValue
+ * @param span Span from which event timestamp to be read
+ * @param eventValue value of the "event" field to match
+ * @return Some(Long) of the timestamp read or None
+ */
+ def getEventTimestamp(span: Span, eventValue: String): Option[Long] =
+ span.getLogsList.asScala.find(log => {
+ log.getFieldsList.asScala.exists(tag => {
+ tag.getKey.equalsIgnoreCase("event") && StringUtils.isNotEmpty(tag.getVStr) &&
+ tag.getVStr.equalsIgnoreCase(eventValue)
+ })
+ }) match {
+ case Some(log) => Option(log.getTimestamp)
+ case _ => None
+ }
+}
+
+/**
+ * Enum for different span types processed
+ * by the node finder application
+ */
+object SpanType extends Enumeration {
+ type SpanType = Value
+ val SERVER, CLIENT, OTHER = Value
+}
+
+/**
+ * Simple case class representing a bit flag used to mark span events
+ * @param value value of the flag
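+ *
+ * Flags combine with bitwise OR, e.g. Flag(1) | Flag(2) == Flag(3), which is how the
+ * 'cs' and 'cr' markers accumulate into the CLIENT span-type signature.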
+ */
+case class Flag(value: Int) {
+ def | (that: Flag): Flag = Flag(this.value | that.value)
+
+ override def equals(obj: scala.Any): Boolean = obj match {
+ case that: Flag => that.value == value
+ case _ => false
+ }
+}
diff --git a/service-graph/node-finder/src/test/java/org/expedia/www/haystack/commons/scalatest/IntegrationSuite.java b/service-graph/node-finder/src/test/java/org/expedia/www/haystack/commons/scalatest/IntegrationSuite.java
new file mode 100644
index 000000000..fe13e09e4
--- /dev/null
+++ b/service-graph/node-finder/src/test/java/org/expedia/www/haystack/commons/scalatest/IntegrationSuite.java
@@ -0,0 +1,29 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.expedia.www.haystack.commons.scalatest;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@org.scalatest.TagAnnotation
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.METHOD, ElementType.TYPE})
+public @interface IntegrationSuite {
+}
diff --git a/service-graph/node-finder/src/test/resources/integration/kafka-server.properties b/service-graph/node-finder/src/test/resources/integration/kafka-server.properties
new file mode 100644
index 000000000..860ae817c
--- /dev/null
+++ b/service-graph/node-finder/src/test/resources/integration/kafka-server.properties
@@ -0,0 +1,51 @@
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id=0
+
+# The port the socket server listens on
+port=9092
+
+# The number of threads handling network requests
+num.network.threads=2
+
+# The number of threads doing disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=1048576
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=1048576
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+# A comma separated list of directories under which to store log files
+log.dirs=target/kafka-logs
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The minimum age of a log file to be eligible for deletion
+log.retention.hours=168
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=536870912
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=60000
+
+# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
+# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
+log.cleaner.enable=false
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=1000000
+
+#auto create topics
+auto.create.topics.enable=true
+
+default.replication.factor=1
+offsets.topic.replication.factor=1
\ No newline at end of file
diff --git a/service-graph/node-finder/src/test/resources/integration/local.conf b/service-graph/node-finder/src/test/resources/integration/local.conf
new file mode 100644
index 000000000..7e03852cc
--- /dev/null
+++ b/service-graph/node-finder/src/test/resources/integration/local.conf
@@ -0,0 +1,45 @@
+health.status.path = "target/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-node-finder"
+ bootstrap.servers = "localhost:9092"
+ num.stream.threads = 1
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = earliest
+ #timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ }
+
+ producer {
+ metrics {
+ topic = "metricpoints"
+ }
+ service.call {
+ topic = "graph-nodes"
+ }
+ }
+
+ consumer {
+ topic = "proto-spans"
+ }
+
+ accumulator {
+ interval = 1000
+ }
+
+ collectorTags = ["X-HAYSTACK-INFRASTRUCTURE-PROVIDER", "tier"]
+
+ node.metadata {
+ topic {
+ autocreate = false
+ name = "haystack-node-finder-metadata"
+ partition.count = 1
+ replication.factor = 1
+ }
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
diff --git a/service-graph/node-finder/src/test/resources/integration/zookeeper.properties b/service-graph/node-finder/src/test/resources/integration/zookeeper.properties
new file mode 100644
index 000000000..c3e355615
--- /dev/null
+++ b/service-graph/node-finder/src/test/resources/integration/zookeeper.properties
@@ -0,0 +1,6 @@
+# the directory where the snapshot is stored.
+dataDir=target
+# the port at which the clients will connect
+clientPort=2181
+# disable the per-ip limit on the number of connections since this is a non-production config
+maxClientCnxns=0
\ No newline at end of file
diff --git a/service-graph/node-finder/src/test/resources/log4j.properties b/service-graph/node-finder/src/test/resources/log4j.properties
new file mode 100644
index 000000000..fa7f75bf8
--- /dev/null
+++ b/service-graph/node-finder/src/test/resources/log4j.properties
@@ -0,0 +1,5 @@
+log4j.rootLogger=OFF, stdout
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
\ No newline at end of file
diff --git a/service-graph/node-finder/src/test/resources/logback-test.xml b/service-graph/node-finder/src/test/resources/logback-test.xml
new file mode 100644
index 000000000..38a30a589
--- /dev/null
+++ b/service-graph/node-finder/src/test/resources/logback-test.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+        <immediateFlush>true</immediateFlush>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n</pattern>
+        </encoder>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="console"/>
+    </root>
+
+</configuration>
diff --git a/service-graph/node-finder/src/test/resources/test/test.conf b/service-graph/node-finder/src/test/resources/test/test.conf
new file mode 100644
index 000000000..e66482268
--- /dev/null
+++ b/service-graph/node-finder/src/test/resources/test/test.conf
@@ -0,0 +1,44 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-node-finder"
+ bootstrap.servers = "kafkasvc:9092"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ #timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ }
+
+ producer {
+ metrics {
+ topic = "metricpoints"
+ }
+ service.call {
+ topic = "graph-nodes"
+ }
+ }
+
+ consumer {
+ topic = "proto-spans"
+ }
+
+ accumulator {
+ interval = 60000
+ }
+ collectorTags = ["X-HAYSTACK-INFRASTRUCTURE-PROVIDER", "tier"]
+
+ node.metadata {
+ topic {
+ autocreate = false
+ name = "haystack-node-finder-metadata"
+ partition.count = 6
+ replication.factor = 2
+ }
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
diff --git a/service-graph/node-finder/src/test/resources/test/test_no_app_id.conf b/service-graph/node-finder/src/test/resources/test/test_no_app_id.conf
new file mode 100644
index 000000000..2147acd1f
--- /dev/null
+++ b/service-graph/node-finder/src/test/resources/test/test_no_app_id.conf
@@ -0,0 +1,33 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ bootstrap.servers = "kafkasvc:9092"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ #timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ }
+
+ producer {
+ metrics {
+ topic = "metricpoints"
+ }
+ service.call {
+ topic = "service-calls"
+ }
+ }
+
+ consumer {
+ topic = "proto-spans"
+ }
+
+ accumulator {
+ interval = 60000
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
diff --git a/service-graph/node-finder/src/test/resources/test/test_no_bootstrap.conf b/service-graph/node-finder/src/test/resources/test/test_no_bootstrap.conf
new file mode 100644
index 000000000..cbd5c15a7
--- /dev/null
+++ b/service-graph/node-finder/src/test/resources/test/test_no_bootstrap.conf
@@ -0,0 +1,33 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-node-finder"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ #timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ }
+
+ producer {
+ metrics {
+ topic = "metricpoints"
+ }
+ service.call {
+ topic = "service-calls"
+ }
+ }
+
+ consumer {
+ topic = "proto-spans"
+ }
+
+ accumulator {
+ interval = 60000
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
diff --git a/service-graph/node-finder/src/test/resources/test/test_no_consumer.conf b/service-graph/node-finder/src/test/resources/test/test_no_consumer.conf
new file mode 100644
index 000000000..0a360d21e
--- /dev/null
+++ b/service-graph/node-finder/src/test/resources/test/test_no_consumer.conf
@@ -0,0 +1,30 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-node-finder"
+ bootstrap.servers = "kafkasvc:9092"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ #timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ }
+
+ producer {
+ metrics {
+ topic = "metricpoints"
+ }
+ service.call {
+ topic = "service-calls"
+ }
+ }
+
+ accumulator {
+ interval = 60000
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
diff --git a/service-graph/node-finder/src/test/resources/test/test_no_metrics_topic.conf b/service-graph/node-finder/src/test/resources/test/test_no_metrics_topic.conf
new file mode 100644
index 000000000..3b6b93ea8
--- /dev/null
+++ b/service-graph/node-finder/src/test/resources/test/test_no_metrics_topic.conf
@@ -0,0 +1,31 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-node-finder"
+ bootstrap.servers = "kafkasvc:9092"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ #timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ }
+
+ producer {
+ service.call {
+ topic = "service-calls"
+ }
+ }
+
+ consumer {
+ topic = "proto-spans"
+ }
+
+ accumulator {
+ interval = 60000
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
diff --git a/service-graph/node-finder/src/test/resources/test/test_no_producer.conf b/service-graph/node-finder/src/test/resources/test/test_no_producer.conf
new file mode 100644
index 000000000..0ae6d8ec0
--- /dev/null
+++ b/service-graph/node-finder/src/test/resources/test/test_no_producer.conf
@@ -0,0 +1,25 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-service-graph-node-finder"
+ bootstrap.servers = "kafkasvc:9092"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ #timestamp.extractor = "org.apache.kafka.streams.processor.WallclockTimestampExtractor"
+ }
+
+ consumer {
+ topic = "proto-spans"
+ }
+
+ accumulator {
+ interval = 60000
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
diff --git a/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/TestSpec.scala b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/TestSpec.scala
new file mode 100644
index 000000000..569d500f7
--- /dev/null
+++ b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/TestSpec.scala
@@ -0,0 +1,189 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack
+
+import java.util.UUID
+import java.util.concurrent.TimeUnit
+
+import com.expedia.open.tracing.Tag.TagType
+import com.expedia.open.tracing.{Log, Span, Tag}
+import com.expedia.www.haystack.service.graph.node.finder.model.{LightSpan, SpanPair, SpanPairBuilder}
+import com.expedia.www.haystack.service.graph.node.finder.utils.SpanType.SpanType
+import com.expedia.www.haystack.service.graph.node.finder.utils.{SpanType, SpanUtils}
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FunSpec, GivenWhenThen, Matchers}
+
+trait TestSpec extends FunSpec with GivenWhenThen with Matchers with EasyMockSugar {
+ private val DEFAULT_START_TIME = System.currentTimeMillis() - TimeUnit.SECONDS.toMillis(5)
+ private val DEFAULT_DURATION_MICROS = TimeUnit.MILLISECONDS.toMicros(100)
+
+ def newLightSpan(spanId: String, parentSpanId: String, serviceName: String, operName: String, spanType: SpanType): LightSpan = {
+ LightSpan(spanId, parentSpanId, System.currentTimeMillis(), serviceName, operName, 1000, spanType, Map())
+ }
+
+ def newLightSpan(spanId: String, parentSpanId: String, serviceName: String, operationName: String, startTimeInMillis: Long,
+ duration: Long, spanType: SpanType, tags: Map[String, String] = Map()): LightSpan = {
+ LightSpan(spanId, parentSpanId, startTimeInMillis, serviceName, operationName, duration, spanType, tags)
+ }
+
+ def randomLightSpan(): LightSpan = {
+ LightSpan(UUID.randomUUID().toString, UUID.randomUUID().toString, System.currentTimeMillis(), "svc", "oper", 1000, SpanType.CLIENT, Map())
+ }
+
+ def newSpan(spanId: String, parentSpanId: String, serviceName: String): Span = {
+ newSpan(spanId, parentSpanId, serviceName, "oper", DEFAULT_DURATION_MICROS, client = false, server = false)._1
+ }
+
+ def newServerSpan(spanId: String, parentSpanId: String, serviceName: String): Span = {
+ newSpan(spanId, parentSpanId, serviceName, "oper", DEFAULT_DURATION_MICROS, client = false, server = true)._1
+ }
+
+ def newClientSpan(spanId: String, parentSpanId: String, serviceName: String): Span = {
+ newSpan(spanId, parentSpanId, serviceName, "oper", DEFAULT_DURATION_MICROS, client = true, server = false)._1
+ }
+
+ def newClientSpan(spanId: String, parentSpanId: String, serviceName: String, startTime: Long, duration: Long): Span = {
+ newSpan(spanId, parentSpanId, startTime, serviceName, "oper", duration, client=true, server=false)._1
+ }
+
+ def newServerSpan(spanId: String, parentSpanId: String, serviceName: String, startTime: Long, duration: Long): Span = {
+ newSpan(spanId, parentSpanId, startTime, serviceName, "oper", duration, client=false, server=true)._1
+ }
+
+ def newSpan(serviceName: String, operation: String, duration: Long, client: Boolean, server: Boolean): (Span, SpanType) = {
+ newSpan(UUID.randomUUID().toString, UUID.randomUUID().toString, serviceName, operation, duration, client, server)
+ }
+
+ def newSpan(spanId: String, parentSpanId: String, serviceName: String, operation: String, duration: Long, client: Boolean, server: Boolean): (Span, SpanType) = {
+ newSpan(spanId, parentSpanId, DEFAULT_START_TIME, serviceName, operation, duration, client, server)
+ }
+
+ def newSpan(spanId: String, parentSpanId: String, ts: Long, serviceName: String, operation: String, duration: Long, client: Boolean,
+ server: Boolean, tags: Map[String, String] = Map()): (Span, SpanType) = {
+ val spanBuilder = Span.newBuilder()
+ spanBuilder.setTraceId(UUID.randomUUID().toString)
+ spanBuilder.setSpanId(spanId)
+ spanBuilder.setParentSpanId(parentSpanId)
+ spanBuilder.setServiceName(serviceName)
+ spanBuilder.setOperationName(operation)
+ spanBuilder.setStartTime(ts * 1000) // start time in microseconds
+ spanBuilder.setDuration(duration)
+ var spanType = SpanType.OTHER
+
+ val logBuilder = Log.newBuilder()
+ if (client) {
+ logBuilder.setTimestamp(ts)
+ logBuilder.addFields(Tag.newBuilder().setKey("event").setVStr(SpanUtils.CLIENT_SEND_EVENT).build())
+ spanBuilder.addLogs(logBuilder.build())
+ logBuilder.clear()
+ logBuilder.setTimestamp(ts + duration)
+ logBuilder.addFields(Tag.newBuilder().setKey("event").setVStr(SpanUtils.CLIENT_RECV_EVENT).build())
+ spanBuilder.addLogs(logBuilder.build())
+ spanType = SpanType.CLIENT
+ spanBuilder.addTags(Tag.newBuilder().setKey("span.kind").setVStr("client"))
+ }
+
+ if (server) {
+ logBuilder.setTimestamp(ts)
+ logBuilder.addFields(Tag.newBuilder().setKey("event").setVStr(SpanUtils.SERVER_RECV_EVENT).build())
+ spanBuilder.addLogs(logBuilder.build())
+ logBuilder.clear()
+ logBuilder.setTimestamp(ts + duration)
+ logBuilder.addFields(Tag.newBuilder().setKey("event").setVStr(SpanUtils.SERVER_SEND_EVENT).build())
+ spanBuilder.addLogs(logBuilder.build())
+ spanType = SpanType.SERVER
+ spanBuilder.addTags(Tag.newBuilder().setKey("span.kind").setVStr("server"))
+ }
+
+ if (tags.nonEmpty) {
+ val tagBuilder = Tag.newBuilder()
+ tags.foreach(tag => {
+ tagBuilder.setKey(tag._1).setVStr(tag._2).setType(TagType.STRING)
+ spanBuilder.addTags(tagBuilder.build())
+ tagBuilder.clear()
+ })
+ }
+
+ (spanBuilder.build(), spanType)
+ }
+
+ def produceSimpleSpan(offset: Long, callback: (Span) => Unit): Unit =
+ callback(newSpan(UUID.randomUUID().toString,
+ UUID.randomUUID().toString,
+ System.currentTimeMillis() - offset,
+ "foo-service", "bar", 1500, client = false, server = false)._1)
+
+ def produceClientSpan(offset: Long, callback: (Span) => Unit): Unit =
+ callback(newSpan(UUID.randomUUID().toString,
+ UUID.randomUUID().toString,
+ System.currentTimeMillis() - offset,
+ "foo-service", "bar", 1500, client = true, server = false)._1)
+
+ def produceServerSpan(offset: Long, callback: (Span) => Unit): Unit =
+ callback(newSpan(UUID.randomUUID().toString,
+ UUID.randomUUID().toString,
+ System.currentTimeMillis() - offset,
+ "baz-service", "bar", 500, client = false, server = true)._1)
+
+ def produceClientAndServerSpans(offset: Long, callback: (Span) => Unit): Unit = {
+ val clientSend = System.currentTimeMillis() - offset
+ val serverReceive = clientSend + 500
+ val spanId = UUID.randomUUID().toString
+ val parentSpanId = UUID.randomUUID().toString
+ val source = "foo-service"
+ val op = "bar"
+ val dest = "baz-service"
+ val (clientSpan, _) = newSpan(spanId, parentSpanId, clientSend, source, op, 1500, client = true, server = false)
+ val (serverSpan, _) = newSpan(spanId, parentSpanId, serverReceive, dest, op, 500, client = false, server = true)
+ callback(clientSpan)
+ callback(serverSpan)
+ }
+
+ def writeSpans(count: Int,
+ startOffset: Long,
+ producer: (Long, (Span) => Unit) => Unit,
+ consumer: (Span) => Unit): Unit = {
+ require(count >= 1)
+ var i = count
+ while (i >= 1) {
+ producer(i * startOffset, consumer)
+ i -= 1
+ }
+ }
+
+ def invalidSpanPair(): SpanPair = {
+ val spanId = UUID.randomUUID().toString
+ val parentSpanId = UUID.randomUUID().toString
+ val clientLightSpan = newLightSpan(spanId, parentSpanId, "foo-service", "bar", System.currentTimeMillis(), 1000, SpanType.CLIENT)
+ val anotherClientLightSpan = newLightSpan(spanId, parentSpanId, "foo-service", "bar", System.currentTimeMillis(), 1000, SpanType.CLIENT)
+ val spanPair = SpanPairBuilder.createSpanPair(clientLightSpan, anotherClientLightSpan)
+ spanPair
+ }
+
+ def validSpanPair(tags: Map[String, String] = Map()): SpanPair = {
+ val clientSend = System.currentTimeMillis()
+ val serverReceive = clientSend + 500
+ val spanId = UUID.randomUUID().toString
+ val parentSpanId = UUID.randomUUID().toString
+ val clientLightSpan = newLightSpan(spanId, parentSpanId, "foo-service", "bar", clientSend, 1500, SpanType.CLIENT, tags)
+ val serverLightSpan = newLightSpan(spanId, parentSpanId, "baz-service", "bar", serverReceive, 500, SpanType.SERVER, tags)
+
+ val spanPair = SpanPairBuilder.createSpanPair(clientLightSpan, serverLightSpan)
+ spanPair
+ }
+}
diff --git a/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/commons/kafka/KafkaController.scala b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/commons/kafka/KafkaController.scala
new file mode 100644
index 000000000..a35c52849
--- /dev/null
+++ b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/commons/kafka/KafkaController.scala
@@ -0,0 +1,100 @@
+package com.expedia.www.haystack.commons.kafka
+
+import java.util.Properties
+import java.util.concurrent.TimeUnit
+
+import kafka.server.RunningAsBroker
+import org.apache.kafka.clients.CommonClientConfigs
+import org.apache.kafka.clients.admin.{AdminClient, NewTopic}
+import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
+import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig}
+import org.apache.kafka.common.serialization.{Deserializer, Serializer}
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+import scala.util.Try
+
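+/**
+ * Embedded ZooKeeper and Kafka broker pair for integration tests. A typical
+ * lifecycle, with hypothetical property objects:
+ * {{{
+ * val controller = new KafkaController(kafkaProps, zkProps)
+ * controller.startService()
+ * controller.createTopics(List("proto-spans"))
+ * // ... run the test ...
+ * controller.stopService()
+ * }}}
+ */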
+class KafkaController(kafkaProperties: Properties, zooKeeperProperties: Properties) {
+ require(kafkaProperties != null)
+ require(zooKeeperProperties != null)
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[KafkaController])
+
+ private val zkPort = zooKeeperProperties.getProperty("clientPort").toInt
+ private val kafkaPort = kafkaProperties.getProperty("port").toInt
+
+ lazy val zkUrl: String = "localhost:" + zkPort
+ lazy val kafkaUrl: String = "localhost:" + kafkaPort
+
+ private val kafkaPropertiesWithZk = new Properties
+ kafkaPropertiesWithZk.putAll(kafkaProperties)
+ kafkaPropertiesWithZk.put("zookeeper.connect", zkUrl)
+ private val kafkaServer = new KafkaLocal(kafkaPropertiesWithZk)
+
+ def startService(): Unit = {
+ //start zk
+ val zookeeper = new ZooKeeperLocal(zooKeeperProperties)
+ new Thread(zookeeper).start()
+
+ //start kafka
+ kafkaServer.start()
+ Thread.sleep(1000)
+ if (kafkaServer.state().currentState != RunningAsBroker.state) {
+ throw new IllegalStateException("Kafka server is not in a running state")
+ }
+
+ //lifecycle message
+ LOGGER.info("Kafka started and listening : {}", kafkaUrl)
+ }
+
+ def stopService(): Unit = {
+ //stop kafka
+ kafkaServer.stop()
+
+ //lifecycle message
+ LOGGER.info("Kafka stopped")
+ }
+
+ def createTopics(topics: List[String]): Unit = {
+ if (topics.nonEmpty) {
+ val adminClient = AdminClient.create(getBootstrapProperties)
+ try {
+ adminClient.createTopics(topics.map(topic => new NewTopic(topic, 1, 1)).asJava)
+ adminClient.listTopics().names().get().forEach(s => LOGGER.info("Available topic : {}", s))
+ }
+ finally {
+ Try(adminClient.close(5, TimeUnit.SECONDS))
+ }
+ }
+ }
+
+ def createProducer[K, V] (topic: String, keySerializer: Class[_ <: Serializer[K]],
+ valueSerializer: Class[_ <: Serializer[V]]) : KafkaProducer[K, V] = {
+ val properties = getBootstrapProperties
+ properties.put(ProducerConfig.CLIENT_ID_CONFIG, topic + "Producer")
+ properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializer.getName)
+ properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer.getName)
+ new KafkaProducer[K, V](properties)
+ }
+
+ def createConsumer[K, V] (topic: String, keyDeserializer: Class[_ <: Deserializer[K]],
+ valueDeserializer: Class[_ <: Deserializer[V]]) : KafkaConsumer[K, V] = {
+ val properties = getBootstrapProperties
+ properties.put(ConsumerConfig.CLIENT_ID_CONFIG, topic + "Consumer")
+ properties.put(ConsumerConfig.GROUP_ID_CONFIG, topic + "ConsumerGroup")
+ properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getName)
+ properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getName)
+ properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
+ val consumer = new KafkaConsumer[K, V](properties)
+ consumer.subscribe(List(topic).asJava)
+ consumer
+ }
+
+ private def getBootstrapProperties: Properties = {
+ val properties = new Properties()
+ properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, List(kafkaUrl).asJava)
+ properties
+ }
+}
+
+class InvalidStateException(message: String) extends RuntimeException(message)
diff --git a/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/commons/kafka/KafkaLocal.scala b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/commons/kafka/KafkaLocal.scala
new file mode 100644
index 000000000..0dce343a2
--- /dev/null
+++ b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/commons/kafka/KafkaLocal.scala
@@ -0,0 +1,23 @@
+package com.expedia.www.haystack.commons.kafka
+
+import java.util.Properties
+
+import kafka.metrics.KafkaMetricsReporter
+import kafka.server.{BrokerState, KafkaConfig, KafkaServer}
+
+class KafkaLocal(val kafkaProperties: Properties) {
+ val kafkaConfig: KafkaConfig = KafkaConfig.fromProps(kafkaProperties)
+ val kafka: KafkaServer = new KafkaServer(kafkaConfig, kafkaMetricsReporters = List[KafkaMetricsReporter]())
+
+ def start(): Unit = {
+ kafka.startup()
+ }
+
+ def stop(): Unit = {
+ kafka.shutdown()
+ }
+
+ def state(): BrokerState = {
+ kafka.brokerState
+ }
+}
diff --git a/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/commons/kafka/ZooKeeperLocal.scala b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/commons/kafka/ZooKeeperLocal.scala
new file mode 100644
index 000000000..11b0da66b
--- /dev/null
+++ b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/commons/kafka/ZooKeeperLocal.scala
@@ -0,0 +1,31 @@
+package com.expedia.www.haystack.commons.kafka
+
+import java.io.IOException
+import java.util.Properties
+
+import org.apache.zookeeper.server.quorum.QuorumPeerConfig
+import org.apache.zookeeper.server.{ServerConfig, ZooKeeperServerMain}
+import org.slf4j.LoggerFactory
+
+
+object ZooKeeperLocal {
+ private val LOGGER = LoggerFactory.getLogger(classOf[ZooKeeperLocal])
+}
+
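+//Runs a standalone ZooKeeper server; Runnable so tests can host it on a dedicated thread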
+class ZooKeeperLocal(val zkProperties: Properties) extends Runnable {
+ private val quorumConfiguration = new QuorumPeerConfig
+ quorumConfiguration.parseProperties(zkProperties)
+ private val configuration = new ServerConfig
+ configuration.readFrom(quorumConfiguration)
+ private val zooKeeperServer = new ZooKeeperServerMain
+
+ override def run(): Unit = {
+ try {
+ zooKeeperServer.runFromConfig(configuration)
+ }
+ catch {
+ case e: IOException =>
+        ZooKeeperLocal.LOGGER.error("ZooKeeper startup failed.", e)
+ }
+ }
+}
diff --git a/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/AppSpec.scala b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/AppSpec.scala
new file mode 100644
index 000000000..1d37a3b6c
--- /dev/null
+++ b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/AppSpec.scala
@@ -0,0 +1,142 @@
+package com.expedia.www.haystack.service.graph.node.finder
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+import java.util.Properties
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.TestSpec
+import com.expedia.www.haystack.commons.health.HealthStatusController
+import com.expedia.www.haystack.commons.kafka.{InvalidStateException, KafkaController}
+import com.expedia.www.haystack.commons.kstreams.app.StateChangeListener
+import com.expedia.www.haystack.commons.kstreams.serde.SpanSerializer
+import com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricDataDeserializer
+import com.expedia.www.haystack.service.graph.node.finder.config.AppConfiguration
+import com.expedia.www.haystack.service.graph.node.finder.utils.SpanUtils
+import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
+import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
+import org.apache.kafka.streams.KafkaStreams
+import org.expedia.www.haystack.commons.scalatest.IntegrationSuite
+import org.scalatest.BeforeAndAfter
+import org.slf4j.LoggerFactory
+
+@IntegrationSuite
+class AppSpec extends TestSpec with BeforeAndAfter {
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[AppSpec])
+
+ private val appConfig = new AppConfiguration("integration/local.conf")
+ private val stateChangeListener = new ExtendedStateChangeListener(new HealthStatusController)
+ private val streamsRunner = App.createStreamsRunner(appConfig, stateChangeListener)
+
+ val kafkaController: KafkaController = createKafkaController()
+
+ before {
+ //start kafka and zk
+ kafkaController.startService()
+
+ //ensure test topics are present
+ kafkaController.createTopics(List(appConfig.kafkaConfig.protoSpanTopic,
+ appConfig.kafkaConfig.serviceCallTopic, appConfig.kafkaConfig.metricsTopic, appConfig.kafkaConfig.metadataConfig.topic))
+
+ //start topology
+ streamsRunner.start()
+
+    //give kstreams time to initialize completely
+ waitForStreams()
+ }
+
+ describe("node finder application") {
+ it("should process spans from kafka and produce latency metrics and graph edges") {
+ //send test data to source topic
+ val producer = kafkaController.createProducer(appConfig.kafkaConfig.protoSpanTopic,
+ classOf[StringSerializer], classOf[SpanSerializer])
+
+ //send sample data
+ sendRecords(producer, 5)
+
+ //read data from output topics
+ LOGGER.info(s"Consuming topics ${appConfig.kafkaConfig.metricsTopic} and ${appConfig.kafkaConfig.serviceCallTopic}")
+ val metricsConsumer = kafkaController.createConsumer(appConfig.kafkaConfig.metricsTopic,
+ classOf[StringDeserializer], classOf[MetricDataDeserializer])
+ val metricRecords = metricsConsumer.poll(5000)
+
+ val graphConsumer = kafkaController.createConsumer(appConfig.kafkaConfig.serviceCallTopic,
+ classOf[StringDeserializer], classOf[StringDeserializer])
+ val graphRecords = graphConsumer.poll(5000)
+
+ //check if they are as expected
+ metricRecords.count() should be(5)
+ graphRecords.count() should be(5)
+ }
+ }
+
+ after {
+ //stop topology
+ streamsRunner.close()
+
+ //stop kafka and zk
+ kafkaController.stopService()
+ }
+
+ private def createKafkaController() : KafkaController = {
+ val zkProperties = new Properties
+ zkProperties.load(classOf[AppSpec].getClassLoader.getResourceAsStream("integration/zookeeper.properties"))
+
+ val kafkaProperties = new Properties
+ kafkaProperties.load(classOf[AppSpec].getClassLoader.getResourceAsStream("integration/kafka-server.properties"))
+
+ new KafkaController(kafkaProperties, zkProperties)
+ }
+
+  private def waitForStreams(): Unit = {
+    //currentState is null until the first state-change callback fires; poll while the streams are still in CREATED
+    while (stateChangeListener.currentState == null ||
+      (!stateChangeListener.currentState.isRunning &&
+        stateChangeListener.currentState == KafkaStreams.State.CREATED)) Thread.sleep(100)
+
+    if (!stateChangeListener.currentState.isRunning) {
+      throw new InvalidStateException(stateChangeListener.currentState + " is not expected after startup")
+    }
+  }
+
+ private def sendRecords(producer: KafkaProducer[String, Span], count: Int) : Unit = {
+ val writer: (Span) => Unit = span => {
+ producer.send(new ProducerRecord[String, Span](appConfig.kafkaConfig.protoSpanTopic, span.getSpanId, span))
+ LOGGER.info("sent {} span {} : {}", SpanUtils.getSpanType(span).toString, span.getSpanId, span.getStartTime.toString)
+ }
+
+    //send `count` each of client spans, server spans, client+server span pairs and simple spans
+ for (_ <- 1 to count) produceClientSpan(10000, writer)
+ for (_ <- 1 to count) produceServerSpan(9000, writer)
+ for (_ <- 1 to count) produceClientAndServerSpans(8000, writer)
+ for (_ <- 1 to count) produceSimpleSpan(5000, writer)
+ producer.flush()
+
+    //give the streams up to 30 seconds to process; generous for a local run,
+    //but the compute available on build servers varies
+ Thread.sleep(30000)
+ }
+
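+  //captures the latest KafkaStreams state so the test can poll for stream readiness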
+ class ExtendedStateChangeListener(healthStatusController: HealthStatusController)
+ extends StateChangeListener(healthStatusController) {
+ var currentState: KafkaStreams.State = _
+
+ override def onChange(newState: KafkaStreams.State, oldState: KafkaStreams.State): Unit = {
+ super.onChange(newState, oldState)
+ currentState = newState
+ }
+ }
+}
diff --git a/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/app/GraphNodeProducerSpec.scala b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/app/GraphNodeProducerSpec.scala
new file mode 100644
index 000000000..280793bb8
--- /dev/null
+++ b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/app/GraphNodeProducerSpec.scala
@@ -0,0 +1,76 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.app
+
+import com.expedia.www.haystack.TestSpec
+import com.expedia.www.haystack.commons.entities.GraphEdge
+import org.apache.kafka.streams.processor.ProcessorContext
+import org.easymock.EasyMock._
+
+class GraphNodeProducerSpec extends TestSpec {
+ describe("producing graph nodes") {
+ it("should emit a valid graph node for a give complete SpanPair") {
+ Given("a valid SpanPair instance")
+ val spanPair = validSpanPair(Map("testtag" -> "true"))
+ val context = mock[ProcessorContext]
+ val graphNodeProducer = new GraphNodeProducer
+ val captured = newCapture[GraphEdge]()
+ When("process is called on GraphNodeProducer with it")
+ expecting {
+ context.forward(anyString(), capture[GraphEdge](captured)).once()
+ context.commit().once()
+ }
+ replay(context)
+ graphNodeProducer.init(context)
+ graphNodeProducer.process(spanPair.getId, spanPair)
+ val edge = captured.getValue
+ Then("it should produce a valid GraphNode object")
+ verify(context)
+ edge.source.name should be("foo-service")
+ edge.destination.name should be("baz-service")
+ edge.operation should be("bar")
+ edge.source.tags.get("testtag") shouldBe Some("true")
+ edge.destination.tags.get("testtag") shouldBe Some("true")
+ }
+ it("should emit no graph nodes for invalid light spans") {
+ Given("an incomplete SpanPair instance")
+ val spanPair = invalidSpanPair()
+ val context = mock[ProcessorContext]
+ val graphNodeProducer = new GraphNodeProducer
+ When("process is called on GraphNodeProducer with it")
+ expecting {
+ context.commit().once()
+ }
+ replay(context)
+ graphNodeProducer.init(context)
+ graphNodeProducer.process(spanPair.getId, spanPair)
+ Then("it should produce no graph node in the context")
+ verify(context)
+ }
+ }
+ describe("graph node producer supplier") {
+ it("should supply a valid producer") {
+ Given("a supplier instance")
+ val supplier = new GraphNodeProducerSupplier
+ When("a producer is request")
+ val producer = supplier.get()
+ Then("should yield a valid producer")
+ producer should not be null
+ }
+ }
+}
diff --git a/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/app/LatencyProducerSpec.scala b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/app/LatencyProducerSpec.scala
new file mode 100644
index 000000000..1537a596d
--- /dev/null
+++ b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/app/LatencyProducerSpec.scala
@@ -0,0 +1,69 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.app
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.TestSpec
+import org.apache.kafka.streams.processor.ProcessorContext
+import org.easymock.EasyMock._
+
+class LatencyProducerSpec extends TestSpec {
+ describe("latency producer") {
+ it("should produce latency metric for complete SpanPair") {
+ Given("a valid SpanPair instance")
+ val spanPair = validSpanPair()
+ val context = mock[ProcessorContext]
+ val latencyProducer = new LatencyProducer
+ When("process is invoked with a complete SpanPair")
+ expecting {
+ context.forward(anyString(), isA(classOf[MetricData])).once()
+ context.commit().once()
+ }
+ replay(context)
+ latencyProducer.init(context)
+ latencyProducer.process(spanPair.getId, spanPair)
+ Then("it should produce a metric point in the context")
+ verify(context)
+ }
+ it("should produce no metrics for invalid SpanPair") {
+ Given("an incomplete SpanPair instance")
+ val spanPair = invalidSpanPair()
+ val context = mock[ProcessorContext]
+ val latencyProducer = new LatencyProducer
+ When("process is invoked with a complete SpanPair")
+ expecting {
+ context.commit().once()
+ }
+ replay(context)
+ latencyProducer.init(context)
+ latencyProducer.process(spanPair.getId, spanPair)
+ Then("it should produce no metric points in the context")
+ verify(context)
+ }
+ }
+ describe("latency producer supplier") {
+ it("should supply a valid producer") {
+ Given("a supplier instance")
+ val supplier = new LatencyProducerSupplier
+ When("a producer is request")
+ val producer = supplier.get()
+ Then("should yield a valid producer")
+ producer should not be null
+ }
+ }
+}
diff --git a/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/app/SpanAccumulatorSpec.scala b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/app/SpanAccumulatorSpec.scala
new file mode 100644
index 000000000..a19222a0f
--- /dev/null
+++ b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/app/SpanAccumulatorSpec.scala
@@ -0,0 +1,456 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.app
+
+import java.util.concurrent.TimeUnit
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.TestSpec
+import com.expedia.www.haystack.commons.graph.GraphEdgeTagCollector
+import com.expedia.www.haystack.service.graph.node.finder.model.{ServiceNodeMetadata, SpanPair}
+import org.apache.kafka.streams.processor._
+import org.apache.kafka.streams.state.{KeyValueStore, Stores}
+import org.easymock.EasyMock
+import org.easymock.EasyMock._
+
+import scala.collection.mutable
+
+class SpanAccumulatorSpec extends TestSpec {
+ private val storeName = "my-store"
+ private val DEFAULT_ACCUMULATE_INTERVAL_MILLIS = TimeUnit.SECONDS.toMillis(2)
+
+ describe("a span accumulator") {
+ it("should schedule Punctuator on init") {
+ Given("a processor context")
+ val (context, _, _, _) = mockContext(0)
+ When("accumulator is initialized")
+ createAccumulator(context)
+ Then("it should schedule punctuation")
+ verify(context)
+ }
+
+ it("should collect all Client or Server Spans provided for processing") {
+ Given("an accumulator")
+ val accumulator = new SpanAccumulator(storeName, 1000, new GraphEdgeTagCollector())
+ When("10 server, 10 client and 10 other spans are processed")
+ val producers = List[(Long, (Span) => Unit) => Unit](produceSimpleSpan,
+ produceServerSpan, produceClientSpan)
+ producers.foreach(producer => writeSpans(10, 200, producer, (span) => accumulator.process(span.getSpanId, span)))
+ Then("accumulator should hold only the 10 client and 10 server spans")
+ accumulator.spanCount should be(30)
+ }
+
+ it("should emit SpanPair instances only for pairs of server and client spans") {
+ Given("an accumulator and initialized with a processor context")
+ val (context, _, _, _) = mockContext(10)
+ val accumulator = createAccumulator(context)
+ And("50 spans are written to it, with 10 client, 10 server, 10 other and 10 pairs of server and client")
+ val producers = List[(Long, (Span) => Unit) => Unit](produceSimpleSpan,
+ produceServerSpan, produceClientSpan, produceClientAndServerSpans)
+ producers.foreach(producer => writeSpans(10, 2500, producer, (span) => accumulator.process(span.getSpanId, span)))
+ When("punctuate is called")
+ accumulator.getPunctuator(context).punctuate(System.currentTimeMillis())
+
+ Then("it should produce 10 SpanPair instances as expected")
+ verify(context)
+ And("the accumulator's collection should be empty")
+ accumulator.spanCount should be(0)
+ }
+ }
+
+ describe("create span pair using ids") {
+ it("should emit SpanPair instances for parent-child relation using ids") {
+ Given("an accumulator and initialized with a processor context")
+ val (context, kvStore, _, _) = mockContext(4)
+ val accumulator = createAccumulator(context)
+
+ And("spans from 5 services")
+ val spanList = List(
+ newSpan("I1", "I2", "svc1"),
+ newSpan("I3", "I1", "svc1"),
+ newSpan("I4", "I3", "svc2"),
+ newSpan("I5", "I4", "svc2"),
+ newSpan("I6", "I5", "svc3"),
+ newSpan("I7", "I6", "svc3"),
+ newSpan("I8", "I7", "svc4"),
+ newSpan("I9", "I8", "svc4"),
+ newSpan("I10", "I9", "svc5"),
+ newSpan("I11", "I10", "svc5")
+ )
+ spanList.foreach(span => accumulator.process(span.getSpanId, span))
+
+ When("punctuate is called")
+ accumulator.getPunctuator(context).punctuate(System.currentTimeMillis())
+
+ Then("it should produce 10 SpanPair instances as expected")
+ verify(context)
+ And("the accumulator's collection should be empty")
+ accumulator.spanCount should be(0)
+
+      1 until 6 foreach { id =>
+        kvStore.get(s"svc$id") shouldBe null
+      }
+ }
+
+ it("should emit SpanPair instances for parent-child relation using ids with server spans") {
+ Given("an accumulator and initialized with a processor context")
+ val (context, kvStore, _, _) = mockContext(2)
+ val accumulator = createAccumulator(context)
+
+ And("spans from 5 services")
+ val spanList = List(
+ newServerSpan("I1", "I2", "svc1"),
+ newServerSpan("I4", "I1", "svc2"),
+ newClientSpan("I5", "I4", "svc2"),
+ newServerSpan("I6", "I5", "svc3"),
+ newServerSpan("I8", "I6", "svc4"),
+ newClientSpan("I9", "I8", "svc4"),
+ newClientSpan("I10", "I9", "svc5"),
+ newServerSpan("I11", "I10", "svc6")
+ )
+ spanList.foreach(span => accumulator.process(span.getSpanId, span))
+
+ When("punctuate is called")
+ accumulator.getPunctuator(context).punctuate(System.currentTimeMillis())
+
+ Then("it should produce 10 SpanPair instances as expected")
+ verify(context)
+ And("the accumulator's collection should be empty")
+ accumulator.spanCount should be(0)
+ 1 until 7 foreach { id =>
+ kvStore.get(s"svc$id") shouldBe null
+ }
+ }
+
+ it("should emit SpanPair instances for parent-child relation using ids with (I5, I4) and (I6, I5) in reverse order") {
+ Given("an accumulator and initialized with a processor context")
+ val (context, kvStore, _, _) = mockContext(4)
+ val accumulator = createAccumulator(context)
+ And("spans from 5 services")
+ val spanList = List(
+ newSpan("I1", "I2", "svc1"),
+ newSpan("I3", "I1", "svc1"),
+ newSpan("I4", "I3", "svc2"),
+ newSpan("I6", "I5", "svc3"), // child comes first
+ newSpan("I5", "I4", "svc2"), // then comes the parent
+ newSpan("I7", "I6", "svc3"),
+ newSpan("I8", "I7", "svc4"),
+ newSpan("I9", "I8", "svc4"),
+ newSpan("I10", "I9", "svc5"),
+ newSpan("I11", "I10", "svc5")
+ )
+ spanList.foreach(span => accumulator.process(span.getSpanId, span))
+
+ When("punctuate is called")
+ accumulator.getPunctuator(context).punctuate(System.currentTimeMillis())
+
+ Then("it should produce 10 SpanPair instances as expected")
+ verify(context)
+ And("the accumulator's collection should be empty")
+ accumulator.spanCount should be(0)
+ 1 until 6 foreach { id =>
+ kvStore.get(s"svc$id") shouldBe null
+ }
+ }
+
+ it("should emit SpanPair instances for fork relation using ids for svc4 -> svc5 & svc4 -> svc6") {
+ Given("an accumulator and initialized with a processor context")
+ val (context, _, _, _) = mockContext(5)
+ val accumulator = createAccumulator(context)
+ And("spans from 6 services")
+ val spanList = List(
+ newSpan("I1", "I2", "svc1"),
+ newSpan("I3", "I1", "svc1"),
+ newSpan("I4", "I3", "svc2"),
+ newSpan("I6", "I5", "svc3"),
+ newSpan("I5", "I4", "svc2"),
+ newSpan("I7", "I6", "svc3"),
+
+ newSpan("I8", "I7", "svc4"),
+ newSpan("I9", "I8", "svc4"),
+ newSpan("I10", "I8", "svc4"),
+
+ //downstream of svc4
+ newSpan("I11", "I9", "svc5"),
+ newSpan("I12", "I11", "svc5"),
+
+ //downstream of svc4
+ newSpan("I13", "I10", "svc6"),
+ newSpan("I14", "I13", "svc6")
+ )
+ spanList.foreach(span => accumulator.process(span.getSpanId, span))
+
+ When("punctuate is called")
+ accumulator.getPunctuator(context).punctuate(System.currentTimeMillis())
+
+ Then("it should produce 10 SpanPair instances as expected")
+ verify(context)
+ And("the accumulator's collection should be empty")
+ accumulator.spanCount should be(0)
+ }
+
+ it("should emit valid SpanPair instances for parent-child relation ignoring duplicate spans") {
+ Given("an accumulator and initialized with a processor context")
+ val (context, _, _, _) = mockContext(1)
+ val accumulator = createAccumulator(context)
+ And("spans from 5 services")
+ val spanList = List(
+ newSpan("I1", "I2", "svc1"),
+ newSpan("I1", "I2", "svc1"), //duplicate server span
+ newSpan("I3", "I1", "svc2"),
+ newSpan("I4", "I3", "svc2")
+ )
+ spanList.foreach(span => accumulator.process(span.getSpanId, span))
+
+ When("punctuate is called")
+ accumulator.getPunctuator(context).punctuate(System.currentTimeMillis())
+
+ Then("it should produce 1 SpanPair instances as expected")
+ verify(context)
+ And("the accumulator's collection should be empty")
+ accumulator.spanCount should be(0)
+ }
+ }
+
+ it("should emit valid SpanPair instances in mixed merge mode where we receive spans in Singular(sharable) and Dual(non-sharable) style") {
+ Given("an accumulator and initialized with a processor context")
+ val (context, kvStore, forwardedKeys, forwardedSpanPairs) = mockContext(3)
+ val accumulator = createAccumulator(context)
+ And("spans from 4 services")
+ val spanList = List(
+ // sharable client-server span
+ newClientSpan("I1", "I2", "svc1"),
+ newServerSpan("I1", "I2", "svc2"),
+
+ // non-sharable client-server span
+ newClientSpan("I2", "I1", "svc2"),
+ newServerSpan("I3", "I2", "svc3"),
+
+ // sharable client-server span
+ newClientSpan("I4", "I3", "svc3"),
+ newServerSpan("I4", "I3", "svc4")
+ )
+ spanList.foreach(span => accumulator.process(span.getSpanId, span))
+
+ When("punctuate is called")
+ accumulator.getPunctuator(context).punctuate(System.currentTimeMillis())
+
+ Then("it should produce 3 SpanPair instances as expected")
+ verify(context)
+ And("the accumulator's collection should be empty")
+ accumulator.spanCount should be(0)
+ kvStore.get("svc1") shouldBe null
+ kvStore.get("svc2").useSharedSpan shouldBe true
+ kvStore.get("svc3")
+ kvStore.get("svc4").useSharedSpan shouldBe true
+ extractClientServerSvcNames(forwardedSpanPairs) should contain allOf("svc1->svc2", "svc2->svc3", "svc3->svc4")
+ forwardedKeys.toSet should contain allOf("I1", "I2", "I4")
+ }
+
+ it("should respect the singular(sharable) span merge style once set even later if it receives dual(non-sharable) span mode") {
+ Given("an accumulator and initialized with a processor context")
+ val (context, kvStore, forwardedKeys, forwardedSpanPairs) = mockContext(3)
+ val accumulator = createAccumulator(context)
+ And("spans from 3 services")
+ val spanList = List(
+ // sharable client-server span
+ newClientSpan("I1", "I2", "svc1"),
+ newServerSpan("I1", "I2", "svc2"),
+
+ // sharable client-server span
+ newClientSpan("I3", "I1", "svc2"),
+ newServerSpan("I3", "I1", "svc3"),
+
+ // one non-sharable client-server span between svc1 and svc3
+ // one sharable client-server span between svc2 and svc3
+ newClientSpan("T1", "T2", "svc1"),
+ newServerSpan("T3", "T1", "svc3"),
+ newClientSpan("T3", "T1", "svc2")
+ )
+ spanList.foreach(span => accumulator.process(span.getSpanId, span))
+
+ When("punctuate is called")
+ accumulator.getPunctuator(context).punctuate(System.currentTimeMillis())
+
+ Then("it should produce 3 SpanPair instances as expected")
+ verify(context)
+ And("the accumulator's collection should be empty")
+ accumulator.spanCount should be(0)
+
+ kvStore.get("svc1") shouldBe null
+ kvStore.get("svc2").useSharedSpan shouldBe true
+ kvStore.get("svc3").useSharedSpan shouldBe true
+ extractClientServerSvcNames(forwardedSpanPairs) should contain allOf("svc1->svc2", "svc2->svc3")
+ forwardedKeys.toSet should contain allOf("I1", "I3", "T3")
+ }
+
+ it("should auto-correct from dual to Singular merge style mode and never go back") {
+ Given("an accumulator and initialized with a processor context")
+ val (context, kvStore, forwardedKeys, forwardedSpanPairs) = mockContext(3)
+ val accumulator = createAccumulator(context)
+ And("spans from 5 services")
+ val spanList = List(
+ // non-sharable client-server span between svc1 and svc3
+ newClientSpan("I1", "I2", "svc1"),
+ newServerSpan("I3", "I1", "svc3"),
+
+ newServerSpan("I1", "I2", "svc2"),
+ newClientSpan("I3", "I1", "svc2"),
+
+ newClientSpan("T1", "T2", "svc1"),
+ newServerSpan("T3", "T1", "svc3")
+ )
+ spanList.foreach(span => accumulator.process(span.getSpanId, span))
+
+ When("punctuate is called")
+ accumulator.getPunctuator(context).punctuate(System.currentTimeMillis())
+
+ Then("it should produce 3 SpanPair instances as expected")
+ verify(context)
+ And("the accumulator's collection should be empty")
+ accumulator.spanCount should be(0)
+ kvStore.get("svc1") shouldBe null
+ kvStore.get("svc2").useSharedSpan shouldBe true
+ kvStore.get("svc3").useSharedSpan shouldBe true
+ extractClientServerSvcNames(forwardedSpanPairs) should contain allOf("svc1->svc2", "svc1->svc3", "svc2->svc3")
+ forwardedKeys.toSet should contain allOf("I1", "I3")
+ }
+
+ it("should emit valid SpanPair instances for only singular(sharable) styled spans") {
+ Given("an accumulator and initialized with a processor context")
+ val (context, kvStore, _, _) = mockContext(3)
+ val accumulator = createAccumulator(context)
+ And("spans from 4 services")
+ val spanList = List(
+ newServerSpan("I1", "I2", "svc2"),
+ newServerSpan("I2", "I1", "svc3"),
+ newServerSpan("I3", "I2", "svc4"),
+ newClientSpan("I1", "I2", "svc1"),
+ newClientSpan("I2", "I1", "svc2"),
+ newClientSpan("I3", "I2", "svc3")
+ )
+ spanList.foreach(span => accumulator.process(span.getSpanId, span))
+
+ When("punctuate is called")
+ accumulator.getPunctuator(context).punctuate(System.currentTimeMillis())
+
+ Then("it should produce 3 SpanPair instances as expected")
+ verify(context)
+ And("the accumulator's collection should be empty")
+ accumulator.spanCount should be(0)
+ kvStore.get("svc1") shouldBe null
+ 2 until 5 foreach { id =>
+ kvStore.get(s"svc$id").useSharedSpan shouldBe true
+ }
+ }
+
+ it("should apply eviction logic using the end time (start time + duration) of the incoming spans and not rely on their start time") {
+ Given("an accumulator and initialized with a processor context")
+ val (context, kvStore, forwardedKeys, forwardedSpanPairs) = mockContext(2, 2)
+ val accumulator = createAccumulator(context)
+
+ And("spans from 2 services")
+ val currentTime = System.currentTimeMillis()
+ val oldStartTime = currentTime - TimeUnit.SECONDS.toMillis(10)
+ val longDurationSpans = List(
+ // sharable client-server span
+ newClientSpan("I1", "I2", "svc1", oldStartTime, TimeUnit.SECONDS.toMicros(9)),
+ newClientSpan("I3", "I4", "svc1", oldStartTime, TimeUnit.SECONDS.toMicros(5)),
+ newServerSpan("I3", "I4", "svc2", oldStartTime, TimeUnit.SECONDS.toMicros(5))
+ )
+ longDurationSpans.foreach(span => accumulator.process(span.getSpanId, span))
+
+ When("punctuate is called")
+ accumulator.getPunctuator(context).punctuate(currentTime)
+
+ Then("it should produce 2 SpanPair instances as expected")
+ And("the accumulator's collection not be empty, it should hold span with spanId I1")
+ accumulator.spanCount should be(1)
+ Set("I1") should contain allElementsOf accumulator.internalSpanMap.keySet
+
+ And("when finally, server span with spanId I1 is observed, it should process and forward")
+ List(
+ newServerSpan("I1", "I2", "svc2", oldStartTime, TimeUnit.SECONDS.toMicros(9))
+ ).foreach(span => accumulator.process(span.getSpanId, span))
+
+ // once the stream time moves ahead, it should evict the older spans
+ accumulator.getPunctuator(context).punctuate(currentTime + TimeUnit.SECONDS.toMillis(5))
+ accumulator.spanCount should be(0)
+
+ verify(context)
+
+ kvStore.get("svc1") shouldBe null
+ kvStore.get("svc2").useSharedSpan shouldBe true
+ extractClientServerSvcNames(forwardedSpanPairs) should contain allElementsOf Seq("svc1->svc2")
+ forwardedKeys.toSet should contain allElementsOf Seq("I1", "I3")
+ }
+
+ describe("span accumulator supplier") {
+ it("should supply a valid accumulator") {
+ Given("a supplier instance")
+ val supplier = new SpanAccumulatorSupplier(storeName, 1000, new GraphEdgeTagCollector())
+ When("an accumulator instance is request")
+ val producer = supplier.get()
+ Then("should yield a valid producer")
+ producer should not be null
+ }
+ }
+
+ private def mockContext(expectedForwardCalls: Int, expectedCommits: Int = 1): (ProcessorContext, KeyValueStore[String, ServiceNodeMetadata], mutable.ListBuffer[String], mutable.ListBuffer[SpanPair]) = {
+ val context = mock[ProcessorContext]
+ val stateStore = Stores.inMemoryKeyValueStore(storeName).get()
+
+ val forwardedKeys = mutable.ListBuffer[String]()
+ val forwardedSpanPairs = mutable.ListBuffer[SpanPair]()
+
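+    //capture every forwarded (key, SpanPair) pair and, for shared spans, mimic the downstream metadata-store update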
+ expecting {
+ context.schedule(anyLong(), isA(classOf[PunctuationType]), isA(classOf[Punctuator]))
+ .andReturn(mock[Cancellable]).once()
+
+ if (expectedForwardCalls > 0) {
+ val captureForwardedKey = EasyMock.newCapture[String]()
+ val captureForwardedSpanPair = EasyMock.newCapture[SpanPair]()
+ context
+ .forward(EasyMock.capture(captureForwardedKey), EasyMock.capture(captureForwardedSpanPair))
+ .andAnswer(() => {
+ val spanPair = captureForwardedSpanPair.getValue
+ if (spanPair.IsSharedSpan) {
+ stateStore.asInstanceOf[KeyValueStore[String, ServiceNodeMetadata]].put(spanPair.getServerSpan.serviceName, ServiceNodeMetadata(true))
+ }
+ forwardedKeys += captureForwardedKey.getValue
+ forwardedSpanPairs += spanPair
+ }).times(expectedForwardCalls)
+
+ context.commit().times(expectedCommits)
+ }
+ context.getStateStore(storeName).andReturn(stateStore)
+ }
+ replay(context)
+ (context, stateStore.asInstanceOf[KeyValueStore[String, ServiceNodeMetadata]], forwardedKeys, forwardedSpanPairs)
+ }
+
+ private def createAccumulator(context: ProcessorContext): SpanAccumulator = {
+ val accumulator = new SpanAccumulator(storeName, DEFAULT_ACCUMULATE_INTERVAL_MILLIS.toInt, new GraphEdgeTagCollector())
+ accumulator.init(context)
+ accumulator
+ }
+
+ private def extractClientServerSvcNames(spanPairs: mutable.ListBuffer[SpanPair]): Set[String] = {
+ spanPairs.map(p => p.getClientSpan.serviceName + "->" + p.getServerSpan.serviceName).toSet
+ }
+}
diff --git a/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/app/StreamsSpec.scala b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/app/StreamsSpec.scala
new file mode 100644
index 000000000..aaea1dac5
--- /dev/null
+++ b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/app/StreamsSpec.scala
@@ -0,0 +1,79 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.app
+
+import com.expedia.www.haystack.TestSpec
+import com.expedia.www.haystack.commons.entities.GraphEdge
+import com.expedia.www.haystack.commons.kstreams.SpanTimestampExtractor
+import com.expedia.www.haystack.commons.kstreams.serde.SpanDeserializer
+import com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricDataSerializer
+import com.expedia.www.haystack.service.graph.node.finder.app.metadata.{MetadataProducerSupplier, MetadataStoreUpdateProcessorSupplier}
+import com.expedia.www.haystack.service.graph.node.finder.config.{KafkaConfiguration, NodeMetadataConfiguration}
+import com.expedia.www.haystack.service.graph.node.finder.model.ServiceNodeMetadata
+import org.apache.kafka.common.serialization._
+import org.apache.kafka.streams.state.{KeyValueStore, StoreBuilder}
+import org.apache.kafka.streams.{StreamsConfig, Topology}
+import org.easymock.EasyMock._
+
+class StreamsSpec extends TestSpec {
+ describe("configuring a topology should") {
+ it("should add a source, three processors and two sinks with expected arguments") {
+ Given("a configuration object of type KafkaConfiguration")
+ val streamsConfig = mock[StreamsConfig]
+ val kafkaConfig = KafkaConfiguration(streamsConfig,
+ "metrics", "service-call",
+ "proto-spans", Topology.AutoOffsetReset.LATEST,
+ new SpanTimestampExtractor, 10000, 10000, NodeMetadataConfiguration(false, "mystore", 1, 1), List("tier"))
+ val streams = new Streams(kafkaConfig)
+ val topology = mock[Topology]
+ When("initialize is invoked with a topology")
+ expecting {
+ topology.addSource(isA(classOf[Topology.AutoOffsetReset]), anyString(),
+ isA(classOf[SpanTimestampExtractor]), isA(classOf[StringDeserializer]),
+ isA(classOf[SpanDeserializer]), anyString()).andReturn(topology).once()
+ topology.addProcessor(anyString(), isA(classOf[SpanAccumulatorSupplier]),
+ anyString()).andReturn(topology).once()
+ topology.addProcessor(anyString(), isA(classOf[GraphNodeProducerSupplier]),
+ anyString()).andReturn(topology).once()
+ topology.addProcessor(anyString(), isA(classOf[LatencyProducerSupplier]),
+ anyString()).andReturn(topology).once()
+ topology.addSink(anyString(), anyString(), isA(classOf[StringSerializer]),
+ isA(classOf[MetricDataSerializer]), anyString()).andReturn(topology).once()
+ topology.addSink(anyString(), anyString(), isA(classOf[Serializer[GraphEdge]]),
+ isA(classOf[Serializer[GraphEdge]]), anyString()).andReturn(topology).once()
+
+ topology.addProcessor(anyString(), isA(classOf[MetadataProducerSupplier]),
+ anyString()).andReturn(topology).once()
+ topology.addSink(anyString(), anyString(), isA(classOf[Serializer[String]]),
+ isA(classOf[Serializer[ServiceNodeMetadata]]), anyString()).andReturn(topology).once()
+
+ topology.addGlobalStore(isA(classOf[StoreBuilder[KeyValueStore[String, ServiceNodeMetadata]]]),
+ anyString(),
+ isA(classOf[Deserializer[String]]),
+ isA(classOf[Deserializer[ServiceNodeMetadata]]),
+ anyString(),
+ anyString(),
+ isA(classOf[MetadataStoreUpdateProcessorSupplier])).andReturn(topology).once()
+ }
+ replay(topology)
+ streams.initialize(topology)
+ Then("it is configured as expected")
+ verify(topology)
+ }
+ }
+}
diff --git a/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/config/AppConfigurationSpec.scala b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/config/AppConfigurationSpec.scala
new file mode 100644
index 000000000..664f6f76d
--- /dev/null
+++ b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/config/AppConfigurationSpec.scala
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.config
+
+import com.expedia.www.haystack.TestSpec
+import com.expedia.www.haystack.commons.kstreams.SpanTimestampExtractor
+import com.typesafe.config.ConfigException
+
+class AppConfigurationSpec extends TestSpec {
+ describe("loading application configuration") {
+ it("should fail creating KafkaConfiguration if no application id is specified") {
+ Given("a test configuration file")
+ val file = "test/test_no_app_id.conf"
+ When("Application configuration is loaded")
+ Then("it should throw an exception")
+ intercept[IllegalArgumentException] {
+ new AppConfiguration(file).kafkaConfig
+ }
+ }
+ it("should fail creating KafkaConfiguration if no bootstrap is specified") {
+ Given("a test configuration file")
+ val file = "test/test_no_bootstrap.conf"
+ When("Application configuration is loaded")
+ Then("it should throw an exception")
+ intercept[IllegalArgumentException] {
+ new AppConfiguration(file).kafkaConfig
+ }
+ }
+ it("should fail creating KafkaConfiguration if no metrics topic is specified") {
+ Given("a test configuration file")
+ val file = "test/test_no_metrics_topic.conf"
+ When("Application configuration is loaded")
+ Then("it should throw an exception")
+ intercept[ConfigException] {
+ new AppConfiguration(file).kafkaConfig
+ }
+ }
+ it("should fail creating KafkaConfiguration if no consumer is specified") {
+ Given("a test configuration file")
+ val file = "test/test_no_consumer.conf"
+ When("Application configuration is loaded")
+ Then("it should throw an exception")
+ intercept[ConfigException] {
+ new AppConfiguration(file).kafkaConfig
+ }
+ }
+ it("should fail creating KafkaConfiguration if no producer is specified") {
+ Given("a test configuration file")
+ val file = "test/test_no_producer.conf"
+ When("Application configuration is loaded")
+ Then("it should throw an exception")
+ intercept[ConfigException] {
+ new AppConfiguration(file).kafkaConfig
+ }
+ }
+ it("should create KafkaConfiguration as specified") {
+ Given("a test configuration file")
+ val file = "test/test.conf"
+ When("Application configuration is loaded and KafkaConfiguration is obtained")
+ val config = new AppConfiguration(file).kafkaConfig
+ Then("it should load as expected")
+ config.streamsConfig.defaultTimestampExtractor() shouldBe a [SpanTimestampExtractor]
+ config.serviceCallTopic should be ("graph-nodes")
+ config.accumulatorInterval should be (60000)
+ config.metadataConfig.topic should be ("haystack-node-finder-metadata")
+ config.metadataConfig.partitionCount should be (6)
+ config.metadataConfig.replicationFactor should be (2)
+ }
+ }
+}
diff --git a/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/model/SpanPairSpec.scala b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/model/SpanPairSpec.scala
new file mode 100644
index 000000000..9d67ac108
--- /dev/null
+++ b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/model/SpanPairSpec.scala
@@ -0,0 +1,95 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.model
+
+import java.util.UUID
+
+import com.expedia.metrics.{MetricDefinition, TagCollection}
+import com.expedia.www.haystack.TestSpec
+import com.expedia.www.haystack.commons.entities._
+import com.expedia.www.haystack.service.graph.node.finder.utils.SpanType
+
+import scala.collection.JavaConverters._
+
+class SpanPairSpec extends TestSpec {
+ describe("a complete span") {
+ it("should return a valid graphEdge for non-open tracing compliant spans") {
+ Given("a complete spanlite")
+ val spanId = UUID.randomUUID().toString
+ val parentSpanId = UUID.randomUUID().toString
+ val clientTime = System.currentTimeMillis()
+
+ val clientSpan = newLightSpan(spanId, parentSpanId, "foo-service", "bar", clientTime, 1000, SpanType.CLIENT)
+ val serverSpan = newLightSpan(spanId, parentSpanId, "baz-service", "bar", SpanType.SERVER)
+ val spanPair = SpanPairBuilder.createSpanPair(clientSpan, serverSpan)
+
+ When("get graphEdge is called")
+ val graphEdge = spanPair.getGraphEdge
+
+ Then("it should return a valid graphEdge")
+ spanPair.isComplete should be(true)
+ graphEdge.get should be(GraphEdge(GraphVertex("foo-service"), GraphVertex("baz-service"), "bar", clientTime))
+ }
+
+ it("should return a valid graphEdge for open tracing compliant spans") {
+ Given("a complete spanlite")
+ val spanId = UUID.randomUUID().toString
+ val clientTime = System.currentTimeMillis()
+
+ val clientSpan = newLightSpan(spanId, UUID.randomUUID().toString, "foo-service", "bar", clientTime, 1000, SpanType.OTHER)
+ val serverSpan = newLightSpan(UUID.randomUUID().toString, spanId, "baz-service", "bar", SpanType.OTHER)
+ val spanPair = SpanPairBuilder.createSpanPair(clientSpan, serverSpan)
+
+ When("get graphEdge is called")
+ val graphEdge = spanPair.getGraphEdge
+
+ Then("it should return a valid graphEdge")
+ spanPair.isComplete should be(true)
+ graphEdge.get should be(GraphEdge(GraphVertex("foo-service"), GraphVertex("baz-service"), "bar", clientTime))
+ }
+ it("should return valid metricPoints") {
+ Given("a complete spanlite")
+ val clientSend = System.currentTimeMillis()
+ val serverReceive = clientSend + 500
+ val spanId = UUID.randomUUID().toString
+      val serverSpan = newLightSpan(spanId, UUID.randomUUID().toString, "baz-service", "bar",
+        serverReceive, 500, SpanType.SERVER, Map())
+      val clientSpan = newLightSpan(spanId, UUID.randomUUID().toString, "foo-service", "bar",
+        clientSend, 1500, SpanType.CLIENT, Map())
+      //names now match each span's SpanType; the original argument order to the builder is preserved
+      val spanPair = SpanPairBuilder.createSpanPair(serverSpan, clientSpan)
+
+ When("get Latency is called")
+ val metricPoint = spanPair.getLatency.get
+
+ Then("it should return a valid latency pairs")
+ val tags = new TagCollection(Map(
+ TagKeys.SERVICE_NAME_KEY -> "foo-service",
+ TagKeys.OPERATION_NAME_KEY -> "bar",
+ MetricDefinition.UNIT -> "ms",
+ MetricDefinition.MTYPE -> "gauge"
+ ).asJava)
+
+ spanPair.isComplete should be(true)
+ metricPoint.getMetricDefinition.getKey should be ("latency")
+ metricPoint.getValue should be (1)
+ metricPoint.getTimestamp should be (clientSend / 1000)
+ metricPoint.getMetricDefinition.getTags should equal (tags)
+ }
+ }
+
+}
diff --git a/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/utils/SpanUtilsSpec.scala b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/utils/SpanUtilsSpec.scala
new file mode 100644
index 000000000..949e00878
--- /dev/null
+++ b/service-graph/node-finder/src/test/scala/com/expedia/www/haystack/service/graph/node/finder/utils/SpanUtilsSpec.scala
@@ -0,0 +1,120 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.node.finder.utils
+
+import com.expedia.open.tracing.Tag
+import com.expedia.www.haystack.TestSpec
+
+class SpanUtilsSpec extends TestSpec {
+ describe("discovering a span type") {
+ it("should return CLIENT when both 'cr' and 'cs' is present") {
+ Given("a span with 'cr' and 'cs' event logs")
+ val (span, _) = newSpan("foo-service", "bar", 6000, client = true, server = false)
+ When("getSpanType is called")
+ val spanType = SpanUtils.getSpanType(span)
+ Then("it is marked as CLIENT")
+ spanType should be (SpanType.CLIENT)
+ }
+
+ it("should return CLIENT when more when 'cr', 'cs' and 'sr' is present but span.kind is set correctly") {
+ Given("a span with 'cr','cs', 'sr' and 'ss' event logs")
+ val (span, _) = newSpan("foo-service", "bar", 6000, client = true, server = true)
+ When("getSpanType is called")
+ val spanType = SpanUtils.getSpanType(span)
+ Then("it is marked as OTHER")
+ spanType should be (SpanType.CLIENT)
+ }
+
+ it("should return SERVER when just 'sr' and 'ss' are present") {
+ Given("a span with 'sr' and 'ss' event logs")
+ val (span, _) = newSpan("foo-service", "bar", 6000, client = false, server = true)
+ When("getSpanType is called")
+ val spanType = SpanUtils.getSpanType(span)
+ Then("it is marked as SERVER")
+ spanType should be (SpanType.SERVER)
+ }
+
+ it("should return client when 'cr', 'cs', 'sr' and 'ss' are present but span.kind tag is present") {
+ Given("a span with no 'cr', cs', 'sr' and 'ss' event logs")
+ var (span, _) = newSpan("foo-service", "bar", 6000, client = false, server = false)
+ span = span.toBuilder.addTags(Tag.newBuilder().setKey("span.kind").setVStr("client")).build()
+ When("getSpanType is called")
+ val spanType = SpanUtils.getSpanType(span)
+ Then("it is marked as CLIENT")
+ spanType should be (SpanType.CLIENT)
+ }
+
+ it("should return server when 'cr', 'cs', 'sr' and 'ss' are present but span.kind tag is present") {
+ Given("a span with no 'cr', 'cs', 'sr' and 'ss' event logs")
+ var (span, _) = newSpan("foo-service", "bar", 6000, client = false, server = false)
+ span = span.toBuilder.addTags(Tag.newBuilder().setKey("span.kind").setVStr("server")).build()
+ When("getSpanType is called")
+ val spanType = SpanUtils.getSpanType(span)
+ Then("it is marked as SERVER")
+ spanType should be (SpanType.SERVER)
+ }
+
+ it("should return server when 'sr' and 'ss' are present and span.kind tag is also present") {
+ Given("a span with 'sr' and 'ss' event logs and span.kind tag as 'server'")
+ var (span, _) = newSpan("foo-service", "bar", 6000, client = false, server = true)
+ span = span.toBuilder.addTags(Tag.newBuilder().setKey("span.kind").setVStr("server")).build()
+ When("getSpanType is called")
+ val spanType = SpanUtils.getSpanType(span)
+ Then("it is marked as SERVER")
+ spanType should be (SpanType.SERVER)
+ }
+
+ it("should return client when 'cr' and 'cs' are present and span.kind tag is also present") {
+ Given("a span with 'cr' and 'cs' event logs and span.kind tag as 'client'")
+ var (span, _) = newSpan("foo-service", "bar", 6000, client = true, server = false)
+ span = span.toBuilder.addTags(Tag.newBuilder().setKey("span.kind").setVStr("client")).build()
+ When("getSpanType is called")
+ val spanType = SpanUtils.getSpanType(span)
+ Then("it is marked as CLIENT")
+ spanType should be (SpanType.CLIENT)
+ }
+ }
+
+ describe("finding an event time") {
+ it("should return None with the spanType is OTHER") {
+ Given("a span with no event logs")
+ val (span, _) = newSpan("foo-service", "bar", 6000, client = false, server = false)
+ When("getEventTime is called")
+ val eventTime = SpanUtils.getEventTimestamp(span, SpanUtils.SERVER_SEND_EVENT)
+ Then("it is marked as OTHER")
+ eventTime should be (None)
+ }
+ it("should return None with the spanType is SERVER and we look for CLIENT_SEND") {
+ Given("a span with no event logs")
+ val (span, _) = newSpan("foo-service", "bar", 6000, client = false, server = true)
+ When("getEventTime is called")
+ val eventTime = SpanUtils.getEventTimestamp(span, SpanUtils.CLIENT_SEND_EVENT)
+ Then("it is marked as OTHER")
+ eventTime should be (None)
+ }
+ it("should return timeStamp with the spanType is SERVER and we look for SERVER_SEND") {
+ Given("a span with no event logs")
+ val (span, _) = newSpan("foo-service", "bar", 6000, client = false, server = true)
+ When("getEventTime is called")
+ val eventTime = SpanUtils.getEventTimestamp(span, SpanUtils.SERVER_SEND_EVENT)
+ Then("it is marked as OTHER")
+ (eventTime.get > 0) should be (true)
+ }
+ }
+
+}
diff --git a/service-graph/pom.xml b/service-graph/pom.xml
new file mode 100644
index 000000000..66ef8c6a7
--- /dev/null
+++ b/service-graph/pom.xml
@@ -0,0 +1,538 @@
+
+
+
+ 4.0.0
+ com.expedia.www
+ haystack-service-graph
+ 1.0.15-SNAPSHOT
+ pom
+
+
+ node-finder
+ graph-builder
+ snapshot-store
+ snapshotter
+
+
+
+ scm:git:git://github.com/ExpediaDotCom/haystack-service-graph.git
+ scm:git:ssh://github.com/ExpediaDotCom/haystack-service-graph.git
+ http://github.com/ExpediaDotCom/haystack-service-graph
+
+
+ ${project.groupId}:${project.artifactId}
+ Code to generate the service graph and network latency using spans from the proto span stream
+ https://github.com/ExpediaDotCom/haystack-service-graph/tree/master
+
+
+
+
+ Apache License, Version 2.0
+ http://www.apache.org/licenses/LICENSE-2.0.txt
+ repo
+
+
+
+
+
+ haystack
+ Haystack Team
+ haystack@expedia.com
+ https://github.com/ExpediaDotCom/haystack
+
+
+
+
+ 4.2.5
+ 1.11.447
+ 3.4
+ 1.7.1
+ 0.5.0
+ 1.0.61
+ 0.1.12
+ ${version}
+ 4.5.3
+ 9.4.19.v20190610
+ 3.5.3
+ 3.6.0
+ 1.2.3
+ 1.6
+ 3.2.1
+ 3.0.1
+ 1.9.5
+ 0.8.13
+ 4.1.45.Final
+ 1.6.8
+ 1.6.0
+ 1.8
+ 3.4.0
+ ${scala.major.version}.${scala.minor.version}.${scala.tiny.version}
+ ${scala.major.version}.${scala.minor.version}
+ 2
+ 12
+ 5
+ 3.0.3
+ 2.4.1
+ 1.3.0
+ 1.7.25
+ 6.8
+ 1.3.1
+
+ true
+
+
+
+
+
+
+ com.google.protobuf
+ protobuf-java
+ ${protobuf.version}
+
+
+ io.grpc
+ grpc-protobuf
+ ${grpc.version}
+
+
+ io.grpc
+ grpc-stub
+ ${grpc.version}
+
+
+ io.grpc
+ grpc-netty
+ ${grpc.version}
+
+
+ io.netty
+ netty-handler
+ ${netty.handler.version}
+
+
+
+
+ org.scala-lang
+ scala-library
+ ${scala-library.version}
+
+
+ org.scala-lang
+ scala-reflect
+ ${scala-library.version}
+
+
+
+
+ com.typesafe
+ config
+ ${typesafe-config.version}
+
+
+
+
+ ch.qos.logback
+ logback-classic
+ ${logback.version}
+
+
+ ch.qos.logback
+ logback-core
+ ${logback.version}
+
+
+ org.slf4j
+ slf4j-api
+ ${slf4j-api.version}
+
+
+
+ com.amazonaws
+ aws-java-sdk-s3
+ ${aws-java-sdk.version}
+
+
+
+ org.json4s
+ json4s-jackson_${scala.major.minor.version}
+ ${json4s.version}
+
+
+
+ org.json4s
+ json4s-ext_${scala.major.minor.version}
+ ${json4s.version}
+
+
+
+ com.nrinaudo
+ kantan.csv_${scala.major.minor.version}
+ ${kantan_csv.version}
+
+
+
+ com.expedia.www
+ haystack-logback-metrics-appender
+ ${haystack.logback.metrics.appender.version}
+
+
+
+ org.apache.commons
+ commons-lang3
+ ${commons-lang.version}
+
+
+
+ org.apache.httpcomponents
+ httpclient
+ ${httpclient.version}
+
+
+
+
+ com.expedia.www
+ haystack-commons
+ ${haystack-commons.version}
+
+
+ com.expedia.www
+ haystack-service-graph-snapshot-store
+ ${haystack-service-graph-snapshot-store.version}
+
+
+
+
+ org.msgpack
+ msgpack-core
+ ${msgpack.version}
+
+
+
+
+ org.eclipse.jetty
+ jetty-server
+ ${jetty.version}
+
+
+ org.eclipse.jetty
+ jetty-servlet
+ ${jetty.version}
+
+
+ org.apache.httpcomponents
+ fluent-hc
+ ${apache.httpcomponents.version}
+
+
+
+ org.mockito
+ mockito-all
+ ${mockito.version}
+ test
+
+
+
+ org.scalaj
+ scalaj-http_${scala.major.minor.version}
+ ${scalaj-http.version}
+
+
+
+
+
+
+
+ com.expedia.www
+ haystack-commons
+
+
+
+ org.json4s
+ json4s-jackson_${scala.major.minor.version}
+
+
+
+ com.typesafe
+ config
+
+
+
+ com.google.protobuf
+ protobuf-java
+
+
+
+ io.grpc
+ grpc-protobuf
+
+
+
+ io.grpc
+ grpc-stub
+
+
+
+ org.scala-lang
+ scala-library
+
+
+
+ org.scala-lang
+ scala-reflect
+
+
+
+ ch.qos.logback
+ logback-classic
+
+
+
+ ch.qos.logback
+ logback-core
+
+
+
+ org.slf4j
+ slf4j-api
+
+
+
+
+ org.scalatest
+ scalatest_${scala.major.minor.version}
+ ${scalatest.version}
+ test
+
+
+ org.pegdown
+ pegdown
+ ${pegdown.version}
+ test
+
+
+ junit
+ junit
+ 4.12
+ test
+
+
+ org.easymock
+ easymock
+ 3.4
+ test
+
+
+ org.apache.kafka
+ kafka-streams
+ 1.1.0
+ provided
+
+
+ com.codahale.metrics
+ metrics-core
+ 3.0.2
+
+
+
+
+
+ ${basedir}/src/main/scala
+
+
+ ${basedir}/src/main/resources
+ true
+
+
+
+
+
+ org.scalatest
+ scalatest-maven-plugin
+ 1.0
+
+
+ test
+
+ test
+
+
+
+
+
+
+ org.scoverage
+ scoverage-maven-plugin
+ ${scoverage.plugin.version}
+
+
+ 80
+ true
+ true
+ ${scala-library.version}
+ true
+
+
+
+
+ org.apache.maven.plugins
+ maven-shade-plugin
+ 1.6
+
+
+
+ org.scalastyle
+ scalastyle-maven-plugin
+ 0.8.0
+
+ true
+ false
+ ${basedir}/../checkstyles/scalastyle_config.xml
+ ${basedir}/src/main/scala
+ ${basedir}/src/test/scala
+ ${project.build.directory}/scalastyle-output.xml
+ UTF-8
+
+
+
+ compile-scalastyle
+
+ check
+
+ compile
+
+
+
+
+ net.alchim31.maven
+ scala-maven-plugin
+ 3.2.1
+
+
+ scala-compile-first
+ process-resources
+
+ add-source
+ compile
+
+
+
+ scala-test-compile
+ process-test-resources
+
+ testCompile
+
+
+
+ attach-javadocs
+
+ doc-jar
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+ 3.6.1
+
+ ${project.jdk.version}
+ ${project.jdk.version}
+
+
+
+ org.apache.maven.plugins
+ maven-source-plugin
+ ${maven-source-plugin.version}
+
+
+ attach-sources
+
+ jar-no-fork
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-gpg-plugin
+ ${maven-gpg-plugin.version}
+
+
+ org.sonatype.plugins
+ nexus-staging-maven-plugin
+ ${nexus-staging-maven-plugin.version}
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-source-plugin
+
+
+ attach-sources
+
+ jar-no-fork
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-gpg-plugin
+
+ ${skipGpg}
+
+
+
+ sign-artifacts
+ verify
+
+ sign
+
+
+
+
+
+ org.sonatype.plugins
+ nexus-staging-maven-plugin
+ true
+
+ ossrh
+ https://oss.sonatype.org/
+ true
+
+
+
+
+
+
+
+ ossrh
+ https://oss.sonatype.org/content/repositories/snapshots
+
+
+ ossrh
+ http://oss.sonatype.org/service/local/staging/deploy/maven2/
+
+
+
diff --git a/service-graph/snapshot-store/README.md b/service-graph/snapshot-store/README.md
new file mode 100644
index 000000000..c8429565b
--- /dev/null
+++ b/service-graph/snapshot-store/README.md
@@ -0,0 +1,25 @@
+# Haystack : snapshot-store
+
+The "snapshot" feature of the service graph starts with a call (made at regular intervals) to the service-graph service,
+asking for a current copy of the service graph; the service graph that it receives is then persisted. Currently two
+types of persistent storage are supported:
+1. The local file system, for development work and small installations
+2. Amazon [AWS S3](https://aws.amazon.com/s3/)
+
+The persistent copies can then be queried to observe the service graph at a point in time in the past.
+
+This snapshot-store package contains code to manage both types of durable locations.
+
+The persistent copies of the service-graph will be purged after a suitable amount of time. Since S3 has an
+[object expiration feature](https://aws.amazon.com/blogs/aws/amazon-s3-object-expiration/), there is no need
+for snapshot-store code to purge data from S3. The code for the local file system does purge expired data.
+
+Snapshots are stored in two CSV files: one for edges and one for nodes. These files can be consumed by
+[Spark DataFrames](https://spark.apache.org/docs/2.3.0/sql-programming-guide.html#datasets-and-dataframes)
+and were chosen as the storage format to facilitate using Spark or similar tools to analyze historical service graphs.
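+
+As a minimal sketch of such an analysis (assumptions: Spark is on the classpath, the snapshot was written to a
+hypothetical `/snapshots/<timestamp>` directory, and the CSV files carry a header row; the file names come from
+`Constants.scala`):
+
+```scala
+import org.apache.spark.sql.SparkSession
+
+// local session for ad-hoc exploration
+val spark = SparkSession.builder().appName("service-graph-snapshot").master("local[*]").getOrCreate()
+
+// read the two snapshot files written by the snapshot-store
+val nodes = spark.read.option("header", "true").csv("/snapshots/1530000000000/serviceGraph_nodes.csv")
+val edges = spark.read.option("header", "true").csv("/snapshots/1530000000000/serviceGraph_edges.csv")
+
+edges.show()
+```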
+
+## Building
+
+```
+mvn clean package
+```
\ No newline at end of file
diff --git a/service-graph/snapshot-store/pom.xml b/service-graph/snapshot-store/pom.xml
new file mode 100644
index 000000000..9eff98831
--- /dev/null
+++ b/service-graph/snapshot-store/pom.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <artifactId>haystack-service-graph</artifactId>
+    <groupId>com.expedia.www</groupId>
+    <version>1.0.15-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>haystack-service-graph-snapshot-store</artifactId>
+
+  <licenses>
+    <license>
+      <name>Apache License, Version 2.0</name>
+      <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+      <distribution>repo</distribution>
+    </license>
+  </licenses>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.expedia.www</groupId>
+      <artifactId>haystack-logback-metrics-appender</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.amazonaws</groupId>
+      <artifactId>aws-java-sdk-s3</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.nrinaudo</groupId>
+      <artifactId>kantan.csv_${scala.major.minor.version}</artifactId>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.scalatest</groupId>
+        <artifactId>scalatest-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>test</id>
+            <goals>
+              <goal>test</goal>
+            </goals>
+            <configuration>
+              <tagsToExclude>org.expedia.www.haystack.commons.scalatest.IntegrationSuite</tagsToExclude>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.scoverage</groupId>
+        <artifactId>scoverage-maven-plugin</artifactId>
+        <configuration>
+          <failOnMinimumCoverage>true</failOnMinimumCoverage>
+          <minimumCoverage>100</minimumCoverage>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>net.alchim31.maven</groupId>
+        <artifactId>scala-maven-plugin</artifactId>
+      </plugin>
+      <plugin>
+        <groupId>org.scalastyle</groupId>
+        <artifactId>scalastyle-maven-plugin</artifactId>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/Constants.scala b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/Constants.scala
new file mode 100644
index 000000000..5583a6572
--- /dev/null
+++ b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/Constants.scala
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+object Constants {
+ private val DotJson = ".json"
+
+ val DotCsv = ".csv"
+ val SlashNodes = "/nodes"
+ val SlashEdges = "/edges"
+ val _Nodes = "_nodes"
+ val _Edges = "_edges"
+ val SourceKey: String = "source"
+ val EdgesKey: String = "edges"
+ val DestinationKey: String = "destination"
+ val StatsKey: String = "stats"
+ val TagsKey: String = "tags"
+ val CountKey: String = "count"
+ val LastSeenKey: String = "lastSeen"
+ val ErrorCountKey: String = "errorCount"
+ val EffectiveFromKey: String = "effectiveFrom"
+ val EffectiveToKey: String = "effectiveTo"
+ val IdKey: String = "id"
+ val NameKey: String = "name"
+ val InfrastructureProviderKey: String = "X-HAYSTACK-INFRASTRUCTURE-PROVIDER"
+ val TierKey: String = "tier"
+ val ServiceGraph: String = "serviceGraph"
+ val JsonFileNameWithExtension: String = ServiceGraph + DotJson
+ val NodesCsvFileNameWithExtension: String = ServiceGraph + _Nodes + DotCsv
+ val EdgesCsvFileNameWithExtension: String = ServiceGraph + _Edges + DotCsv
+}
diff --git a/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/DataFramesIntoJsonTransformer.scala b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/DataFramesIntoJsonTransformer.scala
new file mode 100644
index 000000000..95a4f4919
--- /dev/null
+++ b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/DataFramesIntoJsonTransformer.scala
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+import com.expedia.www.haystack.service.graph.snapshot.store.Constants.EdgesKey
+import com.expedia.www.haystack.service.graph.snapshot.store.DataFramesIntoJsonTransformer.{AddToMapError, WriteError}
+import kantan.csv._
+import kantan.csv.ops._
+import org.slf4j.{Logger, LoggerFactory}
+
+import scala.collection.mutable
+
+object DataFramesIntoJsonTransformer {
+ val WriteError: String = "Problem reading JSON in write()"
+ val AddToMapError: String = "Problem reading JSON in addToMap()"
+}
+
+class DataFramesIntoJsonTransformer(logger: Logger) {
+  def this() = {
+    this(LoggerFactory.getLogger("com.expedia.www.haystack.service.graph.snapshot.store.DataFramesIntoJsonTransformer"))
+  }
+
+ private def addToMap(map: mutable.Map[Long, Node],
+ either: Either[ReadError, NodeWithId]): Unit = {
+ either match {
+ case Left(readError) => logger.error(AddToMapError, readError)
+ case Right(nodeWithId) => map.put(nodeWithId.id, NodeWithId.nodeMapper(nodeWithId))
+ }
+ }
+
+ private var prependComma = false
+
+ private def write(stringBuilder: StringBuilder,
+ nodeIdVsNode: mutable.Map[Long, Node],
+ either: Either[ReadError, EdgeWithIds]): Unit = {
+ either match {
+ case Left(readError) => logger.error(WriteError, readError)
+ case Right(edgeWithId) =>
+ val edge = Edge.mapper(nodeIdVsNode, edgeWithId)
+ stringBuilder.append(edge.toJson(prependComma))
+ prependComma = true
+ }
+ }
+
+ private implicit val nodeDecoder: RowDecoder[NodeWithId] =
+ RowDecoder.decoder(0, 1, 2, 3)(NodeWithId.apply)
+
+ private implicit val edgeDecoder: RowDecoder[EdgeWithIds] =
+ RowDecoder.decoder(0, 1, 2, 3, 4, 5, 6, 7)(EdgeWithIds.apply)
+
+ def parseDataFrames(nodesRawData: String,
+ edgesRawData: String): String = {
+ val nodeIdVsNode = saveNodesToMap(nodesRawData)
+ val stringBuilder = new StringBuilder
+ stringBuilder.append("{\n \"").append(EdgesKey).append("\": [\n")
+ edgesRawData.asCsvReader[EdgeWithIds](rfc.withHeader).foreach(write(stringBuilder, nodeIdVsNode, _))
+ stringBuilder.append("\n ]\n}\n")
+ stringBuilder.toString()
+ }
+
+ private def saveNodesToMap(nodesRawData: String): mutable.Map[Long, Node] = {
+ val nodeIdVsNode = mutable.Map[Long, Node]()
+ nodesRawData.asCsvReader[NodeWithId](rfc.withHeader).foreach(addToMap(nodeIdVsNode, _))
+ nodeIdVsNode
+ }
+}
diff --git a/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/Edge.scala b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/Edge.scala
new file mode 100644
index 000000000..f49ad8f44
--- /dev/null
+++ b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/Edge.scala
@@ -0,0 +1,96 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+import Constants._
+
+import scala.collection.mutable
+
+case class Edge(source: Node,
+ destination: Node,
+ statsCount: Long,
+ statsLastSeen: Long,
+ statsErrorCount: Long,
+ effectiveFrom: Long,
+ effectiveTo: Long) {
+ private val Newline = "\n"
+ private val CommaNewline = "," + Newline
+ private val QuoteColonSpace = "\": "
+
+ def toJson(prependComma: Boolean): String = {
+ val stringBuilder = new StringBuilder
+ if (prependComma) {
+ stringBuilder.append(CommaNewline)
+ }
+ stringBuilder.append(" {")
+ appendNode(stringBuilder, source, SourceKey)
+ appendNode(stringBuilder, destination, DestinationKey)
+ appendStats(stringBuilder)
+ stringBuilder.append(" \"").append(EffectiveFromKey).append(QuoteColonSpace).append(effectiveFrom).append(CommaNewline)
+ stringBuilder.append(" \"").append(EffectiveToKey).append(QuoteColonSpace).append(effectiveTo).append(Newline)
+ stringBuilder.append(" }")
+ stringBuilder.toString()
+ }
+
+ private def appendStats(stringBuilder: StringBuilder) = {
+ stringBuilder.append("\n \"").append(StatsKey).append("\": {\n")
+ stringBuilder.append(" \"").append(CountKey).append(QuoteColonSpace).append(statsCount).append(CommaNewline)
+ stringBuilder.append(" \"").append(LastSeenKey).append(QuoteColonSpace).append(statsLastSeen).append(CommaNewline)
+ stringBuilder.append(" \"").append(ErrorCountKey).append(QuoteColonSpace).append(statsErrorCount).append(Newline)
+ stringBuilder.append(" },\n")
+ }
+
+  private def appendNode(stringBuilder: StringBuilder, node: Node, key: String) = {
+    // True when the node has at least one tag to emit into the "tags" object
+    def areAnyTagsDefined = node.infrastructureProvider.isDefined || node.tier.isDefined
+
+    stringBuilder.append("\n \"").append(key).append("\": {\n")
+    stringBuilder.append(" \"").append(NameKey).append("\": \"").append(node.name).append("\"")
+    stringBuilder.append(CommaNewline).append(" \"").append(TagsKey).append("\": {")
+    if (areAnyTagsDefined) {
+      stringBuilder.append(Newline)
+      if (node.tier.isDefined) {
+        stringBuilder.append(" \"").append(TierKey).append("\": \"").append(node.tier.get).append("\"")
+        if (node.infrastructureProvider.isDefined) {
+          stringBuilder.append(CommaNewline)
+        }
+      }
+      if (node.infrastructureProvider.isDefined) {
+        stringBuilder.append(" \"").append(InfrastructureProviderKey).append("\": \"")
+          .append(node.infrastructureProvider.get).append("\"")
+      }
+      stringBuilder.append("\n ")
+    }
+    stringBuilder.append("}\n")
+    stringBuilder.append(" },")
+  }
+}
+
+object Edge {
+ def mapper(nodeIdVsNode: mutable.Map[Long, Node],
+ edgeWithIds: EdgeWithIds): Edge = {
+ Edge(nodeIdVsNode(edgeWithIds.sourceId),
+ nodeIdVsNode(edgeWithIds.destinationId),
+ edgeWithIds.statsCount,
+ edgeWithIds.statsLastSeen,
+ edgeWithIds.statsErrorCount,
+ edgeWithIds.effectiveFrom,
+ edgeWithIds.effectiveTo)
+ }
+}
diff --git a/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/EdgeWithIds.scala b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/EdgeWithIds.scala
new file mode 100644
index 000000000..fa2c87251
--- /dev/null
+++ b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/EdgeWithIds.scala
@@ -0,0 +1,27 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+case class EdgeWithIds(id: Long,
+ sourceId: Long,
+ destinationId: Long,
+ statsCount: Long,
+ statsLastSeen: Long,
+ statsErrorCount: Long,
+ effectiveFrom: Long,
+ effectiveTo: Long)
\ No newline at end of file
diff --git a/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/FileSnapshotStore.scala b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/FileSnapshotStore.scala
new file mode 100644
index 000000000..562a3fd87
--- /dev/null
+++ b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/FileSnapshotStore.scala
@@ -0,0 +1,116 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+import java.nio.charset.StandardCharsets
+import java.nio.file.{Files, Path, Paths}
+import java.time.Instant
+import java.util.{Comparator, Optional}
+
+class FileSnapshotStore(val directoryName: String) extends SnapshotStore {
+ private val directory = Paths.get(directoryName)
+
+ def this() = {
+ this("/")
+ }
+
+ /**
+ * Returns a FileSnapshotStore using the directory name specified
+ *
+ * @param constructorArguments constructorArguments[0] must specify the directory to which snapshots will be stored
+ * @return the concrete FileSnapshotStore to use
+ */
+ override def build(constructorArguments: Array[String]): SnapshotStore = {
+ new FileSnapshotStore(constructorArguments(0))
+ }
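+
+  // Hypothetical invocation (the directory is invented for illustration):
+  //   new FileSnapshotStore().build(Array("/var/haystack/snapshots"))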
+
+ /**
+ * Writes a string to the persistent store
+ *
+ * @param instant date/time of the write, used to create the name, which will later be used in read() and purge()
+ * @param content String to write
+ * @return a tuple of the paths of the two CSV files (one for nodes, one for edges, in that order) to which the nodes
+ * and edges were written; @see java.nio.file.Path
+ */
+ override def write(instant: Instant,
+ content: String): (Path, Path) = {
+ if (!Files.exists(directory)) {
+ Files.createDirectories(directory)
+ }
+ val nodesAndEdges = transformJsonToNodesAndEdges(content)
+ val nodePath = write(instant, Constants._Nodes, nodesAndEdges.nodes)
+ val edgesPath = write(instant, Constants._Edges, nodesAndEdges.edges)
+ (nodePath, edgesPath)
+ }
+
+ private def write(instant: Instant, suffix: String, content: String) = {
+ val path = directory.resolve(createIso8601FileName(instant) + suffix)
+ Files.write(path, content.getBytes(StandardCharsets.UTF_8))
+ }
+
+ private val pathNameComparator: Comparator[Path] = (o1: Path, o2: Path) => o1.toString.compareTo(o2.toString)
+ /**
+ * Reads content from the persistent store
+ *
+ * @param instant date/time of the read
+ * @return the content, transformed to JSON, of the youngest _nodes and _edges files whose ISO-8601-based name is
+ * earlier than or equal to instant
+ */
+ override def read(instant: Instant): Option[String] = {
+ var optionString: Option[String] = None
+ val fileNameForInstant = createIso8601FileName(instant)
+ val fileToUse: Optional[Path] = Files
+ .walk(directory, 1)
+ .filter(_.toFile.getName.endsWith(Constants._Nodes))
+ .filter(_.toFile.getName.substring(0, fileNameForInstant.length) <= fileNameForInstant)
+ .max(pathNameComparator)
+ if (fileToUse.isPresent) {
+ val nodesRawData = Files.readAllLines(fileToUse.get).toArray.mkString("\n")
+ val edgesPath = Paths.get(fileToUse.get().toAbsolutePath.toString.replace(Constants._Nodes, Constants._Edges))
+ val edgesRawData = Files.readAllLines(edgesPath).toArray.mkString("\n")
+ optionString = Some(transformNodesAndEdgesToJson(nodesRawData, edgesRawData))
+ }
+ optionString
+ }
+
+ /**
+ * Purges items from the persistent store
+ *
+ * @param instant date/time of items to be purged; items whose ISO-8601-based name is earlier than or equal to
+ * instant will be purged
+ * @return the number of items purged
+ */
+ override def purge(instant: Instant): Integer = {
+ val fileNameForInstant = createIso8601FileName(instant)
+ val pathsToPurge: Array[AnyRef] = Files
+ .walk(directory, 1)
+ .filter(isNodesOrEdgesFile(_))
+ .filter(_.toFile.getName.substring(0, fileNameForInstant.length) <= fileNameForInstant)
+ .toArray
+ for (anyRef <- pathsToPurge) {
+ Files.delete(anyRef.asInstanceOf[Path])
+ }
+ pathsToPurge.length
+ }
+
+ private def isNodesOrEdgesFile(path: Path): Boolean = {
+ val name = path.toFile.getName
+ name.endsWith(Constants._Nodes) || name.endsWith(Constants._Edges)
+ }
+
+}
diff --git a/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/JsonIntoDataFramesTransformer.scala b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/JsonIntoDataFramesTransformer.scala
new file mode 100644
index 000000000..d1d2fe85f
--- /dev/null
+++ b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/JsonIntoDataFramesTransformer.scala
@@ -0,0 +1,143 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+import com.expedia.www.haystack.service.graph.snapshot.store.Constants._
+import com.expedia.www.haystack.service.graph.snapshot.store.JsonIntoDataFramesTransformer._
+import org.json4s._
+import org.json4s.jackson.JsonMethods._
+
+import scala.collection.mutable
+import scala.util.Try
+
+object JsonIntoDataFramesTransformer {
+ private val SourceId = SourceKey + IdKey.capitalize
+ private val DestinationId = DestinationKey + IdKey.capitalize
+ private val StatsCount = StatsKey + CountKey.capitalize
+ private val StatsLastSeen = StatsKey + LastSeenKey.capitalize
+ private val StatsErrorCount = StatsKey + ErrorCountKey.capitalize
+ private val NodesFormatString = "%s,%s,%s,%s\n"
+ private val EdgesFormatString = "%s,%s,%s,%s,%s,%s,%s,%s\n"
+
+ val NodesHeader: String = NodesFormatString.format(
+ IdKey, NameKey, InfrastructureProviderKey, TierKey)
+ val EdgesHeader: String = EdgesFormatString.format(
+ IdKey, SourceId, DestinationId, StatsCount, StatsLastSeen, StatsErrorCount, EffectiveFromKey, EffectiveToKey)
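+
+  // The resulting CSV header rows are:
+  //   id,name,X-HAYSTACK-INFRASTRUCTURE-PROVIDER,tier
+  //   id,sourceId,destinationId,statsCount,statsLastSeen,statsErrorCount,effectiveFrom,effectiveTo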
+}
+
+class JsonIntoDataFramesTransformer {
+
+ def parseJson(jsonInput: String): NodesAndEdges = {
+ val jValue = parse(jsonInput, useBigDecimalForDouble = false, useBigIntForLong = true)
+ implicit val formats: DefaultFormats.type = DefaultFormats
+ val map = jValue.extract[Map[String, Any]]
+ val edgesList = map.getOrElse(EdgesKey, List[Any]()).asInstanceOf[List[Any]]
+ val nodeToIdMap = getNodeToIdMap(edgesList)
+ val nodes = createNodesCsvList(nodeToIdMap)
+ val edges = createEdgesCsvList(edgesList, nodeToIdMap)
+ NodesAndEdges(nodes, edges)
+ }
+
+  private def createEdgesCsvList(edgesList: List[Any], nodeToIdMap: mutable.Map[Node, Long]): String = {
+    val stringBuilder = new mutable.StringBuilder(EdgesHeader)
+    var id = 1
+    // A plain for loop: the iteration exists only for its side effects on stringBuilder
+    for (edge <- edgesList) {
+      val edgeAsMap = edge.asInstanceOf[Map[String, Any]]
+      val statsAsMap = edgeAsMap.getOrElse(StatsKey, Map[String, Any]()).asInstanceOf[Map[String, Any]]
+      val maybeSourceNode = findNode(SourceKey, edgeAsMap)
+      val maybeDestinationNode = findNode(DestinationKey, edgeAsMap)
+      val str = EdgesFormatString.format(id,
+        maybeSourceNode.map(nodeToIdMap(_).toString).getOrElse(""),
+        maybeDestinationNode.map(nodeToIdMap(_).toString).getOrElse(""),
+        Try(statsAsMap(CountKey).asInstanceOf[BigInt].toString()).getOrElse(""),
+        Try(statsAsMap(LastSeenKey).asInstanceOf[BigInt].toString()).getOrElse(""),
+        Try(statsAsMap(ErrorCountKey).asInstanceOf[BigInt].toString()).getOrElse(""),
+        Try(edgeAsMap(EffectiveFromKey).asInstanceOf[BigInt].toString()).getOrElse(""),
+        Try(edgeAsMap(EffectiveToKey).asInstanceOf[BigInt].toString()).getOrElse(""))
+      stringBuilder.append(str)
+      id = id + 1
+    }
+    stringBuilder.toString
+  }
+
+ private def createNodesCsvList(nodeToIdMap: mutable.Map[Node, Long]): String = {
+ val stringBuilder = new mutable.StringBuilder(NodesHeader)
+ nodeToIdMap.foreach {
+ case (node, id) =>
+ val name = surroundWithQuotesIfNecessary(node.name)
+ val infrastructureProvider = surroundWithQuotesIfNecessary(node.infrastructureProvider.getOrElse(""))
+ val tier = surroundWithQuotesIfNecessary(node.tier.getOrElse(""))
+ val str = NodesFormatString.format(id, name, infrastructureProvider, tier)
+ stringBuilder.append(str)
+ }
+ stringBuilder.toString
+ }
+
+  // See http://www.creativyst.com/Doc/Articles/CSV/CSV01.htm#FileFormat
+  private def surroundWithQuotesIfNecessary(string: String): String = {
+    // Escape embedded quotes by doubling them; a field containing leading/trailing spaces,
+    // commas, or quotes must also be surrounded with quotes (and the quoted text must be
+    // the escaped string, not the raw one)
+    val stringWithEscapedQuotes = string.replaceAll("\"", "\"\"")
+    if (string.startsWith(" ") || string.endsWith(" ") || string.contains(",") || string.contains("\"")) {
+      "\"" + stringWithEscapedQuotes + "\""
+    } else {
+      stringWithEscapedQuotes
+    }
+  }
+
+ private def getNodeToIdMap(edgesList: List[Any]): mutable.Map[Node, Long] = {
+ val sourceNodes = findNodesOfType(edgesList, SourceKey)
+ val destinationNodes = findNodesOfType(edgesList, DestinationKey)
+ val nodes = (sourceNodes ::: destinationNodes).distinct
+ val nodeToIdMap = mutable.Map[Node, Long]()
+ var nodeId: Long = 1L
+ for (node <- nodes) {
+ nodeToIdMap(node) = nodeId
+ nodeId = nodeId + 1
+ }
+ nodeToIdMap
+ }
+
+ private def findNodesOfType(edgesList: List[Any], nodeType: String) = {
+ val nodes = for {
+ edge <- edgesList
+ } yield {
+ val edgeMap = edge.asInstanceOf[Map[String, Any]]
+ val sourceNode: Option[Node] = findNode(nodeType, edgeMap)
+ sourceNode
+ }
+ nodes.flatten
+ }
+
+  private def findNode(nodeType: String, edgeMap: Map[String, Any]): Option[Node] = {
+    for {
+      nodeAny <- edgeMap.get(nodeType)
+      map = nodeAny.asInstanceOf[Map[String, Any]]
+      nameAny <- map.get(NameKey)
+    } yield {
+      val tags = map.getOrElse(TagsKey, Map.empty[String, String]).asInstanceOf[Map[String, String]]
+      Node(nameAny.asInstanceOf[String], tags.get(InfrastructureProviderKey), tags.get(TierKey))
+    }
+  }
+}
diff --git a/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/Node.scala b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/Node.scala
new file mode 100644
index 000000000..69e35747c
--- /dev/null
+++ b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/Node.scala
@@ -0,0 +1,22 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+case class Node(name: String,
+ infrastructureProvider: Option[String],
+ tier: Option[String])
\ No newline at end of file
diff --git a/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/NodeWithId.scala b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/NodeWithId.scala
new file mode 100644
index 000000000..13f661d5c
--- /dev/null
+++ b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/NodeWithId.scala
@@ -0,0 +1,30 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+case class NodeWithId(id: Long,
+ name: String,
+ xHaystackInfrastructureProvider: Option[String],
+ tier: Option[String])
+
+object NodeWithId {
+ def nodeMapper: NodeWithId => Node = (nodeWithId: NodeWithId) =>
+ Node(nodeWithId.name,
+ nodeWithId.xHaystackInfrastructureProvider,
+ nodeWithId.tier)
+}
\ No newline at end of file
diff --git a/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/NodesAndEdges.scala b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/NodesAndEdges.scala
new file mode 100644
index 000000000..5c1cca6c0
--- /dev/null
+++ b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/NodesAndEdges.scala
@@ -0,0 +1,20 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+case class NodesAndEdges(nodes: String, edges: String)
diff --git a/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/S3SnapshotStore.scala b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/S3SnapshotStore.scala
new file mode 100644
index 000000000..a1af57d6a
--- /dev/null
+++ b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/S3SnapshotStore.scala
@@ -0,0 +1,163 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+import java.time.Instant
+
+import com.amazonaws.regions.Regions
+import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
+import com.amazonaws.services.s3.model.{ListObjectsV2Request, ListObjectsV2Result}
+import com.expedia.www.haystack.service.graph.snapshot.store.Constants.{DotCsv, SlashEdges, SlashNodes}
+import com.expedia.www.haystack.service.graph.snapshot.store.S3SnapshotStore.createItemName
+
+import scala.collection.JavaConverters._
+import scala.math.Ordering.String.max
+
+/**
+ * Companion object, with public AmazonS3 that can be set to a mock for unit tests
+ */
+object S3SnapshotStore {
+ var amazonS3: AmazonS3 = AmazonS3ClientBuilder.standard.withRegion(Regions.US_WEST_2).build
+
+ def createItemName(folderName: String, fileName: String): String = {
+ s"$folderName/$fileName"
+ }
+}
+
+/**
+ * Object that stores snapshots in S3
+ *
+ * @param s3Client client with which to communicate with S3
+ * @param bucketName name of the bucket
+ * @param folderName name of the "folder" in the bucket (becomes the prefix of the S3 item name)
+ * @param listObjectsBatchSize number of results to return with each listObjectsV2 request to S3; smaller values
+ * use less memory at the cost of more calls to S3. The best value would be the maximum
+ * number of snapshots that will exist in S3 before being purged; for example, with a
+ * one hour snapshot interval and a snapshot TTL of 1 year, 366 * 24 = 8784 would be a good
+ * value (perhaps rounded to 10,000). Using a "good" value for listObjectsBatchSize
+ * improves the performance of calls to read from the S3SnapshotStore.
+ */
+class S3SnapshotStore(val s3Client: AmazonS3,
+ val bucketName: String,
+ val folderName: String,
+ val listObjectsBatchSize: Int) extends SnapshotStore {
+ private val itemNamePrefix = folderName + "/"
+
+ def this() = {
+ this(S3SnapshotStore.amazonS3, "", "", 0)
+ }
+
+ /**
+ * Builds an S3SnapshotStore implementation given arguments to pass to the constructor
+ *
+ * @param constructorArguments
+ * - '''constructorArguments[0]''' is unused by this method but will be the fully qualified name of the
+ * S3SnapshotStore class, i.e. "com.expedia.www.haystack.service.graph.snapshot.store.S3SnapshotStore"
+ * - '''constructorArguments[1]''' must be a String that specifies the bucket
+ * - '''constructorArguments[2]''' must be a String that specifies the folder in the bucket
+ * - '''constructorArguments[3]''' must be a String that specifies the batch count when listing items in the bucket
+ * @return the S3SnapshotStore to use
+ */
+ override def build(constructorArguments: Array[String]): SnapshotStore = {
+ val bucketName = constructorArguments(1)
+ val folderName = constructorArguments(2)
+ val listObjectsBatchSize = if (constructorArguments.length > 3) constructorArguments(3).toInt else 0
+ new S3SnapshotStore(s3Client, bucketName, folderName, listObjectsBatchSize)
+ }
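+
+  // Hypothetical invocation (bucket, folder, and batch size invented for illustration):
+  //   build(Array("com.expedia.www.haystack.service.graph.snapshot.store.S3SnapshotStore",
+  //     "haystack-snapshots", "service-graph", "10000"))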
+
+ /**
+ * Writes a string to the persistent store
+ *
+ * @param instant date/time of the write, used to create the name, which will later be used in read() and purge()
+ * @param content String to write
+ * @return the item names of the two objects written to S3 (does not include the bucket name): the first item name
+ * returned will end in "/nodes.csv" and the second will end in "/edges.csv"
+ */
+ override def write(instant: Instant,
+ content: String): (String, String) = {
+ if (!s3Client.doesBucketExistV2(bucketName)) {
+ s3Client.createBucket(bucketName)
+ }
+ val nodesAndEdges = transformJsonToNodesAndEdges(content)
+ write(bucketName, instant, SlashNodes + DotCsv, nodesAndEdges.nodes)
+ write(bucketName, instant, SlashEdges + DotCsv, nodesAndEdges.edges)
+ val itemNameBase = createIso8601FileName(instant)
+ // Include the ".csv" extension so the returned names match the keys actually written above
+ (createItemName(folderName, itemNameBase + SlashNodes + DotCsv),
+ createItemName(folderName, itemNameBase + SlashEdges + DotCsv))
+ }
+
+ private def write(bucketName: String, instant: Instant, suffix: String, content: String) = {
+ val itemNameBase = createItemName(folderName, createIso8601FileName(instant))
+ val itemName = itemNameBase + suffix
+ s3Client.putObject(bucketName, itemName, content)
+ }
+
+ /**
+ * Reads content from the persistent store
+ *
+ * @param instant date/time of the read
+ * @return the content of the youngest item whose ISO-8601-based name is earlier than or equal to instant
+ * @throws IllegalArgumentException if listObjectsBatchSize <= 0
+ */
+ override def read(instant: Instant): Option[String] = {
+ var optionString: Option[String] = None
+ val itemName = getItemNameOfYoungestNodesItemBeforeInstant(instant)
+ if (itemName.isDefined) {
+ val nodesItemName = itemName.get
+ val nodesRawData = s3Client.getObjectAsString(bucketName, nodesItemName)
+ val edgesItemName = nodesItemName.replace(SlashNodes, SlashEdges)
+ val edgesRawData = s3Client.getObjectAsString(bucketName, edgesItemName)
+ optionString = Some(transformNodesAndEdgesToJson(nodesRawData, edgesRawData))
+ }
+ optionString
+ }
+
+ private def getItemNameOfYoungestNodesItemBeforeInstant(instant: Instant): Option[String] = {
+ var optionString: Option[String] = None
+ if (listObjectsBatchSize > 0) {
+ val listObjectsV2Request = new ListObjectsV2Request().withBucketName(bucketName).withMaxKeys(listObjectsBatchSize)
+ val instantAsItemName = createItemName(folderName, createIso8601FileName(instant))
+ var listObjectsV2Result: ListObjectsV2Result = null
+ do {
+ // Pass the request object so that maxKeys and the continuation token take effect on each page
+ listObjectsV2Result = s3Client.listObjectsV2(listObjectsV2Request)
+ val objectSummaries = listObjectsV2Result.getObjectSummaries.asScala
+ .filter(_.getKey.startsWith(itemNamePrefix))
+ .filter(_.getKey.endsWith(SlashNodes + DotCsv))
+ .filter(_.getKey.substring(0, instantAsItemName.length) <= instantAsItemName)
+ val potentialMax = if (objectSummaries.nonEmpty) Some(objectSummaries.maxBy(_.getKey).getKey) else None
+ (optionString, potentialMax) match {
+ case (None, None) =>
+ optionString = None
+ case (None, Some(_)) =>
+ optionString = potentialMax
+ case (Some(_), None) =>
+ // optionString stays unchanged
+ case (Some(optionStringItemName), Some(potentialMaxItemName)) =>
+ optionString = Some(max(optionStringItemName, potentialMaxItemName))
+ }
+ listObjectsV2Request.setContinuationToken(listObjectsV2Result.getNextContinuationToken)
+ } while (listObjectsV2Result.isTruncated)
+ } else {
+ throw new IllegalArgumentException("S3SnapshotStore objects that read from S3 must be created with a positive "
+ + s"value of listObjectsBatchSize, not the [$listObjectsBatchSize] value that was provided")
+ }
+ optionString
+ }
+
+}
diff --git a/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/SnapshotStore.scala b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/SnapshotStore.scala
new file mode 100644
index 000000000..1b82233d6
--- /dev/null
+++ b/service-graph/snapshot-store/src/main/scala/com/expedia/www/haystack/service/graph/snapshot/store/SnapshotStore.scala
@@ -0,0 +1,83 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+import java.time.Instant
+import java.time.format.DateTimeFormatterBuilder
+
+trait SnapshotStore {
+ /**
+ * Builds a SnapshotStore implementation given arguments to pass to the constructor
+ *
+ * @param constructorArguments arguments to pass to the constructor
+ * @return the concrete SnapshotStore to use
+ */
+ def build(constructorArguments: Array[String]): SnapshotStore
+
+ /**
+ * Writes a string to the persistent store
+ *
+ * @param instant date/time of the write, used to create the name, which will later be used in read() and purge()
+ * @param content String to write
+ * @return implementation-dependent value; see implementation documentation for details
+ */
+ def write(instant: Instant,
+ content: String): AnyRef
+
+ /**
+ * Reads content from the persistent store
+ *
+ * @param instant date/time of the read
+ * @return the content of the youngest item whose ISO-8601-based name is earlier than or equal to instant
+ */
+ def read(instant: Instant): Option[String]
+
+ /**
+ * Purges items from the persistent store (optional operation; the S3 implementation of SnapshotStore will use an S3
+ * lifecycle rule to purge items, but the file implementation must purge old files)
+ *
+ * @param instant date/time of items to be purged; items whose ISO-8601-based name is earlier than or equal to
+ * instant will be purged
+ * @return the number of items purged
+ */
+  def purge(instant: Instant): Integer = {
+    // Override if purge code is needed by the particular SnapshotStore implementation
+    0
+  }
+
+ private val formatter = new DateTimeFormatterBuilder().appendInstant(3).toFormatter
+
+ def createIso8601FileName(instant: Instant): String = {
+ formatter.format(instant)
+ }
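+
+  // ISO-8601 UTC timestamps of fixed precision (e.g. "2018-12-11T20:20:21.123Z") sort
+  // lexicographically in chronological order, which is why read() and purge() can compare
+  // file and item names as plain strings.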
+
+ // Not stateful, so only one object is needed
+ private val jsonIntoDataFramesTransformer = new JsonIntoDataFramesTransformer
+
+ def transformJsonToNodesAndEdges(json: String): NodesAndEdges = {
+ jsonIntoDataFramesTransformer.parseJson(json)
+ }
+
+ def transformNodesAndEdgesToJson(nodesRawData: String,
+ edgesRawData: String): String = {
+ // Stateful because of instance variable DataFramesIntoJsonTransformer.prependComma, so each parse needs one
+ val dataFramesIntoJsonTransformer = new DataFramesIntoJsonTransformer
+
+ dataFramesIntoJsonTransformer.parseDataFrames(nodesRawData, edgesRawData)
+ }
+}
diff --git a/service-graph/snapshot-store/src/test/resources/serviceGraph.json b/service-graph/snapshot-store/src/test/resources/serviceGraph.json
new file mode 100644
index 000000000..7f03f0708
--- /dev/null
+++ b/service-graph/snapshot-store/src/test/resources/serviceGraph.json
@@ -0,0 +1,2547 @@
+{
+ "edges": [
+ {
+ "source": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "provideradapter-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 202931,
+ "lastSeen": 1544575410111,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587937
+ },
+ {
+ "source": {
+ "name": "daily-data-update-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "async-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 5136140,
+ "lastSeen": 1544575571142,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587796
+ },
+ {
+ "source": {
+ "name": "search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "provideradapter-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 134125,
+ "lastSeen": 1544575498167,
+ "errorCount": 21882
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587999
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "front-door-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "stats": {
+ "count": 17367200,
+ "lastSeen": 1544575567920,
+ "errorCount": 103264
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588026
+ },
+ {
+ "source": {
+ "name": "westeros-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "stark-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 157201,
+ "lastSeen": 1544575421793,
+ "errorCount": 36
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588024
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "rails-web",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 130,
+ "lastSeen": 1544573988361,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "new-shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3531583,
+ "lastSeen": 1544575569804,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587705
+ },
+ {
+ "source": {
+ "name": "authentication-service",
+ "tags": {}
+ },
+ "destination": {
+ "name": "userinteraction-web",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 76788,
+ "lastSeen": 1544575553640,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587815
+ },
+ {
+ "source": {
+ "name": "stark-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "context-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 180327,
+ "lastSeen": 1544575421874,
+ "errorCount": 2088
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587693
+ },
+ {
+ "source": {
+ "name": "stark-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "template-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 169216,
+ "lastSeen": 1544575421889,
+ "errorCount": 1
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588068
+ },
+ {
+ "source": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "westeros-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 22890079,
+ "lastSeen": 1544575572251,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587672
+ },
+ {
+ "source": {
+ "name": "api-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-content-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2,
+ "lastSeen": 1544554416805,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544553000000,
+ "effectiveTo": 1544554800000
+ },
+ {
+ "source": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "chargeback-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 60,
+ "lastSeen": 1544574583227,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "adapter-aws",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 303022,
+ "lastSeen": 1544575592096,
+ "errorCount": 1786
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587954
+ },
+ {
+ "source": {
+ "name": "new-shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "multishop",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3893347,
+ "lastSeen": 1544575569844,
+ "errorCount": 2
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587734
+ },
+ {
+ "source": {
+ "name": "westeros-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "guide-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1442998,
+ "lastSeen": 1544575571677,
+ "errorCount": 61
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587753
+ },
+ {
+ "source": {
+ "name": "search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "help-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2826475,
+ "lastSeen": 1544575555819,
+ "errorCount": 1624211
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587681
+ },
+ {
+ "source": {
+ "name": "authentication-service",
+ "tags": {}
+ },
+ "destination": {
+ "name": "front-door-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "stats": {
+ "count": 99413,
+ "lastSeen": 1544575499415,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587706
+ },
+ {
+ "source": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "provideradapter-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3258,
+ "lastSeen": 1544575229219,
+ "errorCount": 800
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587979
+ },
+ {
+ "source": {
+ "name": "payment-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "fx",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 233615,
+ "lastSeen": 1544575277983,
+ "errorCount": 16
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587689
+ },
+ {
+ "source": {
+ "name": "api-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 196315,
+ "lastSeen": 1544575522183,
+ "errorCount": 4
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587866
+ },
+ {
+ "source": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "booking-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3,
+ "lastSeen": 1544559964099,
+ "errorCount": 1
+ },
+ "effectiveFrom": 1544558400000,
+ "effectiveTo": 1544560200000
+ },
+ {
+ "source": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "chargeback-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 169,
+ "lastSeen": 1544574617075,
+ "errorCount": 38
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588010
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1,2",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "checkout-payment-domain-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "stats": {
+ "count": 342981,
+ "lastSeen": 1544575571750,
+ "errorCount": 126
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587681
+ },
+ {
+ "source": {
+ "name": "forge-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "context-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 5042343,
+ "lastSeen": 1544575571591,
+ "errorCount": 405
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587826
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "3,1,2",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "stats": {
+ "count": 21474934,
+ "lastSeen": 1544575571886,
+ "errorCount": 26012
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588099
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "his-web",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 698168,
+ "lastSeen": 1544575554236,
+ "errorCount": 87771
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587677
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "hers-web",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2907715,
+ "lastSeen": 1544575570922,
+ "errorCount": 33645
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587793
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "new-shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 116,
+ "lastSeen": 1544574157710,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "lpt-web",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1821807,
+ "lastSeen": 1544575567232,
+ "errorCount": 6746
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588011
+ },
+ {
+ "source": {
+ "name": "satellite",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "endurance-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 12111,
+ "lastSeen": 1544575506385,
+ "errorCount": 140
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588024
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "stats": {
+ "count": 570805,
+ "lastSeen": 1544575504575,
+ "errorCount": 32
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587811
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "stats": {
+ "count": 74,
+ "lastSeen": 1544573552180,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "loom-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "lists-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 18356,
+ "lastSeen": 1544575525038,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588011
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "2",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "mars",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 73614,
+ "lastSeen": 1544575526411,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587848
+ },
+ {
+ "source": {
+ "name": "mormont-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "seo-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 596144,
+ "lastSeen": 1544575525706,
+ "errorCount": 3
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587937
+ },
+ {
+ "source": {
+ "name": "info-site-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-cart",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 6,
+ "lastSeen": 1544572410049,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544500800000,
+ "effectiveTo": 1544572800000
+ },
+ {
+ "source": {
+ "name": "shopping-detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2838890,
+ "lastSeen": 1544575569418,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587682
+ },
+ {
+ "source": {
+ "name": "loom-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "api-customer",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 8583,
+ "lastSeen": 1544575208652,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587796
+ },
+ {
+ "source": {
+ "name": "multishop",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "pricing-engine",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 15560819,
+ "lastSeen": 1544575569839,
+ "errorCount": 35
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587689
+ },
+ {
+ "source": {
+ "name": "westeros-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "forge-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3161466,
+ "lastSeen": 1544575571588,
+ "errorCount": 4817
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587639
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "2",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "third-party-provider-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 9735,
+ "lastSeen": 1544575279207,
+ "errorCount": 3
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587827
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "insurance-shopping-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 410,
+ "lastSeen": 1544574748096,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587918
+ },
+ {
+ "source": {
+ "name": "shopping-detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-user-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1214950,
+ "lastSeen": 1544575528611,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587723
+ },
+ {
+ "source": {
+ "name": "ticket-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "stats": {
+ "count": 20845,
+ "lastSeen": 1544575478589,
+ "errorCount": 68
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587644
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "third-party-api-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 5000,
+ "lastSeen": 1544574936681,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587722
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "progressive-webapp-api",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 687220,
+ "lastSeen": 1544575524183,
+ "errorCount": 20
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587866
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "compositor-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 243059,
+ "lastSeen": 1544575396324,
+ "errorCount": 22
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588026
+ },
+ {
+ "source": {
+ "name": "westeros-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "mormont-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 652939,
+ "lastSeen": 1544575525612,
+ "errorCount": 68
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587815
+ },
+ {
+ "source": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "ticket-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 27317,
+ "lastSeen": 1544575505207,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588010
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "userinteraction-web",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2907462,
+ "lastSeen": 1544575570897,
+ "errorCount": 10681
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587808
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "info-site-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 84704,
+ "lastSeen": 1544575480474,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587827
+ },
+ {
+ "source": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "melisandre-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 125064,
+ "lastSeen": 1544575501716,
+ "errorCount": 2985
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588087
+ },
+ {
+ "source": {
+ "name": "progressive-webapp-api",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2469688,
+ "lastSeen": 1544575569284,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587909
+ },
+ {
+ "source": {
+ "name": "guide-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "template-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 806,
+ "lastSeen": 1544573623862,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "api-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 8,
+ "lastSeen": 1544569762201,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544491800000,
+ "effectiveTo": 1544571000000
+ },
+ {
+ "source": {
+ "name": "search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "controller-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 20783217,
+ "lastSeen": 1544575572498,
+ "errorCount": 246
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587641
+ },
+ {
+ "source": {
+ "name": "progressive-webapp-api",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 973286,
+ "lastSeen": 1544575570804,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588061
+ },
+ {
+ "source": {
+ "name": "endurance-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 18729,
+ "lastSeen": 1544575266335,
+ "errorCount": 16
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587723
+ },
+ {
+ "source": {
+ "name": "help-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "adapter-aws",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 379955,
+ "lastSeen": 1544575554493,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587808
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "targaryen-web",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 8861,
+ "lastSeen": 1544575485661,
+ "errorCount": 324
+ },
+ "effectiveFrom": 1544500800000,
+ "effectiveTo": 1544575587634
+ },
+ {
+ "source": {
+ "name": "guide-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "seo-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 654,
+ "lastSeen": 1544573560757,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "drogo-api",
+ "tags": {}
+ },
+ "stats": {
+ "count": 11154,
+ "lastSeen": 1544575207980,
+ "errorCount": 11
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587669
+ },
+ {
+ "source": {
+ "name": "payment-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "lannister-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 13974992,
+ "lastSeen": 1544575556069,
+ "errorCount": 9
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587798
+ },
+ {
+ "source": {
+ "name": "info-site-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "stats": {
+ "count": 234369,
+ "lastSeen": 1544575570813,
+ "errorCount": 6024
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587952
+ },
+ {
+ "source": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-content-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 19029006,
+ "lastSeen": 1544575571645,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587677
+ },
+ {
+ "source": {
+ "name": "new-shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "fx",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 304458,
+ "lastSeen": 1544575397477,
+ "errorCount": 5
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587674
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "loom-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 90053,
+ "lastSeen": 1544575481890,
+ "errorCount": 7
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587788
+ },
+ {
+ "source": {
+ "name": "new-shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "payment-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 20680712,
+ "lastSeen": 1544575556069,
+ "errorCount": 32
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587782
+ },
+ {
+ "source": {
+ "name": "api-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 56878,
+ "lastSeen": 1544575522176,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587882
+ },
+ {
+ "source": {
+ "name": "search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "margaery-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 14646996,
+ "lastSeen": 1544575571980,
+ "errorCount": 145
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587689
+ },
+ {
+ "source": {
+ "name": "loom-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "api-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 985553,
+ "lastSeen": 1544575571604,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588060
+ },
+ {
+ "source": {
+ "name": "tyrion-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "chargeback-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 176,
+ "lastSeen": 1544573823589,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "help-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "rules-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 237982,
+ "lastSeen": 1544575571933,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587954
+ },
+ {
+ "source": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 33794745,
+ "lastSeen": 1544575571645,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587875
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "2",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "bolton-service",
+ "tags": {}
+ },
+ "stats": {
+ "count": 319715,
+ "lastSeen": 1544575500355,
+ "errorCount": 1004
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587658
+ },
+ {
+ "source": {
+ "name": "ticket-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "melisandre-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 13072,
+ "lastSeen": 1544575568510,
+ "errorCount": 59
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587716
+ },
+ {
+ "source": {
+ "name": "help-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "provideradapter-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1403232,
+ "lastSeen": 1544575555851,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587827
+ },
+ {
+ "source": {
+ "name": "tyrion-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "provideradapter-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 26,
+ "lastSeen": 1544571310584,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544572800000
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 32518,
+ "lastSeen": 1544575275982,
+ "errorCount": 75
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587870
+ },
+ {
+ "source": {
+ "name": "forge-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "template-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3548918,
+ "lastSeen": 1544575571596,
+ "errorCount": 30
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587738
+ },
+ {
+ "source": {
+ "name": "tips-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 764,
+ "lastSeen": 1544574548576,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "baratheon-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 578577,
+ "lastSeen": 1544575571747,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587662
+ },
+ {
+ "source": {
+ "name": "westeros-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "hodor-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 6512590,
+ "lastSeen": 1544575570949,
+ "errorCount": 1296924
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587762
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "ecommerce-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 73,
+ "lastSeen": 1544572581996,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544493600000,
+ "effectiveTo": 1544572800000
+ },
+ {
+ "source": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "provideradapter-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 9094,
+ "lastSeen": 1544575240906,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588090
+ },
+ {
+ "source": {
+ "name": "forge-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "seo-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3369864,
+ "lastSeen": 1544575571631,
+ "errorCount": 6
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587848
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "varys-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 68388,
+ "lastSeen": 1544575520781,
+ "errorCount": 80
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587934
+ },
+ {
+ "source": {
+ "name": "bronn-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 99784,
+ "lastSeen": 1544575571751,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544563800000,
+ "effectiveTo": 1544575587662
+ },
+ {
+ "source": {
+ "name": "stark-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "seo-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 167207,
+ "lastSeen": 1544575421914,
+ "errorCount": 1
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587725
+ },
+ {
+ "source": {
+ "name": "tyrion-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "adapter-aws",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 8112,
+ "lastSeen": 1544575271597,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588026
+ },
+ {
+ "source": {
+ "name": "guide-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "context-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1984413,
+ "lastSeen": 1544575571681,
+ "errorCount": 1983415
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587723
+ },
+ {
+ "source": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "booking-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 37252,
+ "lastSeen": 1544575265116,
+ "errorCount": 1136
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588025
+ },
+ {
+ "source": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1473126,
+ "lastSeen": 1544575571747,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587672
+ },
+ {
+ "source": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "bronn-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1,
+ "lastSeen": 1544565534137,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544563800000,
+ "effectiveTo": 1544565600000
+ },
+ {
+ "source": {
+ "name": "baratheon-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "bronn-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 100667,
+ "lastSeen": 1544575571750,
+ "errorCount": 1
+ },
+ "effectiveFrom": 1544563800000,
+ "effectiveTo": 1544575587848
+ },
+ {
+ "source": {
+ "name": "varys-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3,
+ "lastSeen": 1544552474213,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544553000000
+ },
+ {
+ "source": {
+ "name": "endurance-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "authentication-service",
+ "tags": {}
+ },
+ "stats": {
+ "count": 8719,
+ "lastSeen": 1544575265175,
+ "errorCount": 77
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587674
+ },
+ {
+ "source": {
+ "name": "brienne-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "geo-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 38627,
+ "lastSeen": 1544557965110,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544558400000
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "stats": {
+ "count": 587165,
+ "lastSeen": 1544575520505,
+ "errorCount": 1
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587935
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "cache-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 72562,
+ "lastSeen": 1544575498524,
+ "errorCount": 206
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587934
+ },
+ {
+ "source": {
+ "name": "hodor-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 10500513,
+ "lastSeen": 1544575570958,
+ "errorCount": 76483
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588061
+ },
+ {
+ "source": {
+ "name": "margaery-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "airpricingservice",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 16824540,
+ "lastSeen": 1544575571994,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587641
+ },
+ {
+ "source": {
+ "name": "shopping-content-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 30148726,
+ "lastSeen": 1544575571973,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588099
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "domain-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 46,
+ "lastSeen": 1544569236656,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544497200000,
+ "effectiveTo": 1544571000000
+ },
+ {
+ "source": {
+ "name": "progressive-webapp-api",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-content-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 5277998,
+ "lastSeen": 1544575572135,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587909
+ },
+ {
+ "source": {
+ "name": "progressive-webapp-api",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3891685,
+ "lastSeen": 1544575571930,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588087
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "2",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "authentication-service",
+ "tags": {}
+ },
+ "stats": {
+ "count": 170461,
+ "lastSeen": 1544575499257,
+ "errorCount": 11
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587699
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "session-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 611347,
+ "lastSeen": 1544575568839,
+ "errorCount": 1
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588024
+ },
+ {
+ "source": {
+ "name": "ticket-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "booking-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 13140,
+ "lastSeen": 1544575218424,
+ "errorCount": 23
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587914
+ },
+ {
+ "source": {
+ "name": "shopping-detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-content-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 5779855,
+ "lastSeen": 1544575569418,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587937
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "2",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "greyjoy-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3984995,
+ "lastSeen": 1544575570003,
+ "errorCount": 106528
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587704
+ },
+ {
+ "source": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "ticket-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2,
+ "lastSeen": 1544559967407,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544558400000,
+ "effectiveTo": 1544560200000
+ },
+ {
+ "source": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "margaery-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2674151,
+ "lastSeen": 1544575594133,
+ "errorCount": 49
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587953
+ },
+ {
+ "source": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-user-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 18737197,
+ "lastSeen": 1544575571645,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587979
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "satellite",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 217364,
+ "lastSeen": 1544575482983,
+ "errorCount": 290
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587723
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "shopping-cart",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 10386,
+ "lastSeen": 1544575280832,
+ "errorCount": 204
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587797
+ },
+ {
+ "source": {
+ "name": "mormont-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "context-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 844962,
+ "lastSeen": 1544575554067,
+ "errorCount": 28241
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587815
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "user-profile-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 151657,
+ "lastSeen": 1544575488509,
+ "errorCount": 12
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588060
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "shae-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "stats": {
+ "count": 7126,
+ "lastSeen": 1544575152362,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587866
+ },
+ {
+ "source": {
+ "name": "mormont-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "template-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 832484,
+ "lastSeen": 1544575554075,
+ "errorCount": 5
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587723
+ },
+ {
+ "source": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "rules-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 586462,
+ "lastSeen": 1544575592045,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587763
+ },
+ {
+ "source": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "controller-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1297487,
+ "lastSeen": 1544575592045,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587693
+ }
+ ]
+}
\ No newline at end of file
diff --git a/service-graph/snapshot-store/src/test/resources/serviceGraph_edges.csv b/service-graph/snapshot-store/src/test/resources/serviceGraph_edges.csv
new file mode 100644
index 000000000..3d4ebfa64
--- /dev/null
+++ b/service-graph/snapshot-store/src/test/resources/serviceGraph_edges.csv
@@ -0,0 +1,124 @@
+id,sourceId,destinationId,statsCount,statsLastSeen,statsErrorCount,effectiveFrom,effectiveTo
+1,1,45,202931,1544575410111,0,1544488200000,1544575587937
+2,2,46,5136140,1544575571142,0,1544488200000,1544575587796
+3,3,45,134125,1544575498167,21882,1544488200000,1544575587999
+4,4,47,17367200,1544575567920,103264,1544488200000,1544575588026
+5,5,8,157201,1544575421793,36,1544488200000,1544575588024
+6,4,48,130,1544573988361,0,1544488200000,1544574600000
+7,6,12,3531583,1544575569804,0,1544488200000,1544575587705
+8,7,49,76788,1544575553640,0,1544488200000,1544575587815
+9,8,50,180327,1544575421874,2088,1544488200000,1544575587693
+10,8,51,169216,1544575421889,1,1544488200000,1544575588068
+11,9,5,22890079,1544575572251,0,1544488200000,1544575587672
+12,10,44,2,1544554416805,0,1544553000000,1544554800000
+13,11,52,60,1544574583227,0,1544488200000,1544574600000
+14,1,53,303022,1544575592096,1786,1544488200000,1544575587954
+15,12,28,3893347,1544575569844,2,1544488200000,1544575587734
+16,5,32,1442998,1544575571677,61,1544488200000,1544575587753
+17,13,34,2826475,1544575555819,1624211,1544488200000,1544575587681
+18,7,54,99413,1544575499415,0,1544488200000,1544575587706
+19,14,45,3258,1544575229219,800,1544488200000,1544575587979
+20,15,55,233615,1544575277983,16,1544488200000,1544575587689
+21,10,56,196315,1544575522183,4,1544488200000,1544575587866
+22,16,57,3,1544559964099,1,1544558400000,1544560200000
+23,14,52,169,1544574617075,38,1544488200000,1544575588010
+24,17,58,342981,1544575571750,126,1544488200000,1544575587681
+25,18,50,5042343,1544575571591,405,1544488200000,1544575587826
+26,19,59,21474934,1544575571886,26012,1544488200000,1544575588099
+27,4,60,698168,1544575554236,87771,1544488200000,1544575587677
+28,4,61,2907715,1544575570922,33645,1544488200000,1544575587793
+29,20,12,116,1544574157710,0,1544488200000,1544574600000
+30,4,62,1821807,1544575567232,6746,1544488200000,1544575588011
+31,21,33,12111,1544575506385,140,1544488200000,1544575588024
+32,22,1,570805,1544575504575,32,1544488200000,1544575587811
+33,4,3,74,1544573552180,0,1544488200000,1544574600000
+34,23,63,18356,1544575525038,0,1544488200000,1544575588011
+35,24,64,73614,1544575526411,0,1544488200000,1544575587848
+36,25,65,596144,1544575525706,3,1544488200000,1544575587937
+37,26,66,6,1544572410049,0,1544500800000,1544572800000
+38,27,6,2838890,1544575569418,0,1544488200000,1544575587682
+39,23,67,8583,1544575208652,0,1544488200000,1544575587796
+40,28,68,15560819,1544575569839,35,1544488200000,1544575587689
+41,5,18,3161466,1544575571588,4817,1544488200000,1544575587639
+42,29,69,9735,1544575279207,3,1544488200000,1544575587827
+43,4,70,410,1544574748096,0,1544488200000,1544575587918
+44,27,71,1214950,1544575528611,0,1544488200000,1544575587723
+45,30,14,20845,1544575478589,68,1544488200000,1544575587644
+46,4,72,5000,1544574936681,0,1544488200000,1544575587722
+47,4,31,687220,1544575524183,20,1544488200000,1544575587866
+48,4,73,243059,1544575396324,22,1544488200000,1544575588026
+49,5,25,652939,1544575525612,68,1544488200000,1544575587815
+50,14,30,27317,1544575505207,0,1544488200000,1544575588010
+51,4,49,2907462,1544575570897,10681,1544488200000,1544575587808
+52,4,26,84704,1544575480474,0,1544488200000,1544575587827
+53,14,74,125064,1544575501716,2985,1544488200000,1544575588087
+54,31,27,2469688,1544575569284,0,1544488200000,1544575587909
+55,32,51,806,1544573623862,0,1544488200000,1544574600000
+56,10,27,8,1544569762201,0,1544491800000,1544571000000
+57,13,75,20783217,1544575572498,246,1544488200000,1544575587641
+58,31,9,973286,1544575570804,0,1544488200000,1544575588061
+59,33,56,18729,1544575266335,16,1544488200000,1544575587723
+60,34,53,379955,1544575554493,0,1544488200000,1544575587808
+61,4,76,8861,1544575485661,324,1544500800000,1544575587634
+62,32,65,654,1544573560757,0,1544488200000,1544574600000
+63,4,77,11154,1544575207980,11,1544488200000,1544575587669
+64,15,78,13974992,1544575556069,9,1544488200000,1544575587798
+65,26,1,234369,1544575570813,6024,1544488200000,1544575587952
+66,9,44,19029006,1544575571645,0,1544488200000,1544575587677
+67,12,55,304458,1544575397477,5,1544488200000,1544575587674
+68,4,23,90053,1544575481890,7,1544488200000,1544575587788
+69,12,15,20680712,1544575556069,32,1544488200000,1544575587782
+70,10,9,56878,1544575522176,0,1544488200000,1544575587882
+71,13,43,14646996,1544575571980,145,1544488200000,1544575587689
+72,23,10,985553,1544575571604,0,1544488200000,1544575588060
+73,35,52,176,1544573823589,0,1544488200000,1544574600000
+74,34,79,237982,1544575571933,0,1544488200000,1544575587954
+75,9,6,33794745,1544575571645,0,1544488200000,1544575587875
+76,24,80,319715,1544575500355,1004,1544488200000,1544575587658
+77,30,74,13072,1544575568510,59,1544488200000,1544575587716
+78,34,45,1403232,1544575555851,0,1544488200000,1544575587827
+79,35,45,26,1544571310584,0,1544488200000,1544572800000
+80,4,56,32518,1544575275982,75,1544488200000,1544575587870
+81,18,51,3548918,1544575571596,30,1544488200000,1544575587738
+82,36,56,764,1544574548576,0,1544488200000,1544574600000
+83,9,39,578577,1544575571747,0,1544488200000,1544575587662
+84,5,42,6512590,1544575570949,1296924,1544488200000,1544575587762
+85,37,81,73,1544572581996,0,1544493600000,1544572800000
+86,1,45,9094,1544575240906,0,1544488200000,1544575588090
+87,18,65,3369864,1544575571631,6,1544488200000,1544575587848
+88,4,40,68388,1544575520781,80,1544488200000,1544575587934
+89,38,56,99784,1544575571751,0,1544563800000,1544575587662
+90,8,65,167207,1544575421914,1,1544488200000,1544575587725
+91,35,53,8112,1544575271597,0,1544488200000,1544575588026
+92,32,50,1984413,1544575571681,1983415,1544488200000,1544575587723
+93,14,57,37252,1544575265116,1136,1544488200000,1544575588025
+94,9,56,1473126,1544575571747,0,1544488200000,1544575587672
+95,9,38,1,1544565534137,0,1544563800000,1544565600000
+96,39,38,100667,1544575571750,1,1544563800000,1544575587848
+97,40,56,3,1544552474213,0,1544488200000,1544553000000
+98,33,7,8719,1544575265175,77,1544488200000,1544575587674
+99,41,82,38627,1544557965110,0,1544488200000,1544558400000
+100,22,13,587165,1544575520505,1,1544488200000,1544575587935
+101,37,83,72562,1544575498524,206,1544488200000,1544575587934
+102,42,56,10500513,1544575570958,76483,1544488200000,1544575588061
+103,43,84,16824540,1544575571994,0,1544488200000,1544575587641
+104,44,56,30148726,1544575571973,0,1544488200000,1544575588099
+105,37,85,46,1544569236656,0,1544497200000,1544571000000
+106,31,44,5277998,1544575572135,0,1544488200000,1544575587909
+107,31,56,3891685,1544575571930,0,1544488200000,1544575588087
+108,29,7,170461,1544575499257,11,1544488200000,1544575587699
+109,4,86,611347,1544575568839,1,1544488200000,1544575588024
+110,30,57,13140,1544575218424,23,1544488200000,1544575587914
+111,27,44,5779855,1544575569418,0,1544488200000,1544575587937
+112,24,87,3984995,1544575570003,106528,1544488200000,1544575587704
+113,16,30,2,1544559967407,0,1544558400000,1544560200000
+114,1,43,2674151,1544575594133,49,1544488200000,1544575587953
+115,9,71,18737197,1544575571645,0,1544488200000,1544575587979
+116,4,21,217364,1544575482983,290,1544488200000,1544575587723
+117,4,66,10386,1544575280832,204,1544488200000,1544575587797
+118,25,50,844962,1544575554067,28241,1544488200000,1544575587815
+119,4,88,151657,1544575488509,12,1544488200000,1544575588060
+120,37,89,7126,1544575152362,0,1544488200000,1544575587866
+121,25,51,832484,1544575554075,5,1544488200000,1544575587723
+122,11,79,586462,1544575592045,0,1544488200000,1544575587763
+123,11,75,1297487,1544575592045,0,1544488200000,1544575587693
diff --git a/service-graph/snapshot-store/src/test/resources/serviceGraph_nodes.csv b/service-graph/snapshot-store/src/test/resources/serviceGraph_nodes.csv
new file mode 100644
index 000000000..6f38ed66c
--- /dev/null
+++ b/service-graph/snapshot-store/src/test/resources/serviceGraph_nodes.csv
@@ -0,0 +1,90 @@
+id,name,X-HAYSTACK-INFRASTRUCTURE-PROVIDER,tier
+38,bronn-service,aws,
+19,front-door-service,"aws,dc","3,1,2"
+68,pricing-engine,aws,
+54,front-door-service,dc,
+77,drogo-api,,
+30,ticket-service,aws,
+71,shopping-user-service,aws,
+10,api-service,aws,
+15,payment-service,aws,
+88,user-profile-service,aws,
+63,lists-service,aws,
+20,front-door-service,aws,1
+66,shopping-cart,aws,
+80,bolton-service,,
+81,ecommerce-service,aws,
+14,boss-service,dc,
+6,shopping-pricing,aws,
+70,insurance-shopping-service,aws,
+65,seo-service,aws,
+41,brienne-service,aws,
+45,provideradapter-service,aws,
+55,fx,aws,
+2,daily-data-update-service,dc,
+34,help-service,aws,
+11,detail-service,dc,
+46,async-service,aws,
+26,info-site-service,aws,
+22,front-door-service,"aws,dc",1
+39,baratheon-service,aws,
+89,shae-service,dc,
+53,adapter-aws,aws,
+31,progressive-webapp-api,aws,
+37,front-door-service,dc,1
+85,domain-service,aws,
+62,lpt-web,aws,
+69,third-party-provider-service,aws,
+40,varys-service,aws,
+78,lannister-service,aws,
+7,authentication-service,,
+76,targaryen-web,aws,
+61,hers-web,aws,
+25,mormont-service,aws,
+82,geo-service,aws,
+1,detail-service,"aws,dc",
+4,internet-proxy,,
+86,session-service,aws,
+24,front-door-service,dc,2
+33,endurance-service,aws,
+72,third-party-api-service,aws,
+49,userinteraction-web,aws,
+9,shopping-search-service,aws,
+17,front-door-service,"aws,dc","1,2"
+13,search-service,"aws,dc",
+64,mars,aws,
+12,new-shopping-pricing,aws,
+67,api-customer,aws,
+8,stark-service,aws,
+27,shopping-detail-service,aws,
+36,tips-service,aws,
+44,shopping-content-service,aws,
+83,cache-service,aws,
+58,checkout-payment-domain-service,dc,
+16,boss-service,aws,
+23,loom-service,aws,
+79,rules-service,aws,
+48,rails-web,aws,
+5,westeros-service,aws,
+35,tyrion-service,aws,
+75,controller-service,aws,
+51,template-service,aws,
+60,his-web,aws,
+28,multishop,aws,
+87,greyjoy-service,aws,
+84,airpricingservice,aws,
+32,guide-service,aws,
+18,forge-service,aws,
+47,front-door-service,"aws,dc",
+73,compositor-service,aws,
+42,hodor-service,aws,
+43,margaery-service,aws,
+52,chargeback-service,aws,
+59,location-service,"aws,dc",
+56,location-service,aws,
+29,front-door-service,"aws,dc",2
+21,satellite,aws,
+3,search-service,dc,
+74,melisandre-service,aws,
+50,context-service,aws,
+57,booking-service,aws,
diff --git a/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/DataFramesIntoJsonTransformerSpec.scala b/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/DataFramesIntoJsonTransformerSpec.scala
new file mode 100644
index 000000000..bd9fd3ec3
--- /dev/null
+++ b/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/DataFramesIntoJsonTransformerSpec.scala
@@ -0,0 +1,66 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+import com.expedia.www.haystack.service.graph.snapshot.store.Constants._
+import com.expedia.www.haystack.service.graph.snapshot.store.DataFramesIntoJsonTransformer.{AddToMapError, WriteError}
+import kantan.csv.ReadError
+import org.mockito.Mockito
+import org.scalatest.mockito.MockitoSugar
+import org.scalatest.{FunSpec, Matchers, PrivateMethodTester}
+import org.slf4j.Logger
+
+import scala.collection.mutable
+
+class DataFramesIntoJsonTransformerSpec extends FunSpec with Matchers with MockitoSugar with PrivateMethodTester {
+ private val stringSnapshotStoreSpecBase = new SnapshotStoreSpecBase
+ private val mockLogger = mock[Logger]
+ private val mockReadError = mock[ReadError]
+ private val emptyMap = mutable.Map.empty[Long, Node]
+
+ describe("DataFramesIntoJsonTransformerSpec.parseDataFrames()") {
+ val dataFramesIntoJsonTransformer = new DataFramesIntoJsonTransformer(mockLogger)
+ it("should parse service graph nodes and edges into JSON") {
+ val nodesRawData = stringSnapshotStoreSpecBase.readFile(NodesCsvFileNameWithExtension)
+ val edgesRawData = stringSnapshotStoreSpecBase.readFile(EdgesCsvFileNameWithExtension)
+ val json = dataFramesIntoJsonTransformer.parseDataFrames(nodesRawData, edgesRawData)
+ json shouldEqual stringSnapshotStoreSpecBase.readFile(JsonFileNameWithExtension)
+ Mockito.verifyNoMoreInteractions(mockLogger, mockReadError)
+ }
+ }
+
+ describe("DataFramesIntoJsonTransformerSpec.write()") {
+ val dataFramesIntoJsonTransformer = new DataFramesIntoJsonTransformer(mockLogger)
+ it("should log an error when it sees a ReadError") {
+ val write = PrivateMethod[Unit]('write)
+ dataFramesIntoJsonTransformer invokePrivate write(new StringBuilder, emptyMap, Left(mockReadError))
+ Mockito.verify(mockLogger).error(WriteError, mockReadError)
+ Mockito.verifyNoMoreInteractions(mockLogger, mockReadError)
+ }
+ }
+
+ describe("DataFramesIntoJsonTransformerSpec.addToMap()") {
+ val dataFramesIntoJsonTransformer = new DataFramesIntoJsonTransformer(mockLogger)
+ it("should log an error when it sees a ReadError") {
+ val addToMap = PrivateMethod[Unit]('addToMap)
+ dataFramesIntoJsonTransformer invokePrivate addToMap(emptyMap, Left(mockReadError))
+ Mockito.verify(mockLogger).error(AddToMapError, mockReadError)
+ Mockito.verifyNoMoreInteractions(mockLogger, mockReadError)
+ }
+ }
+}
diff --git a/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/FileSnapshotStoreSpec.scala b/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/FileSnapshotStoreSpec.scala
new file mode 100644
index 000000000..e163f43f5
--- /dev/null
+++ b/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/FileSnapshotStoreSpec.scala
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+import java.io.File
+import java.nio.file.{Files, Path, Paths}
+
+import com.expedia.www.haystack.service.graph.snapshot.store.Constants.JsonFileNameWithExtension
+
+class FileSnapshotStoreSpec extends SnapshotStoreSpecBase {
+ private val directory = Files.createTempDirectory("FileSnapshotStoreSpec")
+ directory.toFile.deleteOnExit()
+
+ private val directoryName = directory.toFile.getCanonicalPath
+ private val serviceGraphJson = readFile(JsonFileNameWithExtension)
+
+ describe("FileSnapshotStore") {
+ {
+      val defaultFileSnapshotStore = new FileSnapshotStore
+      val fileSnapshotStore = defaultFileSnapshotStore.build(Array(directoryName))
+ it("should use an existing directory without trying to create it when writing") {
+ val pathsFromWrite = fileSnapshotStore.write(now, serviceGraphJson).asInstanceOf[(Path, Path)]
+ assert(pathsFromWrite._1.toFile.getCanonicalPath.startsWith(directoryName))
+ assert(pathsFromWrite._2.toFile.getCanonicalPath.startsWith(directoryName))
+ val iso8601FileName = fileSnapshotStore.createIso8601FileName(now)
+ assert(pathsFromWrite._1.toFile.getCanonicalPath.endsWith(iso8601FileName + Constants._Nodes))
+ assert(pathsFromWrite._2.toFile.getCanonicalPath.endsWith(iso8601FileName + Constants._Edges))
+ fileSnapshotStore.write(oneMillisecondBeforeNow, serviceGraphJson)
+ fileSnapshotStore.write(twoMillisecondsAfterNow, serviceGraphJson)
+ }
+ it("should return None when read() is called with a time that is too early") {
+ val fileContent = fileSnapshotStore.read(twoMillisecondsBeforeNow)
+ assert(fileContent === None)
+ }
+ it("should read the correct file when read() is called with a later time") {
+ val fileContent = fileSnapshotStore.read(oneMillisecondAfterNow)
+ assert(fileContent.get == serviceGraphJson)
+ }
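+      // The tests above wrote three snapshots (now - 1ms, now, now + 2ms), each stored as a
+      // nodes file plus an edges file, so six files exist before the purge tests run.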
+ it("should purge a single file when calling purge() with the timestamp of the oldest file") {
+ val numberOfFilesPurged = fileSnapshotStore.purge(oneMillisecondBeforeNow)
+ numberOfFilesPurged shouldEqual 2
+ }
+ it("should purge the two remaining files when calling purge() with the youngest timestamp") {
+ val numberOfFilesPurged = fileSnapshotStore.purge(twoMillisecondsAfterNow)
+ numberOfFilesPurged shouldEqual 4
+ }
+ }
+ it("should create the directory when the directory does not exist") {
+ val suffix = File.separator + "DirectoryToCreate"
+ val fileStore = new FileSnapshotStore(directoryName + suffix)
+ Paths.get(directoryName + suffix).toFile.deleteOnExit()
+      val pathsFromWrite = fileStore.write(now, serviceGraphJson)
+      assert(pathsFromWrite._1.toFile.getCanonicalPath.startsWith(directoryName + suffix))
+      assert(pathsFromWrite._2.toFile.getCanonicalPath.startsWith(directoryName + suffix))
+ val numberOfFilesPurged = fileStore.purge(now)
+ numberOfFilesPurged shouldEqual 2
+ }
+ }
+}
diff --git a/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/JsonIntoDataFramesTransformerSpec.scala b/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/JsonIntoDataFramesTransformerSpec.scala
new file mode 100644
index 000000000..c3be16f99
--- /dev/null
+++ b/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/JsonIntoDataFramesTransformerSpec.scala
@@ -0,0 +1,66 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+import com.expedia.www.haystack.service.graph.snapshot.store.Constants._
+import com.expedia.www.haystack.service.graph.snapshot.store.JsonIntoDataFramesTransformer._
+import org.scalatest.{FunSpec, Matchers}
+
+class JsonIntoDataFramesTransformerSpec extends FunSpec with Matchers {
+ private val stringSnapshotStoreSpecBase = new SnapshotStoreSpecBase
+
+ val jsonIntoDataFramesTransformer = new JsonIntoDataFramesTransformer
+ describe("JsonIntoDataFramesTransformer.parseJson()") {
+ it("should parse service graph JSON into nodes and edges") {
+ val serviceGraphJson = stringSnapshotStoreSpecBase.readFile(JsonFileNameWithExtension)
+ val nodesAndEdges = jsonIntoDataFramesTransformer.parseJson(serviceGraphJson)
+ nodesAndEdges.nodes shouldEqual stringSnapshotStoreSpecBase.readFile(NodesCsvFileNameWithExtension)
+ nodesAndEdges.edges shouldEqual stringSnapshotStoreSpecBase.readFile(EdgesCsvFileNameWithExtension)
+ }
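+    // For degenerate inputs the transformer still emits header-only CSVs; NodesHeader and
+    // EdgesHeader presumably match the header rows of the CSV fixtures above.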
+ it("should return empty nodes and edges when passed empty JSON") {
+ val nodesAndEdges = jsonIntoDataFramesTransformer.parseJson(
+ "{}")
+ nodesAndEdges.nodes shouldEqual NodesHeader
+ nodesAndEdges.edges shouldEqual EdgesHeader
+ }
+ it("should return empty nodes and edges when passed JSON with an empty list of edges") {
+ val nodesAndEdges = jsonIntoDataFramesTransformer.parseJson(
+ "{\"edges\":[]}")
+ nodesAndEdges.nodes shouldEqual NodesHeader
+ nodesAndEdges.edges shouldEqual EdgesHeader
+ }
+ it("should return and edge with no nodes when passed JSON with unnamed source and destination") {
+ val nodesAndEdges = jsonIntoDataFramesTransformer.parseJson(
+ "{\"edges\":[{\"source\":{},\"destination\":{}}]}")
+ nodesAndEdges.nodes shouldEqual NodesHeader
+ nodesAndEdges.edges shouldEqual EdgesHeader + "1,,,,,,,\n"
+ }
+ it("should gracefully handle JSON with an edge that has only a bare bones source") {
+ val nodesAndEdges = jsonIntoDataFramesTransformer.parseJson(
+ "{\"edges\":[{\"source\":{\"name\":\"Name\"}}]}")
+ nodesAndEdges.nodes shouldEqual NodesHeader + "1,Name,,\n"
+ nodesAndEdges.edges shouldEqual EdgesHeader + "1,1,,,,,,\n"
+ }
+ it("should gracefully handle JSON with an edge that has only a bare bones destination") {
+ val nodesAndEdges = jsonIntoDataFramesTransformer.parseJson(
+ "{\"edges\":[{\"destination\":{\"name\":\"Name\"}}]}")
+ nodesAndEdges.nodes shouldEqual NodesHeader + "1,Name,,\n"
+ nodesAndEdges.edges shouldEqual EdgesHeader + "1,,1,,,,,\n"
+ }
+ }
+}
diff --git a/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/S3SnapshotStoreSpec.scala b/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/S3SnapshotStoreSpec.scala
new file mode 100644
index 000000000..7062624f0
--- /dev/null
+++ b/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/S3SnapshotStoreSpec.scala
@@ -0,0 +1,247 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+import java.time.Instant
+import java.time.format.DateTimeFormatter.ISO_INSTANT
+import java.util
+
+import com.amazonaws.regions.Regions
+import com.amazonaws.services.s3.AmazonS3
+import com.amazonaws.services.s3.AmazonS3ClientBuilder.standard
+import com.amazonaws.services.s3.model.{ListObjectsV2Result, S3ObjectSummary}
+import com.expedia.www.haystack.service.graph.snapshot.store.Constants.{DotCsv, SlashEdges, SlashNodes}
+import com.expedia.www.haystack.service.graph.snapshot.store.S3SnapshotStoreSpec.itemNamesWrittenToS3
+import org.mockito.Matchers._
+import org.mockito.Mockito
+import org.mockito.Mockito.{times, verify, verifyNoMoreInteractions, when}
+import org.scalatest._
+import org.scalatest.mockito.MockitoSugar
+
+import scala.collection.JavaConverters._
+import scala.collection.{immutable, mutable}
+
+object S3SnapshotStoreSpec {
+ private val itemNamesWrittenToS3 = mutable.SortedSet[(String, String)]()
+}
+
+class S3SnapshotStoreSpec extends SnapshotStoreSpecBase with BeforeAndAfterAll with MockitoSugar with Matchers {
+  // Set to true to run these tests in an integration-type way, talking to a real S3.
+ // You must have valid keys on your machine to do so, typically in ~/.aws/credentials.
+ private val useRealS3 = false
+
+ private val bucketName = "haystack-service-graph-snapshots"
+ private val folderName = "unit-test-snapshots"
+ private val nextContinuationToken = "nextContinuationToken"
+ private val listObjectsV2Result = mock[ListObjectsV2Result]
+ private val s3Client = if (useRealS3) standard.withRegion(Regions.US_WEST_2).build else mock[AmazonS3]
+ private val serviceGraphJson = readFile(Constants.JsonFileNameWithExtension)
+ private val nodesCsv = readFile(Constants.NodesCsvFileNameWithExtension)
+ private val edgesCsv = readFile(Constants.EdgesCsvFileNameWithExtension)
+
+ override def afterAll() {
+ if (useRealS3) {
+ itemNamesWrittenToS3.foreach(itemName => s3Client.deleteObject(bucketName, itemName._1))
+ itemNamesWrittenToS3.foreach(itemName => s3Client.deleteObject(bucketName, itemName._2))
+ s3Client.deleteBucket(bucketName)
+ }
+ else {
+ verifyNoMoreInteractionsForAllMocksThenReset()
+ }
+ }
+
+ describe("S3SnapshotStore.build()") {
+ val store = new S3SnapshotStore()
+ var s3Store = store.build(Array(store.getClass.getCanonicalName, bucketName, folderName, "42"))
+ .asInstanceOf[S3SnapshotStore]
+ it("should use the arguments in the default constructor and the array") {
+ val s3Client: AmazonS3 = s3Store.s3Client
+ s3Client.getRegion.toString shouldEqual Regions.US_WEST_2.getName
+ s3Store.bucketName shouldEqual bucketName
+ s3Store.folderName shouldEqual folderName
+ s3Store.listObjectsBatchSize shouldEqual 42
+ }
+ it("should use 0 for listObjectsBatchSize if no listObjectsBatchSize is specified in the args array") {
+ s3Store = store.build(Array(store.getClass.getCanonicalName, bucketName, folderName))
+ .asInstanceOf[S3SnapshotStore]
+ s3Store.listObjectsBatchSize shouldEqual 0
+ }
+ }
+
+ describe("S3SnapshotStore") {
+ var s3Store = new S3SnapshotStore(s3Client, bucketName, folderName, 3)
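+    // A listObjectsBatchSize of 3 lets one listing page cover all snapshots written by these
+    // tests; the "small batches" test below rebuilds the store with a batch size of 1 to
+    // exercise pagination.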
+ it("should create the bucket when the bucket does not exist") {
+ if (!useRealS3) {
+ whensForWrite(false)
+ }
+ itemNamesWrittenToS3 += s3Store.write(oneMillisecondBeforeNow, serviceGraphJson)
+ if (!useRealS3) {
+ verify(s3Client).doesBucketExistV2(bucketName)
+ verify(s3Client).createBucket(bucketName)
+ verify(s3Client).putObject(bucketName, createItemName(oneMillisecondBeforeNow) + SlashNodes + DotCsv, nodesCsv)
+ verify(s3Client).putObject(bucketName, createItemName(oneMillisecondBeforeNow) + SlashEdges + DotCsv, edgesCsv)
+ verifyNoMoreInteractionsForAllMocksThenReset()
+ }
+ }
+ it("should not create the bucket when the bucket already exists") {
+ if (!useRealS3) {
+ whensForWrite(true)
+ }
+ itemNamesWrittenToS3 += s3Store.write(oneMillisecondAfterNow, serviceGraphJson)
+ itemNamesWrittenToS3 += s3Store.write(twoMillisecondsAfterNow, serviceGraphJson)
+ if (!useRealS3) {
+ verify(s3Client, times(2)).doesBucketExistV2(bucketName)
+ verify(s3Client).putObject(bucketName, createItemName(oneMillisecondAfterNow) + SlashNodes + DotCsv, nodesCsv)
+ verify(s3Client).putObject(bucketName, createItemName(oneMillisecondAfterNow) + SlashEdges + DotCsv, edgesCsv)
+ verify(s3Client).putObject(bucketName, createItemName(twoMillisecondsAfterNow) + SlashNodes + DotCsv, nodesCsv)
+ verify(s3Client).putObject(bucketName, createItemName(twoMillisecondsAfterNow) + SlashEdges + DotCsv, edgesCsv)
+ verifyNoMoreInteractionsForAllMocksThenReset()
+ }
+ }
+ it("should return None when read() is called with a time that is too early") {
+ if (!useRealS3) {
+ whensForRead
+ when(listObjectsV2Result.isTruncated).thenReturn(false)
+ when(listObjectsV2Result.getObjectSummaries).thenReturn(convertStringToS3ObjectSummary)
+ }
+ assert(s3Store.read(twoMillisecondsBeforeNow).isEmpty)
+ if (!useRealS3) {
+ verifiesForRead(1)
+ verifyNoMoreInteractionsForAllMocksThenReset()
+ }
+ }
+ it("should return the correct object when read() is called with a time that is not an exact match but is not too early") {
+ if (!useRealS3) {
+ whensForRead
+ when(s3Client.getObjectAsString(anyString(), anyString())).thenReturn(nodesCsv, edgesCsv)
+ when(listObjectsV2Result.isTruncated).thenReturn(false)
+ when(listObjectsV2Result.getObjectSummaries).thenReturn(convertStringToS3ObjectSummary)
+ }
+ assert(s3Store.read(now).get == serviceGraphJson)
+ if (!useRealS3) {
+ verifiesForRead(1)
+ verify(s3Client).getObjectAsString(anyString(),
+ org.mockito.Matchers.eq(createItemName(oneMillisecondBeforeNow) + SlashNodes))
+ verify(s3Client).getObjectAsString(anyString(),
+ org.mockito.Matchers.eq(createItemName(oneMillisecondBeforeNow) + SlashEdges))
+ verifyNoMoreInteractionsForAllMocksThenReset()
+ }
+ }
+ it("should return the correct object when read() is called with a time that is an exact match") {
+ if (!useRealS3) {
+ whensForRead
+ when(s3Client.getObjectAsString(anyString(), anyString())).thenReturn(nodesCsv, edgesCsv)
+ when(listObjectsV2Result.isTruncated).thenReturn(false)
+ when(listObjectsV2Result.getObjectSummaries).thenReturn(convertStringToS3ObjectSummary)
+ }
+ val actual = s3Store.read(twoMillisecondsAfterNow).get
+ val expected = serviceGraphJson
+ assert(actual == expected)
+ if (!useRealS3) {
+ verifiesForRead(1)
+ verify(s3Client).getObjectAsString(anyString(),
+ org.mockito.Matchers.eq(createItemName(twoMillisecondsAfterNow) + SlashNodes))
+ verify(s3Client).getObjectAsString(anyString(),
+ org.mockito.Matchers.eq(createItemName(twoMillisecondsAfterNow) + SlashEdges))
+ verifyNoMoreInteractionsForAllMocksThenReset()
+ }
+ }
+ it("should return the correct object for small batches") {
+ s3Store = new S3SnapshotStore(s3Client, bucketName, folderName, 1)
+ if (!useRealS3) {
+ whensForRead
+ when(s3Client.getObjectAsString(anyString(), anyString())).thenReturn(nodesCsv, edgesCsv)
+ when(listObjectsV2Result.isTruncated).thenReturn(true, true, false)
+ val it = itemNamesWrittenToS3.iterator
+ when(listObjectsV2Result.getObjectSummaries)
+ .thenReturn(
+ convertTupleToObjectSummary(it.next()).asJava,
+ convertTupleToObjectSummary(it.next()).asJava,
+ convertTupleToObjectSummary(it.next()).asJava)
+ }
+ assert(s3Store.read(twoMillisecondsAfterNow).get == serviceGraphJson)
+ if (!useRealS3) {
+ verifiesForRead(3)
+ verify(s3Client).getObjectAsString(anyString(),
+ org.mockito.Matchers.eq(createItemName(twoMillisecondsAfterNow) + SlashNodes))
+ verify(s3Client).getObjectAsString(anyString(),
+ org.mockito.Matchers.eq(createItemName(twoMillisecondsAfterNow) + SlashEdges))
+ verifyNoMoreInteractionsForAllMocksThenReset()
+ }
+ }
+ it("should never delete any items when purge() is called") {
+ s3Store.purge(twoMillisecondsAfterNow) shouldEqual 0
+ if (!useRealS3) {
+ verifyNoMoreInteractionsForAllMocksThenReset()
+ }
+ }
+ it("should throw an IllegalArgumentException when read() is called with a 0 value of listObjectsBatchSize") {
+ s3Store = new S3SnapshotStore(s3Client, bucketName, folderName, 0)
+ an [IllegalArgumentException] should be thrownBy s3Store.read(twoMillisecondsBeforeNow)
+ if (!useRealS3) {
+ verifyNoMoreInteractionsForAllMocksThenReset()
+ }
+ }
+ }
+
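+  /** Builds an S3ObjectSummary for each of the two item names (a nodes/edges pair) returned by one write() call. */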
+ private def convertTupleToObjectSummary(tuple: (String, String)): immutable.Seq[S3ObjectSummary] = {
+ val s3ObjectSummary1 = new S3ObjectSummary
+ s3ObjectSummary1.setBucketName(bucketName)
+ s3ObjectSummary1.setKey(tuple._1)
+ val s3ObjectSummary2 = new S3ObjectSummary
+ s3ObjectSummary2.setBucketName(bucketName)
+ s3ObjectSummary2.setKey(tuple._2)
+ List(s3ObjectSummary1, s3ObjectSummary2)
+ }
+
+ private def convertStringToS3ObjectSummary: util.List[S3ObjectSummary] = {
+ val listBuilder = List.newBuilder[S3ObjectSummary]
+ for (tuple <- itemNamesWrittenToS3) {
+ val list: immutable.Seq[S3ObjectSummary] = convertTupleToObjectSummary(tuple)
+ listBuilder += list.head
+ listBuilder += list(1)
+ }
+ listBuilder.result().asJava
+ }
+
+ private def verifiesForRead(loopTimes: Int) = {
+ verify(s3Client, times(loopTimes)).listObjectsV2(bucketName)
+ verify(listObjectsV2Result, times(loopTimes)).getObjectSummaries
+ verify(listObjectsV2Result, times(loopTimes)).getNextContinuationToken
+ verify(listObjectsV2Result, times(loopTimes)).isTruncated
+ }
+
+ private def whensForRead = {
+ when(listObjectsV2Result.getNextContinuationToken).thenReturn(nextContinuationToken)
+ when(s3Client.listObjectsV2(anyString())).thenReturn(listObjectsV2Result)
+ }
+
+ private def whensForWrite(doesBucketExist: Boolean) = {
+ when(s3Client.doesBucketExistV2(anyString())).thenReturn(doesBucketExist)
+ when(listObjectsV2Result.getNextContinuationToken).thenReturn(nextContinuationToken)
+ }
+
+ private def verifyNoMoreInteractionsForAllMocksThenReset(): Unit = {
+ verifyNoMoreInteractions(s3Client, listObjectsV2Result)
+ Mockito.reset(s3Client, listObjectsV2Result)
+ }
+
+ private def createItemName(thisInstant: Instant) = {
+ folderName + "/" + ISO_INSTANT.format(thisInstant)
+ }
+}
\ No newline at end of file
diff --git a/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/SnapshotStoreSpec.scala b/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/SnapshotStoreSpec.scala
new file mode 100644
index 000000000..1d4172830
--- /dev/null
+++ b/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/SnapshotStoreSpec.scala
@@ -0,0 +1,48 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+import java.time.Instant
+
+class SnapshotStoreSpec extends SnapshotStoreSpecBase {
+ private val snapshotStore = new SnapshotStore {
+ override def write(instant: Instant, content: String): AnyRef = {
+ None
+ }
+
+ override def read(instant: Instant): Option[String] = {
+ None
+ }
+
+ override def purge(instant: Instant): Integer = {
+ 0
+ }
+
+ override def build(constructorArguments: Array[String]): SnapshotStore = {
+ this
+ }
+ }
+
+ describe("SnapshotStore") {
+ it("should create the correct ISO 8601 file name") {
+ snapshotStore.createIso8601FileName(Instant.EPOCH) shouldEqual "1970-01-01T00:00:00.000Z"
+ snapshotStore.createIso8601FileName(Instant.EPOCH.plusMillis(1)) shouldEqual "1970-01-01T00:00:00.001Z"
+ snapshotStore.createIso8601FileName(Instant.EPOCH.plusMillis(-1)) shouldEqual "1969-12-31T23:59:59.999Z"
+ }
+ }
+
+}
diff --git a/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/SnapshotStoreSpecBase.scala b/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/SnapshotStoreSpecBase.scala
new file mode 100644
index 000000000..b912b1fab
--- /dev/null
+++ b/service-graph/snapshot-store/src/test/scala/com/expedia/www/haystack/service/graph/snapshot/store/SnapshotStoreSpecBase.scala
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshot.store
+
+import java.time.Instant
+import java.time.temporal.ChronoUnit
+
+import org.scalatest.{FunSpec, Matchers}
+
+import scala.io.{BufferedSource, Codec, Source}
+
+class SnapshotStoreSpecBase extends FunSpec with Matchers {
+ protected val now: Instant = Instant.EPOCH
+
+ protected val twoMillisecondsBeforeNow: Instant = now.minus(2, ChronoUnit.MILLIS)
+
+ protected val oneMillisecondBeforeNow: Instant = now.minus(1, ChronoUnit.MILLIS)
+
+ protected val oneMillisecondAfterNow: Instant = now.plus(1, ChronoUnit.MILLIS)
+
+ protected val twoMillisecondsAfterNow: Instant = now.plus(2, ChronoUnit.MILLIS)
+
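+  /** Reads a classpath resource as UTF-8, joining its lines with "\n" and appending a trailing newline. */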
+ def readFile(fileName: String): String = {
+ implicit val codec: Codec = Codec.UTF8
+ lazy val bufferedSource: BufferedSource = Source.fromResource(fileName)
+ val fileContents = bufferedSource.getLines.mkString("\n")
+ bufferedSource.close()
+ fileContents + "\n"
+ }
+
+}
diff --git a/service-graph/snapshotter/Makefile b/service-graph/snapshotter/Makefile
new file mode 100644
index 000000000..8a034ac22
--- /dev/null
+++ b/service-graph/snapshotter/Makefile
@@ -0,0 +1,11 @@
+.PHONY: release
+
+export DOCKER_ORG := expediadotcom
+export DOCKER_IMAGE_NAME := haystack-service-graph-snapshotter
+PWD := $(shell pwd)
+
+docker-image:
+ docker build -t $(DOCKER_IMAGE_NAME) -f build/docker/Dockerfile .
+
+release: docker-image
+ ../deployment/scripts/publish-to-docker-hub.sh
diff --git a/service-graph/snapshotter/README.md b/service-graph/snapshotter/README.md
new file mode 100644
index 000000000..c79025a65
--- /dev/null
+++ b/service-graph/snapshotter/README.md
@@ -0,0 +1,27 @@
+# Haystack : snapshotter
+
+The "snapshot" feature of the service graph is a Scala "main" application that runs the specified
+[snapshot-store](https://github.com/ExpediaDotCom/haystack-service-graph). The
+[Scala Main class](https://github.com/ExpediaDotCom/haystack-service-graph/blob/master/snapshotter/src/main/scala/com/expedia/www/haystack/service/graph/snapshotter/Main.scala)
+expects as its first argument the fully qualified class name of the snapshot store to use. More precisely:
+
+1. The first parameter is the fully qualified class name of the implementation of the snapshot store to run. There are
+are currently two implementations:
+ * [com.expedia.www.haystack.service.graph.snapshot.store.FileSnapshotStore](https://github.com/ExpediaDotCom/haystack-service-graph/blob/master/snapshot-store/src/main/scala/com.expedia.www.haystack.service.graph.snapshot.store.FileSnapshotStore)
+ * [com.expedia.www.haystack.service.graph.snapshot.store.S3SnapshotStore](https://github.com/ExpediaDotCom/haystack-service-graph/blob/master/snapshot-store/src/main/scala/com.expedia.www.haystack.service.graph.snapshot.store.S3SnapshotStore)
+2. The rest of the arguments are passed to the constructor of the class specified by args(0).
+ * For FileSnapshotStore, the only additional argument required is the directory name where the snapshots will be stored,
+ e.g. /var/snapshots
+ * For S3SnapshotStore, there are three additional arguments, which are in order:
+ * the bucket name
+ * the folder name inside the bucket
+ * the number of items to fetch at one time when calling the S3 listObjectsV2 API; the best value to choose,
+ assuming sufficient memory on the JVM running the snapshotter, is the maximum number of snapshots that will exist
+ in S3 before being purged. For example, with a one hour snapshot interval and a snapshot TTL of 1 year,
+ 366 * 24 = 8784 would be a good value (perhaps rounded to 10,000).
+
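+As a minimal sketch (the URL, bucket name, and folder name below are placeholder values, not shipped defaults),
+an equivalent programmatic invocation of the S3 variant would look like:
+
+```
+Main.main(Array(
+  "http://example.com/serviceGraph",  // placeholder service graph URL; "?from=<epoch millis>" is appended by Main
+  "com.expedia.www.haystack.service.graph.snapshot.store.S3SnapshotStore",
+  "Haystack",    // S3 bucket name
+  "snapshots",   // folder inside the bucket
+  "10000"))      // listObjectsV2 batch size
+```
+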
+## Building
+
+```
+mvn clean package
+```
\ No newline at end of file
diff --git a/service-graph/snapshotter/build/docker/Dockerfile b/service-graph/snapshotter/build/docker/Dockerfile
new file mode 100644
index 000000000..fad2dd585
--- /dev/null
+++ b/service-graph/snapshotter/build/docker/Dockerfile
@@ -0,0 +1,25 @@
+FROM openjdk:8-jre
+MAINTAINER Haystack
+
+ENV APP_NAME haystack-service-graph-snapshotter
+ENV APP_HOME /app/bin
+ENV JMXTRANS_AGENT jmxtrans-agent-1.2.6
+ENV DOCKERIZE_VERSION v0.6.1
+
+ADD https://github.com/jwilder/dockerize/releases/download/${DOCKERIZE_VERSION}/dockerize-alpine-linux-amd64-${DOCKERIZE_VERSION}.tar.gz dockerize.tar.gz
+RUN tar xzf dockerize.tar.gz
+RUN chmod +x dockerize
+
+RUN mkdir -p ${APP_HOME}
+RUN chmod a+w /app
+
+COPY target/${APP_NAME}.jar ${APP_HOME}/
+COPY build/docker/start-app.sh ${APP_HOME}/
+COPY build/docker/jmxtrans-agent.xml ${APP_HOME}/
+
+RUN chmod +x ${APP_HOME}/start-app.sh
+ADD https://github.com/jmxtrans/jmxtrans-agent/releases/download/${JMXTRANS_AGENT}/${JMXTRANS_AGENT}.jar ${APP_HOME}/
+
+WORKDIR ${APP_HOME}
+
+ENTRYPOINT ["./start-app.sh"]
diff --git a/service-graph/snapshotter/build/docker/jmxtrans-agent.xml b/service-graph/snapshotter/build/docker/jmxtrans-agent.xml
new file mode 100644
index 000000000..cea1c8b91
--- /dev/null
+++ b/service-graph/snapshotter/build/docker/jmxtrans-agent.xml
@@ -0,0 +1,44 @@
+<jmxtrans-agent>
+    <!-- JMX query definitions omitted -->
+    <queries>
+    </queries>
+    <outputWriter class="org.jmxtrans.agent.GraphitePlainTextTcpOutputWriter">
+        <host>${HAYSTACK_GRAPHITE_HOST:monitoring-influxdb-graphite.kube-system.svc}</host>
+        <port>${HAYSTACK_GRAPHITE_PORT:2003}</port>
+        <enabled>${HAYSTACK_GRAPHITE_ENABLED:false}</enabled>
+        <namePrefix>haystack.service-graph.snapshotter.#hostname#.</namePrefix>
+    </outputWriter>
+    <collectIntervalInSeconds>60</collectIntervalInSeconds>
+</jmxtrans-agent>
diff --git a/service-graph/snapshotter/build/docker/start-app.sh b/service-graph/snapshotter/build/docker/start-app.sh
new file mode 100755
index 000000000..11f44788c
--- /dev/null
+++ b/service-graph/snapshotter/build/docker/start-app.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+[ -z "$JAVA_XMS" ] && JAVA_XMS=512m
+[ -z "$JAVA_XMX" ] && JAVA_XMX=512m
+
+set -e
+JAVA_OPTS="${JAVA_OPTS} \
+-javaagent:${APP_HOME}/${JMXTRANS_AGENT}.jar=${APP_HOME}/jmxtrans-agent.xml \
+-XX:+UseG1GC \
+-Xmx${JAVA_XMX} \
+-Xms${JAVA_XMS} \
+-Dapplication.name=${APP_NAME} \
+-Dapplication.home=${APP_HOME}"
+
+if [[ -n "$SERVICE_DEBUG_ON" ]] && [[ "$SERVICE_DEBUG_ON" == true ]]; then
+ JAVA_OPTS="$JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y"
+fi
+
+exec java ${JAVA_OPTS} -jar "${APP_HOME}/${APP_NAME}.jar" "$@"
diff --git a/service-graph/snapshotter/pom.xml b/service-graph/snapshotter/pom.xml
new file mode 100644
index 000000000..8a14f6bcb
--- /dev/null
+++ b/service-graph/snapshotter/pom.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>haystack-service-graph</artifactId>
+        <groupId>com.expedia.www</groupId>
+        <version>1.0.15-SNAPSHOT</version>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>haystack-service-graph-snapshotter</artifactId>
+    <packaging>jar</packaging>
+
+    <licenses>
+        <license>
+            <name>Apache License, Version 2.0</name>
+            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+            <distribution>repo</distribution>
+        </license>
+    </licenses>
+
+    <properties>
+        <finalName>${project.artifactId}-${project.version}</finalName>
+        <mainClass>com.expedia.www.haystack.service.graph.snapshotter.Main</mainClass>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.expedia.www</groupId>
+            <artifactId>haystack-logback-metrics-appender</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.expedia.www</groupId>
+            <artifactId>haystack-service-graph-snapshot-store</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-all</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.scalaj</groupId>
+            <artifactId>scalaj-http_${scala.major.minor.version}</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <finalName>${finalName}</finalName>
+        <plugins>
+            <plugin>
+                <groupId>org.scalatest</groupId>
+                <artifactId>scalatest-maven-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>test</id>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                        <configuration>
+                            <tagsToExclude>org.expedia.www.haystack.commons.scalatest.IntegrationSuite</tagsToExclude>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.scoverage</groupId>
+                <artifactId>scoverage-maven-plugin</artifactId>
+                <configuration>
+                    <failOnMinimumCoverage>true</failOnMinimumCoverage>
+                    <minimumCoverage>100</minimumCoverage>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>net.alchim31.maven</groupId>
+                <artifactId>scala-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.scalastyle</groupId>
+                <artifactId>scalastyle-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <version>${maven-shade-plugin-version}</version>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <transformers>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                                    <mainClass>${mainClass}</mainClass>
+                                </transformer>
+                            </transformers>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/service-graph/snapshotter/src/main/resources/app.conf b/service-graph/snapshotter/src/main/resources/app.conf
new file mode 100644
index 000000000..73b52f100
--- /dev/null
+++ b/service-graph/snapshotter/src/main/resources/app.conf
@@ -0,0 +1,12 @@
+snapshotter {
+ # 1 year in milliseconds: 365.2425 days/year
+ # * 24 hours/day
+ # * 60 minutes/hour
+ # * 60 seconds/minute
+ # * 1000 milliseconds/second
+ # = 31,556,952,000
+ purge.age.ms = 31556952000
+
+ # Determines the "from" parameter in the call to retrieve the service graph: 3,600,000 milliseconds = 1 hour
+ window.size.ms = 3600000
+}
\ No newline at end of file
diff --git a/service-graph/snapshotter/src/main/resources/logback.xml b/service-graph/snapshotter/src/main/resources/logback.xml
new file mode 100644
index 000000000..a54f534c6
--- /dev/null
+++ b/service-graph/snapshotter/src/main/resources/logback.xml
@@ -0,0 +1,21 @@
+<configuration>
+    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+        <immediateFlush>true</immediateFlush>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n</pattern>
+        </encoder>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="console"/>
+    </root>
+</configuration>
diff --git a/service-graph/snapshotter/src/main/scala/com/expedia/www/haystack/service/graph/snapshotter/AppConfiguration.scala b/service-graph/snapshotter/src/main/scala/com/expedia/www/haystack/service/graph/snapshotter/AppConfiguration.scala
new file mode 100644
index 000000000..57806b1a5
--- /dev/null
+++ b/service-graph/snapshotter/src/main/scala/com/expedia/www/haystack/service/graph/snapshotter/AppConfiguration.scala
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshotter
+
+import com.expedia.www.haystack.commons.config.ConfigurationLoader
+import org.apache.commons.lang3.StringUtils
+
+/**
+ * This class reads the configuration from the given resource name
+ *
+ * @param resourceName name of the resource file to load
+ */
+class AppConfiguration(resourceName: String) {
+
+ require(StringUtils.isNotBlank(resourceName))
+
+ private val config = ConfigurationLoader.loadConfigFileWithEnvOverrides(resourceName = this.resourceName)
+
+ /**
+ * Default constructor that loads configuration from the resource named "app.conf"
+ */
+ def this() = this("app.conf")
+
+ val purgeAgeMs: Long = config.getLong("snapshotter.purge.age.ms")
+
+ val windowSizeMs: Long = config.getLong("snapshotter.window.size.ms")
+}
diff --git a/service-graph/snapshotter/src/main/scala/com/expedia/www/haystack/service/graph/snapshotter/Main.scala b/service-graph/snapshotter/src/main/scala/com/expedia/www/haystack/service/graph/snapshotter/Main.scala
new file mode 100644
index 000000000..9d21a8986
--- /dev/null
+++ b/service-graph/snapshotter/src/main/scala/com/expedia/www/haystack/service/graph/snapshotter/Main.scala
@@ -0,0 +1,115 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshotter
+
+import java.time.{Clock, Instant}
+
+import com.expedia.www.haystack.service.graph.snapshot.store.SnapshotStore
+import org.slf4j.{Logger, LoggerFactory}
+import scalaj.http.{Http, HttpRequest}
+
+object Main {
+ val ServiceGraphUrlRequiredMsg =
+ "The first argument must specify the service graph URL"
+ val StringStoreClassRequiredMsg =
+ "The second argument must specify the fully qualified class name of a class that implements SnapshotStore"
+ val UrlBaseRequiredMsg =
+ "The third argument must specify the first constructor argument for the snapshot store"
+ val ServiceGraphUrlSuffix: String = "?from=%d"
+ val appConfiguration = new AppConfiguration()
+
+ var logger: Logger = LoggerFactory.getLogger(Main.getClass)
+ var clock: Clock = Clock.systemUTC()
+ var factory: Factory = new Factory
+
+ /** Main method
+ * @param args specifies the service graph URL, the snapshot store class to run, and that store's arguments.
+ * ==args(0)==
+ * The first argument is the service graph URL; Main appends the "?from=%d" suffix before making the HTTP call.
+ * ==args(1)==
+ * The second argument is the fully qualified class name of the implementation of
+ * [[com.expedia.www.haystack.service.graph.snapshot.store.SnapshotStore]] to run.
+ * There are currently two implementations:
+ * - [[com.expedia.www.haystack.service.graph.snapshot.store.FileSnapshotStore]]
+ * - [[com.expedia.www.haystack.service.graph.snapshot.store.S3SnapshotStore]]
+ * ==args(2+)==
+ * The rest of the arguments are passed to the build() method of the class specified by args(1).
+ * See the documentation in the build() method of the desired implementation for argument details.
+ * ===Examples===
+ * ====FileSnapshotStore====
+ * To run FileSnapshotStore and use /var/snapshots for snapshot storage, the arguments would be:
+ * - <the service graph URL>
+ * - com.expedia.www.haystack.service.graph.snapshot.store.FileSnapshotStore
+ * - /var/snapshots
+ * ====S3SnapshotStore====
+ * To run S3SnapshotStore and use the "Haystack" bucket with subfolder "snapshots" for snapshot storage, and a batch
+ * size of 10,000 when calling the S3 "listObjectsV2" API, the arguments would be:
+ * - <the service graph URL>
+ * - com.expedia.www.haystack.service.graph.snapshot.store.S3SnapshotStore
+ * - Haystack
+ * - snapshots
+ * - 10000
+ */
+ def main(args: Array[String]): Unit = {
+ if (args.length == 0) {
+ logger.error(ServiceGraphUrlRequiredMsg)
+ } else if (args.length == 1) {
+ logger.error(StringStoreClassRequiredMsg)
+ } else if (args.length == 2) {
+ logger.error(UrlBaseRequiredMsg)
+ } else {
+ val snapshotStore = instantiateSnapshotStore(args)
+ val now = clock.instant()
+ val json = getCurrentServiceGraph(args(0) + ServiceGraphUrlSuffix, now)
+ storeServiceGraphInTheStringStore(snapshotStore, now, json)
+ purgeOldSnapshots(snapshotStore, now)
+ }
+ }
+
+ private def instantiateSnapshotStore(args: Array[String]): SnapshotStore = {
+ def createStringStoreInstanceWithDefaultConstructor: SnapshotStore = {
+ val fullyQualifiedClassName = args(1)
+ val klass = Class.forName(fullyQualifiedClassName)
+ val instanceBuiltByDefaultConstructor = klass.newInstance().asInstanceOf[SnapshotStore]
+ instanceBuiltByDefaultConstructor
+ }
+
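+ // args.drop(1) removes only the service graph URL (args(0)), so the array passed to build() starts
+ // with the snapshot store class name, followed by that store's constructor arguments.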
+ val snapshotStore = createStringStoreInstanceWithDefaultConstructor.build(args.drop(1))
+ snapshotStore
+ }
+
+ private def getCurrentServiceGraph(url: String, instant: Instant) = {
+ val request = factory.createHttpRequest(url, instant.toEpochMilli - appConfiguration.windowSizeMs)
+ val httpResponse = request.asString
+ httpResponse.body
+ }
+
+ private def storeServiceGraphInTheStringStore(snapshotStore: SnapshotStore,
+ instant: Instant,
+ json: String): AnyRef = {
+ snapshotStore.write(instant, json)
+ }
+
+ private def purgeOldSnapshots(snapshotStore: SnapshotStore,
+ instant: Instant): Integer = {
+ snapshotStore.purge(instant.minusMillis(appConfiguration.purgeAgeMs))
+ }
+}
+
+class Factory {
+ def createHttpRequest(url: String, fromEpochMilli: Long): HttpRequest = {
+ val urlWithParameter = url.format(fromEpochMilli)
+ Http(urlWithParameter)
+ }
+}
diff --git a/service-graph/snapshotter/src/test/resources/app.conf b/service-graph/snapshotter/src/test/resources/app.conf
new file mode 100644
index 000000000..3a7f2f1cf
--- /dev/null
+++ b/service-graph/snapshotter/src/test/resources/app.conf
@@ -0,0 +1,7 @@
+snapshotter {
+ # Setting purge age to 0 lets a unit test in MainSpec verify that purge is called when running Main.main()
+ purge.age.ms = 0
+
+ # 3,600,000 milliseconds = 1 hour
+ window.size.ms = 3600000
+}
\ No newline at end of file
diff --git a/service-graph/snapshotter/src/test/resources/serviceGraph.json b/service-graph/snapshotter/src/test/resources/serviceGraph.json
new file mode 100644
index 000000000..7f03f0708
--- /dev/null
+++ b/service-graph/snapshotter/src/test/resources/serviceGraph.json
@@ -0,0 +1,2547 @@
+{
+ "edges": [
+ {
+ "source": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "provideradapter-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 202931,
+ "lastSeen": 1544575410111,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587937
+ },
+ {
+ "source": {
+ "name": "daily-data-update-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "async-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 5136140,
+ "lastSeen": 1544575571142,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587796
+ },
+ {
+ "source": {
+ "name": "search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "provideradapter-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 134125,
+ "lastSeen": 1544575498167,
+ "errorCount": 21882
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587999
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "front-door-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "stats": {
+ "count": 17367200,
+ "lastSeen": 1544575567920,
+ "errorCount": 103264
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588026
+ },
+ {
+ "source": {
+ "name": "westeros-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "stark-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 157201,
+ "lastSeen": 1544575421793,
+ "errorCount": 36
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588024
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "rails-web",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 130,
+ "lastSeen": 1544573988361,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "new-shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3531583,
+ "lastSeen": 1544575569804,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587705
+ },
+ {
+ "source": {
+ "name": "authentication-service",
+ "tags": {}
+ },
+ "destination": {
+ "name": "userinteraction-web",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 76788,
+ "lastSeen": 1544575553640,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587815
+ },
+ {
+ "source": {
+ "name": "stark-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "context-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 180327,
+ "lastSeen": 1544575421874,
+ "errorCount": 2088
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587693
+ },
+ {
+ "source": {
+ "name": "stark-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "template-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 169216,
+ "lastSeen": 1544575421889,
+ "errorCount": 1
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588068
+ },
+ {
+ "source": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "westeros-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 22890079,
+ "lastSeen": 1544575572251,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587672
+ },
+ {
+ "source": {
+ "name": "api-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-content-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2,
+ "lastSeen": 1544554416805,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544553000000,
+ "effectiveTo": 1544554800000
+ },
+ {
+ "source": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "chargeback-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 60,
+ "lastSeen": 1544574583227,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "adapter-aws",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 303022,
+ "lastSeen": 1544575592096,
+ "errorCount": 1786
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587954
+ },
+ {
+ "source": {
+ "name": "new-shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "multishop",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3893347,
+ "lastSeen": 1544575569844,
+ "errorCount": 2
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587734
+ },
+ {
+ "source": {
+ "name": "westeros-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "guide-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1442998,
+ "lastSeen": 1544575571677,
+ "errorCount": 61
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587753
+ },
+ {
+ "source": {
+ "name": "search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "help-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2826475,
+ "lastSeen": 1544575555819,
+ "errorCount": 1624211
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587681
+ },
+ {
+ "source": {
+ "name": "authentication-service",
+ "tags": {}
+ },
+ "destination": {
+ "name": "front-door-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "stats": {
+ "count": 99413,
+ "lastSeen": 1544575499415,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587706
+ },
+ {
+ "source": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "provideradapter-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3258,
+ "lastSeen": 1544575229219,
+ "errorCount": 800
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587979
+ },
+ {
+ "source": {
+ "name": "payment-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "fx",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 233615,
+ "lastSeen": 1544575277983,
+ "errorCount": 16
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587689
+ },
+ {
+ "source": {
+ "name": "api-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 196315,
+ "lastSeen": 1544575522183,
+ "errorCount": 4
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587866
+ },
+ {
+ "source": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "booking-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3,
+ "lastSeen": 1544559964099,
+ "errorCount": 1
+ },
+ "effectiveFrom": 1544558400000,
+ "effectiveTo": 1544560200000
+ },
+ {
+ "source": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "chargeback-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 169,
+ "lastSeen": 1544574617075,
+ "errorCount": 38
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588010
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1,2",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "checkout-payment-domain-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "stats": {
+ "count": 342981,
+ "lastSeen": 1544575571750,
+ "errorCount": 126
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587681
+ },
+ {
+ "source": {
+ "name": "forge-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "context-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 5042343,
+ "lastSeen": 1544575571591,
+ "errorCount": 405
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587826
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "3,1,2",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "stats": {
+ "count": 21474934,
+ "lastSeen": 1544575571886,
+ "errorCount": 26012
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588099
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "his-web",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 698168,
+ "lastSeen": 1544575554236,
+ "errorCount": 87771
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587677
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "hers-web",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2907715,
+ "lastSeen": 1544575570922,
+ "errorCount": 33645
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587793
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "new-shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 116,
+ "lastSeen": 1544574157710,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "lpt-web",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1821807,
+ "lastSeen": 1544575567232,
+ "errorCount": 6746
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588011
+ },
+ {
+ "source": {
+ "name": "satellite",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "endurance-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 12111,
+ "lastSeen": 1544575506385,
+ "errorCount": 140
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588024
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "stats": {
+ "count": 570805,
+ "lastSeen": 1544575504575,
+ "errorCount": 32
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587811
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "stats": {
+ "count": 74,
+ "lastSeen": 1544573552180,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "loom-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "lists-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 18356,
+ "lastSeen": 1544575525038,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588011
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "2",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "mars",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 73614,
+ "lastSeen": 1544575526411,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587848
+ },
+ {
+ "source": {
+ "name": "mormont-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "seo-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 596144,
+ "lastSeen": 1544575525706,
+ "errorCount": 3
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587937
+ },
+ {
+ "source": {
+ "name": "info-site-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-cart",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 6,
+ "lastSeen": 1544572410049,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544500800000,
+ "effectiveTo": 1544572800000
+ },
+ {
+ "source": {
+ "name": "shopping-detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2838890,
+ "lastSeen": 1544575569418,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587682
+ },
+ {
+ "source": {
+ "name": "loom-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "api-customer",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 8583,
+ "lastSeen": 1544575208652,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587796
+ },
+ {
+ "source": {
+ "name": "multishop",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "pricing-engine",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 15560819,
+ "lastSeen": 1544575569839,
+ "errorCount": 35
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587689
+ },
+ {
+ "source": {
+ "name": "westeros-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "forge-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3161466,
+ "lastSeen": 1544575571588,
+ "errorCount": 4817
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587639
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "2",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "third-party-provider-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 9735,
+ "lastSeen": 1544575279207,
+ "errorCount": 3
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587827
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "insurance-shopping-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 410,
+ "lastSeen": 1544574748096,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587918
+ },
+ {
+ "source": {
+ "name": "shopping-detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-user-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1214950,
+ "lastSeen": 1544575528611,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587723
+ },
+ {
+ "source": {
+ "name": "ticket-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "stats": {
+ "count": 20845,
+ "lastSeen": 1544575478589,
+ "errorCount": 68
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587644
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "third-party-api-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 5000,
+ "lastSeen": 1544574936681,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587722
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "progressive-webapp-api",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 687220,
+ "lastSeen": 1544575524183,
+ "errorCount": 20
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587866
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "compositor-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 243059,
+ "lastSeen": 1544575396324,
+ "errorCount": 22
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588026
+ },
+ {
+ "source": {
+ "name": "westeros-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "mormont-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 652939,
+ "lastSeen": 1544575525612,
+ "errorCount": 68
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587815
+ },
+ {
+ "source": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "ticket-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 27317,
+ "lastSeen": 1544575505207,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588010
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "userinteraction-web",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2907462,
+ "lastSeen": 1544575570897,
+ "errorCount": 10681
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587808
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "info-site-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 84704,
+ "lastSeen": 1544575480474,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587827
+ },
+ {
+ "source": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "melisandre-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 125064,
+ "lastSeen": 1544575501716,
+ "errorCount": 2985
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588087
+ },
+ {
+ "source": {
+ "name": "progressive-webapp-api",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2469688,
+ "lastSeen": 1544575569284,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587909
+ },
+ {
+ "source": {
+ "name": "guide-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "template-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 806,
+ "lastSeen": 1544573623862,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "api-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 8,
+ "lastSeen": 1544569762201,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544491800000,
+ "effectiveTo": 1544571000000
+ },
+ {
+ "source": {
+ "name": "search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "controller-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 20783217,
+ "lastSeen": 1544575572498,
+ "errorCount": 246
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587641
+ },
+ {
+ "source": {
+ "name": "progressive-webapp-api",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 973286,
+ "lastSeen": 1544575570804,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588061
+ },
+ {
+ "source": {
+ "name": "endurance-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 18729,
+ "lastSeen": 1544575266335,
+ "errorCount": 16
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587723
+ },
+ {
+ "source": {
+ "name": "help-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "adapter-aws",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 379955,
+ "lastSeen": 1544575554493,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587808
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "targaryen-web",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 8861,
+ "lastSeen": 1544575485661,
+ "errorCount": 324
+ },
+ "effectiveFrom": 1544500800000,
+ "effectiveTo": 1544575587634
+ },
+ {
+ "source": {
+ "name": "guide-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "seo-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 654,
+ "lastSeen": 1544573560757,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "drogo-api",
+ "tags": {}
+ },
+ "stats": {
+ "count": 11154,
+ "lastSeen": 1544575207980,
+ "errorCount": 11
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587669
+ },
+ {
+ "source": {
+ "name": "payment-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "lannister-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 13974992,
+ "lastSeen": 1544575556069,
+ "errorCount": 9
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587798
+ },
+ {
+ "source": {
+ "name": "info-site-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "stats": {
+ "count": 234369,
+ "lastSeen": 1544575570813,
+ "errorCount": 6024
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587952
+ },
+ {
+ "source": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-content-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 19029006,
+ "lastSeen": 1544575571645,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587677
+ },
+ {
+ "source": {
+ "name": "new-shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "fx",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 304458,
+ "lastSeen": 1544575397477,
+ "errorCount": 5
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587674
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "loom-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 90053,
+ "lastSeen": 1544575481890,
+ "errorCount": 7
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587788
+ },
+ {
+ "source": {
+ "name": "new-shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "payment-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 20680712,
+ "lastSeen": 1544575556069,
+ "errorCount": 32
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587782
+ },
+ {
+ "source": {
+ "name": "api-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 56878,
+ "lastSeen": 1544575522176,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587882
+ },
+ {
+ "source": {
+ "name": "search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "margaery-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 14646996,
+ "lastSeen": 1544575571980,
+ "errorCount": 145
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587689
+ },
+ {
+ "source": {
+ "name": "loom-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "api-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 985553,
+ "lastSeen": 1544575571604,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588060
+ },
+ {
+ "source": {
+ "name": "tyrion-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "chargeback-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 176,
+ "lastSeen": 1544573823589,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "help-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "rules-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 237982,
+ "lastSeen": 1544575571933,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587954
+ },
+ {
+ "source": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-pricing",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 33794745,
+ "lastSeen": 1544575571645,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587875
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "2",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "bolton-service",
+ "tags": {}
+ },
+ "stats": {
+ "count": 319715,
+ "lastSeen": 1544575500355,
+ "errorCount": 1004
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587658
+ },
+ {
+ "source": {
+ "name": "ticket-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "melisandre-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 13072,
+ "lastSeen": 1544575568510,
+ "errorCount": 59
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587716
+ },
+ {
+ "source": {
+ "name": "help-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "provideradapter-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1403232,
+ "lastSeen": 1544575555851,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587827
+ },
+ {
+ "source": {
+ "name": "tyrion-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "provideradapter-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 26,
+ "lastSeen": 1544571310584,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544572800000
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 32518,
+ "lastSeen": 1544575275982,
+ "errorCount": 75
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587870
+ },
+ {
+ "source": {
+ "name": "forge-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "template-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3548918,
+ "lastSeen": 1544575571596,
+ "errorCount": 30
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587738
+ },
+ {
+ "source": {
+ "name": "tips-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 764,
+ "lastSeen": 1544574548576,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544574600000
+ },
+ {
+ "source": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "baratheon-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 578577,
+ "lastSeen": 1544575571747,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587662
+ },
+ {
+ "source": {
+ "name": "westeros-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "hodor-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 6512590,
+ "lastSeen": 1544575570949,
+ "errorCount": 1296924
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587762
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "ecommerce-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 73,
+ "lastSeen": 1544572581996,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544493600000,
+ "effectiveTo": 1544572800000
+ },
+ {
+ "source": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "provideradapter-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 9094,
+ "lastSeen": 1544575240906,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588090
+ },
+ {
+ "source": {
+ "name": "forge-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "seo-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3369864,
+ "lastSeen": 1544575571631,
+ "errorCount": 6
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587848
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "varys-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 68388,
+ "lastSeen": 1544575520781,
+ "errorCount": 80
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587934
+ },
+ {
+ "source": {
+ "name": "bronn-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 99784,
+ "lastSeen": 1544575571751,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544563800000,
+ "effectiveTo": 1544575587662
+ },
+ {
+ "source": {
+ "name": "stark-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "seo-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 167207,
+ "lastSeen": 1544575421914,
+ "errorCount": 1
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587725
+ },
+ {
+ "source": {
+ "name": "tyrion-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "adapter-aws",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 8112,
+ "lastSeen": 1544575271597,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588026
+ },
+ {
+ "source": {
+ "name": "guide-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "context-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1984413,
+ "lastSeen": 1544575571681,
+ "errorCount": 1983415
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587723
+ },
+ {
+ "source": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "booking-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 37252,
+ "lastSeen": 1544575265116,
+ "errorCount": 1136
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588025
+ },
+ {
+ "source": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1473126,
+ "lastSeen": 1544575571747,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587672
+ },
+ {
+ "source": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "bronn-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1,
+ "lastSeen": 1544565534137,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544563800000,
+ "effectiveTo": 1544565600000
+ },
+ {
+ "source": {
+ "name": "baratheon-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "bronn-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 100667,
+ "lastSeen": 1544575571750,
+ "errorCount": 1
+ },
+ "effectiveFrom": 1544563800000,
+ "effectiveTo": 1544575587848
+ },
+ {
+ "source": {
+ "name": "varys-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3,
+ "lastSeen": 1544552474213,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544553000000
+ },
+ {
+ "source": {
+ "name": "endurance-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "authentication-service",
+ "tags": {}
+ },
+ "stats": {
+ "count": 8719,
+ "lastSeen": 1544575265175,
+ "errorCount": 77
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587674
+ },
+ {
+ "source": {
+ "name": "brienne-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "geo-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 38627,
+ "lastSeen": 1544557965110,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544558400000
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "stats": {
+ "count": 587165,
+ "lastSeen": 1544575520505,
+ "errorCount": 1
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587935
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "cache-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 72562,
+ "lastSeen": 1544575498524,
+ "errorCount": 206
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587934
+ },
+ {
+ "source": {
+ "name": "hodor-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 10500513,
+ "lastSeen": 1544575570958,
+ "errorCount": 76483
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588061
+ },
+ {
+ "source": {
+ "name": "margaery-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "airpricingservice",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 16824540,
+ "lastSeen": 1544575571994,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587641
+ },
+ {
+ "source": {
+ "name": "shopping-content-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 30148726,
+ "lastSeen": 1544575571973,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588099
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "domain-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 46,
+ "lastSeen": 1544569236656,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544497200000,
+ "effectiveTo": 1544571000000
+ },
+ {
+ "source": {
+ "name": "progressive-webapp-api",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-content-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 5277998,
+ "lastSeen": 1544575572135,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587909
+ },
+ {
+ "source": {
+ "name": "progressive-webapp-api",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "location-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3891685,
+ "lastSeen": 1544575571930,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588087
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "2",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "authentication-service",
+ "tags": {}
+ },
+ "stats": {
+ "count": 170461,
+ "lastSeen": 1544575499257,
+ "errorCount": 11
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587699
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "session-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 611347,
+ "lastSeen": 1544575568839,
+ "errorCount": 1
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588024
+ },
+ {
+ "source": {
+ "name": "ticket-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "booking-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 13140,
+ "lastSeen": 1544575218424,
+ "errorCount": 23
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587914
+ },
+ {
+ "source": {
+ "name": "shopping-detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-content-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 5779855,
+ "lastSeen": 1544575569418,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587937
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "2",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "greyjoy-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 3984995,
+ "lastSeen": 1544575570003,
+ "errorCount": 106528
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587704
+ },
+ {
+ "source": {
+ "name": "boss-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "ticket-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2,
+ "lastSeen": 1544559967407,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544558400000,
+ "effectiveTo": 1544560200000
+ },
+ {
+ "source": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws,dc"
+ }
+ },
+ "destination": {
+ "name": "margaery-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 2674151,
+ "lastSeen": 1544575594133,
+ "errorCount": 49
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587953
+ },
+ {
+ "source": {
+ "name": "shopping-search-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "shopping-user-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 18737197,
+ "lastSeen": 1544575571645,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587979
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "satellite",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 217364,
+ "lastSeen": 1544575482983,
+ "errorCount": 290
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587723
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "shopping-cart",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 10386,
+ "lastSeen": 1544575280832,
+ "errorCount": 204
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587797
+ },
+ {
+ "source": {
+ "name": "mormont-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "context-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 844962,
+ "lastSeen": 1544575554067,
+ "errorCount": 28241
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587815
+ },
+ {
+ "source": {
+ "name": "internet-proxy",
+ "tags": {}
+ },
+ "destination": {
+ "name": "user-profile-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 151657,
+ "lastSeen": 1544575488509,
+ "errorCount": 12
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575588060
+ },
+ {
+ "source": {
+ "name": "front-door-service",
+ "tags": {
+ "tier": "1",
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "shae-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "stats": {
+ "count": 7126,
+ "lastSeen": 1544575152362,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587866
+ },
+ {
+ "source": {
+ "name": "mormont-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "destination": {
+ "name": "template-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 832484,
+ "lastSeen": 1544575554075,
+ "errorCount": 5
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587723
+ },
+ {
+ "source": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "rules-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 586462,
+ "lastSeen": 1544575592045,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587763
+ },
+ {
+ "source": {
+ "name": "detail-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "dc"
+ }
+ },
+ "destination": {
+ "name": "controller-service",
+ "tags": {
+ "X-HAYSTACK-INFRASTRUCTURE-PROVIDER": "aws"
+ }
+ },
+ "stats": {
+ "count": 1297487,
+ "lastSeen": 1544575592045,
+ "errorCount": 0
+ },
+ "effectiveFrom": 1544488200000,
+ "effectiveTo": 1544575587693
+ }
+ ]
+}
\ No newline at end of file
diff --git a/service-graph/snapshotter/src/test/resources/serviceGraph_edges.csv b/service-graph/snapshotter/src/test/resources/serviceGraph_edges.csv
new file mode 100644
index 000000000..3d4ebfa64
--- /dev/null
+++ b/service-graph/snapshotter/src/test/resources/serviceGraph_edges.csv
@@ -0,0 +1,124 @@
+id,sourceId,destinationId,statsCount,statsLastSeen,statsErrorCount,effectiveFrom,effectiveTo
+1,1,45,202931,1544575410111,0,1544488200000,1544575587937
+2,2,46,5136140,1544575571142,0,1544488200000,1544575587796
+3,3,45,134125,1544575498167,21882,1544488200000,1544575587999
+4,4,47,17367200,1544575567920,103264,1544488200000,1544575588026
+5,5,8,157201,1544575421793,36,1544488200000,1544575588024
+6,4,48,130,1544573988361,0,1544488200000,1544574600000
+7,6,12,3531583,1544575569804,0,1544488200000,1544575587705
+8,7,49,76788,1544575553640,0,1544488200000,1544575587815
+9,8,50,180327,1544575421874,2088,1544488200000,1544575587693
+10,8,51,169216,1544575421889,1,1544488200000,1544575588068
+11,9,5,22890079,1544575572251,0,1544488200000,1544575587672
+12,10,44,2,1544554416805,0,1544553000000,1544554800000
+13,11,52,60,1544574583227,0,1544488200000,1544574600000
+14,1,53,303022,1544575592096,1786,1544488200000,1544575587954
+15,12,28,3893347,1544575569844,2,1544488200000,1544575587734
+16,5,32,1442998,1544575571677,61,1544488200000,1544575587753
+17,13,34,2826475,1544575555819,1624211,1544488200000,1544575587681
+18,7,54,99413,1544575499415,0,1544488200000,1544575587706
+19,14,45,3258,1544575229219,800,1544488200000,1544575587979
+20,15,55,233615,1544575277983,16,1544488200000,1544575587689
+21,10,56,196315,1544575522183,4,1544488200000,1544575587866
+22,16,57,3,1544559964099,1,1544558400000,1544560200000
+23,14,52,169,1544574617075,38,1544488200000,1544575588010
+24,17,58,342981,1544575571750,126,1544488200000,1544575587681
+25,18,50,5042343,1544575571591,405,1544488200000,1544575587826
+26,19,59,21474934,1544575571886,26012,1544488200000,1544575588099
+27,4,60,698168,1544575554236,87771,1544488200000,1544575587677
+28,4,61,2907715,1544575570922,33645,1544488200000,1544575587793
+29,20,12,116,1544574157710,0,1544488200000,1544574600000
+30,4,62,1821807,1544575567232,6746,1544488200000,1544575588011
+31,21,33,12111,1544575506385,140,1544488200000,1544575588024
+32,22,1,570805,1544575504575,32,1544488200000,1544575587811
+33,4,3,74,1544573552180,0,1544488200000,1544574600000
+34,23,63,18356,1544575525038,0,1544488200000,1544575588011
+35,24,64,73614,1544575526411,0,1544488200000,1544575587848
+36,25,65,596144,1544575525706,3,1544488200000,1544575587937
+37,26,66,6,1544572410049,0,1544500800000,1544572800000
+38,27,6,2838890,1544575569418,0,1544488200000,1544575587682
+39,23,67,8583,1544575208652,0,1544488200000,1544575587796
+40,28,68,15560819,1544575569839,35,1544488200000,1544575587689
+41,5,18,3161466,1544575571588,4817,1544488200000,1544575587639
+42,29,69,9735,1544575279207,3,1544488200000,1544575587827
+43,4,70,410,1544574748096,0,1544488200000,1544575587918
+44,27,71,1214950,1544575528611,0,1544488200000,1544575587723
+45,30,14,20845,1544575478589,68,1544488200000,1544575587644
+46,4,72,5000,1544574936681,0,1544488200000,1544575587722
+47,4,31,687220,1544575524183,20,1544488200000,1544575587866
+48,4,73,243059,1544575396324,22,1544488200000,1544575588026
+49,5,25,652939,1544575525612,68,1544488200000,1544575587815
+50,14,30,27317,1544575505207,0,1544488200000,1544575588010
+51,4,49,2907462,1544575570897,10681,1544488200000,1544575587808
+52,4,26,84704,1544575480474,0,1544488200000,1544575587827
+53,14,74,125064,1544575501716,2985,1544488200000,1544575588087
+54,31,27,2469688,1544575569284,0,1544488200000,1544575587909
+55,32,51,806,1544573623862,0,1544488200000,1544574600000
+56,10,27,8,1544569762201,0,1544491800000,1544571000000
+57,13,75,20783217,1544575572498,246,1544488200000,1544575587641
+58,31,9,973286,1544575570804,0,1544488200000,1544575588061
+59,33,56,18729,1544575266335,16,1544488200000,1544575587723
+60,34,53,379955,1544575554493,0,1544488200000,1544575587808
+61,4,76,8861,1544575485661,324,1544500800000,1544575587634
+62,32,65,654,1544573560757,0,1544488200000,1544574600000
+63,4,77,11154,1544575207980,11,1544488200000,1544575587669
+64,15,78,13974992,1544575556069,9,1544488200000,1544575587798
+65,26,1,234369,1544575570813,6024,1544488200000,1544575587952
+66,9,44,19029006,1544575571645,0,1544488200000,1544575587677
+67,12,55,304458,1544575397477,5,1544488200000,1544575587674
+68,4,23,90053,1544575481890,7,1544488200000,1544575587788
+69,12,15,20680712,1544575556069,32,1544488200000,1544575587782
+70,10,9,56878,1544575522176,0,1544488200000,1544575587882
+71,13,43,14646996,1544575571980,145,1544488200000,1544575587689
+72,23,10,985553,1544575571604,0,1544488200000,1544575588060
+73,35,52,176,1544573823589,0,1544488200000,1544574600000
+74,34,79,237982,1544575571933,0,1544488200000,1544575587954
+75,9,6,33794745,1544575571645,0,1544488200000,1544575587875
+76,24,80,319715,1544575500355,1004,1544488200000,1544575587658
+77,30,74,13072,1544575568510,59,1544488200000,1544575587716
+78,34,45,1403232,1544575555851,0,1544488200000,1544575587827
+79,35,45,26,1544571310584,0,1544488200000,1544572800000
+80,4,56,32518,1544575275982,75,1544488200000,1544575587870
+81,18,51,3548918,1544575571596,30,1544488200000,1544575587738
+82,36,56,764,1544574548576,0,1544488200000,1544574600000
+83,9,39,578577,1544575571747,0,1544488200000,1544575587662
+84,5,42,6512590,1544575570949,1296924,1544488200000,1544575587762
+85,37,81,73,1544572581996,0,1544493600000,1544572800000
+86,1,45,9094,1544575240906,0,1544488200000,1544575588090
+87,18,65,3369864,1544575571631,6,1544488200000,1544575587848
+88,4,40,68388,1544575520781,80,1544488200000,1544575587934
+89,38,56,99784,1544575571751,0,1544563800000,1544575587662
+90,8,65,167207,1544575421914,1,1544488200000,1544575587725
+91,35,53,8112,1544575271597,0,1544488200000,1544575588026
+92,32,50,1984413,1544575571681,1983415,1544488200000,1544575587723
+93,14,57,37252,1544575265116,1136,1544488200000,1544575588025
+94,9,56,1473126,1544575571747,0,1544488200000,1544575587672
+95,9,38,1,1544565534137,0,1544563800000,1544565600000
+96,39,38,100667,1544575571750,1,1544563800000,1544575587848
+97,40,56,3,1544552474213,0,1544488200000,1544553000000
+98,33,7,8719,1544575265175,77,1544488200000,1544575587674
+99,41,82,38627,1544557965110,0,1544488200000,1544558400000
+100,22,13,587165,1544575520505,1,1544488200000,1544575587935
+101,37,83,72562,1544575498524,206,1544488200000,1544575587934
+102,42,56,10500513,1544575570958,76483,1544488200000,1544575588061
+103,43,84,16824540,1544575571994,0,1544488200000,1544575587641
+104,44,56,30148726,1544575571973,0,1544488200000,1544575588099
+105,37,85,46,1544569236656,0,1544497200000,1544571000000
+106,31,44,5277998,1544575572135,0,1544488200000,1544575587909
+107,31,56,3891685,1544575571930,0,1544488200000,1544575588087
+108,29,7,170461,1544575499257,11,1544488200000,1544575587699
+109,4,86,611347,1544575568839,1,1544488200000,1544575588024
+110,30,57,13140,1544575218424,23,1544488200000,1544575587914
+111,27,44,5779855,1544575569418,0,1544488200000,1544575587937
+112,24,87,3984995,1544575570003,106528,1544488200000,1544575587704
+113,16,30,2,1544559967407,0,1544558400000,1544560200000
+114,1,43,2674151,1544575594133,49,1544488200000,1544575587953
+115,9,71,18737197,1544575571645,0,1544488200000,1544575587979
+116,4,21,217364,1544575482983,290,1544488200000,1544575587723
+117,4,66,10386,1544575280832,204,1544488200000,1544575587797
+118,25,50,844962,1544575554067,28241,1544488200000,1544575587815
+119,4,88,151657,1544575488509,12,1544488200000,1544575588060
+120,37,89,7126,1544575152362,0,1544488200000,1544575587866
+121,25,51,832484,1544575554075,5,1544488200000,1544575587723
+122,11,79,586462,1544575592045,0,1544488200000,1544575587763
+123,11,75,1297487,1544575592045,0,1544488200000,1544575587693
diff --git a/service-graph/snapshotter/src/test/resources/serviceGraph_nodes.csv b/service-graph/snapshotter/src/test/resources/serviceGraph_nodes.csv
new file mode 100644
index 000000000..6f38ed66c
--- /dev/null
+++ b/service-graph/snapshotter/src/test/resources/serviceGraph_nodes.csv
@@ -0,0 +1,90 @@
+id,name,X-HAYSTACK-INFRASTRUCTURE-PROVIDER,tier
+38,bronn-service,aws,
+19,front-door-service,"aws,dc","3,1,2"
+68,pricing-engine,aws,
+54,front-door-service,dc,
+77,drogo-api,,
+30,ticket-service,aws,
+71,shopping-user-service,aws,
+10,api-service,aws,
+15,payment-service,aws,
+88,user-profile-service,aws,
+63,lists-service,aws,
+20,front-door-service,aws,1
+66,shopping-cart,aws,
+80,bolton-service,,
+81,ecommerce-service,aws,
+14,boss-service,dc,
+6,shopping-pricing,aws,
+70,insurance-shopping-service,aws,
+65,seo-service,aws,
+41,brienne-service,aws,
+45,provideradapter-service,aws,
+55,fx,aws,
+2,daily-data-update-service,dc,
+34,help-service,aws,
+11,detail-service,dc,
+46,async-service,aws,
+26,info-site-service,aws,
+22,front-door-service,"aws,dc",1
+39,baratheon-service,aws,
+89,shae-service,dc,
+53,adapter-aws,aws,
+31,progressive-webapp-api,aws,
+37,front-door-service,dc,1
+85,domain-service,aws,
+62,lpt-web,aws,
+69,third-party-provider-service,aws,
+40,varys-service,aws,
+78,lannister-service,aws,
+7,authentication-service,,
+76,targaryen-web,aws,
+61,hers-web,aws,
+25,mormont-service,aws,
+82,geo-service,aws,
+1,detail-service,"aws,dc",
+4,internet-proxy,,
+86,session-service,aws,
+24,front-door-service,dc,2
+33,endurance-service,aws,
+72,third-party-api-service,aws,
+49,userinteraction-web,aws,
+9,shopping-search-service,aws,
+17,front-door-service,"aws,dc","1,2"
+13,search-service,"aws,dc",
+64,mars,aws,
+12,new-shopping-pricing,aws,
+67,api-customer,aws,
+8,stark-service,aws,
+27,shopping-detail-service,aws,
+36,tips-service,aws,
+44,shopping-content-service,aws,
+83,cache-service,aws,
+58,checkout-payment-domain-service,dc,
+16,boss-service,aws,
+23,loom-service,aws,
+79,rules-service,aws,
+48,rails-web,aws,
+5,westeros-service,aws,
+35,tyrion-service,aws,
+75,controller-service,aws,
+51,template-service,aws,
+60,his-web,aws,
+28,multishop,aws,
+87,greyjoy-service,aws,
+84,airpricingservice,aws,
+32,guide-service,aws,
+18,forge-service,aws,
+47,front-door-service,"aws,dc",
+73,compositor-service,aws,
+42,hodor-service,aws,
+43,margaery-service,aws,
+52,chargeback-service,aws,
+59,location-service,"aws,dc",
+56,location-service,aws,
+29,front-door-service,"aws,dc",2
+21,satellite,aws,
+3,search-service,dc,
+74,melisandre-service,aws,
+50,context-service,aws,
+57,booking-service,aws,
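
The two CSVs above are the snapshot's normalized form: `serviceGraph_nodes.csv` assigns every service (together with its provider and tier tags) a numeric id, and `serviceGraph_edges.csv` refers to services only by those ids. A minimal standalone sketch of how the files join back into named edges — illustrative only, not part of this change, and assuming both CSVs sit in the working directory — could look like:

```scala
import scala.io.Source

object SnapshotJoinSketch {
  // Split a CSV line, honoring double-quoted fields such as "aws,dc"
  private def splitCsv(line: String): Vector[String] = {
    val fields = Vector.newBuilder[String]
    val current = new StringBuilder
    var inQuotes = false
    line.foreach {
      case '"'              => inQuotes = !inQuotes
      case ',' if !inQuotes => fields += current.toString; current.clear()
      case ch               => current += ch
    }
    fields += current.toString
    fields.result()
  }

  // Read a CSV file, skipping the header row
  private def readRows(fileName: String): List[Vector[String]] = {
    val source = Source.fromFile(fileName)
    try source.getLines().drop(1).map(splitCsv).toList
    finally source.close()
  }

  def main(args: Array[String]): Unit = {
    // serviceGraph_nodes.csv columns: id,name,X-HAYSTACK-INFRASTRUCTURE-PROVIDER,tier
    val idToName = readRows("serviceGraph_nodes.csv").map(r => r(0) -> r(1)).toMap

    // serviceGraph_edges.csv columns: id,sourceId,destinationId,statsCount,statsLastSeen,statsErrorCount,...
    readRows("serviceGraph_edges.csv").foreach { r =>
      println(s"${idToName(r(1))} -> ${idToName(r(2))} (count=${r(3)}, errors=${r(5)})")
    }
  }
}
```
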
diff --git a/service-graph/snapshotter/src/test/scala/com/expedia/www/haystack/service/graph/snapshotter/MainSpec.scala b/service-graph/snapshotter/src/test/scala/com/expedia/www/haystack/service/graph/snapshotter/MainSpec.scala
new file mode 100644
index 000000000..9d4876b0d
--- /dev/null
+++ b/service-graph/snapshotter/src/test/scala/com/expedia/www/haystack/service/graph/snapshotter/MainSpec.scala
@@ -0,0 +1,204 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.service.graph.snapshotter
+
+import java.io.File
+import java.nio.file.{Files, Path}
+import java.time.{Clock, Instant}
+import java.util.concurrent.TimeUnit.HOURS
+
+import com.amazonaws.services.s3.AmazonS3
+import com.expedia.www.haystack.service.graph.snapshot.store.Constants.{DotCsv, SlashEdges, SlashNodes}
+import com.expedia.www.haystack.service.graph.snapshot.store.S3SnapshotStore.createItemName
+import com.expedia.www.haystack.service.graph.snapshot.store.{FileSnapshotStore, S3SnapshotStore, SnapshotStore}
+import com.expedia.www.haystack.service.graph.snapshotter.Main.{ServiceGraphUrlRequiredMsg, StringStoreClassRequiredMsg, UrlBaseRequiredMsg}
+import org.mockito.Matchers.any
+import org.mockito.Mockito.{times, verify, verifyNoMoreInteractions, when}
+import org.scalatest.mockito.MockitoSugar
+import org.scalatest.{BeforeAndAfter, FunSpec, Matchers}
+import org.slf4j.Logger
+import scalaj.http.{HttpRequest, HttpResponse}
+
+import scala.io.{BufferedSource, Codec, Source}
+
+class MainSpec extends FunSpec with Matchers with MockitoSugar with BeforeAndAfter with SnapshotStore {
+ private var mockLogger: Logger = _
+ private var realLogger: Logger = _
+
+ private var mockFactory: Factory = _
+ private var realFactory: Factory = _
+
+ private var mockClock: Clock = _
+ private var realClock: Clock = _
+
+ private var mockAmazonS3: AmazonS3 = _
+ private var realAmazonS3: AmazonS3 = _
+
+ private val mockHttpRequest = mock[HttpRequest]
+
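+  // Reads a test resource into a String, joining lines with "\n" and appending a trailing newline to match the snapshot files.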
+ private def readFile(fileName: String): String = {
+ implicit val codec: Codec = Codec.UTF8
+ lazy val bufferedSource: BufferedSource = Source.fromResource(fileName)
+ val fileContents = bufferedSource.getLines.mkString("\n")
+ bufferedSource.close()
+ fileContents + "\n"
+ }
+
+ private val body = readFile("serviceGraph.json")
+ private val edges = readFile("serviceGraph_edges.csv")
+ private val nodes = readFile("serviceGraph_nodes.csv")
+ private val httpResponse: HttpResponse[String] = new HttpResponse[String](body = body, code = 0, headers = Map())
+ private val now = Instant.now()
+
+ private var tempDirectory: Path = _
+
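+  // Each test swaps the Main/S3SnapshotStore singletons for mocks before running, then restores the real objects afterwards.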
+ before {
+    saveRealObjectsThatWillBeReplacedWithMocks()
+ createMocks()
+ replaceRealObjectsWithMocks()
+
+ tempDirectory = Files.createTempDirectory(this.getClass.getSimpleName)
+
+    def saveRealObjectsThatWillBeReplacedWithMocks(): Unit = {
+ realLogger = Main.logger
+ realFactory = Main.factory
+ realClock = Main.clock
+ realAmazonS3 = S3SnapshotStore.amazonS3
+ }
+
+ def createMocks(): Unit = {
+ mockLogger = mock[Logger]
+ mockFactory = mock[Factory]
+ mockClock = mock[Clock]
+ mockAmazonS3 = mock[AmazonS3]
+ }
+
+ def replaceRealObjectsWithMocks(): Unit = {
+ Main.logger = mockLogger
+ Main.factory = mockFactory
+ Main.clock = mockClock
+ S3SnapshotStore.amazonS3 = mockAmazonS3
+ }
+ }
+
+ after {
+ restoreRealObjects()
+ recursiveDelete(tempDirectory.toFile)
+ verifyNoMoreInteractions(mockLogger)
+ verifyNoMoreInteractions(mockFactory)
+ verifyNoMoreInteractions(mockClock)
+ verifyNoMoreInteractions(mockAmazonS3)
+
+    def restoreRealObjects(): Unit = {
+      Main.logger = realLogger
+      Main.factory = realFactory
+      Main.clock = realClock
+      S3SnapshotStore.amazonS3 = realAmazonS3
+    }
+
+    def recursiveDelete(file: File): Unit = {
+ if (file.isDirectory)
+ Option(file.listFiles).map(_.toList).getOrElse(Nil).foreach(recursiveDelete)
+ file.delete
+ }
+ }
+
+ describe("Main.main() called with no arguments") {
+ it("should log an error") {
+ Main.main(Array())
+ verify(mockLogger).error(ServiceGraphUrlRequiredMsg)
+ }
+ }
+
+ describe("Main.main() called with one argument") {
+ it("should log an error") {
+ Main.main(Array(serviceGraphUrlBase))
+ verify(mockLogger).error(StringStoreClassRequiredMsg)
+ }
+ }
+
+  private val fullyQualifiedFileSnapshotStoreClassName = new FileSnapshotStore().getClass.getCanonicalName
+
+ describe("Main.main() called with two arguments") {
+ it("should log an error") {
+      Main.main(Array(serviceGraphUrlBase, fullyQualifiedFileSnapshotStoreClassName))
+ verify(mockLogger).error(UrlBaseRequiredMsg)
+ }
+ }
+
+ val serviceGraphUrlBase: String = "http://apis/graph/servicegraph"
+ val serviceGraphUrl: String = serviceGraphUrlBase + Main.ServiceGraphUrlSuffix
+
+ describe("Main.main() called with FileSnapshotStore arguments") {
+ it("should create a FileSnapshotStore, write to it, then call purge()") {
+ def verifyDirectoryIsEmptyToProveThatPurgeWasCalled = {
+ tempDirectory.toFile.listFiles().length shouldBe 0
+ }
+
+ when(mockFactory.createHttpRequest(any(), any())).thenReturn(mockHttpRequest)
+ when(mockHttpRequest.asString).thenReturn(httpResponse)
+ when(mockClock.instant()).thenReturn(now)
+
+      Main.main(Array(serviceGraphUrlBase, fullyQualifiedFileSnapshotStoreClassName, tempDirectory.toString))
+
+ verifyDirectoryIsEmptyToProveThatPurgeWasCalled
+ verifiesForCallToServiceGraphUrl(1)
+ }
+ }
+
+ describe("Main.main() called with all S3SnapshotStore arguments") {
+ it("should create an S3SnapshotStore, write to it, then call purge()") {
+ val bucketName = "haystack-snapshots"
+ val folderName = "hourly-snapshots"
+ val fileNameBase = createIso8601FileName(now)
+ when(mockFactory.createHttpRequest(any(), any())).thenReturn(mockHttpRequest)
+ when(mockHttpRequest.asString).thenReturn(httpResponse)
+ when(mockClock.instant()).thenReturn(now)
+
+ Main.main(Array(serviceGraphUrlBase, new S3SnapshotStore().getClass.getCanonicalName, bucketName, folderName, "1000"))
+
+ verifiesForCallToServiceGraphUrl(2)
+ verify(mockAmazonS3).doesBucketExistV2(bucketName)
+ verify(mockAmazonS3).createBucket(bucketName)
+ verify(mockAmazonS3).putObject(bucketName, createItemName(folderName, fileNameBase + SlashEdges + DotCsv), edges)
+ verify(mockAmazonS3).putObject(bucketName, createItemName(folderName, fileNameBase + SlashNodes + DotCsv), nodes)
+ }
+ }
+
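+  // Shared verifications: the request URL must be built from "now minus one hour", and asString invoked the expected number of times.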
+ private def verifiesForCallToServiceGraphUrl(wantedNumberOfInvocations: Int) = {
+ verify(mockFactory).createHttpRequest(serviceGraphUrl, now.toEpochMilli - HOURS.toMillis(1))
+ verify(mockHttpRequest, times(wantedNumberOfInvocations)).asString
+ verify(mockClock).instant()
+ }
+
+ describe("Factory.createHttpRequest()") {
+ it("should properly construct the URL") {
+ val factory = new Factory
+ val httpRequest = factory.createHttpRequest(serviceGraphUrl, now.toEpochMilli)
+ val url = httpRequest.url
+ url should startWith(serviceGraphUrlBase)
+ url should endWith(Main.ServiceGraphUrlSuffix.format(now.toEpochMilli))
+ }
+ }
+
+ //noinspection NotImplementedCode
+ def build(constructorArguments: Array[String]): SnapshotStore = ???
+ //noinspection NotImplementedCode
+ def read(instant: java.time.Instant): Option[String] = ???
+ //noinspection NotImplementedCode
+  def write(instant: java.time.Instant, content: String): AnyRef = ???
+}
diff --git a/traces/.gitignore b/traces/.gitignore
new file mode 100644
index 000000000..5383dcbfc
--- /dev/null
+++ b/traces/.gitignore
@@ -0,0 +1,13 @@
+*.class
+*.iml
+*.log
+.classpath
+.project
+logs/
+target/
+.idea/
+node_modules
+package-lock.json
+*.ipr
+*.iws
+reader/isHealthy
diff --git a/traces/.mvn/wrapper/MavenWrapperDownloader.java b/traces/.mvn/wrapper/MavenWrapperDownloader.java
new file mode 100755
index 000000000..fa4f7b499
--- /dev/null
+++ b/traces/.mvn/wrapper/MavenWrapperDownloader.java
@@ -0,0 +1,110 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+import java.net.*;
+import java.io.*;
+import java.nio.channels.*;
+import java.util.Properties;
+
+public class MavenWrapperDownloader {
+
+ /**
+ * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
+ */
+ private static final String DEFAULT_DOWNLOAD_URL =
+ "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar";
+
+ /**
+ * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
+ * use instead of the default one.
+ */
+ private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
+ ".mvn/wrapper/maven-wrapper.properties";
+
+ /**
+ * Path where the maven-wrapper.jar will be saved to.
+ */
+ private static final String MAVEN_WRAPPER_JAR_PATH =
+ ".mvn/wrapper/maven-wrapper.jar";
+
+ /**
+ * Name of the property which should be used to override the default download url for the wrapper.
+ */
+ private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
+
+ public static void main(String args[]) {
+ System.out.println("- Downloader started");
+ File baseDirectory = new File(args[0]);
+ System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
+
+ // If the maven-wrapper.properties exists, read it and check if it contains a custom
+ // wrapperUrl parameter.
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
+ String url = DEFAULT_DOWNLOAD_URL;
+ if(mavenWrapperPropertyFile.exists()) {
+ FileInputStream mavenWrapperPropertyFileInputStream = null;
+ try {
+ mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
+ Properties mavenWrapperProperties = new Properties();
+ mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
+ url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
+ } catch (IOException e) {
+ System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
+ } finally {
+ try {
+ if(mavenWrapperPropertyFileInputStream != null) {
+ mavenWrapperPropertyFileInputStream.close();
+ }
+ } catch (IOException e) {
+ // Ignore ...
+ }
+ }
+ }
+        System.out.println("- Downloading from: " + url);
+
+ File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
+ if(!outputFile.getParentFile().exists()) {
+ if(!outputFile.getParentFile().mkdirs()) {
+ System.out.println(
+                    "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
+ }
+ }
+ System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
+ try {
+ downloadFileFromURL(url, outputFile);
+ System.out.println("Done");
+ System.exit(0);
+ } catch (Throwable e) {
+ System.out.println("- Error downloading");
+ e.printStackTrace();
+ System.exit(1);
+ }
+ }
+
+ private static void downloadFileFromURL(String urlString, File destination) throws Exception {
+ URL website = new URL(urlString);
+ ReadableByteChannel rbc;
+ rbc = Channels.newChannel(website.openStream());
+ FileOutputStream fos = new FileOutputStream(destination);
+ fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
+ fos.close();
+ rbc.close();
+ }
+
+}
diff --git a/traces/.mvn/wrapper/maven-wrapper.jar b/traces/.mvn/wrapper/maven-wrapper.jar
new file mode 100755
index 000000000..01e679973
Binary files /dev/null and b/traces/.mvn/wrapper/maven-wrapper.jar differ
diff --git a/traces/.mvn/wrapper/maven-wrapper.properties b/traces/.mvn/wrapper/maven-wrapper.properties
new file mode 100755
index 000000000..00d32aab1
--- /dev/null
+++ b/traces/.mvn/wrapper/maven-wrapper.properties
@@ -0,0 +1 @@
+distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.5.4/apache-maven-3.5.4-bin.zip
\ No newline at end of file
diff --git a/traces/.travis.yml b/traces/.travis.yml
new file mode 100644
index 000000000..ec88dceaf
--- /dev/null
+++ b/traces/.travis.yml
@@ -0,0 +1,39 @@
+sudo: required
+
+language: java
+
+jdk:
+- openjdk8
+
+dist: trusty
+
+services:
+ - docker
+
+cache:
+ directories:
+ - $HOME/.m2
+
+install:
+ - java -XX:+PrintFlagsFinal -version
+ - ./mvnw --version
+
+addons:
+ hosts:
+ - elasticsearch
+ - cassandra
+
+env:
+ global:
+ - BRANCH=${TRAVIS_BRANCH}
+ - TAG=${TRAVIS_TAG}
+ - SHA=${TRAVIS_COMMIT}
+
+script:
+ # build, create docker image
+ # upload to dockerhub only for master(non PR) and tag scenario
+ - if ([ "$TRAVIS_BRANCH" == "master" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ]) || [ -n "$TRAVIS_TAG" ]; then make release; else make all; fi
+
+notifications:
+ email:
+ - haystack-notifications@expedia.com
diff --git a/traces/CONTRIBUTING.md b/traces/CONTRIBUTING.md
new file mode 100644
index 000000000..8883eee74
--- /dev/null
+++ b/traces/CONTRIBUTING.md
@@ -0,0 +1,19 @@
+## Bugs
+
+We use GitHub Issues for our bug reporting. Please make sure the bug isn't already listed before opening a new issue.
+
+## Development
+
+All work on Haystack happens directly on GitHub. Core Haystack team members will review opened pull requests.
+
+## Requests
+
+If there is a feature you would like to see added, please open an issue in the respective repository or in the general Haystack repo.
+
+## Contributing to Documentation
+
+To contribute to documentation, you can directly modify the corresponding .md files in the docs directory under the base haystack repository, and submit a pull request. Once your PR is merged, the documentation is automatically built and deployed to https://expediadotcom.github.io/haystack.
+
+## License
+
+By contributing to Haystack, you agree that your contributions will be licensed under its Apache License.
diff --git a/traces/LICENSE b/traces/LICENSE
new file mode 100644
index 000000000..9f133f5cd
--- /dev/null
+++ b/traces/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2017 Expedia, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/traces/Makefile b/traces/Makefile
new file mode 100644
index 000000000..04d19557d
--- /dev/null
+++ b/traces/Makefile
@@ -0,0 +1,38 @@
+.PHONY: all clean build report-coverage indexer reader backends release
+
+PWD := $(shell pwd)
+
+clean:
+ ./mvnw clean
+
+build: clean
+ ./mvnw package
+
+all: clean reader indexer backends
+
+report-coverage:
+ ./mvnw scoverage:report-only
+
+indexer: build_indexer
+ cd indexer && $(MAKE) integration_test
+
+reader: build_reader
+ cd reader && $(MAKE) integration_test
+
+build_reader:
+ ./mvnw -q package -DfinalName=haystack-trace-reader -pl reader -am
+
+build_indexer:
+ ./mvnw -q package -DfinalName=haystack-trace-indexer -pl indexer -am
+
+#Backends
+backends:
+ cd backends && $(MAKE) all
+
+# build all and release
+release: clean indexer reader backends
+ cd indexer && $(MAKE) docker_build && $(MAKE) release
+ cd reader && $(MAKE) docker_build && $(MAKE) release
+ cd backends && $(MAKE) release
+ ./.travis/deploy.sh
+
diff --git a/traces/README.md b/traces/README.md
new file mode 100644
index 000000000..0c36558fb
--- /dev/null
+++ b/traces/README.md
@@ -0,0 +1,52 @@
+[![Build Status](https://travis-ci.org/ExpediaDotCom/haystack-traces.svg?branch=master)](https://travis-ci.org/ExpediaDotCom/haystack-traces)
+[![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](https://github.com/ExpediaDotCom/haystack/blob/master/LICENSE)
+
+# haystack-traces
+
+This repo contains the haystack components that build traces, store them in Cassandra and ElasticSearch (the latter for indexing), and provide a gRPC endpoint for accessing them.
+
+
+## Building
+
+Since this repo contains haystack-idl as a submodule, use the following command to clone the repo:
+
+* git clone --recursive git@github.com:ExpediaDotCom/haystack-traces.git .
+
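+If you already cloned without `--recursive`, you can fetch the haystack-idl submodule afterwards with:
+
+```
+git submodule update --init --recursive
+```
+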
+#### Prerequisite:
+
+* Make sure you have Java 1.8
+* Make sure you have Maven 3.3.9 or higher
+* Make sure you have Docker 1.13 or higher
+
+
+Note: for Mac users, installing Docker for Mac covers the last two prerequisites.
+
+#### Build
+
+For a full build, including unit tests, integration tests, and the docker image build, you can run:
+
+```
+make all
+```
+
+#### Integration Test
+
+#### Prerequisite:
+1. Install docker using Docker Tools, or native docker if on a Mac
+2. Verify that docker-compose is installed by running the following command; install it if it is missing.
+
+```
+docker-compose
+```
+
+Run the build and integration tests for individual components with
+
+```
+make indexer
+```
+
+and
+
+```
+make reader
+```
diff --git a/traces/Release.md b/traces/Release.md
new file mode 100644
index 000000000..5226c6e84
--- /dev/null
+++ b/traces/Release.md
@@ -0,0 +1,20 @@
+# Releasing
+
+Currently we publish this repo to Docker Hub and the Nexus central repository.
+
+## How to release and publish
+
+* Git tagging:
+
+```
+git tag -a <tag-name> -m "Release description..."
+git push origin <tag-name>
+```
+
+`<tag-name>` must follow the semantic versioning scheme.
+
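+For example, releasing version 1.0.9 (the version number here is illustrative) would look like:
+
+```
+git tag -a 1.0.9 -m "haystack-traces release 1.0.9"
+git push origin 1.0.9
+```
+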
+Alternatively, one can tag using the releases UI: https://github.com/ExpediaDotCom/haystack-traces/releases
+
+It is preferred to create an annotated tag using `git tag -a` and then use the release UI to add release notes for the tag.
+
+* After the release is completed, please update the `pom.xml` files to the next `-SNAPSHOT` version to match the next release.
diff --git a/traces/backends/Makefile b/traces/backends/Makefile
new file mode 100644
index 000000000..e55c9c4ed
--- /dev/null
+++ b/traces/backends/Makefile
@@ -0,0 +1,23 @@
+.PHONY: all cassandra memory release
+
+PWD := $(shell pwd)
+
+all: cassandra memory
+
+cassandra: build_cassandra
+ cd cassandra && $(MAKE) integration_test
+
+build_cassandra:
+ cd ../ && ./mvnw -q package -DfinalName=haystack-trace-backend-cassandra -pl backends/cassandra -am
+
+
+memory: build_memory
+ cd memory && $(MAKE) integration_test
+
+build_memory:
+ cd ../ && ./mvnw -q package -DfinalName=haystack-trace-backend-memory -pl backends/memory -am
+
+# release all backends
+release:
+ cd cassandra && $(MAKE) docker_build && $(MAKE) release
+ cd memory && $(MAKE) docker_build && $(MAKE) release
diff --git a/traces/backends/cassandra/Makefile b/traces/backends/cassandra/Makefile
new file mode 100644
index 000000000..6fb32b1fe
--- /dev/null
+++ b/traces/backends/cassandra/Makefile
@@ -0,0 +1,24 @@
+.PHONY: docker_build prepare_integration_test_env integration_test release
+
+export DOCKER_ORG := expediadotcom
+export DOCKER_IMAGE_NAME := haystack-trace-backend-cassandra
+PWD := $(shell pwd)
+SERVICE_DEBUG_ON ?= false
+
+docker_build:
+ # build docker image using existing app jar
+ docker build -t $(DOCKER_IMAGE_NAME) -f build/docker/Dockerfile .
+
+prepare_integration_test_env: docker_build
+ # prepare environment to run integration tests against
+ docker-compose -f build/integration-tests/docker-compose.yml -p sandbox up -d
+ sleep 30
+
+integration_test: prepare_integration_test_env
+	cd ../../ && ./mvnw -q integration-test -pl backends/cassandra -am
+ docker-compose -f build/integration-tests/docker-compose.yml -p sandbox stop
+ docker rm $(shell docker ps -a -q)
+ docker volume rm $(shell docker volume ls -q)
+
+release:
+ ../../deployment/scripts/publish-to-docker-hub.sh
diff --git a/traces/backends/cassandra/README.md b/traces/backends/cassandra/README.md
new file mode 100644
index 000000000..a9528bb35
--- /dev/null
+++ b/traces/backends/cassandra/README.md
@@ -0,0 +1,15 @@
+# Storage Backend - Cassandra
+
+
+gRPC service which can read and write spans to a Cassandra cluster.
+
+## Technical Details
+
+In order to understand this service, we recommend reading the details of the [haystack](https://github.com/ExpediaDotCom/haystack) project.
+This service reads from [Cassandra](http://cassandra.apache.org/). API endpoints are exposed as [gRPC](https://grpc.io/) endpoints.
+
+More details will be filled in as we go.
+
+## Building
+
+Check the details on [Build Section](../README.md)
diff --git a/traces/backends/cassandra/build/docker/Dockerfile b/traces/backends/cassandra/build/docker/Dockerfile
new file mode 100644
index 000000000..f9a428197
--- /dev/null
+++ b/traces/backends/cassandra/build/docker/Dockerfile
@@ -0,0 +1,30 @@
+FROM openjdk:8-jre
+MAINTAINER Haystack
+
+ENV APP_NAME haystack-trace-backend-cassandra
+ENV APP_HOME /app/bin
+ENV JMXTRANS_AGENT jmxtrans-agent-1.2.6
+ENV DOCKERIZE_VERSION v0.6.1
+
+ADD https://github.com/jwilder/dockerize/releases/download/${DOCKERIZE_VERSION}/dockerize-alpine-linux-amd64-${DOCKERIZE_VERSION}.tar.gz dockerize.tar.gz
+RUN tar xzf dockerize.tar.gz
+RUN chmod +x dockerize
+
+RUN mkdir -p ${APP_HOME}
+
+COPY target/${APP_NAME}.jar ${APP_HOME}/
+COPY build/docker/start-app.sh ${APP_HOME}/
+RUN chmod +x ${APP_HOME}/start-app.sh
+
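+# grpc_health_probe lets container orchestrators health-check the gRPC endpoint without a custom client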
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+ wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+ chmod +x /bin/grpc_health_probe
+
+COPY build/docker/jmxtrans-agent.xml ${APP_HOME}/
+ADD https://github.com/jmxtrans/jmxtrans-agent/releases/download/${JMXTRANS_AGENT}/${JMXTRANS_AGENT}.jar ${APP_HOME}/
+
+WORKDIR ${APP_HOME}
+
+EXPOSE 8090
+
+ENTRYPOINT ["./start-app.sh"]
diff --git a/traces/backends/cassandra/build/docker/jmxtrans-agent.xml b/traces/backends/cassandra/build/docker/jmxtrans-agent.xml
new file mode 100644
index 000000000..6237d6900
--- /dev/null
+++ b/traces/backends/cassandra/build/docker/jmxtrans-agent.xml
@@ -0,0 +1,48 @@
+<jmxtrans-agent>
+    <queries>
+        <!-- JVM and application metric queries -->
+    </queries>
+    <outputWriter class="org.jmxtrans.agent.GraphitePlainTextTcpOutputWriter">
+        <host>${HAYSTACK_GRAPHITE_HOST:monitoring-influxdb-graphite.kube-system.svc}</host>
+        <port>${HAYSTACK_GRAPHITE_PORT:2003}</port>
+        <enabled>${HAYSTACK_GRAPHITE_ENABLED:true}</enabled>
+        <namePrefix>haystack.traces.backend-cassandra.#hostname#.</namePrefix>
+    </outputWriter>
+    <collectIntervalInSeconds>30</collectIntervalInSeconds>
+</jmxtrans-agent>
diff --git a/traces/backends/cassandra/build/docker/start-app.sh b/traces/backends/cassandra/build/docker/start-app.sh
new file mode 100755
index 000000000..ba2c65569
--- /dev/null
+++ b/traces/backends/cassandra/build/docker/start-app.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+[ -z "$JAVA_XMS" ] && JAVA_XMS=1024m
+[ -z "$JAVA_XMX" ] && JAVA_XMX=1024m
+[ -z "$JAVA_GC_OPTS" ] && JAVA_GC_OPTS="-XX:+UseG1GC"
+
+set -e
+JAVA_OPTS="${JAVA_OPTS} \
+-javaagent:${APP_HOME}/${JMXTRANS_AGENT}.jar=${APP_HOME}/jmxtrans-agent.xml \
+${JAVA_GC_OPTS} \
+-Xmx${JAVA_XMX} \
+-Xms${JAVA_XMS} \
+-XX:+ExitOnOutOfMemoryError \
+-Dapplication.name=${APP_NAME} \
+-Dapplication.home=${APP_HOME}"
+
+if [[ -n "$SERVICE_DEBUG_ON" ]] && [[ "$SERVICE_DEBUG_ON" == true ]]; then
+ JAVA_OPTS="$JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y"
+fi
+
+exec java ${JAVA_OPTS} -jar "${APP_HOME}/${APP_NAME}.jar"
diff --git a/traces/backends/cassandra/build/integration-tests/docker-compose.yml b/traces/backends/cassandra/build/integration-tests/docker-compose.yml
new file mode 100644
index 000000000..0f9ade550
--- /dev/null
+++ b/traces/backends/cassandra/build/integration-tests/docker-compose.yml
@@ -0,0 +1,9 @@
+version: '3'
+services:
+ cassandra:
+ image: cassandra:3.11.0
+ environment:
+ MAX_HEAP_SIZE: 256m
+ HEAP_NEWSIZE: 256m
+ ports:
+ - "9042:9042"
diff --git a/traces/backends/cassandra/pom.xml b/traces/backends/cassandra/pom.xml
new file mode 100644
index 000000000..c3f6acd6f
--- /dev/null
+++ b/traces/backends/cassandra/pom.xml
@@ -0,0 +1,173 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <parent>
+        <artifactId>haystack-trace-backends</artifactId>
+        <groupId>com.expedia.www</groupId>
+        <version>1.0.9-SNAPSHOT</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>haystack-trace-backend-cassandra</artifactId>
+    <packaging>jar</packaging>
+
+    <properties>
+        <mainClass>com.expedia.www.haystack.trace.storage.backends.cassandra.Service</mainClass>
+        <finalName>${project.artifactId}-${project.version}</finalName>
+        <cassandra.driver.version>3.3.0.1</cassandra.driver.version>
+        <!-- property name lost in this excerpt; its value was 3.6.0 -->
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.datastax.cassandra</groupId>
+            <artifactId>cassandra-driver-extras</artifactId>
+            <version>${cassandra.driver.version}</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>com.google.guava</groupId>
+                    <artifactId>guava</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-protobuf</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-stub</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-services</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-netty</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-handler</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.httpcomponents</groupId>
+            <artifactId>httpclient</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-ec2</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <finalName>${finalName}</finalName>
+        <plugins>
+            <plugin>
+                <groupId>org.scalatest</groupId>
+                <artifactId>scalatest-maven-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>test</id>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                        <configuration>
+                            <!-- element names lost in this excerpt; the values were
+                                 "cass1,cass2" and "cassandra_cql_schema_1" -->
+                            <membersOnlySuites>com.expedia.www.haystack.trace.storage.backends.cassandra.unit</membersOnlySuites>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>integration-test</id>
+                        <phase>integration-test</phase>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                        <configuration>
+                            <!-- element name lost in this excerpt; the value was
+                                 "/src/backends/cassandra/build/integration-tests/docker-app.conf" -->
+                            <membersOnlySuites>com.expedia.www.haystack.trace.storage.backends.cassandra.integration</membersOnlySuites>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <transformers>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
+                                    <resource>reference.conf</resource>
+                                </transformer>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                                    <mainClass>${mainClass}</mainClass>
+                                </transformer>
+                            </transformers>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>net.alchim31.maven</groupId>
+                <artifactId>scala-maven-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.scalastyle</groupId>
+                <artifactId>scalastyle-maven-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/traces/backends/cassandra/src/main/resources/config/base.conf b/traces/backends/cassandra/src/main/resources/config/base.conf
new file mode 100644
index 000000000..f2e82924a
--- /dev/null
+++ b/traces/backends/cassandra/src/main/resources/config/base.conf
@@ -0,0 +1,62 @@
+health.status.path = "/app/isHealthy"
+
+service {
+ port = 8090
+ ssl {
+ enabled = false
+ cert.path = ""
+ private.key.path = ""
+ }
+ max.message.size = 52428800 # 50MB in bytes
+}
+
+cassandra {
+ # multiple endpoints can be provided as comma separated list
+ endpoints = "cassandra"
+
+  # enable the auto.discovery mode; if true, the endpoints above are ignored and the discovery
+  # mechanism is used to find cassandra nodes. Today only the aws node discovery provider is supported
+ auto.discovery {
+ enabled: false
+ // aws: {
+ // region: "us-west-2"
+ // tags: {
+ // Role: haystack-cassandra
+ // Environment: ewetest
+ // }
+ // }
+ }
+
+ connections {
+ max.per.host = 50
+ read.timeout.ms = 30000
+ conn.timeout.ms = 10000
+ keep.alive = true
+ }
+
+ retries {
+ max = 10
+ backoff {
+ initial.ms = 100
+ factor = 2
+ }
+ }
+
+ consistency.level = "one"
+
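+  # flattened list of (exception class, downgraded consistency level) pairs: even entries are
+  # error class names, odd entries the consistency level to fall back to when that error is seen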
+ on.error.consistency.level = [
+ "com.datastax.driver.core.exceptions.UnavailableException",
+ "any"
+ ]
+
+ ttl.sec = 259200
+
+ keyspace: {
+    # auto-creates the keyspace and table in cassandra (if absent)
+    # if the schema field is empty or not present, then no operation is performed
+ auto.create.schema = "CREATE KEYSPACE IF NOT EXISTS haystack WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor' : 1 } AND durable_writes = false; CREATE TABLE IF NOT EXISTS haystack.traces (id varchar, ts timestamp, spans blob, PRIMARY KEY ((id), ts)) WITH CLUSTERING ORDER BY (ts ASC) AND compaction = { 'class' : 'DateTieredCompactionStrategy', 'max_sstable_age_days': '3' } AND gc_grace_seconds = 86400;"
+
+ name: "haystack"
+ table.name: "traces"
+ }
+}
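A hedged sketch of how the keys above resolve with plain Typesafe Config; the service itself loads this file through `ConfigurationLoader.loadConfigFileWithEnvOverrides()` from haystack-commons, which additionally layers environment-variable overrides:

```scala
import com.typesafe.config.ConfigFactory

object BaseConfSketch extends App {
  val config = ConfigFactory.parseResources("config/base.conf").resolve()
  println(config.getInt("service.port"))                     // 8090
  println(config.getString("cassandra.endpoints"))           // "cassandra"
  println(config.getInt("cassandra.ttl.sec"))                // 259200, i.e. 3 days
  println(config.getString("cassandra.keyspace.table.name")) // "traces"
}
```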
diff --git a/traces/backends/cassandra/src/main/resources/logback.xml b/traces/backends/cassandra/src/main/resources/logback.xml
new file mode 100644
index 000000000..7ef04ff2c
--- /dev/null
+++ b/traces/backends/cassandra/src/main/resources/logback.xml
@@ -0,0 +1,27 @@
+<configuration>
+    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+        <!-- boolean setting lost in this excerpt; its value was "true" -->
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="async" class="ch.qos.logback.classic.AsyncAppender">
+        <queueSize>${HAYSTACK_LOG_QUEUE_SIZE:-500}</queueSize>
+        <discardingThreshold>${HAYSTACK_LOG_DISCARD_THRESHOLD:-0}</discardingThreshold>
+        <appender-ref ref="console"/>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="async"/>
+    </root>
+</configuration>
\ No newline at end of file
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/Service.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/Service.scala
new file mode 100644
index 000000000..f0bd2b0e0
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/Service.scala
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trace.storage.backends.cassandra
+
+import java.io.File
+
+import com.codahale.metrics.JmxReporter
+import com.expedia.www.haystack.commons.logger.LoggerUtils
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.storage.backends.cassandra.client.{CassandraClusterFactory, CassandraSession}
+import com.expedia.www.haystack.trace.storage.backends.cassandra.config.ProjectConfiguration
+import com.expedia.www.haystack.trace.storage.backends.cassandra.services.{GrpcHealthService, SpansPersistenceService}
+import com.expedia.www.haystack.trace.storage.backends.cassandra.store.{CassandraTraceRecordReader, CassandraTraceRecordWriter}
+import io.grpc.netty.NettyServerBuilder
+import org.slf4j.{Logger, LoggerFactory}
+
+object Service extends MetricsSupport {
+ private val LOGGER: Logger = LoggerFactory.getLogger("CassandraBackend")
+
+ // primary executor for service's async tasks
+ implicit private val executor = scala.concurrent.ExecutionContext.global
+
+ def main(args: Array[String]): Unit = {
+ startJmxReporter()
+ startService()
+ }
+
+ private def startJmxReporter(): Unit = {
+ JmxReporter
+ .forRegistry(metricRegistry)
+ .build()
+ .start()
+ }
+
+ private def startService(): Unit = {
+ try {
+ val config = new ProjectConfiguration
+ val serviceConfig = config.serviceConfig
+ val cassandraSession = new CassandraSession(config.cassandraConfig.clientConfig, new CassandraClusterFactory)
+
+ val tracerRecordWriter = new CassandraTraceRecordWriter(cassandraSession, config.cassandraConfig)
+ val tracerRecordReader = new CassandraTraceRecordReader(cassandraSession, config.cassandraConfig.clientConfig)
+
+ val serverBuilder = NettyServerBuilder
+ .forPort(serviceConfig.port)
+ .directExecutor()
+ .addService(new GrpcHealthService())
+ .addService(new SpansPersistenceService(reader = tracerRecordReader, writer = tracerRecordWriter)(executor))
+
+      // enable TLS on the server if configured
+ if (serviceConfig.ssl.enabled) {
+ serverBuilder.useTransportSecurity(new File(serviceConfig.ssl.certChainFilePath), new File(serviceConfig.ssl.privateKeyPath))
+ }
+
+ // default max message size in grpc is 4MB. if our max message size is greater than 4MB then we should configure this
+ // limit in the netty based grpc server.
+ if (serviceConfig.maxSizeInBytes > 4 * 1024 * 1024) serverBuilder.maxMessageSize(serviceConfig.maxSizeInBytes)
+
+ val server = serverBuilder.build().start()
+
+ LOGGER.info(s"server started, listening on ${serviceConfig.port}")
+
+ Runtime.getRuntime.addShutdownHook(new Thread() {
+ override def run(): Unit = {
+ LOGGER.info("shutting down gRPC server since JVM is shutting down")
+ cassandraSession.close()
+ server.shutdown()
+ LOGGER.info("server has been shutdown now")
+ }
+ })
+
+ server.awaitTermination()
+ } catch {
+ case ex: Throwable =>
+ ex.printStackTrace()
+ LOGGER.error("Fatal error observed while running the app", ex)
+ LoggerUtils.shutdownLogger()
+ System.exit(1)
+ }
+ }
+}
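Since the server only raises its limit above gRPC's 4MB default, callers fetching large span buffers need the matching client-side setting; a hedged sketch (host and port are placeholders):

```scala
import io.grpc.ManagedChannelBuilder

// raise the client's inbound limit to the backend's 50MB default (service.max.message.size)
val channel = ManagedChannelBuilder
  .forAddress("localhost", 8090)
  .maxInboundMessageSize(52428800)
  .usePlaintext(true) // grpc-java signature of this era; newer versions use usePlaintext()
  .build()
```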
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/AwsNodeDiscoverer.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/AwsNodeDiscoverer.scala
new file mode 100644
index 000000000..5cf7dacc5
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/AwsNodeDiscoverer.scala
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.client
+
+import java.util.Collections
+
+import com.amazonaws.regions.{Region, Regions}
+import com.amazonaws.services.ec2.AmazonEC2Client
+import com.amazonaws.services.ec2.model.{DescribeInstancesRequest, Filter, Instance, InstanceStateName}
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+
+object AwsNodeDiscoverer {
+ private val LOGGER = LoggerFactory.getLogger(AwsNodeDiscoverer.getClass)
+
+ /**
+ * discovers the EC2 ip addresses on AWS for a given region and set of tags
+ * @param region aws region
+ * @param tags a set of ec2 node tags
+    * @return the private IP addresses of the discovered nodes
+ */
+ def discover(region: String,
+ tags: Map[String, String]): Seq[String] = {
+ LOGGER.info(s"discovering EC2 nodes for region=$region, and tags=${tags.mkString(",")}")
+
+ val awsRegion = Region.getRegion(Regions.fromName(region))
+    val client: AmazonEC2Client = new AmazonEC2Client().withRegion(awsRegion)
+ try {
+ discover(client, tags)
+ } catch {
+ case ex: Exception =>
+        LOGGER.error(s"Failed to discover EC2 nodes for region=$region and tags=$tags with reason", ex)
+ throw new RuntimeException(ex)
+ } finally {
+ client.shutdown()
+ }
+ }
+
+ /**
+ * discovers the EC2 ip addresses on AWS for a given region and set of tags
+ * @param client ec2 client
+ * @param tags a set of ec2 node tags
+    * @return the private IP addresses of the running instances that match the tags
+ */
+ private[haystack] def discover(client: AmazonEC2Client, tags: Map[String, String]): Seq[String] = {
+ val filters = tags.map { case (key, value) => new Filter("tag:" + key, Collections.singletonList(value)) }
+ val request = new DescribeInstancesRequest().withFilters(filters.asJavaCollection)
+
+ val result = client.describeInstances(request)
+
+ val nodes = result.getReservations
+ .asScala
+ .flatMap(_.getInstances.asScala)
+ .filter(isValidInstance)
+ .map(_.getPrivateIpAddress)
+
+ LOGGER.info("EC2 nodes discovered [{}]", nodes.mkString(","))
+ nodes
+ }
+
+ // check if an ec2 instance is in running state
+ private def isValidInstance(instance: Instance): Boolean = {
+ // instance should be in running state
+ InstanceStateName.Running.toString.equals(instance.getState.getName)
+ }
+}
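For illustration, a call using the sample region and tags from the commented-out `auto.discovery.aws` block in base.conf:

```scala
// returns the private IPs of running EC2 instances matching these tags
val contactPoints: Seq[String] = AwsNodeDiscoverer.discover(
  region = "us-west-2",
  tags = Map("Role" -> "haystack-cassandra", "Environment" -> "ewetest"))
```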
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/CassandraClusterFactory.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/CassandraClusterFactory.scala
new file mode 100644
index 000000000..2201c7f2d
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/CassandraClusterFactory.scala
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.client
+
+import com.datastax.driver.core._
+import com.datastax.driver.core.policies.{DefaultRetryPolicy, LatencyAwarePolicy, RoundRobinPolicy, TokenAwarePolicy}
+import com.datastax.driver.extras.codecs.date.SimpleTimestampCodec
+import com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities.{AwsNodeDiscoveryConfiguration, ClientConfiguration, CredentialsConfiguration}
+
+class CassandraClusterFactory extends ClusterFactory {
+
+ private def discoverNodes(nodeDiscoveryConfig: Option[AwsNodeDiscoveryConfiguration]): Seq[String] = {
+ nodeDiscoveryConfig match {
+ case Some(awsDiscovery) => AwsNodeDiscoverer.discover(awsDiscovery.region, awsDiscovery.tags)
+ case _ => Nil
+ }
+ }
+
+
+ override def buildCluster(config: ClientConfiguration): Cluster = {
+ val contactPoints = if (config.autoDiscoverEnabled) discoverNodes(config.awsNodeDiscovery) else config.endpoints
+ require(contactPoints.nonEmpty, "cassandra contact points can't be empty!!!")
+
+ val tokenAwarePolicy = new TokenAwarePolicy(new LatencyAwarePolicy.Builder(new RoundRobinPolicy()).build())
+ val authProvider = fetchAuthProvider(config.plaintextCredentials)
+ val cluster = Cluster.builder()
+ .withClusterName("cassandra-cluster")
+ .addContactPoints(contactPoints: _*)
+ .withRetryPolicy(DefaultRetryPolicy.INSTANCE)
+ .withAuthProvider(authProvider)
+ .withSocketOptions(new SocketOptions()
+ .setKeepAlive(config.socket.keepAlive)
+ .setConnectTimeoutMillis(config.socket.connectionTimeoutMillis)
+ .setReadTimeoutMillis(config.socket.readTimeoutMills))
+ .withLoadBalancingPolicy(tokenAwarePolicy)
+ .withPoolingOptions(new PoolingOptions().setMaxConnectionsPerHost(HostDistance.LOCAL, config.socket.maxConnectionPerHost))
+ .build()
+ cluster.getConfiguration.getCodecRegistry.register(SimpleTimestampCodec.instance)
+
+ cluster
+ }
+
+ private def fetchAuthProvider(plaintextCredentials: Option[CredentialsConfiguration]): AuthProvider = {
+ plaintextCredentials match {
+ case Some(credentialsConfiguration) => new PlainTextAuthProvider(credentialsConfiguration.username, credentialsConfiguration.password)
+ case _ => AuthProvider.NONE
+ }
+ }
+}
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/CassandraSession.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/CassandraSession.scala
new file mode 100644
index 000000000..f2e24a103
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/CassandraSession.scala
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.client
+
+import java.nio.ByteBuffer
+import java.util.Date
+
+import com.datastax.driver.core._
+import com.datastax.driver.core.exceptions.NoHostAvailableException
+import com.datastax.driver.core.querybuilder.QueryBuilder
+import com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities.{ClientConfiguration, KeyspaceConfiguration}
+import org.slf4j.LoggerFactory
+import com.expedia.www.haystack.trace.storage.backends.cassandra.client.CassandraTableSchema._
+
+import scala.collection.JavaConverters._
+import scala.util.{Failure, Success, Try}
+
+object CassandraSession {
+ private val LOGGER = LoggerFactory.getLogger(classOf[CassandraSession])
+
+ def connect(config: ClientConfiguration,
+ factory: ClusterFactory): (Cluster, Session) = this.synchronized {
+ def tryConnect(): (Cluster, Session) = {
+ val cluster = factory.buildCluster(config)
+ Try(cluster.connect()) match {
+ case Success(session) => (cluster, session)
+ case Failure(e: NoHostAvailableException) =>
+ LOGGER.warn("Failed to connect to cassandra. Will try again", e)
+ Thread.sleep(5000)
+ tryConnect()
+ case Failure(e) => throw e
+ }
+ }
+
+ tryConnect()
+ }
+}
+
+class CassandraSession(config: ClientConfiguration, factory: ClusterFactory) {
+ import CassandraSession._
+
+ /**
+    * lazily builds the cluster and session objects used to interact with the cassandra cluster.
+    * Callers invoke `ensureKeyspace` to verify (or auto-create) the keyspace and table.
+ */
+ lazy val (cluster, session) = connect(config, factory)
+
+ def ensureKeyspace(keyspace: KeyspaceConfiguration): Unit = {
+    LOGGER.info("ensuring keyspace exists with {}", keyspace)
+ CassandraTableSchema.ensureExists(keyspace.name, keyspace.table, keyspace.autoCreateSchema, session)
+ }
+
+ lazy val selectRawTracesPreparedStmt: PreparedStatement = {
+ import QueryBuilder.bindMarker
+ session.prepare(
+ QueryBuilder
+ .select()
+ .from(config.tracesKeyspace.name, config.tracesKeyspace.table)
+ .where(QueryBuilder.in(ID_COLUMN_NAME, bindMarker(ID_COLUMN_NAME))))
+ }
+
+
+ def createSpanInsertPreparedStatement(keyspace: KeyspaceConfiguration): PreparedStatement = {
+ import QueryBuilder.{bindMarker, ttl}
+
+ val insert = QueryBuilder
+ .insertInto(keyspace.name, keyspace.table)
+ .value(ID_COLUMN_NAME, bindMarker(ID_COLUMN_NAME))
+ .value(TIMESTAMP_COLUMN_NAME, bindMarker(TIMESTAMP_COLUMN_NAME))
+ .value(SPANS_COLUMN_NAME, bindMarker(SPANS_COLUMN_NAME))
+ .using(ttl(keyspace.recordTTLInSec))
+
+ session.prepare(insert)
+ }
+
+ /**
+ * close the session and client
+ */
+ def close(): Unit = {
+ Try(session.close())
+ Try(cluster.close())
+ }
+
+
+ /**
+ * create bound statement for writing to cassandra table
+ *
+ * @param traceId trace id
+ * @param spanBufferBytes data bytes of spanBuffer that belong to a given trace id
+ * @param consistencyLevel consistency level for cassandra write
+ * @param insertTraceStatement prepared statement to use
+    * @return bound insert statement for the given trace
+ */
+ def newTraceInsertBoundStatement(traceId: String,
+ spanBufferBytes: Array[Byte],
+ consistencyLevel: ConsistencyLevel,
+ insertTraceStatement: PreparedStatement): Statement = {
+ new BoundStatement(insertTraceStatement)
+ .setString(ID_COLUMN_NAME, traceId)
+ .setTimestamp(TIMESTAMP_COLUMN_NAME, new Date())
+ .setBytes(SPANS_COLUMN_NAME, ByteBuffer.wrap(spanBufferBytes))
+ .setConsistencyLevel(consistencyLevel)
+ }
+
+
+ /**
+ * create new select statement for retrieving Raw Traces data for traceIds
+ *
+ * @param traceIds list of trace id
+ * @return statement for select query for traceIds
+ */
+ def newSelectRawTracesBoundStatement(traceIds: List[String]): Statement = {
+ new BoundStatement(selectRawTracesPreparedStmt).setList(ID_COLUMN_NAME, traceIds.asJava)
+ }
+
+ /**
+ * executes the statement async and return the resultset future
+ *
+ * @param statement prepared statement to be executed
+ * @return future object of ResultSet
+ */
+ def executeAsync(statement: Statement): ResultSetFuture = session.executeAsync(statement)
+}
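A hedged sketch of how the store classes later in this change drive `CassandraSession`; `clientConfig` and `spanBufferBytes` are placeholders assumed to come from `ProjectConfiguration` and the caller respectively:

```scala
import com.datastax.driver.core.ConsistencyLevel

val session = new CassandraSession(clientConfig, new CassandraClusterFactory)
session.ensureKeyspace(clientConfig.tracesKeyspace)

// write path: one prepared statement, reused for every insert
val insertStmt = session.createSpanInsertPreparedStatement(clientConfig.tracesKeyspace)
val write = session.newTraceInsertBoundStatement("trace-1", spanBufferBytes, ConsistencyLevel.ONE, insertStmt)
session.executeAsync(write)

// read path: a single IN query over the requested trace ids
val read = session.newSelectRawTracesBoundStatement(List("trace-1", "trace-2"))
session.executeAsync(read)
```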
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/CassandraTableSchema.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/CassandraTableSchema.scala
new file mode 100644
index 000000000..8e8490ae6
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/CassandraTableSchema.scala
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.client
+
+import com.datastax.driver.core._
+import org.slf4j.LoggerFactory
+
+object CassandraTableSchema {
+ private val LOGGER = LoggerFactory.getLogger(CassandraTableSchema.getClass)
+
+ val ID_COLUMN_NAME = "id"
+ val TIMESTAMP_COLUMN_NAME = "ts"
+ val SPANS_COLUMN_NAME = "spans"
+ val SERVICE_COLUMN_NAME = "service_name"
+ val OPERATION_COLUMN_NAME = "operation_name"
+
+
+  /**
+    * ensures that the keyspace and table exist in cassandra
+    *
+    * @param keyspace cassandra keyspace
+    * @param tableName table name in cassandra
+    * @param autoCreateSchema if present, apply the cql schema that creates the keyspace and table,
+    *                         else throw an exception if the keyspace or table is not found
+    * @param session cassandra client session
+    */
+ def ensureExists(keyspace: String, tableName: String, autoCreateSchema: Option[String], session: Session): Unit = {
+ val keyspaceMetadata = session.getCluster.getMetadata.getKeyspace(keyspace)
+ if (keyspaceMetadata == null || keyspaceMetadata.getTable(tableName) == null) {
+ autoCreateSchema match {
+ case Some(schema) => applyCqlSchema(session, schema)
+        case _ => throw new RuntimeException(s"Failed to find the keyspace=$keyspace and/or table=$tableName")
+ }
+ }
+ }
+
+ /**
+ * apply the cql schema
+ *
+    * @param session session object to interact with cassandra
+ * @param schema schema data
+ */
+ private def applyCqlSchema(session: Session, schema: String): Unit = {
+ try {
+ for (cmd <- schema.split(";")) {
+ if (cmd.nonEmpty) session.execute(cmd)
+ }
+ } catch {
+ case ex: Exception =>
+ LOGGER.error(s"Failed to apply cql $schema with following reason:", ex)
+ throw new RuntimeException(ex)
+ }
+ }
+}
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/ClusterFactory.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/ClusterFactory.scala
new file mode 100644
index 000000000..9d643935e
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/client/ClusterFactory.scala
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.client
+
+import com.datastax.driver.core.Cluster
+import com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities.ClientConfiguration
+
+/**
+ * factory that builds the cluster. this is useful for testing other classes
+ */
+trait ClusterFactory {
+ def buildCluster(config: ClientConfiguration): Cluster
+}
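Since the trait exists to make collaborators testable, a unit test can swap in a canned cluster; a minimal sketch, assuming a mocked `Cluster` built elsewhere:

```scala
import com.datastax.driver.core.Cluster
import com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities.ClientConfiguration

class StubClusterFactory(cluster: Cluster) extends ClusterFactory {
  // hand back the pre-built (e.g. mocked) cluster instead of dialing cassandra
  override def buildCluster(config: ClientConfiguration): Cluster = cluster
}
```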
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/ProjectConfiguration.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/ProjectConfiguration.scala
new file mode 100644
index 000000000..c747e3813
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/ProjectConfiguration.scala
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.config
+
+import com.datastax.driver.core.ConsistencyLevel
+import com.expedia.www.haystack.commons.config.ConfigurationLoader
+import com.expedia.www.haystack.commons.retries.RetryOperation
+import com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities._
+import com.typesafe.config.Config
+import org.apache.commons.lang3.StringUtils
+
+import scala.collection.JavaConverters._
+
+class ProjectConfiguration {
+ private val config = ConfigurationLoader.loadConfigFileWithEnvOverrides()
+
+ val healthStatusFilePath: String = config.getString("health.status.path")
+
+ val serviceConfig: ServiceConfiguration = {
+ val serviceConfig = config.getConfig("service")
+
+ val ssl = serviceConfig.getConfig("ssl")
+ val sslConfig = SslConfiguration(ssl.getBoolean("enabled"), ssl.getString("cert.path"), ssl.getString("private.key.path"))
+
+ ServiceConfiguration(serviceConfig.getInt("port"), sslConfig, serviceConfig.getInt("max.message.size"))
+ }
+ /**
+ *
+ * cassandra configuration object
+ */
+ val cassandraConfig: CassandraConfiguration = {
+
+ def toConsistencyLevel(level: String) = ConsistencyLevel.values().find(_.toString.equalsIgnoreCase(level)).get
+
+ def consistencyLevelOnErrors(cs: Config) = {
+ val consistencyLevelOnErrors = cs.getStringList("on.error.consistency.level")
+ val consistencyLevelOnErrorList = scala.collection.mutable.ListBuffer[(Class[_], ConsistencyLevel)]()
+
+ var idx = 0
+ while (idx < consistencyLevelOnErrors.size()) {
+ val errorClass = consistencyLevelOnErrors.get(idx)
+ val level = consistencyLevelOnErrors.get(idx + 1)
+ consistencyLevelOnErrorList.+=((Class.forName(errorClass), toConsistencyLevel(level)))
+ idx = idx + 2
+ }
+
+ consistencyLevelOnErrorList.toList
+ }
+
+ def keyspaceConfig(kConfig: Config, ttl: Int): KeyspaceConfiguration = {
+ val autoCreateSchemaField = "auto.create.schema"
+ val autoCreateSchema = if (kConfig.hasPath(autoCreateSchemaField)
+ && StringUtils.isNotEmpty(kConfig.getString(autoCreateSchemaField))) {
+ Some(kConfig.getString(autoCreateSchemaField))
+ } else {
+ None
+ }
+
+ KeyspaceConfiguration(kConfig.getString("name"), kConfig.getString("table.name"), ttl, autoCreateSchema)
+ }
+
+ val cs = config.getConfig("cassandra")
+
+ val awsConfig: Option[AwsNodeDiscoveryConfiguration] =
+ if (cs.hasPath("auto.discovery.aws")) {
+ val aws = cs.getConfig("auto.discovery.aws")
+ val tags = aws.getConfig("tags")
+ .entrySet()
+ .asScala
+ .map(elem => elem.getKey -> elem.getValue.unwrapped().toString)
+ .toMap
+ Some(AwsNodeDiscoveryConfiguration(aws.getString("region"), tags))
+ } else {
+ None
+ }
+
+ val credentialsConfig: Option[CredentialsConfiguration] =
+ if (cs.hasPath("credentials")) {
+ Some(CredentialsConfiguration(cs.getString("credentials.username"), cs.getString("credentials.password")))
+ } else {
+ None
+ }
+
+ val socketConfig = cs.getConfig("connections")
+
+ val socket = SocketConfiguration(
+ socketConfig.getInt("max.per.host"),
+ socketConfig.getBoolean("keep.alive"),
+ socketConfig.getInt("conn.timeout.ms"),
+ socketConfig.getInt("read.timeout.ms"))
+
+ val consistencyLevel = toConsistencyLevel(cs.getString("consistency.level"))
+
+ CassandraConfiguration(
+ clientConfig = ClientConfiguration(
+ if (cs.hasPath("endpoints")) cs.getString("endpoints").split(",").toList else Nil,
+ cs.getBoolean("auto.discovery.enabled"),
+ awsConfig,
+ credentialsConfig,
+ keyspaceConfig(cs.getConfig("keyspace"), cs.getInt("ttl.sec")),
+ socket),
+ consistencyLevel = consistencyLevel,
+ retryConfig = RetryOperation.Config(
+ cs.getInt("retries.max"),
+ cs.getLong("retries.backoff.initial.ms"),
+ cs.getDouble("retries.backoff.factor")),
+ consistencyLevelOnErrors(cs))
+ }
+
+}
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/AwsNodeDiscoveryConfiguration.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/AwsNodeDiscoveryConfiguration.scala
new file mode 100644
index 000000000..6eda87137
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/AwsNodeDiscoveryConfiguration.scala
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities
+
+/**
+ * defines the parameters required for aws discovery
+ * @param region aws region e.g. us-east-1, us-west-2
+ * @param tags: ec2 tags
+ */
+case class AwsNodeDiscoveryConfiguration(region: String, tags: Map[String, String])
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/ClientConfiguration.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/ClientConfiguration.scala
new file mode 100644
index 000000000..c425932dc
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/ClientConfiguration.scala
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities
+
+import com.datastax.driver.core.ConsistencyLevel
+import com.expedia.www.haystack.commons.retries.RetryOperation
+import org.apache.commons.lang3.StringUtils
+
+
+/** define the keyspace and table information in cassandra
+ *
+ * @param name : name of cassandra keyspace
+ * @param table : name of cassandra table
+ * @param recordTTLInSec : ttl of record in sec
+ * @param autoCreateSchema : apply cql and create keyspace and tables if not exist, optional
+ */
+case class KeyspaceConfiguration(name: String,
+ table: String,
+ recordTTLInSec: Int = -1,
+ autoCreateSchema: Option[String] = None) {
+ require(StringUtils.isNotEmpty(name))
+ require(StringUtils.isNotEmpty(table))
+}
+
+/**
+ * defines the configuration parameters for cassandra client
+ *
+ * @param endpoints : list of cassandra endpoints
+ * @param autoDiscoverEnabled : if autodiscovery is enabled, then 'endpoints' config parameter will be ignored
+ * @param awsNodeDiscovery : discovery configuration for aws, optional. This is applied only if autoDiscoverEnabled is true
+ * @param tracesKeyspace : cassandra keyspace for traces
+ * @param socket : socket configuration like maxConnections, timeouts and keepAlive
+ */
+case class ClientConfiguration(endpoints: List[String],
+ autoDiscoverEnabled: Boolean,
+ awsNodeDiscovery: Option[AwsNodeDiscoveryConfiguration],
+ plaintextCredentials: Option[CredentialsConfiguration],
+ tracesKeyspace: KeyspaceConfiguration,
+ socket: SocketConfiguration)
+
+/**
+  * @param clientConfig: cassandra client configuration
+  * @param consistencyLevel: consistency level of writes
+  * @param retryConfig: retry configuration if writes fail
+  * @param consistencyLevelOnError: downgraded consistency level on write error
+  */
+case class CassandraConfiguration(clientConfig: ClientConfiguration,
+ consistencyLevel: ConsistencyLevel,
+ retryConfig: RetryOperation.Config,
+ consistencyLevelOnError: List[(Class[_], ConsistencyLevel)]) {
+ def writeConsistencyLevel(error: Throwable): ConsistencyLevel = {
+ if (error == null) {
+ consistencyLevel
+ } else {
+ consistencyLevelOnError
+ .find(errorClass => errorClass._1.isAssignableFrom(error.getClass))
+ .map(_._2).getOrElse(writeConsistencyLevel(error.getCause))
+ }
+ }
+}
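To make the downgrade logic concrete, here is a standalone mirror of `writeConsistencyLevel` using the values from base.conf: an `UnavailableException` anywhere in the cause chain downgrades the write to ANY, anything else keeps the configured ONE:

```scala
import com.datastax.driver.core.ConsistencyLevel
import com.datastax.driver.core.exceptions.UnavailableException

val default = ConsistencyLevel.ONE
val onError: List[(Class[_], ConsistencyLevel)] =
  List((classOf[UnavailableException], ConsistencyLevel.ANY))

// walk the cause chain until a configured error class matches, else keep the default
def levelFor(error: Throwable): ConsistencyLevel =
  if (error == null) default
  else onError.find(_._1.isAssignableFrom(error.getClass)).map(_._2)
    .getOrElse(levelFor(error.getCause))
```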
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/CredentialsConfiguration.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/CredentialsConfiguration.scala
new file mode 100644
index 000000000..b0f74c313
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/CredentialsConfiguration.scala
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities
+
+case class CredentialsConfiguration(username: String,
+ password: String)
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/ServiceConfiguration.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/ServiceConfiguration.scala
new file mode 100644
index 000000000..2f574e5c1
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/ServiceConfiguration.scala
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities
+
+/**
+  * @param port port to start the grpc server on
+ */
+case class ServiceConfiguration(port: Int, ssl: SslConfiguration, maxSizeInBytes: Int)
+case class SslConfiguration(enabled: Boolean, certChainFilePath: String, privateKeyPath: String)
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/SocketConfiguration.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/SocketConfiguration.scala
new file mode 100644
index 000000000..147f38efa
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/config/entities/SocketConfiguration.scala
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities
+
+case class SocketConfiguration(maxConnectionPerHost: Int,
+ keepAlive: Boolean,
+ connectionTimeoutMillis: Int,
+ readTimeoutMills: Int)
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/metrics/AppMetricNames.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/metrics/AppMetricNames.scala
new file mode 100644
index 000000000..bc1166fac
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/metrics/AppMetricNames.scala
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.metrics
+
+object AppMetricNames {
+ val CASSANDRA_READ_TIME = "cassandra.read.time"
+ val CASSANDRA_READ_FAILURES = "cassandra.read.failures"
+ val CASSANDRA_WRITE_TIME = "cassandra.write.time"
+ val CASSANDRA_WRITE_FAILURE = "cassandra.write.failure"
+ val CASSANDRA_WRITE_WARNINGS = "cassandra.write.warnings"
+}
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/services/GrpcHandler.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/services/GrpcHandler.scala
new file mode 100644
index 000000000..72a5b4003
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/services/GrpcHandler.scala
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.services
+
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.storage.backends.cassandra.services.GrpcHandler._
+import com.google.protobuf.GeneratedMessageV3
+import io.grpc.Status
+import io.grpc.stub.StreamObserver
+import org.slf4j.{Logger, LoggerFactory}
+
+import scala.concurrent.{ExecutionContextExecutor, Future}
+import scala.util.{Failure, Success}
+
+object GrpcHandler {
+ protected val LOGGER: Logger = LoggerFactory.getLogger(classOf[GrpcHandler])
+}
+
+/**
+ * Handler for Grpc response
+ * populates responseObserver with response object or error accordingly
+ * takes care of corresponding logging and updating counters
+ *
+ * @param operationName : name of operation
+ * @param executor : executor service on which handler is invoked
+ */
+
+class GrpcHandler(operationName: String)(implicit val executor: ExecutionContextExecutor) extends MetricsSupport {
+ private val metricFriendlyOperationName = operationName.replace('/', '.')
+ private val timer = metricRegistry.timer(metricFriendlyOperationName)
+ private val failureMeter = metricRegistry.meter(s"$metricFriendlyOperationName.failures")
+
+ def handle[Rs](request: GeneratedMessageV3, responseObserver: StreamObserver[Rs])(op: => Future[Rs]): Unit = {
+ val time = timer.time()
+ op onComplete {
+ case Success(response) =>
+ responseObserver.onNext(response)
+ responseObserver.onCompleted()
+ time.stop()
+ LOGGER.debug(s"service invocation for operation=$operationName and request=${request.toString} completed successfully")
+
+ case Failure(ex) =>
+ responseObserver.onError(Status.fromThrowable(ex).asRuntimeException())
+ failureMeter.mark()
+ time.stop()
+ LOGGER.debug(s"service invocation for operation=$operationName and request=${request.toString} failed with error", ex)
+ }
+ }
+}
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/services/GrpcHealthService.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/services/GrpcHealthService.scala
new file mode 100644
index 000000000..966526f62
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/services/GrpcHealthService.scala
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.services
+
+import io.grpc.health.v1.{HealthCheckRequest, HealthCheckResponse, HealthGrpc}
+import io.grpc.stub.StreamObserver
+
+class GrpcHealthService extends HealthGrpc.HealthImplBase {
+
+ override def check(request: HealthCheckRequest, responseObserver: StreamObserver[HealthCheckResponse]): Unit = {
+ responseObserver.onNext(HealthCheckResponse
+ .newBuilder()
+ .setStatus(HealthCheckResponse.ServingStatus.SERVING)
+ .build())
+ responseObserver.onCompleted()
+ }
+}
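This is the service the Dockerfile's `grpc_health_probe` binary talks to; the same check can be issued from Scala with the grpc-services health stubs (host and port are placeholders):

```scala
import io.grpc.ManagedChannelBuilder
import io.grpc.health.v1.{HealthCheckRequest, HealthGrpc}

val channel = ManagedChannelBuilder.forAddress("localhost", 8090).usePlaintext(true).build()
val status = HealthGrpc.newBlockingStub(channel)
  .check(HealthCheckRequest.getDefaultInstance)
  .getStatus // expected: SERVING
channel.shutdown()
```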
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/services/SpansPersistenceService.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/services/SpansPersistenceService.scala
new file mode 100644
index 000000000..0a99e4ba4
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/services/SpansPersistenceService.scala
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.services
+
+import com.expedia.open.tracing.backend.WriteSpansResponse.ResultCode
+import com.expedia.open.tracing.backend._
+import com.expedia.www.haystack.trace.storage.backends.cassandra.store.{CassandraTraceRecordReader, CassandraTraceRecordWriter}
+import io.grpc.stub.StreamObserver
+
+import scala.collection.JavaConverters._
+import scala.concurrent.ExecutionContextExecutor
+
+class SpansPersistenceService(reader: CassandraTraceRecordReader,
+ writer: CassandraTraceRecordWriter)
+ (implicit val executor: ExecutionContextExecutor) extends StorageBackendGrpc.StorageBackendImplBase {
+
+ private val handleReadSpansResponse = new GrpcHandler(StorageBackendGrpc.METHOD_READ_SPANS.getFullMethodName)
+ private val handleWriteSpansResponse = new GrpcHandler(StorageBackendGrpc.METHOD_WRITE_SPANS.getFullMethodName)
+
+ override def writeSpans(request: WriteSpansRequest, responseObserver: StreamObserver[WriteSpansResponse]): Unit = {
+ handleWriteSpansResponse.handle(request, responseObserver) {
+ writer.writeTraceRecords(request.getRecordsList.asScala.toList) map (_ =>
+ WriteSpansResponse.newBuilder().setCode(ResultCode.SUCCESS).build())
+ }
+ }
+
+ /**
+ *
+ * read buffered spans from backend
+ *
+ */
+ override def readSpans(request: ReadSpansRequest, responseObserver: StreamObserver[ReadSpansResponse]): Unit = {
+
+ handleReadSpansResponse.handle(request, responseObserver) {
+ reader.readTraceRecords(request.getTraceIdsList.iterator().asScala.toList).map {
+ records => {
+ ReadSpansResponse.newBuilder()
+ .addAllRecords(records.asJava)
+ .build()
+ }
+ }
+ }
+ }
+}
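For completeness, a hedged sketch of building the write request this service accepts; the builder methods mirror the getters used above (`getRecordsList`, `getTraceId`, `getTimestamp`, `getSpans`), and the payload is a placeholder:

```scala
import com.expedia.open.tracing.backend.{TraceRecord, WriteSpansRequest}
import com.google.protobuf.ByteString

val spanBufferBytes: Array[Byte] = Array.emptyByteArray // placeholder serialized span buffer

val record = TraceRecord.newBuilder()
  .setTraceId("trace-1")
  .setTimestamp(System.currentTimeMillis())
  .setSpans(ByteString.copyFrom(spanBufferBytes))
  .build()

val request = WriteSpansRequest.newBuilder().addRecords(record).build()
```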
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/store/CassandraTraceRecordReadResultListener.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/store/CassandraTraceRecordReadResultListener.scala
new file mode 100644
index 000000000..c4fb0c08d
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/store/CassandraTraceRecordReadResultListener.scala
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.store
+
+import com.codahale.metrics.{Meter, Timer}
+import com.datastax.driver.core.exceptions.NoHostAvailableException
+import com.datastax.driver.core.{ResultSet, ResultSetFuture, Row}
+import com.expedia.open.tracing.api.Trace
+import com.expedia.open.tracing.backend.TraceRecord
+import com.expedia.www.haystack.trace.storage.backends.cassandra.client.CassandraTableSchema
+import com.google.protobuf.ByteString
+import org.slf4j.{Logger, LoggerFactory}
+
+import scala.collection.JavaConverters._
+import scala.concurrent.Promise
+import scala.util.{Failure, Success, Try}
+
+object CassandraTraceRecordReadResultListener {
+ protected val LOGGER: Logger = LoggerFactory.getLogger(classOf[CassandraTraceRecordReadResultListener])
+}
+
+class CassandraTraceRecordReadResultListener(asyncResult: ResultSetFuture,
+ timer: Timer.Context,
+ failure: Meter,
+ promise: Promise[Seq[TraceRecord]]) extends Runnable {
+
+ import CassandraTraceRecordReadResultListener._
+
+ override def run(): Unit = {
+ timer.close()
+
+ Try(asyncResult.get)
+ .flatMap(tryGetTraceRows)
+ .flatMap(mapTraceRecords)
+ match {
+ case Success(records) =>
+ promise.success(records)
+ case Failure(ex) =>
+ if (fatalError(ex)) {
+ LOGGER.error("Fatal error in reading from cassandra, tearing down the app", ex)
+ } else {
+ LOGGER.error("Failed in reading the record from cassandra", ex)
+ }
+ failure.mark()
+ promise.failure(ex)
+ }
+ }
+
+ private def fatalError(ex: Throwable): Boolean = {
+ if (ex.isInstanceOf[NoHostAvailableException]) true else ex.getCause != null && fatalError(ex.getCause)
+ }
+
+  // an empty result set is surfaced as a failure so the caller's promise completes exceptionally
+  private def tryGetTraceRows(resultSet: ResultSet): Try[Seq[Row]] = {
+    val rows = resultSet.all().asScala
+    if (rows.isEmpty) Failure(new RuntimeException("No rows returned for the requested trace ids")) else Success(rows)
+  }
+
+ private def mapTraceRecords(rows: Seq[Row]): Try[List[TraceRecord]] = {
+ Try {
+ rows.map(row => {
+ val spanBytes = row.getBytes(CassandraTableSchema.SPANS_COLUMN_NAME).array()
+ val timeStamp = row.getLong(CassandraTableSchema.TIMESTAMP_COLUMN_NAME)
+ val traceId = row.getString(CassandraTableSchema.ID_COLUMN_NAME)
+ val record = TraceRecord.newBuilder()
+ .setSpans(ByteString.copyFrom(spanBytes))
+ .setTimestamp(timeStamp)
+ .setTraceId(traceId)
+ .build()
+ record
+ }).toList
+ }
+ }
+}
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/store/CassandraTraceRecordReader.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/store/CassandraTraceRecordReader.scala
new file mode 100644
index 000000000..b99a93c70
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/store/CassandraTraceRecordReader.scala
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.store
+
+import com.expedia.open.tracing.backend.TraceRecord
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.storage.backends.cassandra.client.CassandraSession
+import com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities.ClientConfiguration
+import com.expedia.www.haystack.trace.storage.backends.cassandra.metrics.AppMetricNames
+import org.slf4j.LoggerFactory
+
+import scala.concurrent.{ExecutionContextExecutor, Future, Promise}
+
+class CassandraTraceRecordReader(cassandra: CassandraSession, config: ClientConfiguration)
+ (implicit val dispatcher: ExecutionContextExecutor) extends MetricsSupport {
+ private val LOGGER = LoggerFactory.getLogger(classOf[CassandraTraceRecordReader])
+
+ private lazy val readTimer = metricRegistry.timer(AppMetricNames.CASSANDRA_READ_TIME)
+ private lazy val readFailures = metricRegistry.meter(AppMetricNames.CASSANDRA_READ_FAILURES)
+
+ def readTraceRecords(traceIds: List[String]): Future[Seq[TraceRecord]] = {
+ val timer = readTimer.time()
+ val promise = Promise[Seq[TraceRecord]]
+
+ try {
+ val statement = cassandra.newSelectRawTracesBoundStatement(traceIds)
+ val asyncResult = cassandra.executeAsync(statement)
+ asyncResult.addListener(new CassandraTraceRecordReadResultListener(asyncResult, timer, readFailures, promise), dispatcher)
+ promise.future
+ } catch {
+ case ex: Exception =>
+ readFailures.mark()
+ timer.stop()
+ LOGGER.error("Failed to read raw traces with exception", ex)
+ Future.failed(ex)
+ }
+ }
+}
\ No newline at end of file
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/store/CassandraTraceRecordWriteResultListener.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/store/CassandraTraceRecordWriteResultListener.scala
new file mode 100644
index 000000000..f180fc4ab
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/store/CassandraTraceRecordWriteResultListener.scala
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.store
+
+import com.codahale.metrics.{Meter, Timer}
+import com.datastax.driver.core.ResultSetFuture
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.commons.retries.RetryOperation
+import com.expedia.www.haystack.trace.storage.backends.cassandra.metrics.AppMetricNames
+import org.slf4j.{Logger, LoggerFactory}
+
+import scala.collection.JavaConverters._
+
+object CassandraTraceRecordWriteResultListener extends MetricsSupport {
+ protected val LOGGER: Logger = LoggerFactory.getLogger(CassandraTraceRecordWriteResultListener.getClass)
+ protected val writeFailures: Meter = metricRegistry.meter(AppMetricNames.CASSANDRA_WRITE_FAILURE)
+ protected val writeWarnings: Meter = metricRegistry.meter(AppMetricNames.CASSANDRA_WRITE_WARNINGS)
+}
+
+class CassandraTraceRecordWriteResultListener(asyncResult: ResultSetFuture,
+ timer: Timer.Context,
+ retryOp: RetryOperation.Callback) extends Runnable {
+
+ import CassandraTraceRecordWriteResultListener._
+
+ /**
+    * this is invoked when the cassandra async write completes.
+    * We measure the time the write operation takes and record any warnings or errors
+ */
+ override def run(): Unit = {
+ try {
+ timer.close()
+
+ val result = asyncResult.get()
+ if (result != null &&
+ result.getExecutionInfo != null &&
+ result.getExecutionInfo.getWarnings != null &&
+ !result.getExecutionInfo.getWarnings.isEmpty) {
+        LOGGER.warn("Warning received in cassandra writes {}", result.getExecutionInfo.getWarnings.asScala.mkString(","))
+ writeWarnings.mark(result.getExecutionInfo.getWarnings.size())
+ }
+ if (retryOp != null) retryOp.onResult(result)
+ } catch {
+ case ex: Exception =>
+        LOGGER.error("Failed to write the record to cassandra", ex)
+ writeFailures.mark()
+ if (retryOp != null) retryOp.onError(ex, retry = true)
+ }
+ }
+}
\ No newline at end of file
diff --git a/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/store/CassandraTraceRecordWriter.scala b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/store/CassandraTraceRecordWriter.scala
new file mode 100644
index 000000000..edb25c664
--- /dev/null
+++ b/traces/backends/cassandra/src/main/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/store/CassandraTraceRecordWriter.scala
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.store
+
+import java.util.concurrent.atomic.AtomicInteger
+
+import com.expedia.open.tracing.backend.TraceRecord
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.commons.retries.RetryOperation._
+import com.expedia.www.haystack.trace.storage.backends.cassandra.client.CassandraSession
+import com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities.CassandraConfiguration
+import com.expedia.www.haystack.trace.storage.backends.cassandra.metrics.AppMetricNames
+import org.slf4j.LoggerFactory
+
+import scala.concurrent.{ExecutionContextExecutor, Future, Promise}
+import scala.util.{Failure, Success}
+
+class CassandraTraceRecordWriter(cassandra: CassandraSession,
+ config: CassandraConfiguration)(implicit val dispatcher: ExecutionContextExecutor)
+ extends MetricsSupport {
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[CassandraTraceRecordWriter])
+ private lazy val writeTimer = metricRegistry.timer(AppMetricNames.CASSANDRA_WRITE_TIME)
+ private lazy val writeFailures = metricRegistry.meter(AppMetricNames.CASSANDRA_WRITE_FAILURE)
+
+ cassandra.ensureKeyspace(config.clientConfig.tracesKeyspace)
+ private val spanInsertPreparedStmt = cassandra.createSpanInsertPreparedStatement(config.clientConfig.tracesKeyspace)
+
+ private def execute(record: TraceRecord): Future[Unit] = {
+
+ val promise = Promise[Unit]
+ // execute the request async with retry
+ withRetryBackoff(retryCallback => {
+ val timer = writeTimer.time()
+
+ // prepare the bound statement; a fallback write consistency level may be chosen based on the last error
+ val statement = cassandra.newTraceInsertBoundStatement(record.getTraceId,
+ record.getSpans.toByteArray,
+ config.writeConsistencyLevel(retryCallback.lastError()),
+ spanInsertPreparedStmt)
+
+ val asyncResult = cassandra.executeAsync(statement)
+ asyncResult.addListener(new CassandraTraceRecordWriteResultListener(asyncResult, timer, retryCallback), dispatcher)
+ },
+ config.retryConfig,
+ onSuccess = (_: Any) => promise.success(()),
+ onFailure = ex => {
+ writeFailures.mark()
+ LOGGER.error(s"Fail to write to cassandra after ${config.retryConfig.maxRetries} retry attempts for ${record.getTraceId}", ex)
+ promise.failure(ex)
+ })
+ promise.future
+ }
+
+ /**
+ * Writes the traceId and its spans to Cassandra, using the current timestamp as the sort key for writes to the
+ * same traceId. If the parallel writes exceed the max in-flight requests, we block, which applies backpressure
+ * upstream.
+ *
+ * @param traceRecords : trace records which need to be written
+ * @return
+ */
+ def writeTraceRecords(traceRecords: List[TraceRecord]): Future[Unit] = {
+ val promise = Promise[Unit]
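+ // countdown latch over all records: each completed write decrements it, and the promise resolves at zero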
+ val writableRecordsLatch = new AtomicInteger(traceRecords.size)
+ traceRecords.foreach(record => {
+ /* write spanBuffer for a given traceId */
+ execute(record).onComplete {
+ case Success(_) => if (writableRecordsLatch.decrementAndGet() == 0) {
+ promise.success(())
+ }
+ case Failure(ex) =>
+ // TODO: the response currently fails only if the last Cassandra write fails; ideally it should fail if any write fails
+ if (writableRecordsLatch.decrementAndGet() == 0) {
+ promise.failure(ex)
+ }
+ }
+ })
+ promise.future
+ }
+}
diff --git a/traces/backends/cassandra/src/test/resources/config/base.conf b/traces/backends/cassandra/src/test/resources/config/base.conf
new file mode 100644
index 000000000..e0e4bef90
--- /dev/null
+++ b/traces/backends/cassandra/src/test/resources/config/base.conf
@@ -0,0 +1,52 @@
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
+
+service {
+ port = 8090
+ ssl {
+ enabled = false
+ cert.path = "/ssl/cert"
+ private.key.path = "/ssl/private-key"
+ }
+ max.message.size = 52428800 # 50MB in bytes
+}
+
+cassandra {
+ # multiple endpoints can be provided as comma separated list
+ endpoints = "cassandra"
+
+ # if auto.discovery.enabled is true, the manually supplied endpoints (above) are ignored
+ auto.discovery {
+ enabled: false
+ ## optional AWS discovery
+ # aws: {
+ # region: "us-west-2"
+ # tags: {
+ # name: "cassandra"
+ # }
+ # }
+ }
+
+ connections {
+ max.per.host = 100
+ read.timeout.ms = 5000
+ conn.timeout.ms = 10000
+ keep.alive = true
+ }
+ ttl.sec = 86400
+
+ retries {
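+ # up to 'max' attempts, backing off initial.ms multiplied by 'factor' after each attempt (250ms, 500ms, 1s, ...)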
+ max = 10
+ backoff {
+ initial.ms = 250
+ factor = 2
+ }
+ }
+
+ keyspace {
+ name = "haystack"
+ table.name = "traces"
+ auto.create.schema = "CREATE KEYSPACE IF NOT EXISTS haystack WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor' : 1 } AND durable_writes = false; CREATE TABLE IF NOT EXISTS haystack.traces (id varchar, ts timestamp, spans blob, PRIMARY KEY ((id), ts)) WITH CLUSTERING ORDER BY (ts ASC) AND compaction = { 'class' : 'DateTieredCompactionStrategy', 'max_sstable_age_days': '3' } AND gc_grace_seconds = 86400;"
+ }
+}
diff --git a/traces/backends/cassandra/src/test/resources/logback-test.xml b/traces/backends/cassandra/src/test/resources/logback-test.xml
new file mode 100644
index 000000000..298193e01
--- /dev/null
+++ b/traces/backends/cassandra/src/test/resources/logback-test.xml
@@ -0,0 +1 @@
+
diff --git a/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/integration/BaseIntegrationTestSpec.scala b/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/integration/BaseIntegrationTestSpec.scala
new file mode 100644
index 000000000..159c950ad
--- /dev/null
+++ b/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/integration/BaseIntegrationTestSpec.scala
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.integration
+
+import java.nio.ByteBuffer
+import java.util.concurrent.Executors
+import java.util.{Date, UUID}
+
+import com.datastax.driver.core.querybuilder.QueryBuilder
+import com.datastax.driver.core.{Cluster, ResultSet, Session, SimpleStatement}
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.backend.{StorageBackendGrpc, TraceRecord}
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.storage.backends.cassandra.Service
+import com.expedia.www.haystack.trace.storage.backends.cassandra.client.CassandraTableSchema
+import com.google.protobuf.ByteString
+import io.grpc.ManagedChannelBuilder
+import io.grpc.health.v1.HealthGrpc
+import org.scalatest._
+
+import scala.collection.JavaConverters._
+
+trait BaseIntegrationTestSpec extends FunSpec with GivenWhenThen with Matchers with BeforeAndAfterAll with BeforeAndAfterEach {
+ protected var client: StorageBackendGrpc.StorageBackendBlockingStub = _
+
+ protected var healthCheckClient: HealthGrpc.HealthBlockingStub = _
+ private val CASSANDRA_ENDPOINT = "cassandra"
+ private val CASSANDRA_KEYSPACE = "haystack"
+ private val CASSANDRA_TABLE = "traces"
+
+ private val executors = Executors.newSingleThreadExecutor()
+
+ private var cassandraSession: Session = _
+
+ override def beforeAll() {
+ executors.submit(new Runnable {
+ override def run(): Unit = Service.main(null)
+ })
+ // wait for the service to start up
+ Thread.sleep(5000)
+ // setup cassandra
+ cassandraSession = Cluster
+ .builder()
+ .addContactPoints(CASSANDRA_ENDPOINT)
+ .build()
+ .connect(CASSANDRA_KEYSPACE)
+ deleteCassandraTableRows()
+ client = StorageBackendGrpc.newBlockingStub(ManagedChannelBuilder.forAddress("localhost", 8090)
+ .usePlaintext(true)
+ .build())
+
+ healthCheckClient = HealthGrpc.newBlockingStub(ManagedChannelBuilder.forAddress("localhost", 8090)
+ .usePlaintext(true)
+ .build())
+ }
+
+ private def deleteCassandraTableRows(): Unit = {
+ cassandraSession.execute(new SimpleStatement(s"TRUNCATE $CASSANDRA_TABLE"))
+ }
+
+ protected def putTraceInCassandra(traceId: String = UUID.randomUUID().toString,
+ spanId: String = UUID.randomUUID().toString,
+ serviceName: String = "",
+ operationName: String = "",
+ tags: Map[String, String] = Map.empty,
+ startTime: Long = System.currentTimeMillis() * 1000,
+ sleep: Boolean = true): Unit = {
+ insertTraceInCassandra(traceId, spanId, serviceName, operationName, tags, startTime)
+ // wait a few seconds for the write to become visible
+ if (sleep) Thread.sleep(5000)
+ }
+
+ protected def createTraceRecord(traceId: String = UUID.randomUUID().toString): TraceRecord = {
+ val spans = "random span".getBytes
+ TraceRecord
+ .newBuilder()
+ .setTraceId(traceId)
+ .setTimestamp(System.currentTimeMillis())
+ .setSpans(ByteString.copyFrom(spans)).build()
+ }
+
+ private def insertTraceInCassandra(traceId: String,
+ spanId: String,
+ serviceName: String,
+ operationName: String,
+ tags: Map[String, String],
+ startTime: Long): ResultSet = {
+ val spanBuffer = createSpanBufferWithSingleSpan(traceId, spanId, serviceName, operationName, tags, startTime)
+ writeToCassandra(spanBuffer, traceId)
+ }
+
+ private def writeToCassandra(spanBuffer: SpanBuffer, traceId: String) = {
+
+ cassandraSession.execute(QueryBuilder
+ .insertInto(CASSANDRA_TABLE)
+ .value(CassandraTableSchema.ID_COLUMN_NAME, traceId)
+ .value(CassandraTableSchema.TIMESTAMP_COLUMN_NAME, new Date())
+ .value(CassandraTableSchema.SPANS_COLUMN_NAME, ByteBuffer.wrap(spanBuffer.toByteArray)))
+ }
+
+ private def createSpanBufferWithSingleSpan(traceId: String,
+ spanId: String,
+ serviceName: String,
+ operationName: String,
+ tags: Map[String, String],
+ startTime: Long) = {
+ val spanTags = tags.map(tag => com.expedia.open.tracing.Tag.newBuilder().setKey(tag._1).setVStr(tag._2).build())
+
+ SpanBuffer
+ .newBuilder()
+ .setTraceId(traceId)
+ .addChildSpans(Span
+ .newBuilder()
+ .setTraceId(traceId)
+ .setSpanId(spanId)
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .setStartTime(startTime)
+ .addAllTags(spanTags.asJava)
+ .build())
+ .build()
+ }
+}
diff --git a/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/integration/CassandraStorageBackendServiceIntegrationTestSpec.scala b/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/integration/CassandraStorageBackendServiceIntegrationTestSpec.scala
new file mode 100644
index 000000000..f9b1cecad
--- /dev/null
+++ b/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/integration/CassandraStorageBackendServiceIntegrationTestSpec.scala
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.integration
+
+import java.util.UUID
+
+import com.expedia.open.tracing.backend.{ReadSpansRequest, WriteSpansRequest}
+
+class CassandraStorageBackendServiceIntegrationTestSpec extends BaseIntegrationTestSpec {
+
+
+ describe("Cassandra Persistence Service read trace records") {
+ it("should get trace records for given traceID from cassandra") {
+ Given("trace in cassandra")
+ val traceId = UUID.randomUUID().toString
+ putTraceInCassandra(traceId)
+
+ val readSpansRequest = ReadSpansRequest.newBuilder().addTraceIds(traceId).build()
+
+ When("readspans is invoked")
+ val traceRecords = client.readSpans(readSpansRequest)
+
+ Then("should return the trace")
+ traceRecords.getRecordsList should not be empty
+ traceRecords.getRecordsCount shouldEqual 1
+ traceRecords.getRecordsList.get(0).getTraceId shouldEqual traceId
+ }
+ it("should write trace records for given traceID to cassandra") {
+ Given("trace in cassandra")
+ val traceId = UUID.randomUUID().toString
+ val record = createTraceRecord(traceId)
+ val writeSpansRequest = WriteSpansRequest.newBuilder().addRecords(record).build()
+
+ When("writespans is invoked")
+ val traceRecords = client.writeSpans(writeSpansRequest)
+
+ Then("should write the trace")
+ val readSpansRequest = ReadSpansRequest.newBuilder().addTraceIds(traceId).build()
+ val retrievedRecord = client.readSpans(readSpansRequest)
+
+ retrievedRecord.getRecordsList should not be empty
+ retrievedRecord.getRecordsCount shouldEqual 1
+ retrievedRecord.getRecordsList.get(0).getTraceId shouldEqual traceId
+ }
+
+ }
+}
diff --git a/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/BaseUnitTestSpec.scala b/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/BaseUnitTestSpec.scala
new file mode 100644
index 000000000..3f06b318b
--- /dev/null
+++ b/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/BaseUnitTestSpec.scala
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.unit
+
+import org.scalatest.{FunSpec, GivenWhenThen, Matchers}
+import org.scalatest.easymock.EasyMockSugar
+
+trait BaseUnitTestSpec extends FunSpec with GivenWhenThen with Matchers with EasyMockSugar
diff --git a/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/client/AwsNodeDiscovererSpec.scala b/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/client/AwsNodeDiscovererSpec.scala
new file mode 100644
index 000000000..f70f3fe21
--- /dev/null
+++ b/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/client/AwsNodeDiscovererSpec.scala
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.unit.client
+
+import com.amazonaws.services.ec2.AmazonEC2Client
+import com.amazonaws.services.ec2.model._
+import com.expedia.www.haystack.trace.storage.backends.cassandra.client.AwsNodeDiscoverer
+import org.easymock.EasyMock
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FunSpec, Matchers}
+
+import scala.collection.JavaConverters._
+
+class AwsNodeDiscovererSpec extends FunSpec with Matchers with EasyMockSugar {
+ describe("AWS node discovery") {
+ it("should return only the running nodes for given ec2 tags") {
+ val client = mock[AmazonEC2Client]
+ val ec2Tags = Map("name" -> "cassandra")
+
+ val instance_1 = new Instance().withPrivateIpAddress("10.0.0.1").withState(new InstanceState().withName(InstanceStateName.Running))
+ val instance_2 = new Instance().withPrivateIpAddress("10.0.0.2").withState(new InstanceState().withName(InstanceStateName.Running))
+ val instance_3 = new Instance().withPrivateIpAddress("10.0.0.3").withState(new InstanceState().withName(InstanceStateName.Terminated))
+ val reservation = new Reservation().withInstances(instance_1, instance_2, instance_3)
+
+ val capturedRequest = EasyMock.newCapture[DescribeInstancesRequest]()
+ expecting {
+ client.describeInstances(EasyMock.capture(capturedRequest)).andReturn(new DescribeInstancesResult().withReservations(reservation))
+ }
+
+ whenExecuting(client) {
+ val ips = AwsNodeDiscoverer.discover(client, ec2Tags)
+ ips should contain allOf ("10.0.0.1", "10.0.0.2")
+ capturedRequest.getValue.getFilters.asScala.foreach(filter => {
+ filter.getName shouldEqual "tag:name"
+ filter.getValues.asScala.head shouldEqual "cassandra"
+ })
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/client/CassandraSessionSpec.scala b/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/client/CassandraSessionSpec.scala
new file mode 100644
index 000000000..76fd4ec54
--- /dev/null
+++ b/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/client/CassandraSessionSpec.scala
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.unit.client
+
+import com.datastax.driver.core._
+import com.datastax.driver.core.querybuilder.{Insert, Select}
+import com.expedia.www.haystack.trace.storage.backends.cassandra.client.{CassandraClusterFactory, CassandraSession}
+import com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities.{ClientConfiguration, KeyspaceConfiguration, SocketConfiguration}
+import org.easymock.EasyMock
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FunSpec, Matchers}
+
+class CassandraSessionSpec extends FunSpec with Matchers with EasyMockSugar {
+ describe("Cassandra Session") {
+ it("should connect to the cassandra cluster and provide prepared statement for inserts") {
+ val keyspaceName = "keyspace-1"
+ val tableName = "table-1"
+
+ val factory = mock[CassandraClusterFactory]
+ val session = mock[Session]
+ val cluster = mock[Cluster]
+ val metadata = mock[Metadata]
+ val keyspaceMetadata = mock[KeyspaceMetadata]
+ val tableMetadata = mock[TableMetadata]
+ val insertPrepStatement = mock[PreparedStatement]
+ val keyspaceConfig = KeyspaceConfiguration(keyspaceName, tableName, 100, None)
+
+ val config = ClientConfiguration(List("cassandra1"),
+ autoDiscoverEnabled = false,
+ None,
+ None,
+ keyspaceConfig,
+ SocketConfiguration(10, keepAlive = true, 1000, 1000))
+
+ val captured = EasyMock.newCapture[Insert.Options]()
+ expecting {
+ factory.buildCluster(config).andReturn(cluster).once()
+ cluster.connect().andReturn(session).once()
+ keyspaceMetadata.getTable(tableName).andReturn(tableMetadata).once()
+ metadata.getKeyspace(keyspaceName).andReturn(keyspaceMetadata).once()
+ cluster.getMetadata.andReturn(metadata).once()
+ session.getCluster.andReturn(cluster).once()
+ session.prepare(EasyMock.capture(captured)).andReturn(insertPrepStatement).anyTimes()
+ session.close().once()
+ cluster.close().once()
+ }
+
+ whenExecuting(factory, cluster, session, metadata, keyspaceMetadata, tableMetadata, insertPrepStatement) {
+ val session = new CassandraSession(config, factory)
+ session.ensureKeyspace(config.tracesKeyspace)
+ val stmt = session.createSpanInsertPreparedStatement(keyspaceConfig)
+ stmt shouldBe insertPrepStatement
+ captured.getValue.getQueryString() shouldEqual "INSERT INTO \"keyspace-1\".\"table-1\" (id,ts,spans) VALUES (:id,:ts,:spans) USING TTL 100;"
+ session.close()
+ }
+ }
+
+ it("should connect to the cassandra cluster and provide prepared statement for select with traces") {
+ val keyspaceName = "keyspace-1"
+ val tableName = "table-1"
+
+ val factory = mock[CassandraClusterFactory]
+ val session = mock[Session]
+ val cluster = mock[Cluster]
+ val metadata = mock[Metadata]
+ val keyspaceMetadata = mock[KeyspaceMetadata]
+ val tableMetadata = mock[TableMetadata]
+ val selectPrepStatement = mock[PreparedStatement]
+ val keyspaceConfig = KeyspaceConfiguration(keyspaceName, tableName, 100, None)
+
+ val config = ClientConfiguration(List("cassandra1"),
+ autoDiscoverEnabled = false,
+ None,
+ None,
+ keyspaceConfig,
+ SocketConfiguration(10, keepAlive = true, 1000, 1000))
+
+ val captured = EasyMock.newCapture[Select.Where]()
+ expecting {
+ factory.buildCluster(config).andReturn(cluster).once()
+ cluster.connect().andReturn(session).once()
+ keyspaceMetadata.getTable(tableName).andReturn(tableMetadata).once()
+ metadata.getKeyspace(keyspaceName).andReturn(keyspaceMetadata).once()
+ cluster.getMetadata.andReturn(metadata).once()
+ session.getCluster.andReturn(cluster).once()
+ session.prepare(EasyMock.capture(captured)).andReturn(selectPrepStatement).anyTimes()
+ session.close().once()
+ cluster.close().once()
+ }
+
+ whenExecuting(factory, cluster, session, metadata, keyspaceMetadata, tableMetadata, selectPrepStatement) {
+ val session = new CassandraSession(config, factory)
+ session.ensureKeyspace(config.tracesKeyspace)
+ val stmt = session.selectRawTracesPreparedStmt
+ stmt shouldBe selectPrepStatement
+ captured.getValue.getQueryString() shouldEqual "SELECT * FROM \"keyspace-1\".\"table-1\" WHERE id IN :id;"
+ session.close()
+ }
+ }
+ }
+}
diff --git a/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/client/CassandraTableSchemaSpec.scala b/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/client/CassandraTableSchemaSpec.scala
new file mode 100644
index 000000000..aad03ad6a
--- /dev/null
+++ b/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/client/CassandraTableSchemaSpec.scala
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.cassandra.unit.client
+
+import com.datastax.driver.core._
+import com.expedia.www.haystack.trace.storage.backends.cassandra.client.CassandraTableSchema
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FunSpec, Matchers}
+
+class CassandraTableSchemaSpec extends FunSpec with Matchers with EasyMockSugar {
+
+
+ it("should apply the schema if table does not exist in cassandra") {
+ val session = mock[Session]
+ val cluster = mock[Cluster]
+ val metadata = mock[Metadata]
+ val keyspaceMetadata = mock[KeyspaceMetadata]
+ val keyspace = "my-keyspace"
+ val cassandraTableName = "my-table"
+
+ expecting {
+ session.execute("apply schema").andReturn(null).once
+ keyspaceMetadata.getTable(cassandraTableName).andReturn(null).once()
+ metadata.getKeyspace(keyspace).andReturn(keyspaceMetadata).once()
+ cluster.getMetadata.andReturn(metadata).once()
+ session.getCluster.andReturn(cluster).once()
+ }
+ whenExecuting(session, cluster, metadata, keyspaceMetadata) {
+ CassandraTableSchema.ensureExists(keyspace, cassandraTableName, Some("apply schema"), session)
+ }
+ }
+
+ it("should apply the schema if keyspace and table does not exist in cassandra") {
+ val session = mock[Session]
+ val cluster = mock[Cluster]
+ val metadata = mock[Metadata]
+ val keyspace = "my-keyspace"
+ val cassandraTableName = "my-table"
+
+ expecting {
+ session.execute("apply schema").andReturn(null).once
+ session.execute("apply schema2").andReturn(null).once
+ metadata.getKeyspace(keyspace).andReturn(null).once()
+ cluster.getMetadata.andReturn(metadata).once()
+ session.getCluster.andReturn(cluster).once()
+ }
+ whenExecuting(session, cluster, metadata) {
+ CassandraTableSchema.ensureExists(keyspace, cassandraTableName, Some("apply schema;apply schema2"), session)
+ }
+ }
+
+ it("should not apply the schema if keyspace and table both exists in cassandra") {
+ val session = mock[Session]
+ val cluster = mock[Cluster]
+ val metadata = mock[Metadata]
+ val keyspaceMetadata = mock[KeyspaceMetadata]
+ val tableMetadata = mock[TableMetadata]
+
+ val keyspace = "my-keyspace"
+ val cassandraTableName = "my-table"
+
+ expecting {
+ keyspaceMetadata.getTable(cassandraTableName).andReturn(tableMetadata).once()
+ metadata.getKeyspace(keyspace).andReturn(keyspaceMetadata).once()
+ cluster.getMetadata.andReturn(metadata).once()
+ session.getCluster.andReturn(cluster).once()
+ }
+ whenExecuting(session, cluster, metadata, keyspaceMetadata, tableMetadata) {
+ CassandraTableSchema.ensureExists(keyspace, cassandraTableName, Some("apply schema"), session)
+ }
+ }
+
+ it("should throw an exception if keyspace and table does not exists in cassandra and no schema is applied") {
+ val session = mock[Session]
+ val cluster = mock[Cluster]
+ val metadata = mock[Metadata]
+ val keyspaceMetadata = mock[KeyspaceMetadata]
+
+ val keyspace = "my-keyspace"
+ val cassandraTableName = "my-table"
+
+ expecting {
+ keyspaceMetadata.getTable(cassandraTableName).andReturn(null).once()
+ metadata.getKeyspace(keyspace).andReturn(keyspaceMetadata).once()
+ cluster.getMetadata.andReturn(metadata).once()
+ session.getCluster.andReturn(cluster).once()
+ }
+ whenExecuting(session, cluster, metadata, keyspaceMetadata) {
+ val thrown = intercept[Exception] {
+ CassandraTableSchema.ensureExists(keyspace, cassandraTableName, None, session)
+ }
+ thrown.getMessage shouldEqual s"Fail to find the keyspace=$keyspace and/or table=$cassandraTableName !!!!"
+ }
+ }
+
+ it("should thrown an exception if fail to apply the schema when keyspace/table does not exist in cassandra") {
+ val session = mock[Session]
+ val cluster = mock[Cluster]
+ val metadata = mock[Metadata]
+ val applySchemaException = new RuntimeException
+ val keyspace = "my-keyspace"
+ val cassandraTableName = "my-table"
+
+ expecting {
+ session.execute("apply schema").andThrow(applySchemaException)
+ metadata.getKeyspace(keyspace).andReturn(null).once()
+ cluster.getMetadata.andReturn(metadata).once()
+ session.getCluster.andReturn(cluster).once()
+ }
+ whenExecuting(session, cluster, metadata) {
+ val thrown = intercept[Exception] {
+ CassandraTableSchema.ensureExists(keyspace, cassandraTableName, Some("apply schema;apply schema2"), session)
+ }
+ thrown.getCause shouldBe applySchemaException
+ }
+ }
+
+}
diff --git a/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/config/ConfigurationLoaderSpec.scala b/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/config/ConfigurationLoaderSpec.scala
new file mode 100644
index 000000000..713e1dce7
--- /dev/null
+++ b/traces/backends/cassandra/src/test/scala/com/expedia/www/haystack/trace/storage/backends/cassandra/unit/config/ConfigurationLoaderSpec.scala
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.expedia.www.haystack.trace.storage.backends.cassandra.unit.config
+
+import com.datastax.driver.core.ConsistencyLevel
+import com.datastax.driver.core.exceptions.UnavailableException
+import com.expedia.www.haystack.trace.storage.backends.cassandra.config.ProjectConfiguration
+import com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities.ServiceConfiguration
+import com.expedia.www.haystack.trace.storage.backends.cassandra.unit.BaseUnitTestSpec
+
+class ConfigurationLoaderSpec extends BaseUnitTestSpec {
+ describe("ConfigurationLoader") {
+ val project = new ProjectConfiguration()
+ it("should load the service config from base.conf") {
+ val serviceConfig: ServiceConfiguration = project.serviceConfig
+ serviceConfig.port shouldBe 8090
+ serviceConfig.ssl.enabled shouldBe false
+ serviceConfig.ssl.certChainFilePath shouldBe "/ssl/cert"
+ serviceConfig.ssl.privateKeyPath shouldBe "/ssl/private-key"
+ }
+ it("should load the cassandra config from base.conf and few properties overridden from env variable") {
+ val cassandraWriteConfig = project.cassandraConfig
+ val clientConfig = cassandraWriteConfig.clientConfig
+
+ cassandraWriteConfig.consistencyLevel shouldEqual ConsistencyLevel.ONE
+ clientConfig.autoDiscoverEnabled shouldBe false
+ // this will fail if run inside an IDE; we override this config using an env variable set in pom.xml
+ clientConfig.endpoints should contain allOf("cass1", "cass2")
+ clientConfig.tracesKeyspace.autoCreateSchema shouldBe Some("cassandra_cql_schema_1")
+ clientConfig.tracesKeyspace.name shouldBe "haystack"
+ clientConfig.tracesKeyspace.table shouldBe "traces"
+ clientConfig.tracesKeyspace.recordTTLInSec shouldBe 86400
+
+ clientConfig.awsNodeDiscovery shouldBe empty
+ clientConfig.socket.keepAlive shouldBe true
+ clientConfig.socket.maxConnectionPerHost shouldBe 100
+ clientConfig.socket.readTimeoutMills shouldBe 5000
+ clientConfig.socket.connectionTimeoutMillis shouldBe 10000
+ cassandraWriteConfig.retryConfig.maxRetries shouldBe 10
+ cassandraWriteConfig.retryConfig.backOffInMillis shouldBe 250
+ cassandraWriteConfig.retryConfig.backoffFactor shouldBe 2
+
+ // test consistency level on error
+ val writeError = new UnavailableException(ConsistencyLevel.ONE, 0, 0)
+ cassandraWriteConfig.writeConsistencyLevel(writeError) shouldEqual ConsistencyLevel.ANY
+ cassandraWriteConfig.writeConsistencyLevel(new RuntimeException(writeError)) shouldEqual ConsistencyLevel.ANY
+ cassandraWriteConfig.writeConsistencyLevel(null) shouldEqual ConsistencyLevel.ONE
+ cassandraWriteConfig.writeConsistencyLevel(new RuntimeException) shouldEqual ConsistencyLevel.ONE
+ }
+
+ }
+}
diff --git a/traces/backends/memory/Makefile b/traces/backends/memory/Makefile
new file mode 100644
index 000000000..9089e3fff
--- /dev/null
+++ b/traces/backends/memory/Makefile
@@ -0,0 +1,16 @@
+.PHONY: docker_build integration_test release
+
+export DOCKER_ORG := expediadotcom
+export DOCKER_IMAGE_NAME := haystack-trace-backend-memory
+PWD := $(shell pwd)
+SERVICE_DEBUG_ON ?= false
+
+docker_build:
+ # build docker image using existing app jar
+ docker build -t $(DOCKER_IMAGE_NAME) -f build/docker/Dockerfile .
+
+integration_test:
+ cd ../../ && ./mvnw -q integration-test -pl backends/memory -am
+
+release:
+ ../../deployment/scripts/publish-to-docker-hub.sh
diff --git a/traces/backends/memory/README.md b/traces/backends/memory/README.md
new file mode 100644
index 000000000..90e334716
--- /dev/null
+++ b/traces/backends/memory/README.md
@@ -0,0 +1,14 @@
+# Storage Backend - In Memory
+
+gRPC service which can read and write spans to an in-memory map.
+
+## Technical Details
+
+In order to understand this service, we recommend reading the details of the [haystack](https://github.com/ExpediaDotCom/haystack) project.
+This service reads from and writes to an in-memory map. API endpoints are exposed as [gRPC](https://grpc.io/) endpoints.
+
+We will fill in more details as we go.
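+
+A minimal client sketch (illustrative only, mirroring the integration tests; it assumes a backend running locally on the default port 8090 and uses a placeholder trace id):
+
+```scala
+import com.expedia.open.tracing.backend.{ReadSpansRequest, StorageBackendGrpc}
+import io.grpc.ManagedChannelBuilder
+
+import scala.collection.JavaConverters._
+
+object ReadSpansExample extends App {
+  // plaintext channel to a locally running in-memory backend (default port 8090)
+  val channel = ManagedChannelBuilder.forAddress("localhost", 8090)
+    .usePlaintext(true)
+    .build()
+  val client = StorageBackendGrpc.newBlockingStub(channel)
+
+  // fetch the raw trace records stored for a trace id
+  val response = client.readSpans(ReadSpansRequest.newBuilder().addTraceIds("my-trace-id").build())
+  response.getRecordsList.asScala.foreach(record => println(record.getTraceId))
+
+  channel.shutdown()
+}
+```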
+
+## Building
+
+Check the details in the [Build Section](../README.md).
diff --git a/traces/backends/memory/build/docker/Dockerfile b/traces/backends/memory/build/docker/Dockerfile
new file mode 100644
index 000000000..f76a36f70
--- /dev/null
+++ b/traces/backends/memory/build/docker/Dockerfile
@@ -0,0 +1,30 @@
+FROM openjdk:8-jre
+MAINTAINER Haystack
+
+ENV APP_NAME haystack-trace-backend-memory
+ENV APP_HOME /app/bin
+ENV JMXTRANS_AGENT jmxtrans-agent-1.2.6
+ENV DOCKERIZE_VERSION v0.6.1
+
+ADD https://github.com/jwilder/dockerize/releases/download/${DOCKERIZE_VERSION}/dockerize-alpine-linux-amd64-${DOCKERIZE_VERSION}.tar.gz dockerize.tar.gz
+RUN tar xzf dockerize.tar.gz
+RUN chmod +x dockerize
+
+RUN mkdir -p ${APP_HOME}
+
+COPY target/${APP_NAME}.jar ${APP_HOME}/
+COPY build/docker/start-app.sh ${APP_HOME}/
+RUN chmod +x ${APP_HOME}/start-app.sh
+
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+ wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+ chmod +x /bin/grpc_health_probe
+
+COPY build/docker/jmxtrans-agent.xml ${APP_HOME}/
+ADD https://github.com/jmxtrans/jmxtrans-agent/releases/download/${JMXTRANS_AGENT}/${JMXTRANS_AGENT}.jar ${APP_HOME}/
+
+WORKDIR ${APP_HOME}
+
+EXPOSE 8090
+
+ENTRYPOINT ["./start-app.sh"]
diff --git a/traces/backends/memory/build/docker/jmxtrans-agent.xml b/traces/backends/memory/build/docker/jmxtrans-agent.xml
new file mode 100644
index 000000000..8b4c1a298
--- /dev/null
+++ b/traces/backends/memory/build/docker/jmxtrans-agent.xml
@@ -0,0 +1,13 @@
+<jmxtrans-agent>
+    <queries>
+    </queries>
+    <outputWriter class="org.jmxtrans.agent.GraphitePlainTextTcpOutputWriter">
+        <host>${HAYSTACK_GRAPHITE_HOST:monitoring-influxdb-graphite.kube-system.svc}</host>
+        <port>${HAYSTACK_GRAPHITE_PORT:2003}</port>
+        <enabled>${HAYSTACK_GRAPHITE_ENABLED:true}</enabled>
+        <namePrefix>haystack.traces.backend-memory.#hostname#.</namePrefix>
+    </outputWriter>
+    <collectIntervalInSeconds>30</collectIntervalInSeconds>
+</jmxtrans-agent>
diff --git a/traces/backends/memory/build/docker/start-app.sh b/traces/backends/memory/build/docker/start-app.sh
new file mode 100755
index 000000000..ba2c65569
--- /dev/null
+++ b/traces/backends/memory/build/docker/start-app.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+[ -z "$JAVA_XMS" ] && JAVA_XMS=1024m
+[ -z "$JAVA_XMX" ] && JAVA_XMX=1024m
+[ -z "$JAVA_GC_OPTS" ] && JAVA_GC_OPTS="-XX:+UseG1GC"
+
+set -e
+JAVA_OPTS="${JAVA_OPTS} \
+-javaagent:${APP_HOME}/${JMXTRANS_AGENT}.jar=${APP_HOME}/jmxtrans-agent.xml \
+${JAVA_GC_OPTS} \
+-Xmx${JAVA_XMX} \
+-Xms${JAVA_XMS} \
+-XX:+ExitOnOutOfMemoryError \
+-Dapplication.name=${APP_NAME} \
+-Dapplication.home=${APP_HOME}"
+
+if [[ -n "$SERVICE_DEBUG_ON" ]] && [[ "$SERVICE_DEBUG_ON" == true ]]; then
+ JAVA_OPTS="$JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y"
+fi
+
+exec java ${JAVA_OPTS} -jar "${APP_HOME}/${APP_NAME}.jar"
diff --git a/traces/backends/memory/pom.xml b/traces/backends/memory/pom.xml
new file mode 100644
index 000000000..d05fb6483
--- /dev/null
+++ b/traces/backends/memory/pom.xml
@@ -0,0 +1,142 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <parent>
+        <artifactId>haystack-trace-backends</artifactId>
+        <groupId>com.expedia.www</groupId>
+        <version>1.0.9-SNAPSHOT</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>haystack-trace-backend-memory</artifactId>
+    <packaging>jar</packaging>
+
+    <properties>
+        <mainClass>com.expedia.www.haystack.trace.storage.backends.memory.Service</mainClass>
+        <finalName>${project.artifactId}-${project.version}</finalName>
+        <protoc.version>3.3.0.1</protoc.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-protobuf</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-stub</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-services</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-netty</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-handler</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <finalName>${finalName}</finalName>
+        <plugins>
+            <plugin>
+                <groupId>org.scalatest</groupId>
+                <artifactId>scalatest-maven-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>test</id>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                        <configuration>
+                            <membersOnlySuites>com.expedia.www.haystack.trace.storage.backends.memory.unit</membersOnlySuites>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>integration-test</id>
+                        <phase>integration-test</phase>
+                        <goals>
+                            <goal>test</goal>
+                        </goals>
+                        <configuration>
+                            <membersOnlySuites>com.expedia.www.haystack.trace.storage.backends.memory.integration</membersOnlySuites>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <transformers>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
+                                    <resource>reference.conf</resource>
+                                </transformer>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                                    <mainClass>${mainClass}</mainClass>
+                                </transformer>
+                            </transformers>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>net.alchim31.maven</groupId>
+                <artifactId>scala-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.scalastyle</groupId>
+                <artifactId>scalastyle-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/traces/backends/memory/src/main/resources/config/base.conf b/traces/backends/memory/src/main/resources/config/base.conf
new file mode 100644
index 000000000..3b1d854c5
--- /dev/null
+++ b/traces/backends/memory/src/main/resources/config/base.conf
@@ -0,0 +1,9 @@
+health.status.path = "isHealthy"
+service {
+ port = 8090
+ ssl {
+ enabled = false
+ cert.path = ""
+ private.key.path = ""
+ }
+}
diff --git a/traces/backends/memory/src/main/resources/logback.xml b/traces/backends/memory/src/main/resources/logback.xml
new file mode 100644
index 000000000..e2e2c58e9
--- /dev/null
+++ b/traces/backends/memory/src/main/resources/logback.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+        <immediateFlush>true</immediateFlush>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="async" class="ch.qos.logback.classic.AsyncAppender">
+        <queueSize>${HAYSTACK_LOG_QUEUE_SIZE:-500}</queueSize>
+        <discardingThreshold>${HAYSTACK_LOG_DISCARD_THRESHOLD:-0}</discardingThreshold>
+        <appender-ref ref="console"/>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="async"/>
+    </root>
+</configuration>
\ No newline at end of file
diff --git a/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/Service.scala b/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/Service.scala
new file mode 100644
index 000000000..c1ff30d0e
--- /dev/null
+++ b/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/Service.scala
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trace.storage.backends.memory
+
+import java.io.File
+
+import com.codahale.metrics.JmxReporter
+import com.expedia.www.haystack.commons.logger.LoggerUtils
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.storage.backends.memory.config.ProjectConfiguration
+import com.expedia.www.haystack.trace.storage.backends.memory.services.{GrpcHealthService, SpansPersistenceService}
+import com.expedia.www.haystack.trace.storage.backends.memory.store.InMemoryTraceRecordStore
+import io.grpc.netty.NettyServerBuilder
+import org.slf4j.{Logger, LoggerFactory}
+
+object Service extends MetricsSupport {
+ private val LOGGER: Logger = LoggerFactory.getLogger("MemoryBackend")
+
+ // primary executor for service's async tasks
+ implicit private val executor = scala.concurrent.ExecutionContext.global
+
+ def main(args: Array[String]): Unit = {
+ startJmxReporter()
+ startService(args)
+ }
+
+ private def startJmxReporter(): Unit = {
+ JmxReporter
+ .forRegistry(metricRegistry)
+ .build()
+ .start()
+ }
+
+ private def startService(args: Array[String]): Unit = {
+ try {
+ val config = new ProjectConfiguration
+ val serviceConfig = config.serviceConfig
+ // allow the port to be overridden by the first program argument
+ val port = if (args != null && args.nonEmpty) args(0).toInt else serviceConfig.port
+
+ val tracerRecordStore = new InMemoryTraceRecordStore()
+
+ val serverBuilder = NettyServerBuilder
+ .forPort(port)
+ .directExecutor()
+ .addService(new GrpcHealthService())
+ .addService(new SpansPersistenceService(store = tracerRecordStore)(executor))
+
+ // enable TLS if configured
+ if (serviceConfig.ssl.enabled) {
+ serverBuilder.useTransportSecurity(new File(serviceConfig.ssl.certChainFilePath), new File(serviceConfig.ssl.privateKeyPath))
+ }
+
+ val server = serverBuilder.build().start()
+
+ LOGGER.info(s"server started, listening on ${serviceConfig.port}")
+
+ Runtime.getRuntime.addShutdownHook(new Thread() {
+ override def run(): Unit = {
+ LOGGER.info("shutting down gRPC server since JVM is shutting down")
+ server.shutdown()
+ LOGGER.info("server has been shutdown now")
+ }
+ })
+
+ server.awaitTermination()
+ } catch {
+ case ex: Throwable =>
+ ex.printStackTrace()
+ LOGGER.error("Fatal error observed while running the app", ex)
+ LoggerUtils.shutdownLogger()
+ System.exit(1)
+ }
+ }
+}
diff --git a/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/config/ProjectConfiguration.scala b/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/config/ProjectConfiguration.scala
new file mode 100644
index 000000000..8ade8b187
--- /dev/null
+++ b/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/config/ProjectConfiguration.scala
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.memory.config
+
+import com.expedia.www.haystack.commons.config.ConfigurationLoader
+import com.expedia.www.haystack.trace.storage.backends.memory.config.entities.{ServiceConfiguration, SslConfiguration}
+
+class ProjectConfiguration {
+ private val config = ConfigurationLoader.loadConfigFileWithEnvOverrides()
+
+ val healthStatusFilePath: String = config.getString("health.status.path")
+
+ val serviceConfig: ServiceConfiguration = {
+ val serviceConfig = config.getConfig("service")
+
+ val ssl = serviceConfig.getConfig("ssl")
+ val sslConfig = SslConfiguration(ssl.getBoolean("enabled"), ssl.getString("cert.path"), ssl.getString("private.key.path"))
+
+ ServiceConfiguration(serviceConfig.getInt("port"), sslConfig)
+ }
+}
diff --git a/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/config/entities/ServiceConfiguration.scala b/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/config/entities/ServiceConfiguration.scala
new file mode 100644
index 000000000..3760407c3
--- /dev/null
+++ b/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/config/entities/ServiceConfiguration.scala
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.memory.config.entities
+
+/**
+ * @param port port to start the grpc server on
+ */
+case class ServiceConfiguration(port: Int, ssl: SslConfiguration)
+case class SslConfiguration(enabled: Boolean, certChainFilePath: String, privateKeyPath: String)
diff --git a/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/services/GrpcHealthService.scala b/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/services/GrpcHealthService.scala
new file mode 100644
index 000000000..2ccaf25ce
--- /dev/null
+++ b/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/services/GrpcHealthService.scala
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.memory.services
+
+import io.grpc.health.v1.{HealthCheckRequest, HealthCheckResponse, HealthGrpc}
+import io.grpc.stub.StreamObserver
+
+class GrpcHealthService extends HealthGrpc.HealthImplBase {
+
+ override def check(request: HealthCheckRequest, responseObserver: StreamObserver[HealthCheckResponse]): Unit = {
+ responseObserver.onNext(HealthCheckResponse
+ .newBuilder()
+ .setStatus(HealthCheckResponse.ServingStatus.SERVING)
+ .build())
+ responseObserver.onCompleted()
+ }
+}
diff --git a/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/services/SpansPersistenceService.scala b/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/services/SpansPersistenceService.scala
new file mode 100644
index 000000000..00b5e3f35
--- /dev/null
+++ b/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/services/SpansPersistenceService.scala
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.memory.services
+
+import com.expedia.open.tracing.backend.WriteSpansResponse.ResultCode
+import com.expedia.open.tracing.backend._
+import com.expedia.www.haystack.trace.storage.backends.memory.store.InMemoryTraceRecordStore
+import io.grpc.stub.StreamObserver
+
+import scala.collection.JavaConverters._
+import scala.concurrent.ExecutionContextExecutor
+
+class SpansPersistenceService(store: InMemoryTraceRecordStore)
+ (implicit val executor: ExecutionContextExecutor) extends StorageBackendGrpc.StorageBackendImplBase {
+
+
+ override def writeSpans(request: WriteSpansRequest, responseObserver: StreamObserver[WriteSpansResponse]): Unit = {
+ store.writeTraceRecords(request.getRecordsList.asScala.toList)
+ val response = WriteSpansResponse.newBuilder().setCode(
+ ResultCode.SUCCESS
+ ).build()
+ responseObserver.onNext(response)
+ responseObserver.onCompleted()
+ }
+
+ /**
+ * reads the buffered spans from the backend for the requested trace ids
+ */
+ override def readSpans(request: ReadSpansRequest, responseObserver: StreamObserver[ReadSpansResponse]): Unit = {
+
+ val records = store.readTraceRecords(request.getTraceIdsList.iterator().asScala.toList)
+ val response = ReadSpansResponse.newBuilder()
+ .addAllRecords(records.asJava)
+ .build()
+ responseObserver.onNext(response)
+ responseObserver.onCompleted()
+ }
+}
diff --git a/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/store/InMemoryTraceRecordStore.scala b/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/store/InMemoryTraceRecordStore.scala
new file mode 100644
index 000000000..c2b184dff
--- /dev/null
+++ b/traces/backends/memory/src/main/scala/com/expedia/www/haystack/trace/storage/backends/memory/store/InMemoryTraceRecordStore.scala
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.memory.store
+
+import com.expedia.open.tracing.backend.TraceRecord
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import org.slf4j.LoggerFactory
+
+import scala.concurrent.ExecutionContextExecutor
+
+class InMemoryTraceRecordStore()
+ (implicit val dispatcher: ExecutionContextExecutor) extends MetricsSupport with AutoCloseable {
+ private val LOGGER = LoggerFactory.getLogger(classOf[InMemoryTraceRecordStore])
+
+
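+ // note: a plain immutable Map behind a var; access is not synchronized, so concurrent writes may lose records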
+ private var inMemoryTraceRecords = Map[String, List[TraceRecord]]()
+
+ def readTraceRecords(traceIds: List[String]): Seq[TraceRecord] = {
+
+ try {
+ traceIds.flatMap(traceId => {
+ inMemoryTraceRecords.getOrElse(traceId, List())
+ })
+ } catch {
+ case ex: Exception =>
+ LOGGER.error("Failed to read raw traces with exception", ex)
+ List()
+ }
+ }
+
+ /**
+ * writes the traceId and its spans to an in-memory map, keyed by traceId
+ *
+ * @param traceRecords : trace records which need to be written
+ * @return
+ */
+ def writeTraceRecords(traceRecords: List[TraceRecord]): Unit = {
+
+ traceRecords.foreach(record => {
+
+ try {
+ val existingRecords: List[TraceRecord] = inMemoryTraceRecords.getOrElse(record.getTraceId, List())
+ val records = record :: existingRecords
+ inMemoryTraceRecords = inMemoryTraceRecords + (record.getTraceId -> records)
+ } catch {
+ case ex: Exception =>
+ LOGGER.error("Fail to write the spans to memory with exception", ex)
+
+ }
+ })
+ }
+
+ override def close(): Unit = ()
+}
diff --git a/traces/backends/memory/src/test/resources/config/base.conf b/traces/backends/memory/src/test/resources/config/base.conf
new file mode 100644
index 000000000..4bf5cb741
--- /dev/null
+++ b/traces/backends/memory/src/test/resources/config/base.conf
@@ -0,0 +1,10 @@
+health.status.path = "isHealthy"
+
+service {
+ port = 8090
+ ssl {
+ enabled = false
+ cert.path = "/ssl/cert"
+ private.key.path = "/ssl/private-key"
+ }
+}
diff --git a/traces/backends/memory/src/test/resources/logback-test.xml b/traces/backends/memory/src/test/resources/logback-test.xml
new file mode 100644
index 000000000..298193e01
--- /dev/null
+++ b/traces/backends/memory/src/test/resources/logback-test.xml
@@ -0,0 +1 @@
+
diff --git a/traces/backends/memory/src/test/scala/com/expedia/www/haystack/trace/storage/backends/memory/integration/BaseIntegrationTestSpec.scala b/traces/backends/memory/src/test/scala/com/expedia/www/haystack/trace/storage/backends/memory/integration/BaseIntegrationTestSpec.scala
new file mode 100644
index 000000000..1c72f5eb4
--- /dev/null
+++ b/traces/backends/memory/src/test/scala/com/expedia/www/haystack/trace/storage/backends/memory/integration/BaseIntegrationTestSpec.scala
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.memory.integration
+
+import java.util.UUID
+import java.util.concurrent.Executors
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.backend.StorageBackendGrpc
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.storage.backends.memory.Service
+import io.grpc.ManagedChannelBuilder
+import org.scalatest._
+
+import scala.collection.JavaConverters._
+
+trait BaseIntegrationTestSpec extends FunSpec with GivenWhenThen with Matchers with BeforeAndAfterAll with BeforeAndAfterEach {
+ protected var client: StorageBackendGrpc.StorageBackendBlockingStub = _
+
+
+ private val executors = Executors.newSingleThreadExecutor()
+
+
+ override def beforeAll() {
+ executors.submit(new Runnable {
+ override def run(): Unit = Service.main(null)
+ })
+
+ // wait for the service to start up
+ Thread.sleep(5000)
+
+ client = StorageBackendGrpc.newBlockingStub(ManagedChannelBuilder.forAddress("localhost", 8090)
+ .usePlaintext(true)
+ .build())
+ }
+
+ protected def createSerializedSpanBuffer(traceId: String = UUID.randomUUID().toString,
+ spanId: String = UUID.randomUUID().toString,
+ serviceName: String = "test-service",
+ operationName: String = "test-operation",
+ tags: Map[String, String] = Map.empty,
+ startTime: Long = System.currentTimeMillis() * 1000,
+ sleep: Boolean = true): Array[Byte] = {
+ val spanBuffer = createSpanBufferWithSingleSpan(traceId, spanId, serviceName, operationName, tags, startTime)
+ spanBuffer.toByteArray
+ }
+
+ private def createSpanBufferWithSingleSpan(traceId: String,
+ spanId: String,
+ serviceName: String,
+ operationName: String,
+ tags: Map[String, String],
+ startTime: Long) = {
+ val spanTags = tags.map(tag => com.expedia.open.tracing.Tag.newBuilder().setKey(tag._1).setVStr(tag._2).build())
+
+ SpanBuffer
+ .newBuilder()
+ .setTraceId(traceId)
+ .addChildSpans(Span
+ .newBuilder()
+ .setTraceId(traceId)
+ .setSpanId(spanId)
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .setStartTime(startTime)
+ .addAllTags(spanTags.asJava)
+ .build())
+ .build()
+ }
+}
diff --git a/traces/backends/memory/src/test/scala/com/expedia/www/haystack/trace/storage/backends/memory/integration/InMemoryTraceBackendServiceIntegrationTestSpec.scala b/traces/backends/memory/src/test/scala/com/expedia/www/haystack/trace/storage/backends/memory/integration/InMemoryTraceBackendServiceIntegrationTestSpec.scala
new file mode 100644
index 000000000..93aad6dd9
--- /dev/null
+++ b/traces/backends/memory/src/test/scala/com/expedia/www/haystack/trace/storage/backends/memory/integration/InMemoryTraceBackendServiceIntegrationTestSpec.scala
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.memory.integration
+
+import java.util.UUID
+
+import com.expedia.open.tracing.backend.{ReadSpansRequest, TraceRecord, WriteSpansRequest}
+import com.google.protobuf.ByteString
+
+class InMemoryTraceBackendServiceIntegrationTestSpec extends BaseIntegrationTestSpec {
+
+  describe("In Memory Persistence Service read trace records") {
+    it("should get trace records for given traceID from in memory") {
+      Given("a trace-record")
+ val traceId = UUID.randomUUID().toString
+ val serializedSpans = createSerializedSpanBuffer(traceId)
+ val traceRecord = TraceRecord.newBuilder()
+ .setTraceId(traceId)
+ .setSpans(ByteString.copyFrom(serializedSpans))
+ .setTimestamp(System.currentTimeMillis())
+ .build()
+
+ When("write span is invoked")
+ val writeSpanRequest = WriteSpansRequest.newBuilder().addRecords(traceRecord).build()
+ val response = client.writeSpans(writeSpanRequest)
+
+ Then("should be able to retrieve the trace-record back")
+ val readSpansResponse = client.readSpans(ReadSpansRequest.newBuilder().addTraceIds(traceId).build())
+      readSpansResponse.getRecordsCount shouldBe 1
+ readSpansResponse.getRecordsList.get(0).getTraceId shouldEqual traceId
+ }
+ }
+}
diff --git a/traces/backends/memory/src/test/scala/com/expedia/www/haystack/trace/storage/backends/memory/unit/BaseUnitTestSpec.scala b/traces/backends/memory/src/test/scala/com/expedia/www/haystack/trace/storage/backends/memory/unit/BaseUnitTestSpec.scala
new file mode 100644
index 000000000..fa83ad76c
--- /dev/null
+++ b/traces/backends/memory/src/test/scala/com/expedia/www/haystack/trace/storage/backends/memory/unit/BaseUnitTestSpec.scala
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.storage.backends.memory.unit
+
+import org.scalatest.{FunSpec, GivenWhenThen, Matchers}
+import org.scalatest.easymock.EasyMockSugar
+
+trait BaseUnitTestSpec extends FunSpec with GivenWhenThen with Matchers with EasyMockSugar
diff --git a/traces/backends/memory/src/test/scala/com/expedia/www/haystack/trace/storage/backends/memory/unit/config/ConfigurationLoaderSpec.scala b/traces/backends/memory/src/test/scala/com/expedia/www/haystack/trace/storage/backends/memory/unit/config/ConfigurationLoaderSpec.scala
new file mode 100644
index 000000000..10a784e6b
--- /dev/null
+++ b/traces/backends/memory/src/test/scala/com/expedia/www/haystack/trace/storage/backends/memory/unit/config/ConfigurationLoaderSpec.scala
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.expedia.www.haystack.trace.storage.backends.memory.unit.config
+
+import com.expedia.www.haystack.trace.storage.backends.memory.config.ProjectConfiguration
+import com.expedia.www.haystack.trace.storage.backends.memory.config.entities.ServiceConfiguration
+import com.expedia.www.haystack.trace.storage.backends.memory.unit.BaseUnitTestSpec
+
+class ConfigurationLoaderSpec extends BaseUnitTestSpec {
+ describe("ConfigurationLoader") {
+ val project = new ProjectConfiguration()
+ it("should load the service config from base.conf") {
+ val serviceConfig: ServiceConfiguration = project.serviceConfig
+ serviceConfig.port shouldBe 8090
+ serviceConfig.ssl.enabled shouldBe false
+ serviceConfig.ssl.certChainFilePath shouldBe "/ssl/cert"
+ serviceConfig.ssl.privateKeyPath shouldBe "/ssl/private-key"
+ }
+ }
+}
diff --git a/traces/backends/pom.xml b/traces/backends/pom.xml
new file mode 100644
index 000000000..f152a902b
--- /dev/null
+++ b/traces/backends/pom.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>haystack-trace-backends</artifactId>
+    <version>1.0.9-SNAPSHOT</version>
+    <packaging>pom</packaging>
+
+    <parent>
+        <artifactId>haystack-traces</artifactId>
+        <groupId>com.expedia.www</groupId>
+        <version>1.0.9-SNAPSHOT</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+
+    <modules>
+        <module>cassandra</module>
+        <module>memory</module>
+    </modules>
+
+    <properties>
+        <scalastyle.config.location>${basedir}/../../checkstyles/scalastyle_config.xml</scalastyle.config.location>
+    </properties>
+</project>
diff --git a/traces/checkstyles/scalastyle_config.xml b/traces/checkstyles/scalastyle_config.xml
new file mode 100644
index 000000000..0b5ba9469
--- /dev/null
+++ b/traces/checkstyles/scalastyle_config.xml
@@ -0,0 +1,4 @@
+<scalastyle commentFilter="enabled">
+    <name>Scalastyle standard configuration</name>
+    <!-- check definitions omitted -->
+</scalastyle>
diff --git a/traces/commons/pom.xml b/traces/commons/pom.xml
new file mode 100644
index 000000000..b49a403eb
--- /dev/null
+++ b/traces/commons/pom.xml
@@ -0,0 +1,121 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>haystack-traces</artifactId>
+        <groupId>com.expedia.www</groupId>
+        <version>1.0.9-SNAPSHOT</version>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>haystack-trace-commons</artifactId>
+    <packaging>jar</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>vc.inreach.aws</groupId>
+            <artifactId>aws-signing-request-interceptor</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-sts</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.json4s</groupId>
+            <artifactId>json4s-jackson_${scala.major.minor.version}</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-ec2</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.searchbox</groupId>
+            <artifactId>jest</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.xerial.snappy</groupId>
+            <artifactId>snappy-java</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.github.luben</groupId>
+            <artifactId>zstd-jni</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-protobuf</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-stub</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-services</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-netty</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.json4s</groupId>
+            <artifactId>json4s-ext_${scala.major.minor.version}</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.scalatest</groupId>
+                <artifactId>scalatest-maven-plugin</artifactId>
+                <configuration>
+                    <wildcardSuites>com.expedia.www.haystack.trace.commons.unit</wildcardSuites>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <groupId>net.alchim31.maven</groupId>
+                <artifactId>scala-maven-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.scalastyle</groupId>
+                <artifactId>scalastyle-maven-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+            </plugin>
+
+            <plugin>
+                <groupId>org.scoverage</groupId>
+                <artifactId>scoverage-maven-plugin</artifactId>
+                <configuration>
+                    <aggregate>false</aggregate>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/clients/es/AWSSigningJestClientFactory.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/clients/es/AWSSigningJestClientFactory.scala
new file mode 100644
index 000000000..fa75e322b
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/clients/es/AWSSigningJestClientFactory.scala
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.clients.es
+
+import java.time.{LocalDateTime, ZoneId}
+
+import com.expedia.www.haystack.trace.commons.config.entities.AWSRequestSigningConfiguration
+import com.google.common.base.Supplier
+import io.searchbox.client.JestClientFactory
+import org.apache.http.impl.client.HttpClientBuilder
+import org.apache.http.impl.nio.client.HttpAsyncClientBuilder
+import org.slf4j.LoggerFactory
+import vc.inreach.aws.request.{AWSSigner, AWSSigningRequestInterceptor}
+import com.amazonaws.auth.AWSCredentialsProvider
+import com.amazonaws.auth.BasicAWSCredentials
+import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
+import com.amazonaws.internal.StaticCredentialsProvider
+
+/**
+ * Wrapper for JestClientFactory. Provides support for AWS ES request signing by adding an interceptor to the client.
+ *
+ * @param awsRequestSigningConfig config required for request signing like creds, region
+ */
+class AWSSigningJestClientFactory(awsRequestSigningConfig: AWSRequestSigningConfiguration) extends JestClientFactory {
+ private val LOGGER = LoggerFactory.getLogger(classOf[AWSSigningJestClientFactory])
+
+ val awsSigner = new AWSSigner(getCredentialProvider, awsRequestSigningConfig.region, awsRequestSigningConfig.awsServiceName, new ClockSupplier)
+ val requestInterceptor = new AWSSigningRequestInterceptor(awsSigner)
+
+ override def configureHttpClient(builder: HttpClientBuilder): HttpClientBuilder = {
+ builder.addInterceptorLast(requestInterceptor)
+ }
+
+ override def configureHttpClient(builder: HttpAsyncClientBuilder): HttpAsyncClientBuilder = {
+ builder.addInterceptorLast(requestInterceptor)
+ }
+
+ def getCredentialProvider: AWSCredentialsProvider = {
+ if (awsRequestSigningConfig.accessKey.isDefined) {
+ LOGGER.info("using static aws credential provider with access and secret key for ES")
+ new StaticCredentialsProvider(
+ new BasicAWSCredentials(awsRequestSigningConfig.accessKey.get, awsRequestSigningConfig.secretKey.get))
+ } else {
+ LOGGER.info("using default credential provider chain for ES")
+ new DefaultAWSCredentialsProviderChain
+ }
+ }
+}
+
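+// supplies the current UTC time to the AWSSigner for computing request signature timestamps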
+class ClockSupplier extends Supplier[LocalDateTime] {
+ override def get(): LocalDateTime = {
+ LocalDateTime.now(ZoneId.of("UTC"))
+ }
+}
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/clients/es/document/ServiceMetadataDoc.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/clients/es/document/ServiceMetadataDoc.scala
new file mode 100644
index 000000000..b88b2c235
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/clients/es/document/ServiceMetadataDoc.scala
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2018, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.clients.es.document
+
+import org.json4s.jackson.Serialization
+
+
+case class ServiceMetadataDoc(servicename: String,
+ operationname: String) {
+ val json: String = Serialization.write(this)(TraceIndexDoc.formats)
+}
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/clients/es/document/TraceIndexDoc.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/clients/es/document/TraceIndexDoc.scala
new file mode 100644
index 000000000..eff43c504
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/clients/es/document/TraceIndexDoc.scala
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2018, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.clients.es.document
+
+import org.json4s.DefaultFormats
+import org.json4s.jackson.Serialization
+
+import scala.collection.mutable
+
+object TraceIndexDoc {
+ implicit val formats = DefaultFormats
+ type TagKey = String
+ type TagValue = Any
+
+ val SERVICE_KEY_NAME = "servicename"
+ val OPERATION_KEY_NAME = "operationname"
+ val DURATION_KEY_NAME = "duration"
+ val START_TIME_KEY_NAME = "starttime"
+}
+
+case class TraceIndexDoc(traceid: String, rootduration: Long, starttime: Long, spans: Seq[mutable.Map[String, Any]]) {
+ val json: String = Serialization.write(this)(TraceIndexDoc.formats)
+}
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/entities/AWSRequestSigningConfiguration.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/entities/AWSRequestSigningConfiguration.scala
new file mode 100644
index 000000000..ac41e847f
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/entities/AWSRequestSigningConfiguration.scala
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2019, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.config.entities
+
+/**
+ * defines the configuration parameters for AWS request signing
+ *
+ * @param enabled: signing will be performed if this flag is enabled
+ * @param region: aws region
+ * @param awsServiceName: aws service name for which signing needs to be done
+ * @param accessKey: aws access key. If not present DefaultAWSCredentialsProviderChain is used
+ * @param secretKey: aws secret key
+ */
+case class AWSRequestSigningConfiguration (enabled: Boolean,
+ region: String,
+ awsServiceName: String,
+ accessKey: Option[String],
+ secretKey: Option[String])
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/entities/ReloadConfiguration.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/entities/ReloadConfiguration.scala
new file mode 100644
index 000000000..8fff2e7a6
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/entities/ReloadConfiguration.scala
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.config.entities
+
+import com.expedia.www.haystack.trace.commons.config.reload.Reloadable
+
+/**
+ * defines the configuration parameters for reloading the app configs from an external store like Elasticsearch
+ *
+ * @param configStoreEndpoint: endpoint of the external store where the app configuration is stored
+ * @param databaseName: name of the database
+ * @param reloadIntervalInMillis: app config will be refreshed after this interval in millis
+ * @param username: optional username for authenticating with the external store
+ * @param password: optional password for authenticating with the external store
+ * @param observers: list of reloadable configuration objects that subscribe to the reloader
+ * @param loadOnStartup: loads the app configuration from the external store on startup, default is true
+ */
+case class ReloadConfiguration(configStoreEndpoint: String,
+ databaseName: String,
+ reloadIntervalInMillis: Int,
+ username: Option[String],
+ password: Option[String],
+ observers: Seq[Reloadable],
+ loadOnStartup: Boolean = true)
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/entities/TraceStoreBackends.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/entities/TraceStoreBackends.scala
new file mode 100644
index 000000000..af56f37f4
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/entities/TraceStoreBackends.scala
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.config.entities
+
+
+/**
+ * defines the configuration parameters for a trace backend
+ *
+ * @param host : trace backend grpc hostname
+ * @param port : trace backend grpc port
+ */
+case class GrpcClientConfig(host: String, port: Int)
+
+/**
+ * multiple store backends
+ * @param backends configuration of all trace store backends
+ */
+case class TraceStoreBackends(backends: Seq[GrpcClientConfig])
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/entities/WhitelistIndexFieldConfiguration.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/entities/WhitelistIndexFieldConfiguration.scala
new file mode 100644
index 000000000..19a1ac429
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/entities/WhitelistIndexFieldConfiguration.scala
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.config.entities
+
+import java.util.concurrent.ConcurrentHashMap
+
+import com.expedia.www.haystack.trace.commons.config.entities.IndexFieldType.IndexFieldType
+import com.expedia.www.haystack.trace.commons.config.reload.Reloadable
+import org.apache.commons.lang3.StringUtils
+import org.json4s.ext.EnumNameSerializer
+import org.json4s.jackson.Serialization
+import org.json4s.{DefaultFormats, Formats}
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+
+object IndexFieldType extends Enumeration {
+ type IndexFieldType = Value
+ val string, long, int, double, bool = Value
+}
+
+case class WhitelistIndexField(name: String,
+ `type`: IndexFieldType,
+ aliases: Set[String] = Set(),
+ enableRangeQuery: Boolean = false,
+ searchContext: String = "span",
+ enabled: Boolean = true)
+
+case class WhiteListIndexFields(fields: List[WhitelistIndexField])
+
+case class WhitelistIndexFieldConfiguration() extends Reloadable {
+ private val LOGGER = LoggerFactory.getLogger(classOf[WhitelistIndexFieldConfiguration])
+
+ protected implicit val formats: Formats = DefaultFormats + new EnumNameSerializer(IndexFieldType)
+
+ @volatile
+ private var currentVersion: Int = 0
+
+ val indexFieldMap = new ConcurrentHashMap[String, WhitelistIndexField]()
+
+ var reloadConfigTableName: Option[String] = None
+
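+  // listeners notified whenever a changed whitelist configuration is applied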
+ private val onChangeListeners = mutable.ListBuffer[() => Unit]()
+
+  // fail fast if the reload config table name has not been configured
+ override def name: String = reloadConfigTableName
+ .getOrElse(throw new RuntimeException("fail to find the reload config table name!"))
+
+ /**
+ * this is called whenever the configuration reloader system reads the configuration object from external store
+ * we check if the config data has changed using the string's hashCode
+ * @param configData config object that is loaded at regular intervals from external store
+ */
+ override def onReload(configData: String): Unit = {
+ if(StringUtils.isNotEmpty(configData) && hasConfigChanged(configData)) {
+ LOGGER.info("new indexing fields have been detected: " + configData)
+ val fieldsToIndex = Serialization.read[WhiteListIndexFields](configData)
+
+ val lowercaseFieldNames = fieldsToIndex
+ .fields
+ .map(field => field.copy(name = field.name.toLowerCase, aliases = field.aliases.map(_.toLowerCase)))
+
+ updateIndexFieldMap(WhiteListIndexFields(lowercaseFieldNames))
+ // set the current version to newer one
+ currentVersion = configData.hashCode
+
+ this.synchronized {
+ onChangeListeners.foreach(l => l())
+ }
+ }
+ }
+
+ def addOnChangeListener(listener: () => Unit): Unit = {
+ this.synchronized {
+ onChangeListeners.append(listener)
+ }
+ }
+
+ private def updateIndexFieldMap(fList: WhiteListIndexFields): Unit = {
+ // remove the fields from the map if they are not present in the newly provided whitelist set
+ val indexableFieldNames = fList.fields.flatMap(field => field.aliases + field.name)
+
+ indexFieldMap.values().removeIf((f: WhitelistIndexField) => !indexableFieldNames.contains(f.name))
+
+ // add the fields in the map
+ for(field <- fList.fields) {
+ indexFieldMap.put(field.name, field)
+ field.aliases.foreach(alias => indexFieldMap.put(alias, field))
+ }
+ }
+
+ /**
+ * detect if configuration has changed using the hashCode as version
+ * @param newConfigData new configuration data
+ * @return
+ */
+ private def hasConfigChanged(newConfigData: String): Boolean = newConfigData.hashCode != currentVersion
+
+ /**
+ * @return the whitelist index fields
+ */
+ def whitelistIndexFields: List[WhitelistIndexField] = indexFieldMap.values().asScala.toList
+
+ def globalTraceContextIndexFieldNames: Set[String] = whitelistIndexFields.filter(_.searchContext == "trace").map(_.name).toSet
+}
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/reload/ConfigurationReloadElasticSearchProvider.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/reload/ConfigurationReloadElasticSearchProvider.scala
new file mode 100644
index 000000000..68222a8ea
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/reload/ConfigurationReloadElasticSearchProvider.scala
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.config.reload
+
+import com.expedia.www.haystack.commons.retries.RetryOperation
+import com.expedia.www.haystack.trace.commons.clients.es.AWSSigningJestClientFactory
+import com.expedia.www.haystack.trace.commons.config.entities.{AWSRequestSigningConfiguration, ReloadConfiguration}
+import io.searchbox.client.config.HttpClientConfig
+import io.searchbox.client.{JestClient, JestClientFactory}
+import io.searchbox.core.Search
+
+import scala.util.{Failure, Success}
+
+class ConfigurationReloadElasticSearchProvider(reloadConfig: ReloadConfiguration, awsRequestSigningConfig: AWSRequestSigningConfiguration)
+ extends ConfigurationReloadProvider(reloadConfig) {
+
+ private val matchAllQuery = "{\"query\":{\"match_all\":{\"boost\":1.0}}}"
+
+ private val esClient: JestClient = {
+ val factory = {
+ if (awsRequestSigningConfig.enabled) {
+ LOGGER.info("using AWSSigningJestClientFactory for es client")
+ new AWSSigningJestClientFactory(awsRequestSigningConfig)
+ } else {
+ LOGGER.info("using JestClientFactory for es client")
+ new JestClientFactory()
+ }
+ }
+
+ val builder = new HttpClientConfig.Builder(reloadConfig.configStoreEndpoint).multiThreaded(false)
+
+ if (reloadConfig.username.isDefined && reloadConfig.password.isDefined) {
+ builder.defaultCredentials(reloadConfig.username.get, reloadConfig.password.get)
+ }
+
+ factory.setHttpClientConfig(builder.build())
+ factory.getObject
+ }
+
+ /**
+ * loads the configuration from external store
+ */
+ override def load(): Unit = {
+ reloadConfig.observers.foreach(observer => {
+
+ val searchQuery = new Search.Builder(matchAllQuery)
+ .addIndex(reloadConfig.databaseName)
+ .addType(observer.name)
+ .build()
+
+ RetryOperation.executeWithRetryBackoff(() => esClient.execute(searchQuery), RetryOperation.Config(3, 1000, 2)) match {
+ case Success(result) =>
+ if (result.isSucceeded) {
+ LOGGER.info(s"Reloading(or loading) is successfully done for the configuration name =${observer.name}")
+ observer.onReload(result.getSourceAsString)
+ } else {
+ LOGGER.error(s"Fail to reload the configuration from elastic search with error: ${result.getErrorMessage} " +
+ s"for observer name=${observer.name}")
+ }
+
+ case Failure(reason) =>
+ LOGGER.error(s"Fail to reload the configuration from elastic search for observer name=${observer.name}. " +
+ s"Will try at next scheduled time", reason)
+ }
+ })
+ }
+
+ override def close(): Unit = {
+ this.esClient.shutdownClient()
+ super.close()
+ }
+}
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/reload/ConfigurationReloadProvider.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/reload/ConfigurationReloadProvider.scala
new file mode 100644
index 000000000..539fb7823
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/reload/ConfigurationReloadProvider.scala
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.config.reload
+
+import java.util.concurrent.{Executors, TimeUnit}
+
+import com.expedia.www.haystack.trace.commons.config.entities.ReloadConfiguration
+import org.slf4j.{Logger, LoggerFactory}
+
+abstract class ConfigurationReloadProvider(config: ReloadConfiguration) extends AutoCloseable {
+ protected val LOGGER: Logger = LoggerFactory.getLogger(classOf[ConfigurationReloadProvider])
+
+ private val executor = Executors.newSingleThreadScheduledExecutor()
+
+ // schedule the reload process from an external store
+ if(config.reloadIntervalInMillis > -1) {
+ LOGGER.info("configuration reload scheduler has been started with a delay of {}ms", config.reloadIntervalInMillis)
+ executor.scheduleWithFixedDelay(() => {
+ load()
+ }, config.reloadIntervalInMillis, config.reloadIntervalInMillis, TimeUnit.MILLISECONDS)
+ }
+
+ def load(): Unit
+
+ def close(): Unit = executor.shutdownNow()
+}
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/reload/Reloadable.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/reload/Reloadable.scala
new file mode 100644
index 000000000..97131ccee
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/config/reload/Reloadable.scala
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.config.reload
+
+/**
+ * An entity (e.g. a configuration object) that extends the Reloadable trait can have its configuration
+ * reloaded dynamically. The config reloader periodically reads the new configuration from an external
+ * store and calls the 'onReload()' method.
+ * The 'name' provides the table name under which the configuration for this entity is stored.
+ */
+trait Reloadable {
+ def name: String
+
+ def onReload(newConfig: String): Unit
+}
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/packer/PackedMessage.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/packer/PackedMessage.scala
new file mode 100644
index 000000000..9197f4d76
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/packer/PackedMessage.scala
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.packer
+
+import java.nio.ByteBuffer
+
+import com.google.protobuf.GeneratedMessageV3
+import org.json4s.jackson.Serialization
+import org.json4s.{DefaultFormats, Formats}
+
+object PackedMessage {
+ implicit val formats: Formats = DefaultFormats + new org.json4s.ext.EnumSerializer(PackerType)
+ val MAGIC_BYTES: Array[Byte] = "hytc".getBytes("utf-8")
+}
+
+case class PackedMessage[T <: GeneratedMessageV3](protoObj: T,
+ private val pack: (T => Array[Byte]),
+ private val metadata: PackedMetadata) {
+ import PackedMessage._
+ private lazy val metadataBytes: Array[Byte] = Serialization.write(metadata).getBytes("utf-8")
+
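+  // wire format (for compressing packers): MAGIC_BYTES "hytc" | 4-byte metadata length |
+  // metadata JSON | packed payload. PackerType.NONE emits the raw proto bytes unframed.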
+ val packedDataBytes: Array[Byte] = {
+ val packedDataBytes = pack(protoObj)
+ if (PackerType.NONE == metadata.t) {
+ packedDataBytes
+ } else {
+ ByteBuffer
+ .allocate(MAGIC_BYTES.length + 4 + metadataBytes.length + packedDataBytes.length)
+ .put(MAGIC_BYTES)
+ .putInt(metadataBytes.length)
+ .put(metadataBytes)
+ .put(packedDataBytes).array()
+ }
+ }
+}
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/packer/Packer.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/packer/Packer.scala
new file mode 100644
index 000000000..370829225
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/packer/Packer.scala
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.packer
+
+import java.io.{ByteArrayInputStream, ByteArrayOutputStream, OutputStream}
+import java.util.zip.GZIPOutputStream
+
+import com.expedia.www.haystack.trace.commons.packer.PackerType.PackerType
+import com.github.luben.zstd.ZstdOutputStream
+import com.google.protobuf.GeneratedMessageV3
+import org.apache.commons.io.IOUtils
+import org.xerial.snappy.SnappyOutputStream
+
+object PackerType extends Enumeration {
+ type PackerType = Value
+ val GZIP, SNAPPY, NONE, ZSTD = Value
+}
+
+case class PackedMetadata(t: PackerType)
+
+abstract class Packer[T <: GeneratedMessageV3] {
+ val packerType: PackerType
+
+ protected def compressStream(stream: OutputStream): OutputStream
+
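+  // serializes the proto object through the codec's compressing stream; a null stream
+  // (NoopPacker) means the raw proto bytes are returned as-is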
+ private def pack(protoObj: T): Array[Byte] = {
+ val outStream = new ByteArrayOutputStream
+ val compressedStream = compressStream(outStream)
+ if (compressedStream != null) {
+ IOUtils.copy(new ByteArrayInputStream(protoObj.toByteArray), compressedStream)
+ compressedStream.close() // this flushes the data to final outStream
+ outStream.toByteArray
+ } else {
+ protoObj.toByteArray
+ }
+ }
+
+ def apply(protoObj: T): PackedMessage[T] = {
+ PackedMessage(protoObj, pack, PackedMetadata(packerType))
+ }
+}
+
+class NoopPacker[T <: GeneratedMessageV3] extends Packer[T] {
+ override val packerType = PackerType.NONE
+ override protected def compressStream(stream: OutputStream): OutputStream = null
+}
+
+class SnappyPacker[T <: GeneratedMessageV3] extends Packer[T] {
+ override val packerType = PackerType.SNAPPY
+ override protected def compressStream(stream: OutputStream): OutputStream = new SnappyOutputStream(stream)
+}
+
+
+class ZstdPacker[T <: GeneratedMessageV3] extends Packer[T] {
+ override val packerType = PackerType.ZSTD
+ override protected def compressStream(stream: OutputStream): OutputStream = new ZstdOutputStream(stream)
+}
+
+class GzipPacker[T <: GeneratedMessageV3] extends Packer[T] {
+ override val packerType = PackerType.GZIP
+ override protected def compressStream(stream: OutputStream): OutputStream = new GZIPOutputStream(stream)
+}
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/packer/PackerFactory.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/packer/PackerFactory.scala
new file mode 100644
index 000000000..820e3e9a4
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/packer/PackerFactory.scala
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.packer
+
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.commons.packer.PackerType.PackerType
+
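+/**
+ * creates a packer for the given compression type; unknown types fall back to no compression
+ */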
+object PackerFactory {
+ def spanBufferPacker(`type`: PackerType): Packer[SpanBuffer] = {
+ `type` match {
+ case PackerType.SNAPPY => new SnappyPacker[SpanBuffer]
+ case PackerType.GZIP => new GzipPacker[SpanBuffer]
+ case PackerType.ZSTD => new ZstdPacker[SpanBuffer]
+ case _ => new NoopPacker[SpanBuffer]
+ }
+ }
+}
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/packer/Unpacker.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/packer/Unpacker.scala
new file mode 100644
index 000000000..8afe55857
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/packer/Unpacker.scala
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.packer
+
+import java.io.{ByteArrayInputStream, ByteArrayOutputStream, InputStream}
+import java.nio.ByteBuffer
+import java.util.zip.GZIPInputStream
+
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.github.luben.zstd.ZstdInputStream
+import org.apache.commons.io.IOUtils
+import org.json4s.jackson.Serialization
+import org.xerial.snappy.SnappyInputStream
+
+object Unpacker {
+ import PackedMessage._
+
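+  // reads the length-prefixed metadata JSON that follows the magic bytes, or returns
+  // null when the payload does not start with MAGIC_BYTES (i.e. unframed data)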
+ private def readMetadata(packedDataBytes: Array[Byte]): Array[Byte] = {
+ val byteBuffer = ByteBuffer.wrap(packedDataBytes)
+ val magicBytesExist = MAGIC_BYTES.indices forall { idx => byteBuffer.get() == MAGIC_BYTES.apply(idx) }
+ if (magicBytesExist) {
+ val headerLength = byteBuffer.getInt
+ val metadataBytes = new Array[Byte](headerLength)
+ byteBuffer.get(metadataBytes, 0, headerLength)
+ metadataBytes
+ } else {
+ null
+ }
+ }
+
+ private def unpack(compressedStream: InputStream) = {
+ val outputStream = new ByteArrayOutputStream()
+ IOUtils.copy(compressedStream, outputStream)
+ outputStream.toByteArray
+ }
+
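+  // dispatches on the packer type recorded in the metadata header; unframed payloads
+  // are treated as plain uncompressed SpanBuffer protos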
+ def readSpanBuffer(packedDataBytes: Array[Byte]): SpanBuffer = {
+ var parsedDataBytes: Array[Byte] = null
+ val metadataBytes = readMetadata(packedDataBytes)
+ if (metadataBytes != null) {
+ val packedMetadata = Serialization.read[PackedMetadata](new String(metadataBytes))
+ val compressedDataOffset = MAGIC_BYTES.length + 4 + metadataBytes.length
+ packedMetadata.t match {
+ case PackerType.SNAPPY =>
+ parsedDataBytes = unpack(
+ new SnappyInputStream(
+ new ByteArrayInputStream(packedDataBytes, compressedDataOffset, packedDataBytes.length - compressedDataOffset)))
+ case PackerType.GZIP =>
+ parsedDataBytes = unpack(
+ new GZIPInputStream(
+ new ByteArrayInputStream(packedDataBytes, compressedDataOffset, packedDataBytes.length - compressedDataOffset)))
+ case PackerType.ZSTD =>
+ parsedDataBytes = unpack(
+ new ZstdInputStream(
+ new ByteArrayInputStream(packedDataBytes, compressedDataOffset, packedDataBytes.length - compressedDataOffset)))
+ case _ =>
+ return SpanBuffer.parseFrom(
+ new ByteArrayInputStream(packedDataBytes, compressedDataOffset, packedDataBytes.length - compressedDataOffset))
+ }
+ } else {
+ parsedDataBytes = packedDataBytes
+ }
+ SpanBuffer.parseFrom(parsedDataBytes)
+ }
+}
diff --git a/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/utils/SpanUtils.scala b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/utils/SpanUtils.scala
new file mode 100644
index 000000000..2017b5765
--- /dev/null
+++ b/traces/commons/src/main/scala/com/expedia/www/haystack/trace/commons/utils/SpanUtils.scala
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.commons.utils
+
+import com.expedia.open.tracing.Log
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.Tag
+import com.expedia.www.haystack.trace.commons.utils.SpanMarkers._
+
+import scala.collection.JavaConverters._
+
+object SpanUtils {
+ val URL_TAG_KEY = "url"
+
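+  // assumes the given log event exists on the span; throws NoSuchElementException otherwise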
+ def getEventTimestamp(span: Span, event: String): Long = {
+ span.getLogsList.asScala.find(log => {
+ log.getFieldsList.asScala.exists(tag => {
+ tag.getKey.equalsIgnoreCase(LOG_EVENT_TAG_KEY) && tag.getVStr.equalsIgnoreCase(event)
+ })
+ }).get.getTimestamp
+ }
+
+ def getEndTime(span: Span): Long = {
+ span.getStartTime + span.getDuration
+ }
+
+ def isMergedSpan(span: Span): Boolean = {
+ containsClientLogTag(span) && containsServerLogTag(span)
+ }
+
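+  // prefers the span.kind tag; falls back to server/client log events when the tag is absent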
+ def spanKind(span: Span): String = {
+ val kind = span.getTagsList.asScala.find(_.getKey == SpanMarkers.SPAN_KIND_TAG_KEY).map(_.getVStr).getOrElse("")
+ if (kind == "") {
+ if (containsServerLogTag(span)) {
+ return SERVER_SPAN_KIND
+ } else if (containsClientLogTag(span)) {
+ return CLIENT_SPAN_KIND
+ }
+ }
+ kind
+ }
+
+ def containsServerLogTag(span: Span): Boolean = {
+ containsLogTag(span, SERVER_RECV_EVENT) && containsLogTag(span, SERVER_SEND_EVENT)
+ }
+
+ def getServiceTag(span: Span): Option[Tag] = {
+ span.getTagsList.asScala.find(tag => {
+ tag.getKey.equalsIgnoreCase(SERVICE_TAG_KEY)
+ })
+ }
+
+ def containsClientLogTag(span: Span): Boolean = {
+ containsLogTag(span, CLIENT_RECV_EVENT) && containsLogTag(span, CLIENT_SEND_EVENT)
+ }
+
+ def addServerLogTag(span: Span): Span = {
+ val receiveEventLog = Log.newBuilder()
+ .setTimestamp(span.getStartTime)
+ .addFields(
+ Tag.newBuilder().setKey(LOG_EVENT_TAG_KEY).setVStr(SERVER_RECV_EVENT))
+
+ val sendEventLog = Log.newBuilder()
+ .setTimestamp(span.getStartTime + span.getDuration)
+ .addFields(
+        Tag.newBuilder().setKey(LOG_EVENT_TAG_KEY).setVStr(SERVER_SEND_EVENT))
+
+ span
+ .toBuilder
+ .addLogs(receiveEventLog)
+ .addLogs(sendEventLog)
+ .build()
+ }
+
+ def addClientLogTag(span: Span): Span = {
+ val sendEventLog = Log.newBuilder()
+ .setTimestamp(span.getStartTime)
+ .addFields(
+ Tag.newBuilder().setType(Tag.TagType.STRING).setKey(LOG_EVENT_TAG_KEY).setVStr(CLIENT_SEND_EVENT))
+
+ val receiveEventLog = Log.newBuilder()
+ .setTimestamp(span.getStartTime + span.getDuration)
+ .addFields(
+ Tag.newBuilder().setType(Tag.TagType.STRING).setKey(LOG_EVENT_TAG_KEY).setVStr(CLIENT_RECV_EVENT))
+
+ span
+ .toBuilder
+ .addLogs(sendEventLog)
+ .addLogs(receiveEventLog)
+ .build()
+ }
+
+ private def containsLogTag(span: Span, event: String) = {
+ span.getLogsList.asScala.exists(log => {
+ log.getFieldsList.asScala.exists(tag => {
+ tag.getKey.equalsIgnoreCase(LOG_EVENT_TAG_KEY) && tag.getVStr.equalsIgnoreCase(event)
+ })
+ })
+ }
+
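+  /**
+   * synthesizes a root span covering the whole trace: it starts at the earliest child
+   * start-time, ends at the latest child end-time, and is tagged so consumers can tell
+   * it was auto-generated rather than emitted by a service
+   */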
+ def createAutoGeneratedRootSpan(spans: Seq[Span],
+ reason: String,
+ rootSpanId: String): Span.Builder = {
+ val spanWithEarliestStartTime = spans.minBy(_.getStartTime)
+ val spanWithLatestEndTime = spans.maxBy(span => span.getStartTime + span.getDuration)
+
+ val startTime = spanWithEarliestStartTime.getStartTime
+ val duration = (spanWithLatestEndTime.getStartTime + spanWithLatestEndTime.getDuration) - startTime
+
+ val autoGenSpanBuilder = Span.newBuilder()
+ .setServiceName(spanWithEarliestStartTime.getServiceName)
+ .setOperationName(AUTOGEN_OPERATION_NAME)
+ .setTraceId(spanWithEarliestStartTime.getTraceId)
+ .setSpanId(rootSpanId)
+ .setParentSpanId("")
+ .setStartTime(startTime)
+ .setDuration(duration)
+ .addTags(Tag.newBuilder().setKey(AUTOGEN_REASON_TAG).setVStr(reason).setType(Tag.TagType.STRING))
+ .addTags(Tag.newBuilder().setKey(AUTOGEN_SPAN_ID_TAG).setVStr(rootSpanId).setType(Tag.TagType.STRING))
+ .addTags(Tag.newBuilder().setKey(AUTOGEN_FLAG_TAG).setVBool(true).setType(Tag.TagType.BOOL))
+
+ spanWithEarliestStartTime.getTagsList.asScala.find(_.getKey.equalsIgnoreCase(URL_TAG_KEY)) match {
+ case Some(urlTag) => autoGenSpanBuilder.addTags(urlTag)
+ case _ => autoGenSpanBuilder
+ }
+ }
+}
+
+object SpanMarkers {
+ val AUTOGEN_OPERATION_NAME = "auto-generated"
+ val AUTOGEN_REASON_TAG = "X-HAYSTACK-AUTOGEN-REASON"
+ val AUTOGEN_SPAN_ID_TAG = "X-HAYSTACK-AUTOGEN-SPAN-ID"
+ val AUTOGEN_FLAG_TAG = "X-HAYSTACK-AUTOGEN"
+
+ val LOG_EVENT_TAG_KEY = "event"
+ val SERVER_SEND_EVENT = "ss"
+ val SERVER_RECV_EVENT = "sr"
+ val CLIENT_SEND_EVENT = "cs"
+ val CLIENT_RECV_EVENT = "cr"
+
+ val SPAN_KIND_TAG_KEY = "span.kind"
+ val SERVICE_TAG_KEY = "service"
+ val SERVER_SPAN_KIND = "server"
+ val CLIENT_SPAN_KIND = "client"
+}
diff --git a/traces/commons/src/test/resources/logback-test.xml b/traces/commons/src/test/resources/logback-test.xml
new file mode 100644
index 000000000..adfa02c68
--- /dev/null
+++ b/traces/commons/src/test/resources/logback-test.xml
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/traces/commons/src/test/scala/com/expedia/www/haystack/trace/commons/unit/BaseUnitTestSpec.scala b/traces/commons/src/test/scala/com/expedia/www/haystack/trace/commons/unit/BaseUnitTestSpec.scala
new file mode 100644
index 000000000..fc12a7a6b
--- /dev/null
+++ b/traces/commons/src/test/scala/com/expedia/www/haystack/trace/commons/unit/BaseUnitTestSpec.scala
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.commons.unit
+
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.FunSpec
+import org.scalatest.GivenWhenThen
+import org.scalatest.Matchers
+
+trait BaseUnitTestSpec extends FunSpec with GivenWhenThen with Matchers with EasyMockSugar
diff --git a/traces/commons/src/test/scala/com/expedia/www/haystack/trace/commons/unit/PackerSpec.scala b/traces/commons/src/test/scala/com/expedia/www/haystack/trace/commons/unit/PackerSpec.scala
new file mode 100644
index 000000000..de2947f16
--- /dev/null
+++ b/traces/commons/src/test/scala/com/expedia/www/haystack/trace/commons/unit/PackerSpec.scala
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.unit
+
+import java.util.UUID
+
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.commons.packer.{PackerFactory, PackerType, Unpacker}
+import org.scalatest.{FunSpec, Matchers}
+import org.scalatest.easymock.EasyMockSugar
+
+class PackerSpec extends FunSpec with Matchers with EasyMockSugar {
+ describe("A Packer") {
+ it("should pack and unpack spanBuffer proto object for all packer types") {
+ PackerType.values.foreach(packerType => {
+ val packer = PackerFactory.spanBufferPacker(packerType)
+ val span_1 = Span.newBuilder()
+ .setTraceId(UUID.randomUUID().toString)
+ .setSpanId(UUID.randomUUID().toString)
+ .setServiceName("test_service")
+ .setOperationName("/foo")
+ .addTags(Tag.newBuilder().setKey("error").setVBool(false))
+ .addTags(Tag.newBuilder().setKey("http.status_code").setVLong(200))
+ .addTags(Tag.newBuilder().setKey("version").setVStr("1.1"))
+ .build()
+ val span_2 = Span.newBuilder()
+ .setTraceId(UUID.randomUUID().toString)
+ .setSpanId(UUID.randomUUID().toString)
+ .setParentSpanId(UUID.randomUUID().toString)
+ .setServiceName("another_test_service")
+ .setOperationName("/bar")
+ .addTags(Tag.newBuilder().setKey("error").setVBool(true))
+ .addTags(Tag.newBuilder().setKey("http.status_code").setVLong(404))
+ .addTags(Tag.newBuilder().setKey("version").setVStr("1.2"))
+ .build()
+ val spanBuffer = SpanBuffer.newBuilder().setTraceId("trace-1").addChildSpans(span_1).addChildSpans(span_2).build()
+ val packedMessage = packer.apply(spanBuffer)
+ val packedDataBytes = packedMessage.packedDataBytes
+ packedDataBytes should not be null
+ val spanBufferProto = Unpacker.readSpanBuffer(packedDataBytes)
+ spanBufferProto shouldEqual spanBuffer
+ })
+ }
+ }
+}
diff --git a/traces/commons/src/test/scala/com/expedia/www/haystack/trace/commons/unit/TraceIndexDocSpec.scala b/traces/commons/src/test/scala/com/expedia/www/haystack/trace/commons/unit/TraceIndexDocSpec.scala
new file mode 100644
index 000000000..37bbe53a5
--- /dev/null
+++ b/traces/commons/src/test/scala/com/expedia/www/haystack/trace/commons/unit/TraceIndexDocSpec.scala
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.unit
+
+import com.expedia.www.haystack.trace.commons.clients.es.document.TraceIndexDoc
+import org.scalatest.{FunSpec, Matchers}
+
+import scala.collection.mutable
+
+class TraceIndexDocSpec extends FunSpec with Matchers {
+ describe("TraceIndex Document") {
+ it("should produce the valid json document for indexing") {
+ val startTime = 1528715319040L
+ val spanDoc = mutable.Map("spanid" -> "SPAN-1", "operatioName" -> "op1", "serviceName" -> "svc", "duration" -> 100, "starttime" -> startTime)
+ val indexDoc = TraceIndexDoc("trace-id", 100L, startTime, Seq(spanDoc))
+ indexDoc.json shouldEqual "{\"traceid\":\"trace-id\",\"rootduration\":100,\"starttime\":1528715319040,\"spans\":[{\"spanid\":\"SPAN-1\",\"serviceName\":\"svc\",\"starttime\":1528715319040,\"operatioName\":\"op1\",\"duration\":100}]}"
+ }
+ }
+}
diff --git a/traces/commons/src/test/scala/com/expedia/www/haystack/trace/commons/unit/WhitelistIndexFieldConfigurationSpec.scala b/traces/commons/src/test/scala/com/expedia/www/haystack/trace/commons/unit/WhitelistIndexFieldConfigurationSpec.scala
new file mode 100644
index 000000000..24febefb0
--- /dev/null
+++ b/traces/commons/src/test/scala/com/expedia/www/haystack/trace/commons/unit/WhitelistIndexFieldConfigurationSpec.scala
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.commons.unit
+
+import com.expedia.www.haystack.trace.commons.config.entities.{IndexFieldType, WhiteListIndexFields, WhitelistIndexField, WhitelistIndexFieldConfiguration}
+import org.json4s.ext.EnumNameSerializer
+import org.json4s.jackson.Serialization
+import org.json4s.{DefaultFormats, Formats}
+import org.scalatest.{Entry, FunSpec, Matchers}
+
+import scala.collection.JavaConverters._
+
+class WhitelistIndexFieldConfigurationSpec extends FunSpec with Matchers {
+
+ protected implicit val formats: Formats = DefaultFormats + new EnumNameSerializer(IndexFieldType)
+
+ describe("whitelist field configuration") {
+ it("an empty configuration should return whitelist fields as empty") {
+ val config = WhitelistIndexFieldConfiguration()
+ config.indexFieldMap shouldBe 'empty
+ config.whitelistIndexFields shouldBe 'empty
+ }
+
+ it("a loaded configuration should return the non empty whitelist fields") {
+ val whitelistField_1 = WhitelistIndexField(name = "role", `type` = IndexFieldType.string, enableRangeQuery = true)
+ val whitelistField_2 = WhitelistIndexField(name = "Errorcode", `type` = IndexFieldType.long)
+
+ val config = WhitelistIndexFieldConfiguration()
+ val cfgJsonData = Serialization.write(WhiteListIndexFields(List(whitelistField_1, whitelistField_2)))
+
+ // reload
+ config.onReload(cfgJsonData)
+
+ config.whitelistIndexFields.map(_.name) should contain allOf("role", "errorcode")
+ config.whitelistIndexFields.filter(r => r.name == "role").head.enableRangeQuery shouldBe true
+ config.indexFieldMap.size() shouldBe 2
+ config.indexFieldMap.keys().asScala.toList should contain allOf("role", "errorcode")
+ config.globalTraceContextIndexFieldNames.size shouldBe 0
+
+ val whitelistField_3 = WhitelistIndexField(name = "status", `type` = IndexFieldType.string, aliases = Set("_status", "HTTP-STATUS"))
+ val whitelistField_4 = WhitelistIndexField(name = "something", `type` = IndexFieldType.long, searchContext = "trace")
+
+ val newCfgJsonData = Serialization.write(WhiteListIndexFields(List(whitelistField_1, whitelistField_3, whitelistField_4)))
+ config.onReload(newCfgJsonData)
+
+ config.whitelistIndexFields.size shouldBe 5
+ config.whitelistIndexFields.map(_.name).toSet should contain allOf("status", "something", "role")
+ config.indexFieldMap.size shouldBe 5
+ config.indexFieldMap.keys().asScala.toList should contain allOf("status", "something", "role", "http-status", "_status")
+
+ config.onReload(newCfgJsonData)
+ config.whitelistIndexFields.size shouldBe 5
+ config.whitelistIndexFields.map(_.name).toSet should contain allOf("status", "something", "role")
+ config.indexFieldMap.size() shouldBe 5
+ config.indexFieldMap.keys().asScala.toList should contain allOf("status", "something", "role", "http-status", "_status")
+
+ config.indexFieldMap.get("http-status").name shouldEqual "status"
+ config.indexFieldMap.get("_status").name shouldEqual "status"
+
+ config.globalTraceContextIndexFieldNames.size shouldBe 1
+ config.globalTraceContextIndexFieldNames.head shouldEqual "something"
+ }
+ }
+}
diff --git a/traces/deployment/scripts/publish-to-docker-hub.sh b/traces/deployment/scripts/publish-to-docker-hub.sh
new file mode 100755
index 000000000..0ff8e3bf4
--- /dev/null
+++ b/traces/deployment/scripts/publish-to-docker-hub.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+set -e
+
+QUALIFIED_DOCKER_IMAGE_NAME=$DOCKER_ORG/$DOCKER_IMAGE_NAME
+echo "DOCKER_ORG=$DOCKER_ORG, DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME, QUALIFIED_DOCKER_IMAGE_NAME=$QUALIFIED_DOCKER_IMAGE_NAME"
+echo "BRANCH=$BRANCH, TAG=$TAG, SHA=$SHA"
+
+# login
+docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD"
+
+# Add tags
+if [[ $TAG =~ ([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
+ echo "releasing semantic versions"
+
+ unset MAJOR MINOR PATCH
+ MAJOR="${BASH_REMATCH[1]}"
+ MINOR="${BASH_REMATCH[2]}"
+ PATCH="${BASH_REMATCH[3]}"
+
+ # for tag, add MAJOR, MAJOR.MINOR, MAJOR.MINOR.PATCH and latest as tag
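+ # e.g. a hypothetical TAG=1.2.3 produces and pushes :1, :1.2, :1.2.3 and :latest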
+ # publish image with tags
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR
+
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR
+
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR.$PATCH
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR.$PATCH
+
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:latest
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:latest
+
+elif [[ "$BRANCH" == "master" ]]; then
+ echo "releasing master branch"
+
+ # for the 'master' branch, tag the image with the commit SHA
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$SHA
+
+ # publish the image with its tag
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:$SHA
+fi
diff --git a/traces/deployment/terraform/es-indices/curator-service-metadata/main.tf b/traces/deployment/terraform/es-indices/curator-service-metadata/main.tf
new file mode 100755
index 000000000..9b14010c0
--- /dev/null
+++ b/traces/deployment/terraform/es-indices/curator-service-metadata/main.tf
@@ -0,0 +1,29 @@
+locals {
+ count = "${var.enabled?1:0}"
+}
+
+
+data "template_file" "curator_cron_job" {
+ template = "${file("${path.module}/templates/curator-cron-job-yaml.tpl")}"
+ vars {
+ elasticsearch_host = "${var.elasticsearch_hostname}"
+ elasticsearch_port = "${var.elasticsearch_port}"
+ app_namespace = "${var.namespace}"
+ }
+}
+resource "null_resource" "curator_addons" {
+ triggers {
+ template = "${data.template_file.curator_cron_job.rendered}"
+ }
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.curator_cron_job.rendered}' | ${var.kubectl_executable_name} apply -f - --context ${var.kubectl_context_name}"
+ }
+
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.curator_cron_job.rendered}' | ${var.kubectl_executable_name} delete -f - --context ${var.kubectl_context_name}"
+ when = "destroy"
+ }
+ count = "${local.count}"
+}
+
+
diff --git a/traces/deployment/terraform/es-indices/curator-service-metadata/outputs.tf b/traces/deployment/terraform/es-indices/curator-service-metadata/outputs.tf
new file mode 100755
index 000000000..e69de29bb
diff --git a/traces/deployment/terraform/es-indices/curator-service-metadata/templates/curator-cron-job-yaml.tpl b/traces/deployment/terraform/es-indices/curator-service-metadata/templates/curator-cron-job-yaml.tpl
new file mode 100755
index 000000000..325452768
--- /dev/null
+++ b/traces/deployment/terraform/es-indices/curator-service-metadata/templates/curator-cron-job-yaml.tpl
@@ -0,0 +1,79 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: curator-es-service-metadata-index-store
+ namespace: ${app_namespace}
+ labels:
+ app: curator-es-service-metadata-index-store
+data:
+ curator.yml: |-
+ client:
+ hosts:
+ - ${elasticsearch_host}
+ port: ${elasticsearch_port}
+ url_prefix:
+ use_ssl: False
+ certificate:
+ client_cert:
+ client_key:
+ aws_key:
+ aws_secret_key:
+ aws_region:
+ ssl_no_validate: False
+ http_auth:
+ timeout: 30
+ master_only: False
+ logging:
+ loglevel: DEBUG
+ logfile:
+ logformat: default
+ blacklist: ['elasticsearch', 'urllib3']
+ actions.yml: |-
+ actions:
+ 1:
+ action: delete_indices
+ options:
+ ignore_empty_list: True
+ timeout_override:
+ continue_if_exception: False
+ disable_action: False
+ filters:
+ - filtertype: pattern
+ kind: prefix
+ value: service-metadata-
+ exclude:
+ - filtertype: age
+ source: name
+ direction: older
+ timestring: "%Y-%m-%d"
+ unit: days
+ unit_count: 4
+ exclude:
+---
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+ name: curator-es-service-metadata-index-store
+ namespace: ${app_namespace}
+
+spec:
+ schedule: "0 */4 * * *"
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ containers:
+ - name: curator-es-service-metadata-index-store
+ image: bobrik/curator:5.4.0
+ args:
+ - --config
+ - /config/curator.yml
+ - /config/actions.yml
+ volumeMounts:
+ - mountPath: /config
+ name: config
+ restartPolicy: OnFailure
+ volumes:
+ - name: config
+ configMap:
+ name: curator-es-service-metadata-index-store
diff --git a/traces/deployment/terraform/es-indices/curator-service-metadata/variables.tf b/traces/deployment/terraform/es-indices/curator-service-metadata/variables.tf
new file mode 100755
index 000000000..706d95c4f
--- /dev/null
+++ b/traces/deployment/terraform/es-indices/curator-service-metadata/variables.tf
@@ -0,0 +1,7 @@
+variable "kubectl_executable_name" {}
+variable "kubectl_context_name" {}
+variable "enabled" {}
+variable "elasticsearch_hostname" {}
+variable "elasticsearch_port" {}
+variable "namespace" {}
+
diff --git a/traces/deployment/terraform/es-indices/curator-trace-index/main.tf b/traces/deployment/terraform/es-indices/curator-trace-index/main.tf
new file mode 100755
index 000000000..9b14010c0
--- /dev/null
+++ b/traces/deployment/terraform/es-indices/curator-trace-index/main.tf
@@ -0,0 +1,29 @@
+locals {
+ count = "${var.enabled?1:0}"
+}
+
+
+data "template_file" "curator_cron_job" {
+ template = "${file("${path.module}/templates/curator-cron-job-yaml.tpl")}"
+ vars {
+ elasticsearch_host = "${var.elasticsearch_hostname}"
+ elasticsearch_port = "${var.elasticsearch_port}"
+ app_namespace = "${var.namespace}"
+ }
+}
+resource "null_resource" "curator_addons" {
+ triggers {
+ template = "${data.template_file.curator_cron_job.rendered}"
+ }
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.curator_cron_job.rendered}' | ${var.kubectl_executable_name} apply -f - --context ${var.kubectl_context_name}"
+ }
+
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.curator_cron_job.rendered}' | ${var.kubectl_executable_name} delete -f - --context ${var.kubectl_context_name}"
+ when = "destroy"
+ }
+ count = "${local.count}"
+}
+
+
diff --git a/traces/deployment/terraform/es-indices/curator-trace-index/outputs.tf b/traces/deployment/terraform/es-indices/curator-trace-index/outputs.tf
new file mode 100755
index 000000000..e69de29bb
diff --git a/traces/deployment/terraform/es-indices/curator-trace-index/templates/curator-cron-job-yaml.tpl b/traces/deployment/terraform/es-indices/curator-trace-index/templates/curator-cron-job-yaml.tpl
new file mode 100755
index 000000000..45a0eb58e
--- /dev/null
+++ b/traces/deployment/terraform/es-indices/curator-trace-index/templates/curator-cron-job-yaml.tpl
@@ -0,0 +1,79 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: curator-es-haystack-traces-index-store
+ namespace: ${app_namespace}
+ labels:
+ app: curator-es-haystack-traces-index-store
+data:
+ curator.yml: |-
+ client:
+ hosts:
+ - ${elasticsearch_host}
+ port: ${elasticsearch_port}
+ url_prefix:
+ use_ssl: False
+ certificate:
+ client_cert:
+ client_key:
+ aws_key:
+ aws_secret_key:
+ aws_region:
+ ssl_no_validate: False
+ http_auth:
+ timeout: 30
+ master_only: False
+ logging:
+ loglevel: DEBUG
+ logfile:
+ logformat: default
+ blacklist: ['elasticsearch', 'urllib3']
+ actions.yml: |-
+ actions:
+ 1:
+ action: delete_indices
+ options:
+ ignore_empty_list: True
+ timeout_override:
+ continue_if_exception: False
+ disable_action: False
+ filters:
+ - filtertype: pattern
+ kind: prefix
+ value: haystack-traces-
+ exclude:
+ - filtertype: age
+ source: name
+ direction: older
+ timestring: "%Y-%m-%d"
+ unit: days
+ unit_count: 4
+ exclude:
+---
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+ name: curator-es-haystack-traces-index-store
+ namespace: ${app_namespace}
+
+spec:
+ schedule: "0 */4 * * *"
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ containers:
+ - name: curator-es-haystack-traces-index-store
+ image: bobrik/curator:5.4.0
+ args:
+ - --config
+ - /config/curator.yml
+ - /config/actions.yml
+ volumeMounts:
+ - mountPath: /config
+ name: config
+ restartPolicy: OnFailure
+ volumes:
+ - name: config
+ configMap:
+ name: curator-es-haystack-traces-index-store
diff --git a/traces/deployment/terraform/es-indices/curator-trace-index/variables.tf b/traces/deployment/terraform/es-indices/curator-trace-index/variables.tf
new file mode 100755
index 000000000..706d95c4f
--- /dev/null
+++ b/traces/deployment/terraform/es-indices/curator-trace-index/variables.tf
@@ -0,0 +1,7 @@
+variable "kubectl_executable_name" {}
+variable "kubectl_context_name" {}
+variable "enabled" {}
+variable "elasticsearch_hostname" {}
+variable "elasticsearch_port" {}
+variable "namespace" {}
+
diff --git a/traces/deployment/terraform/es-indices/main.tf b/traces/deployment/terraform/es-indices/main.tf
new file mode 100644
index 000000000..1036902e0
--- /dev/null
+++ b/traces/deployment/terraform/es-indices/main.tf
@@ -0,0 +1,29 @@
+module "curator_trace_index" {
+ source = "curator-trace-index"
+ kubectl_context_name = "${var.kubectl_context_name}"
+ enabled = "${var.enabled}"
+ elasticsearch_hostname = "${var.elasticsearch_hostname}"
+ elasticsearch_port = "${var.elasticsearch_port}"
+ kubectl_executable_name = "${var.kubectl_executable_name}"
+ namespace = "${var.namespace}"
+}
+
+module "curator_service_metadata" {
+ source = "curator-service-metadata"
+ kubectl_context_name = "${var.kubectl_context_name}"
+ enabled = "${var.enabled}"
+ elasticsearch_hostname = "${var.elasticsearch_hostname}"
+ elasticsearch_port = "${var.elasticsearch_port}"
+ kubectl_executable_name = "${var.kubectl_executable_name}"
+ namespace = "${var.namespace}"
+}
+
+module "whitelisted_fields" {
+ source = "whitelisted-fields"
+ kubectl_context_name = "${var.kubectl_context_name}"
+ enabled = "${var.enabled}"
+ elasticsearch_hostname = "${var.elasticsearch_hostname}"
+ elasticsearch_port = "${var.elasticsearch_port}"
+ kubectl_executable_name = "${var.kubectl_executable_name}"
+ namespace = "${var.namespace}"
+}
diff --git a/traces/deployment/terraform/es-indices/outputs.tf b/traces/deployment/terraform/es-indices/outputs.tf
new file mode 100644
index 000000000..e69de29bb
diff --git a/traces/deployment/terraform/es-indices/variables.tf b/traces/deployment/terraform/es-indices/variables.tf
new file mode 100644
index 000000000..bacd8c4c8
--- /dev/null
+++ b/traces/deployment/terraform/es-indices/variables.tf
@@ -0,0 +1,7 @@
+variable "elasticsearch_hostname" {}
+variable "elasticsearch_port" {}
+variable "kubectl_context_name" {}
+variable "kubectl_executable_name" {}
+variable "namespace" {}
+variable "node_selector_label"{}
+variable "enabled"{}
diff --git a/traces/deployment/terraform/es-indices/whitelisted-fields/main.tf b/traces/deployment/terraform/es-indices/whitelisted-fields/main.tf
new file mode 100755
index 000000000..3fd7ff4c0
--- /dev/null
+++ b/traces/deployment/terraform/es-indices/whitelisted-fields/main.tf
@@ -0,0 +1,29 @@
+locals {
+ count = "${var.enabled?1:0}"
+}
+
+
+data "template_file" "whitelisted-fields-pod-yaml" {
+ template = "${file("${path.module}/templates/whitelisted-fields-pod-yaml.tpl")}"
+ vars {
+ elasticsearch_host = "${var.elasticsearch_hostname}"
+ elasticsearch_port = "${var.elasticsearch_port}"
+ app_namespace = "${var.namespace}"
+ }
+}
+resource "null_resource" "whitelisted-fields-pod" {
+ triggers {
+ template = "${data.template_file.whitelisted-fields-pod-yaml.rendered}"
+ }
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.whitelisted-fields-pod-yaml.rendered}' | ${var.kubectl_executable_name} apply -f - --context ${var.kubectl_context_name}"
+ }
+
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.whitelisted-fields-pod-yaml.rendered}' | ${var.kubectl_executable_name} delete -f - --context ${var.kubectl_context_name}"
+ when = "destroy"
+ }
+ count = "${local.count}"
+}
+
+
diff --git a/traces/deployment/terraform/es-indices/whitelisted-fields/outputs.tf b/traces/deployment/terraform/es-indices/whitelisted-fields/outputs.tf
new file mode 100755
index 000000000..e69de29bb
diff --git a/traces/deployment/terraform/es-indices/whitelisted-fields/templates/whitelisted-fields-pod-yaml.tpl b/traces/deployment/terraform/es-indices/whitelisted-fields/templates/whitelisted-fields-pod-yaml.tpl
new file mode 100755
index 000000000..94c66e15b
--- /dev/null
+++ b/traces/deployment/terraform/es-indices/whitelisted-fields/templates/whitelisted-fields-pod-yaml.tpl
@@ -0,0 +1,44 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: whitelist-json
+ namespace: ${app_namespace}
+data:
+ whitelist.json: |-
+ {
+ "fields": [{
+ "name": "error",
+ "type": "string",
+ "enabled": true,
+ "searchContext": "trace"
+ }]
+ }
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: es-whitelist
+ namespace: ${app_namespace}
+spec:
+ template:
+ spec:
+ containers:
+ - name: es-whitelist
+ image: yauritux/busybox-curl
+ command:
+ - curl
+ args:
+ - -XPUT
+ - -H
+ - "Content-Type: application/json"
+ - -d
+ - "@/data/whitelist.json"
+ - "http://${elasticsearch_host}:${elasticsearch_port}/reload-configs/indexing-fields/1"
+ volumeMounts:
+ - mountPath: /data
+ name: data
+ restartPolicy: OnFailure
+ volumes:
+ - name: data
+ configMap:
+ name: whitelist-json
\ No newline at end of file
diff --git a/traces/deployment/terraform/es-indices/whitelisted-fields/variables.tf b/traces/deployment/terraform/es-indices/whitelisted-fields/variables.tf
new file mode 100755
index 000000000..706d95c4f
--- /dev/null
+++ b/traces/deployment/terraform/es-indices/whitelisted-fields/variables.tf
@@ -0,0 +1,7 @@
+variable "kubectl_executable_name" {}
+variable "kubectl_context_name" {}
+variable "enabled" {}
+variable "elasticsearch_hostname" {}
+variable "elasticsearch_port" {}
+variable "namespace" {}
+
diff --git a/traces/deployment/terraform/main.tf b/traces/deployment/terraform/main.tf
new file mode 100644
index 000000000..c208e9976
--- /dev/null
+++ b/traces/deployment/terraform/main.tf
@@ -0,0 +1,71 @@
+module "trace-indexer" {
+ source = "trace-indexer"
+ image = "expediadotcom/haystack-trace-indexer:${var.traces["version"]}"
+ storage_backend_image = "expediadotcom/haystack-trace-backend-cassandra:${var.traces["version"]}"
+ replicas = "${var.traces["indexer_instances"]}"
+ enabled = "${var.traces["enabled"]}"
+ cpu_limit = "${var.traces["indexer_cpu_limit"]}"
+ cpu_request = "${var.traces["indexer_cpu_request"]}"
+ memory_limit = "${var.traces["indexer_memory_limit"]}"
+ memory_request = "${var.traces["indexer_memory_request"]}"
+ jvm_memory_limit = "${var.traces["indexer_jvm_memory_limit"]}"
+ env_vars = "${var.traces["indexer_environment_overrides"]}"
+ elasticsearch_template = "${var.traces["indexer_elasticsearch_template"]}"
+ namespace = "${var.namespace}"
+ kafka_endpoint = "${var.kafka_hostname}:${var.kafka_port}"
+ elasticsearch_port = "${var.elasticsearch_port}"
+ elasticsearch_hostname = "${var.elasticsearch_hostname}"
+ cassandra_hostname = "${var.cassandra_hostname}"
+ graphite_hostname = "${var.graphite_hostname}"
+ graphite_port = "${var.graphite_port}"
+ graphite_enabled = "${var.graphite_enabled}"
+ node_selector_label = "${var.node_selector_label}"
+ kubectl_executable_name = "${var.kubectl_executable_name}"
+ kubectl_context_name = "${var.kubectl_context_name}"
+ backend_cpu_limit = "${var.traces["backend_cpu_limit"]}"
+ backend_cpu_request = "${var.traces["backend_cpu_request"]}"
+ backend_memory_limit = "${var.traces["backend_memory_limit"]}"
+ backend_memory_request = "${var.traces["backend_memory_request"]}"
+ backend_jvm_memory_limit = "${var.traces["backend_jvm_memory_limit"]}"
+ backend_env_vars = "${var.traces["backend_environment_overrides"]}"
+}
+
+module "trace-reader" {
+ source = "trace-reader"
+ image = "expediadotcom/haystack-trace-reader:${var.traces["version"]}"
+ storage_backend_image = "expediadotcom/haystack-trace-backend-cassandra:${var.traces["version"]}"
+ replicas = "${var.traces["reader_instances"]}"
+ namespace = "${var.namespace}"
+ elasticsearch_endpoint = "${var.elasticsearch_hostname}:${var.elasticsearch_port}"
+ cassandra_hostname = "${var.cassandra_hostname}"
+ graphite_hostname = "${var.graphite_hostname}"
+ graphite_port = "${var.graphite_port}"
+ graphite_enabled = "${var.graphite_enabled}"
+ enabled = "${var.traces["enabled"]}"
+ node_selector_label = "${var.node_selector_label}"
+ kubectl_executable_name = "${var.kubectl_executable_name}"
+ kubectl_context_name = "${var.kubectl_context_name}"
+ cpu_limit = "${var.traces["reader_cpu_limit"]}"
+ cpu_request = "${var.traces["reader_cpu_request"]}"
+ memory_limit = "${var.traces["reader_memory_limit"]}"
+ memory_request = "${var.traces["reader_memory_request"]}"
+ jvm_memory_limit = "${var.traces["reader_jvm_memory_limit"]}"
+ backend_cpu_limit = "${var.traces["backend_cpu_limit"]}"
+ backend_cpu_request = "${var.traces["backend_cpu_request"]}"
+ backend_memory_limit = "${var.traces["backend_memory_limit"]}"
+ backend_memory_request = "${var.traces["backend_memory_request"]}"
+ backend_jvm_memory_limit = "${var.traces["backend_jvm_memory_limit"]}"
+ env_vars = "${var.traces["reader_environment_overrides"]}"
+ backend_env_vars = "${var.traces["backend_environment_overrides"]}"
+}
+
+module "es-indices" {
+ source = "es-indices"
+ enabled = "${var.traces["enabled"]}"
+ namespace = "${var.namespace}"
+ node_selector_label = "${var.node_selector_label}"
+ kubectl_executable_name = "${var.kubectl_executable_name}"
+ kubectl_context_name = "${var.kubectl_context_name}"
+ elasticsearch_port = "${var.elasticsearch_port}"
+ elasticsearch_hostname = "${var.elasticsearch_hostname}"
+}
diff --git a/traces/deployment/terraform/outputs.tf b/traces/deployment/terraform/outputs.tf
new file mode 100644
index 000000000..27ecf772c
--- /dev/null
+++ b/traces/deployment/terraform/outputs.tf
@@ -0,0 +1,7 @@
+output "reader_hostname" {
+ value = "${module.trace-reader.hostname}"
+}
+
+output "reader_port" {
+ value = "${module.trace-reader.service_port}"
+}
\ No newline at end of file
diff --git a/traces/deployment/terraform/trace-indexer/main.tf b/traces/deployment/terraform/trace-indexer/main.tf
new file mode 100644
index 000000000..5ea2c0f0a
--- /dev/null
+++ b/traces/deployment/terraform/trace-indexer/main.tf
@@ -0,0 +1,82 @@
+locals {
+ app_name = "trace-indexer"
+ config_file_path = "${path.module}/templates/trace-indexer.conf"
+ deployment_yaml_file_path = "${path.module}/templates/deployment.yaml"
+ count = "${var.enabled?1:0}"
+ span_produce_topic = "${var.enable_kafka_sink?"span-buffer":""}"
+ elasticsearch_endpoint = "${var.elasticsearch_hostname}:${var.elasticsearch_port}"
+ checksum = "${sha1("${data.template_file.config_data.rendered}")}"
+ configmap_name = "indexer-${local.checksum}"
+}
+
+resource "kubernetes_config_map" "haystack-config" {
+ metadata {
+ name = "${local.configmap_name}"
+ namespace = "${var.namespace}"
+ }
+ data {
+ "trace-indexer.conf" = "${data.template_file.config_data.rendered}"
+ }
+ count = "${local.count}"
+}
+
+data "template_file" "config_data" {
+ template = "${file("${local.config_file_path}")}"
+
+ vars {
+ kafka_endpoint = "${var.kafka_endpoint}"
+ elasticsearch_endpoint = "${local.elasticsearch_endpoint}"
+ elasticsearch_template = "${var.elasticsearch_template}"
+ span_produce_topic = "${local.span_produce_topic}"
+ }
+}
+
+data "template_file" "deployment_yaml" {
+ template = "${file("${local.deployment_yaml_file_path}")}"
+ vars {
+ app_name = "${local.app_name}"
+ namespace = "${var.namespace}"
+ graphite_port = "${var.graphite_port}"
+ graphite_host = "${var.graphite_hostname}"
+ graphite_enabled = "${var.graphite_enabled}"
+ cassandra_hostname = "${var.cassandra_hostname}"
+ node_selecter_label = "${var.node_selector_label}"
+ storage_backend_image = "${var.storage_backend_image}"
+ image = "${var.image}"
+ replicas = "${var.replicas}"
+ memory_limit = "${var.memory_limit}"
+ memory_request = "${var.memory_request}"
+ jvm_memory_limit = "${var.jvm_memory_limit}"
+ cpu_limit = "${var.cpu_limit}"
+ cpu_request = "${var.cpu_request}"
+ backend_memory_limit = "${var.backend_memory_limit}"
+ backend_memory_request = "${var.backend_memory_request}"
+ backend_jvm_memory_limit = "${var.backend_jvm_memory_limit}"
+ backend_cpu_limit = "${var.backend_cpu_limit}"
+ backend_cpu_request = "${var.backend_cpu_request}"
+
+ configmap_name = "${local.configmap_name}"
+ env_vars= "${indent(9,"${var.env_vars}")}"
+ backend_env_vars = "${indent(9,"${var.backend_env_vars}")}"
+ }
+}
+
+resource "null_resource" "kubectl_apply" {
+ triggers {
+ template = "${data.template_file.deployment_yaml.rendered}"
+ }
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} apply -f - --context ${var.kubectl_context_name}"
+ }
+ count = "${local.count}"
+}
+
+
+resource "null_resource" "kubectl_destroy" {
+
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} delete -f - --context ${var.kubectl_context_name}"
+ when = "destroy"
+ }
+ count = "${local.count}"
+}
diff --git a/traces/deployment/terraform/trace-indexer/outputs.tf b/traces/deployment/terraform/trace-indexer/outputs.tf
new file mode 100644
index 000000000..e69de29bb
diff --git a/traces/deployment/terraform/trace-indexer/templates/deployment.yaml b/traces/deployment/terraform/trace-indexer/templates/deployment.yaml
new file mode 100644
index 000000000..f1d89090c
--- /dev/null
+++ b/traces/deployment/terraform/trace-indexer/templates/deployment.yaml
@@ -0,0 +1,97 @@
+# ------------------- Deployment ------------------- #
+
+kind: Deployment
+apiVersion: apps/v1beta2
+metadata:
+ labels:
+ k8s-app: ${app_name}
+ name: ${app_name}
+ namespace: ${namespace}
+spec:
+ replicas: ${replicas}
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: ${app_name}
+ template:
+ metadata:
+ labels:
+ k8s-app: ${app_name}
+ spec:
+ containers:
+ - name: storage-backend-cassandra
+ image: ${storage_backend_image}
+ resources:
+ limits:
+ cpu: ${backend_cpu_limit}
+ memory: ${backend_memory_limit}Mi
+ requests:
+ cpu: ${backend_cpu_request}
+ memory: ${backend_memory_request}Mi
+ env:
+ - name: "HAYSTACK_PROP_CASSANDRA_ENDPOINTS"
+ value: "${cassandra_hostname}"
+ - name: "HAYSTACK_GRAPHITE_HOST"
+ value: "${graphite_host}"
+ - name: "HAYSTACK_GRAPHITE_PORT"
+ value: "${graphite_port}"
+ - name: "HAYSTACK_GRAPHITE_ENABLED"
+ value: "${graphite_enabled}"
+ - name: "JAVA_XMS"
+ value: "${backend_jvm_memory_limit}m"
+ - name: "JAVA_XMX"
+ value: "${backend_jvm_memory_limit}m"
+ ${backend_env_vars}
+ livenessProbe:
+ exec:
+ command:
+ - /bin/grpc_health_probe
+ - "-addr=:8090"
+ initialDelaySeconds: 30
+ periodSeconds: 15
+ timeoutSeconds: 5
+ failureThreshold: 3
+ - name: ${app_name}
+ image: ${image}
+ volumeMounts:
+ # Create on-disk volume to store exec logs
+ - mountPath: /config
+ name: config-volume
+ resources:
+ limits:
+ cpu: ${cpu_limit}
+ memory: ${memory_limit}Mi
+ requests:
+ cpu: ${cpu_request}
+ memory: ${memory_request}Mi
+ env:
+ - name: "HAYSTACK_OVERRIDES_CONFIG_PATH"
+ value: "/config/trace-indexer.conf"
+ - name: "HAYSTACK_GRAPHITE_HOST"
+ value: "${graphite_host}"
+ - name: "HAYSTACK_GRAPHITE_PORT"
+ value: "${graphite_port}"
+ - name: "HAYSTACK_GRAPHITE_ENABLED"
+ value: "${graphite_enabled}"
+ - name: "JAVA_XMS"
+ value: "${jvm_memory_limit}m"
+ - name: "JAVA_XMX"
+ value: "${jvm_memory_limit}m"
+ ${env_vars}
+ livenessProbe:
+ exec:
+ command:
+ - grep
+ - "true"
+ - /app/isHealthy
+ initialDelaySeconds: 30
+ periodSeconds: 5
+ timeoutSeconds: 5
+ failureThreshold: 6
+ nodeSelector:
+ ${node_selecter_label}
+ volumes:
+ - name: config-volume
+ configMap:
+ name: ${configmap_name}
+
diff --git a/traces/deployment/terraform/trace-indexer/templates/trace-indexer.conf b/traces/deployment/terraform/trace-indexer/templates/trace-indexer.conf
new file mode 100644
index 000000000..19fa9f19f
--- /dev/null
+++ b/traces/deployment/terraform/trace-indexer/templates/trace-indexer.conf
@@ -0,0 +1,157 @@
+health.status.path = "/app/isHealthy"
+
+span.accumulate {
+ store {
+ min.traces.per.cache = 1000 # minimum number of traces in each cache before the eviction check is applied; also useful for testing the code
+ all.max.entries = 150000 # maximum number of spans that can live across all the stores
+ }
+ window.ms = 10000
+ poll.ms = 2000
+ packer = zstd
+}
+
+kafka {
+ close.stream.timeout.ms = 15000
+
+ topic.consume = "proto-spans"
+ topic.produce = "${span_produce_topic}"
+
+ num.stream.threads = 2
+ poll.timeout.ms = 100
+
+ # if the consumer poll hangs, wake it up after a timeout
+ # also set the maximum wakeups allowed; if the max threshold is reached, the task will raise a shutdown request
+ max.wakeups = 10
+ wakeup.timeout.ms = 3000
+
+ commit.offset {
+ retries = 3
+ backoff.ms = 200
+ }
+
+ # consumer specific configurations
+ consumer {
+ group.id = "haystack-proto-trace-indexer"
+ bootstrap.servers = "${kafka_endpoint}"
+ auto.offset.reset = "latest"
+
+ # disable auto commit as the app manages offset itself
+ enable.auto.commit = "false"
+ }
+
+ # producer specific configurations
+ producer {
+ bootstrap.servers = "${kafka_endpoint}"
+ }
+}
+
+backend {
+
+ client {
+ host = "localhost"
+ port = 8090
+ }
+ # defines the max inflight writes for backend client
+ max.inflight.requests = 100
+}
+elasticsearch {
+ endpoint = "http://${elasticsearch_endpoint}"
+
+ # defines settings for bulk operation like max inflight bulks, number of documents and the total size in a single bulk
+ bulk.max {
+ docs {
+ count = 200
+ size.kb = 1000
+ }
+ inflight = 25
+ }
+
+ conn.timeout.ms = 10000
+ read.timeout.ms = 30000
+ consistency.level = "one"
+ max.connections.per.route = 5
+
+ retries {
+ max = 10
+ backoff {
+ initial.ms = 100
+ factor = 2
+ }
+ }
+
+ index {
+ # apply the template before starting the client, if json is empty, no operation is performed
+ template.json = """${elasticsearch_template}"""
+
+ name.prefix = "haystack-traces"
+ type = "spans"
+ hour.bucket = 6
+ }
+
+ # if enabled flag is true, es requests will be signed
+ signing.request.aws {
+ enabled = false
+ region = "us-west-2"
+ service.name = "es"
+ # if 'access.key' is not provided, will use DefaultAWSCredentialsProviderChain to resolve creds
+ access.key = ""
+ secret.key = ""
+ }
+}
+
+service.metadata {
+ enabled = true
+ flush {
+ interval.sec = 60
+ operation.count = 10000
+ }
+ es {
+ endpoint = "http://${elasticsearch_endpoint}"
+ conn.timeout.ms = 10000
+ read.timeout.ms = 5000
+ consistency.level = "one"
+ index {
+ # apply the template before starting the client, if json is empty, no operation is performed
+ template.json = "{\"template\": \"service-metadata\", \"index_patterns\": [\"service-metadata*\"], \"aliases\": {\"service-metadata\":{}}, \"settings\": {\"number_of_shards\": 4, \"index.mapping.ignore_malformed\": true, \"analysis\": {\"normalizer\": {\"lowercase_normalizer\": {\"type\": \"custom\", \"filter\": [\"lowercase\"]}}}}, \"mappings\": {\"metadata\": {\"_field_names\": {\"enabled\": false}, \"_all\": {\"enabled\": false}, \"properties\": {\"servicename\": {\"type\": \"keyword\", \"norms\": false}, \"operationname\": {\"type\": \"keyword\", \"norms\": false}}}}}"
+ name = "service-metadata"
+ type = "metadata"
+ }
+ # defines settings for bulk operation like max inflight bulks, number of documents and the total size in a single bulk
+ bulk.max {
+ docs {
+ count = 100
+ size.kb = 1000
+ }
+ inflight = 10
+ }
+ retries {
+ max = 10
+ backoff {
+ initial.ms = 100
+ factor = 2
+ }
+ }
+ }
+}
+
+reload {
+ tables {
+ index.fields.config = "indexing-fields"
+ }
+ config {
+ endpoint = "http://${elasticsearch_endpoint}"
+ database.name = "reload-configs"
+ }
+ startup.load = true
+ interval.ms = 60000 # -1 will imply 'no reload'
+
+ # if enabled flag is true, es requests will be signed
+ signing.request.aws {
+ enabled = false
+ region = "us-west-2"
+ service.name = "es"
+ # if 'access.key' is not provided, will use DefaultAWSCredentialsProviderChain to resolve creds
+ access.key = ""
+ secret.key = ""
+ }
+}
diff --git a/traces/deployment/terraform/trace-indexer/variables.tf b/traces/deployment/terraform/trace-indexer/variables.tf
new file mode 100644
index 000000000..52fb41b39
--- /dev/null
+++ b/traces/deployment/terraform/trace-indexer/variables.tf
@@ -0,0 +1,35 @@
+variable "storage_backend_image" {}
+variable "image" {}
+variable "replicas" {}
+variable "namespace" {}
+variable "kafka_endpoint" {}
+variable "elasticsearch_hostname" {}
+variable "elasticsearch_port" {}
+variable "elasticsearch_template" {}
+variable "cassandra_hostname" {}
+variable "graphite_hostname" {}
+variable "graphite_port" {}
+variable "graphite_enabled" {}
+variable "enabled"{}
+variable "kubectl_executable_name" {}
+variable "kubectl_context_name" {}
+variable "node_selector_label"{}
+variable "memory_request"{}
+variable "memory_limit"{}
+variable "jvm_memory_limit"{}
+variable "cpu_request"{}
+variable "cpu_limit"{}
+variable "backend_memory_request"{}
+variable "backend_memory_limit"{}
+variable "backend_jvm_memory_limit"{}
+variable "backend_cpu_request"{}
+variable "backend_cpu_limit"{}
+variable "env_vars" {}
+variable "backend_env_vars" {}
+variable "enable_kafka_sink" {
+ default = false
+}
+
+variable "termination_grace_period" {
+ default = 30
+}
diff --git a/traces/deployment/terraform/trace-reader/main.tf b/traces/deployment/terraform/trace-reader/main.tf
new file mode 100644
index 000000000..83a4e76f5
--- /dev/null
+++ b/traces/deployment/terraform/trace-reader/main.tf
@@ -0,0 +1,79 @@
+locals {
+ app_name = "trace-reader"
+ config_file_path = "${path.module}/templates/trace-reader.conf"
+ deployment_yaml_file_path = "${path.module}/templates/deployment.yaml"
+ count = "${var.enabled?1:0}"
+ checksum = "${sha1("${data.template_file.config_data.rendered}")}"
+ configmap_name = "reader-${local.checksum}"
+}
+
+resource "kubernetes_config_map" "haystack-config" {
+ metadata {
+ name = "${local.configmap_name}"
+ namespace = "${var.namespace}"
+ }
+ data {
+ "trace-reader.conf" = "${data.template_file.config_data.rendered}"
+ }
+ count = "${local.count}"
+
+}
+
+data "template_file" "config_data" {
+ template = "${file("${local.config_file_path}")}"
+
+ vars {
+ elasticsearch_endpoint = "${var.elasticsearch_endpoint}"
+ }
+}
+
+data "template_file" "deployment_yaml" {
+ template = "${file("${local.deployment_yaml_file_path}")}"
+ vars {
+ app_name = "${local.app_name}"
+ namespace = "${var.namespace}"
+ graphite_port = "${var.graphite_port}"
+ graphite_host = "${var.graphite_hostname}"
+ graphite_enabled = "${var.graphite_enabled}"
+ node_selecter_label = "${var.node_selector_label}"
+ image = "${var.image}"
+ storage_backend_image = "${var.storage_backend_image}"
+ cassandra_hostname = "${var.cassandra_hostname}"
+ replicas = "${var.replicas}"
+ memory_limit = "${var.memory_limit}"
+ memory_request = "${var.memory_request}"
+ jvm_memory_limit = "${var.jvm_memory_limit}"
+ cpu_limit = "${var.cpu_limit}"
+ cpu_request = "${var.cpu_request}"
+ backend_memory_limit = "${var.backend_memory_limit}"
+ backend_memory_request = "${var.backend_memory_request}"
+ backend_jvm_memory_limit = "${var.backend_jvm_memory_limit}"
+ backend_cpu_limit = "${var.backend_cpu_limit}"
+ backend_cpu_request = "${var.backend_cpu_request}"
+ service_port = "${var.service_port}"
+ container_port = "${var.container_port}"
+ configmap_name = "${local.configmap_name}"
+ env_vars= "${indent(9,"${var.env_vars}")}"
+ backend_env_vars = "${indent(9,"${var.backend_env_vars}")}"
+ }
+}
+
+resource "null_resource" "kubectl_apply" {
+ triggers {
+ template = "${data.template_file.deployment_yaml.rendered}"
+ }
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} apply -f - --context ${var.kubectl_context_name}"
+ }
+ count = "${local.count}"
+}
+
+
+resource "null_resource" "kubectl_destroy" {
+
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} delete -f - --context ${var.kubectl_context_name}"
+ when = "destroy"
+ }
+ count = "${local.count}"
+}
diff --git a/traces/deployment/terraform/trace-reader/outputs.tf b/traces/deployment/terraform/trace-reader/outputs.tf
new file mode 100644
index 000000000..562aba81f
--- /dev/null
+++ b/traces/deployment/terraform/trace-reader/outputs.tf
@@ -0,0 +1,7 @@
+output "hostname" {
+ value = "${local.app_name}"
+}
+
+output "service_port" {
+ value = "${var.service_port}"
+}
\ No newline at end of file
diff --git a/traces/deployment/terraform/trace-reader/templates/deployment.yaml b/traces/deployment/terraform/trace-reader/templates/deployment.yaml
new file mode 100644
index 000000000..5bbf98543
--- /dev/null
+++ b/traces/deployment/terraform/trace-reader/templates/deployment.yaml
@@ -0,0 +1,111 @@
+# ------------------- Deployment ------------------- #
+
+kind: Deployment
+apiVersion: apps/v1beta2
+metadata:
+ labels:
+ k8s-app: ${app_name}
+ name: ${app_name}
+ namespace: ${namespace}
+spec:
+ replicas: ${replicas}
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: ${app_name}
+ template:
+ metadata:
+ labels:
+ k8s-app: ${app_name}
+ spec:
+ containers:
+ - name: storage-backend-cassandra-reader
+ image: ${storage_backend_image}
+ resources:
+ limits:
+ cpu: ${backend_cpu_limit}
+ memory: ${backend_memory_limit}Mi
+ requests:
+ cpu: ${backend_cpu_request}
+ memory: ${backend_memory_request}Mi
+ env:
+ - name: "HAYSTACK_PROP_CASSANDRA_ENDPOINTS"
+ value: "${cassandra_hostname}"
+ - name: "HAYSTACK_GRAPHITE_HOST"
+ value: "${graphite_host}"
+ - name: "HAYSTACK_GRAPHITE_PORT"
+ value: "${graphite_port}"
+ - name: "HAYSTACK_GRAPHITE_ENABLED"
+ value: "${graphite_enabled}"
+ - name: "JAVA_XMS"
+ value: "${backend_jvm_memory_limit}m"
+ - name: "JAVA_XMX"
+ value: "${backend_jvm_memory_limit}m"
+ ${backend_env_vars}
+ livenessProbe:
+ exec:
+ command:
+ - /bin/grpc_health_probe
+ - "-addr=:8090"
+ initialDelaySeconds: 30
+ periodSeconds: 15
+ timeoutSeconds: 5
+ failureThreshold: 3
+ - name: ${app_name}
+ image: ${image}
+ volumeMounts:
+ # Create on-disk volume to store exec logs
+ - mountPath: /config
+ name: config-volume
+ resources:
+ limits:
+ cpu: ${cpu_limit}
+ memory: ${memory_limit}Mi
+ requests:
+ cpu: ${cpu_request}
+ memory: ${memory_request}Mi
+ env:
+ - name: "HAYSTACK_OVERRIDES_CONFIG_PATH"
+ value: "/config/trace-reader.conf"
+ - name: "HAYSTACK_GRAPHITE_HOST"
+ value: "${graphite_host}"
+ - name: "HAYSTACK_GRAPHITE_PORT"
+ value: "${graphite_port}"
+ - name: "HAYSTACK_GRAPHITE_ENABLED"
+ value: "${graphite_enabled}"
+ - name: "JAVA_XMS"
+ value: "${jvm_memory_limit}m"
+ - name: "JAVA_XMX"
+ value: "${jvm_memory_limit}m"
+ ${env_vars}
+ livenessProbe:
+ exec:
+ command:
+ - /bin/grpc_health_probe
+ - "-addr=:${container_port}"
+ initialDelaySeconds: 30
+ periodSeconds: 15
+ timeoutSeconds: 5
+ failureThreshold: 3
+ nodeSelector:
+ ${node_selecter_label}
+ volumes:
+ - name: config-volume
+ configMap:
+ name: ${configmap_name}
+
+# ------------------- Service ------------------- #
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: ${app_name}
+ name: ${app_name}
+ namespace: ${namespace}
+spec:
+ ports:
+ - port: ${service_port}
+ targetPort: ${container_port}
+ selector:
+ k8s-app: ${app_name}
diff --git a/traces/deployment/terraform/trace-reader/templates/trace-reader.conf b/traces/deployment/terraform/trace-reader/templates/trace-reader.conf
new file mode 100644
index 000000000..a196abb72
--- /dev/null
+++ b/traces/deployment/terraform/trace-reader/templates/trace-reader.conf
@@ -0,0 +1,97 @@
+service {
+ port = 8080
+ ssl {
+ enabled = false
+ cert.path = ""
+ private.key.path = ""
+ }
+ max.message.size = 52428800 # 50MB in bytes
+}
+
+backend {
+ client {
+ host = "localhost"
+ port = 8090
+ }
+}
+
+elasticsearch {
+ client {
+ endpoint = "http://${elasticsearch_endpoint}"
+ conn.timeout.ms = 10000
+ read.timeout.ms = 30000
+ }
+ index {
+ spans {
+ name.prefix = "haystack-traces"
+ type = "spans"
+ hour.bucket = 6
+ hour.ttl = 72 // 3 * 24 hours
+ use.root.doc.starttime = true
+ }
+ service.metadata {
+ enabled = true
+ name = "service-metadata"
+ type = "metadata"
+ }
+ }
+
+ # if enabled flag is true, es requests will be signed
+ signing.request.aws {
+ enabled = false
+ region = "us-west-2"
+ service.name = "es"
+ # if 'access.key' is not provided, will use DefaultAWSCredentialsProviderChain to resolve creds
+ access.key = ""
+ secret.key = ""
+ }
+}
+
+trace {
+ validators {
+ sequence = [
+ "com.expedia.www.haystack.trace.reader.readers.validators.TraceIdValidator"
+ ]
+ }
+
+ transformers {
+ pre {
+ sequence = [
+ "com.expedia.www.haystack.trace.reader.readers.transformers.DeDuplicateSpanTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.ClientServerEventLogTransformer"
+ ]
+ }
+ post {
+ sequence = [
+ "com.expedia.www.haystack.trace.reader.readers.transformers.PartialSpanTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.ServerClientSpanMergeTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.InvalidRootTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.InvalidParentTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.ClockSkewTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.SortSpanTransformer"
+ ]
+ }
+ }
+}
+
+reload {
+ tables {
+ index.fields.config = "indexing-fields"
+ }
+ config {
+ endpoint = "http://${elasticsearch_endpoint}"
+ database.name = "reload-configs"
+ }
+ startup.load = true
+ interval.ms = 60000 # -1 will imply 'no reload'
+
+ # if enabled flag is true, es requests will be signed
+ signing.request.aws {
+ enabled = false
+ region = "us-west-2"
+ service.name = "es"
+ # if 'access.key' is not provided, will use DefaultAWSCredentialsProviderChain to resolve creds
+ access.key = ""
+ secret.key = ""
+ }
+}
diff --git a/traces/deployment/terraform/trace-reader/variables.tf b/traces/deployment/terraform/trace-reader/variables.tf
new file mode 100644
index 000000000..0ceada02e
--- /dev/null
+++ b/traces/deployment/terraform/trace-reader/variables.tf
@@ -0,0 +1,36 @@
+variable "image" {}
+variable "storage_backend_image" {}
+variable "replicas" {}
+variable "namespace" {}
+variable "elasticsearch_endpoint" {}
+variable "cassandra_hostname" {}
+variable "graphite_hostname" {}
+variable "graphite_port" {}
+variable "graphite_enabled" {}
+variable "enabled"{}
+variable "kubectl_executable_name" {}
+variable "kubectl_context_name" {}
+variable "node_selector_label"{}
+variable "memory_request"{}
+variable "memory_limit"{}
+variable "cpu_request"{}
+variable "cpu_limit"{}
+variable "jvm_memory_limit"{}
+variable "backend_memory_request"{}
+variable "backend_memory_limit"{}
+variable "backend_jvm_memory_limit"{}
+variable "backend_cpu_request"{}
+variable "backend_cpu_limit"{}
+variable "env_vars" {}
+variable "backend_env_vars" {}
+
+variable "termination_grace_period" {
+ default = 30
+}
+
+variable "service_port" {
+ default = 8080
+}
+variable "container_port" {
+ default = 8080
+}
diff --git a/traces/deployment/terraform/variables.tf b/traces/deployment/terraform/variables.tf
new file mode 100644
index 000000000..020eecde4
--- /dev/null
+++ b/traces/deployment/terraform/variables.tf
@@ -0,0 +1,19 @@
+variable "elasticsearch_hostname" {}
+variable "elasticsearch_port" {}
+variable "kafka_hostname" {}
+variable "kafka_port" {}
+variable "cassandra_hostname" {}
+variable "cassandra_port" {}
+variable "graphite_hostname" {}
+variable "graphite_port" {}
+variable "graphite_enabled" {}
+variable "kubectl_context_name" {}
+variable "kubectl_executable_name" {}
+variable "namespace" {}
+variable "node_selector_label"{}
+
+
+# traces config
+variable "traces" {
+ type = "map"
+}
diff --git a/traces/indexer/Makefile b/traces/indexer/Makefile
new file mode 100644
index 000000000..911775640
--- /dev/null
+++ b/traces/indexer/Makefile
@@ -0,0 +1,20 @@
+.PHONY: docker_build integration_test release
+
+export DOCKER_ORG := expediadotcom
+export DOCKER_IMAGE_NAME := haystack-trace-indexer
+PWD := $(shell pwd)
+
+docker_build:
+ docker build -t $(DOCKER_IMAGE_NAME) -f build/docker/Dockerfile .
+
+prepare_integration_test_env: docker_build
+ docker-compose -f build/integration-tests/docker-compose.yml -p sandbox up -d
+
+integration_test: prepare_integration_test_env
+	# run the integration tests against the docker-compose environment so they can reach kafka, elasticsearch and the trace-backend
+	cd ../ && ./mvnw -q integration-test -pl indexer -am
+	# clean up: stop the docker-compose containers and remove all stopped containers (assumes a throwaway CI host)
+	docker-compose -f build/integration-tests/docker-compose.yml -p sandbox stop
+	docker rm $(shell docker ps -a -q)
+
+release:
+	../deployment/scripts/publish-to-docker-hub.sh
diff --git a/traces/indexer/README.md b/traces/indexer/README.md
new file mode 100644
index 000000000..07d0b616f
--- /dev/null
+++ b/traces/indexer/README.md
@@ -0,0 +1,33 @@
+# haystack-trace-indexer
+
+This Haystack component accumulates the spans associated with a TraceId over a configurable time window.
+The time window for every unique TraceId starts at the Kafka record timestamp of the first observed child span.
+The accumulated spans are written as a single entity to the external trace-backend for persistence and to ElasticSearch for indexing. We also emit the
+accumulated spans back to Kafka for other consumers.
+
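+As a rough illustration, a minimal sketch of this accumulation logic follows. It assumes a simplified in-memory map rather
+than the component's actual pluggable span-buffer store, and a stand-in `Span` type instead of the protobuf span:
+
+```scala
+import scala.collection.mutable
+
+// Sketch only: buffer spans per TraceId and emit a span-buffer once its time
+// window (started by the first observed span's record timestamp) has elapsed.
+final case class Span(traceId: String, operationName: String)
+
+final class SpanAccumulator(windowMs: Long) {
+  private case class Buffer(windowStartMs: Long, spans: mutable.ListBuffer[Span])
+  private val buffers = mutable.Map.empty[String, Buffer]
+
+  // record a span; the window for its TraceId starts at the first span's record timestamp
+  def add(span: Span, recordTimestampMs: Long): Unit = {
+    val buffer = buffers.getOrElseUpdate(span.traceId, Buffer(recordTimestampMs, mutable.ListBuffer.empty))
+    buffer.spans += span
+  }
+
+  // emit and evict every span-buffer whose time window has elapsed
+  def poll(nowMs: Long): Seq[(String, List[Span])] = {
+    val ready = buffers.collect {
+      case (traceId, buffer) if nowMs - buffer.windowStartMs >= windowMs => traceId -> buffer.spans.toList
+    }.toSeq
+    ready.foreach { case (traceId, _) => buffers.remove(traceId) }
+    ready
+  }
+}
+```
+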
+Buffering is a performance optimization: it reduces the number of write calls to the external stores.
+The output can also be used by the dependency-graph component to build the complete call graph across all services.
+
+Note that the system can still emit partial span-buffers for a given TraceId. Possible causes include:
+ * The time window is misconfigured or doesn't match the rate at which spans arrive in Kafka.
+ * On redeployment of this component, partially buffered spans may be flushed out.
+
+Partially buffered spans are still safe to write to the trace-backend and ElasticSearch. In the trace-backend, the TraceId is used as the
+primary key and buffered spans are stored as a time series.
+
+In ElasticSearch, every document we write is keyed by the TraceId with a 4-character random ID appended. This ensures
+that if the same TraceId reappears, we generate a new document.
+
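+For illustration, such a document id could be derived as below (a hypothetical sketch; the separator and the indexer's
+actual id generation may differ in detail):
+
+```scala
+import scala.util.Random
+
+// Hypothetical sketch: append a 4-character random suffix to the TraceId so that a
+// reappearing TraceId yields a fresh ElasticSearch document id.
+def esDocumentId(traceId: String): String =
+  traceId + "_" + Random.alphanumeric.take(4).mkString
+```
+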
+## Required Reading
+
+To understand Haystack, we recommend reading the details of the [haystack](https://github.com/ExpediaDotCom/haystack) project.
+This component is written with [Kafka Streams](http://docs.confluent.io/current/streams/index.html), so some prior knowledge of Kafka Streams is useful.
+
+
+## Technical Details
+
+Fill this in as we go along...
+
+## Building
+
+Check the details on [Build Section](../README.md)
diff --git a/traces/indexer/build/docker/Dockerfile b/traces/indexer/build/docker/Dockerfile
new file mode 100644
index 000000000..07f2bcba9
--- /dev/null
+++ b/traces/indexer/build/docker/Dockerfile
@@ -0,0 +1,24 @@
+FROM openjdk:8-jre
+MAINTAINER Haystack
+
+ENV APP_NAME haystack-trace-indexer
+ENV APP_HOME /app/bin
+ENV JMXTRANS_AGENT jmxtrans-agent-1.2.6
+ENV DOCKERIZE_VERSION v0.6.1
+
+ADD https://github.com/jwilder/dockerize/releases/download/${DOCKERIZE_VERSION}/dockerize-alpine-linux-amd64-${DOCKERIZE_VERSION}.tar.gz dockerize.tar.gz
+RUN tar xzf dockerize.tar.gz
+RUN chmod +x dockerize
+
+RUN mkdir -p ${APP_HOME}
+
+COPY target/${APP_NAME}.jar ${APP_HOME}/
+COPY build/docker/start-app.sh ${APP_HOME}/
+COPY build/docker/jmxtrans-agent.xml ${APP_HOME}/
+
+RUN chmod +x ${APP_HOME}/start-app.sh
+ADD https://github.com/jmxtrans/jmxtrans-agent/releases/download/${JMXTRANS_AGENT}/${JMXTRANS_AGENT}.jar ${APP_HOME}/
+
+WORKDIR ${APP_HOME}
+
+ENTRYPOINT ["./start-app.sh"]
diff --git a/traces/indexer/build/docker/jmxtrans-agent.xml b/traces/indexer/build/docker/jmxtrans-agent.xml
new file mode 100644
index 000000000..0c33e7822
--- /dev/null
+++ b/traces/indexer/build/docker/jmxtrans-agent.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- most element names were stripped from this file during extraction; reconstructed from the recoverable values -->
+<jmxtrans-agent>
+  <queries>
+    <!-- the original list of JMX queries did not survive extraction -->
+  </queries>
+  <outputWriter class="org.jmxtrans.agent.GraphitePlainTextTcpOutputWriter">
+    <host>${HAYSTACK_GRAPHITE_HOST:monitoring-influxdb-graphite.kube-system.svc}</host>
+    <port>${HAYSTACK_GRAPHITE_PORT:2003}</port>
+    <enabled>${HAYSTACK_GRAPHITE_ENABLED:true}</enabled>
+    <namePrefix>haystack.traces.indexer.#hostname#.</namePrefix>
+  </outputWriter>
+  <collectIntervalInSeconds>60</collectIntervalInSeconds>
+</jmxtrans-agent>
diff --git a/traces/indexer/build/docker/start-app.sh b/traces/indexer/build/docker/start-app.sh
new file mode 100755
index 000000000..ba2c65569
--- /dev/null
+++ b/traces/indexer/build/docker/start-app.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+[ -z "$JAVA_XMS" ] && JAVA_XMS=1024m
+[ -z "$JAVA_XMX" ] && JAVA_XMX=1024m
+[ -z "$JAVA_GC_OPTS" ] && JAVA_GC_OPTS="-XX:+UseG1GC"
+
+set -e
+JAVA_OPTS="${JAVA_OPTS} \
+-javaagent:${APP_HOME}/${JMXTRANS_AGENT}.jar=${APP_HOME}/jmxtrans-agent.xml \
+${JAVA_GC_OPTS} \
+-Xmx${JAVA_XMX} \
+-Xms${JAVA_XMS} \
+-XX:+ExitOnOutOfMemoryError \
+-Dapplication.name=${APP_NAME} \
+-Dapplication.home=${APP_HOME}"
+
+if [[ -n "$SERVICE_DEBUG_ON" ]] && [[ "$SERVICE_DEBUG_ON" == true ]]; then
+ JAVA_OPTS="$JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y"
+fi
+
+exec java ${JAVA_OPTS} -jar "${APP_HOME}/${APP_NAME}.jar"
diff --git a/traces/indexer/build/integration-tests/docker-compose.yml b/traces/indexer/build/integration-tests/docker-compose.yml
new file mode 100644
index 000000000..d07c15b26
--- /dev/null
+++ b/traces/indexer/build/integration-tests/docker-compose.yml
@@ -0,0 +1,8 @@
+version: '3'
+services:
+ elasticsearch:
+ image: elastic/elasticsearch:6.0.1
+ environment:
+ ES_JAVA_OPTS: "-Xms256m -Xmx256m"
+ ports:
+ - "9200:9200"
\ No newline at end of file
diff --git a/traces/indexer/pom.xml b/traces/indexer/pom.xml
new file mode 100644
index 000000000..31879dd8f
--- /dev/null
+++ b/traces/indexer/pom.xml
@@ -0,0 +1,236 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- element names were stripped from this pom during extraction; reconstructed from the recoverable values -->
+<project xmlns="http://maven.apache.org/POM/4.0.0">
+
+  <parent>
+    <artifactId>haystack-traces</artifactId>
+    <groupId>com.expedia.www</groupId>
+    <version>1.0.9-SNAPSHOT</version>
+  </parent>
+
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>haystack-trace-indexer</artifactId>
+  <packaging>jar</packaging>
+
+  <properties>
+    <kafka-version>0.11.0.0</kafka-version>
+    <mainClass>com.expedia.www.haystack.trace.indexer.App</mainClass>
+    <finalName>${project.artifactId}-${project.version}</finalName>
+    <skip.unit.tests>false</skip.unit.tests>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.expedia.www</groupId>
+      <artifactId>haystack-trace-commons</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>io.grpc</groupId>
+      <artifactId>grpc-protobuf</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>io.grpc</groupId>
+      <artifactId>grpc-stub</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>io.grpc</groupId>
+      <artifactId>grpc-services</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>io.grpc</groupId>
+      <artifactId>grpc-netty</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>io.searchbox</groupId>
+      <artifactId>jest</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.elasticsearch</groupId>
+      <artifactId>elasticsearch</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.httpcomponents</groupId>
+      <artifactId>httpclient</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.amazonaws</groupId>
+      <artifactId>aws-java-sdk-ec2</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.json4s</groupId>
+      <artifactId>json4s-jackson_${scala.major.minor.version}</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.kafka</groupId>
+      <artifactId>kafka_${scala.major.minor.version}</artifactId>
+      <version>${kafka-version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.kafka</groupId>
+      <artifactId>kafka-clients</artifactId>
+      <version>${kafka-version}</version>
+    </dependency>
+
+    <dependency>
+      <groupId>com.expedia.www</groupId>
+      <artifactId>haystack-logback-metrics-appender</artifactId>
+    </dependency>
+
+    <!-- test dependencies -->
+    <dependency>
+      <groupId>org.apache.kafka</groupId>
+      <artifactId>kafka-streams</artifactId>
+      <version>${kafka-version}</version>
+      <classifier>test</classifier>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>com.expedia.www</groupId>
+      <artifactId>haystack-trace-backend-memory</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.kafka</groupId>
+      <artifactId>kafka-streams</artifactId>
+      <version>${kafka-version}</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.kafka</groupId>
+      <artifactId>kafka_${scala.major.minor.version}</artifactId>
+      <version>${kafka-version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+      </exclusions>
+      <classifier>test</classifier>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.kafka</groupId>
+      <artifactId>kafka-clients</artifactId>
+      <version>${kafka-version}</version>
+      <classifier>test</classifier>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <finalName>${finalName}</finalName>
+    <plugins>
+      <plugin>
+        <groupId>org.scalatest</groupId>
+        <artifactId>scalatest-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>test</id>
+            <goals>
+              <goal>test</goal>
+            </goals>
+            <configuration>
+              <!-- two configuration values, "earliest" and "haystack-test", lost their element names in extraction -->
+              <membersOnlySuites>com.expedia.www.haystack.trace.indexer.unit</membersOnlySuites>
+              <skipTests>${skip.unit.tests}</skipTests>
+            </configuration>
+          </execution>
+          <execution>
+            <id>integration-test</id>
+            <phase>integration-test</phase>
+            <goals>
+              <goal>test</goal>
+            </goals>
+            <configuration>
+              <membersOnlySuites>com.expedia.www.haystack.trace.indexer.integration</membersOnlySuites>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-shade-plugin</artifactId>
+        <configuration>
+          <!-- a boolean option ("true") lost its element name in extraction -->
+          <filters>
+            <filter>
+              <artifact>*:*</artifact>
+              <excludes>
+                <exclude>META-INF/*.SF</exclude>
+                <exclude>META-INF/*.DSA</exclude>
+                <exclude>META-INF/*.RSA</exclude>
+              </excludes>
+            </filter>
+          </filters>
+        </configuration>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>shade</goal>
+            </goals>
+            <configuration>
+              <transformers>
+                <transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
+                  <resource>reference.conf</resource>
+                </transformer>
+                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                  <mainClass>${mainClass}</mainClass>
+                </transformer>
+              </transformers>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <groupId>net.alchim31.maven</groupId>
+        <artifactId>scala-maven-plugin</artifactId>
+      </plugin>
+
+      <plugin>
+        <groupId>org.scalastyle</groupId>
+        <artifactId>scalastyle-maven-plugin</artifactId>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/traces/indexer/src/main/resources/config/base.conf b/traces/indexer/src/main/resources/config/base.conf
new file mode 100644
index 000000000..0df856db8
--- /dev/null
+++ b/traces/indexer/src/main/resources/config/base.conf
@@ -0,0 +1,160 @@
+health.status.path = "/app/isHealthy"
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
+
+span.accumulate {
+ store {
+ min.traces.per.cache = 1000 # minimum number of traces in each cache before the eviction check is applied; also useful for testing the code
+ all.max.entries = 20000 # maximum number of spans that can live across all the stores
+ }
+ window.ms = 10000
+ poll.ms = 2000
+ packer = none
+}
+
+kafka {
+ close.stream.timeout.ms = 15000
+
+ topic.consume = "proto-spans"
+ topic.produce = "span-buffer"
+
+ num.stream.threads = 2
+ poll.timeout.ms = 100
+
+ # if the consumer poll hangs, wake it up after a timeout
+ # also set the maximum wakeups allowed; if the max threshold is reached, the task will raise a shutdown request
+ max.wakeups = 100
+ wakeup.timeout.ms = 3000
+
+ commit.offset {
+ retries = 3
+ backoff.ms = 200
+ }
+
+ # consumer specific configurations
+ consumer {
+ group.id = "haystack-trace-indexer"
+ bootstrap.servers = "kafkasvc:9092"
+ auto.offset.reset = "latest"
+
+ # disable auto commit as the app manages offset itself
+ enable.auto.commit = "false"
+ }
+
+ # producer specific configurations
+ producer {
+ bootstrap.servers = "kafkasvc:9092"
+ }
+}
+
+
+backend {
+
+ client {
+ host = "localhost"
+ port = 8090
+ }
+ # defines the max inflight writes for backend client
+ max.inflight.requests = 100
+}
+
+service.metadata {
+ enabled = true
+ flush {
+ interval.sec = 60
+ operation.count = 10000
+ }
+ es {
+ endpoint = "http://elasticsearch:9200"
+ conn.timeout.ms = 10000
+ read.timeout.ms = 5000
+ consistency.level = "one"
+ index {
+ # apply the template before starting the client, if json is empty, no operation is performed
+ template.json = "{\"template\": \"service-metadata\", \"index_patterns\": [\"service-metadata*\"], \"aliases\": {\"service-metadata\":{}}, \"settings\": {\"number_of_shards\": 4, \"index.mapping.ignore_malformed\": true, \"analysis\": {\"normalizer\": {\"lowercase_normalizer\": {\"type\": \"custom\", \"filter\": [\"lowercase\"]}}}}, \"mappings\": {\"metadata\": {\"_field_names\": {\"enabled\": false}, \"_all\": {\"enabled\": false}, \"properties\": {\"servicename\": {\"type\": \"keyword\", \"norms\": false}, \"operationname\": {\"type\": \"keyword\", \"norms\": false}}}}}"
+ name = "service-metadata"
+ type = "metadata"
+ }
+ # defines settings for bulk operation like max inflight bulks, number of documents and the total size in a single bulk
+ bulk.max {
+ docs {
+ count = 100
+ size.kb = 1000
+ }
+ inflight = 10
+ }
+ retries {
+ max = 10
+ backoff {
+ initial.ms = 100
+ factor = 2
+ }
+ }
+ }
+}
+
+elasticsearch {
+ endpoint = "http://elasticsearch:9200"
+
+ # defines settings for bulk operation like max inflight bulks, number of documents and the total size in a single bulk
+ bulk.max {
+ docs {
+ count = 100
+ size.kb = 1000
+ }
+ inflight = 10
+ }
+
+ conn.timeout.ms = 10000
+ read.timeout.ms = 5000
+ consistency.level = "one"
+ max.connections.per.route = 5
+
+ retries {
+ max = 10
+ backoff {
+ initial.ms = 100
+ factor = 2
+ }
+ }
+
+ index {
+ # apply the template before starting the client, if json is empty, no operation is performed
+ template.json = "{\"template\":\"haystack-traces*\",\"settings\":{\"number_of_shards\":16,\"index.mapping.ignore_malformed\":true,\"analysis\":{\"normalizer\":{\"lowercase_normalizer\":{\"type\":\"custom\",\"filter\":[\"lowercase\"]}}}},\"aliases\":{\"haystack-traces\":{}},\"mappings\":{\"spans\":{\"_field_names\":{\"enabled\":false},\"_all\":{\"enabled\":false},\"_source\":{\"includes\":[\"traceid\"]},\"properties\":{\"traceid\":{\"enabled\":false},\"starttime\":{\"type\":\"long\",\"doc_values\":true},\"spans\":{\"type\":\"nested\",\"properties\":{\"servicename\":{\"type\":\"keyword\",\"normalizer\":\"lowercase_normalizer\",\"doc_values\":true,\"norms\":false},\"operationname\":{\"type\":\"keyword\",\"normalizer\":\"lowercase_normalizer\",\"doc_values\":true,\"norms\":false},\"starttime\":{\"type\":\"long\",\"doc_values\":true},\"duration\":{\"type\":\"long\",\"doc_values\":true}}}},\"dynamic_templates\":[{\"strings_as_keywords_1\":{\"match_mapping_type\":\"string\",\"mapping\":{\"type\":\"keyword\",\"normalizer\":\"lowercase_normalizer\",\"doc_values\":false,\"norms\":false}}},{\"longs_disable_doc_norms\":{\"match_mapping_type\":\"long\",\"mapping\":{\"type\":\"long\",\"doc_values\":false,\"norms\":false}}}]}}}"
+ name.prefix = "haystack-traces"
+ hour.bucket = 6
+ type = "spans"
+ }
+
+ # if enabled flag is true, es requests will be signed
+ signing.request.aws {
+ enabled = false
+ region = "us-west-2"
+ service.name = "es"
+ # if 'access.key' is not provided, will use DefaultAWSCredentialsProviderChain to resolve creds
+ access.key = ""
+ secret.key = ""
+ }
+}
+
+reload {
+ tables {
+ index.fields.config = "whitelist-index-fields"
+ }
+ config {
+ endpoint = "http://elasticsearch:9200"
+ database.name = "reload-configs"
+ }
+ startup.load = true
+ interval.ms = 60000 # -1 will imply 'no reload'
+
+ # if enabled flag is true, es requests will be signed
+ signing.request.aws {
+ enabled = false
+ region = "us-west-2"
+ service.name = "es"
+ # if 'access.key' is not provided, will use DefaultAWSCredentialsProviderChain to resolve creds
+ access.key = ""
+ secret.key = ""
+ }
+}
diff --git a/traces/indexer/src/main/resources/logback.xml b/traces/indexer/src/main/resources/logback.xml
new file mode 100644
index 000000000..ab4e25a63
--- /dev/null
+++ b/traces/indexer/src/main/resources/logback.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- element names were stripped from this file during extraction; reconstructed from the recoverable values -->
+<configuration>
+  <!-- a boolean setting ("true"), likely on a first appender, lost its element name in extraction -->
+
+  <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+    <encoder>
+      <pattern>%d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n</pattern>
+    </encoder>
+  </appender>
+
+  <appender name="async" class="ch.qos.logback.classic.AsyncAppender">
+    <queueSize>${HAYSTACK_LOG_QUEUE_SIZE:-500}</queueSize>
+    <discardingThreshold>${HAYSTACK_LOG_DISCARD_THRESHOLD:-0}</discardingThreshold>
+    <appender-ref ref="console"/>
+  </appender>
+
+  <root>
+    <appender-ref ref="async"/>
+  </root>
+</configuration>
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/App.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/App.scala
new file mode 100644
index 000000000..2ca21d724
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/App.scala
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer
+
+import com.codahale.metrics.JmxReporter
+import com.expedia.www.haystack.commons.health.{HealthController, UpdateHealthStatusFile}
+import com.expedia.www.haystack.commons.logger.LoggerUtils
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.indexer.config.ProjectConfiguration
+import org.slf4j.LoggerFactory
+
+object App extends MetricsSupport {
+ private val LOGGER = LoggerFactory.getLogger(App.getClass)
+
+ private var stream: StreamRunner = _
+ private var appConfig: ProjectConfiguration = _
+
+ def main(args: Array[String]): Unit = {
+ startJmxReporter()
+
+ try {
+ appConfig = new ProjectConfiguration
+
+ HealthController.addListener(new UpdateHealthStatusFile(appConfig.healthStatusFilePath))
+
+ stream = new StreamRunner(
+ appConfig.kafkaConfig,
+ appConfig.spanAccumulateConfig,
+ appConfig.elasticSearchConfig,
+ appConfig.backendConfig,
+ appConfig.serviceMetadataWriteConfig,
+ appConfig.indexConfig)
+
+ Runtime.getRuntime.addShutdownHook(new Thread {
+ override def run(): Unit = {
+ LOGGER.info("Shutdown hook is invoked, tearing down the application.")
+ shutdown()
+ }
+ })
+
+ stream.start()
+
+ // mark the status of app as 'healthy'
+ HealthController.setHealthy()
+ } catch {
+ case ex: Exception =>
+ LOGGER.error("Observed fatal exception while running the app", ex)
+ shutdown()
+ System.exit(1)
+ }
+ }
+
+ private def shutdown(): Unit = {
+    if (stream != null) stream.close()
+    if (appConfig != null) appConfig.close()
+ LoggerUtils.shutdownLogger()
+ }
+
+  private def startJmxReporter(): Unit = {
+ val jmxReporter = JmxReporter.forRegistry(metricRegistry).build()
+ jmxReporter.start()
+ }
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/StreamRunner.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/StreamRunner.scala
new file mode 100644
index 000000000..abd3860a0
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/StreamRunner.scala
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer
+
+import java.util.concurrent.atomic.AtomicBoolean
+import java.util.concurrent.{Executors, TimeUnit}
+
+import com.expedia.www.haystack.commons.health.HealthController
+import com.expedia.www.haystack.trace.commons.config.entities.WhitelistIndexFieldConfiguration
+import com.expedia.www.haystack.trace.commons.packer.PackerFactory
+import com.expedia.www.haystack.trace.indexer.config.entities._
+import com.expedia.www.haystack.trace.indexer.processors.StreamTaskState.StreamTaskState
+import com.expedia.www.haystack.trace.indexer.processors._
+import com.expedia.www.haystack.trace.indexer.processors.supplier.SpanIndexProcessorSupplier
+import com.expedia.www.haystack.trace.indexer.store.SpanBufferMemoryStoreSupplier
+import com.expedia.www.haystack.trace.indexer.writers.TraceWriter
+import com.expedia.www.haystack.trace.indexer.writers.es.{ElasticSearchWriter, ServiceMetadataWriter}
+import com.expedia.www.haystack.trace.indexer.writers.grpc.GrpcTraceWriter
+import com.expedia.www.haystack.trace.indexer.writers.kafka.KafkaWriter
+import org.apache.commons.lang3.StringUtils
+import org.slf4j.LoggerFactory
+
+import scala.collection.mutable
+import scala.concurrent.ExecutionContextExecutor
+
+class StreamRunner(kafkaConfig: KafkaConfiguration,
+ accumulatorConfig: SpanAccumulatorConfiguration,
+ esConfig: ElasticSearchConfiguration,
+ traceWriteConfig: TraceBackendConfiguration,
+ serviceMetadataWriteConfig: ServiceMetadataWriteConfiguration,
+ indexConfig: WhitelistIndexFieldConfiguration) extends AutoCloseable with StateListener {
+
+ implicit private val executor: ExecutionContextExecutor = scala.concurrent.ExecutionContext.global
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[StreamRunner])
+
+ private val isStarted = new AtomicBoolean(false)
+ private val streamThreadExecutor = Executors.newFixedThreadPool(kafkaConfig.numStreamThreads)
+ private val taskRunnables = mutable.ListBuffer[StreamTaskRunnable]()
+
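+  // each packed span-buffer is fanned out to all writers built here: the grpc trace-backend
+  // and elastic search writers are always present, while the service-metadata and kafka
+  // writers are added only when enabled or when a produce topic is configured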
+ private val writers: Seq[TraceWriter] = {
+ val writers = mutable.ListBuffer[TraceWriter]()
+ writers += new GrpcTraceWriter(traceWriteConfig)(executor)
+ writers += new ElasticSearchWriter(esConfig, indexConfig)
+
+ if (serviceMetadataWriteConfig.enabled) {
+ writers += new ServiceMetadataWriter(serviceMetadataWriteConfig, esConfig.awsRequestSigningConfiguration)
+ }
+
+ if (StringUtils.isNotEmpty(kafkaConfig.produceTopic)) {
+ writers += new KafkaWriter(kafkaConfig.producerProps, kafkaConfig.produceTopic)
+ }
+ writers
+ }
+
+ def start(): Unit = {
+ LOGGER.info("Starting the span indexing stream..")
+
+ val storeSupplier = new SpanBufferMemoryStoreSupplier(
+ accumulatorConfig.minTracesPerCache,
+ accumulatorConfig.maxEntriesAllStores)
+
+ val streamProcessSupplier = new SpanIndexProcessorSupplier(
+ accumulatorConfig,
+ storeSupplier,
+ writers,
+ PackerFactory.spanBufferPacker(accumulatorConfig.packerType))
+
+ for (streamId <- 0 until kafkaConfig.numStreamThreads) {
+ val task = new StreamTaskRunnable(streamId, kafkaConfig, streamProcessSupplier)
+ task.setStateListener(this)
+ taskRunnables += task
+ streamThreadExecutor.execute(task)
+ }
+
+ isStarted.set(true)
+ }
+
+ override def close(): Unit = {
+ if (isStarted.getAndSet(false)) {
+      val shutdownThread = new Thread() {
+        // the teardown must live inside run(); a bare block here would execute
+        // eagerly in the anonymous class constructor instead
+        override def run(): Unit = {
+          closeStreamTasks()
+          closeWriters()
+          waitAndTerminate()
+        }
+      }
+      shutdownThread.setDaemon(true)
+      // run() (not start()) so the caller waits for the teardown to complete
+      shutdownThread.run()
+ }
+ }
+
+ override def onTaskStateChange(state: StreamTaskState): Unit = {
+ if (state == StreamTaskState.FAILED) {
+ LOGGER.error("Thread state has changed to 'FAILED', so tearing down the app")
+ HealthController.setUnhealthy()
+ }
+ }
+
+ private def closeStreamTasks(): Unit = {
+ LOGGER.info("Closing all the stream tasks..")
+ taskRunnables foreach {
+ _.close()
+ }
+ }
+
+ private def closeWriters(): Unit = {
+ LOGGER.info("Closing all the writers now..")
+ writers foreach {
+      _.close()
+ }
+ }
+
+ private def waitAndTerminate(): Unit = {
+ LOGGER.info("Shutting down the stream executor service")
+ streamThreadExecutor.shutdown()
+ streamThreadExecutor.awaitTermination(kafkaConfig.consumerCloseTimeoutInMillis, TimeUnit.MILLISECONDS)
+ }
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/ProjectConfiguration.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/ProjectConfiguration.scala
new file mode 100644
index 000000000..9d276546e
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/ProjectConfiguration.scala
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2019, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.config
+
+import java.util.Properties
+
+import com.expedia.www.haystack.commons.config.ConfigurationLoader
+import com.expedia.www.haystack.commons.retries.RetryOperation
+import com.expedia.www.haystack.trace.commons.config.entities._
+import com.expedia.www.haystack.trace.commons.config.reload.{ConfigurationReloadElasticSearchProvider, Reloadable}
+import com.expedia.www.haystack.trace.commons.packer.PackerType
+import com.expedia.www.haystack.trace.indexer.config.entities._
+import com.expedia.www.haystack.trace.indexer.serde.SpanDeserializer
+import com.typesafe.config.Config
+import org.apache.commons.lang3.StringUtils
+import org.apache.kafka.clients.consumer.ConsumerConfig
+import org.apache.kafka.clients.producer.ProducerConfig
+import org.apache.kafka.common.serialization.{ByteArraySerializer, StringDeserializer, StringSerializer}
+
+import scala.collection.JavaConverters._
+import scala.util.Try
+
+class ProjectConfiguration extends AutoCloseable {
+ private val config = ConfigurationLoader.loadConfigFileWithEnvOverrides()
+
+ val healthStatusFilePath: String = config.getString("health.status.path")
+
+ /**
+ * span accumulation related configuration like max buffered records, buffer window, poll interval
+ *
+ * @return a span config object
+ */
+ val spanAccumulateConfig: SpanAccumulatorConfiguration = {
+ val cfg = config.getConfig("span.accumulate")
+ SpanAccumulatorConfiguration(
+ cfg.getInt("store.min.traces.per.cache"),
+ cfg.getInt("store.all.max.entries"),
+ cfg.getLong("poll.ms"),
+ cfg.getLong("window.ms"),
+ PackerType.withName(cfg.getString("packer").toUpperCase))
+ }
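+
+  // a sketch of the matching config block (values are illustrative, not defaults):
+  //   span.accumulate {
+  //     store { min.traces.per.cache = 1000, all.max.entries = 20000 }
+  //     poll.ms = 2000
+  //     window.ms = 10000
+  //     packer = "snappy"
+  //   }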
+
+ /**
+ *
+   * @return kafka configuration object
+ */
+ val kafkaConfig: KafkaConfiguration = {
+    // verify that the group id and bootstrap servers configs are non-empty
+ def verifyAndUpdateConsumerProps(props: Properties): Unit = {
+ require(props.getProperty(ConsumerConfig.GROUP_ID_CONFIG).nonEmpty)
+ require(props.getProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG).nonEmpty)
+
+ // make sure auto commit is false
+ require(props.getProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG) == "false")
+
+ // set the deserializers explicitly
+ props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getCanonicalName)
+      props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[SpanDeserializer].getCanonicalName)
+ }
+
+ def verifyAndUpdateProducerProps(props: Properties): Unit = {
+ require(props.getProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG).nonEmpty)
+ props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getCanonicalName)
+ props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[ByteArraySerializer].getCanonicalName)
+ }
+
+ def addProps(config: Config, props: Properties): Unit = {
+ if (config != null) {
+ config.entrySet().asScala foreach {
+ kv => {
+ props.setProperty(kv.getKey, kv.getValue.unwrapped().toString)
+ }
+ }
+ }
+ }
+
+ val kafka = config.getConfig("kafka")
+ val producerConfig = if (kafka.hasPath("producer")) kafka.getConfig("producer") else null
+ val consumerConfig = kafka.getConfig("consumer")
+
+ val consumerProps = new Properties
+ val producerProps = new Properties
+
+ // producer specific properties
+ addProps(producerConfig, producerProps)
+
+ // consumer specific properties
+ addProps(consumerConfig, consumerProps)
+
+    // validate and finalize the consumer and producer props
+ verifyAndUpdateConsumerProps(consumerProps)
+ verifyAndUpdateProducerProps(producerProps)
+
+ KafkaConfiguration(
+ numStreamThreads = kafka.getInt("num.stream.threads"),
+ pollTimeoutMs = kafka.getLong("poll.timeout.ms"),
+ consumerProps = consumerProps,
+ producerProps = producerProps,
+ produceTopic = if (kafka.hasPath("topic.produce")) kafka.getString("topic.produce") else "",
+ consumeTopic = kafka.getString("topic.consume"),
+ consumerCloseTimeoutInMillis = kafka.getInt("close.stream.timeout.ms"),
+ commitOffsetRetries = kafka.getInt("commit.offset.retries"),
+ commitBackoffInMillis = kafka.getLong("commit.offset.backoff.ms"),
+ maxWakeups = kafka.getInt("max.wakeups"),
+ wakeupTimeoutInMillis = kafka.getInt("wakeup.timeout.ms"))
+ }
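+
+  // a sketch of the kafka block this loader expects ('topic.produce' and the producer section
+  // are optional; values are illustrative):
+  //   kafka {
+  //     num.stream.threads = 2
+  //     poll.timeout.ms = 100
+  //     topic.consume = "proto-spans"
+  //     close.stream.timeout.ms = 30000
+  //     commit.offset { retries = 3, backoff.ms = 200 }
+  //     max.wakeups = 10
+  //     wakeup.timeout.ms = 5000
+  //     consumer { group.id = "trace-indexer", bootstrap.servers = "kafka:9092", enable.auto.commit = false }
+  //     producer { bootstrap.servers = "kafka:9092" }
+  //   }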
+
+
+ /**
+ *
+ * trace backend configuration object
+ */
+ val backendConfig: TraceBackendConfiguration = {
+ val traceBackendConfig = config.getConfig("backend")
+
+ val grpcClients = traceBackendConfig.entrySet().asScala
+ .map(k => StringUtils.split(k.getKey, '.')(0)).toSeq
+ .map(cl => traceBackendConfig.getConfig(cl))
+ .filter(cl => cl.hasPath("host") && cl.hasPath("port"))
+ .map(cl => GrpcClientConfig(cl.getString("host"), cl.getInt("port")))
+
+    // we don't support multiple backends for write operations
+ require(grpcClients.size == 1)
+
+ TraceBackendConfiguration(
+ TraceStoreBackends(grpcClients),
+ maxInFlightRequests = traceBackendConfig.getInt("max.inflight.requests"))
+
+ }
+
+ /**
+ * service metadata write configuration
+ */
+ val serviceMetadataWriteConfig: ServiceMetadataWriteConfiguration = {
+ val serviceMetadata = config.getConfig("service.metadata")
+ val es = serviceMetadata.getConfig("es")
+ val templateJsonConfigField = "index.template.json"
+ val indexTemplateJson = if (es.hasPath(templateJsonConfigField)
+ && StringUtils.isNotEmpty(es.getString(templateJsonConfigField))) {
+ Some(es.getString(templateJsonConfigField))
+ } else {
+ None
+ }
+ val username = if (es.hasPath("username")) Option(es.getString("username")) else None
+ val password = if (es.hasPath("password")) Option(es.getString("password")) else None
+ ServiceMetadataWriteConfiguration(
+ enabled = serviceMetadata.getBoolean("enabled"),
+ flushIntervalInSec = serviceMetadata.getInt("flush.interval.sec"),
+ flushOnMaxOperationCount = serviceMetadata.getInt("flush.operation.count"),
+ esEndpoint = es.getString("endpoint"),
+ username = username,
+ password = password,
+ consistencyLevel = es.getString("consistency.level"),
+ indexName = es.getString("index.name"),
+ indexType = es.getString("index.type"),
+ indexTemplateJson = indexTemplateJson,
+ connectionTimeoutMillis = es.getInt("conn.timeout.ms"),
+ readTimeoutMillis = es.getInt("read.timeout.ms"),
+ maxInFlightBulkRequests = es.getInt("bulk.max.inflight"),
+ maxDocsInBulk = es.getInt("bulk.max.docs.count"),
+ maxBulkDocSizeInBytes = es.getInt("bulk.max.docs.size.kb") * 1000,
+ retryConfig = RetryOperation.Config(
+ es.getInt("retries.max"),
+ es.getLong("retries.backoff.initial.ms"),
+ es.getDouble("retries.backoff.factor"))
+ )
+ }
+
+ /**
+ *
+ * elastic search configuration object
+ */
+ val elasticSearchConfig: ElasticSearchConfiguration = {
+ val es = config.getConfig("elasticsearch")
+ val indexConfig = es.getConfig("index")
+
+ val templateJsonConfigField = "template.json"
+ val indexTemplateJson = if (indexConfig.hasPath(templateJsonConfigField)
+ && StringUtils.isNotEmpty(indexConfig.getString(templateJsonConfigField))) {
+ Some(indexConfig.getString(templateJsonConfigField))
+ } else {
+ None
+ }
+    val username = if (es.hasPath("username")) Option(es.getString("username")) else None
+    val password = if (es.hasPath("password")) Option(es.getString("password")) else None
+
+    ElasticSearchConfiguration(
+      endpoint = es.getString("endpoint"),
+      username = username,
+      password = password,
+      indexTemplateJson = indexTemplateJson,
+ consistencyLevel = es.getString("consistency.level"),
+ indexNamePrefix = indexConfig.getString("name.prefix"),
+ indexHourBucket = indexConfig.getInt("hour.bucket"),
+ indexType = indexConfig.getString("type"),
+ connectionTimeoutMillis = es.getInt("conn.timeout.ms"),
+ readTimeoutMillis = es.getInt("read.timeout.ms"),
+ maxConnectionsPerRoute = es.getInt("max.connections.per.route"),
+ maxInFlightBulkRequests = es.getInt("bulk.max.inflight"),
+ maxDocsInBulk = es.getInt("bulk.max.docs.count"),
+ maxBulkDocSizeInBytes = es.getInt("bulk.max.docs.size.kb") * 1000,
+ retryConfig = RetryOperation.Config(
+ es.getInt("retries.max"),
+ es.getLong("retries.backoff.initial.ms"),
+ es.getDouble("retries.backoff.factor")),
+      awsRequestSigningConfiguration = awsRequestSigningConfig(config.getConfig("elasticsearch.signing.request.aws")))
+ }
+
+ private def awsRequestSigningConfig(awsESConfig: Config): AWSRequestSigningConfiguration = {
+ val accessKey: Option[String] = if (awsESConfig.hasPath("access.key") && awsESConfig.getString("access.key").nonEmpty) {
+ Some(awsESConfig.getString("access.key"))
+ } else
+ None
+
+ val secretKey: Option[String] = if (awsESConfig.hasPath("secret.key") && awsESConfig.getString("secret.key").nonEmpty) {
+ Some(awsESConfig.getString("secret.key"))
+ } else
+ None
+
+ AWSRequestSigningConfiguration(
+ awsESConfig.getBoolean("enabled"),
+ awsESConfig.getString("region"),
+ awsESConfig.getString("service.name"),
+ accessKey,
+ secretKey)
+ }
+
+ /**
+ * configuration that contains list of tags that should be indexed for a span
+ */
+ val indexConfig: WhitelistIndexFieldConfiguration = {
+ val indexConfig = WhitelistIndexFieldConfiguration()
+ indexConfig.reloadConfigTableName = Option(config.getConfig("reload.tables").getString("index.fields.config"))
+ indexConfig
+ }
+
+ // configuration reloader
+ private val reloader = registerReloadableConfigurations(List(indexConfig))
+
+ /**
+ * registers a reloadable config object to reloader instance.
+ * The reloader registers them as observers and invokes them periodically when it re-reads the
+ * configuration from an external store
+ *
+ * @param observers list of reloadable configuration objects
+ * @return the reloader instance that uses ElasticSearch as an external database for storing the configs
+ */
+ private def registerReloadableConfigurations(observers: Seq[Reloadable]): ConfigurationReloadElasticSearchProvider = {
+ val reload = config.getConfig("reload")
+ val reloadConfig = ReloadConfiguration(
+ reload.getString("config.endpoint"),
+ reload.getString("config.database.name"),
+ reload.getInt("interval.ms"),
+ if (reload.hasPath("config.username")) Option(reload.getString("config.username")) else None,
+ if (reload.hasPath("config.password")) Option(reload.getString("config.password")) else None,
+ observers,
+ loadOnStartup = reload.getBoolean("startup.load"))
+
+ val loader = new ConfigurationReloadElasticSearchProvider(reloadConfig, awsRequestSigningConfig(config.getConfig("reload.signing.request.aws")))
+ if (reloadConfig.loadOnStartup) loader.load()
+ loader
+ }
+
+ override def close(): Unit = {
+ Try(reloader.close())
+ }
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/ElasticSearchConfiguration.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/ElasticSearchConfiguration.scala
new file mode 100644
index 000000000..e92cd0531
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/ElasticSearchConfiguration.scala
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2019, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.config.entities
+
+import com.expedia.www.haystack.commons.retries.RetryOperation
+import com.expedia.www.haystack.trace.commons.config.entities.AWSRequestSigningConfiguration
+
+/**
+ * defines the config parameters for elastic search writes
+ *
+ * @param endpoint: http endpoint to connect
+ * @param username: optional username for the es endpoint
+ * @param password: optional password for the es endpoint
+ * @param indexTemplateJson: template as json that will be applied when the app runs, this is optional
+ * @param consistencyLevel: consistency level of writes, e.g. one, quorum
+ * @param indexNamePrefix: prefix for naming the elastic search index
+ * @param indexHourBucket: width of an index bucket in hours, e.g. a value of 6 creates a new index every 6 hours, i.e. 4 buckets per day
+ * @param indexType: elastic search index type
+ * @param connectionTimeoutMillis: connection timeout in millis
+ * @param readTimeoutMillis: read timeout in millis
+ * @param maxConnectionsPerRoute: max connections per http route
+ * @param maxInFlightBulkRequests: max bulk writes that can be run in parallel
+ * @param maxDocsInBulk maximum number of index documents in a single bulk
+ * @param maxBulkDocSizeInBytes maximum size (in bytes) of a single bulk request
+ * @param retryConfig retry max retries limit, initial backoff and exponential factor values
+ * @param awsRequestSigningConfiguration aws ES request signing config
+ */
+case class ElasticSearchConfiguration(endpoint: String,
+ username: Option[String],
+ password: Option[String],
+ indexTemplateJson: Option[String],
+ consistencyLevel: String,
+ indexNamePrefix: String,
+ indexHourBucket: Int,
+ indexType: String,
+ connectionTimeoutMillis: Int,
+ readTimeoutMillis: Int,
+ maxConnectionsPerRoute: Int,
+ maxInFlightBulkRequests: Int,
+ maxDocsInBulk: Int,
+ maxBulkDocSizeInBytes: Int,
+ retryConfig: RetryOperation.Config,
+ awsRequestSigningConfiguration: AWSRequestSigningConfiguration)
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/KafkaConfiguration.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/KafkaConfiguration.scala
new file mode 100644
index 000000000..8de15b8cd
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/KafkaConfiguration.scala
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.config.entities
+
+import java.util.Properties
+
+/** @param numStreamThreads num of stream threads
+ * @param pollTimeoutMs kafka consumer poll timeout
+ * @param consumerProps consumer config object
+ * @param producerProps producer config object
+ * @param produceTopic producer topic
+ * @param consumeTopic consumer topic
+ * @param consumerCloseTimeoutInMillis kafka consumer close timeout
+  * @param commitOffsetRetries number of retries if an offset commit fails
+  * @param commitBackoffInMillis backoff in millis between retries of a failed commit
+ * @param maxWakeups maximum wakeups allowed
+ * @param wakeupTimeoutInMillis wait timeout for consumer.poll() to return zero or more records
+ */
+case class KafkaConfiguration(numStreamThreads: Int,
+ pollTimeoutMs: Long,
+ consumerProps: Properties,
+ producerProps: Properties,
+ produceTopic: String,
+ consumeTopic: String,
+ consumerCloseTimeoutInMillis: Int,
+ commitOffsetRetries: Int,
+ commitBackoffInMillis: Long,
+ maxWakeups: Int,
+ wakeupTimeoutInMillis: Int)
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/ServiceMetadataWriteConfiguration.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/ServiceMetadataWriteConfiguration.scala
new file mode 100644
index 000000000..e99309241
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/ServiceMetadataWriteConfiguration.scala
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.indexer.config.entities
+
+import com.expedia.www.haystack.commons.retries.RetryOperation
+
+/**
+ * Configurations for writing service metadata to elastic search
+ * @param enabled: enables writing service metadata; if set to false, the list of service names and operation names is fetched from the elastic search traces index, which is an expensive aggregation
+ * @param esEndpoint: http endpoint to connect
+ * @param indexTemplateJson: template as json that will be applied when the app runs, this is optional
+ * @param username: optional username for the es endpoint
+ * @param password: optional password for the es endpoint
+ * @param consistencyLevel: consistency level of writes, e.g. one, quorum
+ * @param indexName: name of the elastic search index where the data is written
+ * @param indexType: elastic search index type
+ * @param connectionTimeoutMillis: connection timeout in millis
+ * @param readTimeoutMillis: read timeout in millis
+ * @param maxInFlightBulkRequests: max bulk writes that can be run in parallel
+ * @param maxDocsInBulk: maximum number of index documents in a single bulk
+ * @param maxBulkDocSizeInBytes: maximum size (in bytes) of a single bulk request
+ * @param flushIntervalInSec: interval for collecting service and operation names in memory before flushing to es
+ * @param flushOnMaxOperationCount: maximum number of unique operations that forces a flush to es
+ * @param retryConfig: max retries limit, initial backoff and exponential factor values
+ */
+case class ServiceMetadataWriteConfiguration(enabled: Boolean,
+ esEndpoint: String,
+ username: Option[String],
+ password: Option[String],
+ consistencyLevel: String,
+ indexTemplateJson: Option[String],
+ indexName: String,
+ indexType: String,
+ connectionTimeoutMillis: Int,
+ readTimeoutMillis: Int,
+ maxInFlightBulkRequests: Int,
+ maxDocsInBulk: Int,
+ maxBulkDocSizeInBytes: Int,
+ flushIntervalInSec: Int,
+ flushOnMaxOperationCount: Int,
+ retryConfig: RetryOperation.Config
+ ) {
+ require(maxInFlightBulkRequests > 0)
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/SpanAccumulatorConfiguration.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/SpanAccumulatorConfiguration.scala
new file mode 100644
index 000000000..7f0f3490c
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/SpanAccumulatorConfiguration.scala
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.config.entities
+
+import com.expedia.www.haystack.trace.commons.packer.PackerType.PackerType
+
+/**
+ * @param minTracesPerCache minimum number of traces that will reside in each store.
+ * @param maxEntriesAllStores maximum number of records across all state stores, one record is one span buffer object
+ * @param pollIntervalMillis poll interval to gather the buffered-spans that are ready to emit out to sink
+ * @param bufferingWindowMillis time window for which a unique traceId is held to gather all its child spans
+ * @param packerType compression applied to the span buffer before it is stored in the trace backend
+ */
+case class SpanAccumulatorConfiguration(minTracesPerCache: Int,
+ maxEntriesAllStores: Int,
+ pollIntervalMillis: Long,
+ bufferingWindowMillis: Long,
+ packerType: PackerType)
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/TraceBackendConfiguration.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/TraceBackendConfiguration.scala
new file mode 100644
index 000000000..929b83753
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/config/entities/TraceBackendConfiguration.scala
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.config.entities
+
+import com.expedia.www.haystack.trace.commons.config.entities.TraceStoreBackends
+
+/**
+ * @param clientConfig defines the grpc client configuration for connecting to the trace backend
+ * @param maxInFlightRequests defines the max parallel writes to trace-backend
+ */
+case class TraceBackendConfiguration(clientConfig: TraceStoreBackends,
+ maxInFlightRequests: Int)
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/metrics/AppMetricNames.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/metrics/AppMetricNames.scala
new file mode 100644
index 000000000..f260fca83
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/metrics/AppMetricNames.scala
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.metrics
+
+/**
+ * list all app metric names that are published on jmx
+ */
+object AppMetricNames {
+ val PROCESS_TIMER = "buffer.process"
+ val KAFKA_ITERATOR_AGE_MS = "kafka.iterator.age.ms"
+
+ val BUFFERED_SPANS_COUNT = "buffered.spans.count"
+ val STATE_STORE_EVICTION = "state.store.eviction"
+ val SPAN_PROTO_DESER_FAILURE = "span.proto.deser.failure"
+
+ val BACKEND_WRITE_TIME = "backend.write.time"
+ val BACKEND_WRITE_FAILURE = "backend.write.failure"
+ val BACKEND_WRITE_WARNINGS = "backend.write.warnings"
+
+ val ES_WRITE_FAILURE = "es.write.failure"
+ val ES_WRITE_TIME = "es.writer.time"
+
+ val METADATA_WRITE_TIME = "metadata.write.time"
+ val METADATA_WRITE_FAILURE = "metadata.write.failure"
+
+ val KAFKA_PRODUCE_FAILURES = "kafka.produce.failure"
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/SpanIndexProcessor.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/SpanIndexProcessor.scala
new file mode 100644
index 000000000..c26f98214
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/SpanIndexProcessor.scala
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.processors
+
+import com.codahale.metrics.{Histogram, Timer}
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.commons.packer.Packer
+import com.expedia.www.haystack.trace.indexer.config.entities.SpanAccumulatorConfiguration
+import com.expedia.www.haystack.trace.indexer.metrics.AppMetricNames.{BUFFERED_SPANS_COUNT, KAFKA_ITERATOR_AGE_MS, PROCESS_TIMER}
+import com.expedia.www.haystack.trace.indexer.store.SpanBufferMemoryStoreSupplier
+import com.expedia.www.haystack.trace.indexer.store.data.model.SpanBufferWithMetadata
+import com.expedia.www.haystack.trace.indexer.store.traits.{EldestBufferedSpanEvictionListener, SpanBufferKeyValueStore}
+import com.expedia.www.haystack.trace.indexer.writers.TraceWriter
+import org.apache.kafka.clients.consumer.{ConsumerRecord, OffsetAndMetadata}
+import org.slf4j.{Logger, LoggerFactory}
+
+import scala.concurrent.ExecutionContextExecutor
+
+object SpanIndexProcessor extends MetricsSupport {
+ protected val LOGGER: Logger = LoggerFactory.getLogger(SpanIndexProcessor.getClass)
+
+ protected val processTimer: Timer = metricRegistry.timer(PROCESS_TIMER)
+ protected val bufferedSpansHistogram: Histogram = metricRegistry.histogram(BUFFERED_SPANS_COUNT)
+ protected val iteratorAge: Histogram = metricRegistry.histogram(KAFKA_ITERATOR_AGE_MS)
+}
+
+class SpanIndexProcessor(accumulatorConfig: SpanAccumulatorConfiguration,
+ storeSupplier: SpanBufferMemoryStoreSupplier,
+ writers: Seq[TraceWriter],
+ spanBufferPacker: Packer[SpanBuffer])(implicit val dispatcher: ExecutionContextExecutor)
+ extends StreamProcessor[String, Span] with EldestBufferedSpanEvictionListener {
+
+ import com.expedia.www.haystack.trace.indexer.processors.SpanIndexProcessor._
+
+ private var spanBufferMemStore: SpanBufferKeyValueStore = _
+
+  // tracks the last time we scanned the store to emit ready traces
+ private var lastEmitTimestamp: Long = 0L
+
+ override def init(): Unit = {
+ spanBufferMemStore = storeSupplier.get()
+ spanBufferMemStore.init()
+ spanBufferMemStore.addEvictionListener(this)
+ LOGGER.info("Span Index Processor has been initialized successfully!")
+ }
+
+ override def close(): Unit = {
+ spanBufferMemStore.close()
+ LOGGER.info("Span Index Processor has been closed now!")
+ }
+
+ override def process(records: Iterable[ConsumerRecord[String, Span]]): Option[OffsetAndMetadata] = {
+ val timer = processTimer.time()
+ try {
+ var currentTimestamp = 0L
+ var minEventTime = Long.MaxValue
+
+ records
+ .filter(_ != null)
+ .foreach {
+ record => {
+ spanBufferMemStore.addOrUpdateSpanBuffer(record.key(), record.value(), record.timestamp(), record.offset())
+ currentTimestamp = Math.max(record.timestamp(), currentTimestamp)
+
+ // record the smallest event timestamp observed across the spans
+ if (record.value().getStartTime > 0) {
+ minEventTime = Math.min(record.value().getStartTime, minEventTime) // this is in micros
+ }
+ }
+ }
+
+      // iterator age ~ how far the consumer lags behind the oldest span start time (micros -> millis);
+      // skip when no span in the batch carried a start time
+      if (minEventTime != Long.MaxValue) {
+        iteratorAge.update(System.currentTimeMillis() - (minEventTime / 1000L))
+      }
+ mayBeEmit(currentTimestamp)
+ } finally {
+ timer.stop()
+ }
+ }
+
+ private def writeTrace(spanBuffer: SpanBuffer, isLastSpanBuffer: Boolean) = {
+    // record how many child spans were buffered before writing them to the external stores
+ bufferedSpansHistogram.update(spanBuffer.getChildSpansCount)
+
+ val traceId = spanBuffer.getTraceId
+ val packedMessage = spanBufferPacker.apply(spanBuffer)
+ writers.foreach {
+ writer =>
+ writer.writeAsync(traceId, packedMessage, isLastSpanBuffer)
+ }
+ }
+
+ private def mayBeEmit(currentTimestamp: Long): Option[OffsetAndMetadata] = {
+ if ((currentTimestamp - accumulatorConfig.pollIntervalMillis) > lastEmitTimestamp) {
+
+ var committableOffset = -1L
+
+ val emittableSpanBuffers = spanBufferMemStore.getAndRemoveSpanBuffersOlderThan(currentTimestamp - accumulatorConfig.bufferingWindowMillis)
+
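+      // emit every span buffer whose buffering window has expired; flag the last one so the
+      // writers may flush, and track the highest first-seen kafka offset among the emitted
+      // buffers as the offset handed back for commit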
+ emittableSpanBuffers.zipWithIndex foreach {
+ case (sb, idx) =>
+ val spanBuffer = sb.builder.build()
+ writeTrace(spanBuffer, idx == emittableSpanBuffers.size - 1)
+ if (committableOffset < sb.firstSeenSpanKafkaOffset) committableOffset = sb.firstSeenSpanKafkaOffset
+ }
+
+ lastEmitTimestamp = currentTimestamp
+
+ if (committableOffset >= 0) Some(new OffsetAndMetadata(committableOffset)) else None
+ } else {
+ None
+ }
+ }
+
+  // for now we set isLastSpanBuffer to false:
+  // if too many evictions happen, the writers will flush the data out eventually
+ override def onEvict(key: String, value: SpanBufferWithMetadata): Unit = writeTrace(value.builder.build(), isLastSpanBuffer = false)
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/StateListener.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/StateListener.scala
new file mode 100644
index 000000000..07ef22c95
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/StateListener.scala
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.processors
+
+import com.expedia.www.haystack.trace.indexer.processors.StreamTaskState.StreamTaskState
+
+object StreamTaskState extends Enumeration {
+ type StreamTaskState = Value
+ val NOT_RUNNING, RUNNING, FAILED, CLOSED = Value
+}
+
+trait StateListener {
+ def onTaskStateChange(state: StreamTaskState)
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/StreamProcessor.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/StreamProcessor.scala
new file mode 100644
index 000000000..e01d4e656
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/StreamProcessor.scala
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.processors
+
+import org.apache.kafka.clients.consumer.{ConsumerRecord, OffsetAndMetadata}
+
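+/**
+ * contract for a per-partition stream processor: init() runs once when the partition is
+ * assigned, process() runs for every polled batch and may return an offset that is safe to
+ * commit, and close() runs on partition revocation or shutdown
+ */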
+trait StreamProcessor[K, V] {
+  def process(records: Iterable[ConsumerRecord[K, V]]): Option[OffsetAndMetadata]
+
+ def close(): Unit
+
+ def init(): Unit
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/StreamTaskRunnable.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/StreamTaskRunnable.scala
new file mode 100644
index 000000000..8c8312e16
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/StreamTaskRunnable.scala
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.processors
+
+import java.util
+import java.util.Properties
+import java.util.concurrent.atomic.AtomicBoolean
+import java.util.concurrent.{ConcurrentHashMap, Executors, TimeUnit}
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.trace.indexer.config.entities.KafkaConfiguration
+import com.expedia.www.haystack.trace.indexer.processors.StreamTaskState.StreamTaskState
+import com.expedia.www.haystack.trace.indexer.processors.supplier.StreamProcessorSupplier
+import org.apache.kafka.clients.consumer._
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.errors.WakeupException
+import org.slf4j.LoggerFactory
+
+import scala.annotation.tailrec
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+import scala.util.Try
+
+class StreamTaskRunnable(taskId: Int, kafkaConfig: KafkaConfiguration, processorSupplier: StreamProcessorSupplier[String, Span])
+ extends Runnable with AutoCloseable {
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[StreamTaskRunnable])
+
+ /**
+ * consumer rebalance listener
+ */
+ private class RebalanceListener extends ConsumerRebalanceListener {
+
+ /**
+ * close the running processors for the revoked partitions
+ *
+ * @param revokedPartitions revoked partitions
+ */
+ override def onPartitionsRevoked(revokedPartitions: util.Collection[TopicPartition]): Unit = {
+ LOGGER.info("Partitions {} revoked at the beginning of consumer rebalance for taskId={}", revokedPartitions, taskId)
+
+ revokedPartitions.asScala.foreach(
+ p => {
+ val processor = processors.remove(p)
+ if (processor != null) processor.close()
+ })
+ }
+
+ /**
+ * create processors for newly assigned partitions
+ *
+ * @param assignedPartitions newly assigned partitions
+ */
+ override def onPartitionsAssigned(assignedPartitions: util.Collection[TopicPartition]): Unit = {
+ LOGGER.info("Partitions {} assigned at the beginning of consumer rebalance for taskId={}", assignedPartitions, taskId)
+
+ assignedPartitions.asScala foreach {
+ partition => {
+ val processor = processorSupplier.get()
+ val previousProcessor = processors.putIfAbsent(partition, processor)
+ if (previousProcessor == null) processor.init()
+ }
+ }
+ }
+ }
+
+ @volatile
+ private var state = StreamTaskState.NOT_RUNNING
+ private var wakeups: Int = 0
+
+ private val shutdownRequested = new AtomicBoolean(false)
+ private val wakeupScheduler = Executors.newScheduledThreadPool(1)
+ private val listeners = mutable.ListBuffer[StateListener]()
+ private val processors = new ConcurrentHashMap[TopicPartition, StreamProcessor[String, Span]]()
+
+ private val consumer = {
+ val props = new Properties()
+ kafkaConfig.consumerProps.entrySet().asScala.foreach(entry => props.put(entry.getKey, entry.getValue))
+ props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, taskId.toString)
+ new KafkaConsumer[String, Span](props)
+ }
+
+ private val rebalanceListener = new RebalanceListener
+
+ consumer.subscribe(util.Arrays.asList(kafkaConfig.consumeTopic), rebalanceListener)
+
+ /**
+ * Execute the stream processors
+ *
+ */
+ override def run(): Unit = {
+ LOGGER.info("Starting stream processing thread with id={}", taskId)
+ try {
+ updateStateAndNotify(StreamTaskState.RUNNING)
+ runLoop()
+ } catch {
+ case ie: InterruptedException =>
+ LOGGER.error(s"This stream task with taskId=$taskId has been interrupted", ie)
+ case ex: Exception =>
+ if (!shutdownRequested.get()) updateStateAndNotify(StreamTaskState.FAILED)
+        // kafka-specific exceptions may have been logged already, but logging again here is harmless
+ LOGGER.error(s"Stream application faced an exception during processing for taskId=$taskId: ", ex)
+ }
+ finally {
+ consumer.close(kafkaConfig.consumerCloseTimeoutInMillis, TimeUnit.MILLISECONDS)
+ updateStateAndNotify(StreamTaskState.CLOSED)
+ }
+ }
+
+ /**
+ * invoke the processor per partition for the records that are read from kafka.
+ * Update the offsets (if any) that need to be committed in the committableOffsets map
+ *
+ * @param partition kafka partition
+ * @param partitionRecords records of the given kafka partition
+ * @param committableOffsets offsets that need to be committed for the given topic partition
+ */
+ private def invokeProcessor(partition: Int,
+ partitionRecords: Iterable[ConsumerRecord[String, Span]],
+ committableOffsets: util.HashMap[TopicPartition, OffsetAndMetadata]): Unit = {
+ val topicPartition = new TopicPartition(kafkaConfig.consumeTopic, partition)
+ val processor = processors.get(topicPartition)
+
+ if (processor != null) {
+ processor.process(partitionRecords) match {
+ case Some(offsetMetadata) => committableOffsets.put(topicPartition, offsetMetadata)
+ case _ => /* the processor has nothing to commit for now */
+ }
+ }
+ }
+
+ /**
+ * run the consumer loop till the shutdown is requested or any exception is thrown
+ */
+ private def runLoop(): Unit = {
+ while (!shutdownRequested.get()) {
+ poll() match {
+ case Some(records) if records != null && !records.isEmpty && !processors.isEmpty =>
+ val committableOffsets = new util.HashMap[TopicPartition, OffsetAndMetadata]()
+ val groupedByPartition = records.asScala.groupBy(_.partition())
+
+ groupedByPartition foreach {
+ case (partition, partitionRecords) => invokeProcessor(partition, partitionRecords, committableOffsets)
+ }
+
+ // commit offsets
+ commit(committableOffsets)
+ // if no records are returned in poll, then do nothing
+ case _ =>
+ }
+ }
+ }
+
+ /**
+   * before requesting consumer.poll(), schedule a wakeup call, as poll() may hang due to network errors in kafka.
+   * if poll() doesn't return within the timeout, the consumer is woken up.
+ *
+ * @return consumer records from kafka
+ */
+ private def poll(): Option[ConsumerRecords[String, Span]] = {
+
+ def scheduleWakeup() = wakeupScheduler.schedule(new Runnable {
+ override def run(): Unit = consumer.wakeup()
+ }, kafkaConfig.wakeupTimeoutInMillis, TimeUnit.MILLISECONDS)
+
+ def handleWakeup(we: WakeupException): Unit = {
+ // if in shutdown phase, then do not swallow the exception, throw it to upstream
+ if (shutdownRequested.get()) throw we
+
+ wakeups = wakeups + 1
+ if (wakeups == kafkaConfig.maxWakeups) {
+ LOGGER.error(s"WakeupException limit exceeded, throwing up wakeup exception for taskId=$taskId.", we)
+ throw we
+ } else {
+ LOGGER.error(s"Consumer poll took more than ${kafkaConfig.wakeupTimeoutInMillis} ms for taskId=$taskId, wakeup attempt=$wakeups!. Will try poll again!")
+ }
+ }
+
+ val wakeupCall = scheduleWakeup()
+
+ try {
+ val records: ConsumerRecords[String, Span] = consumer.poll(kafkaConfig.pollTimeoutMs)
+ wakeups = 0
+ Some(records)
+ } catch {
+ case we: WakeupException =>
+ handleWakeup(we)
+ None
+ } finally {
+ Try(wakeupCall.cancel(true))
+ }
+ }
+
+ /**
+ * commit the offset to kafka with a retry logic
+ *
+ * @param offsets map of offsets for each topic partition
+ * @param retryAttempt current retry attempt
+ */
+ @tailrec
+ private def commit(offsets: util.HashMap[TopicPartition, OffsetAndMetadata], retryAttempt: Int = 0): Unit = {
+ try {
+ if (!offsets.isEmpty && retryAttempt <= kafkaConfig.commitOffsetRetries) {
+ consumer.commitSync(offsets)
+ }
+ } catch {
+ case _: CommitFailedException =>
+ Thread.sleep(kafkaConfig.commitBackoffInMillis)
+        // retry the commit with the configured backoff
+ commit(offsets, retryAttempt + 1)
+ case ex: Exception =>
+ LOGGER.error("Fail to commit the offsets with exception", ex)
+ }
+ }
+
+ private def updateStateAndNotify(newState: StreamTaskState) = {
+ if (state != newState) {
+ state = newState
+
+ // invoke listeners for any state change
+ listeners foreach (listener => listener.onTaskStateChange(state))
+ }
+ }
+
+ /**
+   * close the runnable. If still in the running state, wake up the consumer first
+ */
+ override def close(): Unit = {
+ Try {
+ LOGGER.info(s"Close has been requested for taskId=$taskId")
+ shutdownRequested.set(true)
+ if (isStillRunning) consumer.wakeup()
+ wakeupScheduler.shutdown()
+ }
+ }
+
+ /**
+   * @return true if the consumer task is still in the running state
+ */
+ def isStillRunning: Boolean = state == StreamTaskState.RUNNING
+
+ /**
+ * set the state change listener
+ *
+ * @param listener state change listener
+ */
+ def setStateListener(listener: StateListener): Unit = listeners += listener
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/supplier/SpanIndexProcessorSupplier.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/supplier/SpanIndexProcessorSupplier.scala
new file mode 100644
index 000000000..64529f606
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/supplier/SpanIndexProcessorSupplier.scala
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.processors.supplier
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.commons.packer.Packer
+import com.expedia.www.haystack.trace.indexer.config.entities.SpanAccumulatorConfiguration
+import com.expedia.www.haystack.trace.indexer.processors.{SpanIndexProcessor, StreamProcessor}
+import com.expedia.www.haystack.trace.indexer.store.SpanBufferMemoryStoreSupplier
+import com.expedia.www.haystack.trace.indexer.writers.TraceWriter
+
+import scala.concurrent.ExecutionContextExecutor
+
+class SpanIndexProcessorSupplier(accumulatorConfig: SpanAccumulatorConfiguration,
+ storeSupplier: SpanBufferMemoryStoreSupplier,
+ writers: Seq[TraceWriter],
+ spanBufferPacker: Packer[SpanBuffer])(implicit val dispatcher: ExecutionContextExecutor)
+ extends StreamProcessorSupplier[String, Span] {
+
+ override def get(): StreamProcessor[String, Span] = {
+ new SpanIndexProcessor(accumulatorConfig, storeSupplier, writers, spanBufferPacker)
+ }
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/supplier/StreamProcessorSupplier.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/supplier/StreamProcessorSupplier.scala
new file mode 100644
index 000000000..ba7e7f6ff
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/processors/supplier/StreamProcessorSupplier.scala
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.processors.supplier
+
+import com.expedia.www.haystack.trace.indexer.processors.StreamProcessor
+
+trait StreamProcessorSupplier[K, V] {
+ def get(): StreamProcessor[K, V]
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/serde/SpanDeserializer.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/serde/SpanDeserializer.scala
new file mode 100644
index 000000000..47b921231
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/serde/SpanDeserializer.scala
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.serde
+
+import java.util
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.indexer.metrics.AppMetricNames
+import org.apache.kafka.common.serialization.Deserializer
+
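+/**
+ * kafka value deserializer that decodes protobuf-encoded spans. It is wired in through
+ * ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG (see ProjectConfiguration), so kafka
+ * instantiates it reflectively via the zero-arg constructor
+ */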
+class SpanDeserializer extends Deserializer[Span] with MetricsSupport {
+
+ private val spanDeserMeter = metricRegistry.meter(AppMetricNames.SPAN_PROTO_DESER_FAILURE)
+
+ override def configure(configs: util.Map[String, _], isKey: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+
+ /**
+ * converts the binary protobuf bytes into Span object
+ * @param data serialized bytes of Span
+    * @return the parsed Span, or null when the bytes are empty or fail to parse
+ */
+ override def deserialize(topic: String, data: Array[Byte]): Span = {
+ try {
+      if (data == null || data.isEmpty) null else Span.parseFrom(data)
+ } catch {
+      case _: Exception =>
+        // mark the deserialization failure and drop the corrupt record
+        spanDeserMeter.mark()
+        null
+ }
+ }
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/DynamicCacheSizer.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/DynamicCacheSizer.scala
new file mode 100644
index 000000000..d969b2081
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/DynamicCacheSizer.scala
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.store
+
+import com.expedia.www.haystack.trace.indexer.store.traits.CacheSizeObserver
+
+import scala.collection.mutable
+
+class DynamicCacheSizer(val minTracesPerCache: Int, maxEntriesAcrossCaches: Int) {
+
+ private val cacheObservers = mutable.HashSet[CacheSizeObserver]()
+
+ /**
+ * adds cache observer
+ *
+ * @param observer state store acts as an observer
+ */
+ def addCacheObserver(observer: CacheSizeObserver): Unit = {
+ this.synchronized {
+ cacheObservers.add(observer)
+ evaluateNewCacheSizeAndNotify(cacheObservers)
+ }
+ }
+
+ /**
+ * removes cache observer
+ * @param observer state store acts as an observer
+ */
+ def removeCacheObserver(observer: CacheSizeObserver): Unit = {
+ this.synchronized {
+ cacheObservers.remove(observer)
+ evaluateNewCacheSizeAndNotify(cacheObservers)
+ }
+ }
+
+ /**
+   * Cache sizing strategy is simple: distribute maxEntriesAcrossCaches evenly across all observers.
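+   * e.g. with maxEntriesAcrossCaches = 10000 and 4 registered stores, each store gets a cap of 2500 entries.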
+ * @param observers list of changed observers
+ */
+ private def evaluateNewCacheSizeAndNotify(observers: mutable.HashSet[CacheSizeObserver]): Unit = {
+ //notify the observers with a change in their cache size
+ def notifyObservers(newMaxEntriesPerCache: Int): Unit = {
+ observers.foreach(obs => obs.onCacheSizeChange(newMaxEntriesPerCache))
+ }
+
+    if (observers.nonEmpty) {
+      val newMaxEntriesPerCache = maxEntriesAcrossCaches / observers.size
+ notifyObservers(newMaxEntriesPerCache)
+ }
+ }
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/SpanBufferMemoryStoreSupplier.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/SpanBufferMemoryStoreSupplier.scala
new file mode 100644
index 000000000..19322e33b
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/SpanBufferMemoryStoreSupplier.scala
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.store
+
+import com.expedia.www.haystack.trace.indexer.store.impl.SpanBufferMemoryStore
+import com.expedia.www.haystack.trace.indexer.store.traits.SpanBufferKeyValueStore
+
+class SpanBufferMemoryStoreSupplier(minTracesPerCache: Int,
+ maxEntriesAcrossStores: Int)
+ extends StoreSupplier[SpanBufferKeyValueStore] {
+
+ private val dynamicCacheSizer = new DynamicCacheSizer(minTracesPerCache, maxEntriesAcrossStores)
+
+ /**
+ * @return an in-memory kv store for maintaining buffered spans; this supplier always returns a
+ * purely in-memory store backed by the shared dynamic cache sizer
+ */
+ override def get(): SpanBufferKeyValueStore = new SpanBufferMemoryStore(dynamicCacheSizer)
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/StoreSupplier.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/StoreSupplier.scala
new file mode 100644
index 000000000..1111f0436
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/StoreSupplier.scala
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.store
+
+trait StoreSupplier[K] {
+ def get(): K
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/data/model/SpanBufferWithMetadata.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/data/model/SpanBufferWithMetadata.scala
new file mode 100644
index 000000000..5ec142db5
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/data/model/SpanBufferWithMetadata.scala
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trace.indexer.store.data.model
+
+import com.expedia.open.tracing.buffer.SpanBuffer
+
+/**
+ * @param builder protobuf builder for building span buffer.
+ * @param firstSpanSeenAt timestamp when the first span of a given traceId is seen
+ * @param firstSeenSpanKafkaOffset kafka offset of the first span seen for this traceId
+ */
+case class SpanBufferWithMetadata(builder: SpanBuffer.Builder, firstSpanSeenAt: Long, firstSeenSpanKafkaOffset: Long)
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/impl/SpanBufferMemoryStore.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/impl/SpanBufferMemoryStore.scala
new file mode 100644
index 000000000..a7bcf11d4
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/impl/SpanBufferMemoryStore.scala
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.store.impl
+
+import java.util
+import java.util.concurrent.atomic.AtomicInteger
+
+import com.codahale.metrics.Meter
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.indexer.metrics.AppMetricNames._
+import com.expedia.www.haystack.trace.indexer.store.DynamicCacheSizer
+import com.expedia.www.haystack.trace.indexer.store.data.model.SpanBufferWithMetadata
+import com.expedia.www.haystack.trace.indexer.store.traits.{CacheSizeObserver, EldestBufferedSpanEvictionListener, SpanBufferKeyValueStore}
+import org.slf4j.{Logger, LoggerFactory}
+
+import scala.collection.mutable
+
+object SpanBufferMemoryStore extends MetricsSupport {
+ protected val LOGGER: Logger = LoggerFactory.getLogger(SpanBufferMemoryStore.getClass)
+ protected val evictionMeter: Meter = metricRegistry.meter(STATE_STORE_EVICTION)
+}
+
+class SpanBufferMemoryStore(cacheSizer: DynamicCacheSizer) extends SpanBufferKeyValueStore with CacheSizeObserver {
+ import SpanBufferMemoryStore._
+
+ @volatile protected var open = false
+
+ // maxEntries will be adjusted by the dynamic cacheSizer; default it to a reasonable value of 10000
+ protected val maxEntries = new AtomicInteger(10000)
+ private val listeners: mutable.ListBuffer[EldestBufferedSpanEvictionListener] = mutable.ListBuffer()
+ private var totalSpansInMemStore: Int = 0
+ private var map: util.LinkedHashMap[String, SpanBufferWithMetadata] = _
+
+ override def init() {
+ cacheSizer.addCacheObserver(this)
+
+ // initialize the map
+ map = new util.LinkedHashMap[String, SpanBufferWithMetadata](cacheSizer.minTracesPerCache, 1.01f, false) {
+ override protected def removeEldestEntry(eldest: util.Map.Entry[String, SpanBufferWithMetadata]): Boolean = {
+ val evict = totalSpansInMemStore >= maxEntries.get()
+ if (evict) {
+ evictionMeter.mark()
+ totalSpansInMemStore -= eldest.getValue.builder.getChildSpansCount
+ listeners.foreach(listener => listener.onEvict(eldest.getKey, eldest.getValue))
+ }
+ evict
+ }
+ }
+
+ open = true
+
+ LOGGER.info("Span buffer memory store has been initialized")
+ }
+
+ /**
+ * removes and returns all the span buffers from the map that were recorded before the given timestamp
+ *
+ * @param timestamp timestamp before which all buffered spans should be read and removed
+ * @return the removed span buffers
+ */
+ override def getAndRemoveSpanBuffersOlderThan(timestamp: Long): mutable.ListBuffer[SpanBufferWithMetadata] = {
+ val result = mutable.ListBuffer[SpanBufferWithMetadata]()
+
+ val iterator = this.map.entrySet().iterator()
+ var done = false
+
+ while (!done && iterator.hasNext) {
+ val el = iterator.next()
+ if (el.getValue.firstSpanSeenAt <= timestamp) {
+ iterator.remove()
+ totalSpansInMemStore -= el.getValue.builder.getChildSpansCount
+ result += el.getValue
+ } else {
+ // here we apply a basic optimization and skip further iteration because all following records
+ // in this map will have a higher timestamp: when we insert the first span of a unique traceId
+ // into the map, we set its 'firstSpanSeenAt' attribute from the record's timestamp
+ done = true
+ }
+ }
+ result
+ }
+
+ override def addEvictionListener(l: EldestBufferedSpanEvictionListener): Unit = this.listeners += l
+
+ override def close(): Unit = {
+ if(open) {
+ LOGGER.info("Closing the span buffer memory store")
+ cacheSizer.removeCacheObserver(this)
+ open = false
+ }
+ }
+
+ def onCacheSizeChange(maxEntries: Int): Unit = {
+ LOGGER.info("Cache size has been changed to " + maxEntries)
+ this.maxEntries.set(maxEntries)
+ }
+
+ override def addOrUpdateSpanBuffer(traceId: String, span: Span, spanRecordTimestamp: Long, offset: Long): SpanBufferWithMetadata = {
+ var value = this.map.get(traceId)
+ if (value == null) {
+ val spanBuffer = SpanBuffer.newBuilder().setTraceId(span.getTraceId).addChildSpans(span)
+ value = SpanBufferWithMetadata(spanBuffer, spanRecordTimestamp, offset)
+ this.map.put(traceId, value)
+ } else {
+ value.builder.addChildSpans(span)
+ }
+ totalSpansInMemStore += 1
+ value
+ }
+
+ def totalSpans: Int = totalSpansInMemStore
+}
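A short lifecycle sketch of the store; the Span builder calls assume the standard traceId/spanId fields on the generated proto:

```scala
import com.expedia.open.tracing.Span
import com.expedia.www.haystack.trace.indexer.store.DynamicCacheSizer
import com.expedia.www.haystack.trace.indexer.store.impl.SpanBufferMemoryStore

object SpanBufferStoreSketch extends App {
  val store = new SpanBufferMemoryStore(new DynamicCacheSizer(1000, 10000))
  store.init()

  val span = Span.newBuilder().setTraceId("trace-1").setSpanId("span-1").build()
  store.addOrUpdateSpanBuffer("trace-1", span, spanRecordTimestamp = 100L, offset = 0L)

  // drains everything first seen at or before t=100; younger entries stay buffered
  val ripe = store.getAndRemoveSpanBuffersOlderThan(100L)
  println(ripe.map(_.builder.getTraceId)) // ListBuffer(trace-1)

  store.close()
}
```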
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/traits/CacheSizeObserver.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/traits/CacheSizeObserver.scala
new file mode 100644
index 000000000..69d1665f8
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/traits/CacheSizeObserver.scala
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.store.traits
+
+/**
+ * this is an observer that is called whenever the maxSize of the cache changes. This happens when kafka partitions
+ * are assigned or revoked, resulting in a change in the total number of state stores
+ */
+trait CacheSizeObserver {
+ def onCacheSizeChange(maxEntries: Int): Unit
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/traits/EldestBufferedSpanEvictionListener.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/traits/EldestBufferedSpanEvictionListener.scala
new file mode 100644
index 000000000..86a07b0fc
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/traits/EldestBufferedSpanEvictionListener.scala
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.store.traits
+
+import com.expedia.www.haystack.trace.indexer.store.data.model.SpanBufferWithMetadata
+
+/**
+ * the listener is called when the eldest buffered span is evicted from the cache
+ */
+trait EldestBufferedSpanEvictionListener {
+ def onEvict(key: String, value: SpanBufferWithMetadata): Unit
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/traits/SpanBufferKeyValueStore.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/traits/SpanBufferKeyValueStore.scala
new file mode 100644
index 000000000..69d430370
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/store/traits/SpanBufferKeyValueStore.scala
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.store.traits
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.trace.indexer.store.data.model.SpanBufferWithMetadata
+
+import scala.collection.mutable
+
+/**
+ * this trait provides key-value operations for buffering spans
+ */
+trait SpanBufferKeyValueStore {
+
+ /**
+ * get and remove all buffered span objects that were recorded before the given timestamp
+ * @param timestamp timestamp in millis
+ * @return the removed span buffers
+ */
+ def getAndRemoveSpanBuffersOlderThan(timestamp: Long): mutable.ListBuffer[SpanBufferWithMetadata]
+
+ /**
+ * add a listener to the store, that gets called when the eldest spanBuffer is evicted
+ * due to constraints of maxEntries in the store cache
+ * @param l listener object that is called by the store
+ */
+ def addEvictionListener(l: EldestBufferedSpanEvictionListener): Unit
+
+ /**
+ * adds a new spanBuffer for the traceId (if absent) in the store, else appends the span to the existing buffer
+ * @param traceId traceId
+ * @param span span object
+ * @param spanRecordTimestamp timestamp of the span record
+ * @param offset kafka offset of this span record
+ * @return the span buffer (with metadata) for this traceId
+ */
+ def addOrUpdateSpanBuffer(traceId: String, span: Span, spanRecordTimestamp: Long, offset: Long): SpanBufferWithMetadata
+
+ /**
+ * close the store
+ */
+ def close()
+
+ /**
+ * init the store
+ */
+ def init()
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/TraceWriter.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/TraceWriter.scala
new file mode 100644
index 000000000..39ed338ea
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/TraceWriter.scala
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.writers
+
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.commons.packer.PackedMessage
+
+trait TraceWriter extends AutoCloseable {
+
+ /**
+ * writes the span buffer to an external store such as a grpc backend, elasticsearch, or kafka
+ * @param traceId trace id
+ * @param packedSpanBuffer compressed serialized bytes of the span buffer object
+ * @param isLastSpanBuffer tells if this is the last record, so the writer can flush
+ */
+ def writeAsync(traceId: String, packedSpanBuffer: PackedMessage[SpanBuffer], isLastSpanBuffer: Boolean)
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ElasticSearchResultHandler.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ElasticSearchResultHandler.scala
new file mode 100644
index 000000000..5f999b7fd
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ElasticSearchResultHandler.scala
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.writers.es
+
+import com.codahale.metrics.{Meter, Timer}
+import com.expedia.www.haystack.commons.retries.RetryOperation
+import io.searchbox.client.JestResultHandler
+import io.searchbox.core.BulkResult
+import org.slf4j.{Logger, LoggerFactory}
+
+import scala.collection.JavaConverters._
+
+class ElasticSearchResultHandler(timer: Timer.Context, failureMeter: Meter, retryOp: RetryOperation.Callback)
+ extends JestResultHandler[BulkResult] {
+
+ protected val LOGGER: Logger = LoggerFactory.getLogger(classOf[ElasticSearchResultHandler])
+
+ /**
+ * this callback is invoked when the elasticsearch write completes with success or warnings
+ *
+ * @param result bulk result
+ */
+ def completed(result: BulkResult): Unit = {
+ timer.close()
+
+ // group the failed items by status and log once per status group
+ if (result.getFailedItems != null) {
+ result.getFailedItems.asScala.groupBy(_.status) foreach {
+ case (statusCode, failedItems) =>
+ failureMeter.mark(failedItems.size)
+ LOGGER.error(s"Index operation has failed with status=$statusCode, totalFailedItems=${failedItems.size}, " +
+ s"errorReason=${failedItems.head.errorReason}, errorType=${failedItems.head.errorType}")
+ }
+ }
+ retryOp.onResult(result)
+ }
+
+ /**
+ * this callback is invoked when the writes to elastic search fail completely
+ *
+ * @param ex the exception contains the reason of failure
+ */
+ def failed(ex: Exception): Unit = {
+ timer.close()
+ failureMeter.mark()
+ LOGGER.error("Fail to write the documents in elastic search with reason:", ex)
+ retryOp.onError(ex, retry = true)
+ }
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ElasticSearchWriter.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ElasticSearchWriter.scala
new file mode 100644
index 000000000..162472d20
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ElasticSearchWriter.scala
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2019, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.writers.es
+
+import java.util.concurrent.Semaphore
+
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.commons.retries.RetryOperation._
+import com.expedia.www.haystack.trace.commons.clients.es.AWSSigningJestClientFactory
+import com.expedia.www.haystack.trace.commons.config.entities.WhitelistIndexFieldConfiguration
+import com.expedia.www.haystack.trace.commons.packer.PackedMessage
+import com.expedia.www.haystack.trace.indexer.config.entities.ElasticSearchConfiguration
+import com.expedia.www.haystack.trace.indexer.metrics.AppMetricNames
+import com.expedia.www.haystack.trace.indexer.writers.TraceWriter
+import io.searchbox.client.config.HttpClientConfig
+import io.searchbox.client.{JestClient, JestClientFactory}
+import io.searchbox.core._
+import io.searchbox.params.Parameters
+import org.joda.time.format.DateTimeFormat
+import org.joda.time.{DateTime, DateTimeZone}
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+import scala.util.Try
+
+object ElasticSearchWriterUtils {
+
+ // creates an index name based on the event time. the following example illustrates the naming convention of
+ // elasticsearch indices:
+ // haystack-span-2017-08-30-1
+ def indexName(prefix: String, indexHourBucket: Int, eventTimeMicros: Long): String = {
+ val eventTime = new DateTime(eventTimeMicros / 1000, DateTimeZone.UTC)
+ val dataFormatter = DateTimeFormat.forPattern("yyyy-MM-dd")
+ val bucket: Int = eventTime.getHourOfDay / indexHourBucket
+ s"$prefix-${dataFormatter.print(eventTime)}-$bucket"
+ }
+}
+
+class ElasticSearchWriter(esConfig: ElasticSearchConfiguration, whitelistFieldConfig: WhitelistIndexFieldConfiguration)
+ extends TraceWriter with MetricsSupport {
+ private val LOGGER = LoggerFactory.getLogger(classOf[ElasticSearchWriter])
+
+ // meter that measures the write failures
+ private val esWriteFailureMeter = metricRegistry.meter(AppMetricNames.ES_WRITE_FAILURE)
+
+ // a timer that measures the amount of time it takes to complete one bulk write
+ private val esWriteTime = metricRegistry.timer(AppMetricNames.ES_WRITE_TIME)
+
+ // converts a span into an indexable document
+ private val documentGenerator = new IndexDocumentGenerator(whitelistFieldConfig)
+
+ // this semaphore controls the parallel writes to index store
+ private val inflightRequestsSemaphore = new Semaphore(esConfig.maxInFlightBulkRequests, true)
+
+ // initialize the elastic search client
+ private lazy val esClient: JestClient = {
+ LOGGER.info("Initializing the http elastic search client with endpoint={}", esConfig.endpoint)
+
+ val factory = {
+ if (esConfig.awsRequestSigningConfiguration.enabled) {
+ LOGGER.info("using AWSSigningJestClientFactory for es client")
+ new AWSSigningJestClientFactory(esConfig.awsRequestSigningConfiguration)
+ } else {
+ LOGGER.info("using JestClientFactory for es client")
+ new JestClientFactory()
+ }
+ }
+ val builder = new HttpClientConfig.Builder(esConfig.endpoint)
+ .multiThreaded(true)
+ .connTimeout(esConfig.connectionTimeoutMillis)
+ .readTimeout(esConfig.readTimeoutMillis)
+ .defaultMaxTotalConnectionPerRoute(esConfig.maxConnectionsPerRoute)
+
+ if (esConfig.username.isDefined && esConfig.password.isDefined) {
+ builder.defaultCredentials(esConfig.username.get, esConfig.password.get)
+ }
+
+ factory.setHttpClientConfig(builder.build())
+ val client = factory.getObject
+ new IndexTemplateHandler(client, esConfig.indexTemplateJson, esConfig.indexType, whitelistFieldConfig).run()
+ client
+ }
+
+ private val bulkBuilder = new ThreadSafeBulkBuilder(esConfig.maxDocsInBulk, esConfig.maxBulkDocSizeInBytes)
+
+ override def close(): Unit = {
+ LOGGER.info("Closing the elastic search client now.")
+ Try(esClient.shutdownClient())
+ }
+
+ /**
+ * converts the spans to an index document and writes it to elasticsearch. If the parallel writes
+ * exceed the max inflight requests, we block, which puts backpressure on the upstream
+ *
+ * @param traceId trace id
+ * @param packedSpanBuffer list of spans belonging to this traceId - packed bytes of span buffer
+ * @param isLastSpanBuffer tells if this is the last record, so the writer can flush
+ * @return
+ */
+ override def writeAsync(traceId: String, packedSpanBuffer: PackedMessage[SpanBuffer], isLastSpanBuffer: Boolean): Unit = {
+ var isSemaphoreAcquired = false
+
+ try {
+ val eventTimeInMicros = packedSpanBuffer.protoObj.getChildSpansList.asScala.head.getStartTime
+ val indexName = ElasticSearchWriterUtils.indexName(esConfig.indexNamePrefix, esConfig.indexHourBucket, eventTimeInMicros)
+ addIndexOperation(traceId, packedSpanBuffer.protoObj, indexName, isLastSpanBuffer) match {
+ case Some(bulkToDispatch) =>
+ inflightRequestsSemaphore.acquire()
+ isSemaphoreAcquired = true
+
+ // execute the request async with retry
+ withRetryBackoff((retryCallback) => {
+ esClient.executeAsync(bulkToDispatch,
+ new ElasticSearchResultHandler(esWriteTime.time(), esWriteFailureMeter, retryCallback))
+ },
+ esConfig.retryConfig,
+ onSuccess = (_: Any) => inflightRequestsSemaphore.release(),
+ onFailure = (ex) => {
+ inflightRequestsSemaphore.release()
+ LOGGER.error("Fail to write to ES after {} retry attempts", esConfig.retryConfig.maxRetries, ex)
+ })
+ case _ =>
+ }
+ } catch {
+ case ex: Exception =>
+ if (isSemaphoreAcquired) inflightRequestsSemaphore.release()
+ esWriteFailureMeter.mark()
+ LOGGER.error("Failed to write spans to elastic search with exception", ex)
+ }
+ }
+
+ private def addIndexOperation(traceId: String, spanBuffer: SpanBuffer, indexName: String, forceBulkCreate: Boolean): Option[Bulk] = {
+ // add all the spans as one document
+ val idxDocument = documentGenerator.createIndexDocument(traceId, spanBuffer)
+
+ idxDocument match {
+ case Some(doc) =>
+ val action: Index = new Index.Builder(doc.json)
+ .index(indexName)
+ .`type`(esConfig.indexType)
+ .setParameter(Parameters.CONSISTENCY, esConfig.consistencyLevel)
+ .build()
+ bulkBuilder.addAction(action, doc.json.getBytes("utf-8").length, forceBulkCreate)
+ case _ =>
+ LOGGER.warn("Skipping the span buffer record for index operation for traceId={}!", traceId)
+ None
+ }
+ }
+}
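A worked example of the hour bucketing, assuming event times are microseconds since the epoch (as the division by 1000 above implies):

```scala
import com.expedia.www.haystack.trace.indexer.writers.es.ElasticSearchWriterUtils

object IndexNameSketch extends App {
  // 2017-08-30T07:00:00Z expressed in microseconds since the epoch
  val sevenAmMicros = 1504076400000000L

  // with indexHourBucket = 6, hour 7 falls into bucket 1 (hours 6..11)
  println(ElasticSearchWriterUtils.indexName("haystack-span", 6, sevenAmMicros))
  // prints: haystack-span-2017-08-30-1
}
```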
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/IndexDocumentGenerator.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/IndexDocumentGenerator.scala
new file mode 100644
index 000000000..ec53bffc1
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/IndexDocumentGenerator.scala
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.writers.es
+
+import java.util.concurrent.TimeUnit
+
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.commons.clients.es.document.TraceIndexDoc
+import com.expedia.www.haystack.trace.commons.clients.es.document.TraceIndexDoc.{OPERATION_KEY_NAME, SERVICE_KEY_NAME, TagValue}
+import com.expedia.www.haystack.trace.commons.config.entities.IndexFieldType.IndexFieldType
+import com.expedia.www.haystack.trace.commons.config.entities.{IndexFieldType, WhitelistIndexFieldConfiguration}
+import org.apache.commons.lang3.StringUtils
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+import scala.util.{Failure, Success, Try}
+
+class IndexDocumentGenerator(config: WhitelistIndexFieldConfiguration) extends MetricsSupport {
+
+ private val MIN_DURATION_FOR_TRUNCATION = TimeUnit.SECONDS.toMicros(20)
+
+ /**
+ * @param traceId trace id the span buffer belongs to
+ * @param spanBuffer a span buffer object
+ * @return index document that can be put in elasticsearch
+ */
+ def createIndexDocument(traceId: String, spanBuffer: SpanBuffer): Option[TraceIndexDoc] = {
+ // We maintain a white list of tags that are to be indexed. The whitelist is maintained as a configuration
+ // in an external database (outside this app boundary). However, the app periodically reads this whitelist config
+ // and applies it to the new spans that are read.
+ val spanIndices = mutable.ListBuffer[mutable.Map[String, Any]]()
+
+ var traceStartTime = Long.MaxValue
+ var rootDuration = 0L
+
+ spanBuffer.getChildSpansList.asScala filter isValidForIndex foreach(span => {
+
+ // calculate the trace starttime based on the minimum starttime observed across all child spans.
+ traceStartTime = Math.min(traceStartTime, truncateToSecondGranularity(span.getStartTime))
+ if (StringUtils.isEmpty(span.getParentSpanId)) rootDuration = span.getDuration // proto string getters never return null; an unset parent span id is the empty string
+
+ val spanIndexDoc = spanIndices
+ .find(sp => sp(OPERATION_KEY_NAME).equals(span.getOperationName) && sp(SERVICE_KEY_NAME).equals(span.getServiceName))
+ .getOrElse({
+ val newSpanIndexDoc = mutable.Map[String, Any](
+ SERVICE_KEY_NAME -> span.getServiceName,
+ OPERATION_KEY_NAME -> span.getOperationName)
+ spanIndices.append(newSpanIndexDoc)
+ newSpanIndexDoc
+ })
+ updateSpanIndexDoc(spanIndexDoc, span)
+ })
+ if (spanIndices.nonEmpty) Some(TraceIndexDoc(traceId, rootDuration, traceStartTime, spanIndices)) else None
+ }
+
+ private def isValidForIndex(span: Span): Boolean = {
+ StringUtils.isNotEmpty(span.getServiceName) && StringUtils.isNotEmpty(span.getOperationName)
+ }
+
+ /**
+ * updates the span index document in place. serviceName, operationName, duration and tags (depending upon the
+ * configuration) are folded into the index document.
+ * @param spanIndexDoc a span index document
+ * @param span a span object
+ */
+ private def updateSpanIndexDoc(spanIndexDoc: mutable.Map[String, Any], span: Span): Unit = {
+ def append(key: String, value: Any): Unit = {
+ spanIndexDoc.getOrElseUpdate(key, mutable.Set[Any]())
+ .asInstanceOf[mutable.Set[Any]]
+ .add(value)
+ }
+
+ for (tag <- span.getTagsList.asScala;
+ normalizedTagKey = tag.getKey.toLowerCase;
+ indexField = config.indexFieldMap.get(normalizedTagKey); if indexField != null && indexField.enabled;
+ v = readTagValue(tag);
+ indexableValue = transformValueForIndexing(indexField.`type`, v); if indexableValue.isDefined) {
+ append(indexField.name, indexableValue)
+ }
+
+ import com.expedia.www.haystack.trace.commons.clients.es.document.TraceIndexDoc._
+ append(DURATION_KEY_NAME, adjustDurationForLowCardinality(span.getDuration))
+ append(START_TIME_KEY_NAME, truncateToSecondGranularity(span.getStartTime))
+ }
+
+
+ /**
+ * this method adjusts the tag's value to the indexing field type. Take an example of 'httpstatus' tag
+ * that we always want to index as a 'long' type in elastic search. Now services may send this tag value as string,
+ * hence in this method, we will transform the tag value to its expected type for e.g. long.
+ * In case we fail to adjust the type, we ignore the tag for indexing.
+ * @param fieldType expected field type that is valid for indexing
+ * @param value tag value
+ * @return tag value with adjusted(expected) type
+ */
+ private def transformValueForIndexing(fieldType: IndexFieldType, value: TagValue): Option[TagValue] = {
+ Try (fieldType match {
+ case IndexFieldType.string => value.toString
+ case IndexFieldType.long | IndexFieldType.int => value.toString.toLong
+ case IndexFieldType.bool => value.toString.toBoolean
+ case IndexFieldType.double => value.toString.toDouble
+ case _ => value
+ }) match {
+ case Success(result) => Some(result)
+ case Failure(_) =>
+ // TODO: should we also log the tag name? if the input is malformed, we might end up logging too many errors
+ None
+ }
+ }
+
+ /**
+ * reads the tag value according to its type
+ * @param tag span tag
+ * @return the tag value (Any)
+ */
+ private def readTagValue(tag: Tag): TagValue = {
+ import com.expedia.open.tracing.Tag.TagType._
+
+ tag.getType match {
+ case BOOL => tag.getVBool
+ case STRING => tag.getVStr
+ case LONG => tag.getVLong
+ case DOUBLE => tag.getVDouble
+ case BINARY => tag.getVBytes.toStringUtf8
+ case _ => throw new RuntimeException(s"Fail to understand the span tag type ${tag.getType} !!!")
+ }
+ }
+
+ private def truncateToSecondGranularity(value: Long): Long = {
+ TimeUnit.SECONDS.toMicros(TimeUnit.MICROSECONDS.toSeconds(value))
+ }
+
+ private def adjustDurationForLowCardinality(value: Long): Long = {
+ // drop the sub-second part only when it accounts for less than 5% of the value (i.e. duration > 20s)
+ if (value > MIN_DURATION_FOR_TRUNCATION) {
+ truncateToSecondGranularity(value)
+ } else {
+ value
+ }
+ }
+}
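The low-cardinality adjustment in numbers, as a standalone sketch mirroring the two private helpers above (all values in microseconds):

```scala
import java.util.concurrent.TimeUnit

object DurationAdjustSketch extends App {
  val minDurationForTruncation = TimeUnit.SECONDS.toMicros(20) // 20,000,000 µs

  def truncateToSecondGranularity(value: Long): Long =
    TimeUnit.SECONDS.toMicros(TimeUnit.MICROSECONDS.toSeconds(value))

  def adjust(value: Long): Long =
    if (value > minDurationForTruncation) truncateToSecondGranularity(value) else value

  println(adjust(1234567L))  // 1.23s is below the 20s cutoff: kept exactly
  println(adjust(25123456L)) // 25.12s: truncated to 25000000, the dropped part is < 5%
}
```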
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/IndexTemplateHandler.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/IndexTemplateHandler.scala
new file mode 100644
index 000000000..dc6470e88
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/IndexTemplateHandler.scala
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2019 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.writers.es
+
+import java.util
+
+import com.expedia.www.haystack.trace.commons.config.entities.IndexFieldType.IndexFieldType
+import com.expedia.www.haystack.trace.commons.config.entities.{IndexFieldType, WhitelistIndexFieldConfiguration}
+import com.fasterxml.jackson.core.`type`.TypeReference
+import com.fasterxml.jackson.databind.ObjectMapper
+import io.searchbox.client.JestClient
+import io.searchbox.indices.template.{GetTemplate, PutTemplate}
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+
+class IndexTemplateHandler(client: JestClient,
+ applyTemplate: Option[String],
+ indexType: String,
+ whitelistFieldConfig: WhitelistIndexFieldConfiguration) {
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[IndexTemplateHandler])
+
+ private val ES_TEMPLATE_NAME = "spans-index-template"
+ private val mapper = new ObjectMapper()
+
+ def run() {
+ applyTemplate match {
+ case Some(template) => updateESTemplate(template)
+ case _ => /* maybe the template is set from outside the app */
+ }
+
+ whitelistFieldConfig.addOnChangeListener(() => {
+ LOGGER.info("applying the new elastic template as whitelist fields have changed from query perspective like enableRangeQuery")
+ readTemplate() match {
+ case Some(template) => updateESTemplate(template)
+ case _ =>
+ }
+ })
+ }
+
+ private def esDataType(`type`: IndexFieldType): String = {
+ `type` match {
+ case IndexFieldType.int => "integer"
+ case IndexFieldType.string => "keyword"
+ case _ => `type`.toString
+ }
+ }
+
+ private def updateESTemplate(templateJson: String): Unit = {
+ val esTemplate: util.HashMap[String, Object] = mapper.readValue(templateJson, new TypeReference[util.HashMap[String, Object]]() {})
+ val mappings = esTemplate.get("mappings").asInstanceOf[util.HashMap[String, Object]]
+ val propertyMap =
+ mappings.get(indexType).asInstanceOf[util.HashMap[String, Object]]
+ .get("properties").asInstanceOf[util.HashMap[String, Object]]
+ .get(indexType).asInstanceOf[util.HashMap[String, Object]]
+ .get("properties").asInstanceOf[util.HashMap[String, Object]]
+
+ whitelistFieldConfig.whitelistIndexFields.foreach(wf => {
+ val prop = propertyMap.get(wf.name)
+ if (prop != null) {
+ if (wf.enabled && wf.enableRangeQuery) {
+ propertyMap.put(wf.name, Map("type" -> esDataType(wf.`type`), "doc_values" -> true, "norms" -> false).asJava)
+ } else {
+ prop.asInstanceOf[util.HashMap[String, Object]].put("doc_values", Boolean.box(wf.enableRangeQuery))
+ }
+ }
+ })
+
+ val newTemplateJson = mapper.writeValueAsString(esTemplate)
+
+ LOGGER.info(s"setting the template with name $ES_TEMPLATE_NAME - $newTemplateJson")
+
+ val putTemplateRequest = new PutTemplate.Builder(ES_TEMPLATE_NAME, newTemplateJson).build()
+ val result = client.execute(putTemplateRequest)
+ if (!result.isSucceeded) {
+ throw new RuntimeException(s"Fail to apply the following template to elastic search with reason=${result.getErrorMessage}")
+ }
+ }
+
+ private def readTemplate(): Option[String] = {
+ val request = new GetTemplate.Builder(ES_TEMPLATE_NAME).build()
+ val result = client.execute(request)
+ if (result.isSucceeded) {
+ Some(result.getJsonObject.get(ES_TEMPLATE_NAME).toString)
+ } else {
+ LOGGER.error(s"Fail to read the template with name $ES_TEMPLATE_NAME for reason ${result.getErrorMessage}")
+ None
+ }
+ }
+}
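For reference, the traversal in updateESTemplate implies a template JSON shaped roughly as below, assuming indexType = "spans"; only the nested mappings/properties path is implied by the code, the concrete field entries are illustrative:

```scala
object TemplateShapeSketch {
  val templateJson: String =
    """{
      |  "template": "haystack-span*",
      |  "mappings": {
      |    "spans": {
      |      "properties": {
      |        "spans": {
      |          "properties": {
      |            "servicename": { "type": "keyword", "doc_values": true }
      |          }
      |        }
      |      }
      |    }
      |  }
      |}""".stripMargin
}
```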
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ServiceMetadataDocumentGenerator.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ServiceMetadataDocumentGenerator.scala
new file mode 100644
index 000000000..f0e88269c
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ServiceMetadataDocumentGenerator.scala
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2018 Expedia, Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.writers.es
+
+import java.time.Instant
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.commons.clients.es.document.ServiceMetadataDoc
+import com.expedia.www.haystack.trace.commons.utils.SpanUtils
+import com.expedia.www.haystack.trace.indexer.config.entities.ServiceMetadataWriteConfiguration
+import org.apache.commons.lang3.StringUtils
+
+import scala.collection.mutable
+
+class ServiceMetadataDocumentGenerator(config: ServiceMetadataWriteConfiguration) extends MetricsSupport {
+
+ private var serviceMetadataMap = new mutable.HashMap[String, mutable.Set[String]]()
+ private var allOperationCount: Int = 0
+ private var lastFlushInstant = Instant.MIN
+
+ private def shouldFlush: Boolean = {
+ config.flushIntervalInSec == 0 || Instant.now().minusSeconds(config.flushIntervalInSec).isAfter(lastFlushInstant)
+ }
+
+ private def areStatementsReadyToBeExecuted(): Seq[ServiceMetadataDoc] = {
+ if (serviceMetadataMap.nonEmpty && (shouldFlush || allOperationCount > config.flushOnMaxOperationCount)) {
+ val statements = serviceMetadataMap.flatMap {
+ case (serviceName, operationList) =>
+ createServiceMetadataDoc(serviceName, operationList)
+ }
+
+ lastFlushInstant = Instant.now()
+ serviceMetadataMap = new mutable.HashMap[String, mutable.Set[String]]()
+ allOperationCount = 0
+ statements.toSeq
+ } else {
+ Nil
+ }
+ }
+
+ /**
+ * get the list of unique service metadata documents contained in the list of spans
+ *
+ * @param spans list of spans to extract service metadata from
+ * @return the metadata documents that are ready to be flushed, or Nil
+ */
+ def getAndUpdateServiceMetadata(spans: Iterable[Span]): Seq[ServiceMetadataDoc] = {
+ this.synchronized {
+ spans.foreach(span => {
+ if (StringUtils.isNotEmpty(span.getServiceName) && StringUtils.isNotEmpty(span.getOperationName)) {
+ val operationsList = serviceMetadataMap.getOrElseUpdate(span.getServiceName, mutable.Set[String]())
+ if (operationsList.add(span.getOperationName)) {
+ allOperationCount += 1
+ }
+ }
+ })
+ areStatementsReadyToBeExecuted()
+ }
+ }
+
+ /**
+ * @return index documents that can be put in elasticsearch, one per operation
+ */
+ def createServiceMetadataDoc(serviceName: String, operationList: mutable.Set[String]): List[ServiceMetadataDoc] = {
+ operationList.map(operationName => ServiceMetadataDoc(serviceName, operationName)).toList
+ }
+}
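The flush trigger in isolation, as a self-contained sketch with the same semantics as shouldFlush above:

```scala
import java.time.Instant

object FlushPredicateSketch extends App {
  def shouldFlush(flushIntervalInSec: Long, lastFlushInstant: Instant): Boolean =
    flushIntervalInSec == 0 || Instant.now().minusSeconds(flushIntervalInSec).isAfter(lastFlushInstant)

  println(shouldFlush(0, Instant.now()))  // true: interval 0 flushes on every call
  println(shouldFlush(60, Instant.now())) // false: the last flush happened just now
  println(shouldFlush(60, Instant.MIN))   // true: never flushed before
}
```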
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ServiceMetadataWriter.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ServiceMetadataWriter.scala
new file mode 100644
index 000000000..df28e7217
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ServiceMetadataWriter.scala
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2019 Expedia, Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.writers.es
+
+import java.util.concurrent.{Semaphore, TimeUnit}
+
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.commons.retries.RetryOperation.withRetryBackoff
+import com.expedia.www.haystack.trace.commons.clients.es.AWSSigningJestClientFactory
+import com.expedia.www.haystack.trace.commons.clients.es.document.ServiceMetadataDoc
+import com.expedia.www.haystack.trace.commons.config.entities.AWSRequestSigningConfiguration
+import com.expedia.www.haystack.trace.commons.packer.PackedMessage
+import com.expedia.www.haystack.trace.indexer.config.entities.ServiceMetadataWriteConfiguration
+import com.expedia.www.haystack.trace.indexer.metrics.AppMetricNames
+import com.expedia.www.haystack.trace.indexer.writers.TraceWriter
+import io.searchbox.client.config.HttpClientConfig
+import io.searchbox.client.{JestClient, JestClientFactory}
+import io.searchbox.core.{Bulk, Index}
+import io.searchbox.indices.template.PutTemplate
+import io.searchbox.params.Parameters
+import org.joda.time.format.DateTimeFormat
+import org.joda.time.{DateTime, DateTimeZone}
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+import scala.util.Try
+
+object ServiceMetadataUtils {
+
+ // creates an index name based on the current date. the following example illustrates the naming convention of
+ // elasticsearch indices for service metadata:
+ // service-metadata-2019-02-20
+ def indexName(prefix: String): String = {
+ val eventTime = new DateTime(DateTimeZone.UTC)
+ val dataFormatter = DateTimeFormat.forPattern("yyyy-MM-dd")
+ s"$prefix-${dataFormatter.print(eventTime)}"
+ }
+}
+
+class ServiceMetadataWriter(config: ServiceMetadataWriteConfiguration, awsRequestSigningConfig: AWSRequestSigningConfiguration)
+ extends TraceWriter with MetricsSupport {
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[ServiceMetadataWriter])
+
+ // a timer that measures the amount of time it takes to complete one bulk write
+ private val writeTimer = metricRegistry.timer(AppMetricNames.METADATA_WRITE_TIME)
+
+ // meter that measures the write failures
+ private val failureMeter = metricRegistry.meter(AppMetricNames.METADATA_WRITE_FAILURE)
+
+ // converts a serviceMetadata object into an indexable document
+ private val documentGenerator = new ServiceMetadataDocumentGenerator(config)
+
+
+ // this semaphore controls the parallel writes to service metadata index
+ private val inflightRequestsSemaphore = new Semaphore(config.maxInFlightBulkRequests, true)
+
+ // initialize the elastic search client
+ private val esClient: JestClient = {
+ LOGGER.info("Initializing the http elastic search client with endpoint={}", config.esEndpoint)
+
+ val factory = {
+ if (awsRequestSigningConfig.enabled) {
+ LOGGER.info("using AWSSigningJestClientFactory for es client")
+ new AWSSigningJestClientFactory(awsRequestSigningConfig)
+ } else {
+ LOGGER.info("using JestClientFactory for es client")
+ new JestClientFactory()
+ }
+ }
+ val builder = new HttpClientConfig.Builder(config.esEndpoint)
+ .multiThreaded(true)
+ .maxConnectionIdleTime(config.flushIntervalInSec + 10, TimeUnit.SECONDS)
+ .connTimeout(config.connectionTimeoutMillis)
+ .readTimeout(config.readTimeoutMillis)
+
+ if (config.username.isDefined && config.password.isDefined) {
+ builder.defaultCredentials(config.username.get, config.password.get)
+ }
+
+ factory.setHttpClientConfig(builder.build())
+ factory.getObject
+ }
+
+ private val bulkBuilder = new ThreadSafeBulkBuilder(config.maxDocsInBulk, config.maxBulkDocSizeInBytes)
+
+ if (config.indexTemplateJson.isDefined) applyTemplate(config.indexTemplateJson.get)
+
+ override def close(): Unit = {
+ LOGGER.info("Closing the elastic search client now.")
+ Try(esClient.shutdownClient())
+ }
+
+ /**
+ * extracts service metadata documents from the spans and writes them to elasticsearch. If the parallel
+ * writes exceed the max inflight requests, we block, which puts backpressure on the upstream
+ *
+ * @param traceId trace id
+ * @param packedSpanBuffer list of spans belonging to this traceId - packed bytes of span buffer
+ * @param isLastSpanBuffer tells if this is the last record, so the writer can flush
+ * @return
+ */
+ override def writeAsync(traceId: String, packedSpanBuffer: PackedMessage[SpanBuffer], isLastSpanBuffer: Boolean): Unit = {
+ var isSemaphoreAcquired = false
+ val idxDocument: Seq[ServiceMetadataDoc] = documentGenerator.getAndUpdateServiceMetadata(packedSpanBuffer.protoObj.getChildSpansList.asScala)
+ idxDocument.foreach(document => {
+ try {
+ addIndexOperation(traceId, document, ServiceMetadataUtils.indexName(config.indexName)) match {
+ case Some(bulkToDispatch) =>
+ inflightRequestsSemaphore.acquire()
+ isSemaphoreAcquired = true
+
+ // execute the request async with retry
+ withRetryBackoff(retryCallback => {
+ esClient.executeAsync(bulkToDispatch, new ElasticSearchResultHandler(writeTimer.time(), failureMeter, retryCallback))
+ },
+ config.retryConfig,
+ onSuccess = (_: Any) => inflightRequestsSemaphore.release(),
+ onFailure = ex => {
+ inflightRequestsSemaphore.release()
+ LOGGER.error("Fail to write to ES after {} retry attempts", config.retryConfig.maxRetries, ex)
+ })
+ case _ =>
+ }
+ } catch {
+ case ex: Exception =>
+ if (isSemaphoreAcquired) inflightRequestsSemaphore.release()
+ failureMeter.mark()
+ LOGGER.error("Failed to write spans to elastic search with exception", ex)
+ }
+ })
+ }
+
+ // add all the service-operation combinations in one bulk
+ private def addIndexOperation(traceId: String, document: ServiceMetadataDoc, indexName: String): Option[Bulk] = {
+ val action: Index = new Index.Builder(document.json)
+ .index(indexName)
+ .`type`(config.indexType)
+ .setParameter(Parameters.CONSISTENCY, config.consistencyLevel)
+ .id(s"${document.servicename}_${document.operationname}")
+ .build()
+ bulkBuilder.addAction(action, document.json.getBytes("utf-8").length, forceBulkCreate = false)
+ }
+
+ private def applyTemplate(templateJson: String) {
+ val putTemplateRequest = new PutTemplate.Builder("service-metadata-template", templateJson).build()
+ val result = esClient.execute(putTemplateRequest)
+ if (!result.isSucceeded) {
+ throw new RuntimeException(s"Fail to apply the following template to elastic search with reason=${result.getErrorMessage}")
+ }
+ }
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ThreadSafeBulkBuilder.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ThreadSafeBulkBuilder.scala
new file mode 100644
index 000000000..c8460bfd8
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/es/ThreadSafeBulkBuilder.scala
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.writers.es
+
+import java.util
+
+import io.searchbox.action.BulkableAction
+import io.searchbox.core.{Bulk, DocumentResult}
+
+/**
+ * a thread-safe builder that batches index actions into bulk requests
+ */
+class ThreadSafeBulkBuilder(maxDocuments: Int, maxDocSizeInBytes: Int) {
+ private var bulkActions = new util.LinkedList[BulkableAction[DocumentResult]]
+ private var docsCount = 0
+ private var totalSizeInBytes = 0
+
+ /**
+ * adds the action to the bulk builder; returns a bulk if any of the following conditions is true:
+ * a) the total doc count in the bulk exceeds the allowed setting
+ * b) the total size of the docs in the bulk exceeds the allowed setting
+ * c) the bulk is force-created
+ *
+ * @param action index action
+ * @param sizeInBytes total size of the json in the index action
+ * @param forceBulkCreate force building the existing bulk
+ * @return
+ */
+ def addAction(action: BulkableAction[DocumentResult],
+ sizeInBytes: Int,
+ forceBulkCreate: Boolean): Option[Bulk] = {
+ var dispatchActions: util.LinkedList[BulkableAction[DocumentResult]] = null
+
+ this.synchronized {
+ bulkActions.add(action)
+ docsCount += 1
+ totalSizeInBytes += sizeInBytes
+
+ if (forceBulkCreate ||
+ docsCount >= maxDocuments ||
+ totalSizeInBytes >= maxDocSizeInBytes) {
+ dispatchActions = getAndResetBulkActions()
+ }
+ }
+
+ if (dispatchActions == null) {
+ None
+ } else {
+ Some(new Bulk.Builder().addAction(dispatchActions).build())
+ }
+ }
+
+ private def getAndResetBulkActions(): util.LinkedList[BulkableAction[DocumentResult]] = {
+ val dispatchActions = bulkActions
+ bulkActions = new util.LinkedList[BulkableAction[DocumentResult]]
+ docsCount = 0
+ totalSizeInBytes = 0
+ dispatchActions
+ }
+
+ def getDocsCount: Int = docsCount
+
+ def getTotalSizeInBytes: Int = totalSizeInBytes
+}
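A small sketch of the threshold behavior, assuming it compiles alongside ThreadSafeBulkBuilder (index name and document are illustrative):

```scala
import io.searchbox.core.Index

object BulkBuilderSketch extends App {
  val builder = new ThreadSafeBulkBuilder(maxDocuments = 2, maxDocSizeInBytes = 1024 * 1024)

  val doc = """{"servicename":"svc-a"}"""
  def action = new Index.Builder(doc).index("service-metadata-2019-02-20").`type`("metadata").build()

  val first = builder.addAction(action, doc.getBytes("utf-8").length, forceBulkCreate = false)
  println(first.isDefined)  // false: only one doc buffered so far

  val second = builder.addAction(action, doc.getBytes("utf-8").length, forceBulkCreate = false)
  println(second.isDefined) // true: doc count reached maxDocuments, a Bulk is handed back
}
```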
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/grpc/GrpcTraceWriter.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/grpc/GrpcTraceWriter.scala
new file mode 100644
index 000000000..965ff2675
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/grpc/GrpcTraceWriter.scala
@@ -0,0 +1,98 @@
+package com.expedia.www.haystack.trace.indexer.writers.grpc
+
+/*
+ * Copyright 2018 Expedia, Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+
+import java.util.concurrent.Semaphore
+
+import com.expedia.open.tracing.backend.{StorageBackendGrpc, TraceRecord, WriteSpansRequest}
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.commons.packer.PackedMessage
+import com.expedia.www.haystack.trace.indexer.config.entities.TraceBackendConfiguration
+import com.expedia.www.haystack.trace.indexer.metrics.AppMetricNames
+import com.expedia.www.haystack.trace.indexer.writers.TraceWriter
+import com.google.protobuf.ByteString
+import io.grpc.ManagedChannelBuilder
+import org.slf4j.LoggerFactory
+
+import scala.concurrent.ExecutionContextExecutor
+import scala.util.Try
+
+class GrpcTraceWriter(config: TraceBackendConfiguration)(implicit val dispatcher: ExecutionContextExecutor)
+ extends TraceWriter with MetricsSupport {
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[GrpcTraceWriter])
+ private val writeTimer = metricRegistry.timer(AppMetricNames.BACKEND_WRITE_TIME)
+ private val writeFailures = metricRegistry.meter(AppMetricNames.BACKEND_WRITE_FAILURE)
+
+ private val channel = {
+ val grpcConfig = config.clientConfig.backends.head
+ ManagedChannelBuilder.forAddress(grpcConfig.host, grpcConfig.port)
+ .usePlaintext(true)
+ .build()
+ }
+ private val client = StorageBackendGrpc.newStub(channel)
+
+ // this semaphore controls the parallel writes to trace-backend
+ private val inflightRequestsSemaphore = new Semaphore(config.maxInFlightRequests, true)
+
+ private def execute(traceId: String, packedSpanBuffer: PackedMessage[SpanBuffer]): Unit = {
+ val timer = writeTimer.time()
+ val singleRecord = TraceRecord
+ .newBuilder()
+ .setTraceId(traceId)
+ .setTimestamp(System.currentTimeMillis())
+ .setSpans(ByteString.copyFrom(packedSpanBuffer.packedDataBytes))
+ val writeSpansRequest = WriteSpansRequest.newBuilder().addRecords(singleRecord).build()
+
+ // execute the request async with retry
+ client.writeSpans(writeSpansRequest, new WriteSpansResponseObserver(timer, inflightRequestsSemaphore))
+ }
+
+ /**
+ * writes the traceId and its spans to the trace-backend, using the current timestamp as the sort key for
+ * writes to the same traceId. If the parallel writes exceed the max inflight requests, we block, which
+ * puts backpressure on the upstream
+ *
+ * @param traceId : trace id
+ * @param packedSpanBuffer : list of spans belonging to this traceId - span buffer
+ * @param isLastSpanBuffer tells if this is the last record, so the writer can flush
+ * @return
+ */
+ override def writeAsync(traceId: String, packedSpanBuffer: PackedMessage[SpanBuffer], isLastSpanBuffer: Boolean): Unit = {
+ var isSemaphoreAcquired = false
+
+ try {
+ inflightRequestsSemaphore.acquire()
+ isSemaphoreAcquired = true
+ /* write spanBuffer for a given traceId */
+ execute(traceId, packedSpanBuffer)
+ } catch {
+ case ex: Exception =>
+ LOGGER.error("Fail to write the spans to trace-backend with exception", ex)
+ writeFailures.mark()
+ if (isSemaphoreAcquired) inflightRequestsSemaphore.release()
+ }
+ }
+
+ override def close(): Unit = {
+ LOGGER.info("Closing backend client now..")
+ Try(channel.shutdown())
+ }
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/grpc/WriteSpansResponseObserver.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/grpc/WriteSpansResponseObserver.scala
new file mode 100644
index 000000000..44035258f
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/grpc/WriteSpansResponseObserver.scala
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.writers.grpc
+
+import java.util.concurrent.Semaphore
+
+import com.codahale.metrics.Timer
+import com.expedia.open.tracing.backend.WriteSpansResponse
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.indexer.metrics.AppMetricNames
+import io.grpc.stub.StreamObserver
+import org.slf4j.LoggerFactory
+
+
+class WriteSpansResponseObserver(timer: Timer.Context, inflightRequest: Semaphore) extends StreamObserver[WriteSpansResponse] with MetricsSupport {
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[WriteSpansResponseObserver])
+ private val writeFailures = metricRegistry.meter(AppMetricNames.BACKEND_WRITE_FAILURE)
+
+ /**
+ * this is invoked when the grpc async write completes.
+ * We measure the time the write operation takes and record any failures.
+ */
+ override def onNext(writeSpanResponse: WriteSpansResponse): Unit = {
+ timer.close()
+ inflightRequest.release()
+ }
+
+ override def onError(error: Throwable): Unit = {
+ timer.close()
+ inflightRequest.release()
+ writeFailures.mark()
+ LOGGER.error(s"Fail to write to trace-backend with exception ", error)
+ }
+
+ override def onCompleted(): Unit = {
+ LOGGER.debug(s"Closing WriteSpans Trace Observer")
+ }
+}
diff --git a/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/kafka/KafkaWriter.scala b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/kafka/KafkaWriter.scala
new file mode 100644
index 000000000..fa8b0e8fe
--- /dev/null
+++ b/traces/indexer/src/main/scala/com/expedia/www/haystack/trace/indexer/writers/kafka/KafkaWriter.scala
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.writers.kafka
+
+import java.util.Properties
+
+import com.codahale.metrics.Meter
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.commons.packer.PackedMessage
+import com.expedia.www.haystack.trace.indexer.metrics.AppMetricNames
+import com.expedia.www.haystack.trace.indexer.writers.TraceWriter
+import org.apache.kafka.clients.producer._
+import org.slf4j.LoggerFactory
+
+import scala.util.Try
+
+object KafkaWriter extends MetricsSupport {
+ protected val kafkaProducerFailures: Meter = metricRegistry.meter(AppMetricNames.KAFKA_PRODUCE_FAILURES)
+}
+
+class KafkaWriter(producerConfig: Properties, topic: String) extends TraceWriter {
+ private val LOGGER = LoggerFactory.getLogger(classOf[KafkaWriter])
+
+ private val producer = new KafkaProducer[String, Array[Byte]](producerConfig)
+
+ override def writeAsync(traceId: String, packedSpanBuffer: PackedMessage[SpanBuffer], isLastSpanBuffer: Boolean): Unit = {
+ val record = new ProducerRecord[String, Array[Byte]](topic, traceId, packedSpanBuffer.packedDataBytes)
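+ // send() is asynchronous; the callback fires on the producer's I/O thread once the
+ // broker acknowledges the record or the request fails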
+ producer.send(record, (_: RecordMetadata, exception: Exception) => {
+ if (exception != null) {
+ LOGGER.error(s"Fail to write the span buffer record to kafka topic=$topic", exception)
+ KafkaWriter.kafkaProducerFailures.mark()
+ }
+ })
+ }
+
+ override def close(): Unit = Try(producer.close())
+}
diff --git a/traces/indexer/src/test/resources/config/base.conf b/traces/indexer/src/test/resources/config/base.conf
new file mode 100644
index 000000000..6d1619145
--- /dev/null
+++ b/traces/indexer/src/test/resources/config/base.conf
@@ -0,0 +1,155 @@
+health.status.path = "/app/isHealthy"
+
+span.accumulate {
+ store {
+ min.traces.per.cache = 1000 # the minimum number of traces in each cache before the eviction check is applied; also useful for testing
+ all.max.entries = 20000 # the maximum number of entries that can live across all the stores
+ }
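+
+ # window.ms is the buffering window for spans of the same traceId; poll.ms is how
+ # often the accumulator checks the store for span-buffers ready to be emitted
+ # (mapping assumed from the indexer's SpanAccumulatorConfiguration)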
+ window.ms = 10000
+ poll.ms = 2000
+ packer = snappy
+}
+
+kafka {
+ close.stream.timeout.ms = 30000
+
+ topic.consume = "spans"
+ topic.produce = "span-buffer"
+ num.stream.threads = 2
+
+ max.wakeups = 5
+ wakeup.timeout.ms = 5000
+
+ commit.offset {
+ retries = 3
+ backoff.ms = 200
+ }
+
+ # consumer specific configurations
+ consumer {
+ group.id = "haystack-trace-indexer"
+ bootstrap.servers = "kafkasvc:9092"
+ auto.offset.reset = "latest"
+
+ # disable auto commit as the app manages offset itself
+ enable.auto.commit = "false"
+ }
+
+ # producer specific configurations
+ producer {
+ bootstrap.servers = "kafkasvc:9092"
+ }
+}
+
+
+backend {
+
+ client {
+ host = "localhost"
+ port = 8090
+ max.message.size = 52428800 # 50MB in bytes
+ }
+ # defines the max inflight writes for backend client
+ max.inflight.requests = 100
+}
+
+service.metadata {
+ enabled = true
+ flush {
+ interval.sec = 60
+ operation.count = 10000
+ }
+ es {
+ endpoint = "http://elasticsearch:9200"
+ conn.timeout.ms = 10000
+ read.timeout.ms = 5000
+ consistency.level = "one"
+ index {
+ # apply the template before starting the client, if json is empty, no operation is performed
+ template.json = "some_template_json"
+ name = "service-metadata"
+ type = "metadata"
+ }
+ # defines settings for bulk operation like max inflight bulks, number of documents and the total size in a single bulk
+ bulk.max {
+ docs {
+ count = 100
+ size.kb = 1000
+ }
+ inflight = 10
+ }
+ retries {
+ max = 10
+ backoff {
+ initial.ms = 100
+ factor = 2
+ }
+ }
+ }
+}
+
+elasticsearch {
+ endpoint = "http://elasticsearch:9200"
+ max.inflight.requests = 50
+ conn.timeout.ms = 10000
+ read.timeout.ms = 5000
+ max.connections.per.route = 10
+ consistency.level = "one"
+ index {
+ template {
+ json = "some_template_json"
+ }
+
+ name.prefix = "haystack-traces"
+ hour.bucket = 6
+ type = "spans"
+ }
+ # defines settings for bulk operation like max inflight bulks, number of documents and the total size in a single bulk
+ bulk.max {
+ docs {
+ count = 100
+ size.kb = 1000
+ }
+ inflight = 10
+ }
+
+ retries {
+ max = 10
+ backoff {
+ initial.ms = 1000
+ factor = 2
+ }
+ }
+
+ # if the enabled flag is true, es requests will be signed
+ signing.request.aws {
+ enabled = false
+ region = "us-west-2"
+ service.name = "es"
+ # if 'access.key' is not provided, will use DefaultAWSCredentialsProviderChain to resolve creds
+ access.key = ""
+ secret.key = ""
+ }
+}
+
+reload {
+ tables {
+ index.fields.config = "whitelist-index-fields"
+ }
+ config {
+ endpoint = "http://elasticsearch:9200"
+ database.name = "reload-configs"
+ }
+ interval.ms = 600
+ startup.load = false
+
+ # if the enabled flag is true, es requests will be signed
+ signing.request.aws {
+ enabled = false
+ region = "us-west-2"
+ service.name = "es"
+ # if 'access.key' is not provided, will use DefaultAWSCredentialsProviderChain to resolve creds
+ access.key = ""
+ secret.key = ""
+ }
+}
\ No newline at end of file
diff --git a/traces/indexer/src/test/resources/logback-test.xml b/traces/indexer/src/test/resources/logback-test.xml
new file mode 100644
index 000000000..adfa02c68
--- /dev/null
+++ b/traces/indexer/src/test/resources/logback-test.xml
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/BaseIntegrationTestSpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/BaseIntegrationTestSpec.scala
new file mode 100644
index 000000000..51ef660e2
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/BaseIntegrationTestSpec.scala
@@ -0,0 +1,327 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.integration
+
+import java.util.UUID
+import java.util.concurrent.{Executors, ScheduledExecutorService, ScheduledFuture, TimeUnit}
+
+import com.expedia.open.tracing.Tag.TagType
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.open.tracing.{Log, Span, Tag}
+import com.expedia.www.haystack.trace.commons.packer.{PackerType, Unpacker}
+import com.expedia.www.haystack.trace.indexer.config.entities.SpanAccumulatorConfiguration
+import com.expedia.www.haystack.trace.indexer.integration.clients.{ElasticSearchTestClient, GrpcTestClient, KafkaTestClient}
+import org.apache.kafka.streams.KeyValue
+import org.apache.kafka.streams.integration.utils.IntegrationTestUtils
+import org.scalatest._
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration.FiniteDuration
+
+case class TraceDescription(traceId: String, spanIdPrefix: String)
+
+abstract class BaseIntegrationTestSpec extends WordSpec with GivenWhenThen with Matchers with BeforeAndAfterAll with BeforeAndAfterEach {
+ protected val MAX_WAIT_FOR_OUTPUT_MS = 12000
+
+ protected val spanAccumulatorConfig = SpanAccumulatorConfiguration(
+ minTracesPerCache = 100,
+ maxEntriesAllStores = 500,
+ pollIntervalMillis = 2000L,
+ bufferingWindowMillis = 6000L,
+ PackerType.SNAPPY)
+
+ protected var scheduler: ScheduledExecutorService = _
+
+ val kafka = new KafkaTestClient
+ val traceBackendClient = new GrpcTestClient
+ val elastic = new ElasticSearchTestClient
+
+ override def beforeAll() {
+ scheduler = Executors.newSingleThreadScheduledExecutor()
+ kafka.prepare(getClass.getSimpleName)
+ traceBackendClient.prepare()
+ elastic.prepare()
+ }
+
+ override def afterAll(): Unit = if (scheduler != null) scheduler.shutdownNow()
+
+ protected def validateChildSpans(spanBuffer: SpanBuffer,
+ traceId: String,
+ spanIdPrefix: String,
+ childSpanCount: Int): Unit = {
+ spanBuffer.getTraceId shouldBe traceId
+
+ withClue(s"the trace-id $traceId has lesser spans than expected"){
+ spanBuffer.getChildSpansCount shouldBe childSpanCount
+ }
+
+ (0 until spanBuffer.getChildSpansCount).toList foreach { idx =>
+ spanBuffer.getChildSpans(idx).getSpanId shouldBe s"$spanIdPrefix-$idx"
+ spanBuffer.getChildSpans(idx).getTraceId shouldBe spanBuffer.getTraceId
+ spanBuffer.getChildSpans(idx).getServiceName shouldBe s"service$idx"
+ spanBuffer.getChildSpans(idx).getParentSpanId should not be null
+ spanBuffer.getChildSpans(idx).getOperationName shouldBe s"op$idx"
+ }
+ }
+
+ private def randomSpan(traceId: String, spanId: String, serviceName: String, operationName: String): Span = {
+ Span.newBuilder()
+ .setTraceId(traceId)
+ .setParentSpanId(UUID.randomUUID().toString)
+ .setSpanId(spanId)
+ .setServiceName(serviceName)
+ .setOperationName(operationName)
+ .setStartTime(System.currentTimeMillis() * 1000)
+ .addTags(Tag.newBuilder().setKey("errorCode").setType(TagType.LONG).setVLong(404))
+ .addTags(Tag.newBuilder().setKey("_role").setType(TagType.STRING).setVStr("haystack"))
+ .addLogs(Log.newBuilder().addFields(Tag.newBuilder().setKey("exceptiontype").setType(TagType.STRING).setVStr("external").build()).build())
+ .build()
+ }
+
+ protected def produceSpansAsync(maxSpansPerTrace: Int,
+ produceInterval: FiniteDuration,
+ traceDescription: List[TraceDescription],
+ startRecordTimestamp: Long,
+ maxRecordTimestamp: Long,
+ startSpanIdxFrom: Int = 0): ScheduledFuture[_] = {
+ var timestamp = startRecordTimestamp
+ var cnt = 0
+ scheduler.scheduleWithFixedDelay(() => {
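+ // on each tick produce one more span per trace until maxSpansPerTrace is reached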
+ if (cnt < maxSpansPerTrace) {
+ val spans = traceDescription.map(sd => {
+ new KeyValue[String, Span](sd.traceId, randomSpan(sd.traceId, s"${sd.spanIdPrefix}-${startSpanIdxFrom + cnt}", s"service${startSpanIdxFrom + cnt}", s"op${startSpanIdxFrom + cnt}"))
+ }).asJava
+ IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(
+ kafka.INPUT_TOPIC,
+ spans,
+ kafka.TEST_PRODUCER_CONFIG,
+ timestamp)
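+ // spread record timestamps evenly so the last span lands near startRecordTimestamp + maxRecordTimestamp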
+ timestamp = timestamp + (maxRecordTimestamp / math.max(1, maxSpansPerTrace - 1)) // guard against division by zero when maxSpansPerTrace is 1
+ }
+ cnt = cnt + 1
+ }, 0, produceInterval.toMillis, TimeUnit.MILLISECONDS)
+ }
+
+ def verifyBackendWrites(traceDescriptions: Seq[TraceDescription], minSpansPerTrace: Int, maxSpansPerTrace: Int): Unit = {
+ val traceRecords = traceBackendClient.queryTraces(traceDescriptions)
+
+ traceRecords should have size traceDescriptions.size
+
+ traceRecords.foreach(record => {
+ val spanBuffer = Unpacker.readSpanBuffer(record.getSpans.toByteArray)
+ val descr = traceDescriptions.find(_.traceId == record.getTraceId).get
+ record.getSpans should not be null
+ spanBuffer.getChildSpansCount should be >= minSpansPerTrace
+ spanBuffer.getChildSpansCount should be <= maxSpansPerTrace
+
+ spanBuffer.getChildSpansList.asScala.zipWithIndex foreach {
+ case (sp, idx) =>
+ sp.getSpanId shouldBe s"${descr.spanIdPrefix}-$idx"
+ sp.getServiceName shouldBe s"service$idx"
+ sp.getOperationName shouldBe s"op$idx"
+ }
+ })
+ }
+
+ def verifyOperationNames(): Unit = {
+ val operationNamesQuery =
+ """{
+ | "query" : {
+ | "term" : {
+ | "servicename" : {
+ | "value" : "service0",
+ | "boost" : 1.0
+ | }
+ | }
+ | },
+ | "_source" : {
+ | "includes" : [
+ | "operationname"
+ | ],
+ | "excludes" : [
+ | "servicename"
+ | ]
+ | }
+ |}""".stripMargin
+ val docs = elastic.queryServiceMetadataIndex(operationNamesQuery)
+ docs.size shouldBe 1
+
+ }
+
+ def verifyElasticSearchWrites(traceIds: Seq[String]): Unit = {
+ val matchAllQuery =
+ """{
+ | "query": {
+ | "match_all": {}
+ | }
+ |}""".stripMargin
+
+ var docs = elastic.querySpansIndex(matchAllQuery)
+ docs.size shouldBe traceIds.size
+ docs.indices.foreach { idx =>
+ val traceId = docs.apply(idx).traceid
+ traceIds should contain(traceId)
+ }
+
+ val spanSpecificQuery =
+ """
+ |{
+ | "query": {
+ | "bool": {
+ | "must": [
+ | {
+ | "nested": {
+ | "path": "spans",
+ | "query": {
+ | "bool": {
+ | "must": [
+ | {
+ | "match": {
+ | "spans.servicename": "service0"
+ | }
+ | },
+ | {
+ | "match": {
+ | "spans.operationname": "op0"
+ | }
+ | }
+ | ]
+ | }
+ | }
+ | }
+ | }
+ | ]
+ |}}}
+ """.stripMargin
+ docs = elastic.querySpansIndex(spanSpecificQuery)
+ docs.size shouldBe traceIds.size
+
+ val emptyResponseQuery =
+ """
+ |{
+ | "query": {
+ | "bool": {
+ | "must": [
+ | {
+ | "nested": {
+ | "path": "spans",
+ | "query": {
+ | "bool": {
+ | "must": [
+ | {
+ | "match": {
+ | "spans.servicename": "service0"
+ | }
+ | },
+ | {
+ | "match": {
+ | "spans.operationname": "op1"
+ | }
+ | }
+ | ]
+ | }
+ | }
+ | }
+ | }
+ | ]
+ |}}}
+ """.stripMargin
+ docs = elastic.querySpansIndex(emptyResponseQuery)
+ docs.size shouldBe 0
+
+ val tagQuery =
+ """
+ |{
+ | "query": {
+ | "bool": {
+ | "must": [
+ | {
+ | "nested": {
+ | "path": "spans",
+ | "query": {
+ | "bool": {
+ | "must": [
+ | {
+ | "match": {
+ | "spans.servicename": "service2"
+ | }
+ | },
+ | {
+ | "match": {
+ | "spans.operationname": "op2"
+ | }
+ | },
+ | {
+ | "match": {
+ | "spans.errorcode": "404"
+ | }
+ | }
+ | ]
+ | }
+ | }
+ | }
+ | }
+ | ]
+ |}}}
+ """.stripMargin
+ docs = elastic.querySpansIndex(tagQuery)
+ docs.size shouldBe traceIds.size
+ docs.map(_.traceid) should contain theSameElementsAs traceIds
+
+ val roleTagQuery =
+ """
+ |{
+ | "query": {
+ | "bool": {
+ | "must": [
+ | {
+ | "nested": {
+ | "path": "spans",
+ | "query": {
+ | "bool": {
+ | "must": [
+ | {
+ | "match": {
+ | "spans.servicename": "service2"
+ | }
+ | },
+ | {
+ | "match": {
+ | "spans.operationname": "op2"
+ | }
+ | },
+ | {
+ | "match": {
+ | "spans.role": "haystack"
+ | }
+ | }
+ | ]
+ | }
+ | }
+ | }
+ | }
+ | ]
+ |}}}
+ """.stripMargin
+ docs = elastic.querySpansIndex(roleTagQuery)
+ docs.size shouldBe traceIds.size
+ docs.map(_.traceid) should contain theSameElementsAs traceIds
+
+ }
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/EvictedSpanBufferSpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/EvictedSpanBufferSpec.scala
new file mode 100644
index 000000000..8596d25f2
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/EvictedSpanBufferSpec.scala
@@ -0,0 +1,52 @@
+package com.expedia.www.haystack.trace.indexer.integration
+
+import java.util
+
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.indexer.StreamRunner
+import org.apache.kafka.streams.KeyValue
+import org.apache.kafka.streams.integration.utils.IntegrationTestUtils
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration._
+
+class EvictedSpanBufferSpec extends BaseIntegrationTestSpec {
+ private val MAX_CHILD_SPANS = 5
+ private val TRACE_ID_1 = "traceid-1"
+ private val TRACE_ID_2 = "traceid-2"
+ private val SPAN_ID_PREFIX = "span-id-"
+
+ "Trace Indexing Topology" should {
+ s"consume spans from input '${kafka.INPUT_TOPIC}', buffer them together for a given traceId and write to trace-backend and elastic on eviction" in {
+ Given("a set of spans produced async with extremely extremely small store size configuration")
+ val kafkaConfig = kafka.buildConfig
+ val esConfig = elastic.buildConfig
+ val indexTagsConfig = elastic.indexingConfig
+ val backendConfig = traceBackendClient.buildConfig
+ val serviceMetadataConfig = elastic.buildServiceMetadataConfig
+ val accumulatorConfig = spanAccumulatorConfig.copy(minTracesPerCache = 1, maxEntriesAllStores = 1)
+
+ produceSpansAsync(MAX_CHILD_SPANS,
+ produceInterval = 1.seconds,
+ List(TraceDescription(TRACE_ID_1, SPAN_ID_PREFIX), TraceDescription(TRACE_ID_2, SPAN_ID_PREFIX)),
+ 0L, accumulatorConfig.bufferingWindowMillis)
+
+ When(s"kafka-streams topology is started")
+ val topology = new StreamRunner(kafkaConfig, accumulatorConfig, esConfig, backendConfig, serviceMetadataConfig, indexTagsConfig)
+ topology.start()
+
+ Then(s"we should get multiple span-buffers bearing only 1 span due to early eviction from store")
+ val records: util.List[KeyValue[String, SpanBuffer]] =
+ IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(kafka.RESULT_CONSUMER_CONFIG, kafka.OUTPUT_TOPIC, 10, MAX_WAIT_FOR_OUTPUT_MS)
+
+ validateKafkaOutput(records.asScala)
+ topology.close()
+ }
+ }
+
+ // validate the kafka output
+ private def validateKafkaOutput(records: Seq[KeyValue[String, SpanBuffer]]): Unit = {
+ records.map(_.key).toSet should contain allOf (TRACE_ID_1, TRACE_ID_2)
+ records.foreach(rec => rec.value.getChildSpansCount shouldBe 1)
+ }
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/FailedTopologyRecoverySpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/FailedTopologyRecoverySpec.scala
new file mode 100644
index 000000000..fc07d76b7
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/FailedTopologyRecoverySpec.scala
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.integration
+
+import java.util
+
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.indexer.StreamRunner
+import org.apache.kafka.streams.KeyValue
+import org.apache.kafka.streams.integration.utils.IntegrationTestUtils
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration._
+
+class FailedTopologyRecoverySpec extends BaseIntegrationTestSpec {
+ private val MAX_CHILD_SPANS_PER_TRACE = 5
+ private val TRACE_ID_3 = "traceid-3"
+ private val SPAN_ID_PREFIX = "span-id-"
+ private val TRACE_DESCRIPTIONS = List(TraceDescription(TRACE_ID_3, SPAN_ID_PREFIX))
+
+ "Trace Indexing Topology" should {
+ s"consume spans from input '${kafka.INPUT_TOPIC}', buffer them together keyed by unique TraceId and write to trace-backend and elastic even if crashed in between" in {
+ Given("a set of spans produced async with spanBuffer+kafka configurations")
+ val kafkaConfig = kafka.buildConfig
+ val esConfig = elastic.buildConfig
+ val indexTagsConfig = elastic.indexingConfig
+ val backendConfig = traceBackendClient.buildConfig
+ val serviceMetadataConfig = elastic.buildServiceMetadataConfig
+ val accumulatorConfig = spanAccumulatorConfig.copy(pollIntervalMillis = spanAccumulatorConfig.pollIntervalMillis * 5)
+ val startTimestamp = System.currentTimeMillis()
+ produceSpansAsync(
+ MAX_CHILD_SPANS_PER_TRACE,
+ produceInterval = 1.seconds,
+ TRACE_DESCRIPTIONS,
+ startTimestamp,
+ spanAccumulatorConfig.bufferingWindowMillis)
+
+ When(s"kafka-streams topology is started and then stopped forcefully after few sec")
+ var topology = new StreamRunner(kafkaConfig, accumulatorConfig, esConfig, backendConfig, serviceMetadataConfig, indexTagsConfig)
+ topology.start()
+ Thread.sleep(7000)
+ topology.close()
+
+ // wait a few seconds for the stream threads to close
+ Thread.sleep(6000)
+
+ Then(s"on restart of the topology, we should be able to read complete trace created in previous run from the '${kafka.OUTPUT_TOPIC}' topic in kafka, trace-backend and elasticsearch")
+ topology = new StreamRunner(kafkaConfig, accumulatorConfig, esConfig, backendConfig, serviceMetadataConfig, indexTagsConfig)
+ topology.start()
+
+ // produce one more span record with same traceId to trigger punctuate
+ produceSpansAsync(
+ 1,
+ produceInterval = 1.seconds,
+ TRACE_DESCRIPTIONS,
+ startTimestamp + spanAccumulatorConfig.bufferingWindowMillis,
+ spanAccumulatorConfig.bufferingWindowMillis,
+ startSpanIdxFrom = MAX_CHILD_SPANS_PER_TRACE)
+
+ try {
+ val records: util.List[KeyValue[String, SpanBuffer]] =
+ IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(kafka.RESULT_CONSUMER_CONFIG, kafka.OUTPUT_TOPIC, 1, MAX_WAIT_FOR_OUTPUT_MS)
+
+ // wait for the elasticsearch writes to become searchable; the index refresh interval may need tuning
+ Thread.sleep(5000)
+ validateKafkaOutput(records.asScala, MAX_CHILD_SPANS_PER_TRACE)
+ verifyBackendWrites(TRACE_DESCRIPTIONS, MAX_CHILD_SPANS_PER_TRACE, MAX_CHILD_SPANS_PER_TRACE + 1) // 1 extra record for trigger
+ verifyElasticSearchWrites(Seq(TRACE_ID_3))
+ } finally {
+ topology.close()
+ }
+ }
+ }
+
+ // validate the kafka output
+ private def validateKafkaOutput(records: Iterable[KeyValue[String, SpanBuffer]], minChildSpanCount: Int) = {
+ // expect only one span buffer
+ records.size shouldBe 1
+ records.head.key shouldBe TRACE_ID_3
+ records.head.value.getChildSpansCount should be >= minChildSpanCount
+ }
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/MultipleTraceIndexingTopologySpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/MultipleTraceIndexingTopologySpec.scala
new file mode 100644
index 000000000..a97166466
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/MultipleTraceIndexingTopologySpec.scala
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.integration
+
+import java.util
+
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.indexer.StreamRunner
+import org.apache.kafka.streams.KeyValue
+import org.apache.kafka.streams.integration.utils.IntegrationTestUtils
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration._
+
+class MultipleTraceIndexingTopologySpec extends BaseIntegrationTestSpec {
+ private val MAX_CHILD_SPANS_PER_TRACE = 5
+ private val TRACE_ID_9 = "traceid-9"
+ private val TRACE_ID_5 = "traceid-5"
+ private val SPAN_ID_PREFIX_1 = TRACE_ID_9 + "span-id-"
+ private val SPAN_ID_PREFIX_2 = TRACE_ID_5 + "span-id-"
+
+ "Trace Indexing Topology" should {
+ s"consume spans from input '${kafka.INPUT_TOPIC}' and buffer them together for every unique traceId and write to trace-backend and elastic search" in {
+ Given("a set of spans with two different traceIds and project configurations")
+ val kafkaConfig = kafka.buildConfig
+ val esConfig = elastic.buildConfig
+ val indexTagsConfig = elastic.indexingConfig
+ val backendConfig = traceBackendClient.buildConfig
+ val serviceMetadataConfig = elastic.buildServiceMetadataConfig
+
+ When(s"spans are produced in '${kafka.INPUT_TOPIC}' topic async, and kafka-streams topology is started")
+ val traceDescriptions = List(TraceDescription(TRACE_ID_5, SPAN_ID_PREFIX_2), TraceDescription(TRACE_ID_9, SPAN_ID_PREFIX_1))
+
+ produceSpansAsync(MAX_CHILD_SPANS_PER_TRACE,
+ 1.seconds,
+ traceDescriptions,
+ startRecordTimestamp = 0,
+ maxRecordTimestamp = spanAccumulatorConfig.bufferingWindowMillis)
+
+ val topology = new StreamRunner(kafkaConfig, spanAccumulatorConfig, esConfig, backendConfig, serviceMetadataConfig, indexTagsConfig)
+ topology.start()
+
+ Then(s"we should read two span buffers with different traceIds from '${kafka.OUTPUT_TOPIC}' topic and same should be read from trace-backend and elastic search")
+ try {
+ val result: util.List[KeyValue[String, SpanBuffer]] =
+ IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(kafka.RESULT_CONSUMER_CONFIG, kafka.OUTPUT_TOPIC, 2, MAX_WAIT_FOR_OUTPUT_MS)
+
+ validateKafkaOutput(result.asScala, MAX_CHILD_SPANS_PER_TRACE)
+
+ Thread.sleep(6000)
+ verifyBackendWrites(traceDescriptions, MAX_CHILD_SPANS_PER_TRACE, MAX_CHILD_SPANS_PER_TRACE)
+ verifyElasticSearchWrites(Seq(TRACE_ID_9, TRACE_ID_5))
+ } finally {
+ topology.close()
+ }
+ }
+ }
+
+ // validate the kafka output
+ private def validateKafkaOutput(records: Iterable[KeyValue[String, SpanBuffer]], childSpanCount: Int): Unit = {
+ records.size shouldBe 2
+
+ // both traceIds should be present as different span buffer objects
+ records.map(_.key) should contain allOf (TRACE_ID_9, TRACE_ID_5)
+
+ records.foreach(record => {
+ record.key match {
+ case TRACE_ID_9 => validateChildSpans(record.value, TRACE_ID_9, SPAN_ID_PREFIX_1, childSpanCount)
+ case TRACE_ID_5 => validateChildSpans(record.value, TRACE_ID_5, SPAN_ID_PREFIX_2, childSpanCount)
+ }
+ })
+ }
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/PartialTraceIndexingTopologySpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/PartialTraceIndexingTopologySpec.scala
new file mode 100644
index 000000000..ca484104c
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/PartialTraceIndexingTopologySpec.scala
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.integration
+
+import java.util
+
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.indexer.StreamRunner
+import org.apache.kafka.streams.KeyValue
+import org.apache.kafka.streams.integration.utils.IntegrationTestUtils
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration._
+
+class PartialTraceIndexingTopologySpec extends BaseIntegrationTestSpec {
+ private val MAX_CHILD_SPANS_PER_TRACE = 5
+ private val TRACE_ID = "unique-trace-id"
+
+ "Trace Indexing Topology" should {
+ s"consume spans from '${kafka.INPUT_TOPIC}' topic, buffer them together for every unique traceId and write to trace-backend and elastic search" in {
+ Given("a set of spans with all configurations")
+ val SPAN_ID_PREFIX = "span-id"
+ val kafkaConfig = kafka.buildConfig
+ val esConfig = elastic.buildConfig
+ val indexTagsConfig = elastic.indexingConfig
+ val backendConfig = traceBackendClient.buildConfig
+ val serviceMetadataConfig = elastic.buildServiceMetadataConfig
+ val traceDescription = List(TraceDescription(TRACE_ID, SPAN_ID_PREFIX))
+
+ When(s"spans are produced in '${kafka.INPUT_TOPIC}' topic async, and kafka-streams topology is started")
+ produceSpansAsync(
+ MAX_CHILD_SPANS_PER_TRACE,
+ 1.second,
+ traceDescription,
+ 0L,
+ spanAccumulatorConfig.bufferingWindowMillis)
+
+ val topology = new StreamRunner(kafkaConfig, spanAccumulatorConfig, esConfig, backendConfig, serviceMetadataConfig, indexTagsConfig)
+ topology.start()
+
+ Then(s"we should read one span buffer object from '${kafka.OUTPUT_TOPIC}' topic and the same should be searchable in trace-backend and elastic search")
+ try {
+ val result: util.List[KeyValue[String, SpanBuffer]] =
+ IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(kafka.RESULT_CONSUMER_CONFIG, kafka.OUTPUT_TOPIC, 1, MAX_WAIT_FOR_OUTPUT_MS)
+ validateKafkaOutput(result.asScala, MAX_CHILD_SPANS_PER_TRACE, SPAN_ID_PREFIX)
+
+ // sleep to let the elasticsearch results become searchable
+ Thread.sleep(6000)
+ verifyBackendWrites(traceDescription, MAX_CHILD_SPANS_PER_TRACE, MAX_CHILD_SPANS_PER_TRACE)
+ verifyElasticSearchWrites(Seq(TRACE_ID))
+
+ repeatTestWithNewerSpanIds()
+ } finally {
+ topology.close()
+ }
+ }
+ }
+
+ // verifies that old spans are not re-emitted when the same traceId reappears later
+ private def repeatTestWithNewerSpanIds(): Unit = {
+ Given(s"a set of new span ids and same traceId '$TRACE_ID'")
+ val SPAN_ID_2_PREFIX = "span-id-2"
+ When(s"these spans are produced in '${kafka.INPUT_TOPIC}' topic on the currently running topology")
+ produceSpansAsync(
+ MAX_CHILD_SPANS_PER_TRACE,
+ 1.seconds,
+ List(TraceDescription(TRACE_ID, SPAN_ID_2_PREFIX)),
+ spanAccumulatorConfig.bufferingWindowMillis + 100L,
+ spanAccumulatorConfig.bufferingWindowMillis)
+
+ Then(s"we should read see newer spans in the buffered object from '${kafka.OUTPUT_TOPIC}' topic")
+ val result: util.List[KeyValue[String, SpanBuffer]] =
+ IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(kafka.RESULT_CONSUMER_CONFIG, kafka.OUTPUT_TOPIC, 1, MAX_WAIT_FOR_OUTPUT_MS)
+
+ validateKafkaOutput(result.asScala, MAX_CHILD_SPANS_PER_TRACE, SPAN_ID_2_PREFIX)
+ }
+
+ // validate the kafka output
+ private def validateKafkaOutput(records: Iterable[KeyValue[String, SpanBuffer]],
+ childSpanCount: Int,
+ spanIdPrefix: String): Unit = {
+ // expect only one span buffer object
+ records.size shouldBe 1
+ validateChildSpans(records.head.value, TRACE_ID, spanIdPrefix, childSpanCount)
+ }
+}
\ No newline at end of file
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/ServiceMetadataIndexingTopologySpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/ServiceMetadataIndexingTopologySpec.scala
new file mode 100644
index 000000000..5e7ba6939
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/ServiceMetadataIndexingTopologySpec.scala
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.integration
+
+import java.util
+
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.indexer.StreamRunner
+import org.apache.kafka.streams.KeyValue
+import org.apache.kafka.streams.integration.utils.IntegrationTestUtils
+
+import scala.concurrent.duration._
+
+class ServiceMetadataIndexingTopologySpec extends BaseIntegrationTestSpec {
+ private val MAX_CHILD_SPANS_PER_TRACE = 5
+ private val TRACE_ID_6 = "traceid-6"
+ private val TRACE_ID_7 = "traceid-7"
+ private val SPAN_ID_PREFIX_1 = TRACE_ID_6 + "span-id-"
+ private val SPAN_ID_PREFIX_2 = TRACE_ID_7 + "span-id-"
+
+ "Trace Indexing Topology" should {
+ s"consume spans from input '${kafka.INPUT_TOPIC}' and buffer them together for every service operation combination and write to elastic search elastic" in {
+ Given("a set of spans with different serviceNames and a project configurations")
+ val kafkaConfig = kafka.buildConfig
+ val esConfig = elastic.buildConfig
+ val indexTagsConfig = elastic.indexingConfig
+ val backendConfig = traceBackendClient.buildConfig
+ val serviceMetadataConfig = elastic.buildServiceMetadataConfig
+
+ When(s"spans are produced in '${kafka.INPUT_TOPIC}' topic async, and kafka-streams topology is started")
+ val traceDescriptions = List(TraceDescription(TRACE_ID_6, SPAN_ID_PREFIX_1), TraceDescription(TRACE_ID_7, SPAN_ID_PREFIX_2))
+
+ produceSpansAsync(MAX_CHILD_SPANS_PER_TRACE,
+ 1.seconds,
+ traceDescriptions,
+ 0,
+ spanAccumulatorConfig.bufferingWindowMillis)
+
+ val topology = new StreamRunner(kafkaConfig, spanAccumulatorConfig, esConfig, backendConfig, serviceMetadataConfig, indexTagsConfig)
+ topology.start()
+
+ Then(s"we should read two multiple service operation combinations in elastic search")
+ try {
+ val result: util.List[KeyValue[String, SpanBuffer]] =
+ IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(kafka.RESULT_CONSUMER_CONFIG, kafka.OUTPUT_TOPIC, 2, MAX_WAIT_FOR_OUTPUT_MS)
+ Thread.sleep(6000)
+ verifyOperationNames()
+ } finally {
+ topology.close()
+ }
+ }
+ }
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/clients/ElasticSearchTestClient.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/clients/ElasticSearchTestClient.scala
new file mode 100644
index 000000000..5d554087a
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/clients/ElasticSearchTestClient.scala
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2019, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.integration.clients
+
+import java.text.SimpleDateFormat
+import java.util.Date
+
+import com.expedia.www.haystack.commons.retries.RetryOperation
+import com.expedia.www.haystack.trace.commons.config.entities._
+import com.expedia.www.haystack.trace.indexer.config.entities.{ElasticSearchConfiguration, ServiceMetadataWriteConfiguration}
+import io.searchbox.client.config.HttpClientConfig
+import io.searchbox.client.{JestClient, JestClientFactory}
+import io.searchbox.core.Search
+import io.searchbox.indices.DeleteIndex
+import org.json4s.ext.EnumNameSerializer
+import org.json4s.jackson.Serialization
+import org.json4s.{DefaultFormats, Formats}
+
+case class EsSourceDocument(traceid: String)
+
+class ElasticSearchTestClient {
+ protected implicit val formats: Formats = DefaultFormats + new EnumNameSerializer(IndexFieldType)
+
+ private val ELASTIC_SEARCH_ENDPOINT = "http://elasticsearch:9200"
+ private val SPANS_INDEX_NAME_PREFIX = "haystack-traces"
+ private val SPANS_INDEX_TYPE = "spans"
+ private val SPANS_INDEX_HOUR_BUCKET = 6
+
+ private val HAYSTACK_TRACES_INDEX = {
+ val formatter = new SimpleDateFormat("yyyy-MM-dd")
+ s"$SPANS_INDEX_NAME_PREFIX-${formatter.format(new Date())}"
+ }
+
+ private val esClient: JestClient = {
+ val factory = new JestClientFactory()
+ factory.setHttpClientConfig(new HttpClientConfig.Builder(ELASTIC_SEARCH_ENDPOINT).build())
+ factory.getObject
+ }
+
+ def prepare(): Unit = {
+ // drop today's hour-bucketed haystack-traces indexes
+ 0 until (24 / SPANS_INDEX_HOUR_BUCKET) foreach {
+ idx => {
+ esClient.execute(new DeleteIndex.Builder(s"$HAYSTACK_TRACES_INDEX-$idx").build())
+ }
+ }
+ }
+
+ def buildConfig = ElasticSearchConfiguration(
+ ELASTIC_SEARCH_ENDPOINT,
+ None,
+ None,
+ Some(INDEX_TEMPLATE),
+ "one",
+ SPANS_INDEX_NAME_PREFIX,
+ SPANS_INDEX_HOUR_BUCKET,
+ SPANS_INDEX_TYPE,
+ 3000,
+ 3000,
+ 5,
+ 10,
+ 10,
+ 10,
+ RetryOperation.Config(3, 2000, 2),
+ getAWSRequestSigningConfiguration)
+
+ def getAWSRequestSigningConfiguration: AWSRequestSigningConfiguration = {
+ AWSRequestSigningConfiguration(enabled = false, "", "", None, None)
+ }
+
+ def buildServiceMetadataConfig: ServiceMetadataWriteConfiguration = {
+ ServiceMetadataWriteConfiguration(enabled = true,
+ esEndpoint = ELASTIC_SEARCH_ENDPOINT,
+ username = None,
+ password = None,
+ consistencyLevel = "one",
+ indexTemplateJson = Some(SERVICE_METADATA_INDEX_TEMPLATE),
+ indexName = "service-metadata",
+ indexType = "metadata",
+ connectionTimeoutMillis = 3000,
+ readTimeoutMillis = 3000,
+ maxInFlightBulkRequests = 10,
+ maxDocsInBulk = 5,
+ maxBulkDocSizeInBytes = 50,
+ flushIntervalInSec = 10,
+ flushOnMaxOperationCount = 10,
+ retryConfig = RetryOperation.Config(10, 250, 2))
+ }
+
+ def indexingConfig: WhitelistIndexFieldConfiguration = {
+ val cfg = WhitelistIndexFieldConfiguration()
+ val cfgJsonData = Serialization.write(WhiteListIndexFields(
+ List(WhitelistIndexField(name = "role", `type` = IndexFieldType.string, aliases = Set("_role")), WhitelistIndexField(name = "errorcode", `type` = IndexFieldType.long))))
+ cfg.onReload(cfgJsonData)
+ cfg
+ }
+
+ def querySpansIndex(query: String): List[EsSourceDocument] = {
+ import scala.collection.JavaConverters._
+ val searchQuery = new Search.Builder(query)
+ .addIndex(SPANS_INDEX_NAME_PREFIX)
+ .addType(SPANS_INDEX_TYPE)
+ .build()
+ val result = esClient.execute(searchQuery)
+ if (result.getSourceAsStringList != null && result.getSourceAsStringList.size() > 0) {
+ result.getSourceAsStringList.asScala.map(Serialization.read[EsSourceDocument]).toList
+ }
+ else {
+ Nil
+ }
+ }
+
+ def queryServiceMetadataIndex(query: String): List[String] = {
+ import scala.collection.JavaConverters._
+ val SERVICE_METADATA_INDEX_NAME = "service-metadata"
+ val SERVICE_METADATA_INDEX_TYPE = "metadata"
+ val searchQuery = new Search.Builder(query)
+ .addIndex(SERVICE_METADATA_INDEX_NAME)
+ .addType(SERVICE_METADATA_INDEX_TYPE)
+ .build()
+ val result = esClient.execute(searchQuery)
+ if (result.getSourceAsStringList != null && result.getSourceAsStringList.size() > 0) {
+ result.getSourceAsStringList.asScala.toList
+ }
+ else {
+ Nil
+ }
+ }
+
+ private val INDEX_TEMPLATE =
+ """{
+ | "template": "haystack-traces*",
+ | "settings": {
+ | "number_of_shards": 1,
+ | "index.mapping.ignore_malformed": true,
+ | "analysis": {
+ | "normalizer": {
+ | "lowercase_normalizer": {
+ | "type": "custom",
+ | "filter": ["lowercase"]
+ | }
+ | }
+ | }
+ | },
+ | "aliases": {
+ | "haystack-traces": {}
+ | },
+ | "mappings": {
+ | "spans": {
+ | "_field_names": {
+ | "enabled": false
+ | },
+ | "_all": {
+ | "enabled": false
+ | },
+ | "_source": {
+ | "includes": ["traceid"]
+ | },
+ | "properties": {
+ | "traceid": {
+ | "enabled": false
+ | },
+ | "starttime": {
+ | "type": "long",
+ | "doc_values": true
+ | },
+ | "spans": {
+ | "type": "nested",
+ | "properties": {
+ | "starttime": {
+ | "type": "long",
+ | "doc_values": true
+ | }
+ | }
+ | }
+ | },
+ | "dynamic_templates": [{
+ | "strings_as_keywords_1": {
+ | "match_mapping_type": "string",
+ | "mapping": {
+ | "type": "keyword",
+ | "normalizer": "lowercase_normalizer",
+ | "doc_values": false,
+ | "norms": false
+ | }
+ | }
+ | }, {
+ | "longs_disable_doc_norms": {
+ | "match_mapping_type": "long",
+ | "mapping": {
+ | "type": "long",
+ | "doc_values": false,
+ | "norms": false
+ | }
+ | }
+ | }]
+ | }
+ | }
+ |}
+ |""".stripMargin
+
+ private val SERVICE_METADATA_INDEX_TEMPLATE =
+ """{
+ | "template": "service-metadata*",
+ | "aliases": {
+ | "service-metadata": {}
+ | },
+ | "settings": {
+ | "number_of_shards": 4,
+ | "index.mapping.ignore_malformed": true,
+ | "analysis": {
+ | "normalizer": {
+ | "lowercase_normalizer": {
+ | "type": "custom",
+ | "filter": [
+ | "lowercase"
+ | ]
+ | }
+ | }
+ | }
+ | },
+ | "mappings": {
+ | "metadata": {
+ | "_field_names": {
+ | "enabled": false
+ | },
+ | "_all": {
+ | "enabled": false
+ | },
+ | "properties": {
+ | "servicename": {
+ | "type": "keyword",
+ | "norms": false
+ | },
+ | "operationname": {
+ | "type": "keyword",
+ | "doc_values": false,
+ | "norms": false
+ | }
+ | }
+ | }
+ | }
+ |}
+ |""".stripMargin
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/clients/GrpcTestClient.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/clients/GrpcTestClient.scala
new file mode 100644
index 000000000..342450061
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/clients/GrpcTestClient.scala
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.integration.clients
+
+import java.util.concurrent.Executors
+
+import com.expedia.open.tracing.backend.{ReadSpansRequest, StorageBackendGrpc, TraceRecord}
+import com.expedia.www.haystack.trace.commons.config.entities.{GrpcClientConfig, TraceStoreBackends}
+import com.expedia.www.haystack.trace.indexer.config.entities.TraceBackendConfiguration
+import com.expedia.www.haystack.trace.indexer.integration.TraceDescription
+import com.expedia.www.haystack.trace.storage.backends.memory.Service
+import io.grpc.ManagedChannelBuilder
+
+import scala.collection.JavaConverters._
+
+class GrpcTestClient {
+
+ var storageBackendClient: StorageBackendGrpc.StorageBackendBlockingStub = _
+
+ import GrpcTestClient._
+
+ def prepare(): Unit = {
+ storageBackendClient = StorageBackendGrpc.newBlockingStub(ManagedChannelBuilder.forAddress("localhost", port)
+ .usePlaintext(true)
+ .build())
+ }
+
+ def buildConfig = TraceBackendConfiguration(
+ TraceStoreBackends(Seq(GrpcClientConfig("localhost", port))), 10)
+
+ def queryTraces(traceDescriptions: Seq[TraceDescription]): Seq[TraceRecord] = {
+ val traceIds = traceDescriptions.map(traceDescription => traceDescription.traceId).toList
+ storageBackendClient.readSpans(ReadSpansRequest.newBuilder().addAllTraceIds(traceIds.asJava).build()).getRecordsList.asScala
+ }
+
+}
+
+object GrpcTestClient {
+
+ val port = 8090
+
+ private val executors = Executors.newSingleThreadExecutor()
+ executors.submit(new Runnable {
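+ // boot the in-memory storage backend gRPC service once per test JVM on a background thread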
+ override def run(): Unit = Service.main(Array {
+ port.toString
+ })
+ })
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/clients/KafkaTestClient.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/clients/KafkaTestClient.scala
new file mode 100644
index 000000000..5f20ba7d4
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/clients/KafkaTestClient.scala
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.integration.clients
+
+import java.util.Properties
+
+import com.expedia.www.haystack.trace.indexer.config.entities.KafkaConfiguration
+import com.expedia.www.haystack.trace.indexer.integration.serdes.{SnappyCompressedSpanBufferProtoDeserializer, SpanProtoSerializer}
+import com.expedia.www.haystack.trace.indexer.serde.SpanDeserializer
+import org.apache.kafka.clients.consumer.ConsumerConfig
+import org.apache.kafka.clients.producer.ProducerConfig
+import org.apache.kafka.common.serialization.{ByteArraySerializer, StringDeserializer, StringSerializer}
+import org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster
+
+object KafkaTestClient {
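+ // a single embedded kafka broker shared by all integration tests in this JVM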
+ val KAFKA_CLUSTER = new EmbeddedKafkaCluster(1)
+ KAFKA_CLUSTER.start()
+}
+
+class KafkaTestClient {
+ import KafkaTestClient._
+
+ val INPUT_TOPIC = "spans"
+ val OUTPUT_TOPIC = "span-buffer"
+
+ val APP_PRODUCER_CONFIG: Properties = {
+ val props = new Properties()
+ props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_CLUSTER.bootstrapServers)
+ props.put(ProducerConfig.ACKS_CONFIG, "1")
+ props.put(ProducerConfig.BATCH_SIZE_CONFIG, "20")
+ props.put(ProducerConfig.RETRIES_CONFIG, "0")
+ props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
+ props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[ByteArraySerializer])
+ props
+ }
+
+ val APP_CONSUMER_CONFIG: Properties = new Properties()
+
+ val TEST_PRODUCER_CONFIG: Properties = {
+ val props = new Properties()
+ props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_CLUSTER.bootstrapServers)
+ props.put(ProducerConfig.ACKS_CONFIG, "1")
+ props.put(ProducerConfig.BATCH_SIZE_CONFIG, "20")
+ props.put(ProducerConfig.RETRIES_CONFIG, "0")
+ props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
+ props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[SpanProtoSerializer])
+ props
+ }
+
+ val RESULT_CONSUMER_CONFIG = new Properties()
+
+ def buildConfig = KafkaConfiguration(numStreamThreads = 1,
+ pollTimeoutMs = 100,
+ APP_CONSUMER_CONFIG, APP_PRODUCER_CONFIG, OUTPUT_TOPIC, INPUT_TOPIC,
+ consumerCloseTimeoutInMillis = 3000,
+ commitOffsetRetries = 3,
+ commitBackoffInMillis = 250,
+ maxWakeups = 5, wakeupTimeoutInMillis = 3000)
+
+ def prepare(appId: String): Unit = {
+ APP_CONSUMER_CONFIG.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_CLUSTER.bootstrapServers)
+ APP_CONSUMER_CONFIG.put(ConsumerConfig.GROUP_ID_CONFIG, appId + "-app-consumer")
+ APP_CONSUMER_CONFIG.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
+ APP_CONSUMER_CONFIG.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer])
+ APP_CONSUMER_CONFIG.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[SpanDeserializer])
+ APP_CONSUMER_CONFIG.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
+
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_CLUSTER.bootstrapServers)
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.GROUP_ID_CONFIG, appId + "-result-consumer")
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer])
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[SnappyCompressedSpanBufferProtoDeserializer])
+
+ deleteTopics(INPUT_TOPIC, OUTPUT_TOPIC)
+ KAFKA_CLUSTER.createTopic(INPUT_TOPIC, 2, 1)
+ KAFKA_CLUSTER.createTopic(OUTPUT_TOPIC)
+ }
+
+ private def deleteTopics(topics: String*): Unit = KAFKA_CLUSTER.deleteTopicsAndWait(topics:_*)
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/serdes/TestSerdes.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/serdes/TestSerdes.scala
new file mode 100644
index 000000000..91c5244b4
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/integration/serdes/TestSerdes.scala
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trace.indexer.integration.serdes
+
+import java.util
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.commons.packer.Unpacker
+import org.apache.kafka.common.serialization.{Deserializer, Serializer}
+
+class SpanProtoSerializer extends Serializer[Span] {
+ override def configure(configs: util.Map[String, _], isKey: Boolean): Unit = ()
+ override def serialize(topic: String, data: Span): Array[Byte] = {
+ data.toByteArray
+ }
+ override def close(): Unit = ()
+}
+
+class SnappyCompressedSpanBufferProtoDeserializer extends Deserializer[SpanBuffer] {
+ override def configure(configs: util.Map[String, _], isKey: Boolean): Unit = ()
+
+ override def deserialize(topic: String, data: Array[Byte]): SpanBuffer = {
+ if(data == null) {
+ null
+ } else {
+ Unpacker.readSpanBuffer(data)
+ }
+ }
+
+ override def close(): Unit = ()
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/ConfigurationLoaderSpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/ConfigurationLoaderSpec.scala
new file mode 100644
index 000000000..ad56bdc0d
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/ConfigurationLoaderSpec.scala
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2019, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.unit
+
+import com.expedia.www.haystack.trace.commons.packer.PackerType
+import com.expedia.www.haystack.trace.indexer.config.ProjectConfiguration
+import org.apache.kafka.clients.consumer.ConsumerConfig
+import org.apache.kafka.clients.producer.ProducerConfig
+import org.scalatest.{FunSpec, Matchers}
+
+class ConfigurationLoaderSpec extends FunSpec with Matchers {
+
+ val project = new ProjectConfiguration()
+ describe("Configuration loader") {
+
+ it("should load the health status config from base.conf") {
+ project.healthStatusFilePath shouldEqual "/app/isHealthy"
+ }
+
+ it("should load the span buffer config only from base.conf") {
+ val config = project.spanAccumulateConfig
+ config.pollIntervalMillis shouldBe 2000L
+ config.maxEntriesAllStores shouldBe 20000
+ config.bufferingWindowMillis shouldBe 10000L
+ config.packerType shouldEqual PackerType.SNAPPY
+ }
+
+ it("should load the kafka config from base.conf and one stream property from env variable") {
+ val kafkaConfig = project.kafkaConfig
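+ // base.conf sets auto.offset.reset = "latest"; the test environment is expected to
+ // override it to "earliest" (the "one stream property from env variable" above)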
+ kafkaConfig.produceTopic shouldBe "span-buffer"
+ kafkaConfig.consumeTopic shouldBe "spans"
+ kafkaConfig.numStreamThreads shouldBe 2
+ kafkaConfig.commitOffsetRetries shouldBe 3
+ kafkaConfig.commitBackoffInMillis shouldBe 200
+
+ kafkaConfig.maxWakeups shouldBe 5
+ kafkaConfig.wakeupTimeoutInMillis shouldBe 5000
+
+ kafkaConfig.consumerProps.getProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG) shouldBe "kafkasvc:9092"
+ kafkaConfig.consumerProps.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG) shouldBe "earliest"
+ kafkaConfig.consumerProps.getProperty(ConsumerConfig.GROUP_ID_CONFIG) shouldBe "haystack-trace-indexer"
+ kafkaConfig.consumerProps.getProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG) shouldBe "false"
+ kafkaConfig.consumerProps.getProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG) shouldBe "org.apache.kafka.common.serialization.StringDeserializer"
+ kafkaConfig.consumerProps.getProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG) shouldBe "com.expedia.www.haystack.trace.indexer.serde.SpanDeserializer"
+
+ kafkaConfig.consumerCloseTimeoutInMillis shouldBe 30000
+
+ kafkaConfig.producerProps.getProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG) shouldBe "kafkasvc:9092"
+ kafkaConfig.producerProps.getProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG) shouldBe "org.apache.kafka.common.serialization.ByteArraySerializer"
+ kafkaConfig.producerProps.getProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG) shouldBe "org.apache.kafka.common.serialization.StringSerializer"
+ }
+
+ it("should load the service metadata config from base.conf") {
+ val config = project.serviceMetadataWriteConfig
+ config.flushIntervalInSec shouldBe 60
+ config.flushOnMaxOperationCount shouldBe 10000
+ config.esEndpoint shouldBe "http://elasticsearch:9200"
+ config.maxInFlightBulkRequests shouldBe 10
+ config.maxDocsInBulk shouldBe 100
+ config.maxBulkDocSizeInBytes shouldBe 1000000
+ config.indexTemplateJson shouldBe Some("some_template_json")
+ config.consistencyLevel shouldBe "one"
+ config.readTimeoutMillis shouldBe 5000
+ config.connectionTimeoutMillis shouldBe 10000
+ config.indexName shouldBe "service-metadata"
+ config.indexType shouldBe "metadata"
+ config.retryConfig.maxRetries shouldBe 10
+ config.retryConfig.backOffInMillis shouldBe 100
+ config.retryConfig.backoffFactor shouldBe 2
+ }
+
+ it("should load the trace backend config from base.conf and few properties overridden from env variable") {
+ val backendConfiguration = project.backendConfig
+
+ backendConfiguration.maxInFlightRequests shouldBe 100
+ }
+
+ it("should load the elastic search config from base.conf and one property overridden from env variable") {
+ val elastic = project.elasticSearchConfig
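+ // base.conf sets name.prefix = "haystack-traces"; the env override referenced above is
+ // expected to change it to "haystack-test"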
+ elastic.endpoint shouldBe "http://elasticsearch:9200"
+ elastic.maxInFlightBulkRequests shouldBe 10
+ elastic.maxDocsInBulk shouldBe 100
+ elastic.maxBulkDocSizeInBytes shouldBe 1000000
+ elastic.indexTemplateJson shouldBe Some("some_template_json")
+ elastic.consistencyLevel shouldBe "one"
+ elastic.readTimeoutMillis shouldBe 5000
+ elastic.connectionTimeoutMillis shouldBe 10000
+ elastic.indexNamePrefix shouldBe "haystack-test"
+ elastic.indexType shouldBe "spans"
+ elastic.retryConfig.maxRetries shouldBe 10
+ elastic.retryConfig.backOffInMillis shouldBe 1000
+ elastic.retryConfig.backoffFactor shouldBe 2
+ elastic.indexHourBucket shouldBe 6
+ elastic.maxConnectionsPerRoute shouldBe 10
+
+ elastic.awsRequestSigningConfiguration.enabled shouldEqual false
+ elastic.awsRequestSigningConfiguration.region shouldEqual "us-west-2"
+ elastic.awsRequestSigningConfiguration.awsServiceName shouldEqual "es"
+ elastic.awsRequestSigningConfiguration.accessKey shouldBe None
+ elastic.awsRequestSigningConfiguration.secretKey shouldBe None
+ }
+ }
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/DynamicCacheSizerSpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/DynamicCacheSizerSpec.scala
new file mode 100644
index 000000000..340a1fb54
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/DynamicCacheSizerSpec.scala
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.unit
+
+import com.expedia.www.haystack.trace.indexer.store.traits.CacheSizeObserver
+import com.expedia.www.haystack.trace.indexer.store.DynamicCacheSizer
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FunSpec, Matchers}
+
+class DynamicCacheSizerSpec extends FunSpec with Matchers with EasyMockSugar {
+ private val MAX_CACHE_ENTRIES = 500
+
+ describe("dynamic cache sizer") {
+ it("should notify the cache observer with new cache size") {
+ val sizer = new DynamicCacheSizer(1, MAX_CACHE_ENTRIES)
+ val observer = mock[CacheSizeObserver]
+ expecting {
+ observer.onCacheSizeChange(MAX_CACHE_ENTRIES)
+ }
+ whenExecuting(observer) {
+ sizer.addCacheObserver(observer)
+ }
+ }
+
+ it("should notify multiple cache observers with new cache size") {
+ val sizer = new DynamicCacheSizer(1, MAX_CACHE_ENTRIES)
+ val observer_1 = mock[CacheSizeObserver]
+ val observer_2 = mock[CacheSizeObserver]
+
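+ // with two observers sharing the sizer, each is expected to be resized to half of MAX_CACHE_ENTRIES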
+ expecting {
+ observer_1.onCacheSizeChange(MAX_CACHE_ENTRIES)
+ observer_1.onCacheSizeChange(MAX_CACHE_ENTRIES / 2)
+ observer_2.onCacheSizeChange(MAX_CACHE_ENTRIES / 2)
+ }
+ whenExecuting(observer_1, observer_2) {
+ sizer.addCacheObserver(observer_1)
+ sizer.addCacheObserver(observer_2)
+ }
+ }
+
+ it("should notify existing cache observers when an existing observer is removed with new cache size") {
+ val sizer = new DynamicCacheSizer(1, MAX_CACHE_ENTRIES)
+ val observer_1 = mock[CacheSizeObserver]
+ val observer_2 = mock[CacheSizeObserver]
+
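+ // removing observer_1 should hand its share back, resizing observer_2 to the full MAX_CACHE_ENTRIES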
+ expecting {
+ observer_1.onCacheSizeChange(MAX_CACHE_ENTRIES)
+ observer_1.onCacheSizeChange(MAX_CACHE_ENTRIES / 2)
+ observer_2.onCacheSizeChange(MAX_CACHE_ENTRIES / 2)
+ observer_2.onCacheSizeChange(MAX_CACHE_ENTRIES)
+ }
+ whenExecuting(observer_1, observer_2) {
+ sizer.addCacheObserver(observer_1)
+ sizer.addCacheObserver(observer_2)
+ sizer.removeCacheObserver(observer_1)
+ }
+ }
+ }
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/ElasticSearchResultHandlerSpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/ElasticSearchResultHandlerSpec.scala
new file mode 100644
index 000000000..5822553f6
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/ElasticSearchResultHandlerSpec.scala
@@ -0,0 +1,101 @@
+package com.expedia.www.haystack.trace.indexer.unit
+
+import java.util
+import java.util.Collections
+
+import com.codahale.metrics.Timer
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.commons.retries.RetryOperation
+import com.expedia.www.haystack.trace.indexer.metrics.AppMetricNames
+import com.expedia.www.haystack.trace.indexer.writers.es.ElasticSearchResultHandler
+import com.google.gson.Gson
+import io.searchbox.core.BulkResult
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FunSpec, Matchers}
+
+class ElasticSearchResultHandlerSpec extends FunSpec with Matchers with EasyMockSugar with MetricsSupport {
+ private val esWriteFailureMeter = metricRegistry.meter(AppMetricNames.ES_WRITE_FAILURE)
+
+ describe("Trace Index Result Handler") {
+
+ it("should complete with success if no failures reported") {
+ val retryCallback = mock[RetryOperation.Callback]
+ val timer = mock[Timer.Context]
+ val bulkResult = mock[BulkResult]
+
+ expecting {
+ retryCallback.onResult(bulkResult)
+ timer.close()
+ bulkResult.getFailedItems.andReturn(Collections.emptyList()).anyTimes()
+ }
+
+ whenExecuting(retryCallback, timer, bulkResult) {
+ val handler = new ElasticSearchResultHandler(timer, esWriteFailureMeter, retryCallback)
+ handler.completed(bulkResult)
+ esWriteFailureMeter.getCount shouldBe 0
+ }
+ }
+
+ it("should complete with success but mark the failures if happen") {
+ val retryCallback = mock[RetryOperation.Callback]
+ val timer = mock[Timer.Context]
+ val bulkResult = mock[BulkResult]
+ val outer = new BulkResult(new Gson())
+ val resultItem = new outer.BulkResultItem("op", "index", "type", "1", 400,
+ "error", 1, "errorType", "errorReason")
+
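+ // a single failed bulk item (HTTP 400) should bump the ES write-failure meter by exactly one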
+ expecting {
+ retryCallback.onResult(bulkResult)
+ timer.close()
+ bulkResult.getFailedItems.andReturn(util.Arrays.asList(resultItem)).anyTimes()
+ }
+
+ whenExecuting(retryCallback, timer, bulkResult) {
+ val handler = new ElasticSearchResultHandler(timer, esWriteFailureMeter, retryCallback)
+ val initialFailures = esWriteFailureMeter.getCount
+ handler.completed(bulkResult)
+ esWriteFailureMeter.getCount - initialFailures shouldBe 1
+ }
+ }
+
+ it("should report failure and mark the number of failures, and perform retry on any exception") {
+ val retryCallback = mock[RetryOperation.Callback]
+ val timer = mock[Timer.Context]
+ val bulkResult = mock[BulkResult]
+
+ val error = new RuntimeException
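+ // failed() should mark the failure meter and ask the retry callback to retry on a generic exception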
+ expecting {
+ retryCallback.onError(error, retry = true)
+ timer.close()
+ }
+
+ whenExecuting(retryCallback, timer, bulkResult) {
+ val handler = new ElasticSearchResultHandler(timer, esWriteFailureMeter, retryCallback)
+ val initialFailures = esWriteFailureMeter.getCount
+ handler.failed(error)
+ esWriteFailureMeter.getCount - initialFailures shouldBe 1
+ }
+ }
+
+ it("should report failure and mark the number of failures and perform function on elastic search specific exception") {
+ val retryCallback = mock[RetryOperation.Callback]
+ val timer = mock[Timer.Context]
+ val bulkResult = mock[BulkResult]
+
+ val error = new EsRejectedExecutionException("too many requests")
+
+ expecting {
+ retryCallback.onError(error, retry = true)
+ timer.close()
+ }
+
+ whenExecuting(retryCallback, timer, bulkResult) {
+ val handler = new ElasticSearchResultHandler(timer, esWriteFailureMeter, retryCallback)
+ val initialFailures = esWriteFailureMeter.getCount
+ handler.failed(error)
+ esWriteFailureMeter.getCount - initialFailures shouldBe 1
+ }
+ }
+ }
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/ElasticSearchWriterUtilsSpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/ElasticSearchWriterUtilsSpec.scala
new file mode 100644
index 000000000..b74d9826a
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/ElasticSearchWriterUtilsSpec.scala
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.expedia.www.haystack.trace.indexer.unit
+
+import com.expedia.www.haystack.trace.indexer.writers.es.ElasticSearchWriterUtils
+import org.scalatest.{BeforeAndAfterEach, FunSpec, GivenWhenThen, Matchers}
+
+class ElasticSearchWriterUtilsSpec extends FunSpec with Matchers with GivenWhenThen with BeforeAndAfterEach {
+ var timezone: String = _
+
+ override def beforeEach() {
+ timezone = System.getProperty("user.timezone")
+ System.setProperty("user.timezone", "CST")
+ }
+
+ override def afterEach(): Unit = {
+ System.setProperty("user.timezone", timezone)
+ }
+
+ describe("elastic search writer") {
+ it("should use UTC when generating ES indexes") {
+ Given("the system timezone is not UTC")
+ System.setProperty("user.timezone", "CST")
+ val eventTimeInMicros = System.currentTimeMillis() * 1000
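+ // note: the event time is in microseconds, and the 6 passed below is the hour bucket used to window index names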
+
+ When("the writer generates the ES indexes")
+ val cstName = ElasticSearchWriterUtils.indexName("haystack-traces", 6, eventTimeInMicros)
+ System.setProperty("user.timezone", "UTC")
+ val utcName = ElasticSearchWriterUtils.indexName("haystack-traces", 6, eventTimeInMicros)
+
+ Then("it should use UTC to get those indexes")
+ cstName shouldBe utcName
+ }
+ }
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/IndexTemplateHandlerSpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/IndexTemplateHandlerSpec.scala
new file mode 100644
index 000000000..b108e54a4
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/IndexTemplateHandlerSpec.scala
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2019 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.unit
+
+import com.expedia.www.haystack.trace.commons.config.entities.WhitelistIndexFieldConfiguration
+import com.expedia.www.haystack.trace.indexer.writers.es.IndexTemplateHandler
+import com.google.gson.{Gson, JsonParser}
+import io.searchbox.client.{JestClient, JestResult}
+import io.searchbox.indices.template.{GetTemplate, PutTemplate}
+import org.easymock.EasyMock
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FunSpec, Matchers}
+
+class IndexTemplateHandlerSpec extends FunSpec with Matchers with EasyMockSugar {
+
+ private val templateJson =
+ """
+ |{
+ | "spans-index-template": {
+ | "order": 0,
+ | "index_patterns": ["haystack-traces*"],
+ | "settings": {
+ | "index": {
+ | "analysis": {
+ | "normalizer": {
+ | "lowercase_normalizer": {
+ | "filter": ["lowercase"],
+ | "type": "custom"
+ | }
+ | }
+ | },
+ | "number_of_shards": "8",
+ | "mapping": {
+ | "ignore_malformed": "true"
+ | }
+ | }
+ | },
+ | "mappings": {
+ | "spans": {
+ | "_field_names": {
+ | "enabled": false
+ | },
+ | "_all": {
+ | "enabled": false
+ | },
+ | "_source": {
+ | "includes": ["traceid"]
+ | },
+ | "properties": {
+ | "traceid": {
+ | "enabled": false
+ | },
+ | "starttime": {
+ | "type": "long",
+ | "doc_values": true
+ | },
+ | "spans": {
+ | "type": "nested",
+ | "properties": {
+ | "servicename": {
+ | "type": "keyword",
+ | "normalizer": "lowercase_normalizer",
+ | "doc_values": false,
+ | "norms": false
+ | },
+ | "operationname": {
+ | "type": "keyword",
+ | "normalizer": "lowercase_normalizer",
+ | "doc_values": false,
+ | "norms": false
+ | },
+ | "starttime": {
+ | "enabled": false
+ | },
+ | "duration": {
+ | "type": "long",
+ | "doc_values": true
+ | },
+ | "f1": {
+ | "type": "long",
+ | "doc_values": true
+ | }
+ | }
+ | }
+ | },
+ | "dynamic_templates": [{
+ | "strings_as_keywords_1": {
+ | "match_mapping_type": "string",
+ | "mapping": {
+ | "type": "keyword",
+ | "normalizer": "lowercase_normalizer",
+ | "doc_values": false,
+ | "norms": false
+ | }
+ | }
+ | }, {
+ | "longs_disable_doc_norms": {
+ | "match_mapping_type": "long",
+ | "mapping": {
+ | "type": "long",
+ | "doc_values": false,
+ | "norms": false
+ | }
+ | }
+ | }]
+ | }
+ | },
+ | "aliases": {
+ | "haystack-traces": {}
+ | }
+ | }
+ |}
+ |
+ """.stripMargin
+ describe("Index Template Handler") {
+ it("should read the template and update it") {
+ val client = mock[JestClient]
+ val getTemplateResult = new JestResult(new Gson())
+ val putTemplateResult = new JestResult(new Gson())
+ val config = WhitelistIndexFieldConfiguration()
+
+ val getTemplate = EasyMock.newCapture[GetTemplate]()
+ val putTemplate = EasyMock.newCapture[PutTemplate]()
+
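+ // capture the GetTemplate/PutTemplate requests so the final PUT body can be asserted below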
+ expecting {
+ getTemplateResult.setSucceeded(true)
+ putTemplateResult.setSucceeded(true)
+ getTemplateResult.setJsonObject(new JsonParser().parse(templateJson).getAsJsonObject)
+ client.execute(EasyMock.capture(getTemplate)).andReturn(getTemplateResult)
+ client.execute(EasyMock.capture(putTemplate)).andReturn(putTemplateResult)
+ }
+
+ whenExecuting(client) {
+ new IndexTemplateHandler(client, None, "spans", config).run()
+ config.onReload(
+ """
+ |{
+ |"fields": [
+ | {
+ | "name": "status_code",
+ | "type": "int",
+ | "enableRangeQuery": true
+ | },
+ | {
+ | "name": "f1",
+ | "type": "long",
+ | "enableRangeQuery": false
+ | }
+ |]}
+ """.
+ stripMargin)
+ }
+
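+ // f1 ends up with doc_values=false because the reloaded whitelist disables range queries for it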
+ putTemplate.getValue.getData(new Gson()) shouldEqual "{\"settings\":{\"index\":{\"analysis\":{\"normalizer\":{\"lowercase_normalizer\":{\"filter\":[\"lowercase\"],\"type\":\"custom\"}}},\"number_of_shards\":\"8\",\"mapping\":{\"ignore_malformed\":\"true\"}}},\"mappings\":{\"spans\":{\"_field_names\":{\"enabled\":false},\"_all\":{\"enabled\":false},\"_source\":{\"includes\":[\"traceid\"]},\"properties\":{\"traceid\":{\"enabled\":false},\"starttime\":{\"type\":\"long\",\"doc_values\":true},\"spans\":{\"type\":\"nested\",\"properties\":{\"servicename\":{\"type\":\"keyword\",\"normalizer\":\"lowercase_normalizer\",\"doc_values\":false,\"norms\":false},\"operationname\":{\"type\":\"keyword\",\"normalizer\":\"lowercase_normalizer\",\"doc_values\":false,\"norms\":false},\"starttime\":{\"enabled\":false},\"duration\":{\"type\":\"long\",\"doc_values\":true},\"f1\":{\"type\":\"long\",\"doc_values\":false}}}},\"dynamic_templates\":[{\"strings_as_keywords_1\":{\"match_mapping_type\":\"string\",\"mapping\":{\"type\":\"keyword\",\"normalizer\":\"lowercase_normalizer\",\"doc_values\":false,\"norms\":false}}},{\"longs_disable_doc_norms\":{\"match_mapping_type\":\"long\",\"mapping\":{\"type\":\"long\",\"doc_values\":false,\"norms\":false}}}]}},\"aliases\":{\"haystack-traces\":{}},\"index_patterns\":[\"haystack-traces*\"],\"order\":0}"
+ }
+ }
+}
+
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/SpanBufferMemoryStoreSpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/SpanBufferMemoryStoreSpec.scala
new file mode 100644
index 000000000..1c063b586
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/SpanBufferMemoryStoreSpec.scala
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.unit
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.trace.indexer.store.DynamicCacheSizer
+import com.expedia.www.haystack.trace.indexer.store.impl.SpanBufferMemoryStore
+import org.apache.kafka.streams.processor.internals.{ProcessorContextImpl, RecordCollector}
+import org.apache.kafka.streams.processor.{StateRestoreCallback, StateStore, TaskId}
+import org.easymock.EasyMock._
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FunSpec, Matchers}
+
+class SpanBufferMemoryStoreSpec extends FunSpec with Matchers with EasyMockSugar {
+
+ private val TRACE_ID_1 = "TraceId_1"
+ private val TRACE_ID_2 = "TraceId_2"
+
+ describe("SpanBuffer Memory Store") {
+ it("should create spanBuffer, add child spans and allow retrieving old spanBuffers from the store") {
+ val (context, rootStateStore, recordCollector, spanBufferStore) = createSpanBufferStore
+
+ whenExecuting(context, recordCollector, rootStateStore) {
+
+ val span1 = Span.newBuilder().setTraceId(TRACE_ID_1).setSpanId("SPAN_ID_1").build()
+ val span2 = Span.newBuilder().setTraceId(TRACE_ID_1).setSpanId("SPAN_ID_2").build()
+
+ spanBufferStore.addOrUpdateSpanBuffer(TRACE_ID_1, span1, 11000L, 10)
+ spanBufferStore.addOrUpdateSpanBuffer(TRACE_ID_1, span2, 12000L, 11)
+
+ spanBufferStore.totalSpans shouldBe 2
+
+ val result = spanBufferStore.getAndRemoveSpanBuffersOlderThan(13000L)
+
+ result.size shouldBe 1
+ result.foreach {
+ spanBufferWithMetadata =>
+ spanBufferWithMetadata.builder.getTraceId shouldBe TRACE_ID_1
+ spanBufferWithMetadata.builder.getChildSpansCount shouldBe 2
+ spanBufferWithMetadata.builder.getChildSpans(0).getSpanId shouldBe "SPAN_ID_1"
+ spanBufferWithMetadata.builder.getChildSpans(1).getSpanId shouldBe "SPAN_ID_2"
+ }
+ spanBufferStore.totalSpans shouldBe 0
+ }
+ }
+
+ it("should create two spanBuffers for different traceIds, allow retrieving old spanBuffers from the store") {
+ val (context, rootStateStore, recordCollector, spanBufferStore) = createSpanBufferStore
+
+ whenExecuting(context, recordCollector, rootStateStore) {
+ val span1 = Span.newBuilder().setTraceId(TRACE_ID_1).setSpanId("SPAN_ID_1").build()
+ val span2 = Span.newBuilder().setTraceId(TRACE_ID_2).setSpanId("SPAN_ID_2").build()
+ val span3 = Span.newBuilder().setTraceId(TRACE_ID_2).setSpanId("SPAN_ID_3").build()
+
+ spanBufferStore.addOrUpdateSpanBuffer(TRACE_ID_1, span1, 11000L, 10)
+ spanBufferStore.addOrUpdateSpanBuffer(TRACE_ID_2, span2, 12000L, 11)
+ spanBufferStore.addOrUpdateSpanBuffer(TRACE_ID_2, span3, 12500L, 12)
+
+ spanBufferStore.totalSpans shouldBe 3
+
+ var result = spanBufferStore.getAndRemoveSpanBuffersOlderThan(11500L)
+
+ result.size shouldBe 1
+ result.foreach {
+ spanBufferWithMetadata =>
+ spanBufferWithMetadata.builder.getTraceId shouldBe TRACE_ID_1
+ spanBufferWithMetadata.builder.getChildSpansCount shouldBe 1
+ spanBufferWithMetadata.builder.getChildSpans(0).getSpanId shouldBe "SPAN_ID_1"
+ }
+
+ spanBufferStore.totalSpans shouldBe 2
+
+ result = spanBufferStore.getAndRemoveSpanBuffersOlderThan(11500L)
+ result.size shouldBe 0
+
+ result = spanBufferStore.getAndRemoveSpanBuffersOlderThan(13000L)
+
+ result.size shouldBe 1
+ result.foreach {
+ spanBufferWithMetadata =>
+ spanBufferWithMetadata.builder.getTraceId shouldBe TRACE_ID_2
+ spanBufferWithMetadata.builder.getChildSpansCount shouldBe 2
+ spanBufferWithMetadata.builder.getChildSpans(0).getSpanId shouldBe "SPAN_ID_2"
+ spanBufferWithMetadata.builder.getChildSpans(1).getSpanId shouldBe "SPAN_ID_3"
+ }
+
+ spanBufferStore.totalSpans shouldBe 0
+ }
+ }
+ }
+
+ private def createSpanBufferStore = {
+ val cacheSizer = new DynamicCacheSizer(10, 1000)
+ val spanBufferStore = new SpanBufferMemoryStore(cacheSizer)
+ spanBufferStore.init()
+
+ val context = mock[ProcessorContextImpl]
+ val rootStateStore = mock[StateStore]
+ val recordCollector: RecordCollector = mock[RecordCollector]
+
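+ // stub the processor context so the store can initialize without a real Kafka Streams topology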
+ expecting {
+ context.applicationId().andReturn("appId").anyTimes()
+ context.taskId().andReturn(new TaskId(1, 0)).anyTimes()
+ context.recordCollector().andReturn(recordCollector).anyTimes()
+ context.register(anyObject(classOf[StateStore]), anyBoolean(), anyObject(classOf[StateRestoreCallback])).anyTimes()
+ }
+ (context, rootStateStore, recordCollector, spanBufferStore)
+ }
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/SpanIndexDocumentGeneratorSpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/SpanIndexDocumentGeneratorSpec.scala
new file mode 100644
index 000000000..721a0405e
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/SpanIndexDocumentGeneratorSpec.scala
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.unit
+
+import java.util.concurrent.TimeUnit
+
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.open.tracing.{Log, Span, Tag}
+import com.expedia.www.haystack.trace.commons.config.entities.{IndexFieldType, WhiteListIndexFields, WhitelistIndexField, WhitelistIndexFieldConfiguration}
+import com.expedia.www.haystack.trace.indexer.writers.es.IndexDocumentGenerator
+import com.google.protobuf.ByteString
+import org.json4s.ext.EnumNameSerializer
+import org.json4s.jackson.Serialization
+import org.json4s.{DefaultFormats, Formats}
+import org.scalatest.{FunSpec, Matchers}
+
+class SpanIndexDocumentGeneratorSpec extends FunSpec with Matchers {
+ protected implicit val formats: Formats = DefaultFormats + new EnumNameSerializer(IndexFieldType)
+
+ private val TRACE_ID = "trace_id"
+ private val START_TIME_1 = 1529042838469123L
+ private val START_TIME_2 = 1529042848469000L
+
+ private val LONG_DURATION = TimeUnit.SECONDS.toMicros(25) + TimeUnit.MICROSECONDS.toMicros(500)
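+ // start times are in microseconds; the expected index documents below show them truncated to whole seconds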
+
+ describe("Span to IndexDocument Generator") {
+ it ("should extract serviceName, operationName, duration and create json document for indexing") {
+ val generator = new IndexDocumentGenerator(WhitelistIndexFieldConfiguration())
+
+ val span_1 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setSpanId("span-1")
+ .setServiceName("service1")
+ .setOperationName("op1")
+ .setStartTime(START_TIME_1)
+ .setDuration(610000L)
+ .build()
+ val span_2 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setSpanId("span-2")
+ .setServiceName("service1")
+ .setOperationName("op1")
+ .setStartTime(START_TIME_1)
+ .setDuration(500000L)
+ .build()
+ val span_3 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setSpanId("span-3")
+ .setServiceName("service2")
+ .setDuration(LONG_DURATION)
+ .setStartTime(START_TIME_2)
+ .setOperationName("op3").build()
+
+ val spanBuffer = SpanBuffer.newBuilder().addChildSpans(span_1).addChildSpans(span_2).addChildSpans(span_3).setTraceId(TRACE_ID).build()
+ val doc = generator.createIndexDocument(TRACE_ID, spanBuffer).get
+ doc.json shouldBe "{\"traceid\":\"trace_id\",\"rootduration\":0,\"starttime\":1529042838000000,\"spans\":[{\"servicename\":\"service1\",\"starttime\":[1529042838000000],\"duration\":[500000,610000],\"operationname\":\"op1\"},{\"servicename\":\"service2\",\"starttime\":[1529042848000000],\"duration\":[25000000],\"operationname\":\"op3\"}]}"
+ }
+
+ it ("should not create an index document if service name is absent") {
+ val generator = new IndexDocumentGenerator(WhitelistIndexFieldConfiguration())
+
+ val span_1 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setOperationName("op1")
+ .build()
+ val span_2 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setDuration(1000L)
+ .setOperationName("op2").build()
+
+ val spanBuffer = SpanBuffer.newBuilder().addChildSpans(span_1).addChildSpans(span_2).setTraceId(TRACE_ID).build()
+ val doc = generator.createIndexDocument(TRACE_ID, spanBuffer)
+ doc shouldBe None
+ }
+
+ it ("should extract tags along with serviceName, operationName and duration and create json document for indexing") {
+ val indexableTags = List(
+ WhitelistIndexField(name = "role", `type` = IndexFieldType.string),
+ WhitelistIndexField(name = "errorCode", `type` = IndexFieldType.long))
+
+ val whitelistConfig = WhitelistIndexFieldConfiguration()
+ whitelistConfig.onReload(Serialization.write(WhiteListIndexFields(indexableTags)))
+ val generator = new IndexDocumentGenerator(whitelistConfig)
+
+ val tag_1 = Tag.newBuilder().setKey("role").setType(Tag.TagType.STRING).setVStr("haystack").build()
+ val tag_2 = Tag.newBuilder().setKey("errorCode").setType(Tag.TagType.LONG).setVLong(3).build()
+
+ val span_1 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setServiceName("service1")
+ .setSpanId("span-1")
+ .setOperationName("op1")
+ .setDuration(100L)
+ .setStartTime(START_TIME_1)
+ .addTags(tag_1)
+ .build()
+ val span_2 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setServiceName("service1")
+ .setSpanId("span-2")
+ .setOperationName("op2")
+ .setDuration(200L)
+ .setStartTime(START_TIME_2)
+ .addTags(tag_2)
+ .addTags(tag_1)
+ .build()
+ val span_3 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setSpanId("span-3")
+ .setServiceName("service2")
+ .setDuration(1000L)
+ .setStartTime(START_TIME_1)
+ .addTags(tag_2)
+ .setOperationName("op3").build()
+
+ val spanBuffer = SpanBuffer.newBuilder().addChildSpans(span_1).addChildSpans(span_2).addChildSpans(span_3).setTraceId(TRACE_ID).build()
+ val doc = generator.createIndexDocument(TRACE_ID, spanBuffer).get
+ doc.json shouldBe "{\"traceid\":\"trace_id\",\"rootduration\":0,\"starttime\":1529042838000000,\"spans\":[{\"role\":[\"haystack\"],\"servicename\":\"service1\",\"starttime\":[1529042838000000],\"duration\":[100],\"operationname\":\"op1\"},{\"role\":[\"haystack\"],\"servicename\":\"service1\",\"starttime\":[1529042848000000],\"errorcode\":[3],\"duration\":[200],\"operationname\":\"op2\"},{\"servicename\":\"service2\",\"starttime\":[1529042838000000],\"errorcode\":[3],\"duration\":[1000],\"operationname\":\"op3\"}]}"
+ }
+
+ it ("should respect enabled flag of tags create right json document for indexing") {
+ val indexableTags = List(
+ WhitelistIndexField(name = "role", IndexFieldType.string, enabled = false),
+ WhitelistIndexField(name = "errorCode", `type` = IndexFieldType.long))
+ val whitelistConfig = WhitelistIndexFieldConfiguration()
+ whitelistConfig.onReload(Serialization.write(WhiteListIndexFields(indexableTags)))
+ val generator = new IndexDocumentGenerator(whitelistConfig)
+
+ val tag_1 = Tag.newBuilder().setKey("role").setType(Tag.TagType.STRING).setVStr("haystack").build()
+ val tag_2 = Tag.newBuilder().setKey("errorCode").setType(Tag.TagType.LONG).setVLong(3).build()
+
+ val span_1 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setSpanId("span-1")
+ .setServiceName("service1")
+ .setOperationName("op1")
+ .setDuration(100L)
+ .setStartTime(START_TIME_1)
+ .addTags(tag_1)
+ .build()
+ val span_2 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setSpanId("span-2")
+ .setServiceName("service1")
+ .setOperationName("op2")
+ .setDuration(200L)
+ .setStartTime(START_TIME_2)
+ .addTags(tag_2)
+ .build()
+
+ val spanBuffer = SpanBuffer.newBuilder().addChildSpans(span_1).addChildSpans(span_2).setTraceId(TRACE_ID).build()
+ val doc = generator.createIndexDocument(TRACE_ID, spanBuffer).get
+ doc.json shouldBe "{\"traceid\":\"trace_id\",\"rootduration\":0,\"starttime\":1529042838000000,\"spans\":[{\"servicename\":\"service1\",\"starttime\":[1529042838000000],\"duration\":[100],\"operationname\":\"op1\"},{\"servicename\":\"service1\",\"starttime\":[1529042848000000],\"errorcode\":[3],\"duration\":[200],\"operationname\":\"op2\"}]}"
+ }
+
+ it ("one more test to verify the tags are indexed") {
+ val indexableTags = List(
+ WhitelistIndexField(name = "errorCode", `type` = IndexFieldType.long))
+ val whitelistConfig = WhitelistIndexFieldConfiguration()
+ whitelistConfig.onReload(Serialization.write(WhiteListIndexFields(indexableTags)))
+ val generator = new IndexDocumentGenerator(whitelistConfig)
+
+ val tag_1 = Tag.newBuilder().setKey("errorCode").setType(Tag.TagType.LONG).setVLong(5).build()
+ val tag_2 = Tag.newBuilder().setKey("errorCode").setType(Tag.TagType.LONG).setVLong(3).build()
+
+ val span_1 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setServiceName("service1")
+ .setSpanId("span-1")
+ .setOperationName("op1")
+ .setDuration(100L)
+ .addTags(tag_1)
+ .build()
+ val span_2 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setServiceName("service1")
+ .setSpanId("span-2")
+ .setOperationName("op2")
+ .setDuration(200L)
+ .addTags(tag_2)
+ .build()
+
+ val spanBuffer = SpanBuffer.newBuilder().addChildSpans(span_1).addChildSpans(span_2).setTraceId(TRACE_ID).build()
+ val doc = generator.createIndexDocument(TRACE_ID, spanBuffer).get
+ doc.json shouldBe "{\"traceid\":\"trace_id\",\"rootduration\":0,\"starttime\":0,\"spans\":[{\"servicename\":\"service1\",\"starttime\":[0],\"errorcode\":[5],\"duration\":[100],\"operationname\":\"op1\"},{\"servicename\":\"service1\",\"starttime\":[0],\"errorcode\":[3],\"duration\":[200],\"operationname\":\"op2\"}]}"
+ }
+
+ it ("should extract unique tag values along with serviceName, operationName and duration and create json document for indexing") {
+ val indexableTags = List(
+ WhitelistIndexField(name = "role", `type` = IndexFieldType.string),
+ WhitelistIndexField(name = "errorCode", `type` = IndexFieldType.long))
+ val whitelistConfig = WhitelistIndexFieldConfiguration()
+ whitelistConfig.onReload(Serialization.write(WhiteListIndexFields(indexableTags)))
+ val generator = new IndexDocumentGenerator(whitelistConfig)
+
+ val tag_1 = Tag.newBuilder().setKey("role").setType(Tag.TagType.STRING).setVStr("haystack").build()
+ val tag_2 = Tag.newBuilder().setKey("errorCode").setType(Tag.TagType.LONG).setVLong(3).build()
+
+ val span_1 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setServiceName("service1")
+ .setSpanId("span-1")
+ .setOperationName("op1")
+ .setDuration(100L)
+ .addTags(tag_1)
+ .build()
+ val span_2 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setServiceName("service1")
+ .setSpanId("span-2")
+ .setOperationName("op2")
+ .setDuration(200L)
+ .addTags(tag_2)
+ .build()
+
+ val spanBuffer = SpanBuffer.newBuilder().addChildSpans(span_1).addChildSpans(span_2).setTraceId(TRACE_ID).build()
+ val doc = generator.createIndexDocument(TRACE_ID, spanBuffer).get
+ doc.json shouldBe "{\"traceid\":\"trace_id\",\"rootduration\":0,\"starttime\":0,\"spans\":[{\"role\":[\"haystack\"],\"servicename\":\"service1\",\"starttime\":[0],\"duration\":[100],\"operationname\":\"op1\"},{\"servicename\":\"service1\",\"starttime\":[0],\"errorcode\":[3],\"duration\":[200],\"operationname\":\"op2\"}]}"
+ }
+
+ it ("should extract tags, log values along with serviceName, operationName and duration and create json document for indexing") {
+ val indexableTags = List(
+ WhitelistIndexField(name = "role", `type` = IndexFieldType.string),
+ WhitelistIndexField(name = "errorCode", `type` = IndexFieldType.long),
+ WhitelistIndexField(name = "exception", `type` = IndexFieldType.string))
+
+ val whitelistConfig = WhitelistIndexFieldConfiguration()
+ whitelistConfig.onReload(Serialization.write(WhiteListIndexFields(indexableTags)))
+ val generator = new IndexDocumentGenerator(whitelistConfig)
+
+ val tag_1 = Tag.newBuilder().setKey("role").setType(Tag.TagType.STRING).setVStr("haystack").build()
+ val tag_2 = Tag.newBuilder().setKey("errorCode").setType(Tag.TagType.LONG).setVLong(3).build()
+ val log_1 = Log.newBuilder()
+ .addFields(Tag.newBuilder().setKey("exception").setType(Tag.TagType.STRING).setVStr("xxx-yy-zzz").build())
+ .setTimestamp(100L)
+ val log_2 = Log.newBuilder()
+ .addFields(Tag.newBuilder().setKey("exception").setType(Tag.TagType.STRING).setVStr("aaa-bb-cccc").build())
+ .setTimestamp(200L)
+
+ val span_1 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setServiceName("service1")
+ .setSpanId("span-1")
+ .setOperationName("op1")
+ .setDuration(100L)
+ .setStartTime(START_TIME_1)
+ .addTags(tag_1)
+ .build()
+ val span_2 = Span.newBuilder().setTraceId("traceId")
+ .setServiceName("service1")
+ .setSpanId("span-2")
+ .setOperationName("op2")
+ .setDuration(200L)
+ .setStartTime(START_TIME_2)
+ .addTags(tag_2)
+ .addLogs(log_1)
+ .addLogs(log_2)
+ .build()
+
+ val spanBuffer = SpanBuffer.newBuilder().addChildSpans(span_1).addChildSpans(span_2).setTraceId(TRACE_ID).build()
+ val doc = generator.createIndexDocument(TRACE_ID, spanBuffer).get
+ doc.json shouldBe "{\"traceid\":\"trace_id\",\"rootduration\":0,\"starttime\":1529042838000000,\"spans\":[{\"role\":[\"haystack\"],\"servicename\":\"service1\",\"starttime\":[1529042838000000],\"duration\":[100],\"operationname\":\"op1\"},{\"servicename\":\"service1\",\"starttime\":[1529042848000000],\"errorcode\":[3],\"duration\":[200],\"operationname\":\"op2\"}]}"
+ }
+
+ it("should transform the tags for all data types like bool, long, double to string type") {
+ val indexableTags = List(
+ WhitelistIndexField(name = "errorCode", `type` = IndexFieldType.string),
+ WhitelistIndexField(name = "isErrored", `type` = IndexFieldType.string),
+ WhitelistIndexField(name = "exception", `type` = IndexFieldType.string))
+ val whitelistConfig = WhitelistIndexFieldConfiguration()
+ whitelistConfig.onReload(Serialization.write(WhiteListIndexFields(indexableTags)))
+ val generator = new IndexDocumentGenerator(whitelistConfig)
+
+ val tag_1 = Tag.newBuilder().setKey("isErrored").setType(Tag.TagType.BOOL).setVBool(true).build()
+ val tag_2 = Tag.newBuilder().setKey("errorCode").setType(Tag.TagType.LONG).setVLong(500).build()
+ val log_1 = Log.newBuilder()
+ .addFields(Tag.newBuilder().setKey("exception").setType(Tag.TagType.BINARY).setVBytes(ByteString.copyFromUtf8("xxx-yy-zzz")).build())
+ .setTimestamp(100L)
+ val span_1 = Span.newBuilder().setTraceId(TRACE_ID)
+ .setServiceName("service1")
+ .setSpanId("span-1")
+ .setOperationName("op1")
+ .setDuration(100L)
+ .setStartTime(START_TIME_1)
+ .addTags(tag_1)
+ .addTags(tag_2)
+ .addLogs(log_1)
+ .build()
+ val spanBuffer = SpanBuffer.newBuilder().addChildSpans(span_1).setTraceId(TRACE_ID).build()
+
+ val doc = generator.createIndexDocument(TRACE_ID, spanBuffer)
+ doc.get.json shouldEqual "{\"traceid\":\"trace_id\",\"rootduration\":0,\"starttime\":1529042838000000,\"spans\":[{\"servicename\":\"service1\",\"iserrored\":[\"true\"],\"starttime\":[1529042838000000],\"errorcode\":[\"500\"],\"duration\":[100],\"operationname\":\"op1\"}]}"
+ doc.get.json.contains("iserrored") shouldBe true
+ }
+ }
+}
\ No newline at end of file
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/SpanIndexProcessorSpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/SpanIndexProcessorSpec.scala
new file mode 100644
index 000000000..292a67682
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/SpanIndexProcessorSpec.scala
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.indexer.unit
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.commons.packer.{NoopPacker, PackedMessage, PackerType}
+import com.expedia.www.haystack.trace.indexer.config.entities.SpanAccumulatorConfiguration
+import com.expedia.www.haystack.trace.indexer.processors.SpanIndexProcessor
+import com.expedia.www.haystack.trace.indexer.store.SpanBufferMemoryStoreSupplier
+import com.expedia.www.haystack.trace.indexer.store.data.model.SpanBufferWithMetadata
+import com.expedia.www.haystack.trace.indexer.store.traits.SpanBufferKeyValueStore
+import com.expedia.www.haystack.trace.indexer.writers.TraceWriter
+import org.apache.kafka.clients.consumer.ConsumerRecord
+import org.apache.kafka.common.record.TimestampType
+import org.easymock.EasyMock
+import org.easymock.EasyMock._
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FunSpec, Matchers}
+
+import scala.collection.mutable
+
+class SpanIndexProcessorSpec extends FunSpec with Matchers with EasyMockSugar {
+ private implicit val executor: scala.concurrent.ExecutionContext = scala.concurrent.ExecutionContext.global
+
+ private val TRACE_ID = "traceid"
+ private val startRecordTimestamp = System.currentTimeMillis()
+ private val timestampInterval = 100
+ private val maxSpans = 10
+ private val bufferingWindow = 10000
+ private val startRecordOffset = 11
+ private val accumulatorConfig = SpanAccumulatorConfiguration(10, 100, 2000, bufferingWindow, PackerType.NONE)
+
+ describe("Span Index Processor") {
+ it("should process the records for a partition and return the offsets to commit") {
+ // mock entities
+ val mockStore = mock[SpanBufferKeyValueStore]
+ val storeSupplier = new SpanBufferMemoryStoreSupplier(10, 100) {
+ override def get(): SpanBufferKeyValueStore = mockStore
+ }
+
+ val mockBackend = mock[TraceWriter]
+
+ val processor = new SpanIndexProcessor(accumulatorConfig, storeSupplier, Seq(mockBackend), new NoopPacker[SpanBuffer])(executor)
+ val (spanBufferWithMetadata, records) = createConsumerRecordsAndSetStoreExpectation(maxSpans, startRecordTimestamp, timestampInterval, startRecordOffset, mockStore)
+ val finalStreamTimestamp = startRecordTimestamp + ((maxSpans - 1) * timestampInterval)
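+ // span buffers older than (latest record timestamp - buffering window) should be evicted and written out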
+
+ val packedMessage = EasyMock.newCapture[PackedMessage[SpanBuffer]]()
+ val writeTraceIdCapture = EasyMock.newCapture[String]()
+ val writeLastRecordCapture = EasyMock.newCapture[Boolean]()
+
+ expecting {
+ mockStore.addEvictionListener(processor)
+ mockStore.init()
+ mockStore.getAndRemoveSpanBuffersOlderThan(finalStreamTimestamp - bufferingWindow).andReturn(mutable.ListBuffer(spanBufferWithMetadata))
+ mockBackend.writeAsync(capture(writeTraceIdCapture), capture(packedMessage), capture(writeLastRecordCapture))
+ mockStore.close()
+ }
+
+ whenExecuting(mockStore, mockBackend) {
+ processor.init()
+ val offsets = processor.process(records)
+ SpanBuffer.parseFrom(packedMessage.getValue.packedDataBytes).getChildSpansCount shouldBe maxSpans
+ writeTraceIdCapture.getValue shouldBe TRACE_ID
+ writeLastRecordCapture.getValue shouldBe true
+ offsets.get.offset() shouldBe startRecordOffset
+
+ processor.close()
+ }
+ }
+
+ it("should process the records for a partition, and if store does not emit any 'old' spanBuffers, then writers will not be called and no offsets will be committted") {
+ // mock entities
+ val mockStore = mock[SpanBufferKeyValueStore]
+ val storeSupplier = new SpanBufferMemoryStoreSupplier(10, 100) {
+ override def get(): SpanBufferKeyValueStore = mockStore
+ }
+
+ val mockBackend = mock[TraceWriter]
+
+ val processor = new SpanIndexProcessor(accumulatorConfig, storeSupplier, Seq(mockBackend), new NoopPacker)(executor)
+ val (_, records) = createConsumerRecordsAndSetStoreExpectation(maxSpans, startRecordTimestamp, timestampInterval, startRecordOffset, mockStore)
+ val finalStreamTimestamp = startRecordTimestamp + ((maxSpans - 1) * timestampInterval)
+
+ expecting {
+ mockStore.addEvictionListener(processor)
+ mockStore.init()
+ mockStore.getAndRemoveSpanBuffersOlderThan(finalStreamTimestamp - bufferingWindow).andReturn(mutable.ListBuffer())
+ }
+
+ whenExecuting(mockStore, mockBackend) {
+ processor.init()
+ val offsets = processor.process(records)
+ offsets shouldBe 'empty
+ }
+ }
+ }
+
+ private def createConsumerRecordsAndSetStoreExpectation(maxSpans: Int,
+ startRecordTimestamp: Long,
+ timestampInterval: Long,
+ startRecordOffset: Int,
+ mockStore: SpanBufferKeyValueStore):
+ (SpanBufferWithMetadata, Iterable[ConsumerRecord[String, Span]]) = {
+
+ val builder = SpanBuffer.newBuilder().setTraceId(TRACE_ID)
+ val spanBufferWithMetadata = SpanBufferWithMetadata(builder, startRecordTimestamp, startRecordOffset)
+
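+ // the for-comprehension builds the consumer records and, through the '_ =' bindings, registers the store expectations as a side effect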
+ val consumerRecords =
+ for (idx <- 0 until maxSpans;
+ span = Span.newBuilder().setTraceId(TRACE_ID).setSpanId(idx.toString).build();
+ timestamp = startRecordTimestamp + (idx * timestampInterval);
+ _ = builder.addChildSpans(span);
+ _ = mockStore.addOrUpdateSpanBuffer(TRACE_ID, span, timestamp, idx + startRecordOffset).andReturn(spanBufferWithMetadata))
+ yield new ConsumerRecord[String, Span]("topic",
+ 0,
+ idx + startRecordOffset,
+ timestamp,
+ TimestampType.CREATE_TIME,
+ 0, 0, 0,
+ TRACE_ID,
+ span)
+
+ (spanBufferWithMetadata, consumerRecords)
+ }
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/SpanSerdeSpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/SpanSerdeSpec.scala
new file mode 100644
index 000000000..c2b9efbc1
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/SpanSerdeSpec.scala
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.unit
+
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.trace.indexer.serde.SpanDeserializer
+import org.scalatest.{FunSpec, Matchers}
+
+class SpanSerdeSpec extends FunSpec with Matchers {
+
+ private val TRACE_ID = "unique-trace-id"
+ private val PARENT_SPAN_ID = "parent-span-id"
+ private val SPAN_ID = "spanId-1"
+ private val OP_NAME = "testOp"
+ private val TAG_KEY = "tag-key"
+ private val TAG_VALUE = "tag-value"
+ private val TOPIC = "topic"
+
+ private val testSpan = {
+ val tag = Tag.newBuilder().setType(Tag.TagType.STRING).setKey(TAG_KEY).setVStr(TAG_VALUE).build()
+ Span.newBuilder()
+ .setTraceId(TRACE_ID)
+ .setParentSpanId(PARENT_SPAN_ID)
+ .setSpanId(SPAN_ID)
+ .setOperationName(OP_NAME)
+ .addTags(tag)
+ .build()
+ }
+
+ describe("Span Serde") {
+ it("should serialize and deserialize a span object") {
+ val deser = new SpanDeserializer().deserialize(TOPIC, testSpan.toByteArray)
+ deser.getTraceId shouldEqual TRACE_ID
+
+ deser.getParentSpanId shouldEqual PARENT_SPAN_ID
+ deser.getTraceId shouldEqual TRACE_ID
+ deser.getSpanId shouldEqual SPAN_ID
+ deser.getOperationName shouldEqual OP_NAME
+ deser.getTagsCount shouldBe 1
+
+ val tag = deser.getTags(0)
+ tag.getType shouldBe Tag.TagType.STRING
+ tag.getKey shouldBe TAG_KEY
+ tag.getVStr shouldBe TAG_VALUE
+ }
+
+ it("should return null on serializing invalid span bytes") {
+ val data = "invalid span serialized bytes".getBytes()
+ val deser = new SpanDeserializer().deserialize(TOPIC, data)
+ deser shouldBe null
+ }
+ }
+}
diff --git a/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/ThreadSafeBulkBuilderSpec.scala b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/ThreadSafeBulkBuilderSpec.scala
new file mode 100644
index 000000000..412758ef5
--- /dev/null
+++ b/traces/indexer/src/test/scala/com/expedia/www/haystack/trace/indexer/unit/ThreadSafeBulkBuilderSpec.scala
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.indexer.unit
+
+import com.expedia.www.haystack.trace.indexer.writers.es.ThreadSafeBulkBuilder
+import com.google.gson.Gson
+import io.searchbox.core.Index
+import org.scalatest.{FunSpec, Matchers}
+
+class ThreadSafeBulkBuilderSpec extends FunSpec with Matchers {
+ private val gson = new Gson()
+
+ describe("Thread safe bulk builder") {
+ it("should return the bulk object when index operations exceeds the configured maxDocument count") {
+ val builder = new ThreadSafeBulkBuilder(maxDocuments = 3, 1000)
+ var bulkOp = builder.addAction(new Index.Builder("source1").build(), 10, forceBulkCreate = false)
+ bulkOp shouldBe 'empty
+
+ bulkOp = builder.addAction(new Index.Builder("source2").build(), 10, forceBulkCreate = false)
+ bulkOp shouldBe 'empty
+
+ bulkOp = builder.addAction(new Index.Builder("source3").build(), 10, forceBulkCreate = false)
+ var bulkJson = bulkOp.get.getData(gson)
+ bulkJson shouldEqual
+ """{"index":{}}
+ |source1
+ |{"index":{}}
+ |source2
+ |{"index":{}}
+ |source3
+ |""".stripMargin
+
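+ // emitting a bulk resets the builder's document count and accumulated byte size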
+ builder.getDocsCount shouldBe 0
+ builder.getTotalSizeInBytes shouldBe 0
+
+ bulkOp = builder.addAction(new Index.Builder("source4").build(), 10, forceBulkCreate = true)
+ bulkJson = bulkOp.get.getData(gson)
+ bulkJson shouldEqual
+ """{"index":{}}
+ |source4
+ |""".stripMargin
+ }
+
+ it("should return the bulk after size of the index operations exceed the configured threshold") {
+ val builder = new ThreadSafeBulkBuilder(maxDocuments = 10, 100)
+ var bulkOp = builder.addAction(new Index.Builder("source1").build(), 30, forceBulkCreate = false)
+ bulkOp shouldBe 'empty
+
+ bulkOp = builder.addAction(new Index.Builder("source2").build(), 30, forceBulkCreate = false)
+ bulkOp shouldBe 'empty
+
+ bulkOp = builder.addAction(new Index.Builder("source3").build(), 80, forceBulkCreate = false)
+ val bulkJson = bulkOp.get.getData(gson)
+ bulkJson shouldEqual """{"index":{}}
+ |source1
+ |{"index":{}}
+ |source2
+ |{"index":{}}
+ |source3
+ |""".stripMargin
+
+ builder.getDocsCount shouldBe 0
+ builder.getTotalSizeInBytes shouldBe 0
+ }
+
+ it("should return the bulk if forceBulkCreate attribute is set") {
+ val builder = new ThreadSafeBulkBuilder(maxDocuments = 10, 1000)
+ var bulkOp = builder.addAction(new Index.Builder("source1").build(), 30, forceBulkCreate = false)
+ bulkOp shouldBe 'empty
+
+ bulkOp = builder.addAction(new Index.Builder("source2").build(), 30, forceBulkCreate = false)
+ bulkOp shouldBe 'empty
+
+ bulkOp = builder.addAction(new Index.Builder("source3").build(), 80, forceBulkCreate = false)
+ bulkOp shouldBe 'empty
+
+ bulkOp = builder.addAction(new Index.Builder("source4").build(), 80, forceBulkCreate = true)
+ val bulkJson = bulkOp.get.getData(gson)
+ bulkJson shouldEqual """{"index":{}}
+ |source1
+ |{"index":{}}
+ |source2
+ |{"index":{}}
+ |source3
+ |{"index":{}}
+ |source4
+ |""".stripMargin
+
+ builder.getDocsCount shouldBe 0
+ builder.getTotalSizeInBytes shouldBe 0
+ }
+ }
+}
diff --git a/traces/mvnw b/traces/mvnw
new file mode 100755
index 000000000..5551fde8e
--- /dev/null
+++ b/traces/mvnw
@@ -0,0 +1,286 @@
+#!/bin/sh
+# ----------------------------------------------------------------------------
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ----------------------------------------------------------------------------
+
+# ----------------------------------------------------------------------------
+# Maven2 Start Up script
+#
+# Required ENV vars:
+# ------------------
+# JAVA_HOME - location of a JDK home dir
+#
+# Optional ENV vars
+# -----------------
+# M2_HOME - location of maven2's installed home dir
+# MAVEN_OPTS - parameters passed to the Java VM when running Maven
+# e.g. to debug Maven itself, use
+# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+# ----------------------------------------------------------------------------
+
+if [ -z "$MAVEN_SKIP_RC" ] ; then
+
+ if [ -f /etc/mavenrc ] ; then
+ . /etc/mavenrc
+ fi
+
+ if [ -f "$HOME/.mavenrc" ] ; then
+ . "$HOME/.mavenrc"
+ fi
+
+fi
+
+# OS specific support. $var _must_ be set to either true or false.
+cygwin=false;
+darwin=false;
+mingw=false
+case "`uname`" in
+ CYGWIN*) cygwin=true ;;
+ MINGW*) mingw=true;;
+ Darwin*) darwin=true
+ # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
+ # See https://developer.apple.com/library/mac/qa/qa1170/_index.html
+ if [ -z "$JAVA_HOME" ]; then
+ if [ -x "/usr/libexec/java_home" ]; then
+ export JAVA_HOME="`/usr/libexec/java_home`"
+ else
+ export JAVA_HOME="/Library/Java/Home"
+ fi
+ fi
+ ;;
+esac
+
+if [ -z "$JAVA_HOME" ] ; then
+ if [ -r /etc/gentoo-release ] ; then
+ JAVA_HOME=`java-config --jre-home`
+ fi
+fi
+
+if [ -z "$M2_HOME" ] ; then
+ ## resolve links - $0 may be a link to maven's home
+ PRG="$0"
+
+ # need this for relative symlinks
+ while [ -h "$PRG" ] ; do
+ ls=`ls -ld "$PRG"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG="`dirname "$PRG"`/$link"
+ fi
+ done
+
+ saveddir=`pwd`
+
+ M2_HOME=`dirname "$PRG"`/..
+
+ # make it fully qualified
+ M2_HOME=`cd "$M2_HOME" && pwd`
+
+ cd "$saveddir"
+ # echo Using m2 at $M2_HOME
+fi
+
+# For Cygwin, ensure paths are in UNIX format before anything is touched
+if $cygwin ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --unix "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
+fi
+
+# For Mingw, ensure paths are in UNIX format before anything is touched
+if $mingw ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME="`(cd "$M2_HOME"; pwd)`"
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
+ # TODO classpath?
+fi
+
+if [ -z "$JAVA_HOME" ]; then
+ javaExecutable="`which javac`"
+ if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
+ # readlink(1) is not available as standard on Solaris 10.
+ readLink=`which readlink`
+ if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
+ if $darwin ; then
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
+ else
+ javaExecutable="`readlink -f \"$javaExecutable\"`"
+ fi
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaHome=`expr "$javaHome" : '\(.*\)/bin'`
+ JAVA_HOME="$javaHome"
+ export JAVA_HOME
+ fi
+ fi
+fi
+
+if [ -z "$JAVACMD" ] ; then
+ if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD="$JAVA_HOME/jre/sh/java"
+ else
+ JAVACMD="$JAVA_HOME/bin/java"
+ fi
+ else
+ JAVACMD="`which java`"
+ fi
+fi
+
+if [ ! -x "$JAVACMD" ] ; then
+ echo "Error: JAVA_HOME is not defined correctly." >&2
+ echo " We cannot execute $JAVACMD" >&2
+ exit 1
+fi
+
+if [ -z "$JAVA_HOME" ] ; then
+ echo "Warning: JAVA_HOME environment variable is not set."
+fi
+
+CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
+
+# traverses directory structure from process work directory to filesystem root
+# first directory with .mvn subdirectory is considered project base directory
+find_maven_basedir() {
+
+ if [ -z "$1" ]
+ then
+ echo "Path not specified to find_maven_basedir"
+ return 1
+ fi
+
+ basedir="$1"
+ wdir="$1"
+ while [ "$wdir" != '/' ] ; do
+ if [ -d "$wdir"/.mvn ] ; then
+ basedir=$wdir
+ break
+ fi
+ # workaround for JBEAP-8937 (on Solaris 10/Sparc)
+ if [ -d "${wdir}" ]; then
+ wdir=`cd "$wdir/.."; pwd`
+ fi
+ # end of workaround
+ done
+ echo "${basedir}"
+}
+
+# concatenates all lines of a file
+concat_lines() {
+ if [ -f "$1" ]; then
+ echo "$(tr -s '\n' ' ' < "$1")"
+ fi
+}
+
+BASE_DIR=`find_maven_basedir "$(pwd)"`
+if [ -z "$BASE_DIR" ]; then
+ exit 1;
+fi
+
+##########################################################################################
+# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+# This allows using the maven wrapper in projects that prohibit checking in binary data.
+##########################################################################################
+if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found .mvn/wrapper/maven-wrapper.jar"
+ fi
+else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
+ fi
+ jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
+ while IFS="=" read key value; do
+ case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
+ esac
+ done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Downloading from: $jarUrl"
+ fi
+ wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
+
+ if command -v wget > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found wget ... using wget"
+ fi
+ wget "$jarUrl" -O "$wrapperJarPath"
+ elif command -v curl > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found curl ... using curl"
+ fi
+ curl -o "$wrapperJarPath" "$jarUrl"
+ else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Falling back to using Java to download"
+ fi
+ javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
+ if [ -e "$javaClass" ]; then
+ if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Compiling MavenWrapperDownloader.java ..."
+ fi
+ # Compiling the Java class
+ ("$JAVA_HOME/bin/javac" "$javaClass")
+ fi
+ if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ # Running the downloader
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Running MavenWrapperDownloader.java ..."
+ fi
+ ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
+ fi
+ fi
+ fi
+fi
+##########################################################################################
+# End of extension
+##########################################################################################
+
+export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
+if [ "$MVNW_VERBOSE" = true ]; then
+ echo $MAVEN_PROJECTBASEDIR
+fi
+MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
+
+# For Cygwin, switch paths to Windows format before running java
+if $cygwin; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --path --windows "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
+ [ -n "$MAVEN_PROJECTBASEDIR" ] &&
+ MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
+fi
+
+WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+exec "$JAVACMD" \
+ $MAVEN_OPTS \
+ -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
+ "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
+ ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
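For illustration, here is a minimal Scala sketch of the wrapper-URL resolution performed by the download extension above: the script reads `wrapperUrl` from `.mvn/wrapper/maven-wrapper.properties` and falls back to the Maven Central URL for maven-wrapper 0.4.2 when the key is absent. `WrapperUrlResolver` is a hypothetical name used only for this sketch; the shell and batch scripts remain the source of truth.

```scala
import java.nio.file.{Files, Paths}
import scala.collection.JavaConverters._

object WrapperUrlResolver {
  // fallback used when maven-wrapper.properties does not define wrapperUrl
  private val DefaultUrl =
    "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"

  // mirrors the shell loop: scan key=value lines and take the first wrapperUrl
  def resolve(baseDir: String): String = {
    val props = Paths.get(baseDir, ".mvn", "wrapper", "maven-wrapper.properties")
    if (!Files.exists(props)) DefaultUrl
    else
      Files.readAllLines(props).asScala
        .map(_.split("=", 2))
        .collectFirst { case Array("wrapperUrl", url) => url.trim }
        .getOrElse(DefaultUrl)
  }
}
```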
diff --git a/traces/mvnw.cmd b/traces/mvnw.cmd
new file mode 100755
index 000000000..e5cfb0ae9
--- /dev/null
+++ b/traces/mvnw.cmd
@@ -0,0 +1,161 @@
+@REM ----------------------------------------------------------------------------
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements. See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership. The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License. You may obtain a copy of the License at
+@REM
+@REM http://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied. See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM ----------------------------------------------------------------------------
+
+@REM ----------------------------------------------------------------------------
+@REM Maven2 Start Up Batch script
+@REM
+@REM Required ENV vars:
+@REM JAVA_HOME - location of a JDK home dir
+@REM
+@REM Optional ENV vars
+@REM M2_HOME - location of maven2's installed home dir
+@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
+@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
+@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
+@REM e.g. to debug Maven itself, use
+@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+@REM ----------------------------------------------------------------------------
+
+@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
+@echo off
+@REM set title of command window
+title %0
+@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
+@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
+
+@REM set %HOME% to equivalent of $HOME
+if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
+
+@REM Execute a user defined script before this one
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
+@REM check for pre script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
+if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
+:skipRcPre
+
+@setlocal
+
+set ERROR_CODE=0
+
+@REM To isolate internal variables from possible post scripts, we use another setlocal
+@setlocal
+
+@REM ==== START VALIDATION ====
+if not "%JAVA_HOME%" == "" goto OkJHome
+
+echo.
+echo Error: JAVA_HOME not found in your environment. >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+:OkJHome
+if exist "%JAVA_HOME%\bin\java.exe" goto init
+
+echo.
+echo Error: JAVA_HOME is set to an invalid directory. >&2
+echo JAVA_HOME = "%JAVA_HOME%" >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+@REM ==== END VALIDATION ====
+
+:init
+
+@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
+@REM Fallback to current working directory if not found.
+
+set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
+IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
+
+set EXEC_DIR=%CD%
+set WDIR=%EXEC_DIR%
+:findBaseDir
+IF EXIST "%WDIR%"\.mvn goto baseDirFound
+cd ..
+IF "%WDIR%"=="%CD%" goto baseDirNotFound
+set WDIR=%CD%
+goto findBaseDir
+
+:baseDirFound
+set MAVEN_PROJECTBASEDIR=%WDIR%
+cd "%EXEC_DIR%"
+goto endDetectBaseDir
+
+:baseDirNotFound
+set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
+cd "%EXEC_DIR%"
+
+:endDetectBaseDir
+
+IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
+
+@setlocal EnableExtensions EnableDelayedExpansion
+for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
+@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
+
+:endReadAdditionalConfig
+
+SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
+set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
+set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
+FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO (
+ IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
+)
+
+@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
+if exist %WRAPPER_JAR% (
+ echo Found %WRAPPER_JAR%
+) else (
+ echo Couldn't find %WRAPPER_JAR%, downloading it ...
+ echo Downloading from: %DOWNLOAD_URL%
+ powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"
+ echo Finished downloading %WRAPPER_JAR%
+)
+@REM End of extension
+
+%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
+if ERRORLEVEL 1 goto error
+goto end
+
+:error
+set ERROR_CODE=1
+
+:end
+@endlocal & set ERROR_CODE=%ERROR_CODE%
+
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
+@REM check for post script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
+if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
+:skipRcPost
+
+@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
+if "%MAVEN_BATCH_PAUSE%" == "on" pause
+
+if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
+
+exit /B %ERROR_CODE%
diff --git a/traces/pom.xml b/traces/pom.xml
new file mode 100644
index 000000000..279b720ac
--- /dev/null
+++ b/traces/pom.xml
@@ -0,0 +1,528 @@
+
+
+
+ 4.0.0
+ com.expedia.www
+ haystack-traces
+ 1.0.9-SNAPSHOT
+ pom
+
+
+ commons
+ indexer
+ reader
+ backends
+
+
+
+ scm:git:git://github.com/ExpediaDotCom/haystack-traces.git
+ scm:git:ssh://github.com/ExpediaDotCom/haystack-traces.git
+ http://github.com/ExpediaDotCom/haystack-traces
+
+
+ ${project.groupId}:${project.artifactId}
+ Code to build the haystack indexer and reader, which move and read spans from the span stream into
+ elastic search, Cassandra, etc.
+
+ https://github.com/ExpediaDotCom/haystack-traces/tree/master
+
+
+
+
+ Apache License, Version 2.0
+ http://www.apache.org/licenses/LICENSE-2.0.txt
+ repo
+
+
+
+
+
+ haystack
+ Haystack Team
+ haystack@expedia.com
+ https://github.com/ExpediaDotCom/haystack
+
+
+
+
+ 1.8
+ 3.4.0
+ 1.0.65
+
+ 1.2.3
+ 1.7.25
+ 3.4
+ 2.6
+ 3.5.3
+ 1.3.1
+ 1.1.7.1
+ 1.3.7-2
+
+ 5.3.2
+ 6.0.1
+ 4.5.3
+ 1.11.20
+ 2
+ 12
+ 5
+ 1.0.5
+ ${scala.major.version}.${scala.minor.version}
+ ${scala.major.version}.${scala.minor.version}.${scala.tiny.version}
+ 6.8
+ 1.6.0
+ 3.0.3
+ 1.7.1
+ 4.1.16.Final
+ 1.3.0
+ ${basedir}/../checkstyles/scalastyle_config.xml
+ 1.6
+ 3.0.1
+ 1.6.8
+ true
+
+
+
+
+
+
+ com.google.protobuf
+ protobuf-java
+ ${protobuf.version}
+
+
+ io.grpc
+ grpc-protobuf
+ ${grpc.version}
+
+
+ io.grpc
+ grpc-stub
+ ${grpc.version}
+
+
+ io.grpc
+ grpc-netty
+ ${grpc.version}
+
+
+ io.grpc
+ grpc-services
+ ${grpc.version}
+
+
+
+ io.netty
+ netty-handler
+ ${netty.handler.version}
+
+
+
+
+ org.scala-lang
+ scala-library
+ ${scala-library.version}
+
+
+ org.scala-lang
+ scala-reflect
+ ${scala-library.version}
+
+
+
+
+ com.typesafe
+ config
+ ${typesafe-config.version}
+
+
+ io.dropwizard.metrics
+ metrics-core
+ 3.1.2
+
+
+
+
+ ch.qos.logback
+ logback-classic
+ ${logback.version}
+
+
+ ch.qos.logback
+ logback-core
+ ${logback.version}
+
+
+ org.slf4j
+ slf4j-api
+ ${slf4j-api.version}
+
+
+
+ com.expedia.www
+ haystack-logback-metrics-appender
+ ${haystack.logback.metrics.appender.version}
+
+
+
+
+ io.searchbox
+ jest
+ ${jest.version}
+
+
+
+ org.elasticsearch
+ elasticsearch
+ ${elasticsearch.version}
+
+
+
+ org.json4s
+ json4s-jackson_${scala.major.minor.version}
+ ${json4s.version}
+
+
+
+ org.json4s
+ json4s-ext_${scala.major.minor.version}
+ ${json4s.version}
+
+
+
+ org.apache.commons
+ commons-lang3
+ ${commons-lang.version}
+
+
+
+ commons-io
+ commons-io
+ ${commons-io.version}
+
+
+
+
+ com.amazonaws
+ aws-java-sdk-sts
+ ${aws-sdk.version}
+
+
+
+ com.amazonaws
+ aws-java-sdk-ec2
+ ${aws-sdk.version}
+
+
+
+
+ vc.inreach.aws
+ aws-signing-request-interceptor
+ 0.0.22
+
+
+
+ org.apache.httpcomponents
+ httpclient
+ ${httpclient.version}
+
+
+
+
+ com.expedia.www
+ haystack-commons
+ ${haystack-commons.version}
+
+
+
+ org.xerial.snappy
+ snappy-java
+ ${snappy.version}
+
+
+ com.github.luben
+ zstd-jni
+ ${zstd.version}
+
+
+
+
+
+
+ com.expedia.www
+ haystack-commons
+
+
+
+ io.dropwizard.metrics
+ metrics-core
+
+
+
+ com.typesafe
+ config
+
+
+
+ org.scala-lang
+ scala-library
+
+
+
+ org.scala-lang
+ scala-reflect
+
+
+
+ ch.qos.logback
+ logback-classic
+
+
+
+ ch.qos.logback
+ logback-core
+
+
+
+ org.slf4j
+ slf4j-api
+
+
+
+ commons-io
+ commons-io
+
+
+
+
+ org.scalatest
+ scalatest_${scala.major.minor.version}
+ ${scalatest.version}
+ test
+
+
+ org.pegdown
+ pegdown
+ ${pegdown.version}
+ test
+
+
+ junit
+ junit
+ 4.12
+ test
+
+
+ org.easymock
+ easymock
+ 3.4
+ test
+
+
+
+
+ ${basedir}/src/main/scala
+
+
+ ${basedir}/src/main/resources
+ true
+
+
+
+
+
+ org.scalatest
+ scalatest-maven-plugin
+ 1.0
+
+
+ test
+
+ test
+
+
+
+
+
+
+ com.github.os72
+ protoc-jar-maven-plugin
+ 3.3.0.1
+
+
+
+ org.apache.maven.plugins
+ maven-shade-plugin
+ 1.6
+
+
+
+ org.scalastyle
+ scalastyle-maven-plugin
+ 0.8.0
+
+ true
+ false
+ ${scalastyle.config.location}
+ ${basedir}/src/main/scala
+ ${basedir}/src/test/scala
+ ${project.build.directory}/scalastyle-output.xml
+ UTF-8
+
+
+
+ compile-scalastyle
+
+ check
+
+ compile
+
+
+
+
+ net.alchim31.maven
+ scala-maven-plugin
+ 3.2.1
+
+
+ scala-compile-first
+ process-resources
+
+ add-source
+ compile
+
+
+
+ scala-test-compile
+ process-test-resources
+
+ testCompile
+
+
+
+ attach-javadocs
+
+ doc-jar
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+ 3.6.1
+
+ ${project.jdk.version}
+ ${project.jdk.version}
+
+
+
+ org.scoverage
+ scoverage-maven-plugin
+ ${scoverage.plugin.version}
+
+
+ 75
+ false
+ true
+ ${scala-library.version}
+ true
+
+
+
+ org.apache.maven.plugins
+ maven-source-plugin
+ ${maven-source-plugin.version}
+
+
+ attach-sources
+
+ jar-no-fork
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-gpg-plugin
+ ${maven-gpg-plugin.version}
+
+
+ org.sonatype.plugins
+ nexus-staging-maven-plugin
+ ${nexus-staging-maven-plugin.version}
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-source-plugin
+
+
+ attach-sources
+
+ jar-no-fork
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-gpg-plugin
+
+ ${skipGpg}
+
+
+
+ sign-artifacts
+ verify
+
+ sign
+
+
+
+
+
+ org.sonatype.plugins
+ nexus-staging-maven-plugin
+ true
+
+ ossrh
+ https://oss.sonatype.org/
+ true
+
+
+
+
+
+
+
+ ossrh
+ https://oss.sonatype.org/content/repositories/snapshots
+
+
+ ossrh
+ http://oss.sonatype.org/service/local/staging/deploy/maven2/
+
+
+
diff --git a/traces/reader/Makefile b/traces/reader/Makefile
new file mode 100644
index 000000000..99fe50ba0
--- /dev/null
+++ b/traces/reader/Makefile
@@ -0,0 +1,23 @@
+.PHONY: docker_build prepare_integration_test_env integration_test release
+
+export DOCKER_ORG := expediadotcom
+export DOCKER_IMAGE_NAME := haystack-trace-reader
+PWD := $(shell pwd)
+SERVICE_DEBUG_ON ?= false
+
+docker_build:
+ # build docker image using existing app jar
+ docker build -t $(DOCKER_IMAGE_NAME) -f build/docker/Dockerfile .
+
+prepare_integration_test_env: docker_build
+ # prepare environment to run integration tests against
+ docker-compose -f build/integration-tests/docker-compose.yml -p sandbox up -d
+ sleep 30
+
+integration_test: prepare_integration_test_env
+ cd ../ && ./mvnw -q integration-test -pl reader -am
+ docker-compose -f build/integration-tests/docker-compose.yml -p sandbox stop
+ docker rm $(shell docker ps -a -q)
+
+release:
+ ../deployment/scripts/publish-to-docker-hub.sh
diff --git a/traces/reader/README.md b/traces/reader/README.md
new file mode 100644
index 000000000..a1fb06a98
--- /dev/null
+++ b/traces/reader/README.md
@@ -0,0 +1,14 @@
+# haystack-trace-reader
+
+Service for fetching traces and fields from persistent stores.
+
+## Technical Details
+
+In order to understand this service, we recommend reading the details of the [haystack](https://github.com/ExpediaDotCom/haystack) project.
+This service reads from [TraceBackend]() and [ElasticSearch](https://www.elastic.co/) stores. Its API is exposed as [GRPC](https://grpc.io/) endpoints.
+
+More details will be filled in as we go.
+
+## Building
+
+See the build details in the [Build Section](../README.md).
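Since the reader exposes its API over gRPC, a client can call it with the generated stubs. The sketch below is a minimal, hypothetical example assuming the haystack-idl protos generate a `TraceReaderGrpc` stub with a `getTrace` RPC taking the `TraceRequest` message used by this service; verify the names against the actual generated classes. Port 8088 matches the service config in this module.

```scala
import com.expedia.open.tracing.api.{TraceReaderGrpc, TraceRequest}
import io.grpc.ManagedChannelBuilder

object ReaderClientSketch extends App {
  // plaintext channel to a locally running reader (service.port = 8088)
  val channel = ManagedChannelBuilder.forAddress("localhost", 8088)
    .usePlaintext(true)
    .build()

  val stub = TraceReaderGrpc.newBlockingStub(channel)

  // fetch a fully transformed trace by id
  val trace = stub.getTrace(TraceRequest.newBuilder().setTraceId("some-trace-id").build())
  println(s"fetched ${trace.getChildSpansCount} spans")

  channel.shutdown()
}
```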
diff --git a/traces/reader/build/docker/Dockerfile b/traces/reader/build/docker/Dockerfile
new file mode 100644
index 000000000..276488ef2
--- /dev/null
+++ b/traces/reader/build/docker/Dockerfile
@@ -0,0 +1,30 @@
+FROM openjdk:8-jre
+MAINTAINER Haystack
+
+ENV APP_NAME haystack-trace-reader
+ENV APP_HOME /app/bin
+ENV JMXTRANS_AGENT jmxtrans-agent-1.2.6
+ENV DOCKERIZE_VERSION v0.6.1
+
+ADD https://github.com/jwilder/dockerize/releases/download/${DOCKERIZE_VERSION}/dockerize-alpine-linux-amd64-${DOCKERIZE_VERSION}.tar.gz dockerize.tar.gz
+RUN tar xzf dockerize.tar.gz
+RUN chmod +x dockerize
+
+RUN mkdir -p ${APP_HOME}
+
+COPY target/${APP_NAME}.jar ${APP_HOME}/
+COPY build/docker/start-app.sh ${APP_HOME}/
+RUN chmod +x ${APP_HOME}/start-app.sh
+
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+ wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+ chmod +x /bin/grpc_health_probe
+
+COPY build/docker/jmxtrans-agent.xml ${APP_HOME}/
+ADD https://github.com/jmxtrans/jmxtrans-agent/releases/download/${JMXTRANS_AGENT}/${JMXTRANS_AGENT}.jar ${APP_HOME}/
+
+WORKDIR ${APP_HOME}
+
+EXPOSE 8088
+
+ENTRYPOINT ["./start-app.sh"]
diff --git a/traces/reader/build/docker/jmxtrans-agent.xml b/traces/reader/build/docker/jmxtrans-agent.xml
new file mode 100644
index 000000000..44fc677c7
--- /dev/null
+++ b/traces/reader/build/docker/jmxtrans-agent.xml
@@ -0,0 +1,33 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ${HAYSTACK_GRAPHITE_HOST:monitoring-influxdb-graphite.kube-system.svc}
+ ${HAYSTACK_GRAPHITE_PORT:2003}
+ ${HAYSTACK_GRAPHITE_ENABLED:true}
+ haystack.traces.reader.#hostname#.
+
+ 30
+
diff --git a/traces/reader/build/docker/start-app.sh b/traces/reader/build/docker/start-app.sh
new file mode 100755
index 000000000..ba2c65569
--- /dev/null
+++ b/traces/reader/build/docker/start-app.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+[ -z "$JAVA_XMS" ] && JAVA_XMS=1024m
+[ -z "$JAVA_XMX" ] && JAVA_XMX=1024m
+[ -z "$JAVA_GC_OPTS" ] && JAVA_GC_OPTS="-XX:+UseG1GC"
+
+set -e
+JAVA_OPTS="${JAVA_OPTS} \
+-javaagent:${APP_HOME}/${JMXTRANS_AGENT}.jar=${APP_HOME}/jmxtrans-agent.xml \
+${JAVA_GC_OPTS} \
+-Xmx${JAVA_XMX} \
+-Xms${JAVA_XMS} \
+-XX:+ExitOnOutOfMemoryError \
+-Dapplication.name=${APP_NAME} \
+-Dapplication.home=${APP_HOME}"
+
+if [[ -n "$SERVICE_DEBUG_ON" ]] && [[ "$SERVICE_DEBUG_ON" == true ]]; then
+ JAVA_OPTS="$JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y"
+fi
+
+exec java ${JAVA_OPTS} -jar "${APP_HOME}/${APP_NAME}.jar"
diff --git a/traces/reader/build/integration-tests/docker-app.conf b/traces/reader/build/integration-tests/docker-app.conf
new file mode 100644
index 000000000..d058819a8
--- /dev/null
+++ b/traces/reader/build/integration-tests/docker-app.conf
@@ -0,0 +1,66 @@
+health.status.path = "isHealthy"
+
+service {
+ port = 8088
+ ssl {
+ enabled = false
+ cert.path = ""
+ private.key.path = ""
+ }
+}
+
+backend {
+ client {
+ host = "localhost"
+ port = 8090
+ }
+}
+
+elasticsearch {
+ endpoint = "http://elasticsearch:9200"
+ conn.timeout.ms = 10000
+ read.timeout.ms = 5000
+
+ index {
+ name.prefix = "haystack-traces"
+ type = "spans"
+ }
+}
+
+trace {
+ validators {
+ sequence = [
+ "com.expedia.www.haystack.trace.reader.readers.validators.TraceIdValidator"
+ "com.expedia.www.haystack.trace.reader.readers.validators.ParentIdValidator"
+ "com.expedia.www.haystack.trace.reader.readers.validators.RootValidator"
+ ]
+ }
+
+ transformers {
+ pre {
+ sequence = [
+ "com.expedia.www.haystack.trace.reader.readers.transformers.DeDuplicateSpanTransformer"
+ ]
+ }
+ post {
+ sequence = [
+ "com.expedia.www.haystack.trace.reader.readers.transformers.PartialSpanTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.ClockSkewTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.SortSpanTransformer"
+ ]
+ }
+ }
+}
+
+reload {
+ tables {
+ index.fields.config = "whitelist-index-fields"
+ }
+ config {
+ endpoint = "http://elasticsearch:9200"
+ database.name = "reload-configs"
+ }
+ startup.load = true
+ interval.ms = 5000 # -1 will imply 'no reload'
+}
+
diff --git a/traces/reader/build/integration-tests/docker-compose.yml b/traces/reader/build/integration-tests/docker-compose.yml
new file mode 100644
index 000000000..bbea51ac6
--- /dev/null
+++ b/traces/reader/build/integration-tests/docker-compose.yml
@@ -0,0 +1,8 @@
+version: '3'
+services:
+ elasticsearch:
+ image: elastic/elasticsearch:6.0.1
+ environment:
+ ES_JAVA_OPTS: "-Xms256m -Xmx256m"
+ ports:
+ - "9200:9200"
diff --git a/traces/reader/pom.xml b/traces/reader/pom.xml
new file mode 100644
index 000000000..2d5c7bd9e
--- /dev/null
+++ b/traces/reader/pom.xml
@@ -0,0 +1,181 @@
+
+
+
+
+ haystack-traces
+ com.expedia.www
+ 1.0.9-SNAPSHOT
+
+
+ 4.0.0
+ haystack-trace-reader
+ jar
+
+
+ com.expedia.www.haystack.trace.reader.Service
+ ${project.artifactId}-${project.version}
+ 3.3.0.1
+
+
+
+
+ com.expedia.www
+ haystack-trace-commons
+ ${project.version}
+
+
+
+ com.google.protobuf
+ protobuf-java
+
+
+
+ io.grpc
+ grpc-protobuf
+
+
+
+ io.grpc
+ grpc-stub
+
+
+
+ io.grpc
+ grpc-services
+
+
+
+ io.grpc
+ grpc-netty
+
+
+
+ io.netty
+ netty-handler
+
+
+
+ io.searchbox
+ jest
+
+
+
+ org.elasticsearch
+ elasticsearch
+
+
+
+ org.apache.commons
+ commons-lang3
+
+
+
+ org.apache.httpcomponents
+ httpclient
+
+
+
+ com.amazonaws
+ aws-java-sdk-ec2
+
+
+
+ com.expedia.www
+ haystack-logback-metrics-appender
+
+
+
+
+ com.expedia.www
+ haystack-trace-backend-memory
+ ${project.version}
+ test
+
+
+
+
+ ${finalName}
+
+
+ org.scalatest
+ scalatest-maven-plugin
+
+
+ test
+
+ test
+
+
+ com.expedia.www.haystack.trace.reader.unit
+
+
+
+ integration-test
+ integration-test
+
+ test
+
+
+
+ /src/reader/build/integration-tests/docker-app.conf
+
+ com.expedia.www.haystack.trace.reader.integration
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-shade-plugin
+
+ true
+
+
+ *:*
+
+ META-INF/*.SF
+ META-INF/*.DSA
+ META-INF/*.RSA
+
+
+
+
+
+
+ package
+
+ shade
+
+
+
+
+ reference.conf
+
+
+ ${mainClass}
+
+
+
+
+
+
+
+
+ net.alchim31.maven
+ scala-maven-plugin
+
+
+
+ org.scalastyle
+ scalastyle-maven-plugin
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+
+
+
+
+
diff --git a/traces/reader/src/main/resources/config/base.conf b/traces/reader/src/main/resources/config/base.conf
new file mode 100644
index 000000000..3b728bca8
--- /dev/null
+++ b/traces/reader/src/main/resources/config/base.conf
@@ -0,0 +1,109 @@
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
+
+service {
+ port = 8088
+ ssl {
+ enabled = false
+ cert.path = ""
+ private.key.path = ""
+ }
+ max.message.size = 52428800 # 50MB in bytes
+}
+
+backend {
+ client {
+ host = "localhost"
+ port = 8090
+ }
+
+  # we support multiple grpc-based backends; to add another one, use something like the following.
+  # 'host' and 'port' are required for each client.
+
+ # another_client {
+ # host = "localhost"
+ # port = 8092
+ # }
+}
+
+elasticsearch {
+ client {
+ endpoint = "http://elasticsearch:9200"
+ conn.timeout.ms = 10000
+ read.timeout.ms = 5000
+ }
+ index {
+ spans {
+ name.prefix = "haystack-traces"
+ type = "spans"
+ hour.bucket = 6
+ hour.ttl = 72 // 3 * 24 hours
+ use.root.doc.starttime = true
+ }
+ service.metadata {
+ enabled = true
+ name = "service-metadata"
+ type = "metadata"
+ }
+ }
+
+ # if enabled flag is true, es requests will be signed
+ signing.request.aws {
+ enabled = false
+ region = "us-west-2"
+ service.name = "es"
+    # if 'access.key' is not provided, the DefaultAWSCredentialsProviderChain will be used to resolve credentials
+ access.key = ""
+ secret.key = ""
+ }
+}
+
+trace {
+ validators {
+ sequence = [
+ "com.expedia.www.haystack.trace.reader.readers.validators.TraceIdValidator"
+ "com.expedia.www.haystack.trace.reader.readers.validators.ParentIdValidator"
+ "com.expedia.www.haystack.trace.reader.readers.validators.RootValidator"
+ ]
+ }
+
+ transformers {
+ pre {
+ sequence = [
+ "com.expedia.www.haystack.trace.reader.readers.transformers.DeDuplicateSpanTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.ClientServerEventLogTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.InfrastructureTagTransformer"
+ ]
+ }
+ post {
+ sequence = [
+ # "com.expedia.www.haystack.trace.reader.readers.transformers.OrphanedTraceTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.PartialSpanTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.ServerClientSpanMergeTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.ClockSkewTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.SortSpanTransformer"
+ ]
+ }
+ }
+}
+
+reload {
+ tables {
+ index.fields.config = "whitelist-index-fields"
+ }
+ config {
+ endpoint = "http://elasticsearch:9200"
+ database.name = "reload-configs"
+ }
+ startup.load = true
+ interval.ms = 60000 # -1 will imply 'no reload'
+
+ # if enabled flag is true, es requests will be signed
+ signing.request.aws {
+ enabled = false
+ region = "us-west-2"
+ service.name = "es"
+    # if 'access.key' is not provided, the DefaultAWSCredentialsProviderChain will be used to resolve credentials
+ access.key = ""
+ secret.key = ""
+ }
+}
diff --git a/traces/reader/src/main/resources/logback.xml b/traces/reader/src/main/resources/logback.xml
new file mode 100644
index 000000000..ab4e25a63
--- /dev/null
+++ b/traces/reader/src/main/resources/logback.xml
@@ -0,0 +1,27 @@
+
+
+
+
+
+
+ true
+
+
+
+
+
+ %d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n
+
+
+
+
+
+ ${HAYSTACK_LOG_QUEUE_SIZE:-500}
+ ${HAYSTACK_LOG_DISCARD_THRESHOLD:-0}
+
+
+
+
+
+
+
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/Service.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/Service.scala
new file mode 100644
index 000000000..91b43988b
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/Service.scala
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trace.reader
+
+import java.io.File
+
+import com.codahale.metrics.JmxReporter
+import com.expedia.www.haystack.commons.logger.LoggerUtils
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.reader.config.ProviderConfiguration
+import com.expedia.www.haystack.trace.reader.services.{GrpcHealthService, TraceService}
+import com.expedia.www.haystack.trace.reader.stores.EsIndexedTraceStore
+import io.grpc.netty.NettyServerBuilder
+import org.slf4j.{Logger, LoggerFactory}
+
+object Service extends MetricsSupport {
+ private val LOGGER: Logger = LoggerFactory.getLogger("TraceReader")
+
+ // primary executor for service's async tasks
+ implicit private val executor = scala.concurrent.ExecutionContext.global
+
+ def main(args: Array[String]): Unit = {
+ startJmxReporter()
+ startService()
+ }
+
+ private def startJmxReporter(): Unit = {
+ JmxReporter
+ .forRegistry(metricRegistry)
+ .build()
+ .start()
+ }
+
+ private def startService(): Unit = {
+ try {
+ val config = new ProviderConfiguration
+
+ val store = new EsIndexedTraceStore(
+ config.traceBackendConfiguration,
+ config.elasticSearchConfiguration,
+ config.whitelistedFieldsConfig)(executor)
+
+ val serviceConfig = config.serviceConfig
+
+ val serverBuilder = NettyServerBuilder
+ .forPort(serviceConfig.port)
+ .directExecutor()
+ .addService(new TraceService(store, config.traceValidatorConfig, config.traceTransformerConfig)(executor))
+ .addService(new GrpcHealthService())
+
+      // enable transport security when ssl is enabled in config
+ if (serviceConfig.ssl.enabled) {
+ serverBuilder.useTransportSecurity(new File(serviceConfig.ssl.certChainFilePath), new File(serviceConfig.ssl.privateKeyPath))
+ }
+
+ // default max message size in grpc is 4MB. if our max message size is greater than 4MB then we should configure this
+ // limit in the netty based grpc server.
+ if (serviceConfig.maxSizeInBytes > 4 * 1024 * 1024) serverBuilder.maxMessageSize(serviceConfig.maxSizeInBytes)
+
+ val server = serverBuilder.build().start()
+
+ LOGGER.info(s"server started, listening on ${serviceConfig.port}")
+
+ Runtime.getRuntime.addShutdownHook(new Thread() {
+ override def run(): Unit = {
+ LOGGER.info("shutting down gRPC server since JVM is shutting down")
+ server.shutdown()
+ store.close()
+ LOGGER.info("server has been shutdown now")
+ }
+ })
+
+ server.awaitTermination()
+ } catch {
+ case ex: Throwable =>
+ ex.printStackTrace()
+ LOGGER.error("Fatal error observed while running the app", ex)
+ LoggerUtils.shutdownLogger()
+ System.exit(1)
+ }
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/ProviderConfiguration.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/ProviderConfiguration.scala
new file mode 100644
index 000000000..5070b6403
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/ProviderConfiguration.scala
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2019, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.config
+
+import java.util
+
+import com.expedia.www.haystack.commons.config.ConfigurationLoader
+import com.expedia.www.haystack.trace.commons.config.entities._
+import com.expedia.www.haystack.trace.commons.config.reload.{ConfigurationReloadElasticSearchProvider, Reloadable}
+import com.expedia.www.haystack.trace.reader.config.entities._
+import com.expedia.www.haystack.trace.reader.readers.transformers.{PartialSpanTransformer, SpanTreeTransformer, TraceTransformer}
+import com.expedia.www.haystack.trace.reader.readers.validators.TraceValidator
+import com.typesafe.config.Config
+import org.apache.commons.lang3.StringUtils
+
+import scala.collection.JavaConverters._
+import scala.reflect.ClassTag
+
+class ProviderConfiguration {
+ private val config: Config = ConfigurationLoader.loadConfigFileWithEnvOverrides()
+
+ val serviceConfig: ServiceConfiguration = {
+ val serviceConfig = config.getConfig("service")
+
+ val ssl = serviceConfig.getConfig("ssl")
+ val sslConfig = SslConfiguration(ssl.getBoolean("enabled"), ssl.getString("cert.path"), ssl.getString("private.key.path"))
+
+ ServiceConfiguration(serviceConfig.getInt("port"), sslConfig, serviceConfig.getInt("max.message.size"))
+ }
+
+ /**
+ * trace backend configuration object
+ */
+ val traceBackendConfiguration: TraceStoreBackends = {
+ val traceBackendConfig = config.getConfig("backend")
+
+ val grpcClients = traceBackendConfig.entrySet().asScala
+ .map(k => StringUtils.split(k.getKey, '.')(0)).toSeq
+ .map(cl => traceBackendConfig.getConfig(cl))
+ .filter(cl => cl.hasPath("host") && cl.hasPath("port"))
+ .map(cl => GrpcClientConfig(cl.getString("host"), cl.getInt("port")))
+
+ require(grpcClients.nonEmpty)
+
+ TraceStoreBackends(grpcClients)
+ }
+
+
+ /**
+ * ElasticSearch configuration
+ */
+
+
+ private val elasticSearchClientConfig: ElasticSearchClientConfiguration = {
+ val es = config.getConfig("elasticsearch.client")
+
+ val username = if (es.hasPath("username")) {
+ Option(es.getString("username"))
+ } else None
+ val password = if (es.hasPath("password")) {
+ Option(es.getString("password"))
+ } else None
+
+ ElasticSearchClientConfiguration(
+ endpoint = es.getString("endpoint"),
+ username = username,
+ password = password,
+ connectionTimeoutMillis = es.getInt("conn.timeout.ms"),
+ readTimeoutMillis = es.getInt("read.timeout.ms")
+ )
+ }
+ private val spansIndexConfiguration: SpansIndexConfiguration = {
+ val indexConfig = config.getConfig("elasticsearch.index.spans")
+ SpansIndexConfiguration(
+ indexNamePrefix = indexConfig.getString("name.prefix"),
+ indexType = indexConfig.getString("type"),
+ indexHourBucket = indexConfig.getInt("hour.bucket"),
+ indexHourTtl = indexConfig.getInt("hour.ttl"),
+ useRootDocumentStartTime = indexConfig.getBoolean("use.root.doc.starttime")
+ )
+ }
+ private val serviceMetadataIndexConfig: ServiceMetadataIndexConfiguration = {
+ val metadataCfg = config.getConfig("elasticsearch.index.service.metadata")
+
+ ServiceMetadataIndexConfiguration(
+ metadataCfg.getBoolean("enabled"),
+ metadataCfg.getString("name"),
+ metadataCfg.getString("type"))
+ }
+
+ private def awsRequestSigningConfig(awsESConfig: Config): AWSRequestSigningConfiguration = {
+ val accessKey: Option[String] = if (awsESConfig.hasPath("access.key") && awsESConfig.getString("access.key").nonEmpty)
+ Some(awsESConfig.getString("access.key"))
+ else
+ None
+
+ val secretKey: Option[String] = if (awsESConfig.hasPath("secret.key") && awsESConfig.getString("secret.key").nonEmpty) {
+ Some(awsESConfig.getString("secret.key"))
+ }
+ else
+ None
+
+ AWSRequestSigningConfiguration(
+ awsESConfig.getBoolean("enabled"),
+ awsESConfig.getString("region"),
+ awsESConfig.getString("service.name"),
+ accessKey,
+ secretKey)
+ }
+
+
+ val elasticSearchConfiguration: ElasticSearchConfiguration = {
+ ElasticSearchConfiguration(
+ clientConfiguration = elasticSearchClientConfig,
+ spansIndexConfiguration = spansIndexConfiguration,
+ serviceMetadataIndexConfiguration = serviceMetadataIndexConfig,
+ awsRequestSigningConfiguration = awsRequestSigningConfig(config.getConfig("elasticsearch.signing.request.aws"))
+ )
+ }
+
+ private def toInstances[T](classes: util.List[String])(implicit ct: ClassTag[T]): scala.Seq[T] = {
+ classes.asScala.map(className => {
+ val c = Class.forName(className)
+
+ if (c == null) {
+ throw new RuntimeException(s"No class found with name $className")
+ } else {
+ val o = c.newInstance()
+ val baseClass = ct.runtimeClass
+
+ if (!baseClass.isInstance(o)) {
+ throw new RuntimeException(s"${c.getName} is not an instance of ${baseClass.getName}")
+ }
+ o.asInstanceOf[T]
+ }
+ })
+ }
+
+ /**
+    * Configuration specifying which transformers to apply to traces
+ */
+ val traceTransformerConfig: TraceTransformersConfiguration = {
+ val preTransformers = config.getStringList("trace.transformers.pre.sequence")
+ val postTransformers = config.getStringList("trace.transformers.post.sequence")
+
+ val preTransformerInstances = toInstances[TraceTransformer](preTransformers)
+ var postTransformerInstances = toInstances[SpanTreeTransformer](postTransformers).filterNot(_.isInstanceOf[PartialSpanTransformer])
+ postTransformerInstances = new PartialSpanTransformer +: postTransformerInstances
+
+ TraceTransformersConfiguration(preTransformerInstances, postTransformerInstances)
+ }
+
+ /**
+    * Configuration specifying which validators to apply to traces
+ */
+ val traceValidatorConfig: TraceValidatorsConfiguration = {
+ val validatorConfig: Config = config.getConfig("trace.validators")
+ TraceValidatorsConfiguration(toInstances[TraceValidator](validatorConfig.getStringList("sequence")))
+ }
+
+ /**
+    * configuration that contains the list of tags that should be indexed for a span
+ */
+ val whitelistedFieldsConfig: WhitelistIndexFieldConfiguration = {
+ val whitelistedFieldsConfig = WhitelistIndexFieldConfiguration()
+ whitelistedFieldsConfig.reloadConfigTableName = Option(config.getConfig("reload.tables").getString("index.fields.config"))
+ whitelistedFieldsConfig
+ }
+
+ // configuration reloader
+ registerReloadableConfigurations(List(whitelistedFieldsConfig))
+
+ /**
+    * registers reloadable config objects with the reloader instance.
+    * The reloader treats them as observers and invokes them periodically when it re-reads the
+    * configuration from an external store.
+ *
+ * @param observers list of reloadable configuration objects
+ * @return the reloader instance that uses ElasticSearch as an external database for storing the configs
+ */
+ private def registerReloadableConfigurations(observers: Seq[Reloadable]): ConfigurationReloadElasticSearchProvider = {
+ val reload = config.getConfig("reload")
+ val reloadConfig = ReloadConfiguration(
+ reload.getString("config.endpoint"),
+ reload.getString("config.database.name"),
+ reload.getInt("interval.ms"),
+ if (reload.hasPath("config.username")) {
+ Option(reload.getString("config.username"))
+ } else {
+ None
+ },
+ if (reload.hasPath("config.password")) {
+ Option(reload.getString("config.password"))
+ } else {
+ None
+ },
+ observers,
+ loadOnStartup = reload.getBoolean("startup.load"))
+
+ val awsConfig: AWSRequestSigningConfiguration = awsRequestSigningConfig(config.getConfig("reload.signing.request.aws"))
+ val loader = new ConfigurationReloadElasticSearchProvider(reloadConfig, awsConfig)
+ if (reloadConfig.loadOnStartup) loader.load()
+ loader
+ }
+}
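Because `toInstances` loads transformer and validator classes reflectively from the config sequences, new behavior can be plugged in without touching this class. A hypothetical custom pre-transformer might look like the sketch below (a zero-argument constructor is required, since instantiation goes through `newInstance()`); it would then be enabled by adding its fully qualified class name to `trace.transformers.pre.sequence`.

```scala
package com.expedia.www.haystack.trace.reader.readers.transformers

import com.expedia.open.tracing.Span

// hypothetical example transformer: drop spans that carry no service name.
// a no-arg constructor is mandatory because ProviderConfiguration creates
// instances via Class.forName(className).newInstance()
class DropUnnamedServiceTransformer extends TraceTransformer {
  override def transform(spans: Seq[Span]): Seq[Span] =
    spans.filterNot(_.getServiceName.isEmpty)
}
```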
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/entities/ElasticSearchConfiguration.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/entities/ElasticSearchConfiguration.scala
new file mode 100644
index 000000000..25f6928c9
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/entities/ElasticSearchConfiguration.scala
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2019, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trace.reader.config.entities
+
+import com.expedia.www.haystack.trace.commons.config.entities.AWSRequestSigningConfiguration
+
+case class ElasticSearchClientConfiguration(endpoint: String,
+ username: Option[String],
+ password: Option[String],
+ connectionTimeoutMillis: Int,
+ readTimeoutMillis: Int)
+
+case class SpansIndexConfiguration(indexNamePrefix: String,
+ indexType: String,
+ indexHourTtl: Int,
+ indexHourBucket: Int,
+ useRootDocumentStartTime: Boolean)
+
+case class ServiceMetadataIndexConfiguration(enabled: Boolean,
+ indexName: String,
+ indexType: String)
+
+case class ElasticSearchConfiguration(clientConfiguration: ElasticSearchClientConfiguration,
+ spansIndexConfiguration: SpansIndexConfiguration,
+ serviceMetadataIndexConfiguration: ServiceMetadataIndexConfiguration,
+ awsRequestSigningConfiguration: AWSRequestSigningConfiguration)
\ No newline at end of file
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/entities/ServiceConfiguration.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/entities/ServiceConfiguration.scala
new file mode 100644
index 000000000..72693fcc1
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/entities/ServiceConfiguration.scala
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.config.entities
+
+/**
+ * @param port port to start the grpc server on
+ */
+case class ServiceConfiguration(port: Int, ssl: SslConfiguration, maxSizeInBytes: Int)
+case class SslConfiguration(enabled: Boolean, certChainFilePath: String, privateKeyPath: String)
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/entities/TraceTransformersConfiguration.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/entities/TraceTransformersConfiguration.scala
new file mode 100644
index 000000000..611391ba3
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/entities/TraceTransformersConfiguration.scala
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.expedia.www.haystack.trace.reader.config.entities
+
+import com.expedia.www.haystack.trace.reader.readers.transformers.{SpanTreeTransformer, TraceTransformer}
+
+case class TraceTransformersConfiguration(preTransformers: Seq[TraceTransformer], postTransformers: Seq[SpanTreeTransformer])
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/entities/TraceValidatorsConfiguration.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/entities/TraceValidatorsConfiguration.scala
new file mode 100644
index 000000000..6da0205f2
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/config/entities/TraceValidatorsConfiguration.scala
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.expedia.www.haystack.trace.reader.config.entities
+
+import com.expedia.www.haystack.trace.reader.readers.validators.TraceValidator
+
+case class TraceValidatorsConfiguration(validators: Seq[TraceValidator])
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/ElasticSearchClientError.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/ElasticSearchClientError.scala
new file mode 100644
index 000000000..8d471165b
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/ElasticSearchClientError.scala
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.exceptions
+
+import io.grpc.{Status, StatusException}
+
+case class ElasticSearchClientError(status: Int, details: String)
+ extends StatusException(Status.INTERNAL.withDescription(s"es client returned status $status. $details"))
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/InvalidTraceException.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/InvalidTraceException.scala
new file mode 100644
index 000000000..90dd3c414
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/InvalidTraceException.scala
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.exceptions
+
+import io.grpc.{Status, StatusException}
+
+class InvalidTraceException(message: String)
+ extends StatusException(Status.FAILED_PRECONDITION.withDescription(s"Invalid Trace: $message"))
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/InvalidTraceIdInDocument.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/InvalidTraceIdInDocument.scala
new file mode 100644
index 000000000..8c2a6f947
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/InvalidTraceIdInDocument.scala
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.exceptions
+
+import io.grpc.{Status, StatusException}
+
+case class InvalidTraceIdInDocument(docId: String)
+ extends StatusException(Status.INTERNAL.withDescription(s"invalid traceId in doc: $docId"))
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/SpanNotFoundException.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/SpanNotFoundException.scala
new file mode 100644
index 000000000..bf62a1c20
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/SpanNotFoundException.scala
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.exceptions
+
+import io.grpc.{Status, StatusException}
+
+class SpanNotFoundException extends StatusException(Status.NOT_FOUND.withDescription("spanId not found in trace"))
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/TraceNotFoundException.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/TraceNotFoundException.scala
new file mode 100644
index 000000000..df9cf3f6e
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/exceptions/TraceNotFoundException.scala
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.exceptions
+
+import io.grpc.{Status, StatusException}
+
+class TraceNotFoundException extends StatusException(Status.NOT_FOUND.withDescription("traceId not found in data store"))
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/metrics/AppMetricNames.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/metrics/AppMetricNames.scala
new file mode 100644
index 000000000..e20b124ec
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/metrics/AppMetricNames.scala
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.reader.metrics
+
+object AppMetricNames {
+
+ val ELASTIC_SEARCH_READ_TIME = "elasticsearch.read.time"
+ val ELASTIC_SEARCH_READ_FAILURES = "elasticsearch.read.failures"
+
+ val BACKEND_READ_TIME = "backend.read.time"
+ val BACKEND_READ_FAILURES = "backend.read.failures"
+ val BACKEND_TRACES_FAILURE = "backend.traces.failures"
+
+ val SEARCH_TRACE_REJECTED = "search.trace.rejected"
+ val COUNT_BUCKET_REJECTED = "count.bucket.rejected"
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/TraceProcessor.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/TraceProcessor.scala
new file mode 100644
index 000000000..b6d2941ae
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/TraceProcessor.scala
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers
+
+import com.expedia.open.tracing.api.Trace
+import com.expedia.www.haystack.trace.reader.readers.transformers.{PostTraceTransformationHandler, SpanTreeTransformer, TraceTransformationHandler, TraceTransformer}
+import com.expedia.www.haystack.trace.reader.readers.validators.{TraceValidationHandler, TraceValidator}
+
+import scala.util.Try
+
+class TraceProcessor(validators: Seq[TraceValidator],
+ preValidationTransformers: Seq[TraceTransformer],
+ postValidationTransformers: Seq[SpanTreeTransformer]) {
+
+ private val validationHandler: TraceValidationHandler = new TraceValidationHandler(validators)
+ private val postTransformers: PostTraceTransformationHandler = new PostTraceTransformationHandler(postValidationTransformers)
+ private val preTransformers: TraceTransformationHandler = new TraceTransformationHandler(preValidationTransformers)
+
+ def process(trace: Trace): Try[Trace] = {
+ for (trace <- Try(preTransformers.transform(trace));
+ validated <- validationHandler.validate(trace)) yield postTransformers.transform(validated)
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/TraceReader.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/TraceReader.scala
new file mode 100644
index 000000000..00b96a7f2
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/TraceReader.scala
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers
+
+import com.codahale.metrics.Meter
+import com.expedia.open.tracing.api.{FieldNames, _}
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.reader.config.entities.{TraceTransformersConfiguration, TraceValidatorsConfiguration}
+import com.expedia.www.haystack.trace.reader.exceptions.SpanNotFoundException
+import com.expedia.www.haystack.trace.reader.readers.utils.AuxiliaryTags
+import com.expedia.www.haystack.trace.reader.readers.utils.TagExtractors._
+import com.expedia.www.haystack.trace.reader.stores.TraceStore
+import org.slf4j.{Logger, LoggerFactory}
+
+import scala.collection.JavaConverters._
+import scala.concurrent.{ExecutionContextExecutor, Future}
+import scala.util.{Failure, Success}
+
+object TraceReader extends MetricsSupport {
+ private val LOGGER: Logger = LoggerFactory.getLogger(s"${classOf[TraceReader]}.search.trace.rejection")
+ private val traceRejectedCounter: Meter = metricRegistry.meter("search.trace.rejection")
+}
+
+class TraceReader(traceStore: TraceStore,
+ validatorsConfig: TraceValidatorsConfiguration,
+ transformersConfig: TraceTransformersConfiguration)
+ (implicit val executor: ExecutionContextExecutor)
+ extends TraceProcessor(validatorsConfig.validators, transformersConfig.preTransformers, transformersConfig.postTransformers) {
+
+ def getTrace(request: TraceRequest): Future[Trace] = {
+ traceStore
+ .getTrace(request.getTraceId)
+ .flatMap(process(_) match {
+ case Success(span) => Future.successful(span)
+ case Failure(ex) => Future.failed(ex)
+ })
+ }
+
+ def getRawTrace(request: TraceRequest): Future[Trace] = {
+ traceStore.getTrace(request.getTraceId)
+ }
+
+ def getRawSpan(request: SpanRequest): Future[SpanResponse] = {
+ traceStore
+ .getTrace(request.getTraceId)
+ .flatMap(trace => {
+ val spans = trace.getChildSpansList.asScala.filter(_.getSpanId == request.getSpanId)
+ if (spans.isEmpty) {
+ Future.failed(new SpanNotFoundException)
+ } else {
+ Future.successful(SpanResponse.newBuilder().addAllSpans(spans.asJava).build())
+ }
+ })
+ }
+
+ def searchTraces(request: TracesSearchRequest): Future[TracesSearchResult] = {
+ traceStore
+ .searchTraces(request)
+ .map(
+ traces => {
+ TracesSearchResult
+ .newBuilder()
+ .addAllTraces(traces.flatMap(transformTraceIgnoringInvalid).asJavaCollection)
+ .build()
+ })
+ }
+
+ private def transformTraceIgnoringInvalid(trace: Trace): Option[Trace] = {
+ process(trace) match {
+ case Success(t) => Some(t)
+ case Failure(ex) =>
+ TraceReader.LOGGER.warn(s"invalid trace=${trace.getTraceId} is rejected", ex)
+ TraceReader.traceRejectedCounter.mark()
+ None
+ }
+ }
+
+ def getFieldNames: Future[FieldNames] = {
+ traceStore
+ .getFieldNames
+ }
+
+ def getFieldValues(request: FieldValuesRequest): Future[FieldValues] = {
+ traceStore
+ .getFieldValues(request)
+ .map(names =>
+ FieldValues
+ .newBuilder()
+ .addAllValues(names.asJavaCollection)
+ .build())
+ }
+
+ def getTraceCallGraph(request: TraceRequest): Future[TraceCallGraph] = {
+ traceStore
+ .getTrace(request.getTraceId)
+ .flatMap(process(_) match {
+ case Success(trace) => Future.successful(buildTraceCallGraph(trace))
+ case Failure(ex) => Future.failed(ex)
+ })
+ }
+
+ def getTraceCounts(request: TraceCountsRequest): Future[TraceCounts] = {
+ traceStore
+ .getTraceCounts(request)
+ }
+
+ def getRawTraces(request: RawTracesRequest): Future[RawTracesResult] = {
+ traceStore
+ .getRawTraces(request)
+ .flatMap(traces => Future.successful(RawTracesResult.newBuilder().addAllTraces(traces.asJava).build()))
+ }
+
+ private def buildTraceCallGraph(trace: Trace): TraceCallGraph = {
+ val calls = trace.getChildSpansList
+ .asScala
+ .filter(containsTag(_, AuxiliaryTags.IS_MERGED_SPAN))
+ .map(span => {
+ val from = CallNode.newBuilder()
+ .setServiceName(extractTagStringValue(span, AuxiliaryTags.CLIENT_SERVICE_NAME))
+ .setOperationName(extractTagStringValue(span, AuxiliaryTags.CLIENT_OPERATION_NAME))
+ .setInfrastructureProvider(extractTagStringValue(span, AuxiliaryTags.CLIENT_INFRASTRUCTURE_PROVIDER))
+ .setInfrastructureLocation(extractTagStringValue(span, AuxiliaryTags.CLIENT_INFRASTRUCTURE_LOCATION))
+
+ val to = CallNode.newBuilder()
+ .setServiceName(extractTagStringValue(span, AuxiliaryTags.SERVER_SERVICE_NAME))
+ .setOperationName(extractTagStringValue(span, AuxiliaryTags.SERVER_OPERATION_NAME))
+ .setInfrastructureProvider(extractTagStringValue(span, AuxiliaryTags.SERVER_INFRASTRUCTURE_PROVIDER))
+ .setInfrastructureLocation(extractTagStringValue(span, AuxiliaryTags.SERVER_INFRASTRUCTURE_LOCATION))
+
+ Call.newBuilder()
+ .setFrom(from)
+ .setTo(to)
+ .setNetworkDelta(extractTagLongValue(span, AuxiliaryTags.NETWORK_DELTA))
+ .build()
+ })
+
+ TraceCallGraph
+ .newBuilder()
+ .addAllCalls(calls.asJavaCollection)
+ .build()
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/ClientServerEventLogTransformer.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/ClientServerEventLogTransformer.scala
new file mode 100644
index 000000000..42057b31b
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/ClientServerEventLogTransformer.scala
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.trace.commons.utils.{SpanMarkers, SpanUtils}
+
+import scala.collection.JavaConverters._
+
+/**
+ * adds log events (if not present) based on the span.kind tag value
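+ *
+ * A minimal sketch of the behavior (hypothetical span):
+ * {{{
+ * // assuming serverSpan is tagged span.kind=server but carries no server log events,
+ * // the transformer returns it with the events added via SpanUtils.addServerLogTag
+ * new ClientServerEventLogTransformer().transform(Seq(serverSpan))
+ * }}}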
+ */
+class ClientServerEventLogTransformer extends TraceTransformer {
+
+ override def transform(spans: Seq[Span]): Seq[Span] = {
+ spans.map(span => {
+ span.getTagsList.asScala.find(_.getKey == SpanMarkers.SPAN_KIND_TAG_KEY).map(_.getVStr) match {
+ case Some(SpanMarkers.SERVER_SPAN_KIND) if !SpanUtils.containsServerLogTag(span) => SpanUtils.addServerLogTag(span)
+ case Some(SpanMarkers.CLIENT_SPAN_KIND) if !SpanUtils.containsClientLogTag(span) => SpanUtils.addClientLogTag(span)
+ case _ => span
+ }
+ })
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/ClockSkewFromParentTransformer.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/ClockSkewFromParentTransformer.scala
new file mode 100644
index 000000000..20e1e70dd
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/ClockSkewFromParentTransformer.scala
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.trace.commons.utils.SpanUtils
+import com.expedia.www.haystack.trace.reader.readers.utils.{MutableSpanForest, SpanTree}
+
+import scala.annotation.tailrec
+import scala.collection.mutable.ListBuffer
+import scala.collection.{Seq, mutable}
+
+/**
+ * Fixes clock skew between parent and child spans.
+ * If a child span reports a startTime earlier than the parent span's startTime, or
+ * an endTime later than the parent span's endTime, the child span's timestamps are
+ * shifted to fit within the parent's boundaries, clamping against the parent's
+ * startTime or endTime depending on which side is off.
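+ *
+ * A rough worked example (hypothetical timestamps): for a parent with startTime=100
+ * and endTime=200, a child reporting startTime=90 and endTime=190 is corrected as:
+ * {{{
+ * // sketch of adjustSpan's arithmetic (illustrative names)
+ * var shift = parentStart - childStart   // 100 - 90 = 10
+ * if (parentEnd < childEnd + shift)      // 200 < 190 + 10 is false,
+ *   shift = parentEnd - childEnd         // so shift stays 10
+ * childStart + shift                     // the child now starts at 100
+ * }}}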
+ */
+class ClockSkewFromParentTransformer extends SpanTreeTransformer {
+
+ case class SpanTreeWithParent(spanTree: SpanTree, parent: Option[Span])
+
+ override def transform(forest: MutableSpanForest): MutableSpanForest = {
+ val underlyingSpans = new mutable.ListBuffer[Span]
+ forest.getAllTrees.foreach(tree => {
+ adjustSkew(underlyingSpans, List(SpanTreeWithParent(tree, None)))
+ })
+ forest.updateUnderlyingSpans(underlyingSpans)
+ }
+
+ @tailrec
+ private def adjustSkew(fixedSpans: ListBuffer[Span], spanTrees: Seq[SpanTreeWithParent]): Unit = {
+ if (spanTrees.isEmpty) return
+
+ // collect the child trees that need to be corrected for clock skew
+ val childTrees = mutable.ListBuffer[SpanTreeWithParent]()
+
+ spanTrees.foreach(e => {
+ val rootSpan = e.spanTree.span
+ var adjustedSpan = rootSpan
+ e.parent match {
+ case Some(parentSpan) =>
+ adjustedSpan = adjustSpan(rootSpan, parentSpan)
+ fixedSpans += adjustedSpan
+ case _ => fixedSpans += rootSpan
+ }
+ childTrees ++= e.spanTree.children.map(tree => SpanTreeWithParent(tree, Some(adjustedSpan)))
+ })
+
+ adjustSkew(fixedSpans, childTrees)
+ }
+
+ private def adjustSpan(child: Span, parent: Span): Span = {
+ var shift = 0L
+ if (child.getStartTime < parent.getStartTime) {
+ shift = parent.getStartTime - child.getStartTime
+ }
+ val childEndTime = SpanUtils.getEndTime(child)
+ val parentEndTime = SpanUtils.getEndTime(parent)
+ if (parentEndTime < childEndTime + shift) {
+ shift = parentEndTime - childEndTime
+ }
+ if (shift == 0L) {
+ child
+ } else {
+ Span.newBuilder(child).setStartTime(child.getStartTime + shift).build()
+ }
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/ClockSkewTransformer.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/ClockSkewTransformer.scala
new file mode 100644
index 000000000..9fc5abd5a
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/ClockSkewTransformer.scala
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.trace.commons.utils.{SpanMarkers, SpanUtils}
+import com.expedia.www.haystack.trace.reader.readers.utils.{MutableSpanForest, SpanTree}
+
+/**
+ * Fixes clock skew between parent and child spans.
+ * If a child span reports a startTime earlier than the parent span's startTime,
+ * the corresponding delta is applied to the whole subtree rooted at the child span.
+ *
+ * adjustSkew calculates the skew of each merged span from its client/server log
+ * events and recursively applies the delta down its subtree
+ */
+class ClockSkewTransformer extends SpanTreeTransformer {
+
+ override def transform(forest: MutableSpanForest): MutableSpanForest = {
+ require(forest.getAllTrees.size == 1)
+
+    val clockSkewAdjusted = adjustSkew(forest.getAllTrees.head, None)
+    forest.updateUnderlyingSpans(clockSkewAdjusted)
+ }
+
+ private def adjustSkew(node: SpanTree, previousSkew: Option[Skew]): Seq[Span] = {
+ val previousSkewAdjustedSpan: Span = previousSkew match {
+ case Some(skew) => adjustForASpan(node.span, skew)
+ case None => node.span
+ }
+
+ getClockSkew(previousSkewAdjustedSpan) match {
+ case Some(skew) =>
+ val selfSkewAdjustedSpan: Span = adjustForASpan(previousSkewAdjustedSpan, skew)
+ val children = node.children.flatMap(adjustSkew(_, Some(skew)))
+ children.prepend(selfSkewAdjustedSpan)
+ children
+ case None =>
+ val children = node.children.flatMap(adjustSkew(_, None))
+ children.prepend(previousSkewAdjustedSpan)
+ children
+ }
+ }
+
+ private def adjustForASpan(span: Span, skew: Skew): Span = {
+ if (span.getServiceName == skew.serviceName) {
+ Span
+ .newBuilder(span)
+ .setStartTime(span.getStartTime - skew.delta)
+ .build()
+ }
+ else {
+ span
+ }
+ }
+
+ // if span is a merged span of partial spans, calculate corresponding skew
+ private def getClockSkew(span: Span): Option[Skew] = {
+ if (SpanUtils.isMergedSpan(span)) {
+ calculateClockSkew(
+ SpanUtils.getEventTimestamp(span, SpanMarkers.CLIENT_SEND_EVENT),
+ SpanUtils.getEventTimestamp(span, SpanMarkers.CLIENT_RECV_EVENT),
+ SpanUtils.getEventTimestamp(span, SpanMarkers.SERVER_RECV_EVENT),
+ SpanUtils.getEventTimestamp(span, SpanMarkers.SERVER_SEND_EVENT),
+ span.getServiceName
+ )
+ } else {
+ None
+ }
+ }
+
+ /**
+ * Calculate the clock skew between two servers based on logs in a span
+ *
+ * Only adjust for clock skew if logs are not in the following order:
+ * Client send -> Server receive -> Server send -> Client receive
+ *
+ * Special case: if the server (child) span is longer than the client (parent), then do not
+ * adjust for clock skew.
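+   *
+   * A worked example with hypothetical timestamps: clientSend=10, clientRecv=30,
+   * serverRecv=5, serverSend=20 (server receive reported before client send):
+   * {{{
+   * val clientDuration = 30 - 10         // 20
+   * val serverDuration = 20 - 5          // 15
+   * val latency        = (20 - 15) / 2   // 2
+   * val delta          = 5 - 2 - 10      // -7 => Some(Skew(serviceName, -7))
+   * }}}
+   * adjustForASpan then subtracts the delta, shifting the matching spans 7 units later.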
+ */
+ private def calculateClockSkew(clientSend: Long,
+ clientRecv: Long,
+ serverRecv: Long,
+ serverSend: Long,
+ serviceName: String): Option[Skew] = {
+ val clientDuration = clientRecv - clientSend
+ val serverDuration = serverSend - serverRecv
+
+ // There is only clock skew if CS is after SR or CR is before SS
+ val csAhead = clientSend < serverRecv
+ val crAhead = clientRecv > serverSend
+ if (serverDuration > clientDuration || (csAhead && crAhead)) {
+ None
+ } else {
+ val latency = (clientDuration - serverDuration) / 2
+      val delta = serverRecv - latency - clientSend
+      if (delta == 0) None else Some(Skew(serviceName, delta))
+ }
+ }
+
+ case class Skew(serviceName: String, delta: Long)
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/DeDuplicateSpanTransformer.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/DeDuplicateSpanTransformer.scala
new file mode 100644
index 000000000..fafb83a95
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/DeDuplicateSpanTransformer.scala
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import com.expedia.open.tracing.Span
+
+import scala.collection.mutable
+
+/**
+ * removes exact duplicate spans from the trace (spans are compared by full protobuf message equality, not just span id)
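+ *
+ * A minimal usage sketch (hypothetical spans): only byte-identical spans are dropped:
+ * {{{
+ * new DeDuplicateSpanTransformer().transform(Seq(span, span, otherSpan))
+ * // => Seq(span, otherSpan)
+ * }}}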
+ */
+class DeDuplicateSpanTransformer extends TraceTransformer {
+
+ override def transform(spans: Seq[Span]): Seq[Span] = {
+ val seen = mutable.HashSet[Span]()
+ spans.filter {
+ span =>
+ val alreadySeen = seen.contains(span)
+ seen.add(span)
+ !alreadySeen
+ }
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/InfrastructureTagTransformer.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/InfrastructureTagTransformer.scala
new file mode 100644
index 000000000..1613df092
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/InfrastructureTagTransformer.scala
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.trace.reader.readers.utils.AuxiliaryTags
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+
+/**
+ * adds the infrastructure tags to all spans of a service if any of its spans (client or server) contains them.
+ * Many services send the infrastructure tags only in the server span for simplicity and to save transfer cost.
+ * This transformer backfills the infrastructure tags where required.
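+ *
+ * A rough sketch (hypothetical tag values):
+ * {{{
+ * // the server span of "svc-a" carries X-HAYSTACK-AWS-REGION=us-west-2;
+ * // the client span of "svc-a" has no infrastructure tags and gets the tag backfilled
+ * new InfrastructureTagTransformer().transform(Seq(serverSpanOfSvcA, bareClientSpanOfSvcA))
+ * }}}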
+ */
+class InfrastructureTagTransformer extends TraceTransformer {
+
+ override def transform(spans: Seq[Span]): Seq[Span] = {
+ val serviceWithInfraTags = mutable.HashMap[String, mutable.ListBuffer[Tag]]()
+ val spansWithoutInfraTags = mutable.HashSet[Span]()
+
+ spans.foreach { span =>
+ var infraTagsPresent = false
+ span.getTagsList.asScala.foreach { tag =>
+ if (tag.getKey == AuxiliaryTags.INFRASTRUCTURE_PROVIDER || tag.getKey == AuxiliaryTags.INFRASTRUCTURE_LOCATION) {
+ val tags = serviceWithInfraTags.getOrElseUpdate(span.getServiceName, mutable.ListBuffer[Tag]())
+ tags.append(tag)
+ infraTagsPresent = true
+ }
+ }
+
+ if (!infraTagsPresent) {
+ spansWithoutInfraTags += span
+ }
+ }
+
+ spans.map { span =>
+ if (serviceWithInfraTags.contains(span.getServiceName) && spansWithoutInfraTags.contains(span)) {
+ span.toBuilder.addAllTags(serviceWithInfraTags(span.getServiceName).asJava).build()
+ } else {
+ span
+ }
+ }
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/InvalidParentTransformer.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/InvalidParentTransformer.scala
new file mode 100644
index 000000000..91585391d
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/InvalidParentTransformer.scala
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import com.expedia.www.haystack.trace.reader.readers.utils.MutableSpanForest
+
+/**
+ * If there are spans with an invalid parentId in the trace, re-parent them under the root span
+ *
+ * **Apply this transformer only if you are not confident that clients send parentIds properly**
+ */
+class InvalidParentTransformer extends SpanTreeTransformer {
+ override def transform(spanForest: MutableSpanForest): MutableSpanForest = {
+ val rootTrees = spanForest.getAllTrees.filter(_.span.getParentSpanId.isEmpty)
+
+ require(rootTrees.size == 1)
+
+ spanForest.mergeTreesUnder(rootTrees.head)
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/InvalidRootTransformer.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/InvalidRootTransformer.scala
new file mode 100644
index 000000000..ceaae3c66
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/InvalidRootTransformer.scala
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import java.util.UUID
+
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.trace.commons.utils.SpanUtils
+import com.expedia.www.haystack.trace.reader.readers.utils.MutableSpanForest
+
+/**
+ *
+ * If there are multiple roots in the given trace, create an auto-generated root span and
+ * mark the existing roots as children of this new root
+ * If there is no root, assume the loopback span or the first span in time order to be the root
+ *
+ * **Apply this transformer only if you are not confident that clients send roots properly**
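+ *
+ * For example (hypothetical trace): given two spans with an empty parentSpanId, an
+ * auto-generated root span is created, both spans become its children, and the new
+ * root carries the tag X-HAYSTACK-SPAN-ROOT-COUNT=2.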
+ */
+class InvalidRootTransformer extends SpanTreeTransformer {
+ private val AUTOGEN_REASON =
+ """
+      |This span is autogenerated by haystack and is only UI sugar to show multiple root spans together in one view.
+      | This is a symptom that more than one span has an empty parent id, while only one such root span should exist.
+ """.stripMargin
+
+ override def transform(spanForest: MutableSpanForest): MutableSpanForest = {
+ val rootSpans = spanForest.getAllTrees.filter(_.span.getParentSpanId.isEmpty).map(_.span)
+
+ rootSpans.size match {
+ case 0 => toTraceWithAssumedRoot(spanForest)
+ case 1 => spanForest
+ case _ => toTraceWithSingleRoot(spanForest, rootSpans.size)
+ }
+ }
+
+ private def toTraceWithAssumedRoot(forest: MutableSpanForest): MutableSpanForest = {
+    // if we have just one tree, then simply set its root's parentSpanId to empty
+ if (forest.countTrees <= 1) {
+ return forest.updateEachSpanTreeRoot(resetParentSpanId)
+ }
+
+    // if we have just one loopback tree root, i.e. its parentSpanId is the same as its spanId, then mark it as the root
+    // by setting its parentSpanId to empty
+ val loopbackTrees = forest.treesWithLoopbackRoots
+ if (loopbackTrees.size == 1) {
+ return forest.updateEachSpanTreeRoot(span => if (loopbackTrees.head.span == span) resetParentSpanId(span) else span)
+ }
+
+    // for all other cases, take the root with the minimum startTime and make it the root by setting its parentSpanId to empty
+ val spanWithMinStartTime = forest.getAllTrees.minBy(_.span.getStartTime).span
+ forest.updateEachSpanTreeRoot(span => if (span == spanWithMinStartTime) resetParentSpanId(span) else span)
+ }
+
+ private def toTraceWithSingleRoot(forest: MutableSpanForest, emptyParentIdSpanTrees: Int): MutableSpanForest = {
+ val allTreeRootSpans: Seq[Span] = forest.getAllTrees.map(_.span)
+ val newRootSpan = SpanUtils.addClientLogTag(SpanUtils
+ .createAutoGeneratedRootSpan(allTreeRootSpans, AUTOGEN_REASON, UUID.randomUUID().toString)
+ .addTags(Tag.newBuilder().setKey("X-HAYSTACK-SPAN-ROOT-COUNT").setVLong(emptyParentIdSpanTrees).setType(Tag.TagType.LONG))
+ .build())
+
+ forest.addNewRoot(newRootSpan)
+ }
+
+ private def resetParentSpanId(span: Span): Span = Span.newBuilder(span).setParentSpanId("").build()
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/OrphanedTraceTransformer.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/OrphanedTraceTransformer.scala
new file mode 100644
index 000000000..cc66ffbff
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/OrphanedTraceTransformer.scala
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.trace.commons.utils.SpanUtils
+import com.expedia.www.haystack.trace.reader.readers.utils.{MutableSpanForest, SpanTree}
+
+
+/**
+ * If the root span is missing within a trace, create a pseudo root span to wrap all the spans.
+ *
+ * ** [[com.expedia.www.haystack.trace.reader.readers.validators.RootValidator]] and [[com.expedia.www.haystack.trace.reader.readers.validators.ParentIdValidator]] must be turned off for this to take effect. **
+ * ** Should be placed first in the post-transformers sequence of the configuration, as the other transformers may depend on or use the generated root during their transformation. **
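+ *
+ * A rough sketch of the handled cases (hypothetical spans): if all orphaned trees share a
+ * parentSpanId equal to the traceId, a pseudo root is generated over all spans; if the
+ * orphans point at different parents, or at a shared parent other than the traceId, the
+ * trace is considered unrecoverable and its spans are cleared.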
+ */
+object OrphanedTraceTransformerConstants {
+ val AUTO_GEN_REASON = "Missing root span"
+}
+
+class OrphanedTraceTransformer extends SpanTreeTransformer {
+
+ override def transform(forest: MutableSpanForest): MutableSpanForest = {
+ val orphanedTrees = forest.orphanedTrees()
+ if (orphanedTrees.isEmpty) {
+ forest
+ } else if (multipleOrphans(orphanedTrees)) {
+ forest.updateUnderlyingSpans(Seq.empty)
+ } else {
+ val rootSpan = generateRootSpan(forest.getUnderlyingSpans)
+ forest.addNewRoot(rootSpan)
+ }
+ }
+
+ def multipleOrphans(orphanedTrees: Seq[SpanTree]): Boolean = {
+ val orphanedParents = orphanedTrees.groupBy(_.span.getParentSpanId)
+ if (orphanedParents.size != 1) return true
+
+    // we may now have multiple orphaned trees, but each tree's root span has the same parentId;
+    // if this parentId is the same as the traceId, we do not treat them as multiple orphans
+    // because we can build an autogenerated span as their parent
+ val orphanedSpan = orphanedParents.head._2.head.span
+ orphanedSpan.getParentSpanId != orphanedSpan.getTraceId
+ }
+
+ def generateRootSpan(spans: Seq[Span]): Span = {
+ SpanUtils.createAutoGeneratedRootSpan(spans, OrphanedTraceTransformerConstants.AUTO_GEN_REASON, spans.head.getTraceId).build()
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/PartialSpanTransformer.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/PartialSpanTransformer.scala
new file mode 100644
index 000000000..dfc90ed5d
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/PartialSpanTransformer.scala
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.trace.reader.readers.utils._
+
+/**
+ * Merges partial spans into a single Span combining a client span and the corresponding server span;
+ * gracefully falls back to collapsing everything into one span if client/server spans are missing or duplicated
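+ *
+ * A minimal usage sketch (hypothetical partial spans sharing one spanId):
+ * {{{
+ * // assuming clientSpan carries client log events and serverSpan carries server ones,
+ * // they come back as one merged span tagged X-HAYSTACK-IS-MERGED-SPAN=true
+ * new PartialSpanTransformer().transform(MutableSpanForest(Seq(clientSpan, serverSpan)))
+ * }}}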
+ */
+class PartialSpanTransformer extends SpanTreeTransformer {
+ override def transform(spanForest: MutableSpanForest): MutableSpanForest = {
+ var hasAnySpanMerged = false
+
+    val mergedSpans: Seq[Span] = spanForest.getUnderlyingSpans.groupBy(_.getSpanId).values.map {
+      case Seq(span) => span
+      case spans =>
+        hasAnySpanMerged = true
+        SpanMerger.mergeSpans(spans)
+    }.toSeq
+
+ spanForest.updateUnderlyingSpans(mergedSpans, hasAnySpanMerged)
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/PostTraceTransformationHandler.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/PostTraceTransformationHandler.scala
new file mode 100644
index 000000000..9688139f2
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/PostTraceTransformationHandler.scala
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import com.expedia.open.tracing.api.Trace
+import com.expedia.www.haystack.trace.reader.readers.utils.MutableSpanForest
+
+import scala.collection.JavaConverters._
+
+/**
+ * takes a sequence of [[SpanTreeTransformer]]s and applies their transform functions as a chain
+ *
+ * each transformer takes a [[MutableSpanForest]] and generates a transformed [[MutableSpanForest]];
+ * [[PostTraceTransformationHandler]] chains the given transformers, feeding the output of
+ * one transformer into the next
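+ *
+ * A minimal usage sketch (given a fetched trace):
+ * {{{
+ * val handler = new PostTraceTransformationHandler(
+ *   Seq(new PartialSpanTransformer, new SortSpanTransformer))
+ * val transformed: Trace = handler.transform(trace)
+ * }}}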
+ *
+ * @param transformerSeq the ordered chain of transformers to apply
+ */
+class PostTraceTransformationHandler(transformerSeq: Seq[SpanTreeTransformer]) {
+  private val transformerChain =
+    Function.chain(transformerSeq.map(transformer => transformer.transform _))
+
+ def transform(trace: Trace): Trace = {
+ // build a span forest from the given spans in a trace
+ val spanForest = MutableSpanForest(trace.getChildSpansList.asScala)
+
+    // transform the forest and yield only one tree
+ val transformedSpanForest = transformerChain(spanForest)
+
+ Trace
+ .newBuilder()
+ .setTraceId(trace.getTraceId)
+ .addAllChildSpans(transformedSpanForest.getUnderlyingSpans.asJavaCollection)
+ .build()
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/ServerClientSpanMergeTransformer.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/ServerClientSpanMergeTransformer.scala
new file mode 100644
index 000000000..73e512585
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/ServerClientSpanMergeTransformer.scala
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.trace.reader.readers.utils.{MutableSpanForest, SpanMerger}
+
+class ServerClientSpanMergeTransformer extends SpanTreeTransformer {
+
+ private def shouldMerge(parentSpan: Span, childSpan: Span) = {
+ childSpan.getServiceName != parentSpan.getServiceName &&
+ !SpanMerger.isAlreadyMergedSpan(parentSpan) &&
+ !SpanMerger.isAlreadyMergedSpan(childSpan) &&
+ SpanMerger.shouldMergeSpanKinds(parentSpan, childSpan)
+ }
+
+ override def transform(spanForest: MutableSpanForest): MutableSpanForest = {
+ spanForest.collapse((tree) =>
+ tree.children match {
+ case Seq(singleChild) if shouldMerge(tree.span, singleChild.span) =>
+ Some(SpanMerger.mergeParentChildSpans(tree.span, singleChild.span))
+ case _ => None
+ })
+
+ spanForest
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/SortSpanTransformer.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/SortSpanTransformer.scala
new file mode 100644
index 000000000..ab2a4cbac
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/SortSpanTransformer.scala
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import com.expedia.www.haystack.trace.reader.readers.utils.MutableSpanForest
+
+/**
+ * Orders spans in natural ordering - root followed by other spans ordered by start time
+ *
+ * Assumes there is only one root in the given spans List;
+ * the corresponding validations are done in [[com.expedia.www.haystack.trace.reader.readers.validators.RootValidator]]
+ * and the corresponding transformations are done in [[InvalidRootTransformer]]
+ */
+class SortSpanTransformer extends SpanTreeTransformer {
+ override def transform(spanForest: MutableSpanForest): MutableSpanForest = {
+ require(spanForest.getAllTrees.size <= 1)
+
+    val (roots, nonRoots) = spanForest.getUnderlyingSpans.partition(_.getParentSpanId.isEmpty)
+    val sortedSpans = roots.toList.head :: nonRoots.toList.sortBy(_.getStartTime)
+ spanForest.updateUnderlyingSpans(sortedSpans, triggerForestUpdate = false)
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/SpanTreeTransformer.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/SpanTreeTransformer.scala
new file mode 100644
index 000000000..8fdc24b1e
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/SpanTreeTransformer.scala
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import com.expedia.www.haystack.trace.reader.readers.utils.MutableSpanForest
+
+trait SpanTreeTransformer {
+ def transform(forest: MutableSpanForest): MutableSpanForest
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/TraceTransformationHandler.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/TraceTransformationHandler.scala
new file mode 100644
index 000000000..7d8bd4b6a
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/TraceTransformationHandler.scala
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.api.Trace
+
+import scala.collection.JavaConverters._
+
+/**
+ * takes a sequence of [[TraceTransformer]]s and applies their transform functions as a chain
+ *
+ * each transformer takes a [[Seq]] of [[Span]]s and generates a transformed [[Seq]] of [[Span]]s;
+ * [[TraceTransformationHandler]] chains the given transformers, feeding the output of
+ * one transformer into the next
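+ *
+ * A minimal usage sketch (given a fetched trace):
+ * {{{
+ * val handler = new TraceTransformationHandler(
+ *   Seq(new DeDuplicateSpanTransformer, new ClientServerEventLogTransformer))
+ * val transformed: Trace = handler.transform(trace)
+ * }}}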
+ *
+ * @param transformerSeq the ordered chain of transformers to apply
+ */
+class TraceTransformationHandler(transformerSeq: Seq[TraceTransformer]) {
+  private val transformerChain =
+    Function.chain(transformerSeq.map(transformer => transformer.transform _))
+
+ def transform(trace: Trace): Trace = {
+ val transformedSpans = transformerChain(trace.getChildSpansList.asScala)
+
+ Trace
+ .newBuilder()
+ .setTraceId(trace.getTraceId)
+ .addAllChildSpans(transformedSpans.asJavaCollection)
+ .build()
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/TraceTransformer.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/TraceTransformer.scala
new file mode 100644
index 000000000..377695ab6
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/transformers/TraceTransformer.scala
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.transformers
+
+import com.expedia.open.tracing.Span
+
+trait TraceTransformer {
+ def transform(spans: Seq[Span]): Seq[Span]
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/AuxiliaryTags.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/AuxiliaryTags.scala
new file mode 100644
index 000000000..a77d834c4
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/AuxiliaryTags.scala
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.utils
+
+object AuxiliaryTags {
+ val INFRASTRUCTURE_LOCATION = "X-HAYSTACK-AWS-REGION"
+ val INFRASTRUCTURE_PROVIDER = "X-HAYSTACK-INFRASTRUCTURE-PROVIDER"
+
+ val IS_MERGED_SPAN = "X-HAYSTACK-IS-MERGED-SPAN"
+ val NETWORK_DELTA = "X-HAYSTACK-NETWORK-DELTA"
+
+ val CLIENT_SERVICE_NAME = "X-HAYSTACK-CLIENT-SERVICE-NAME"
+ val CLIENT_OPERATION_NAME = "X-HAYSTACK-CLIENT-OPERATION-NAME"
+ val CLIENT_SPAN_ID = "X-HAYSTACK-CLIENT-SPAN-ID"
+ val CLIENT_INFRASTRUCTURE_PROVIDER = "X-HAYSTACK-CLIENT-INFRASTRUCTURE-PROVIDER"
+ val CLIENT_INFRASTRUCTURE_LOCATION = "X-HAYSTACK-CLIENT-INFRASTRUCTURE-LOCATION"
+ val CLIENT_START_TIME = "X-HAYSTACK-CLIENT-START-TIME"
+ val CLIENT_DURATION = "X-HAYSTACK-CLIENT-DURATION"
+
+ val SERVER_SERVICE_NAME = "X-HAYSTACK-SERVER-SERVICE-NAME"
+ val SERVER_OPERATION_NAME = "X-HAYSTACK-SERVER-OPERATION-NAME"
+ val SERVER_INFRASTRUCTURE_PROVIDER = "X-HAYSTACK-SERVER-INFRASTRUCTURE-PROVIDER"
+ val SERVER_INFRASTRUCTURE_LOCATION = "X-HAYSTACK-SERVER-INFRASTRUCTURE-LOCATION"
+ val SERVER_START_TIME = "X-HAYSTACK-SERVER-START-TIME"
+ val SERVER_DURATION = "X-HAYSTACK-SERVER-DURATION"
+
+ val ERR_IS_MULTI_PARTIAL_SPAN = "X-HAYSTACK-ERR-IS-MULTI-PARTIAL-SPAN"
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/SpanMerger.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/SpanMerger.scala
new file mode 100644
index 000000000..78d2de704
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/SpanMerger.scala
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.utils
+
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.trace.commons.utils.{SpanMarkers, SpanUtils}
+import com.expedia.www.haystack.trace.reader.readers.utils.TagBuilders.{buildBoolTag, buildLongTag, buildStringTag}
+import com.expedia.www.haystack.trace.reader.readers.utils.TagExtractors.extractTagStringValue
+
+import scala.collection.JavaConverters._
+
+object SpanMerger {
+
+ def mergeParentChildSpans(parentSpan: Span, childSpan: Span): Span = {
+ val clientSpan = if (SpanUtils.containsClientLogTag(parentSpan)) parentSpan else SpanUtils.addClientLogTag(parentSpan)
+ val serverSpan = if (SpanUtils.containsServerLogTag(childSpan)) childSpan else SpanUtils.addServerLogTag(childSpan)
+ merge(clientSpan, serverSpan)
+ }
+
+ def mergeSpans(spans: Seq[Span]): Span = {
+ val serverSpanOptional = collapseSpans(spans.filter(SpanUtils.containsServerLogTag))
+ val clientSpanOptional = collapseSpans(spans.filter(SpanUtils.containsClientLogTag))
+ (clientSpanOptional, serverSpanOptional) match {
+ // ideally there should be one server and one client span
+ // merging these partial spans to form a new single span
+ case (Some(clientSpan), Some(serverSpan)) => merge(clientSpan, serverSpan)
+
+ // imperfect scenario, fallback to return available server span
+ case (None, Some(serverSpan)) => serverSpan
+
+ // imperfect scenario, fallback to return available client span
+ case (Some(clientSpan), None) => clientSpan
+
+ // imperfect scenario, fallback to collapse all spans
+ case _ => collapseSpans(spans).get
+ }
+ }
+
+ private def merge(clientSpan: Span, serverSpan: Span): Span = {
+ Span
+ .newBuilder(serverSpan)
+      .setParentSpanId(clientSpan.getParentSpanId) // use the parentSpanId of the client span to stitch it into the client's trace tree
+ .addAllTags((clientSpan.getTagsList.asScala
+ ++ auxiliaryCommonTags(clientSpan, serverSpan)
+ ++ auxiliaryClientTags(clientSpan)
+ ++ auxiliaryServerTags(serverSpan)).asJavaCollection)
+ .clearLogs().addAllLogs((clientSpan.getLogsList.asScala
+ ++ serverSpan.getLogsList.asScala.sortBy(_.getTimestamp)).asJavaCollection)
+ .build()
+ }
+
+  // collapse all spans of a given type (e.g. client or server) if needed;
+  // ideally there is just one span in the list and hence no collapsing is needed
+ private def collapseSpans(spans: Seq[Span]): Option[Span] = {
+ spans match {
+ case Nil => None
+ case Seq(span) => Some(span)
+ case _ =>
+        // if there are multiple spans, fall back to collapsing all of them into a single span:
+        // start the collapsed span at the startTime of the first and end it at the end time of the last such span,
+        // and add an error marker to the collapsed span
+ val firstSpan = spans.minBy(_.getStartTime)
+ val lastSpan = spans.maxBy(span => span.getStartTime + span.getDuration)
+ val allTags = spans.flatMap(span => span.getTagsList.asScala)
+ val allLogs = spans.flatMap(span => span.getLogsList.asScala)
+ val opName = spans.map(_.getOperationName).reduce((a, b) => a + " & " + b)
+
+ Some(
+ Span
+ .newBuilder(firstSpan)
+ .setOperationName(opName)
+ .setDuration(lastSpan.getStartTime + lastSpan.getDuration - firstSpan.getStartTime)
+ .clearTags().addAllTags(allTags.asJava)
+ .addTags(buildBoolTag(AuxiliaryTags.ERR_IS_MULTI_PARTIAL_SPAN, tagValue = true))
+ .clearLogs().addAllLogs(allLogs.asJava)
+ .build())
+ }
+ }
+
+  // Network delta - the difference between client and server durations,
+  // calculated only if serverDuration is smaller than clientDuration
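+  // e.g. (hypothetical timestamps): client send=0/recv=100 and server recv=30/send=90
+  // give clientDuration=100 and serverDuration=60, hence a network delta of Some(40)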
+ private def calculateNetworkDelta(clientSpan: Span, serverSpan: Span): Option[Long] = {
+ val clientDuration = SpanUtils.getEventTimestamp(clientSpan, SpanMarkers.CLIENT_RECV_EVENT) - SpanUtils.getEventTimestamp(clientSpan, SpanMarkers.CLIENT_SEND_EVENT)
+ val serverDuration = SpanUtils.getEventTimestamp(serverSpan, SpanMarkers.SERVER_SEND_EVENT) - SpanUtils.getEventTimestamp(serverSpan, SpanMarkers.SERVER_RECV_EVENT)
+
+ // difference of duration of spans
+ if (serverDuration < clientDuration) {
+ Some(clientDuration - serverDuration)
+ } else {
+ None
+ }
+ }
+
+ private def auxiliaryCommonTags(clientSpan: Span, serverSpan: Span): List[Tag] =
+ List(
+ buildBoolTag(AuxiliaryTags.IS_MERGED_SPAN, tagValue = true),
+ buildLongTag(AuxiliaryTags.NETWORK_DELTA, calculateNetworkDelta(clientSpan, serverSpan).getOrElse(-1))
+ )
+
+ private def auxiliaryClientTags(span: Span): List[Tag] =
+ List(
+ buildStringTag(AuxiliaryTags.CLIENT_SERVICE_NAME, span.getServiceName),
+ buildStringTag(AuxiliaryTags.CLIENT_OPERATION_NAME, span.getOperationName),
+ buildStringTag(AuxiliaryTags.CLIENT_SPAN_ID, span.getSpanId),
+ buildStringTag(AuxiliaryTags.CLIENT_INFRASTRUCTURE_PROVIDER, extractTagStringValue(span, AuxiliaryTags.INFRASTRUCTURE_PROVIDER)),
+ buildStringTag(AuxiliaryTags.CLIENT_INFRASTRUCTURE_LOCATION, extractTagStringValue(span, AuxiliaryTags.INFRASTRUCTURE_LOCATION)),
+ buildLongTag(AuxiliaryTags.CLIENT_START_TIME, span.getStartTime),
+ buildLongTag(AuxiliaryTags.CLIENT_DURATION, span.getDuration)
+ )
+
+ private def auxiliaryServerTags(span: Span): List[Tag] = {
+ List(
+ buildStringTag(AuxiliaryTags.SERVER_SERVICE_NAME, span.getServiceName),
+ buildStringTag(AuxiliaryTags.SERVER_OPERATION_NAME, span.getOperationName),
+ buildStringTag(AuxiliaryTags.SERVER_INFRASTRUCTURE_PROVIDER, extractTagStringValue(span, AuxiliaryTags.INFRASTRUCTURE_PROVIDER)),
+ buildStringTag(AuxiliaryTags.SERVER_INFRASTRUCTURE_LOCATION, extractTagStringValue(span, AuxiliaryTags.INFRASTRUCTURE_LOCATION)),
+ buildLongTag(AuxiliaryTags.SERVER_START_TIME, span.getStartTime),
+ buildLongTag(AuxiliaryTags.SERVER_DURATION, span.getDuration)
+ )
+ }
+
+ private def isProducerConsumerSpanKind(spanKind: String): Boolean = {
+ "producer".equalsIgnoreCase(spanKind) || "consumer".equalsIgnoreCase(spanKind)
+ }
+
+ def isAlreadyMergedSpan(span: Span): Boolean = {
+ span.getTagsList.asScala.exists(tag => tag.getKey.equals(AuxiliaryTags.IS_MERGED_SPAN))
+ }
+
+ def shouldMergeSpanKinds(spanA: Span, spanB: Span): Boolean = {
+ val spanAKind = SpanUtils.spanKind(spanA)
+ val spanBKind = SpanUtils.spanKind(spanB)
+    // if both span kinds are resolved correctly (non-empty), return false when they are the same;
+    // for all other cases, return true.
+    // also, don't merge spans with a 'producer' or 'consumer' span.kind
+ if ((spanAKind != "" && spanBKind != "" && spanAKind == spanBKind) ||
+ isProducerConsumerSpanKind(spanAKind) ||
+ isProducerConsumerSpanKind(spanBKind)) {
+ false
+ } else {
+ true
+ }
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/SpanTree.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/SpanTree.scala
new file mode 100644
index 000000000..d537d97fb
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/SpanTree.scala
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.utils
+
+import com.expedia.open.tracing.Span
+
+import scala.collection.mutable
+import scala.collection.mutable.ListBuffer
+
+case class MutableSpanForest(private var spans: Seq[Span]) {
+ private var forest: mutable.ListBuffer[SpanTree] = _
+ private var needForestUpdate = true
+
+ def getAllTrees: Seq[SpanTree] = {
+ if (needForestUpdate) reCreateForest()
+ forest
+ }
+
+ def countTrees: Int = getAllTrees.size
+
+ def treesWithLoopbackRoots: Seq[SpanTree] = getAllTrees.filter(tree => tree.span.getSpanId == tree.span.getParentSpanId)
+
+ def getUnderlyingSpans: Seq[Span] = this.spans
+
+ def updateUnderlyingSpans(spans: Seq[Span], triggerForestUpdate: Boolean = true): MutableSpanForest = {
+ this.spans = spans
+ if (triggerForestUpdate) needForestUpdate = true
+ this
+ }
+
+ def orphanedTrees(): Seq[SpanTree] = getAllTrees.filter(_.span.getParentSpanId.nonEmpty)
+
+ def addNewRoot(rootSpan: Span): MutableSpanForest = {
+ val newTree = SpanTree(rootSpan)
+ mergeTreesUnder(newTree)
+ spans = spans :+ rootSpan
+ forest = mutable.ListBuffer(newTree)
+ needForestUpdate = false
+ this
+ }
+
+ def mergeTreesUnder(root: SpanTree): MutableSpanForest = {
+ val toBeMergedTrees = forest.filter(_ != root)
+
+ val toBeUpdatedUnderlyingSpans = mutable.ListBuffer[(Span, Span)]()
+ toBeMergedTrees.foreach(tree => {
+ val originalSpan = tree.span
+ val updatedSpan = Span.newBuilder(originalSpan).setParentSpanId(root.span.getSpanId).build()
+ toBeUpdatedUnderlyingSpans += ((originalSpan, updatedSpan))
+ tree.span = updatedSpan
+ root.children += tree
+ })
+
+ updateUnderlyingSpanWith(toBeUpdatedUnderlyingSpans)
+ this.forest = mutable.ListBuffer[SpanTree](root)
+ needForestUpdate = false
+ this
+ }
+
+ def updateEachSpanTreeRoot(updateFunc: (Span) => Span): MutableSpanForest = {
+ val toBeUpdatedUnderlyingSpans = mutable.ListBuffer[(Span, Span)]()
+
+ for (tree <- getAllTrees) {
+ val originalSpan = tree.span
+ val updatedSpan = updateFunc(originalSpan)
+ if (originalSpan != updatedSpan) {
+ tree.span = updatedSpan
+ toBeUpdatedUnderlyingSpans += ((originalSpan, updatedSpan))
+ }
+ }
+ updateUnderlyingSpanWith(toBeUpdatedUnderlyingSpans)
+
+ this
+ }
+
+ private def reCreateForest() = {
+ this.forest = mutable.ListBuffer[SpanTree]()
+ if (this.spans.nonEmpty) {
+ val spanIdTreeMap = mutable.HashMap[String, SpanTree]()
+ val possibleRoots = mutable.HashSet[String]()
+
+ spans.foreach {
+ span =>
+ spanIdTreeMap.put(span.getSpanId, SpanTree(span))
+ possibleRoots.add(span.getSpanId)
+ }
+
+ for (span <- spans;
+ parentTree <- spanIdTreeMap.get(span.getParentSpanId)) {
+ val self = spanIdTreeMap(span.getSpanId)
+ if (parentTree != self) {
+ parentTree.children += self
+ possibleRoots.remove(span.getSpanId)
+ }
+ }
+
+ spanIdTreeMap.foreach {
+ case (spanId, tree) => if (possibleRoots.contains(spanId)) this.forest += tree
+ }
+ }
+ needForestUpdate = false
+ }
+
+ private def updateUnderlyingSpanWith(updateList: ListBuffer[(Span, Span)]) = {
+ if (updateList.nonEmpty) {
+ // update the underlying spans
+ this.spans = this.spans.map(span => {
+ updateList.find {
+ case (curr, _) => curr == span
+ } match {
+ case Some((_, ne)) => ne
+ case _ => span
+ }
+ })
+ }
+ }
+
+ def collapse(applyCondition: (SpanTree) => Option[Span]): Unit = {
+ val underlyingSpans = mutable.ListBuffer[Span]()
+
+ def collapseTree(spanTree: SpanTree): Unit = {
+ val queue = mutable.Queue[SpanTree]()
+ queue.enqueue(spanTree)
+
+ while (queue.nonEmpty) {
+ val tree = queue.dequeue()
+ applyCondition(tree) match {
+ case Some(mergedSpan) =>
+ tree.span = mergedSpan
+ val childSpanTrees = new ListBuffer[SpanTree]()
+ tree.children.foreach(t => childSpanTrees.appendAll(t.children))
+ tree.children.clear()
+ childSpanTrees.foreach(tr => tree.children.append(tr))
+ case _ =>
+ }
+ underlyingSpans.append(tree.span)
+ queue.enqueue(tree.children:_*)
+ }
+ }
+
+ getAllTrees.foreach(collapseTree)
+ updateUnderlyingSpans(underlyingSpans, triggerForestUpdate = false)
+ }
+}
+
+case class SpanTree(var span: Span, children: mutable.ListBuffer[SpanTree] = mutable.ListBuffer[SpanTree]())
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/TagBuilders.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/TagBuilders.scala
new file mode 100644
index 000000000..8c03bb0b0
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/TagBuilders.scala
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.utils
+
+import com.expedia.open.tracing.Tag
+
+object TagBuilders {
+ def buildStringTag(tagKey: String, tagValue: String): Tag =
+ Tag.newBuilder()
+ .setKey(tagKey)
+ .setType(Tag.TagType.STRING)
+ .setVStr(tagValue)
+ .build()
+
+ def buildBoolTag(tagKey: String, tagValue: Boolean): Tag =
+ Tag.newBuilder()
+ .setKey(tagKey)
+ .setType(Tag.TagType.BOOL)
+ .setVBool(tagValue)
+ .build()
+
+ def buildLongTag(tagKey: String, tagValue: Long): Tag =
+ Tag.newBuilder()
+ .setKey(tagKey)
+ .setType(Tag.TagType.LONG)
+ .setVLong(tagValue)
+ .build()
+
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/TagExtractors.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/TagExtractors.scala
new file mode 100644
index 000000000..074ddde8e
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/TagExtractors.scala
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.utils
+
+import com.expedia.open.tracing.Span
+import scala.collection.JavaConverters._
+
+object TagExtractors {
+ def containsTag(span: Span, tagKey: String): Boolean = {
+ span.getTagsList.asScala.exists(_.getKey == tagKey)
+ }
+
+ def extractTagStringValue(span: Span, tagKey: String): String = {
+ span.getTagsList.asScala.find(_.getKey == tagKey) match {
+ case Some(t) => t.getVStr
+ case _ => ""
+ }
+ }
+
+ def extractTagLongValue(span: Span, tagKey: String): Long = {
+ span.getTagsList.asScala.find(_.getKey == tagKey) match {
+ case Some(t) => t.getVLong
+ case _ => -1
+ }
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/TraceMerger.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/TraceMerger.scala
new file mode 100644
index 000000000..f5264f546
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/utils/TraceMerger.scala
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2019 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.utils
+
+import com.expedia.open.tracing.api.Trace
+
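+/**
+ * merges trace chunks that share the same traceId into a single [[Trace]]
+ *
+ * A minimal usage sketch (hypothetical partial traces):
+ * {{{
+ * // two chunks of trace "t1" plus a trace "t2" yield two merged traces
+ * TraceMerger.merge(Seq(t1ChunkA, t1ChunkB, t2))
+ * }}}
+ */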
+object TraceMerger {
+ def merge(traces: Seq[Trace]): Seq[Trace] = {
+ traces.groupBy(_.getTraceId).mapValues {
+ seq => {
+ if (seq.size == 1) {
+ seq.head
+ } else {
+ val head = seq.head.toBuilder
+ seq.tail.foreach(t => head.addAllChildSpans(t.getChildSpansList))
+ head.build()
+ }
+ }
+ }.values.toSeq
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/ParentIdValidator.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/ParentIdValidator.scala
new file mode 100644
index 000000000..085744b2d
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/ParentIdValidator.scala
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.validators
+
+import com.expedia.open.tracing.api.Trace
+import com.expedia.www.haystack.trace.reader.exceptions.InvalidTraceException
+
+import scala.collection.JavaConverters._
+import scala.util.{Failure, Success, Try}
+
+/**
+ * validates that the spans in the trace have valid parentIds
+ * assumes that traceId is a non-empty string and that there is a single root; apply [[TraceIdValidator]] and [[RootValidator]] to make sure
+ */
+class ParentIdValidator extends TraceValidator {
+ override def validate(trace: Trace): Try[Trace] = {
+ val spans = trace.getChildSpansList.asScala
+ val spanIdSet = spans.map(_.getSpanId).toSet
+
+ if (!spans.forall(sp => spanIdSet.contains(sp.getParentSpanId) || sp.getParentSpanId.isEmpty)) {
+ Failure(new InvalidTraceException(s"spans without valid parent found for traceId=${spans.head.getTraceId}"))
+ } else if (spans.exists(sp => sp.getSpanId == sp.getParentSpanId)) {
+ Failure(new InvalidTraceException(s"same parent and span id found for one or more spans for traceId=${spans.head.getTraceId}"))
+ }
+ else {
+ Success(trace)
+ }
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/RootValidator.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/RootValidator.scala
new file mode 100644
index 000000000..ffee3121a
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/RootValidator.scala
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.validators
+
+import com.expedia.open.tracing.api.Trace
+import com.expedia.www.haystack.trace.reader.exceptions.InvalidTraceException
+
+import scala.collection.JavaConverters._
+import scala.util.{Failure, Success, Try}
+
+/**
+ * validates that the trace has exactly one root span
+ * assumes that traceId is a non-empty string; apply [[TraceIdValidator]] first to ensure this
+ */
+class RootValidator extends TraceValidator {
+ override def validate(trace: Trace): Try[Trace] = {
+ val roots = trace.getChildSpansList.asScala.filter(_.getParentSpanId.isEmpty).map(_.getSpanId).toSet
+
+ if (roots.size != 1) {
+ Failure(new InvalidTraceException(s"found ${roots.size} roots with spanIDs=${roots.mkString(",")} and traceID=${trace.getTraceId}"))
+ } else {
+ Success(trace)
+ }
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/TraceIdValidator.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/TraceIdValidator.scala
new file mode 100644
index 000000000..cfcdb16f6
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/TraceIdValidator.scala
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.validators
+
+import com.expedia.open.tracing.api.Trace
+import com.expedia.www.haystack.trace.reader.exceptions.InvalidTraceException
+
+import scala.collection.JavaConverters._
+import scala.util.{Failure, Success, Try}
+
+/**
+ * validates that the traceId is non-empty and consistent across all spans of the trace
+ */
+class TraceIdValidator extends TraceValidator {
+ override def validate(trace: Trace): Try[Trace] =
+ if (trace.getTraceId.isEmpty) {
+ Failure(new InvalidTraceException("invalid traceId"))
+ } else if (!trace.getChildSpansList.asScala.forall(_.getTraceId == trace.getTraceId)) {
+ Failure(new InvalidTraceException(s"span with different traceId are not allowed for traceId=${trace.getTraceId}"))
+ } else {
+ Success(trace)
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/TraceValidationHandler.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/TraceValidationHandler.scala
new file mode 100644
index 000000000..ce16adac4
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/TraceValidationHandler.scala
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.validators
+
+import com.expedia.open.tracing.api.Trace
+
+import scala.util.{Success, Try}
+
+/**
+ * takes a sequence of [[TraceValidator]] and applies each validation to the trace
+ * returns Success, or the Failure of the first validation that failed
+ *
+ * @param validatorSeq sequence of validations to apply to the trace
+ */
+class TraceValidationHandler(validatorSeq: Seq[TraceValidator]) {
+ def validate(trace: Trace): Try[Trace] = {
+ validatorSeq
+ .map(_.validate(trace))
+ .find(_.isFailure)
+ .getOrElse(Success(trace))
+ }
+}
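
Taken together with the validators above, a typical chain might look like the sketch below; ordering matters, since the traceId and root checks establish the assumptions the parentId check relies on. Note that `validate` evaluates every validator before picking the first failure; as the validators are pure, this only costs a little extra work.

```scala
import com.expedia.open.tracing.api.Trace
import scala.util.{Failure, Success}

val validationHandler = new TraceValidationHandler(Seq(
  new TraceIdValidator,   // traceId is non-empty and consistent
  new RootValidator,      // exactly one root span
  new ParentIdValidator)) // every parentId resolves within the trace

def describe(trace: Trace): String = validationHandler.validate(trace) match {
  case Success(_)  => "valid trace"
  case Failure(ex) => s"rejected: ${ex.getMessage}"
}
```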
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/TraceValidator.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/TraceValidator.scala
new file mode 100644
index 000000000..4bb3c065e
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/readers/validators/TraceValidator.scala
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.readers.validators
+
+import com.expedia.open.tracing.api.Trace
+
+import scala.util.Try
+
+trait TraceValidator {
+ def validate(trace: Trace): Try[Trace]
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/services/GrpcHandler.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/services/GrpcHandler.scala
new file mode 100644
index 000000000..358a8711a
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/services/GrpcHandler.scala
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.services
+
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.google.protobuf.GeneratedMessageV3
+import io.grpc.Status
+import io.grpc.stub.StreamObserver
+import org.slf4j.{Logger, LoggerFactory}
+
+import scala.concurrent.{ExecutionContextExecutor, Future}
+import scala.util.{Failure, Success}
+
+object GrpcHandler {
+ protected val LOGGER: Logger = LoggerFactory.getLogger(classOf[GrpcHandler])
+}
+
+/**
+ * Handler for a gRPC response:
+ * completes the responseObserver with the response object, or with an error on failure,
+ * and takes care of the corresponding logging and metric updates
+ *
+ * @param operationName name of the gRPC operation, used for logging and metric names
+ * @param executor executor on which the response handling is invoked
+ */
+
+class GrpcHandler(operationName: String)(implicit val executor: ExecutionContextExecutor) extends MetricsSupport {
+ private val metricFriendlyOperationName = operationName.replace('/', '.')
+ private val timer = metricRegistry.timer(metricFriendlyOperationName)
+ private val failureMeter = metricRegistry.meter(s"$metricFriendlyOperationName.failures")
+
+ import GrpcHandler._
+
+ def handle[Rs](request: GeneratedMessageV3, responseObserver: StreamObserver[Rs])(op: => Future[Rs]): Unit = {
+ val time = timer.time()
+ op onComplete {
+ case Success(response) =>
+ responseObserver.onNext(response)
+ responseObserver.onCompleted()
+ time.stop()
+ LOGGER.debug(s"service invocation for operation=$operationName and request=${request.toString} completed successfully")
+
+ case Failure(ex) =>
+ responseObserver.onError(Status.fromThrowable(ex).asRuntimeException())
+ failureMeter.mark()
+ time.stop()
+ LOGGER.error(s"service invocation for operation=$operationName and request=${request.toString} failed with error", ex)
+ }
+ }
+}
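
A minimal sketch of wiring a `Future`-returning operation through `GrpcHandler`; the operation name is illustrative, and the observer would normally be supplied by the gRPC runtime:

```scala
import com.expedia.open.tracing.api.{Trace, TraceRequest}
import io.grpc.stub.StreamObserver
import scala.concurrent.{ExecutionContextExecutor, Future}

implicit val executor: ExecutionContextExecutor = scala.concurrent.ExecutionContext.global

// '/' is rewritten to '.', so the timer registers as haystack.TraceReader.getTrace
val handler = new GrpcHandler("haystack.TraceReader/getTrace")

def getTrace(request: TraceRequest, observer: StreamObserver[Trace]): Unit =
  handler.handle(request, observer) {
    Future.successful(Trace.getDefaultInstance) // stand-in for a real store lookup
  }
```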
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/services/GrpcHealthService.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/services/GrpcHealthService.scala
new file mode 100644
index 000000000..9299b5bc0
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/services/GrpcHealthService.scala
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.services
+
+import io.grpc.health.v1.{HealthCheckRequest, HealthCheckResponse, HealthGrpc}
+import io.grpc.stub.StreamObserver
+
+class GrpcHealthService extends HealthGrpc.HealthImplBase {
+
+ override def check(request: HealthCheckRequest, responseObserver: StreamObserver[HealthCheckResponse]): Unit = {
+ responseObserver.onNext(HealthCheckResponse
+ .newBuilder()
+ .setStatus(HealthCheckResponse.ServingStatus.SERVING)
+ .build())
+ responseObserver.onCompleted()
+ }
+}
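
Since the service unconditionally reports SERVING, a probe is trivial. A hedged client sketch (host and port are made up, and `usePlaintext()` takes a boolean on older grpc-java versions):

```scala
import io.grpc.ManagedChannelBuilder
import io.grpc.health.v1.{HealthCheckRequest, HealthGrpc}

val channel = ManagedChannelBuilder.forAddress("localhost", 8088).usePlaintext().build()
val status = HealthGrpc.newBlockingStub(channel)
  .check(HealthCheckRequest.getDefaultInstance)
  .getStatus // expected: SERVING
channel.shutdown()
```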
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/services/TraceService.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/services/TraceService.scala
new file mode 100644
index 000000000..c95990c8c
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/services/TraceService.scala
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.services
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.api._
+import com.expedia.www.haystack.trace.reader.config.entities.{TraceTransformersConfiguration, TraceValidatorsConfiguration}
+import com.expedia.www.haystack.trace.reader.readers.TraceReader
+import com.expedia.www.haystack.trace.reader.stores.TraceStore
+import io.grpc.stub.StreamObserver
+
+import scala.concurrent.ExecutionContextExecutor
+
+class TraceService(traceStore: TraceStore,
+ validatorsConfig: TraceValidatorsConfiguration,
+ transformersConfig: TraceTransformersConfiguration)
+ (implicit val executor: ExecutionContextExecutor) extends TraceReaderGrpc.TraceReaderImplBase {
+
+ private val handleGetTraceResponse = new GrpcHandler(TraceReaderGrpc.METHOD_GET_TRACE.getFullMethodName)
+ private val handleGetRawTraceResponse = new GrpcHandler(TraceReaderGrpc.METHOD_GET_RAW_TRACE.getFullMethodName)
+ private val handleGetRawSpanResponse = new GrpcHandler(TraceReaderGrpc.METHOD_GET_RAW_SPAN.getFullMethodName)
+ private val handleSearchResponse = new GrpcHandler(TraceReaderGrpc.METHOD_SEARCH_TRACES.getFullMethodName)
+ private val handleFieldNamesResponse = new GrpcHandler(TraceReaderGrpc.METHOD_GET_FIELD_NAMES.getFullMethodName)
+ private val handleFieldValuesResponse = new GrpcHandler(TraceReaderGrpc.METHOD_GET_FIELD_VALUES.getFullMethodName)
+ private val handleTraceCallGraphResponse = new GrpcHandler(TraceReaderGrpc.METHOD_GET_TRACE_CALL_GRAPH.getFullMethodName)
+ private val handleTraceCountsResponse = new GrpcHandler(TraceReaderGrpc.METHOD_GET_TRACE_COUNTS.getFullMethodName)
+ private val handleRawTracesResponse = new GrpcHandler(TraceReaderGrpc.METHOD_GET_RAW_TRACES.getFullMethodName)
+ private val traceReader = new TraceReader(traceStore, validatorsConfig, transformersConfig)
+
+ /**
+ * endpoint for fetching a trace
+ * trace will be validated and transformed
+ *
+ * @param request TraceRequest object containing traceId of the trace to fetch
+ * @param responseObserver response observer will contain Trace object
+ * or will error out with [[com.expedia.www.haystack.trace.reader.exceptions.TraceNotFoundException]]
+ */
+ override def getTrace(request: TraceRequest, responseObserver: StreamObserver[Trace]): Unit = {
+ handleGetTraceResponse.handle(request, responseObserver) {
+ traceReader.getTrace(request)
+ }
+ }
+
+ /**
+ * endpoint for fetching raw trace logs; the trace will be returned without validations and transformations
+ *
+ * @param request TraceRequest object containing traceId of the trace to fetch
+ * @param responseObserver response observer will stream out [[Trace]] object
+ * or will error out with [[com.expedia.www.haystack.trace.reader.exceptions.TraceNotFoundException]]
+ */
+ override def getRawTrace(request: TraceRequest, responseObserver: StreamObserver[Trace]): Unit = {
+ handleGetRawTraceResponse.handle(request, responseObserver) {
+ traceReader.getRawTrace(request)
+ }
+ }
+
+ /**
+ * endpoint for fetching raw span logs; the span will be returned without validations and transformations
+ *
+ * @param request SpanRequest object containing spanId and parent traceId of the span to fetch
+ * @param responseObserver response observer will stream out a [[SpanResponse]] object
+ * or will error out with [[com.expedia.www.haystack.trace.reader.exceptions.SpanNotFoundException]]
+ */
+ override def getRawSpan(request: SpanRequest, responseObserver: StreamObserver[SpanResponse]): Unit = {
+ handleGetRawSpanResponse.handle(request, responseObserver) {
+ traceReader.getRawSpan(request)
+ }
+ }
+
+ /**
+ * endpoint for searching traces
+ *
+ * @param request TracesSearchRequest object containing criteria and filters for traces to find
+ * @param responseObserver response observer will stream out a [[TracesSearchResult]] with the matching traces
+ */
+ override def searchTraces(request: TracesSearchRequest, responseObserver: StreamObserver[TracesSearchResult]): Unit = {
+ handleSearchResponse.handle(request, responseObserver) {
+ traceReader.searchTraces(request)
+ }
+ }
+
+ /**
+ * get list of field names available in indexing system
+ *
+ * @param request empty request object
+ * @param responseObserver response observer will contain list of field names
+ */
+ override def getFieldNames(request: Empty, responseObserver: StreamObserver[FieldNames]): Unit = {
+ handleFieldNamesResponse.handle(request, responseObserver) {
+ traceReader.getFieldNames
+ }
+ }
+
+ /**
+ * get list of possible field values for a given field
+ *
+ * @param request contains field name and other field name-value pairs to be used as filters
+ * @param responseObserver response observer will contain list of field values for filter condition
+ */
+ override def getFieldValues(request: FieldValuesRequest, responseObserver: StreamObserver[FieldValues]): Unit = {
+ handleFieldValuesResponse.handle(request, responseObserver) {
+ traceReader.getFieldValues(request)
+ }
+ }
+
+ override def getTraceCallGraph(request: TraceRequest, responseObserver: StreamObserver[TraceCallGraph]): Unit = {
+ handleTraceCallGraphResponse.handle(request, responseObserver) {
+ traceReader.getTraceCallGraph(request)
+ }
+ }
+
+ override def getTraceCounts(request: TraceCountsRequest, responseObserver: StreamObserver[TraceCounts]): Unit = {
+ handleTraceCountsResponse.handle(request, responseObserver) {
+ traceReader.getTraceCounts(request)
+ }
+ }
+
+ override def getRawTraces(request: RawTracesRequest, responseObserver: StreamObserver[RawTracesResult]): Unit = {
+ handleRawTracesResponse.handle(request, responseObserver) {
+ traceReader.getRawTraces(request)
+ }
+ }
+}
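
From a client's perspective, these endpoints are reached through the generated stubs. A blocking-stub sketch (address illustrative; the stub API is assumed from standard grpc-java codegen):

```scala
import com.expedia.open.tracing.api.{TraceReaderGrpc, TraceRequest}
import io.grpc.ManagedChannelBuilder

val channel = ManagedChannelBuilder.forAddress("localhost", 8088).usePlaintext().build()
val trace = TraceReaderGrpc.newBlockingStub(channel)
  .getTrace(TraceRequest.newBuilder().setTraceId("abc-123").build())
println(s"spans fetched: ${trace.getChildSpansCount}")
channel.shutdown()
```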
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/EsIndexedTraceStore.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/EsIndexedTraceStore.scala
new file mode 100644
index 000000000..b91673b5e
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/EsIndexedTraceStore.scala
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2019, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores
+
+import com.expedia.open.tracing.api._
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.commons.clients.es.document.TraceIndexDoc
+import com.expedia.www.haystack.trace.commons.config.entities.{TraceStoreBackends, WhitelistIndexFieldConfiguration}
+import com.expedia.www.haystack.trace.reader.config.entities.ElasticSearchConfiguration
+import com.expedia.www.haystack.trace.reader.stores.readers.es.ElasticSearchReader
+import com.expedia.www.haystack.trace.reader.stores.readers.es.query.{FieldValuesQueryGenerator, ServiceMetadataQueryGenerator, TraceCountsQueryGenerator, TraceSearchQueryGenerator}
+import com.expedia.www.haystack.trace.reader.stores.readers.grpc.GrpcTraceReaders
+import io.searchbox.core.SearchResult
+import org.elasticsearch.index.IndexNotFoundException
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+import scala.concurrent.{ExecutionContextExecutor, Future}
+
+class EsIndexedTraceStore(traceStoreBackendConfig: TraceStoreBackends,
+ elasticSearchConfiguration: ElasticSearchConfiguration,
+ whitelistedFieldsConfiguration: WhitelistIndexFieldConfiguration)(implicit val executor: ExecutionContextExecutor)
+ extends TraceStore with MetricsSupport with ResponseParser {
+ private val LOGGER = LoggerFactory.getLogger(classOf[EsIndexedTraceStore])
+
+ private val traceReader: GrpcTraceReaders = new GrpcTraceReaders(traceStoreBackendConfig)
+ private val esReader: ElasticSearchReader = new ElasticSearchReader(elasticSearchConfiguration.clientConfiguration, elasticSearchConfiguration.awsRequestSigningConfiguration)
+ private val traceSearchQueryGenerator = new TraceSearchQueryGenerator(elasticSearchConfiguration.spansIndexConfiguration, ES_NESTED_DOC_NAME, whitelistedFieldsConfiguration)
+ private val traceCountsQueryGenerator = new TraceCountsQueryGenerator(elasticSearchConfiguration.spansIndexConfiguration, ES_NESTED_DOC_NAME, whitelistedFieldsConfiguration)
+ private val fieldValuesQueryGenerator = new FieldValuesQueryGenerator(elasticSearchConfiguration.spansIndexConfiguration, ES_NESTED_DOC_NAME, whitelistedFieldsConfiguration)
+ private val serviceMetadataQueryGenerator = new ServiceMetadataQueryGenerator(elasticSearchConfiguration.serviceMetadataIndexConfiguration)
+
+ private val esCountTraces = (request: TraceCountsRequest, useSpecificIndices: Boolean) => {
+ esReader.count(traceCountsQueryGenerator.generate(request, useSpecificIndices))
+ }
+
+ private val esSearchTraces = (request: TracesSearchRequest, useSpecificIndices: Boolean) => {
+ esReader.search(traceSearchQueryGenerator.generate(request, useSpecificIndices))
+ }
+
+ private def handleIndexNotFoundResult(result: Future[SearchResult],
+ retryFunc: () => Future[SearchResult]): Future[SearchResult] = {
+ result.recoverWith {
+ case _: IndexNotFoundException => retryFunc()
+ }
+ }
+
+ override def searchTraces(request: TracesSearchRequest): Future[Seq[Trace]] = {
+ // search ES with specific indices
+ val esResult = esSearchTraces(request, true)
+ // handle the response and retry in case of IndexNotFoundException
+ handleIndexNotFoundResult(esResult, () => esSearchTraces(request, false)).flatMap(result => extractTraces(result))
+ }
+
+ private def extractTraces(result: SearchResult): Future[Seq[Trace]] = {
+ val traceIdKey = "traceid"
+
+ // go through each hit and fetch trace for parsed traceId
+ val sourceList = result.getSourceAsStringList
+ if (sourceList != null && sourceList.size() > 0) {
+ val traceIds = sourceList
+ .asScala
+ .map(source => extractStringFieldFromSource(source, traceIdKey))
+ .filter(!_.isEmpty)
+ .toSet[String] // de-dup traceIds
+ .toList
+
+ traceReader.readTraces(traceIds)
+ } else {
+ Future.successful(Nil)
+ }
+ }
+
+ override def getTrace(traceId: String): Future[Trace] = traceReader.readTraces(List(traceId)).map(_.head)
+
+ override def getFieldNames: Future[FieldNames] = {
+ val fields = whitelistedFieldsConfiguration.whitelistIndexFields.distinct.sortBy(_.name)
+ val builder = FieldNames.newBuilder()
+
+ fields.foreach {
+ f => {
+ builder.addNames(f.name)
+ builder.addFieldMetadata(FieldMetadata.newBuilder().setIsRangeQuery(f.enableRangeQuery))
+ }
+ }
+
+ Future.successful(builder.build())
+ }
+
+ private def readFromServiceMetadata(request: FieldValuesRequest): Option[Future[Seq[String]]] = {
+ val serviceMetadataConfig = elasticSearchConfiguration.serviceMetadataIndexConfiguration
+ if (!serviceMetadataConfig.enabled) return None
+
+ if (request.getFieldName.toLowerCase == TraceIndexDoc.SERVICE_KEY_NAME && request.getFiltersCount == 0) {
+ Some(esReader
+ .search(serviceMetadataQueryGenerator.generateSearchServiceQuery())
+ .map(extractServiceMetadata))
+ } else if (request.getFieldName.toLowerCase == TraceIndexDoc.OPERATION_KEY_NAME
+ && (request.getFiltersCount == 1)
+ && request.getFiltersList.get(0).getName.toLowerCase == TraceIndexDoc.SERVICE_KEY_NAME) {
+ Some(esReader
+ .search(serviceMetadataQueryGenerator.generateSearchOperationQuery(request.getFilters(0).getValue))
+ .map(extractOperationMetadataFromSource(_, request.getFieldName.toLowerCase)))
+ } else {
+ LOGGER.info("read from service metadata request isn't served by elasticsearch")
+ None
+ }
+ }
+
+
+ override def getFieldValues(request: FieldValuesRequest): Future[Seq[String]] = {
+ readFromServiceMetadata(request).getOrElse(
+ esReader
+ .search(fieldValuesQueryGenerator.generate(request))
+ .map(extractFieldValues(_, request.getFieldName.toLowerCase)))
+ }
+
+ override def getTraceCounts(request: TraceCountsRequest): Future[TraceCounts] = {
+ // search ES with specific indices
+ val esResponse = esCountTraces(request, true)
+
+ // handle the response and retry in case of IndexNotFoundException
+ handleIndexNotFoundResult(esResponse, () => esCountTraces(request, false))
+ .map(result => mapSearchResultToTraceCount(request.getStartTime, request.getEndTime, result))
+ }
+
+ override def getRawTraces(request: RawTracesRequest): Future[Seq[Trace]] = {
+ traceReader.readTraces(request.getTraceIdList.asScala.toList)
+ }
+
+ override def close(): Unit = {
+ traceReader.close()
+ esReader.close()
+ }
+}
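
The retry logic in `handleIndexNotFoundResult` is worth isolating: the targeted, time-bucketed query is tried first, and only an `IndexNotFoundException` triggers the broad wildcard query, while every other failure propagates untouched. A reduced sketch:

```scala
import org.elasticsearch.index.IndexNotFoundException
import scala.concurrent.{ExecutionContext, Future}

// fall back to the broad query only when the targeted indices are missing
def searchWithFallback[T](specific: () => Future[T], broad: () => Future[T])
                         (implicit ec: ExecutionContext): Future[T] =
  specific().recoverWith { case _: IndexNotFoundException => broad() }
```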
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/ResponseParser.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/ResponseParser.scala
new file mode 100644
index 000000000..aaa6cf71d
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/ResponseParser.scala
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores
+
+import com.expedia.open.tracing.api.{TraceCount, TraceCounts}
+import com.expedia.www.haystack.trace.commons.config.entities.IndexFieldType
+import com.expedia.www.haystack.trace.reader.stores.readers.es.query.TraceCountsQueryGenerator
+import io.searchbox.core.SearchResult
+import org.json4s.ext.EnumNameSerializer
+import org.json4s.jackson.JsonMethods.parse
+import org.json4s.{DefaultFormats, Formats}
+
+import scala.collection.JavaConverters._
+import scala.concurrent.Future
+
+trait ResponseParser {
+ protected implicit val formats: Formats = DefaultFormats + new EnumNameSerializer(IndexFieldType)
+
+ private val ES_FIELD_AGGREGATIONS = "aggregations"
+ private val ES_FIELD_BUCKETS = "buckets"
+ private val ES_FIELD_KEY = "key"
+ private val ES_COUNT_PER_INTERVAL = "__count_per_interval"
+ private val ES_AGG_DOC_COUNT = "doc_count"
+ protected val ES_NESTED_DOC_NAME = "spans"
+
+ protected def mapSearchResultToTraceCounts(result: SearchResult): Future[TraceCounts] = {
+ val aggregation = result.getJsonObject
+ .getAsJsonObject(ES_FIELD_AGGREGATIONS)
+ .getAsJsonObject(ES_NESTED_DOC_NAME)
+ .getAsJsonObject(ES_NESTED_DOC_NAME)
+ .getAsJsonObject(ES_COUNT_PER_INTERVAL)
+
+ val traceCounts = aggregation
+ .getAsJsonArray(ES_FIELD_BUCKETS).asScala.map(
+ element => TraceCount.newBuilder()
+ .setTimestamp(element.getAsJsonObject.get(ES_FIELD_KEY).getAsLong)
+ .setCount(element.getAsJsonObject.get(ES_AGG_DOC_COUNT).getAsLong)
+ .build()
+ ).asJava
+
+ Future.successful(TraceCounts.newBuilder().addAllTraceCount(traceCounts).build())
+ }
+
+ protected def mapSearchResultToTraceCount(startTime: Long, endTime: Long, result: SearchResult): TraceCounts = {
+ val traceCountsBuilder = TraceCounts.newBuilder()
+
+ result.getAggregations.getHistogramAggregation(TraceCountsQueryGenerator.COUNT_HISTOGRAM_NAME)
+ .getBuckets.asScala
+ .filter(bucket => startTime <= bucket.getKey && bucket.getKey <= endTime)
+ .foreach(bucket => {
+ val traceCount = TraceCount.newBuilder().setCount(bucket.getCount).setTimestamp(bucket.getKey)
+ traceCountsBuilder.addTraceCount(traceCount)
+ })
+ traceCountsBuilder.build()
+ }
+
+ protected def extractFieldValues(result: SearchResult, fieldName: String): List[String] = {
+ val aggregations =
+ result
+ .getJsonObject
+ .getAsJsonObject(ES_FIELD_AGGREGATIONS)
+ .getAsJsonObject(ES_NESTED_DOC_NAME)
+ .getAsJsonObject(fieldName)
+
+ if (aggregations.has(ES_FIELD_BUCKETS)) {
+ aggregations
+ .getAsJsonArray(ES_FIELD_BUCKETS)
+ .asScala
+ .map(element => element.getAsJsonObject.get(ES_FIELD_KEY).getAsString)
+ .toList
+ }
+ else {
+ aggregations
+ .getAsJsonObject(fieldName)
+ .getAsJsonArray(ES_FIELD_BUCKETS)
+ .asScala
+ .map(element => element.getAsJsonObject.get(ES_FIELD_KEY).getAsString)
+ .toList
+ }
+ }
+
+ protected def extractStringFieldFromSource(source: String, fieldName: String): String = {
+ (parse(source) \ fieldName).extract[String]
+ }
+
+ protected def extractServiceMetadata(result: SearchResult): Seq[String] = {
+ result.getAggregations.getTermsAggregation("distinct_services").getBuckets.asScala.map(_.getKey)
+ }
+
+ protected def extractOperationMetadataFromSource(result: SearchResult, fieldName: String): List[String] = {
+ // go through each hit and fetch field from service_metadata
+ val sourceList = result.getSourceAsStringList
+ if (sourceList != null && sourceList.size() > 0) {
+ sourceList
+ .asScala
+ .map(source => extractStringFieldFromSource(source, fieldName))
+ .filter(!_.isEmpty)
+ .toSet[String] // de-dup fieldValues
+ .toList
+ } else {
+ Nil
+ }
+ }
+}
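
`extractStringFieldFromSource` leans on json4s; a self-contained sketch of the same extraction (the sample JSON is made up):

```scala
import org.json4s.{DefaultFormats, Formats}
import org.json4s.jackson.JsonMethods.parse

implicit val formats: Formats = DefaultFormats

val source = """{"traceid":"abc-123","servicename":"checkout"}"""
val traceId = (parse(source) \ "traceid").extract[String] // "abc-123"
```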
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/TraceStore.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/TraceStore.scala
new file mode 100644
index 000000000..257f634e4
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/TraceStore.scala
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores
+
+import com.expedia.open.tracing.api._
+
+import scala.concurrent.Future
+
+trait TraceStore extends AutoCloseable {
+ def getTrace(traceId: String): Future[Trace]
+ def searchTraces(request: TracesSearchRequest): Future[Seq[Trace]]
+ def getFieldNames: Future[FieldNames]
+ def getFieldValues(request: FieldValuesRequest): Future[Seq[String]]
+ def getTraceCounts(request: TraceCountsRequest): Future[TraceCounts]
+ def getRawTraces(request: RawTracesRequest): Future[Seq[Trace]]
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ESUtils.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ESUtils.scala
new file mode 100644
index 000000000..5e6386d27
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ESUtils.scala
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores.readers.es
+
+import com.google.gson.Gson
+import io.searchbox.action.AbstractAction
+import io.searchbox.client.JestResult
+
+object ESUtils {
+ implicit class ElasticSearchSearchExtension[T <: JestResult](val search: AbstractAction[T]) extends AnyVal {
+ def toJson: String = search.getData(new Gson()).replaceAll("\n", "")
+ }
+}
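
The implicit class makes any Jest action loggable on one line; a usage sketch (index name illustrative):

```scala
import com.expedia.www.haystack.trace.reader.stores.readers.es.ESUtils._
import io.searchbox.core.Search

val search = new Search.Builder("""{"query": {"match_all": {}}}""")
  .addIndex("haystack-traces*")
  .build()

println(search.toJson) // the request payload with newlines stripped, log-friendly
```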
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ElasticSearchCountResultListener.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ElasticSearchCountResultListener.scala
new file mode 100644
index 000000000..b6da6351e
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ElasticSearchCountResultListener.scala
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores.readers.es
+
+import com.codahale.metrics.{Meter, Timer}
+import com.expedia.www.haystack.trace.reader.exceptions.ElasticSearchClientError
+import com.expedia.www.haystack.trace.reader.stores.readers.es.ESUtils._
+import com.expedia.www.haystack.trace.reader.stores.readers.es.ElasticSearchCountResultListener._
+import io.searchbox.client.JestResultHandler
+import io.searchbox.core.{Search, SearchResult}
+import org.elasticsearch.index.IndexNotFoundException
+import org.slf4j.{Logger, LoggerFactory}
+
+import scala.concurrent.Promise
+
+object ElasticSearchCountResultListener {
+ protected val LOGGER: Logger = LoggerFactory.getLogger(classOf[ElasticSearchCountResultListener])
+}
+
+class ElasticSearchCountResultListener(request: Search,
+ promise: Promise[SearchResult],
+ timer: Timer.Context,
+ failure: Meter) extends JestResultHandler[SearchResult] with ElasticSearchResultListener {
+
+ override def completed(result: SearchResult): Unit = {
+ timer.close()
+
+ if (is2xx(result.getResponseCode)) {
+ promise.success(result)
+ } else {
+ val errorJsonString = result.getJsonString
+ if (errorJsonString.toLowerCase.contains(INDEX_NOT_FOUND_EXCEPTION)) {
+ val indexNotFoundEx = new IndexNotFoundException("Index not found exception, should retry", ElasticSearchClientError(result.getResponseCode, errorJsonString))
+ promise.failure(indexNotFoundEx)
+ } else {
+ val ex = ElasticSearchClientError(result.getResponseCode, errorJsonString)
+ LOGGER.error(s"Failed in reading from elasticsearch for request='${request.toJson}'", ex)
+ failure.mark()
+ promise.failure(ex)
+ }
+ }
+ }
+
+ override def failed(ex: Exception): Unit = {
+ LOGGER.error(s"Failed in reading from elasticsearch for request=${request.toJson}", ex)
+ failure.mark()
+ timer.close()
+ promise.failure(ex)
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ElasticSearchReadResultListener.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ElasticSearchReadResultListener.scala
new file mode 100644
index 000000000..17fc98e96
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ElasticSearchReadResultListener.scala
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores.readers.es
+
+import com.codahale.metrics.{Meter, Timer}
+import com.expedia.www.haystack.trace.reader.exceptions.ElasticSearchClientError
+import com.expedia.www.haystack.trace.reader.stores.readers.es.ESUtils._
+import com.expedia.www.haystack.trace.reader.stores.readers.es.ElasticSearchReadResultListener._
+import io.searchbox.client.JestResultHandler
+import io.searchbox.core.{Search, SearchResult}
+import org.elasticsearch.index.IndexNotFoundException
+import org.slf4j.{Logger, LoggerFactory}
+
+import scala.concurrent.Promise
+
+object ElasticSearchReadResultListener {
+ protected val LOGGER: Logger = LoggerFactory.getLogger(classOf[ElasticSearchReadResultListener])
+}
+
+class ElasticSearchReadResultListener(request: Search,
+ promise: Promise[SearchResult],
+ timer: Timer.Context,
+ failure: Meter) extends JestResultHandler[SearchResult] with ElasticSearchResultListener {
+
+ override def completed(result: SearchResult): Unit = {
+ timer.close()
+
+ if (is2xx(result.getResponseCode)) {
+ promise.success(result)
+ } else {
+ if (result.getJsonString.toLowerCase.contains(INDEX_NOT_FOUND_EXCEPTION)) {
+ val indexNotFoundEx = new IndexNotFoundException("Index not found exception, should retry", ElasticSearchClientError(result.getResponseCode, result.getJsonString))
+ promise.failure(indexNotFoundEx)
+ } else {
+ val ex = ElasticSearchClientError(result.getResponseCode, result.getJsonString)
+ LOGGER.error(s"Failed in reading from elasticsearch for request='${request.toJson}'", ex)
+ failure.mark()
+ promise.failure(ex)
+ }
+ }
+ }
+
+ override def failed(ex: Exception): Unit = {
+ LOGGER.error(s"Failed in reading from elasticsearch for request=${request.toJson}", ex)
+ failure.mark()
+ timer.close()
+ promise.failure(ex)
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ElasticSearchReader.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ElasticSearchReader.scala
new file mode 100644
index 000000000..7b29ef519
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ElasticSearchReader.scala
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2019, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores.readers.es
+
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.commons.clients.es.AWSSigningJestClientFactory
+import com.expedia.www.haystack.trace.commons.config.entities.AWSRequestSigningConfiguration
+import com.expedia.www.haystack.trace.reader.config.entities.ElasticSearchClientConfiguration
+import com.expedia.www.haystack.trace.reader.metrics.AppMetricNames
+import com.expedia.www.haystack.trace.reader.stores.readers.es.ESUtils._
+import com.google.gson.Gson
+import io.searchbox.client.config.HttpClientConfig
+import io.searchbox.client.{JestClient, JestClientFactory}
+import io.searchbox.core.{Search, SearchResult}
+import org.slf4j.LoggerFactory
+
+import scala.concurrent.{ExecutionContextExecutor, Future, Promise}
+import scala.util.Try
+
+class ElasticSearchReader(config: ElasticSearchClientConfiguration, awsRequestSigningConfig: AWSRequestSigningConfiguration)(implicit val dispatcher: ExecutionContextExecutor) extends MetricsSupport with AutoCloseable {
+ private val LOGGER = LoggerFactory.getLogger(classOf[ElasticSearchReader])
+ private val readTimer = metricRegistry.timer(AppMetricNames.ELASTIC_SEARCH_READ_TIME)
+ private val readFailures = metricRegistry.meter(AppMetricNames.ELASTIC_SEARCH_READ_FAILURES)
+
+ // initialize the elastic search client
+ private val esClient: JestClient = {
+ LOGGER.info("Initializing the http elastic search client with endpoint={}", config.endpoint)
+
+ val factory = {
+ if (awsRequestSigningConfig.enabled) {
+ LOGGER.info("using AWSSigningJestClientFactory for es client")
+ new AWSSigningJestClientFactory(awsRequestSigningConfig)
+ } else {
+ LOGGER.info("using JestClientFactory for es client")
+ new JestClientFactory()
+ }
+ }
+
+ val builder = new HttpClientConfig.Builder(config.endpoint)
+ .multiThreaded(true)
+ .connTimeout(config.connectionTimeoutMillis)
+ .readTimeout(config.readTimeoutMillis)
+
+ if (config.username.isDefined && config.password.isDefined) {
+ builder.defaultCredentials(config.username.get, config.password.get)
+ }
+
+ factory.setHttpClientConfig(builder.build())
+ factory.getObject
+ }
+
+ def search(request: Search): Future[SearchResult] = {
+ val promise = Promise[SearchResult]()
+ val time = readTimer.time()
+ try {
+ LOGGER.debug(s"elastic search query requested: ${request.toString}', query: '${request.toJson}'")
+ esClient.executeAsync(request, new ElasticSearchReadResultListener(request, promise, time, readFailures))
+ promise.future
+ } catch {
+ case ex: Exception =>
+ readFailures.mark()
+ time.stop()
+ LOGGER.error(s"Failed to read from elasticsearch for request=${request.toJson} with exception", ex)
+ Future.failed(ex)
+ }
+ }
+
+ def count(request: Search): Future[SearchResult] = {
+ val promise = Promise[SearchResult]()
+ val time = readTimer.time()
+ try {
+ LOGGER.debug(s"elastic count query requested: ${request.toString}', query: '${request.toJson}'")
+ esClient.executeAsync(request, new ElasticSearchCountResultListener(request, promise, time, readFailures))
+ promise.future
+ } catch {
+ case ex: Exception =>
+ readFailures.mark()
+ time.stop()
+ LOGGER.error(s"Failed to read from elasticsearch for request=${request.getData(new Gson())} with exception", ex)
+ Future.failed(ex)
+ }
+ }
+
+ override def close(): Unit = Try(esClient.shutdownClient())
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ElasticSearchResultListener.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ElasticSearchResultListener.scala
new file mode 100644
index 000000000..58545176b
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/ElasticSearchResultListener.scala
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores.readers.es
+
+trait ElasticSearchResultListener {
+ protected val INDEX_NOT_FOUND_EXCEPTION = "index_not_found_exception"
+
+ protected def is2xx(code: Int): Boolean = (code / 100) == 2
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/FieldValuesQueryGenerator.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/FieldValuesQueryGenerator.scala
new file mode 100644
index 000000000..bb4957f1b
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/FieldValuesQueryGenerator.scala
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores.readers.es.query
+
+import com.expedia.open.tracing.api.FieldValuesRequest
+import com.expedia.www.haystack.trace.commons.config.entities.WhitelistIndexFieldConfiguration
+import com.expedia.www.haystack.trace.reader.config.entities.SpansIndexConfiguration
+import io.searchbox.core.Search
+import org.elasticsearch.search.builder.SearchSourceBuilder
+
+class FieldValuesQueryGenerator(config: SpansIndexConfiguration,
+ nestedDocName: String,
+ indexConfiguration: WhitelistIndexFieldConfiguration)
+ extends SpansIndexQueryGenerator(nestedDocName, indexConfiguration) {
+
+ def generate(request: FieldValuesRequest): Search = {
+ new Search.Builder(buildQueryString(request))
+ .addIndex(s"${config.indexNamePrefix}*")
+ .addType(config.indexType)
+ .build()
+ }
+
+ private def buildQueryString(request: FieldValuesRequest): String = {
+ val query = createFilterFieldBasedQuery(request.getFiltersList)
+ if (query.filter().size() > 0) {
+ new SearchSourceBuilder()
+ .aggregation(createNestedAggregationQueryWithNestedFilters(request.getFieldName.toLowerCase, request.getFiltersList))
+ .size(0)
+ .toString
+ } else {
+ new SearchSourceBuilder()
+ .aggregation(createNestedAggregationQuery(request.getFieldName.toLowerCase))
+ .size(0)
+ .toString
+ }
+ }
+}
\ No newline at end of file
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/ServiceMetadataQueryGenerator.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/ServiceMetadataQueryGenerator.scala
new file mode 100644
index 000000000..9fc1602da
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/ServiceMetadataQueryGenerator.scala
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores.readers.es.query
+
+import com.expedia.www.haystack.trace.reader.config.entities.ServiceMetadataIndexConfiguration
+import io.searchbox.core.Search
+import org.elasticsearch.index.query.QueryBuilders.termQuery
+import org.elasticsearch.search.aggregations.AggregationBuilders
+import org.elasticsearch.search.builder.SearchSourceBuilder
+
+class ServiceMetadataQueryGenerator(config: ServiceMetadataIndexConfiguration) {
+ private val SERVICE_NAME_KEY = "servicename"
+ private val OPERATION_NAME_KEY = "operationname"
+ private val LIMIT = 10000
+
+ def generateSearchServiceQuery(): Search = {
+ val serviceAggregationQuery = buildServiceAggregationQuery()
+ generateSearchQuery(serviceAggregationQuery)
+ }
+
+ def generateSearchOperationQuery(serviceName: String): Search = {
+ val serviceAggregationQuery = buildOperationAggregationQuery(serviceName)
+ generateSearchQuery(serviceAggregationQuery)
+ }
+
+ private def generateSearchQuery(queryString: String): Search = {
+ new Search.Builder(queryString)
+ .addIndex(config.indexName)
+ .addType(config.indexType)
+ .build()
+ }
+
+ private def buildServiceAggregationQuery(): String = {
+ val aggr = AggregationBuilders.terms("distinct_services").field(SERVICE_NAME_KEY).size(LIMIT)
+ new SearchSourceBuilder().aggregation(aggr).size(0).toString
+ }
+
+ private def buildOperationAggregationQuery(serviceName: String): String = {
+ new SearchSourceBuilder()
+ .query(termQuery(SERVICE_NAME_KEY, serviceName))
+ .fetchSource(OPERATION_NAME_KEY, SERVICE_NAME_KEY)
+ .size(LIMIT)
+ .toString
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/SpansIndexQueryGenerator.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/SpansIndexQueryGenerator.scala
new file mode 100644
index 000000000..dc465dc5d
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/SpansIndexQueryGenerator.scala
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores.readers.es.query
+
+import java.text.SimpleDateFormat
+import java.util.{Date, TimeZone}
+
+import com.expedia.open.tracing.api.Operand.OperandCase
+import com.expedia.open.tracing.api.{ExpressionTree, Field}
+import com.expedia.www.haystack.trace.commons.clients.es.document.TraceIndexDoc
+import com.expedia.www.haystack.trace.commons.config.entities.{IndexFieldType, WhitelistIndexFieldConfiguration}
+import io.searchbox.strings.StringUtils
+import org.apache.lucene.search.join.ScoreMode
+import org.elasticsearch.index.query.QueryBuilders.{boolQuery, nestedQuery, termQuery}
+import org.elasticsearch.index.query._
+import org.elasticsearch.search.aggregations.AggregationBuilder
+import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder
+import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregationBuilder
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder
+import org.elasticsearch.search.aggregations.support.ValueType
+
+import scala.collection.JavaConverters._
+
+abstract class SpansIndexQueryGenerator(nestedDocName: String,
+ whitelistIndexFieldConfiguration: WhitelistIndexFieldConfiguration) {
+ private final val TIME_ZONE = TimeZone.getTimeZone("UTC")
+
+ // create search query by using filters list
+ @deprecated
+ protected def createFilterFieldBasedQuery(filterFields: java.util.List[Field]): BoolQueryBuilder = {
+ val traceContextWhitelistFields = whitelistIndexFieldConfiguration.globalTraceContextIndexFieldNames
+ val (traceContextFields, serviceContextFields) = filterFields
+ .asScala
+ .partition(f => traceContextWhitelistFields.contains(f.getName.toLowerCase))
+
+ val query = boolQuery()
+
+ createNestedQuery(serviceContextFields).foreach(query.filter)
+
+ traceContextFields foreach {
+ field => {
+ createNestedQuery(Seq(field)) match {
+ case Some(nestedQuery) => query.filter(nestedQuery)
+ case _ => /* maybe log? */
+ }
+ }
+ }
+ query
+ }
+
+ // create search query by using filters expression tree
+ protected def createExpressionTreeBasedQuery(expression: ExpressionTree): BoolQueryBuilder = {
+ val query = boolQuery()
+ val contextFiltersList = listOfContextFilters(expression)
+
+ // create a nested boolean query per context
+ contextFiltersList foreach {
+ filters => {
+ createNestedQuery(filters) match {
+ case Some(nestedQuery) => query.filter(nestedQuery)
+ case _ => /* maybe log? */
+ }
+ }
+ }
+ query
+ }
+
+ // create a list of field groups: one per trace-level filter and one per span-level filter group,
+ // assuming the first level of the expression holds trace-level filters
+ // and the second level holds span-level filter groups
+ private def listOfContextFilters(expression: ExpressionTree): List[List[Field]] = {
+ val (spanLevel, traceLevel) = expression.getOperandsList.asScala.partition(operand => operand.getOperandCase == OperandCase.EXPRESSION)
+
+ val traceLevelFilters = traceLevel.map(field => List(field.getField))
+ val spanLevelFilters = spanLevel.map(tree => toListOfSpanLevelFilters(tree.getExpression))
+
+ (spanLevelFilters ++ traceLevelFilters).toList
+ }
+
+ private def toListOfSpanLevelFilters(expression: ExpressionTree): List[Field] = {
+ expression.getOperandsList.asScala.map(field => field.getField).toList
+ }
+
+ private def createNestedQuery(fields: Seq[Field]): Option[NestedQueryBuilder] = {
+ if (fields.isEmpty) {
+ None
+ } else {
+ val nestedBoolQueryBuilder = createNestedBoolQuery(fields)
+ Some(nestedQuery(nestedDocName, nestedBoolQueryBuilder, ScoreMode.None))
+ }
+ }
+
+ private def buildNestedTermQuery(field: Field): TermQueryBuilder = {
+ termQuery(withBaseDoc(field.getName.toLowerCase), field.getValue)
+ }
+
+ private def buildNestedRangeQuery(field: Field): RangeQueryBuilder = {
+ def rangeValue(): Any = {
+ if(field.getName == TraceIndexDoc.DURATION_KEY_NAME || field.getName == TraceIndexDoc.START_TIME_KEY_NAME) {
+ field.getValue.toLong
+ } else {
+ val fieldType = whitelistIndexFieldConfiguration.whitelistIndexFields
+ .find(wf => wf.name.equalsIgnoreCase(field.getName))
+ .map(wf => wf.`type`)
+ .getOrElse(IndexFieldType.string)
+
+ fieldType match {
+ case IndexFieldType.int | IndexFieldType.long => field.getValue.toLong
+ case IndexFieldType.double => field.getValue.toDouble
+ case IndexFieldType.bool => field.getValue.toBoolean
+ case _ => field.getValue
+ }
+ }
+ }
+
+ val rangeQuery = QueryBuilders.rangeQuery(withBaseDoc(field.getName.toLowerCase))
+ val value = rangeValue()
+ field.getOperator match {
+ case Field.Operator.GREATER_THAN => rangeQuery.gt(value)
+ case Field.Operator.LESS_THAN => rangeQuery.lt(value)
+ case _ => throw new RuntimeException("Fail to understand the operator -" + field.getOperator)
+ }
+ rangeQuery
+ }
+
+ protected def createNestedBoolQuery(fields: Seq[Field]): BoolQueryBuilder = {
+ val boolQueryBuilder = boolQuery()
+
+ val validFields = fields.filterNot(f => StringUtils.isBlank(f.getValue))
+ validFields foreach {
+ field => {
+ field match {
+ case _ if field.getOperator == null || field.getOperator == Field.Operator.EQUAL =>
+ boolQueryBuilder.filter(buildNestedTermQuery(field))
+ case _ if field.getOperator == Field.Operator.NOT_EQUAL =>
+ boolQueryBuilder.mustNot(buildNestedTermQuery(field))
+ case _ if field.getOperator == Field.Operator.GREATER_THAN || field.getOperator == Field.Operator.LESS_THAN =>
+ boolQueryBuilder.filter(buildNestedRangeQuery(field))
+ case _ => throw new RuntimeException("Fail to understand the operator type of the field!")
+ }
+ }
+ }
+
+
+ boolQueryBuilder
+ }
+
+ protected def createNestedAggregationQuery(fieldName: String): AggregationBuilder =
+ new NestedAggregationBuilder(nestedDocName, nestedDocName)
+ .subAggregation(
+ new TermsAggregationBuilder(fieldName, ValueType.STRING)
+ .field(withBaseDoc(fieldName))
+ .size(1000))
+
+ protected def createNestedAggregationQueryWithNestedFilters(fieldName: String, filterFields: java.util.List[Field]): AggregationBuilder = {
+ val boolQueryBuilder = createNestedBoolQuery(filterFields.asScala)
+
+ new NestedAggregationBuilder(nestedDocName, nestedDocName)
+ .subAggregation(
+ new FilterAggregationBuilder(fieldName, boolQueryBuilder)
+ .subAggregation(new TermsAggregationBuilder(fieldName, ValueType.STRING)
+ .field(withBaseDoc(fieldName))
+ .size(1000))
+ )
+ }
+
+ def getESIndexes(startTimeInMicros: Long,
+ endTimeInMicros: Long,
+ indexNamePrefix: String,
+ indexHourBucket: Int,
+ indexHourTtl: Int): Seq[String] = {
+
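+ // a range wider than the index TTL cannot map onto live hour buckets, so fall back
+ // to searching the bare index prefix (typically an alias spanning all buckets)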
+ if (!isValidTimeRange(startTimeInMicros, endTimeInMicros, indexHourTtl)) {
+ Seq(indexNamePrefix)
+ } else {
+ val INDEX_BUCKET_TIME_IN_MICROS: Long = indexHourBucket.toLong * 60 * 60 * 1000 * 1000
+ val flooredStartTime = startTimeInMicros - (startTimeInMicros % INDEX_BUCKET_TIME_IN_MICROS)
+ val flooredEndTime = endTimeInMicros - (endTimeInMicros % INDEX_BUCKET_TIME_IN_MICROS)
+
+ for (datetimeInMicros <- flooredStartTime to flooredEndTime by INDEX_BUCKET_TIME_IN_MICROS)
+ yield {
+ val date = new Date(datetimeInMicros / 1000)
+ val dateBucket = createSimpleDateFormat("yyyy-MM-dd").format(date)
+ val hourBucket = createSimpleDateFormat("HH").format(date).toInt / indexHourBucket
+
+ s"$indexNamePrefix-$dateBucket-$hourBucket"
+ }
+ }
+ }
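+
+ // Worked example (hypothetical values): with indexNamePrefix = "haystack-traces" and
+ // indexHourBucket = 6, a range from 2017-08-30 03:15 to 13:45 UTC floors to the 6-hour
+ // buckets at 00:00, 06:00 and 12:00, yielding haystack-traces-2017-08-30-0,
+ // haystack-traces-2017-08-30-1 and haystack-traces-2017-08-30-2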
+
+ private def createSimpleDateFormat(pattern: String): SimpleDateFormat = {
+ val sdf = new SimpleDateFormat(pattern)
+ sdf.setTimeZone(TIME_ZONE)
+ sdf
+ }
+
+ private def isValidTimeRange(startTimeInMicros: Long,
+ endTimeInMicros: Long,
+ indexHourTtl: Int): Boolean = {
+ (endTimeInMicros - startTimeInMicros) < (indexHourTtl.toLong * 60 * 60 * 1000 * 1000)
+ }
+
+ protected def withBaseDoc(field: String) = s"$nestedDocName.$field"
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/TraceCountsQueryGenerator.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/TraceCountsQueryGenerator.scala
new file mode 100644
index 000000000..f822dbc07
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/TraceCountsQueryGenerator.scala
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores.readers.es.query
+
+import com.expedia.open.tracing.api.TraceCountsRequest
+import com.expedia.www.haystack.trace.commons.clients.es.document.TraceIndexDoc
+import com.expedia.www.haystack.trace.commons.config.entities.WhitelistIndexFieldConfiguration
+import com.expedia.www.haystack.trace.reader.config.entities.SpansIndexConfiguration
+import io.searchbox.core.Search
+import org.elasticsearch.index.query.{BoolQueryBuilder, QueryBuilders}
+import org.elasticsearch.search.aggregations.AggregationBuilders
+import org.elasticsearch.search.builder.SearchSourceBuilder
+
+import scala.collection.JavaConverters._
+
+
+object TraceCountsQueryGenerator {
+ val COUNT_HISTOGRAM_NAME = "countagg"
+}
+
+class TraceCountsQueryGenerator(config: SpansIndexConfiguration,
+ nestedDocName: String,
+ whitelistIndexFields: WhitelistIndexFieldConfiguration)
+ extends SpansIndexQueryGenerator(nestedDocName, whitelistIndexFields) {
+
+ import TraceCountsQueryGenerator._
+
+ def generate(request: TraceCountsRequest, useSpecificIndices: Boolean): Search = {
+ require(request.getStartTime > 0)
+ require(request.getEndTime > 0)
+ require(request.getInterval > 0)
+
+ if (useSpecificIndices) {
+ generate(request)
+ } else {
+ new Search.Builder(buildQueryString(request))
+ .addIndex(config.indexNamePrefix)
+ .addType(config.indexType)
+ .build()
+ }
+ }
+
+ def generate(request: TraceCountsRequest): Search = {
+ require(request.getStartTime > 0)
+ require(request.getEndTime > 0)
+ require(request.getInterval > 0)
+
+ // create ES count query
+ val targetIndicesToSearch = getESIndexes(
+ request.getStartTime,
+ request.getEndTime,
+ config.indexNamePrefix,
+ config.indexHourBucket,
+ config.indexHourTtl).asJava
+
+ new Search.Builder(buildQueryString(request))
+ .addIndex(targetIndicesToSearch)
+ .addType(config.indexType)
+ .build()
+ }
+
+ private def buildQueryString(request: TraceCountsRequest): String = {
+ val query: BoolQueryBuilder =
+ if (request.hasFilterExpression) {
+ createExpressionTreeBasedQuery(request.getFilterExpression)
+ } else {
+ // filtering by a flat field list is deprecated; prefer the filter expression tree
+ createFilterFieldBasedQuery(request.getFieldsList)
+ }
+
+ query.must(QueryBuilders.rangeQuery(TraceIndexDoc.START_TIME_KEY_NAME).gte(request.getStartTime).lte(request.getEndTime))
+
+ val aggregation = AggregationBuilders
+ .histogram(COUNT_HISTOGRAM_NAME)
+ .field(TraceIndexDoc.START_TIME_KEY_NAME)
+ .interval(request.getInterval)
+ .extendedBounds(request.getStartTime, request.getEndTime)
+
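+ // sketch of the emitted search source: the bool query with its time-range constraint,
+ // the "countagg" histogram over starttime, and size 0 since only aggregation buckets
+ // are consumed by the caller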
+ new SearchSourceBuilder()
+ .query(query)
+ .aggregation(aggregation)
+ .size(0)
+ .toString
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/TraceSearchQueryGenerator.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/TraceSearchQueryGenerator.scala
new file mode 100644
index 000000000..14a4ed04d
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/es/query/TraceSearchQueryGenerator.scala
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores.readers.es.query
+
+import com.expedia.open.tracing.api.TracesSearchRequest
+import com.expedia.www.haystack.trace.commons.clients.es.document.TraceIndexDoc._
+import com.expedia.www.haystack.trace.commons.config.entities.WhitelistIndexFieldConfiguration
+import com.expedia.www.haystack.trace.reader.config.entities.SpansIndexConfiguration
+import io.searchbox.core.Search
+import org.apache.lucene.search.join.ScoreMode
+import org.elasticsearch.index.query.QueryBuilders._
+import org.elasticsearch.search.builder.SearchSourceBuilder
+import org.elasticsearch.search.sort.{FieldSortBuilder, SortOrder}
+
+import scala.collection.JavaConverters._
+
+class TraceSearchQueryGenerator(config: SpansIndexConfiguration,
+ nestedDocName: String,
+ whitelistIndexFields: WhitelistIndexFieldConfiguration)
+ extends SpansIndexQueryGenerator(nestedDocName, whitelistIndexFields) {
+
+ def generate(request: TracesSearchRequest, useSpecificIndices: Boolean): Search = {
+ require(request.getStartTime > 0)
+ require(request.getEndTime > 0)
+ require(request.getLimit > 0)
+
+ if (useSpecificIndices) {
+ generate(request)
+ } else {
+ new Search.Builder(buildQueryString(request))
+ .addIndex(config.indexNamePrefix)
+ .addType(config.indexType)
+ .build()
+ }
+ }
+
+ def generate(request: TracesSearchRequest): Search = {
+ require(request.getStartTime > 0)
+ require(request.getEndTime > 0)
+ require(request.getLimit > 0)
+
+ val targetIndicesToSearch = getESIndexes(
+ request.getStartTime,
+ request.getEndTime,
+ config.indexNamePrefix,
+ config.indexHourBucket,
+ config.indexHourTtl).asJava
+
+ new Search.Builder(buildQueryString(request))
+ .addIndex(targetIndicesToSearch)
+ .addType(config.indexType)
+ .build()
+ }
+
+ private def buildQueryString(request: TracesSearchRequest): String = {
+ val query =
+ if (request.hasFilterExpression)
+ createExpressionTreeBasedQuery(request.getFilterExpression)
+ else
+ createFilterFieldBasedQuery(request.getFieldsList)
+
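+ // when the root document carries a denormalized starttime (use.root.doc.starttime),
+ // the time-range filter and sort can target the root doc directly; otherwise both
+ // must go through the nested span documents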
+ if (config.useRootDocumentStartTime) {
+ query
+ .must(rangeQuery(START_TIME_KEY_NAME)
+ .gte(request.getStartTime)
+ .lte(request.getEndTime))
+ } else {
+ query.must(
+ nestedQuery(nestedDocName,
+ rangeQuery(withBaseDoc(START_TIME_KEY_NAME))
+ .gte(request.getStartTime)
+ .lte(request.getEndTime), ScoreMode.None))
+ }
+
+ val sortBuilder =
+ if (config.useRootDocumentStartTime) {
+ new FieldSortBuilder(START_TIME_KEY_NAME).order(SortOrder.DESC)
+ }
+ else {
+ new FieldSortBuilder(withBaseDoc(START_TIME_KEY_NAME)).order(SortOrder.DESC).setNestedPath(nestedDocName)
+ }
+
+ new SearchSourceBuilder().query(query).sort(sortBuilder).size(request.getLimit).toString
+ }
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/grpc/GrpcTraceReaders.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/grpc/GrpcTraceReaders.scala
new file mode 100644
index 000000000..a6c739fe2
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/grpc/GrpcTraceReaders.scala
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores.readers.grpc
+
+import com.expedia.open.tracing.api.Trace
+import com.expedia.open.tracing.backend.{ReadSpansRequest, StorageBackendGrpc}
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trace.commons.config.entities.TraceStoreBackends
+import com.expedia.www.haystack.trace.reader.exceptions.TraceNotFoundException
+import com.expedia.www.haystack.trace.reader.metrics.AppMetricNames
+import com.expedia.www.haystack.trace.reader.readers.utils.TraceMerger
+import io.grpc.{ManagedChannel, ManagedChannelBuilder}
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+import scala.concurrent.{ExecutionContextExecutor, Future, Promise}
+
+class GrpcTraceReaders(config: TraceStoreBackends)
+ (implicit val dispatcher: ExecutionContextExecutor) extends MetricsSupport with AutoCloseable {
+ private val LOGGER = LoggerFactory.getLogger(classOf[GrpcTraceReaders])
+
+ private val readTimer = metricRegistry.timer(AppMetricNames.BACKEND_READ_TIME)
+ private val readFailures = metricRegistry.meter(AppMetricNames.BACKEND_READ_FAILURES)
+ private val tracesFailures = metricRegistry.meter(AppMetricNames.BACKEND_TRACES_FAILURE)
+
+ private val clients: Seq[GrpcChannelClient] = config.backends.map {
+ backend => {
+ val channel = ManagedChannelBuilder
+ .forAddress(backend.host, backend.port)
+ .usePlaintext(true)
+ .build()
+
+ val client = StorageBackendGrpc.newFutureStub(channel)
+ GrpcChannelClient(channel, client)
+ }
+ }
+
+ def readTraces(traceIds: List[String]): Future[Seq[Trace]] = {
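+ // fan out the read to every configured backend; a failing backend degrades to an
+ // empty result, and the merged outcome is an error only when no backend returns spans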
+ val allFutures = clients.map {
+ client =>
+ readTraces(traceIds, client.stub) recoverWith {
+ case _: Exception => Future.successful(Seq.empty[Trace])
+ }
+ }
+
+ Future.sequence(allFutures)
+ .map(traceSeq => traceSeq.flatten)
+ .map {
+ traces =>
+ if (traces.isEmpty) throw new TraceNotFoundException() else TraceMerger.merge(traces)
+ }
+ }
+
+ private def readTraces(traceIds: List[String], client: StorageBackendGrpc.StorageBackendFutureStub): Future[Seq[Trace]] = {
+ val timer = readTimer.time()
+ val promise = Promise[Seq[Trace]]
+
+ try {
+ val readSpansRequest = ReadSpansRequest.newBuilder().addAllTraceIds(traceIds.asJavaCollection).build()
+ val futureResponse = client.readSpans(readSpansRequest)
+ futureResponse.addListener(new ReadSpansResponseListener(
+ futureResponse,
+ promise,
+ timer,
+ readFailures,
+ tracesFailures,
+ traceIds.size), dispatcher)
+
+ // return the future with the results for the given client
+ promise.future
+ } catch {
+ case ex: Exception =>
+ readFailures.mark()
+ timer.stop()
+ LOGGER.error("Failed to read raw traces with exception", ex)
+ Future.failed(ex)
+ }
+ }
+
+ override def close(): Unit = {
+ clients.foreach(_.channel.shutdown())
+ }
+
+ case class GrpcChannelClient(channel: ManagedChannel, stub: StorageBackendGrpc.StorageBackendFutureStub)
+}
diff --git a/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/grpc/ReadSpansResponseListener.scala b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/grpc/ReadSpansResponseListener.scala
new file mode 100644
index 000000000..5602ce36b
--- /dev/null
+++ b/traces/reader/src/main/scala/com/expedia/www/haystack/trace/reader/stores/readers/grpc/ReadSpansResponseListener.scala
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.stores.readers.grpc
+
+import java.util.concurrent.Future
+
+import com.codahale.metrics.{Meter, Timer}
+import com.expedia.open.tracing.api.Trace
+import com.expedia.open.tracing.backend.{ReadSpansResponse, TraceRecord}
+import com.expedia.www.haystack.trace.commons.packer.Unpacker
+import com.expedia.www.haystack.trace.reader.exceptions.TraceNotFoundException
+import org.slf4j.{Logger, LoggerFactory}
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+import scala.concurrent.Promise
+import scala.util.{Failure, Success, Try}
+
+object ReadSpansResponseListener {
+ protected val LOGGER: Logger = LoggerFactory.getLogger(classOf[ReadSpansResponseListener])
+}
+
+class ReadSpansResponseListener(readSpansResponse: Future[ReadSpansResponse],
+ promise: Promise[Seq[Trace]],
+ timer: Timer.Context,
+ failure: Meter,
+ tracesFailure: Meter,
+ traceIdCount: Int) extends Runnable {
+
+ import ReadSpansResponseListener._
+
+ override def run(): Unit = {
+ timer.close()
+
+ Try(readSpansResponse.get)
+ .flatMap(tryGetTraceRows)
+ .flatMap(tryDeserialize)
+ match {
+ case Success(traces) =>
+ tracesFailure.mark(traceIdCount - traces.length)
+ promise.success(traces)
+ case Failure(ex) =>
+ LOGGER.error("Failed in reading the record from trace-backend", ex)
+ failure.mark()
+ tracesFailure.mark(traceIdCount)
+ promise.failure(ex)
+ }
+ }
+
+ private def tryGetTraceRows(response: ReadSpansResponse): Try[Seq[TraceRecord]] = {
+ val records = response.getRecordsList
+ if (records.isEmpty) Failure(new TraceNotFoundException) else Success(records.asScala)
+ }
+
+ private def tryDeserialize(records: Seq[TraceRecord]): Try[Seq[Trace]] = {
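+ // unpack each record's span buffer, grouping child spans by traceId into one Trace
+ // builder per id; any single deserialization failure fails the whole batch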
+ val traceBuilderMap = new mutable.HashMap[String, Trace.Builder]()
+ var deserFailed: Failure[Seq[Trace]] = null
+
+ records.foreach(record => {
+ Try(Unpacker.readSpanBuffer(record.getSpans.toByteArray)) match {
+ case Success(sBuffer) =>
+ traceBuilderMap.getOrElseUpdate(sBuffer.getTraceId, Trace.newBuilder().setTraceId(sBuffer.getTraceId)).addAllChildSpans(sBuffer.getChildSpansList)
+ case Failure(cause) => deserFailed = Failure(cause)
+ }
+ })
+ if (deserFailed == null) Success(traceBuilderMap.values.map(_.build).toSeq) else deserFailed
+ }
+}
diff --git a/traces/reader/src/test/resources/config/base.conf b/traces/reader/src/test/resources/config/base.conf
new file mode 100644
index 000000000..2a3707325
--- /dev/null
+++ b/traces/reader/src/test/resources/config/base.conf
@@ -0,0 +1,99 @@
+health.status.path = "isHealthy"
+
+service {
+ port = 8088
+ ssl {
+ enabled = false
+ cert.path = "/ssl/cert"
+ private.key.path = "/ssl/private-key"
+ }
+ max.message.size = 52428800 # 50MB in bytes
+}
+
+backend {
+ client {
+ host = "localhost"
+ port = 8090
+ }
+}
+
+elasticsearch {
+ client {
+ endpoint = "http://elasticsearch:9200"
+ conn.timeout.ms = 10000
+ read.timeout.ms = 5000
+ }
+ index {
+ spans {
+ name.prefix = "haystack-traces"
+ type = "spans"
+ hour.bucket = 6
+ hour.ttl = 72 // 3 * 24 hours
+ use.root.doc.starttime = true
+ }
+ service.metadata {
+ enabled = false
+ name = "service_metadata"
+ type = "metadata"
+ }
+ }
+
+ # if enabled flag is true, es requests will be signed
+ signing.request.aws {
+ enabled = false
+ region = "us-west-2"
+ service.name = "es"
+ # if 'access.key' is not provided, will use DefaultAWSCredentialsProviderChain to resolve creds
+ access.key = ""
+ secret.key = ""
+ }
+}
+
+trace {
+ validators {
+ sequence = [
+ "com.expedia.www.haystack.trace.reader.readers.validators.TraceIdValidator"
+ "com.expedia.www.haystack.trace.reader.readers.validators.ParentIdValidator"
+ "com.expedia.www.haystack.trace.reader.readers.validators.RootValidator"
+ ]
+ }
+
+ transformers {
+ pre {
+ sequence = [
+ "com.expedia.www.haystack.trace.reader.readers.transformers.DeDuplicateSpanTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.ClientServerEventLogTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.InfrastructureTagTransformer"
+ ]
+ }
+ post {
+ sequence = [
+ "com.expedia.www.haystack.trace.reader.readers.transformers.PartialSpanTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.ClockSkewTransformer"
+ "com.expedia.www.haystack.trace.reader.readers.transformers.SortSpanTransformer"
+ ]
+ }
+ }
+}
+
+reload {
+ tables {
+ index.fields.config = "whitelist-index-fields"
+ }
+ config {
+ endpoint = "http://elasticsearch:9200"
+ database.name = "reload-configs"
+ }
+ startup.load = true
+ interval.ms = 5000 # -1 will imply 'no reload'
+
+ # if enabled flag is true, es requests will be signed
+ signing.request.aws {
+ enabled = false
+ region = "us-west-2"
+ service.name = "es"
+ # if 'access.key' is not provided, will use DefaultAWSCredentialsProviderChain to resolve creds
+ access.key = ""
+ secret.key = ""
+ }
+}
diff --git a/traces/reader/src/test/resources/logback-test.xml b/traces/reader/src/test/resources/logback-test.xml
new file mode 100644
index 000000000..298193e01
--- /dev/null
+++ b/traces/reader/src/test/resources/logback-test.xml
@@ -0,0 +1 @@
+
diff --git a/traces/reader/src/test/resources/raw_trace.json b/traces/reader/src/test/resources/raw_trace.json
new file mode 100644
index 000000000..4e8a4585f
--- /dev/null
+++ b/traces/reader/src/test/resources/raw_trace.json
@@ -0,0 +1,651 @@
+{
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "childSpans": [
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "2fe438d3-9742-4973-a5b6-e0e7870c60e3",
+ "parentSpanId": "9b0523d6-f14b-4a33-ba75-1e46720684cc",
+ "serviceName": "service_a",
+ "operationName": "oper_1",
+ "startTime": 1534487370387000,
+ "duration": 0,
+ "logs": [],
+ "tags": [
+ {
+ "key": "span.kind",
+ "value": "server"
+ }
+ ]
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "933e7aa6-9dd6-4818-86b0-a8561fc0b4da",
+ "parentSpanId": "9b0523d6-f14b-4a33-ba75-1e46720684cc",
+ "serviceName": "service_b",
+ "operationName": "oper_2",
+ "startTime": 1534487370212000,
+ "duration": 155000,
+ "logs": [
+ {
+ "timestamp": 1534487370212000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cs"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487370367000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cr"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "2fe438d3-9742-4973-a5b6-e0e7870c60e3",
+ "parentSpanId": "9b0523d6-f14b-4a33-ba75-1e46720684cc",
+ "serviceName": "service_b",
+ "operationName": "oper_3",
+ "startTime": 1534487370368000,
+ "duration": 53000,
+ "logs": [
+ {
+ "timestamp": 1534487370368000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cs"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487370421000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cr"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "f9d9fd32-55b9-4355-a07a-5e9455d17820",
+ "parentSpanId": "5e5a9451-08f2-4d83-90cb-4eb424f657fb",
+ "serviceName": "service_c",
+ "operationName": "oper_4",
+ "startTime": 1534487372108000,
+ "duration": 177000,
+ "logs": [
+ {
+ "timestamp": 1534487372108000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cs"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372285000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cr"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "5e5a9451-08f2-4d83-90cb-4eb424f657fb",
+ "parentSpanId": "12e410a5-58a4-4f3a-b559-6822ade5acfd",
+ "serviceName": "service_c",
+ "operationName": "oper_5",
+ "startTime": 1534487370603000,
+ "duration": 1686000,
+ "logs": [
+ {
+ "timestamp": 1534487370603000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "sr"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372289000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "ss"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "5e5a9451-08f2-4d83-90cb-4eb424f657fb",
+ "parentSpanId": "9b0523d6-f14b-4a33-ba75-1e46720684cc",
+ "serviceName": "service_b",
+ "operationName": "oper_6",
+ "startTime": 1534487370430000,
+ "duration": 1893000,
+ "logs": [
+ {
+ "timestamp": 1534487370430000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cs"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372323000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cr"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "0e4ebf10-ad12-4d3c-bf4c-db2bafa64e60",
+ "parentSpanId": "9b0523d6-f14b-4a33-ba75-1e46720684cc",
+ "serviceName": "service_b",
+ "operationName": "oper_7",
+ "startTime": 1534487372324000,
+ "duration": 399000,
+ "logs": [
+ {
+ "timestamp": 1534487372324000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cs"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372723000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cr"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "eb9b1f0d-c0e5-487e-9b50-a2175c69807e",
+ "parentSpanId": "9b0523d6-f14b-4a33-ba75-1e46720684cc",
+ "serviceName": "service_b",
+ "operationName": "oper_7",
+ "startTime": 1534487372726000,
+ "duration": 95000,
+ "logs": [
+ {
+ "timestamp": 1534487372726000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cs"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372821000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cr"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "9b0523d6-f14b-4a33-ba75-1e46720684cc",
+ "parentSpanId": "a6575426-1bd5-4195-ba8d-1bd36bc390b1",
+ "serviceName": "service_b",
+ "operationName": "oper_8",
+ "startTime": 1534487370211000,
+ "duration": 2617000,
+ "logs": [
+ {
+ "timestamp": 1534487370211000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "sr"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372828000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "ss"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "14eeae8e-90c2-4e43-8a3a-2737b8a8dd55",
+ "parentSpanId": "9b0523d6-f14b-4a33-ba75-1e46720684cc",
+ "serviceName": "service_b",
+ "operationName": "oper_9",
+ "startTime": 1534487372725000,
+ "duration": 986000,
+ "logs": [
+ {
+ "timestamp": 1534487372725000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cs"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487373711000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cr"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "e799c8f3-7dd1-49e6-bb8a-df6f4f02966e",
+ "parentSpanId": "0e4ebf10-ad12-4d3c-bf4c-db2bafa64e60",
+ "serviceName": "service_d",
+ "operationName": "oper_10",
+ "startTime": 1534487372347000,
+ "duration": 81000,
+ "logs": [
+ {
+ "timestamp": 1534487372347000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cs"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372428000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cr"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "0e4ebf10-ad12-4d3c-bf4c-db2bafa64e60",
+ "parentSpanId": "0e4ebf10-ad12-4d3c-bf4c-db2bafa64e60",
+ "serviceName": "service_d",
+ "operationName": "oper_11",
+ "startTime": 1534487372435000,
+ "duration": 146000,
+ "logs": [
+ {
+ "timestamp": 1534487372435000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cs"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372581000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cr"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "fc1d74f3-94a4-4a95-86af-b6100bb6a337",
+ "parentSpanId": "0e4ebf10-ad12-4d3c-bf4c-db2bafa64e60",
+ "serviceName": "service_d",
+ "operationName": "oper_12",
+ "startTime": 1534487372691000,
+ "duration": 0,
+ "logs": [
+ {
+ "timestamp": 1534487372691000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cs"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372691000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cr"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "0e4ebf10-ad12-4d3c-bf4c-db2bafa64e60",
+ "parentSpanId": "9b0523d6-f14b-4a33-ba75-1e46720684cc",
+ "serviceName": "service_d",
+ "operationName": "oper_13",
+ "startTime": 1534487372345000,
+ "duration": 348000,
+ "logs": [
+ {
+ "timestamp": 1534487372345000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "sr"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372693000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "ss"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "435a043c-fcd9-4a81-8045-9d69bc9966c9",
+ "parentSpanId": "eb9b1f0d-c0e5-487e-9b50-a2175c69807e",
+ "serviceName": "service_d",
+ "operationName": "oper_12",
+ "startTime": 1534487372789000,
+ "duration": 0,
+ "logs": [
+ {
+ "timestamp": 1534487372789000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cs"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372789000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cr"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "eb9b1f0d-c0e5-487e-9b50-a2175c69807e",
+ "parentSpanId": "9b0523d6-f14b-4a33-ba75-1e46720684cc",
+ "serviceName": "service_d",
+ "operationName": "oper_13",
+ "startTime": 1534487372748000,
+ "duration": 43000,
+ "logs": [
+ {
+ "timestamp": 1534487372748000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "sr"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372791000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "ss"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "fc1d74f3-94a4-4a95-86af-b6100bb6a337",
+ "parentSpanId": "0e4ebf10-ad12-4d3c-bf4c-db2bafa64e60",
+ "serviceName": "service_e",
+ "operationName": "oper_14",
+ "startTime": 1534487372648000,
+ "duration": 44000,
+ "logs": [
+ {
+ "timestamp": 1534487372648000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "sr"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372692000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "ss"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "435a043c-fcd9-4a81-8045-9d69bc9966c9",
+ "parentSpanId": "eb9b1f0d-c0e5-487e-9b50-a2175c69807e",
+ "serviceName": "service_e",
+ "operationName": "oper_14",
+ "startTime": 1534487372756000,
+ "duration": 34000,
+ "logs": [
+ {
+ "timestamp": 1534487372756000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "sr"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372790000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "ss"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "9b0523d6-f14b-4a33-ba75-1e46720684cc",
+ "parentSpanId": "5fbe1254-1036-4d47-80b9-0cbf36fdd128",
+ "serviceName": "service_f",
+ "operationName": "oper_15",
+ "startTime": 1534487372849000,
+ "duration": 0,
+ "logs": [
+ {
+ "timestamp": 1534487372849000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cs"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372849000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cr"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "5fbe1254-1036-4d47-80b9-0cbf36fdd128",
+ "parentSpanId": "",
+ "serviceName": "service_f",
+ "operationName": "oper_16",
+ "startTime": 1534487370131000,
+ "duration": 2844000,
+ "logs": [
+ {
+ "timestamp": 1534487370131000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "sr"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372975000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "ss"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "095f1fa8-d920-4230-b142-6ccb0d9e7a04",
+ "parentSpanId": "",
+ "serviceName": "service_g",
+ "operationName": "oper_17",
+ "startTime": 1534487368981000,
+ "duration": 3019000,
+ "logs": [
+ {
+ "timestamp": 1534487368981000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "sr"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372000000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "ss"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ },
+ {
+ "traceId": "c4fffc7d-7c0d-4073-98ea-34362506d323",
+ "spanId": "5fbe1254-1036-4d47-80b9-0cbf36fdd128",
+ "parentSpanId": "095f1fa8-d920-4230-b142-6ccb0d9e7a04",
+ "serviceName": "service_g",
+ "operationName": "oper_18",
+ "startTime": 1534487368981000,
+ "duration": 3019000,
+ "logs": [
+ {
+ "timestamp": 1534487368981000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cs"
+ }
+ ]
+ },
+ {
+ "timestamp": 1534487372000000,
+ "fields": [
+ {
+ "key": "event",
+ "value": "cr"
+ }
+ ]
+ }
+ ],
+ "tags": []
+ }
+ ]
+}
\ No newline at end of file
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/integration/BaseIntegrationTestSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/integration/BaseIntegrationTestSpec.scala
new file mode 100644
index 000000000..c0c7b5cef
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/integration/BaseIntegrationTestSpec.scala
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.integration
+
+import java.text.SimpleDateFormat
+import java.util.concurrent.{Executors, TimeUnit}
+import java.util.{Date, UUID}
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.api.TraceReaderGrpc
+import com.expedia.open.tracing.api.TraceReaderGrpc.TraceReaderBlockingStub
+import com.expedia.open.tracing.backend.StorageBackendGrpc.StorageBackendBlockingStub
+import com.expedia.open.tracing.backend.{StorageBackendGrpc, TraceRecord, WriteSpansRequest, WriteSpansResponse}
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.commons.clients.es.document.TraceIndexDoc
+import com.expedia.www.haystack.trace.commons.config.entities.{IndexFieldType, WhiteListIndexFields, WhitelistIndexField}
+import com.expedia.www.haystack.trace.commons.packer.{PackerFactory, PackerType}
+import com.expedia.www.haystack.trace.reader.Service
+import com.expedia.www.haystack.trace.reader.unit.readers.builders.ValidTraceBuilder
+import com.expedia.www.haystack.trace.storage.backends.memory.{Service => BackendService}
+import com.google.protobuf.ByteString
+import io.grpc.ManagedChannelBuilder
+import io.grpc.health.v1.HealthGrpc
+import io.searchbox.client.config.HttpClientConfig
+import io.searchbox.client.{JestClient, JestClientFactory}
+import io.searchbox.core.Index
+import io.searchbox.indices.CreateIndex
+import org.json4s.ext.EnumNameSerializer
+import org.json4s.jackson.Serialization
+import org.json4s.{DefaultFormats, Formats}
+import org.scalatest._
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+
+trait BaseIntegrationTestSpec extends FunSpec with GivenWhenThen with Matchers with BeforeAndAfterAll with BeforeAndAfterEach with ValidTraceBuilder {
+ protected implicit val formats: Formats = DefaultFormats + new EnumNameSerializer(IndexFieldType)
+ protected var client: TraceReaderBlockingStub = _
+
+ protected var healthCheckClient: HealthGrpc.HealthBlockingStub = _
+
+ private val ELASTIC_SEARCH_ENDPOINT = "http://elasticsearch:9200"
+ private val ELASTIC_SEARCH_WHITELIST_INDEX = "reload-configs"
+ private val ELASTIC_SEARCH_WHITELIST_TYPE = "whitelist-index-fields"
+ private val SPANS_INDEX_TYPE = "spans"
+
+ private val executors = Executors.newFixedThreadPool(2)
+
+ private val DEFAULT_DURATION = TimeUnit.MILLISECONDS.toMicros(500)
+
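+ // mirrors the reader's 6-hour index bucketing (hour.bucket = 6 in base.conf) so that
+ // documents indexed by the tests land in the index the reader queries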
+ private val HAYSTACK_TRACES_INDEX = {
+ val date = new Date()
+
+ val dateBucket = new SimpleDateFormat("yyyy-MM-dd").format(date)
+ val hourBucket = new SimpleDateFormat("HH").format(date).toInt / 6
+
+ s"haystack-traces-$dateBucket-$hourBucket"
+ }
+ private val INDEX_TEMPLATE =
+ """{
+ | "template": "haystack-traces*",
+ | "settings": {
+ | "number_of_shards": 1,
+ | "index.mapping.ignore_malformed": true,
+ | "analysis": {
+ | "normalizer": {
+ | "lowercase_normalizer": {
+ | "type": "custom",
+ | "filter": ["lowercase"]
+ | }
+ | }
+ | }
+ | },
+ | "aliases": {
+ | "haystack-traces": {}
+ | },
+ | "mappings": {
+ | "spans": {
+ | "_all": {
+ | "enabled": false
+ | },
+ | "_source": {
+ | "includes": ["traceid"]
+ | },
+ | "properties": {
+ | "traceid": {
+ | "enabled": false
+ | },
+ | "starttime": {
+ | "type": "long",
+ | "doc_values": true
+ | },
+ | "spans": {
+ | "type": "nested",
+ | "properties": {
+ | "servicename": {
+ | "type": "keyword",
+ | "normalizer": "lowercase_normalizer",
+ | "doc_values": true,
+ | "norms": false
+ | },
+ | "operationname": {
+ | "type": "keyword",
+ | "normalizer": "lowercase_normalizer",
+ | "doc_values": true,
+ | "norms": false
+ | },
+ | "starttime": {
+ | "type": "long",
+ | "doc_values": true
+ | },
+ | "duration": {
+ | "type": "long",
+ | "doc_values": true
+ | }
+ | }
+ | }
+ | },
+ | "dynamic_templates": [{
+ | "strings_as_keywords_1": {
+ | "match_mapping_type": "string",
+ | "mapping": {
+ | "type": "keyword",
+ | "normalizer": "lowercase_normalizer",
+ | "doc_values": false,
+ | "norms": false
+ | }
+ | }
+ | }, {
+ | "longs_disable_doc_norms": {
+ | "match_mapping_type": "long",
+ | "mapping": {
+ | "type": "long",
+ | "doc_values": false,
+ | "norms": false
+ | }
+ | }
+ | }]
+ | }
+ | }
+ |}
+ |""".stripMargin
+
+
+ private var esClient: JestClient = _
+ private var traceBackendClient: StorageBackendBlockingStub = _
+
+ def setupTraceBackend(): StorageBackendBlockingStub = {
+ val port = 8090
+ executors.submit(new Runnable {
+ override def run(): Unit = BackendService.main(Array(port.toString))
+ })
+ traceBackendClient = StorageBackendGrpc.newBlockingStub(
+ ManagedChannelBuilder.forAddress("localhost", port)
+ .usePlaintext(true)
+ .build())
+ traceBackendClient
+ }
+
+ override def beforeAll() {
+ // setup traceBackend
+ traceBackendClient = setupTraceBackend()
+
+ // setup elasticsearch
+ val factory = new JestClientFactory()
+ factory.setHttpClientConfig(
+ new HttpClientConfig.Builder(ELASTIC_SEARCH_ENDPOINT)
+ .multiThreaded(true)
+ .build())
+ esClient = factory.getObject
+ esClient.execute(new CreateIndex.Builder(HAYSTACK_TRACES_INDEX)
+ .settings(INDEX_TEMPLATE)
+ .build)
+
+ executors.submit(new Runnable {
+ override def run(): Unit = Service.main(null)
+ })
+
+ Thread.sleep(5000)
+
+ client = TraceReaderGrpc.newBlockingStub(ManagedChannelBuilder.forAddress("localhost", 8088)
+ .usePlaintext(true)
+ .build())
+
+ healthCheckClient = HealthGrpc.newBlockingStub(ManagedChannelBuilder.forAddress("localhost", 8088)
+ .usePlaintext(true)
+ .build())
+ }
+
+
+ protected def putTraceInEsAndTraceBackend(traceId: String = UUID.randomUUID().toString,
+ spanId: String = UUID.randomUUID().toString,
+ serviceName: String = "",
+ operationName: String = "",
+ tags: Map[String, String] = Map.empty,
+ startTime: Long = System.currentTimeMillis() * 1000,
+ sleep: Boolean = true,
+ duration: Long = DEFAULT_DURATION): Unit = {
+ insertTraceInBackend(traceId, spanId, serviceName, operationName, tags, startTime, duration)
+ insertTraceInEs(traceId, spanId, serviceName, operationName, tags, startTime, duration)
+
+ // wait a few seconds to let ES refresh its index
+ if (sleep) Thread.sleep(5000)
+ }
+
+ private def insertTraceInEs(traceId: String,
+ spanId: String,
+ serviceName: String,
+ operationName: String,
+ tags: Map[String, String],
+ startTime: Long,
+ duration: Long) = {
+ import TraceIndexDoc._
+ // create map using service, operation and tags
+ val fieldMap: mutable.Map[String, Any] = mutable.Map(
+ SERVICE_KEY_NAME -> serviceName,
+ OPERATION_KEY_NAME -> operationName,
+ START_TIME_KEY_NAME -> mutable.Set[Any](startTime),
+ DURATION_KEY_NAME -> mutable.Set[Any](duration)
+ )
+ tags.foreach(pair => fieldMap.put(pair._1.toLowerCase(), pair._2))
+
+ // index the document
+ val result = esClient.execute(new Index.Builder(TraceIndexDoc(traceId, 0, startTime, Seq(fieldMap)).json)
+ .index(HAYSTACK_TRACES_INDEX)
+ .`type`(SPANS_INDEX_TYPE)
+ .build)
+
+ if (result.getErrorMessage != null) {
+ fail("Fail to execute the indexing request " + result.getErrorMessage)
+ }
+ }
+
+ case class FieldWithMetadata(name: String, isRangeQuery: Boolean)
+
+ protected def putWhitelistIndexFieldsInEs(fields: List[FieldWithMetadata]): Unit = {
+ val whitelistFields = for (field <- fields) yield WhitelistIndexField(field.name, IndexFieldType.string, aliases = Set(s"_${field.name}"), field.isRangeQuery)
+ esClient.execute(new Index.Builder(Serialization.write(WhiteListIndexFields(whitelistFields)))
+ .index(ELASTIC_SEARCH_WHITELIST_INDEX)
+ .`type`(ELASTIC_SEARCH_WHITELIST_TYPE)
+ .build)
+
+ // wait a few seconds to let ES refresh its index and the app reload its config
+ Thread.sleep(10000)
+ }
+
+ private def insertTraceInBackend(traceId: String,
+ spanId: String,
+ serviceName: String,
+ operationName: String,
+ tags: Map[String, String],
+ startTime: Long,
+ duration: Long): WriteSpansResponse = {
+ val spanBuffer = createSpanBufferWithSingleSpan(traceId, spanId, serviceName, operationName, tags, startTime, duration)
+ writeToBackend(spanBuffer, traceId)
+ }
+
+ protected def putTraceInBackend(traceId: String,
+ spanId: String = UUID.randomUUID().toString,
+ serviceName: String = "",
+ operationName: String = "",
+ tags: Map[String, String] = Map.empty,
+ startTime: Long = System.currentTimeMillis() * 1000,
+ duration: Long = DEFAULT_DURATION): Unit = {
+ insertTraceInBackend(traceId, spanId, serviceName, operationName, tags, startTime, duration)
+ // wait briefly for the write to become visible in the backend
+ Thread.sleep(1000)
+ }
+
+ protected def putTraceInBackendWithPartialSpans(traceId: String): WriteSpansResponse = {
+ val trace = buildMultiServiceTrace()
+ val spanBuffer = SpanBuffer
+ .newBuilder()
+ .setTraceId(traceId)
+ .addAllChildSpans(trace.getChildSpansList)
+ .build()
+
+ writeToBackend(spanBuffer, traceId)
+ }
+
+ private def writeToBackend(spanBuffer: SpanBuffer, traceId: String): WriteSpansResponse = {
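+ // serialize the span buffer without compression (PackerType.NONE) and wrap it in a
+ // trace record keyed by traceId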
+ val packer = PackerFactory.spanBufferPacker(PackerType.NONE)
+
+ val traceRecord = TraceRecord
+ .newBuilder()
+ .setTraceId(traceId)
+ .setSpans(ByteString.copyFrom(packer.apply(spanBuffer).packedDataBytes))
+ .build()
+
+ val writeSpanRequest = WriteSpansRequest.newBuilder()
+ .addRecords(traceRecord)
+ .build()
+
+ traceBackendClient.writeSpans(writeSpanRequest)
+ }
+
+ private def createSpanBufferWithSingleSpan(traceId: String,
+ spanId: String,
+ serviceName: String,
+ operationName: String,
+ tags: Map[String, String],
+ startTime: Long,
+ duration: Long) = {
+ val spanTags = tags.map(tag => com.expedia.open.tracing.Tag.newBuilder().setKey(tag._1).setVStr(tag._2).build())
+
+ SpanBuffer
+ .newBuilder()
+ .setTraceId(traceId)
+ .addChildSpans(Span
+ .newBuilder()
+ .setTraceId(traceId)
+ .setSpanId(spanId)
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .setStartTime(startTime)
+ .setDuration(duration)
+ .addAllTags(spanTags.asJava)
+ .build())
+ .build()
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/integration/TraceServiceIntegrationTestSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/integration/TraceServiceIntegrationTestSpec.scala
new file mode 100644
index 000000000..1c870505a
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/integration/TraceServiceIntegrationTestSpec.scala
@@ -0,0 +1,538 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.integration
+
+import java.util.UUID
+
+import com.expedia.open.tracing.api.ExpressionTree.Operator
+import com.expedia.open.tracing.api._
+import com.expedia.www.haystack.trace.commons.clients.es.document.TraceIndexDoc
+import io.grpc.health.v1.{HealthCheckRequest, HealthCheckResponse}
+import io.grpc.{Status, StatusRuntimeException}
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+
+class TraceServiceIntegrationTestSpec extends BaseIntegrationTestSpec {
+
+ describe("TraceReader.getFieldNames") {
+ it("should return names of enabled fields") {
+ Given("trace in trace-backend and elasticsearch")
+ val field1 = FieldWithMetadata("abc", isRangeQuery = true)
+ val field2 = FieldWithMetadata("def", isRangeQuery = false)
+ putWhitelistIndexFieldsInEs(List(field1, field2))
+
+ When("calling getFieldNames")
+ val fieldNames = client.getFieldNames(Empty.newBuilder().build())
+
+ Then("should return fieldNames available in index")
+ fieldNames.getNamesCount should be(2)
+ fieldNames.getFieldMetadataCount should be(2)
+ fieldNames.getNamesList.asScala.toList should contain allOf("abc", "def")
+ }
+ }
+
+ describe("TraceReader.getFieldValues") {
+ it("should return values of a given fields") {
+ Given("trace in trace-backend and elasticsearch")
+ val serviceName = "get_values_servicename"
+ putTraceInEsAndTraceBackend(UUID.randomUUID().toString, UUID.randomUUID().toString, serviceName, "op")
+ val request = FieldValuesRequest.newBuilder()
+ .setFieldName(TraceIndexDoc.SERVICE_KEY_NAME)
+ .build()
+
+ When("calling getFieldValues")
+ val result = client.getFieldValues(request)
+
+ Then("should return possible values for given field")
+ result.getValuesList.asScala should contain(serviceName)
+ }
+
+ it("should return values of a given fields with filters") {
+ Given("trace in trace-backend and elasticsearch")
+ val serviceName = "get_values_with_filters_servicename"
+ val op1 = "get_values_with_filters_operationname_1"
+ val op2 = "get_values_with_filters_operationname_2"
+
+ putTraceInEsAndTraceBackend(UUID.randomUUID().toString, UUID.randomUUID().toString, serviceName, op1)
+ putTraceInEsAndTraceBackend(UUID.randomUUID().toString, UUID.randomUUID().toString, serviceName, op2)
+ putTraceInEsAndTraceBackend(UUID.randomUUID().toString, UUID.randomUUID().toString, "non_matching_servicename", "non_matching_operationname")
+
+ val request = FieldValuesRequest.newBuilder()
+ .addFilters(Field.newBuilder().setName(TraceIndexDoc.SERVICE_KEY_NAME).setValue(serviceName))
+ .setFieldName(TraceIndexDoc.OPERATION_KEY_NAME)
+ .build()
+
+ When("calling getFieldValues")
+ val result = client.getFieldValues(request)
+
+ Then("should return filtered values for given field")
+ result.getValuesList.size() should be(2)
+ result.getValuesList.asScala should contain allOf(op1, op2)
+ }
+ }
+
+ describe("TraceReader.getTrace") {
+ it("should get trace for given traceID from trace-backend") {
+ Given("trace in trace-backend")
+ val traceId = UUID.randomUUID().toString
+ putTraceInBackend(traceId)
+
+ When("getTrace is invoked")
+ val trace = client.getTrace(TraceRequest.newBuilder().setTraceId(traceId).build())
+
+ Then("should return the trace")
+ trace.getTraceId shouldBe traceId
+ }
+
+ it("should return TraceNotFound exception if traceID is not in trace-backend") {
+ Given("trace in trace-backend")
+ putTraceInBackend(UUID.randomUUID().toString)
+
+ When("getTrace is invoked")
+ val thrown = the[StatusRuntimeException] thrownBy {
+ client.getTrace(TraceRequest.newBuilder().setTraceId(UUID.randomUUID().toString).build())
+ }
+
+ Then("thrown StatusRuntimeException should have 'not found' error")
+ thrown.getStatus.getCode should be(Status.NOT_FOUND.getCode)
+ thrown.getStatus.getDescription should include("traceId not found")
+ }
+ }
+
+ describe("TraceReader.getRawTrace") {
+ it("should get trace for given traceID from trace-backend") {
+ Given("trace in trace-backend")
+ val traceId = UUID.randomUUID().toString
+ putTraceInBackend(traceId)
+
+ When("getRawTrace is invoked")
+ val trace = client.getRawTrace(TraceRequest.newBuilder().setTraceId(traceId).build())
+
+ Then("should return the trace")
+ trace.getTraceId shouldBe traceId
+ }
+
+ it("should return TraceNotFound exception if traceID is not in trace-backend") {
+ Given("trace in trace-backend")
+ putTraceInBackend(UUID.randomUUID().toString)
+
+ When("getRawTrace is invoked")
+ val thrown = the[StatusRuntimeException] thrownBy {
+ client.getRawTrace(TraceRequest.newBuilder().setTraceId(UUID.randomUUID().toString).build())
+ }
+
+ Then("thrown StatusRuntimeException should have 'not found' error")
+ thrown.getStatus.getCode should be(Status.NOT_FOUND.getCode)
+ thrown.getStatus.getDescription should include("traceId not found")
+ }
+ }
+
+ describe("TraceReader.getRawSpan") {
+ it("should get spanId for given traceID-spanId from trace-backend") {
+ Given("trace in trace-backend")
+ val traceId = UUID.randomUUID().toString
+ val spanId = UUID.randomUUID().toString
+ putTraceInBackend(traceId, spanId, "svc1")
+ putTraceInBackend(traceId, spanId, "svc2")
+
+ When("getRawSpan is invoked")
+ val spanResponse = client.getRawSpan(SpanRequest
+ .newBuilder()
+ .setTraceId(traceId)
+ .setSpanId(spanId)
+ .build())
+
+ Then("should return the trace")
+ spanResponse.getSpansCount shouldBe 2
+ val servicesObserved = mutable.Set[String]()
+ spanResponse.getSpansList.asScala foreach { span =>
+ span.getTraceId shouldBe traceId
+ span.getSpanId shouldBe spanId
+ servicesObserved += span.getServiceName
+ }
+ servicesObserved should contain allOf("svc1", "svc2")
+ }
+
+ it("should return TraceNotFound exception if traceID is not in trace-backend") {
+ Given("trace in trace-backend")
+ putTraceInBackend(UUID.randomUUID().toString)
+
+ When("getRawSpan is invoked")
+ val thrown = the[StatusRuntimeException] thrownBy {
+ client.getRawSpan(SpanRequest
+ .newBuilder()
+ .setTraceId(UUID.randomUUID().toString)
+ .setSpanId(UUID.randomUUID().toString)
+ .build())
+ }
+
+ Then("thrown StatusRuntimeException should have 'traceId not found' error")
+ thrown.getStatus.getCode should be(Status.NOT_FOUND.getCode)
+ thrown.getStatus.getDescription should include("traceId not found")
+ }
+
+ it("should return SpanNotFound exception if spanId is not part of Trace") {
+ Given("trace in trace-backend")
+ val traceId = UUID.randomUUID().toString
+ putTraceInBackend(traceId)
+
+ When("getRawSpan is invoked")
+ val thrown = the[StatusRuntimeException] thrownBy {
+ client.getRawSpan(SpanRequest
+ .newBuilder()
+ .setTraceId(traceId)
+ .setSpanId(UUID.randomUUID().toString)
+ .build())
+ }
+
+ Then("thrown StatusRuntimeException should have 'spanId not found' error")
+ thrown.getStatus.getCode should be(Status.NOT_FOUND.getCode)
+ thrown.getStatus.getDescription should include("spanId not found")
+ }
+ }
+
+ describe("TraceReader.searchTraces") {
+ it("should search traces for given service and operation") {
+ Given("trace in trace-backend and elasticsearch")
+ val traceId = UUID.randomUUID().toString
+ val spanId = UUID.randomUUID().toString
+ val serviceName = "svcName"
+ val operationName = "opName"
+ val startTime = 1
+ val endTime = (System.currentTimeMillis() + 10000000) * 1000
+ putTraceInEsAndTraceBackend(traceId, spanId, serviceName, operationName)
+
+ When("searching traces for service and operation")
+ val traces = client.searchTraces(TracesSearchRequest
+ .newBuilder()
+ .addFields(Field.newBuilder().setName(TraceIndexDoc.SERVICE_KEY_NAME).setValue(serviceName).build())
+ .addFields(Field.newBuilder().setName(TraceIndexDoc.OPERATION_KEY_NAME).setValue(operationName).build())
+ .setStartTime(startTime)
+ .setEndTime(endTime)
+ .setLimit(10)
+ .build())
+
+ Then("should return traces for the searched service and operation name")
+ traces.getTracesList.size() should be > 0
+ traces.getTraces(0).getTraceId shouldBe traceId
+ traces.getTraces(0).getChildSpans(0).getServiceName shouldBe serviceName
+ traces.getTraces(0).getChildSpans(0).getOperationName shouldBe operationName
+ }
+
+ it("should search traces for given service") {
+ Given("traces in trace-backend and elasticsearch")
+ val traceId1 = UUID.randomUUID().toString
+ val traceId2 = UUID.randomUUID().toString
+ val serviceName = "serviceToSearch"
+ val operationName = "opName"
+ val startTime = 1
+ val endTime = (System.currentTimeMillis() + 10000000) * 1000
+ putTraceInEsAndTraceBackend(traceId1, UUID.randomUUID().toString, serviceName, operationName)
+ putTraceInEsAndTraceBackend(traceId2, UUID.randomUUID().toString, serviceName, operationName)
+
+ When("searching traces for service")
+ val traces = client.searchTraces(TracesSearchRequest
+ .newBuilder()
+ .addFields(Field.newBuilder().setName(TraceIndexDoc.SERVICE_KEY_NAME).setValue(serviceName).build())
+ .setStartTime(startTime)
+ .setEndTime(endTime)
+ .setLimit(10)
+ .build())
+
+ Then("should return all traces for the service")
+ traces.getTracesList.size() should be(2)
+ traces.getTracesList.asScala.exists(_.getTraceId == traceId1) shouldBe true
+ traces.getTracesList.asScala.exists(_.getTraceId == traceId2) shouldBe true
+ }
+
+ it("should not return traces for unavailable searches") {
+ Given("traces in trace-backend and elasticsearch")
+
+ When("searching traces for service")
+ val traces = client.searchTraces(TracesSearchRequest
+ .newBuilder()
+ .addFields(Field.newBuilder().setName(TraceIndexDoc.SERVICE_KEY_NAME).setValue("unavailableService").build())
+ .setStartTime(1)
+ .setEndTime((System.currentTimeMillis() + 10000000) * 1000)
+ .setLimit(10)
+ .build())
+
+ Then("should not return traces")
+ traces.getTracesList.size() should be(0)
+ }
+
+ it("should search traces for given whitelisted tags") {
+ Given("traces with tags in trace-backend and elasticsearch")
+ val traceId = UUID.randomUUID().toString
+ val serviceName = "svcWhitelisteTags"
+ val operationName = "opWhitelisteTags"
+ val tags = Map("aKey" -> "aValue", "bKey" -> "bValue")
+ val startTime = 1
+ val endTime = (System.currentTimeMillis() + 10000000) * 1000
+ putTraceInEsAndTraceBackend(traceId, UUID.randomUUID().toString, serviceName, operationName, tags)
+
+ When("searching traces for tags")
+ val traces = client.searchTraces(TracesSearchRequest
+ .newBuilder()
+ .addFields(Field.newBuilder().setName(TraceIndexDoc.SERVICE_KEY_NAME).setValue(serviceName).build())
+ .addFields(Field.newBuilder().setName("akey").setValue("avalue").build())
+ .addFields(Field.newBuilder().setName("bkey").setValue("bvalue").build())
+ .setStartTime(startTime)
+ .setEndTime(endTime)
+ .setLimit(10)
+ .build())
+
+ Then("should return traces having tags")
+ traces.getTracesList.asScala.exists(_.getTraceId == traceId) shouldBe true
+ }
+
+ it("should not return traces if tags are not available") {
+ Given("traces with tags in trace-backend and elasticsearch")
+ val traceId = UUID.randomUUID().toString
+ val serviceName = "svcWhitelisteTags"
+ val operationName = "opWhitelisteTags"
+ val tags = Map("cKey" -> "cValue", "dKey" -> "dValue")
+ val startTime = 1
+ val endTime = (System.currentTimeMillis() + 10000000) * 1000
+ putTraceInEsAndTraceBackend(traceId, UUID.randomUUID().toString, serviceName, operationName, tags)
+
+ When("searching traces for tags")
+ val traces = client.searchTraces(TracesSearchRequest
+ .newBuilder()
+ .addFields(Field.newBuilder().setName(TraceIndexDoc.SERVICE_KEY_NAME).setValue(serviceName).build())
+ .addFields(Field.newBuilder().setName("ckey").setValue("cvalue").build())
+ .addFields(Field.newBuilder().setName("akey").setValue("avalue").build())
+ .setStartTime(startTime)
+ .setEndTime(endTime)
+ .setLimit(10)
+ .build())
+
+ Then("should not return traces")
+ traces.getTracesList.asScala.exists(_.getTraceId == traceId) shouldBe false
+ }
+
+ it("should return traces for expression tree based search targeting ") {
+ Given("traces with tags in trace-backend and elasticsearch")
+ val traceId = UUID.randomUUID().toString
+ val serviceName = "expressionTraceSvc"
+ val operationName = "expressionTraceOp"
+ val tags = Map("uKey" -> "uValue", "vKey" -> "vValue")
+ val startTime = 1
+ val endTime = (System.currentTimeMillis() + 10000000) * 1000
+ putTraceInEsAndTraceBackend(traceId, UUID.randomUUID().toString, serviceName, operationName, tags)
+
+ When("searching traces for tags using expression tree")
+ val expression = ExpressionTree
+ .newBuilder()
+ .setOperator(Operator.AND)
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName(TraceIndexDoc.SERVICE_KEY_NAME).setValue(serviceName)))
+
+ val traces = client.searchTraces(TracesSearchRequest
+ .newBuilder()
+ .setFilterExpression(expression)
+ .setStartTime(startTime)
+ .setEndTime(endTime)
+ .setLimit(10)
+ .build())
+
+ Then("should return traces")
+ traces.getTracesList.asScala.exists(_.getTraceId == traceId) shouldBe true
+ }
+
+ it("should not return traces for expression tree using not_equal operator based search") {
+ Given("traces with tags in trace-backend and elasticsearch")
+ val traceId = UUID.randomUUID().toString
+ val serviceName = "expressionTraceSvc"
+ val operationName = "expressionTraceOp"
+ val tags = Map("uKey" -> "uValue", "vKey" -> "vValue")
+ val startTime = 1
+ val endTime = (System.currentTimeMillis() + 10000000) * 1000
+ putTraceInEsAndTraceBackend(traceId, UUID.randomUUID().toString, serviceName, operationName, tags)
+
+ When("searching traces for tags using expression tree")
+ val expression = ExpressionTree
+ .newBuilder()
+ .setOperator(Operator.AND)
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName(TraceIndexDoc.SERVICE_KEY_NAME).setValue(serviceName)))
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName(TraceIndexDoc.OPERATION_KEY_NAME).setValue(operationName).setOperator(Field.Operator.NOT_EQUAL)))
+
+ val traces = client.searchTraces(TracesSearchRequest
+ .newBuilder()
+ .setFilterExpression(expression)
+ .setStartTime(startTime)
+ .setEndTime(endTime)
+ .setLimit(10)
+ .build())
+
+ Then("should not return traces")
+ traces.getTracesList.size() shouldBe 0
+ }
+
+ it("should return traces for expression tree using not_equal operator based search") {
+ Given("traces with tags in trace-backend and elasticsearch")
+ val traceId = UUID.randomUUID().toString
+ val serviceName = "expressionTraceSvc_1"
+ val operationName = "expressionTraceOp_1"
+ val tags = Map("uKey" -> "uValue", "vKey" -> "vValue")
+ val startTime = 1
+ val endTime = (System.currentTimeMillis() + 10000000) * 1000
+ putTraceInEsAndTraceBackend(traceId, UUID.randomUUID().toString, serviceName, operationName, tags)
+
+ When("searching traces for tags using expression tree")
+ val expression = ExpressionTree
+ .newBuilder()
+ .setOperator(Operator.AND)
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName(TraceIndexDoc.SERVICE_KEY_NAME).setValue(serviceName)))
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName(TraceIndexDoc.OPERATION_KEY_NAME).setValue("somethingelse").setOperator(Field.Operator.NOT_EQUAL)))
+
+ val traces = client.searchTraces(TracesSearchRequest
+ .newBuilder()
+ .setFilterExpression(expression)
+ .setStartTime(startTime)
+ .setEndTime(endTime)
+ .setLimit(10)
+ .build())
+
+ Then("should return traces")
+ traces.getTracesList.asScala.exists(_.getTraceId == traceId) shouldBe true
+ }
+
+ it("should return traces for expression tree with duration filter") {
+ Given("traces with tags in trace-backend and elasticsearch")
+ val traceId = UUID.randomUUID().toString
+ val serviceName = "expressionTraceSvc_1"
+ val operationName = "expressionTraceOp_1"
+ val tags = Map("uKey" -> "uValue", "vKey" -> "vValue")
+ val startTime = 1
+ val endTime = (System.currentTimeMillis() + 10000000) * 1000
+ putTraceInEsAndTraceBackend(traceId, UUID.randomUUID().toString, serviceName, operationName, tags)
+
+ When("searching traces for tags using expression tree")
+ val baseExpr = ExpressionTree
+ .newBuilder()
+ .setOperator(Operator.AND)
+ .setIsSpanLevelExpression(true)
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName(TraceIndexDoc.SERVICE_KEY_NAME).setValue(serviceName)))
+
+ // protobuf builders are mutable, so clone the shared base expression before adding operands;
+ // otherwise the second search below would carry both duration operands
+ val greaterThanExpr = baseExpr.clone().addOperands(Operand.newBuilder().setField(Field.newBuilder().setName(TraceIndexDoc.DURATION_KEY_NAME).setOperator(Field.Operator.GREATER_THAN).setValue("300000")))
+ val nonEmptyTraces = client.searchTraces(TracesSearchRequest
+ .newBuilder()
+ .setFilterExpression(greaterThanExpr)
+ .setStartTime(startTime)
+ .setEndTime(endTime)
+ .setLimit(10)
+ .build())
+
+ val lessThanExpr = baseExpr.clone().addOperands(Operand.newBuilder().setField(Field.newBuilder().setName(TraceIndexDoc.DURATION_KEY_NAME).setOperator(Field.Operator.LESS_THAN).setValue("300000")))
+ val emptyTraces = client.searchTraces(TracesSearchRequest
+ .newBuilder()
+ .setFilterExpression(lessThanExpr)
+ .setStartTime(startTime)
+ .setEndTime(endTime)
+ .setLimit(10)
+ .build())
+
+ Then("should return traces")
+ nonEmptyTraces.getTracesList.asScala.exists(_.getTraceId == traceId) shouldBe true
+ emptyTraces.getTracesList shouldBe empty
+ }
+ }
+
+ describe("TraceReader.getTraceCallGraph") {
+ it("should get trace for given traceID from trace-backend") {
+ Given("trace in trace-backend")
+ val traceId = "traceId"
+ putTraceInBackendWithPartialSpans(traceId)
+
+ When("getTrace is invoked")
+ val traceCallGraph = client.getTraceCallGraph(TraceRequest.newBuilder().setTraceId(traceId).build())
+
+ Then("should return the trace call graph")
+ traceCallGraph.getCallsCount should be(3)
+ }
+ }
+
+ describe("TraceReader.getTraceCounts") {
+ it("should return trace counts histogram for given time span") {
+ Given("traces elasticsearch")
+ val serviceName = "dummy-servicename"
+ val operationName = "dummy-operationname"
+ val currentTimeMicros = System.currentTimeMillis() * 1000L
+
+ val bucketIntervalInMicros = 10L * 1000 * 10000 // 100 seconds, expressed in microseconds
+ val bucketCount = 4
+ val bucketStartTimes = (0 until bucketCount).map(idx => currentTimeMicros - (bucketIntervalInMicros * idx))
+ val startTimeInMicroSec = currentTimeMicros - (bucketIntervalInMicros * bucketCount)
+ val endTimeInMicroSec = currentTimeMicros
+
+ bucketStartTimes.foreach(startTime =>
+ putTraceInEsAndTraceBackend(serviceName = serviceName, operationName = operationName, startTime = startTime, sleep = false))
+ Thread.sleep(5000) // give elasticsearch time to index the freshly written spans
+
+ When("calling getTraceCounts")
+ val traceCountsRequest = TraceCountsRequest
+ .newBuilder()
+ .addFields(Field.newBuilder().setName(TraceIndexDoc.SERVICE_KEY_NAME).setValue(serviceName).build())
+ .addFields(Field.newBuilder().setName(TraceIndexDoc.OPERATION_KEY_NAME).setValue(operationName).build())
+ .setStartTime(startTimeInMicroSec)
+ .setEndTime(endTimeInMicroSec)
+ .setInterval(bucketIntervalInMicros)
+ .build()
+
+ val traceCounts = client.getTraceCounts(traceCountsRequest)
+
+ Then("should return possible values for given field")
+ traceCounts.getTraceCountCount shouldEqual bucketCount
+ traceCounts.getTraceCountList.asScala.foreach(_.getCount shouldBe 1)
+ }
+ }
+
+ describe("TraceReader.getRawTraces") {
+ it("should get raw traces for given traceIds from trace-backend") {
+ Given("traces in trace-backend")
+ val traceId1 = UUID.randomUUID().toString
+ val spanId1 = UUID.randomUUID().toString
+ val spanId2 = UUID.randomUUID().toString
+ putTraceInBackend(traceId1, spanId1, "svc1", "oper1")
+ putTraceInBackend(traceId1, spanId2, "svc2", "oper2")
+
+ val traceId2 = UUID.randomUUID().toString
+ val spanId3 = UUID.randomUUID().toString
+ putTraceInBackend(traceId2, spanId3, "svc1", "oper1")
+
+ When("getRawTraces is invoked")
+ val tracesResult = client.getRawTraces(RawTracesRequest.newBuilder().addAllTraceId(Seq(traceId1, traceId2).asJava).build())
+
+ Then("should return the traces")
+ val traceIdSpansMap: Map[String, Set[String]] = tracesResult.getTracesList.asScala
+ .map(trace => trace.getTraceId -> trace.getChildSpansList.asScala.map(_.getSpanId).toSet).toMap
+
+ traceIdSpansMap(traceId1) shouldEqual Set(spanId1, spanId2)
+ traceIdSpansMap(traceId2) shouldEqual Set(spanId3)
+ }
+ }
+
+ describe("TraceReader.HealthCheck") {
+ it("should return SERVING as health check response") {
+ val request = HealthCheckRequest.newBuilder().build()
+ val response = healthCheckClient.check(request)
+ response.getStatus shouldEqual HealthCheckResponse.ServingStatus.SERVING
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/BaseUnitTestSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/BaseUnitTestSpec.scala
new file mode 100644
index 000000000..a18ffc8c3
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/BaseUnitTestSpec.scala
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit
+
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FunSpec, GivenWhenThen, Matchers}
+
+trait BaseUnitTestSpec extends FunSpec with GivenWhenThen with Matchers with EasyMockSugar
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/config/ConfigurationLoaderSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/config/ConfigurationLoaderSpec.scala
new file mode 100644
index 000000000..89f6c77d9
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/config/ConfigurationLoaderSpec.scala
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2019, Expedia Group.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.expedia.www.haystack.trace.reader.unit.config
+
+import com.expedia.www.haystack.trace.reader.config.ProviderConfiguration
+import com.expedia.www.haystack.trace.reader.config.entities.{ServiceConfiguration, TraceTransformersConfiguration}
+import com.expedia.www.haystack.trace.reader.readers.transformers.{ClientServerEventLogTransformer, DeDuplicateSpanTransformer, InfrastructureTagTransformer, PartialSpanTransformer}
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+class ConfigurationLoaderSpec extends BaseUnitTestSpec {
+ describe("ConfigurationLoader") {
+ it("should load the service config from base.conf") {
+ val serviceConfig: ServiceConfiguration = new ProviderConfiguration().serviceConfig
+ serviceConfig.port shouldBe 8088
+ serviceConfig.ssl.enabled shouldBe false
+ serviceConfig.ssl.certChainFilePath shouldBe "/ssl/cert"
+ serviceConfig.ssl.privateKeyPath shouldBe "/ssl/private-key"
+ serviceConfig.maxSizeInBytes shouldBe 52428800
+ }
+
+ it("should load the trace transformers") {
+ val traceConfig: TraceTransformersConfiguration = new ProviderConfiguration().traceTransformerConfig
+ traceConfig.postTransformers.length shouldBe 3
+ traceConfig.postTransformers.head.isInstanceOf[PartialSpanTransformer] shouldBe true
+ traceConfig.preTransformers.length shouldBe 3
+ traceConfig.preTransformers.head.isInstanceOf[DeDuplicateSpanTransformer] shouldBe true
+ traceConfig.preTransformers(1).isInstanceOf[ClientServerEventLogTransformer] shouldBe true
+ traceConfig.preTransformers(2).isInstanceOf[InfrastructureTagTransformer] shouldBe true
+ }
+
+ it("should load the trace validators") {
+ val traceConfig: TraceTransformersConfiguration = new ProviderConfiguration().traceTransformerConfig
+ traceConfig.postTransformers.length shouldBe 3
+ traceConfig.postTransformers.head.isInstanceOf[PartialSpanTransformer] shouldBe true
+ traceConfig.preTransformers.length shouldBe 3
+ traceConfig.preTransformers.head.isInstanceOf[DeDuplicateSpanTransformer] shouldBe true
+ traceConfig.preTransformers(1).isInstanceOf[ClientServerEventLogTransformer] shouldBe true
+ traceConfig.preTransformers(2).isInstanceOf[InfrastructureTagTransformer] shouldBe true
+ }
+
+ it("should load elastic search configuration") {
+ val elasticSearchConfig = new ProviderConfiguration().elasticSearchConfiguration
+
+ elasticSearchConfig.clientConfiguration.endpoint shouldEqual "http://elasticsearch:9200"
+ elasticSearchConfig.clientConfiguration.connectionTimeoutMillis shouldEqual 10000
+ elasticSearchConfig.clientConfiguration.readTimeoutMillis shouldEqual 5000
+
+ elasticSearchConfig.spansIndexConfiguration.indexHourBucket shouldEqual 6
+ elasticSearchConfig.spansIndexConfiguration.indexHourTtl shouldEqual 72
+ elasticSearchConfig.spansIndexConfiguration.useRootDocumentStartTime shouldEqual true
+ elasticSearchConfig.spansIndexConfiguration.indexType shouldEqual "spans"
+ elasticSearchConfig.spansIndexConfiguration.indexNamePrefix shouldEqual "haystack-traces"
+
+ elasticSearchConfig.serviceMetadataIndexConfiguration.enabled shouldEqual false
+ elasticSearchConfig.serviceMetadataIndexConfiguration.indexName shouldEqual "service_metadata"
+ elasticSearchConfig.serviceMetadataIndexConfiguration.indexType shouldEqual "metadata"
+
+ elasticSearchConfig.awsRequestSigningConfiguration.enabled shouldEqual false
+ elasticSearchConfig.awsRequestSigningConfiguration.region shouldEqual "us-west-2"
+ elasticSearchConfig.awsRequestSigningConfiguration.awsServiceName shouldEqual "es"
+ elasticSearchConfig.awsRequestSigningConfiguration.accessKey shouldBe None
+ elasticSearchConfig.awsRequestSigningConfiguration.secretKey shouldBe None
+ }
+
+ it("should load trace backend configuration") {
+ val traceBackendConfig = new ProviderConfiguration().traceBackendConfiguration
+ traceBackendConfig.backends.head.host shouldEqual "localhost"
+ traceBackendConfig.backends.head.port shouldEqual 8090
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/TraceMergerSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/TraceMergerSpec.scala
new file mode 100644
index 000000000..e0d4d7fd7
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/TraceMergerSpec.scala
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2019 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.readers
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.api.Trace
+import com.expedia.www.haystack.trace.reader.readers.utils.TraceMerger
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+class TraceMergerSpec extends BaseUnitTestSpec {
+ describe("Trace Merger") {
+ it("should merge the traces by traceId") {
+ val trace_1 = buildTrace("t1", "s1", "svc1", "op1")
+ val trace_2 = buildTrace("t2", "s2", "svc2", "op1")
+ val trace_3 = buildTrace("t1", "s3", "svc3", "op1")
+ val trace_4 = buildTrace("t2", "s4", "svc4", "op1")
+ val trace_5 = buildTrace("t3", "s5", "svc5", "op1")
+
+ val mergedTraces = TraceMerger.merge(Seq(trace_1, trace_2, trace_3, trace_4, trace_5))
+ mergedTraces.size shouldBe 3
+ val t1 = mergedTraces.find(_.getTraceId == "t1").head
+ t1.getChildSpansCount shouldBe 2
+ t1.getChildSpans(0).getTraceId shouldBe "t1"
+ t1.getChildSpans(0).getSpanId shouldBe "s1"
+ t1.getChildSpans(1).getTraceId shouldBe "t1"
+ t1.getChildSpans(1).getSpanId shouldBe "s3"
+ t1.getChildSpans(0).getServiceName shouldBe "svc1"
+ t1.getChildSpans(1).getServiceName shouldBe "svc3"
+
+ val t2 = mergedTraces.find(_.getTraceId == "t2").head
+ t2.getChildSpansCount shouldBe 2
+ t2.getChildSpans(0).getSpanId shouldBe "s2"
+ t2.getChildSpans(1).getSpanId shouldBe "s4"
+
+ mergedTraces.find(_.getTraceId == "t3").head.getChildSpansCount shouldBe 1
+ }
+ }
+
+ private def buildTrace(traceId: String, spanId: String, serviceName: String, operationName: String): Trace = {
+ Trace.newBuilder().setTraceId(traceId).addChildSpans(
+ Span.newBuilder()
+ .setSpanId(spanId)
+ .setTraceId(traceId)
+ .setServiceName(serviceName)
+ .setOperationName(operationName)).build()
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/TraceProcessorSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/TraceProcessorSpec.scala
new file mode 100644
index 000000000..cd1ce01a7
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/TraceProcessorSpec.scala
@@ -0,0 +1,383 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.readers
+
+import com.expedia.open.tracing.api.Trace
+import com.expedia.www.haystack.trace.reader.readers.TraceProcessor
+import com.expedia.www.haystack.trace.reader.readers.transformers._
+import com.expedia.www.haystack.trace.reader.readers.validators._
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+import com.expedia.www.haystack.trace.reader.unit.readers.builders.{ClockSkewedTraceBuilder, MultiRootTraceBuilder, MultiServerSpanTraceBuilder, ValidTraceBuilder}
+import com.google.protobuf.util.JsonFormat
+
+import scala.io.Source
+
+class TraceProcessorSpec
+ extends BaseUnitTestSpec
+ with ValidTraceBuilder
+ with MultiServerSpanTraceBuilder
+ with MultiRootTraceBuilder
+ with ClockSkewedTraceBuilder {
+
+ /**
+ * This test can be used to debug a prod issue using a raw trace.
+ * Copy-paste the raw trace under the child spans in the json file, and update
+ * the traceId at the first level of the json file.
+ * Also, make sure to configure the same transformers (in the same sequence) that are applied in your prod env.
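+ *
+ * For illustration only, a minimal sketch of the json layout, inferred from the
+ * Trace proto fields used below (the actual fixture may carry more fields):
+ * {
+ *   "traceId": "<trace-id>",
+ *   "childSpans": [
+ *     { "spanId": "...", "serviceName": "...", "operationName": "...",
+ *       "tags": [ { "key": "...", "value": "..." } ] }
+ *   ]
+ * }
+ * Note that getTraceFromJson below rewrites "value" to the proto field name "vStr".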
+ */
+ describe("TraceProcessor for well-formed raw trace from json file") {
+
+ val traceProcessor = new TraceProcessor(
+ Seq(new TraceIdValidator),
+ Seq(new DeDuplicateSpanTransformer, new ClientServerEventLogTransformer),
+ Seq(new PartialSpanTransformer, new ServerClientSpanMergeTransformer, new InvalidRootTransformer,
+ new InvalidParentTransformer, new ClockSkewTransformer, new SortSpanTransformer))
+
+ it("should successfully process a simple valid raw trace from json") {
+ Given("a raw trace from json file")
+ val trace = getTraceFromJson(jsonFile = "raw_trace.json")
+
+ When("invoking process")
+ val processedTraceOption = traceProcessor.process(trace)
+
+ Then("successfully process trace")
+ processedTraceOption.isSuccess should be(true)
+ val processedTrace = processedTraceOption.get
+
+ processedTrace.getChildSpansList.size() should be(13)
+ }
+ }
+
+ private def getTraceFromJson(jsonFile: String): Trace = {
+ val stringJson = Source.fromResource(jsonFile).mkString
+
+ // replace "value" with proto supported "vStr" for tags and log
+ val replacedStringJson = stringJson.replaceAll("value", "vStr")
+ val builder = Trace.newBuilder()
+ JsonFormat.parser().merge(replacedStringJson, builder)
+ builder.build()
+ }
+
+ describe("TraceProcessor for well-formed traces") {
+ val traceProcessor = new TraceProcessor(
+ Seq(new TraceIdValidator, new RootValidator, new ParentIdValidator),
+ Seq(new DeDuplicateSpanTransformer),
+ Seq(new PartialSpanTransformer, new ClockSkewTransformer, new SortSpanTransformer))
+
+ it("should successfully process a simple valid trace") {
+ Given("a simple liner trace ")
+ val trace = buildSimpleLinerTrace()
+
+ When("invoking process")
+ val processedTraceOption = traceProcessor.process(trace)
+
+ Then("successfully process trace")
+ processedTraceOption.isSuccess should be(true)
+ val processedTrace = processedTraceOption.get
+
+ processedTrace.getChildSpansList.size() should be(4)
+ getSpanById(processedTrace, "a").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "b").getStartTime should be(startTimestamp + 50)
+ getSpanById(processedTrace, "c").getStartTime should be(startTimestamp + 550)
+ getSpanById(processedTrace, "d").getStartTime should be(startTimestamp + 750)
+ }
+
+ it("should reject a multi-root trace") {
+ Given("a multi-root trace ")
+ val trace = buildMultiRootTrace()
+
+ When("invoking process")
+ val processedTraceOption = traceProcessor.process(trace)
+
+ Then("reject trace")
+ processedTraceOption.isSuccess should be(false)
+ }
+
+ it("should successfully process a valid multi-service trace without clock skew") {
+ Given("a valid multi-service trace without skew")
+ val trace = buildMultiServiceWithoutSkewTrace()
+
+ When("invoking process")
+ val processedTraceOption = traceProcessor.process(trace)
+
+ Then("successfully process trace")
+ processedTraceOption.isSuccess should be(true)
+ val processedTrace = processedTraceOption.get
+
+ processedTrace.getChildSpansList.size() should be(5)
+ getSpanById(processedTrace, "a").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "a").getServiceName should be("x")
+
+ getSpanById(processedTrace, "b").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "b").getServiceName should be("y")
+
+ getSpanById(processedTrace, "c").getStartTime should be(startTimestamp + 500)
+ getSpanById(processedTrace, "c").getServiceName should be("x")
+
+ getSpanById(processedTrace, "d").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "d").getServiceName should be("y")
+
+ getSpanById(processedTrace, "e").getStartTime should be(startTimestamp + 200)
+ getSpanById(processedTrace, "e").getServiceName should be("y")
+ }
+
+ it("should successfully process a valid multi-service trace with positive clock skew") {
+ Given("a valid multi-service trace with skew")
+ val trace = buildMultiServiceWithPositiveSkewTrace()
+
+ When("invoking process")
+ val processedTraceOption = traceProcessor.process(trace)
+
+ Then("successfully process trace")
+ processedTraceOption.isSuccess should be(true)
+ val processedTrace = processedTraceOption.get
+
+ processedTrace.getChildSpansList.size() should be(5)
+ getSpanById(processedTrace, "a").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "a").getServiceName should be("x")
+
+ getSpanById(processedTrace, "b").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "b").getServiceName should be("y")
+
+ getSpanById(processedTrace, "c").getStartTime should be(startTimestamp + 500)
+ getSpanById(processedTrace, "c").getServiceName should be("x")
+
+ getSpanById(processedTrace, "d").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "d").getServiceName should be("y")
+
+ getSpanById(processedTrace, "e").getStartTime should be(startTimestamp + 200)
+ getSpanById(processedTrace, "e").getServiceName should be("y")
+ }
+
+ it("should successfully process a valid multi-service trace with negative clock skew") {
+ Given("a valid multi-service trace with negative skew")
+ val trace = buildMultiServiceWithNegativeSkewTrace()
+
+ When("invoking process")
+ val processedTraceOption = traceProcessor.process(trace)
+
+ Then("successfully process trace")
+ processedTraceOption.isSuccess should be(true)
+ val processedTrace = processedTraceOption.get
+
+ processedTrace.getChildSpansList.size() should be(5)
+ getSpanById(processedTrace, "a").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "a").getServiceName should be("x")
+
+ getSpanById(processedTrace, "b").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "b").getServiceName should be("y")
+
+ getSpanById(processedTrace, "c").getStartTime should be(startTimestamp + 500)
+ getSpanById(processedTrace, "c").getServiceName should be("x")
+
+ getSpanById(processedTrace, "d").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "d").getServiceName should be("y")
+
+ getSpanById(processedTrace, "e").getStartTime should be(startTimestamp + 200)
+ getSpanById(processedTrace, "e").getServiceName should be("y")
+ }
+
+ it("should successfully process a valid complex multi-service trace") {
+ Given("a valid multi-service trace ")
+ val trace = buildMultiServiceTrace()
+
+ When("invoking process")
+ val processedTraceOption = traceProcessor.process(trace)
+
+ Then("successfully process trace")
+ processedTraceOption.isSuccess should be(true)
+ val processedTrace = processedTraceOption.get
+
+ processedTrace.getChildSpansList.size() should be(6)
+ getSpanById(processedTrace, "a").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "a").getServiceName should be("w")
+
+ getSpanById(processedTrace, "b").getStartTime should be(startTimestamp + 20)
+ getSpanById(processedTrace, "b").getServiceName should be("x")
+
+ getSpanById(processedTrace, "c").getStartTime should be(startTimestamp + 520)
+ getSpanById(processedTrace, "c").getServiceName should be("y")
+
+ getSpanById(processedTrace, "d").getStartTime should be(startTimestamp + 20)
+ getSpanById(processedTrace, "d").getServiceName should be("x")
+
+ getSpanById(processedTrace, "e").getStartTime should be(startTimestamp + 20)
+ getSpanById(processedTrace, "e").getServiceName should be("x")
+
+ getSpanById(processedTrace, "f").getStartTime should be(startTimestamp + 540)
+ getSpanById(processedTrace, "f").getServiceName should be("z")
+ }
+ }
+
+ describe("TraceProcessor for non well-formed traces") {
+ val traceProcessor = new TraceProcessor(
+ Seq(new TraceIdValidator),
+ Seq(new DeDuplicateSpanTransformer),
+ Seq(new PartialSpanTransformer, new InvalidRootTransformer, new InvalidParentTransformer, new ClockSkewTransformer, new SortSpanTransformer))
+
+ it("should successfully process a simple valid trace") {
+ Given("a simple liner trace ")
+ val trace = buildSimpleLinerTrace()
+
+ When("invoking process")
+ val processedTraceOption = traceProcessor.process(trace)
+
+ Then("successfully process trace")
+ processedTraceOption.isSuccess should be(true)
+ val processedTrace = processedTraceOption.get
+
+ processedTrace.getChildSpansList.size() should be(4)
+ getSpanById(processedTrace, "a").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "b").getStartTime should be(startTimestamp + 50)
+ getSpanById(processedTrace, "c").getStartTime should be(startTimestamp + 550)
+ getSpanById(processedTrace, "d").getStartTime should be(startTimestamp + 750)
+ }
+
+ it("should successfully process a multi-root trace") {
+ Given("a multi-root trace ")
+ val trace = buildMultiRootTrace()
+
+ When("invoking process")
+ val processedTraceOption = traceProcessor.process(trace)
+
+ Then("successfully process trace")
+ processedTraceOption.isSuccess should be(true)
+ val processedTrace = processedTraceOption.get
+
+ processedTrace.getChildSpansList.size() should be(5)
+ getSpanById(processedTrace, "a").getServiceName should be("x")
+ getSpanById(processedTrace, "b").getParentSpanId should not be "a"
+ getSpanById(processedTrace, "c").getParentSpanId should be("b")
+ getSpanById(processedTrace, "d").getParentSpanId should be("b")
+ }
+
+ it("should successfully process a valid multi-service trace without clock skew") {
+ Given("a valid multi-service trace without skew")
+ val trace = buildMultiServiceWithoutSkewTrace()
+
+ When("invoking process")
+ val processedTraceOption = traceProcessor.process(trace)
+
+ Then("successfully process trace")
+ processedTraceOption.isSuccess should be(true)
+ val processedTrace = processedTraceOption.get
+
+ processedTrace.getChildSpansList.size() should be(5)
+ getSpanById(processedTrace, "a").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "a").getServiceName should be("x")
+
+ getSpanById(processedTrace, "b").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "b").getServiceName should be("y")
+
+ getSpanById(processedTrace, "c").getStartTime should be(startTimestamp + 500)
+ getSpanById(processedTrace, "c").getServiceName should be("x")
+
+ getSpanById(processedTrace, "d").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "d").getServiceName should be("y")
+
+ getSpanById(processedTrace, "e").getStartTime should be(startTimestamp + 200)
+ getSpanById(processedTrace, "e").getServiceName should be("y")
+ }
+
+ it("should successfully process a valid multi-service trace with positive clock skew") {
+ Given("a valid multi-service trace with skew")
+ val trace = buildMultiServiceWithPositiveSkewTrace()
+
+ When("invoking process")
+ val processedTraceOption = traceProcessor.process(trace)
+
+ Then("successfully process trace")
+ processedTraceOption.isSuccess should be(true)
+ val processedTrace = processedTraceOption.get
+
+ processedTrace.getChildSpansList.size() should be(5)
+ getSpanById(processedTrace, "a").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "a").getServiceName should be("x")
+
+ getSpanById(processedTrace, "b").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "b").getServiceName should be("y")
+
+ getSpanById(processedTrace, "c").getStartTime should be(startTimestamp + 500)
+ getSpanById(processedTrace, "c").getServiceName should be("x")
+
+ getSpanById(processedTrace, "d").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "d").getServiceName should be("y")
+
+ getSpanById(processedTrace, "e").getStartTime should be(startTimestamp + 200)
+ getSpanById(processedTrace, "e").getServiceName should be("y")
+ }
+
+ it("should successfully process a valid multi-service trace with negative clock skew") {
+ Given("a valid multi-service trace with negative skew")
+ val trace = buildMultiServiceWithNegativeSkewTrace()
+
+ When("invoking process")
+ val processedTraceOption = traceProcessor.process(trace)
+
+ Then("successfully process trace")
+ processedTraceOption.isSuccess should be(true)
+ val processedTrace = processedTraceOption.get
+
+ processedTrace.getChildSpansList.size() should be(5)
+ getSpanById(processedTrace, "a").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "a").getServiceName should be("x")
+
+ getSpanById(processedTrace, "b").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "b").getServiceName should be("y")
+
+ getSpanById(processedTrace, "c").getStartTime should be(startTimestamp + 500)
+ getSpanById(processedTrace, "c").getServiceName should be("x")
+
+ getSpanById(processedTrace, "d").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "d").getServiceName should be("y")
+
+ getSpanById(processedTrace, "e").getStartTime should be(startTimestamp + 200)
+ getSpanById(processedTrace, "e").getServiceName should be("y")
+ }
+
+ it("should successfully process a valid complex multi-service trace") {
+ Given("a valid multi-service trace ")
+ val trace = buildMultiServiceTrace()
+
+ When("invoking process")
+ val processedTraceOption = traceProcessor.process(trace)
+
+ Then("successfully process trace")
+ processedTraceOption.isSuccess should be(true)
+ val processedTrace = processedTraceOption.get
+
+ processedTrace.getChildSpansList.size() should be(6)
+ getSpanById(processedTrace, "a").getStartTime should be(startTimestamp)
+ getSpanById(processedTrace, "a").getServiceName should be("w")
+
+ getSpanById(processedTrace, "b").getStartTime should be(startTimestamp + 20)
+ getSpanById(processedTrace, "b").getServiceName should be("x")
+
+ getSpanById(processedTrace, "c").getStartTime should be(startTimestamp + 520)
+ getSpanById(processedTrace, "c").getServiceName should be("y")
+
+ getSpanById(processedTrace, "d").getStartTime should be(startTimestamp + 20)
+ getSpanById(processedTrace, "d").getServiceName should be("x")
+
+ getSpanById(processedTrace, "e").getStartTime should be(startTimestamp + 20)
+ getSpanById(processedTrace, "e").getServiceName should be("x")
+
+ getSpanById(processedTrace, "f").getStartTime should be(startTimestamp + 540)
+ getSpanById(processedTrace, "f").getServiceName should be("z")
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/ClockSkewedTraceBuilder.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/ClockSkewedTraceBuilder.scala
new file mode 100644
index 000000000..f2b868505
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/ClockSkewedTraceBuilder.scala
@@ -0,0 +1,321 @@
+package com.expedia.www.haystack.trace.reader.unit.readers.builders
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.api.Trace
+
+import scala.collection.JavaConverters._
+
+// helper to create various types of traces for unit testing
+trait ClockSkewedTraceBuilder extends TraceBuilder {
+
+ /**
+ * trace spanning multiple services without clock skew
+ *
+ * ...................................................... x
+ * a |============================================|
+ * b |---------------------|
+ * c |----------------------|
+ *
+ * ..................................................... y
+ * b |=====================|
+ * d |---------|
+ * e |-----------|
+ *
+ */
+ def buildMultiServiceWithoutSkewTrace(): Trace = {
+ val aSpan = Span.newBuilder()
+ .setSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp)
+ .setDuration(1000)
+ .addAllLogs(createServerSpanTags(startTimestamp, startTimestamp + 1000).asJavaCollection)
+ .build()
+
+ val bSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp)
+ .setDuration(500)
+ .addAllLogs(createClientSpanTags(startTimestamp, startTimestamp + 500).asJavaCollection)
+ .build()
+
+ val cSpan = Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp + 500)
+ .setDuration(500)
+ .addAllLogs(createClientSpanTags(startTimestamp + 500, startTimestamp + 500 + 500).asJavaCollection)
+ .build()
+
+ val bServerSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("y")
+ .setStartTime(startTimestamp)
+ .setDuration(500)
+ .addAllLogs(createServerSpanTags(startTimestamp, startTimestamp + 500).asJavaCollection)
+ .build()
+
+ val dSpan = Span.newBuilder()
+ .setSpanId("d")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setServiceName("y")
+ .setStartTime(startTimestamp)
+ .setDuration(200)
+ .addAllLogs(createClientSpanTags(startTimestamp, startTimestamp + 200).asJavaCollection)
+ .build()
+
+ val eSpan = Span.newBuilder()
+ .setSpanId("e")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setServiceName("y")
+ .setStartTime(startTimestamp + 200)
+ .setDuration(300)
+ .addAllLogs(createClientSpanTags(startTimestamp + 200, startTimestamp + 200 + 300).asJavaCollection)
+ .build()
+
+ toTrace(aSpan, bSpan, cSpan, bServerSpan, dSpan, eSpan)
+ }
+
+ /**
+ * trace spanning multiple services with positive clock skew
+ *
+ * ...................................................... x
+ * a |============================================|
+ * b |---------------------|
+ * c |----------------------|
+ *
+ * ..................................................... y
+ * b |=====================|
+ * d |--------|
+ * e |------------|
+ *
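+ * Note: service y's clock runs 100 time-units ahead here; its copy of span "b" starts
+ * at startTimestamp + 100 while the client copy in service x starts at startTimestamp,
+ * so the ClockSkewTransformer is expected to shift y's spans back by 100.
+ *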
+ */
+ def buildMultiServiceWithPositiveSkewTrace(): Trace = {
+ val aSpan = Span.newBuilder()
+ .setSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp)
+ .setDuration(1000)
+ .addAllLogs(createServerSpanTags(startTimestamp, startTimestamp + 1000).asJavaCollection)
+ .build()
+
+ val bSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp)
+ .setDuration(500)
+ .addAllLogs(createClientSpanTags(startTimestamp, startTimestamp + 500).asJavaCollection)
+ .build()
+
+ val cSpan = Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp + 500)
+ .setDuration(500)
+ .addAllLogs(createClientSpanTags(startTimestamp + 500, startTimestamp + 500 + 500).asJavaCollection)
+ .build()
+
+ val bServerSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("y")
+ .setStartTime(startTimestamp + 100)
+ .setDuration(500)
+ .addAllLogs(createServerSpanTags(startTimestamp + 100, startTimestamp + 100 + 500).asJavaCollection)
+ .build()
+
+ val dSpan = Span.newBuilder()
+ .setSpanId("d")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setServiceName("y")
+ .setStartTime(startTimestamp + 100)
+ .setDuration(200)
+ .addAllLogs(createClientSpanTags(startTimestamp + 100, startTimestamp + 100 + 200).asJavaCollection)
+ .build()
+
+ val eSpan = Span.newBuilder()
+ .setSpanId("e")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setServiceName("y")
+ .setStartTime(startTimestamp + 300)
+ .setDuration(300)
+ .addAllLogs(createClientSpanTags(startTimestamp + 300, startTimestamp + 300 + 300).asJavaCollection)
+ .build()
+
+ toTrace(aSpan, bSpan, cSpan, bServerSpan, dSpan, eSpan)
+ }
+
+ /**
+ * trace spanning multiple services with negative clock skew
+ *
+ * ...................................................... x
+ * a |============================================|
+ * b |---------------------|
+ * c |----------------------|
+ *
+ * ..................................................... y
+ * b |===================|
+ * d |--------|
+ * e |----------|
+ *
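+ * Note: service y's clock runs 100 time-units behind here; its copy of span "b" starts
+ * at startTimestamp - 100, so the ClockSkewTransformer is expected to shift y's spans
+ * forward by 100.
+ *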
+ */
+ def buildMultiServiceWithNegativeSkewTrace(): Trace = {
+ val aSpan = Span.newBuilder()
+ .setSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp)
+ .setDuration(1000)
+ .addAllLogs(createServerSpanTags(startTimestamp, startTimestamp + 1000).asJavaCollection)
+ .build()
+
+ val bSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp)
+ .setDuration(500)
+ .addAllLogs(createClientSpanTags(startTimestamp, startTimestamp + 500).asJavaCollection)
+ .build()
+
+ val cSpan = Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp + 500)
+ .setDuration(500)
+ .addAllLogs(createClientSpanTags(startTimestamp + 500, startTimestamp + 500 + 500).asJavaCollection)
+ .build()
+
+ val bServerSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("y")
+ .setStartTime(startTimestamp - 100)
+ .setDuration(500)
+ .addAllLogs(createServerSpanTags(startTimestamp - 100, startTimestamp - 100 + 500).asJavaCollection)
+ .build()
+
+ val dSpan = Span.newBuilder()
+ .setSpanId("d")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setServiceName("y")
+ .setStartTime(startTimestamp - 100)
+ .setDuration(200)
+ .addAllLogs(createClientSpanTags(startTimestamp - 100, startTimestamp - 100 + 200).asJavaCollection)
+ .build()
+
+ val eSpan = Span.newBuilder()
+ .setSpanId("e")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setServiceName("y")
+ .setStartTime(startTimestamp + 100)
+ .setDuration(300)
+ .addAllLogs(createClientSpanTags(startTimestamp + 100, startTimestamp + 100 + 300).asJavaCollection)
+ .build()
+
+ toTrace(aSpan, bSpan, cSpan, bServerSpan, dSpan, eSpan)
+ }
+
+ /**
+ * trace spanning multiple services with multi-level clock skew
+ *
+ * ...................................................... x
+ * a |============================================|
+ * b |--------------------------------------------|
+ *
+ * ..................................................... y
+ * b |============================================|
+ * c |--------------------------------------------|
+ *
+ * ..................................................... z
+ * c |============================================|
+ * d |--------------------------------------------|
+ *
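+ * Note: the skew compounds across hops; y's copy of "b" starts 100 units before x's
+ * copy, while z's copy of "c" starts 600 units after y's copy of "c"
+ * (startTimestamp + 500 vs startTimestamp - 100).
+ *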
+ */
+ def buildMultiLevelSkewTrace(): Trace = {
+ val aSpan = Span.newBuilder()
+ .setSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp)
+ .setDuration(1000)
+ .addAllLogs(createServerSpanTags(startTimestamp, startTimestamp + 1000).asJavaCollection)
+ .build()
+
+ val bSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp)
+ .setDuration(1000)
+ .addAllLogs(createClientSpanTags(startTimestamp, startTimestamp + 1000).asJavaCollection)
+ .build()
+
+ val bServerSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("y")
+ .setStartTime(startTimestamp - 100)
+ .setDuration(1000)
+ .addAllLogs(createServerSpanTags(startTimestamp - 100, startTimestamp - 100 + 1000).asJavaCollection)
+ .build()
+
+ val cSpan = Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setServiceName("y")
+ .setStartTime(startTimestamp - 100)
+ .setDuration(1000)
+ .addAllLogs(createClientSpanTags(startTimestamp - 100, startTimestamp - 100 + 1000).asJavaCollection)
+ .build()
+
+ val cServerSpan = Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setServiceName("z")
+ .setStartTime(startTimestamp + 500)
+ .setDuration(1000)
+ .addAllLogs(createServerSpanTags(startTimestamp + 500, startTimestamp + 500 + 1000).asJavaCollection)
+ .build()
+
+ val dSpan = Span.newBuilder()
+ .setSpanId("d")
+ .setParentSpanId("c")
+ .setTraceId(traceId)
+ .setServiceName("z")
+ .setStartTime(startTimestamp + 500)
+ .setDuration(1000)
+ .addAllLogs(createClientSpanTags(startTimestamp + 500, startTimestamp + 500 + 1000).asJavaCollection)
+ .build()
+
+ toTrace(aSpan, bSpan, cSpan, bServerSpan, cServerSpan, dSpan)
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/MultiRootTraceBuilder.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/MultiRootTraceBuilder.scala
new file mode 100644
index 000000000..fa3e9e411
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/MultiRootTraceBuilder.scala
@@ -0,0 +1,61 @@
+package com.expedia.www.haystack.trace.reader.unit.readers.builders
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.api.Trace
+
+import scala.collection.JavaConverters._
+
+// helper to create various types of traces for unit testing
+trait MultiRootTraceBuilder extends TraceBuilder {
+
+ /**
+ * trace with multiple root spans
+ *
+ * ..................................................... x
+ * a |=========| b |===================|
+ * c |-------------------|
+ * d |------|
+ *
+ */
+ def buildMultiRootTrace(): Trace = {
+ val aSpan = Span.newBuilder()
+ .setSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp)
+ .setDuration(300)
+ .addAllLogs(createServerSpanTags(startTimestamp, startTimestamp + 300).asJavaCollection)
+ .build()
+
+ val bSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp + 500)
+ .setDuration(500)
+ .addAllLogs(createServerSpanTags(startTimestamp + 500, startTimestamp + 500 + 500).asJavaCollection)
+ .build()
+
+ val cSpan = Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp + 500)
+ .setDuration(500)
+ .addAllLogs(createClientSpanTags(startTimestamp + 500, startTimestamp + 500 + 500).asJavaCollection)
+ .build()
+
+ val dSpan = Span.newBuilder()
+ .setSpanId("d")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp + 750)
+ .setDuration(200)
+ .addAllLogs(createClientSpanTags(startTimestamp + 750, startTimestamp + 750 + 200).asJavaCollection)
+ .build()
+
+ toTrace(aSpan, bSpan, cSpan, dSpan)
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/MultiServerSpanTraceBuilder.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/MultiServerSpanTraceBuilder.scala
new file mode 100644
index 000000000..e35c11b21
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/MultiServerSpanTraceBuilder.scala
@@ -0,0 +1,62 @@
+package com.expedia.www.haystack.trace.reader.unit.readers.builders
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.api.Trace
+
+import scala.collection.JavaConverters._
+
+// helper to create various types of traces for unit testing
+trait MultiServerSpanTraceBuilder extends TraceBuilder {
+ /**
+ * trace with multiple server spans reported for a single client span
+ *
+ * ..................................................... x
+ * a |============================|
+ * b |----------------------------|
+ * ..................................................... y
+ * b |==========|
+ * ..................................................... z
+ * b |========|
+ *
+ */
+ def buildMultiServerSpanForAClientSpanTrace(): Trace = {
+ val aSpan = Span.newBuilder()
+ .setSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp)
+ .setDuration(1000)
+ .addAllLogs(createServerSpanTags(startTimestamp, startTimestamp + 1000).asJavaCollection)
+ .build()
+
+ val bSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp)
+ .setDuration(1000)
+ .addAllLogs(createClientSpanTags(startTimestamp, startTimestamp + 1000).asJavaCollection)
+ .build()
+
+ val bFirstServerSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("y")
+ .setStartTime(startTimestamp)
+ .setDuration(500)
+ .addAllLogs(createClientSpanTags(startTimestamp, startTimestamp + 500).asJavaCollection)
+ .build()
+
+ val bSecondServerSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("z")
+ .setStartTime(startTimestamp + 500)
+ .setDuration(500)
+ .addAllLogs(createClientSpanTags(startTimestamp + 500, startTimestamp + 500 + 500).asJavaCollection)
+ .build()
+
+ toTrace(aSpan, bSpan, bFirstServerSpan, bSecondServerSpan)
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/TraceBuilder.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/TraceBuilder.scala
new file mode 100644
index 000000000..93dada8d2
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/TraceBuilder.scala
@@ -0,0 +1,40 @@
+package com.expedia.www.haystack.trace.reader.unit.readers.builders
+
+import com.expedia.open.tracing.api.Trace
+import com.expedia.open.tracing.{Log, Span, Tag}
+
+import scala.collection.JavaConverters._
+
+// helper to create various types of traces for unit testing
+trait TraceBuilder {
+ val startTimestamp = 150000000000L
+ val traceId = "traceId"
+
+ protected def toTrace(spans: Span*): Trace = Trace.newBuilder().setTraceId(traceId).addAllChildSpans(spans.asJavaCollection).build
+
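+ // Zipkin-style log events: "sr"/"ss" mark server receive/send, "cs"/"cr" mark client send/receive.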
+ protected def createServerSpanTags(start: Long, end: Long) = List(
+ Log.newBuilder()
+ .setTimestamp(start)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("sr").build())
+ .build(),
+ Log.newBuilder()
+ .setTimestamp(end)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("ss").build())
+ .build()
+ )
+
+ protected def createClientSpanTags(start: Long, end: Long) = List(
+ Log.newBuilder()
+ .setTimestamp(start)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("cs").build())
+ .build(),
+ Log.newBuilder()
+ .setTimestamp(end)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("cr").build())
+ .build()
+ )
+
+ protected def getSpanById(trace: Trace, spanId: String): Span = trace.getChildSpansList.asScala.find(_.getSpanId == spanId).get
+
+ protected def getSpanById(spans: Seq[Span], spanId: String): Span = spans.find(_.getSpanId == spanId).get
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/ValidTraceBuilder.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/ValidTraceBuilder.scala
new file mode 100644
index 000000000..700ba5864
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/builders/ValidTraceBuilder.scala
@@ -0,0 +1,175 @@
+package com.expedia.www.haystack.trace.reader.unit.readers.builders
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.api.Trace
+
+import scala.collection.JavaConverters._
+
+// helper to create various types of traces for unit testing
+trait ValidTraceBuilder extends TraceBuilder {
+ /**
+ * simple linear trace with a chain of sequential spans
+ *
+ * ..................................................... x
+ * a |==================================|
+ * b |-------------------|
+ * c |------|
+ * d |---|
+ *
+ */
+ def buildSimpleLinerTrace(): Trace = {
+ val aSpan = Span.newBuilder()
+ .setSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp)
+ .setDuration(1000)
+ .addAllLogs(createServerSpanTags(startTimestamp, startTimestamp + 1000).asJavaCollection)
+ .build()
+
+ val bSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp + 50)
+ .setDuration(500)
+ .addAllLogs(createClientSpanTags(startTimestamp + 50, startTimestamp + 50 + 500).asJavaCollection)
+ .build()
+
+ val cSpan = Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp + 550)
+ .setDuration(200)
+ .addAllLogs(createClientSpanTags(startTimestamp + 550, startTimestamp + 550 + 200).asJavaCollection)
+ .build()
+
+ val dSpan = Span.newBuilder()
+ .setSpanId("d")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp + 750)
+ .setDuration(200)
+ .addAllLogs(createClientSpanTags(startTimestamp + 750, startTimestamp + 750 + 200).asJavaCollection)
+ .build()
+
+ toTrace(aSpan, bSpan, cSpan, dSpan)
+ }
+
+ /**
+ * trace spanning multiple services, assuming a network delta of 20ms
+ *
+ * ...................................................... w
+ * a |============================================|
+ * b |---------------------|
+ * c |----------------------|
+ *
+ * ..................................................... x
+ * b |==================|
+ * d |--------|
+ * e |----------------|
+ *
+ * ..................................................... y
+ * c |====================|
+ * f |----------|
+ *
+ * ..................................................... y
+ * f |========|
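+ *
+ * The server-side copies of "b", "c" and "f" each start 20ms after their client
+ * counterparts, modeling the network delta assumed above.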
+ */
+ def buildMultiServiceTrace(): Trace = {
+ val aSpan = Span.newBuilder()
+ .setSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("w")
+ .setStartTime(startTimestamp)
+ .setDuration(1000)
+ .addAllLogs(createServerSpanTags(startTimestamp, startTimestamp + 1000).asJavaCollection)
+ .build()
+
+ val bSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("w")
+ .setStartTime(startTimestamp)
+ .setDuration(500)
+ .addAllLogs(createClientSpanTags(startTimestamp, startTimestamp + 500).asJavaCollection)
+ .build()
+
+ val cSpan = Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("w")
+ .setStartTime(startTimestamp + 500)
+ .setDuration(500)
+ .addAllLogs(createClientSpanTags(startTimestamp + 500, startTimestamp + 500 + 500).asJavaCollection)
+ .build()
+
+ val bServerSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp + 20)
+ .setDuration(460)
+ .addAllLogs(createServerSpanTags(startTimestamp + 20, startTimestamp + 20 + 460).asJavaCollection)
+ .build()
+
+ val dSpan = Span.newBuilder()
+ .setSpanId("d")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp + 20)
+ .setDuration(200)
+ .addAllLogs(createClientSpanTags(startTimestamp + 20, startTimestamp + 20 + 200).asJavaCollection)
+ .build()
+
+ val eSpan = Span.newBuilder()
+ .setSpanId("e")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setServiceName("x")
+ .setStartTime(startTimestamp + 20)
+ .setDuration(400)
+ .addAllLogs(createClientSpanTags(startTimestamp + 20, startTimestamp + 20 + 400).asJavaCollection)
+ .build()
+
+ val cServerSpan = Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setServiceName("y")
+ .setStartTime(startTimestamp + 520)
+ .setDuration(460)
+ .addAllLogs(createServerSpanTags(startTimestamp + 520, startTimestamp + 520 + 460).asJavaCollection)
+ .build()
+
+ val fSpan = Span.newBuilder()
+ .setSpanId("f")
+ .setParentSpanId("c")
+ .setTraceId(traceId)
+ .setServiceName("y")
+ .setStartTime(startTimestamp + 520)
+ .setDuration(100)
+ .addAllLogs(createClientSpanTags(startTimestamp + 520, startTimestamp + 520 + 100).asJavaCollection)
+ .build()
+
+ val fServerSpan = Span.newBuilder()
+ .setSpanId("f")
+ .setParentSpanId("c")
+ .setTraceId(traceId)
+ .setServiceName("z")
+ .setStartTime(startTimestamp + 540)
+ .setDuration(100)
+ .addAllLogs(createServerSpanTags(startTimestamp + 540, startTimestamp + 540 + 50).asJavaCollection)
+ .build()
+
+ toTrace(aSpan, bSpan, cSpan, bServerSpan, dSpan, eSpan, cServerSpan, fSpan, fServerSpan)
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/ClientServerEventLogTransformerSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/ClientServerEventLogTransformerSpec.scala
new file mode 100644
index 000000000..87d131229
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/ClientServerEventLogTransformerSpec.scala
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.readers.transformers
+
+import com.expedia.open.tracing.{Log, Span, Tag}
+import com.expedia.www.haystack.trace.commons.utils.SpanMarkers
+import com.expedia.www.haystack.trace.reader.readers.transformers.ClientServerEventLogTransformer
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+import scala.collection.JavaConverters._
+
+class ClientServerEventLogTransformerSpec extends BaseUnitTestSpec {
+ describe("client server event log transformer") {
+ it("should add event logs in the span using span.kind") {
+ val span_1 = Span.newBuilder()
+ .setTraceId("trace-id-1")
+ .setDuration(100)
+ .setStartTime(10000)
+ .addTags(Tag.newBuilder().setType(Tag.TagType.STRING).setKey(SpanMarkers.SPAN_KIND_TAG_KEY).setVStr("client"))
+ .build()
+ val span_2 = Span.newBuilder()
+ .setTraceId("trace-id-2")
+ .setDuration(200)
+ .setStartTime(20000)
+ .addTags(Tag.newBuilder().setType(Tag.TagType.STRING).setKey(SpanMarkers.SPAN_KIND_TAG_KEY).setVStr("server"))
+ .build()
+ val transformer = new ClientServerEventLogTransformer
+ val transformedSpans = transformer.transform(Seq(span_1, span_2))
+ transformedSpans.length shouldBe 2
+
+ val headSpan = transformedSpans.head
+ headSpan.getTraceId shouldBe "trace-id-1"
+ headSpan.getDuration shouldBe 100L
+ headSpan.getStartTime shouldBe 10000L
+ headSpan.getTagsList.asScala.find(tag => tag.getKey == SpanMarkers.SPAN_KIND_TAG_KEY).get.getVStr shouldBe "client"
+ headSpan.getLogsList.size() shouldBe 2
+ headSpan.getLogs(0).getTimestamp shouldBe 10000L
+ headSpan.getLogs(1).getTimestamp shouldBe 10100L
+ headSpan.getLogs(0).getFieldsList.asScala.count(tag => tag.getKey == SpanMarkers.LOG_EVENT_TAG_KEY && tag.getVStr == SpanMarkers.CLIENT_SEND_EVENT) shouldBe 1
+ headSpan.getLogs(1).getFieldsList.asScala.count(tag => tag.getKey == SpanMarkers.LOG_EVENT_TAG_KEY && tag.getVStr == SpanMarkers.CLIENT_RECV_EVENT) shouldBe 1
+
+ val lastSpan = transformedSpans(1)
+ lastSpan.getTraceId shouldBe "trace-id-2"
+ lastSpan.getDuration shouldBe 200
+ lastSpan.getStartTime shouldBe 20000
+ lastSpan.getTagsList.asScala.find(tag => tag.getKey == SpanMarkers.SPAN_KIND_TAG_KEY).get.getVStr shouldBe "server"
+ lastSpan.getLogsList.size() shouldBe 2
+ lastSpan.getLogs(0).getTimestamp shouldBe 20000
+ lastSpan.getLogs(1).getTimestamp shouldBe 20200L
+ lastSpan.getLogs(0).getFieldsList.asScala.count(tag => tag.getKey == SpanMarkers.LOG_EVENT_TAG_KEY && tag.getVStr == SpanMarkers.SERVER_RECV_EVENT) shouldBe 1
+ lastSpan.getLogs(1).getFieldsList.asScala.count(tag => tag.getKey == SpanMarkers.LOG_EVENT_TAG_KEY && tag.getVStr == SpanMarkers.SERVER_SEND_EVENT) shouldBe 1
+ }
+
+ it("should not add anything if span.kind is absent") {
+ val span = Span.newBuilder()
+ .setTraceId("trace-id-1")
+ .setDuration(100)
+ .setStartTime(10000)
+ .build()
+ val transformedSpans = new ClientServerEventLogTransformer().transform(Seq(span))
+ transformedSpans.size shouldBe 1
+ transformedSpans.head shouldEqual span
+ }
+
+ it("should not add log tags if they are already present") {
+ val span = Span.newBuilder()
+ .setTraceId("trace-id-1")
+ .setDuration(100)
+ .setStartTime(10000)
+ .addTags(Tag.newBuilder().setType(Tag.TagType.STRING).setKey(SpanMarkers.SPAN_KIND_TAG_KEY).setVStr("client"))
+ .addLogs(Log.newBuilder().setTimestamp(10000).addFields(Tag.newBuilder().setKey("event").setVStr("cr")))
+ .addLogs(Log.newBuilder().setTimestamp(10100).addFields(Tag.newBuilder().setKey("event").setVStr("cs")))
+ .build()
+ val transformedSpans = new ClientServerEventLogTransformer().transform(Seq(span))
+ transformedSpans.size shouldBe 1
+ transformedSpans.head shouldEqual span
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/ClockSkewFromParentTransformerSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/ClockSkewFromParentTransformerSpec.scala
new file mode 100644
index 000000000..64527260f
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/ClockSkewFromParentTransformerSpec.scala
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.expedia.www.haystack.trace.reader.unit.readers.transformers
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.trace.commons.utils.SpanUtils
+import com.expedia.www.haystack.trace.reader.readers.transformers.ClockSkewFromParentTransformer
+import com.expedia.www.haystack.trace.reader.readers.utils.{MutableSpanForest, SpanTree}
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+class ClockSkewFromParentTransformerSpec extends BaseUnitTestSpec {
+ describe("ClockSkewFromParentTransformerSpec") {
+ it("should not make any adjustments if parent/child spans are properly aligned") {
+ val span_1 = createSpan("", "span_1", 100L, 200L)
+ val span_2 = createSpan(span_1.getSpanId, "span_2", 125L, 175L)
+
+ val transformer = new ClockSkewFromParentTransformer()
+ val spans = transformer.transform(MutableSpanForest(List(span_1, span_2)))
+ validateSpans(spans, 2)
+ }
+
+ it("should shift child span's startTime if the endTime exceeds the parent span") {
+ val span_1 = createSpan("", "span_1", 100L, 200L)
+ val span_2 = createSpan(span_1.getSpanId, "span_2", 175L, 225L)
+
+ val transformer = new ClockSkewFromParentTransformer()
+ val spans = transformer.transform(MutableSpanForest(List(span_1, span_2)))
+ validateSpans(spans, 2)
+ }
+
+ it("should shift child span's startTime if the startTime precedes the parent span") {
+ val span_1 = createSpan("", "span_1", 100L, 200L)
+ val span_2 = createSpan(span_1.getSpanId, "span_2", 75L, 125L)
+
+ val transformer = new ClockSkewFromParentTransformer()
+ val spans = transformer.transform(MutableSpanForest(List(span_1, span_2)))
+ validateSpans(spans, 2)
+ }
+
+ it("should shift both the startTime and the endTime if the child span is completely outside of the parent spans timeframe") {
+ val span_1 = createSpan("", "span_1", 100L, 200L)
+ val span_2 = createSpan(span_1.getSpanId, "span_2", 275L, 325L)
+
+ val transformer = new ClockSkewFromParentTransformer()
+ val spans = transformer.transform(MutableSpanForest(List(span_1, span_2)))
+ validateSpans(spans, 2)
+ }
+
+ it("should shift multiple children correctly") {
+ val span_1 = createSpan("", "span_1", 100L, 200L)
+ val span_2 = createSpan(span_1.getSpanId, "span_2", 275L, 325L)
+ val span_3 = createSpan(span_2.getSpanId, "span_3", 375L, 400L)
+
+ val transformer = new ClockSkewFromParentTransformer()
+ val spans = transformer.transform(MutableSpanForest(List(span_1, span_2, span_3)))
+ validateSpans(spans, 3)
+ }
+
+ it("should handle a single span with no shift") {
+ val span_1 = createSpan("", "span_1", 100L, 200L)
+
+ val transformer = new ClockSkewFromParentTransformer()
+ val spans = transformer.transform(MutableSpanForest(List(span_1)))
+ spans.getUnderlyingSpans.size shouldBe 1
+ spans.getUnderlyingSpans.head.getStartTime shouldBe 100L
+ spans.getUnderlyingSpans.head.getDuration shouldBe 100L
+ }
+ }
+
+ def validateSpans(spans: MutableSpanForest, size: Int): Unit = {
+ spans.getUnderlyingSpans.size shouldBe size
+ spans.getAllTrees.foreach(spanTree => validateSpanTree(spanTree))
+ }
+
+ def validateSpanTree(spanTree: SpanTree): Unit = {
+ spanTree.children.foreach(child => {
+ spanTree.span.getStartTime should be <= child.span.getStartTime
+ SpanUtils.getEndTime(child.span) should be <= SpanUtils.getEndTime(spanTree.span)
+ })
+ spanTree.children.foreach(child => validateSpanTree(child))
+ }
+
+ def createSpan(parentId: String, spanId: String, startTime: Long, endTime: Long): Span = {
+ Span.newBuilder().setTraceId("traceId").setParentSpanId(parentId).setSpanId(spanId).setStartTime(startTime).setDuration(endTime - startTime).setServiceName("another-service").build()
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/ClockSkewTransformerSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/ClockSkewTransformerSpec.scala
new file mode 100644
index 000000000..d4eee68c5
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/ClockSkewTransformerSpec.scala
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.readers.transformers
+
+import com.expedia.open.tracing.{Log, Span, Tag}
+import com.expedia.www.haystack.trace.reader.readers.transformers.ClockSkewTransformer
+import com.expedia.www.haystack.trace.reader.readers.utils.MutableSpanForest
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+class ClockSkewTransformerSpec extends BaseUnitTestSpec {
+
+ private def createTraceWithoutMergedSpans(timestamp: Long) = {
+ // creating a trace with this timeline structure-
+ // a -> b(-50) -> e(-100)
+ // -> c(+500)
+ // -> d(-100)
+
+ val traceId = "traceId"
+
+ val spanA = Span.newBuilder()
+ .setSpanId("a")
+ .setTraceId(traceId)
+ .setStartTime(timestamp)
+ .setDuration(1000)
+ .build()
+
+ val spanB = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setStartTime(spanA.getStartTime - 50)
+ .setDuration(100)
+ .build()
+
+ val spanC = Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setStartTime(spanA.getStartTime + 500)
+ .setDuration(100)
+ .build()
+
+ val spanD = Span.newBuilder()
+ .setSpanId("d")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setStartTime(spanA.getStartTime - 100)
+ .setDuration(100)
+ .build()
+
+ val spanE = Span.newBuilder()
+ .setSpanId("e")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setStartTime(spanB.getStartTime - 100)
+ .setDuration(100)
+ .build()
+
+ List(spanA, spanB, spanC, spanD, spanE)
+ }
+
+ private def createSpansWithClientAndServer(timestamp: Long) = {
+ val traceId = "traceId"
+ val skewedSpanId = "spanId"
+ val serviceName = "serviceName"
+ val tag = Tag.newBuilder().setKey("tag").setVBool(true).build()
+ val log = Log.newBuilder().setTimestamp(System.currentTimeMillis).addFields(tag).build()
+
+ val partialSpan = Span.newBuilder()
+ .setSpanId(skewedSpanId)
+ .setTraceId(traceId)
+ .setServiceName(serviceName)
+ .setStartTime(timestamp + 2000)
+ .setDuration(1000)
+ .addTags(tag)
+ .addLogs(Log.newBuilder()
+ .setTimestamp(timestamp)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("cs").build())
+ .build())
+ .addLogs(Log.newBuilder()
+ .setTimestamp(timestamp + 1000)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("cr").build())
+ .build())
+ .addLogs(Log.newBuilder()
+ .setTimestamp(timestamp + 2000)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("sr").build())
+ .build())
+ .addLogs(Log.newBuilder()
+ .setTimestamp(timestamp + 2000 + 400)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("ss").build())
+ .build())
+ .build()
+
+ val aChildSpan = Span.newBuilder()
+ .setSpanId("a")
+ .setParentSpanId(skewedSpanId)
+ .setTraceId(traceId)
+ .setServiceName(serviceName)
+ .setStartTime(timestamp + 2500)
+ .setDuration(400)
+ .addTags(tag)
+ .build()
+
+ val bChildSpan = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId(skewedSpanId)
+ .setTraceId(traceId)
+ .setServiceName(serviceName)
+ .setStartTime(timestamp + 2700)
+ .setDuration(400)
+ .addTags(tag)
+ .build()
+
+ val cSpan = Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setServiceName("otherService")
+ .setStartTime(timestamp + 100)
+ .setDuration(400)
+ .addTags(tag)
+ .build()
+
+ List(aChildSpan, bChildSpan, cSpan, partialSpan)
+ }
+
+ describe("ClockSkewTransformer") {
+ it("should not change clock skew if there are no merged spans") {
+ Given("trace with skewed spans")
+ val timestamp = 150000000000L
+ val spanForest = MutableSpanForest(createTraceWithoutMergedSpans(timestamp))
+
+ When("invoking transform")
+ val transformedSpans = new ClockSkewTransformer().transform(spanForest).getUnderlyingSpans
+
+ Then("return spans without fixing skew")
+ transformedSpans.length should be(5)
+ transformedSpans.find(_.getSpanId == "a").get.getStartTime should be(timestamp)
+ transformedSpans.find(_.getSpanId == "b").get.getStartTime should be(timestamp - 50)
+ transformedSpans.find(_.getSpanId == "c").get.getStartTime should be(timestamp + 500)
+ transformedSpans.find(_.getSpanId == "d").get.getStartTime should be(timestamp - 100)
+ transformedSpans.find(_.getSpanId == "e").get.getStartTime should be(timestamp - 150)
+ }
+
+ it("should fix clock skew if there merged spans with skew") {
+ Given("trace with skewed spans")
+ val timestamp = 150000000000l
+ val spanForest = MutableSpanForest(createSpansWithClientAndServer(timestamp))
+
+ When("invoking transform")
+ val transformedSpans = new ClockSkewTransformer().transform(spanForest).getUnderlyingSpans
+
+ Then("return spans without fixing skew")
+ transformedSpans.length should be(4)
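+ // The +300 shift is consistent with a Zipkin-style skew estimate (an assumption
+ // about the implementation, derived from the fixture): clientDuration = cr - cs = 1000,
+ // serverDuration = ss - sr = 400, skew = sr - cs - (clientDuration - serverDuration) / 2
+ // = 2000 - 300 = 1700, so the server-reported start at timestamp + 2000 moves to
+ // timestamp + 300 and the children "a" and "b" shift by the same 1700.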
+ transformedSpans.find(_.getSpanId == "spanId").get.getStartTime should be(timestamp + 300)
+ transformedSpans.find(_.getSpanId == "a").get.getStartTime should be(timestamp + 300 + 500)
+ transformedSpans.find(_.getSpanId == "b").get.getStartTime should be(timestamp + 300 + 700)
+ transformedSpans.find(_.getSpanId == "c").get.getStartTime should be(timestamp + 100)
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/DeDuplicateSpanTransformerSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/DeDuplicateSpanTransformerSpec.scala
new file mode 100644
index 000000000..eb0b9fc53
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/DeDuplicateSpanTransformerSpec.scala
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.readers.transformers
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.trace.reader.readers.transformers.DeDuplicateSpanTransformer
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+class DeDuplicateSpanTransformerSpec extends BaseUnitTestSpec {
+
+ describe("dedup span transformer") {
+ it("should remove all the duplicate spans") {
+ val span_1 = Span.newBuilder().setTraceId("traceId").setSpanId("span_1").setServiceName("test-service").build()
+ val dup_span_1 = Span.newBuilder().setTraceId("traceId").setSpanId("span_1").setServiceName("test-service").build()
+
+ val span_1_1 = Span.newBuilder().setTraceId("traceId").setSpanId("span_1").setServiceName("test-service-2").build()
+
+ val span_2 = Span.newBuilder().setTraceId("traceId").setSpanId("span_2").setServiceName("another-service").build()
+ val dup_span_2 = Span.newBuilder().setTraceId("traceId").setSpanId("span_2").setServiceName("another-service").build()
+
+ val transformer = new DeDuplicateSpanTransformer()
+ var dedupSpans = transformer.transform(List(span_1, span_2, dup_span_2, dup_span_1, span_1_1))
+ dedupSpans.size shouldBe 3
+ dedupSpans.map(sp => sp.getServiceName) should contain allOf("test-service", "another-service", "test-service-2")
+ dedupSpans.map(sp => sp.getSpanId) should contain allOf("span_1", "span_2")
+
+ dedupSpans = transformer.transform(List(span_1, span_1, span_2, dup_span_2))
+ dedupSpans.size shouldBe 2
+ dedupSpans.map(sp => sp.getSpanId) should contain allOf("span_1", "span_2")
+ }
+ }
+}
\ No newline at end of file
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/InfrastructureTagTransformerSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/InfrastructureTagTransformerSpec.scala
new file mode 100644
index 000000000..e121b289d
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/InfrastructureTagTransformerSpec.scala
@@ -0,0 +1,56 @@
+package com.expedia.www.haystack.trace.reader.unit.readers.transformers
+
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.trace.reader.readers.transformers.InfrastructureTagTransformer
+import com.expedia.www.haystack.trace.reader.readers.utils.AuxiliaryTags
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+import scala.collection.JavaConverters._
+
+class InfrastructureTagTransformerSpec extends BaseUnitTestSpec {
+
+ private val infraTags = Seq(
+ Tag.newBuilder().setKey(AuxiliaryTags.INFRASTRUCTURE_PROVIDER).setVStr("aws").setType(Tag.TagType.STRING).build(),
+ Tag.newBuilder().setKey(AuxiliaryTags.INFRASTRUCTURE_LOCATION).setVStr("us-west-2").setType(Tag.TagType.STRING).build()
+ )
+
+ private val randomTags = Seq(Tag.newBuilder().setKey("error").setVBool(false).setType(Tag.TagType.BOOL).build())
+
+ describe("infrastructure tag transformer") {
+ it("should add missing infrastructure tags in a service spans if any of it contains so") {
+ // first tag in the sequence contains infrastructure tags, following span doesn't contain from the same service doesn't contain.
+ val svc1_span1 = Span.newBuilder().setTraceId("traceId").setSpanId("span_1").setServiceName("service_1").addAllTags(randomTags.asJava).addAllTags(infraTags.asJava).build()
+ val svc1_span2 = Span.newBuilder().setTraceId("traceId").setSpanId("span_2").setParentSpanId("span_1").setServiceName("service_1").build()
+
+ // none of the spans from this service carries infrastructure information
+ val svc2_span_1 = Span.newBuilder().setTraceId("traceId").setSpanId("span_3").setServiceName("service_2").addAllTags(randomTags.asJava).build()
+ val svc2_span_2 = Span.newBuilder().setTraceId("traceId").setSpanId("span_4").setParentSpanId("span_3").setServiceName("service_2").build()
+
+ // the first span from this service does not carry infrastructure tags; a later span from the same service does
+ val svc3_span1 = Span.newBuilder().setTraceId("traceId").setSpanId("span_5").setServiceName("service_3").build()
+ val svc3_span2 = Span.newBuilder().setTraceId("traceId").setSpanId("span_6").setParentSpanId("span_5").setServiceName("service_3").addAllTags(randomTags.asJava).addAllTags(infraTags.asJava).build()
+
+ val transformedSpans = new InfrastructureTagTransformer()
+ .transform(Seq(svc1_span1, svc1_span2, svc2_span_1, svc2_span_2, svc3_span1, svc3_span2))
+
+ transformedSpans.size shouldBe 6
+ transformedSpans.find(_.getSpanId == "span_1").get.getTagsList should contain allElementsOf infraTags
+ transformedSpans.find(_.getSpanId == "span_1").get.getTagsList should contain allElementsOf randomTags
+
+ transformedSpans.find(_.getSpanId == "span_2").get.getTagsCount shouldBe infraTags.size
+ transformedSpans.find(_.getSpanId == "span_2").get.getTagsList should contain allElementsOf infraTags
+
+ transformedSpans.find(_.getSpanId == "span_3").get.getTagsCount shouldBe 1
+ transformedSpans.find(_.getSpanId == "span_3").get.getTagsList should contain allElementsOf randomTags
+ transformedSpans.find(_.getSpanId == "span_4").get.getTagsCount shouldBe 0
+
+ transformedSpans.find(_.getSpanId == "span_5").get.getTagsCount shouldBe infraTags.size
+ transformedSpans.find(_.getSpanId == "span_5").get.getTagsList should contain allElementsOf infraTags
+ transformedSpans.find(_.getSpanId == "span_6").get.getTagsList should contain allElementsOf infraTags
+ transformedSpans.find(_.getSpanId == "span_6").get.getTagsList should contain allElementsOf randomTags
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/InvalidParentTransformerSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/InvalidParentTransformerSpec.scala
new file mode 100644
index 000000000..2bb0e292e
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/InvalidParentTransformerSpec.scala
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.readers.transformers
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.trace.reader.readers.transformers.InvalidParentTransformer
+import com.expedia.www.haystack.trace.reader.readers.utils.MutableSpanForest
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+class InvalidParentTransformerSpec extends BaseUnitTestSpec {
+ describe("InvalidParentTransformer") {
+ it("should mark root as parent for spans with invalid parent ids") {
+ Given("trace having spans with invalid parent ids")
+ val spans = List(
+ Span.newBuilder()
+ .setSpanId("a")
+ .build(),
+ Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("b")
+ .build(),
+ Span.newBuilder()
+ .setTraceId("traceId")
+ .setSpanId("c")
+ .setParentSpanId("traceId")
+ .build()
+ )
+
+ When("invoking transform")
+ val transformedSpanTree = new InvalidParentTransformer().transform(MutableSpanForest(spans))
+ val transformedSpans = transformedSpanTree.getUnderlyingSpans
+
+ Then("mark root to be parent of spans with invalid parent id")
+ transformedSpans.length should be(3)
+
+ val aSpan = transformedSpans.find(_.getSpanId == "a")
+ aSpan.get.getParentSpanId should be("")
+
+ val bSpan = transformedSpans.find(_.getSpanId == "b")
+ bSpan.get.getParentSpanId should be("a")
+
+ val cSpan = transformedSpans.find(_.getSpanId == "c")
+ cSpan.get.getParentSpanId should be("a")
+
+ transformedSpanTree.getAllTrees.size shouldBe 1
+ }
+
+ it("should mark root as parent for spans with parent ids that are not in the trace") {
+ Given("trace having spans with invalid parent ids")
+ val spans = List(
+ Span.newBuilder()
+ .setSpanId("a")
+ .build(),
+ Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("x")
+ .build(),
+ Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("b")
+ .build()
+ )
+
+ When("invoking transform")
+ val transformedSpans = new InvalidParentTransformer().transform(MutableSpanForest(spans)).getUnderlyingSpans
+
+ Then("mark root to be parent of spans with invalid parent id")
+ transformedSpans.length should be(3)
+
+ val aSpan = transformedSpans.find(_.getSpanId == "a")
+ aSpan.get.getParentSpanId should be("")
+
+ val bSpan = transformedSpans.find(_.getSpanId == "b")
+ bSpan.get.getParentSpanId should be("a")
+
+ val cSpan = transformedSpans.find(_.getSpanId == "c")
+ cSpan.get.getParentSpanId should be("b")
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/InvalidRootTransformerSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/InvalidRootTransformerSpec.scala
new file mode 100644
index 000000000..f9e38f52c
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/InvalidRootTransformerSpec.scala
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.readers.transformers
+
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.trace.commons.utils.SpanUtils
+import com.expedia.www.haystack.trace.reader.readers.transformers.InvalidRootTransformer
+import com.expedia.www.haystack.trace.reader.readers.utils.MutableSpanForest
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+import scala.collection.JavaConverters._
+
+class InvalidRootTransformerSpec extends BaseUnitTestSpec {
+ describe("InvalidRootTransformer") {
+ it("should mark first span as root when there are multiple roots") {
+ Given("trace with multiple roots ")
+ val spanForest = MutableSpanForest(Seq(
+ Span.newBuilder()
+ .setSpanId("a")
+ .setServiceName("sa")
+ .setStartTime(150000000000L + 300)
+ .build(),
+ Span.newBuilder()
+ .setSpanId("b")
+ .setServiceName("sb")
+ .setStartTime(150000000000L)
+ .build(),
+ Span.newBuilder()
+ .setSpanId("c")
+ .setServiceName("sc")
+ .setStartTime(150000000000L + 150)
+ .build()
+ ))
+
+ When("invoking transform")
+ val transformedSpans = new InvalidRootTransformer().transform(spanForest).getUnderlyingSpans
+
+ Then("pick first span as root and mark second's parent to be root")
+ transformedSpans.length should be(4)
+
+ val root = transformedSpans.filter(_.getParentSpanId.isEmpty)
+ root.size should be(1)
+ root.head.getServiceName shouldEqual "sb"
+ root.head.getOperationName shouldEqual "auto-generated"
+ root.head.getStartTime shouldBe 150000000000L
+ root.head.getDuration shouldBe 300L
+
+ val others = transformedSpans.filter(!_.getParentSpanId.isEmpty)
+ others.foreach(span => span.getParentSpanId should be(root.head.getSpanId))
+ }
+
+ it("should not generate any autogenerated for a complete tree but without a parent span existing") {
+ Given("trace with multiple roots ")
+ val spanForest = MutableSpanForest(Seq(
+ Span.newBuilder()
+ .setSpanId("a")
+ .setParentSpanId("b")
+ .setServiceName("sa")
+ .setStartTime(150000000000L + 300)
+ .build(),
+ Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("d")
+ .setServiceName("sb")
+ .setStartTime(150000000000L)
+ .build(),
+ Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("b")
+ .setServiceName("sc")
+ .setStartTime(150000000000L + 150)
+ .build()
+ ))
+
+ When("invoking transform")
+ val transformedSpans = new InvalidRootTransformer().transform(spanForest).getUnderlyingSpans
+
+ Then("pick first span as root and mark second's parent to be root")
+ transformedSpans.length should be(3)
+
+ val root = transformedSpans.filter(_.getParentSpanId.isEmpty)
+ root.size should be(1)
+ root.head.getServiceName shouldEqual "sb"
+ root.head.getStartTime shouldBe 150000000000L
+
+ val others = transformedSpans.filter(!_.getParentSpanId.isEmpty)
+ others.foreach(span => span.getParentSpanId should be(root.head.getSpanId))
+ }
+
+ it("should mark first span as root when there are no roots") {
+ Given("trace with multiple roots ")
+ val spanForest = MutableSpanForest(Seq(
+ Span.newBuilder()
+ .setSpanId("a")
+ .setParentSpanId("x")
+ .setStartTime(150000000000L + 300)
+ .build(),
+ Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("x")
+ .setStartTime(150000000000L)
+ .build(),
+ Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("x")
+ .setStartTime(150000000000L + 150)
+ .build()
+ ))
+
+ When("invoking transform")
+ val transformedSpans = new InvalidRootTransformer().transform(spanForest).getUnderlyingSpans
+
+ Then("pick first span as root and mark second's parent to be root")
+ transformedSpans.length should be(3)
+
+ val root = transformedSpans.filter(_.getParentSpanId.isEmpty)
+ root.size should be(1)
+ root.head.getSpanId should be("b")
+ }
+
+ it("should mark loopback span as root when there are no roots") {
+ Given("trace with multiple roots ")
+ val spanForest = MutableSpanForest(Seq(
+ Span.newBuilder()
+ .setSpanId("a")
+ .setParentSpanId("x")
+ .setStartTime(150000000000L + 300)
+ .build(),
+ Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("x")
+ .setStartTime(150000000000L)
+ .build(),
+ Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("c")
+ .setStartTime(150000000000L + 150)
+ .build()
+ ))
+
+ When("invoking transform")
+ val transformedSpans = new InvalidRootTransformer().transform(spanForest).getUnderlyingSpans
+
+ Then("pick first span as root and mark second's parent to be root")
+ transformedSpans.length should be(3)
+
+ val root = transformedSpans.filter(_.getParentSpanId.isEmpty)
+ root.size should be(1)
+ root.head.getSpanId should be("c")
+ }
+
+ it("should create an autogenerated span using the span tree with earliest timestamp if multiple trees exist with a root having an empty parentSpanId") {
+ Given("trace with multiple roots ")
+ val spanForest = MutableSpanForest(Seq(
+ Span.newBuilder()
+ .setSpanId("a")
+ .setServiceName("aService")
+ .setParentSpanId("")
+ .addTags(Tag.newBuilder().setKey(SpanUtils.URL_TAG_KEY).setVStr("/anotherurl").setType(Tag.TagType.STRING))
+ .setStartTime(150000000000L + 300)
+ .build(),
+ Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("")
+ .setServiceName("bService")
+ .addTags(Tag.newBuilder().setKey(SpanUtils.URL_TAG_KEY).setVStr("/someurl").setType(Tag.TagType.STRING))
+ .setStartTime(150000000000L)
+ .build(),
+ Span.newBuilder()
+ .setSpanId("c")
+ .setServiceName("cService")
+ .addTags(Tag.newBuilder().setKey(SpanUtils.URL_TAG_KEY).setVStr("/anotherurl").setType(Tag.TagType.STRING))
+ .setParentSpanId("")
+ .setStartTime(150000000000L + 150)
+ .build()
+ ))
+
+ When("invoking transform")
+ val transformedSpans = new InvalidRootTransformer().transform(spanForest).getUnderlyingSpans
+
+ Then("pick earliest span tree as basis for autogenerated span")
+ transformedSpans.length should be(4)
+
+ val root = transformedSpans.filter(_.getParentSpanId.isEmpty)
+ root.size should be(1)
+ root.head.getSpanId should not be oneOf("a", "b", "c")
+ root.head.getStartTime shouldBe 150000000000L
+ root.head.getOperationName shouldEqual "auto-generated"
+ root.head.getServiceName shouldEqual "bService"
+ val urlTag = root.head.getTagsList.asScala.find(_.getKey == SpanUtils.URL_TAG_KEY)
+ urlTag.isEmpty shouldBe false
+ urlTag.get.getVStr shouldEqual "/someurl"
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/OrphanedTraceTransformerSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/OrphanedTraceTransformerSpec.scala
new file mode 100644
index 000000000..2c24d44e6
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/OrphanedTraceTransformerSpec.scala
@@ -0,0 +1,67 @@
+package com.expedia.www.haystack.trace.reader.unit.readers.transformers
+
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.trace.commons.utils.SpanMarkers
+import com.expedia.www.haystack.trace.reader.readers.transformers.{OrphanedTraceTransformer, OrphanedTraceTransformerConstants}
+import com.expedia.www.haystack.trace.reader.readers.utils.MutableSpanForest
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+class OrphanedTraceTransformerSpec extends BaseUnitTestSpec {
+ describe("OrphanedTraceTransformerTest") {
+ it("should return full list of spans if there is a root span already") {
+ val span_1 = Span.newBuilder().setTraceId("traceId").setSpanId("traceId").setServiceName("another-service").build()
+ val span_2 = Span.newBuilder().setTraceId("traceId").setSpanId("span_2").setParentSpanId(span_1.getSpanId).setServiceName("test-service").build()
+ val span_3 = Span.newBuilder().setTraceId("traceId").setSpanId("span_3").setParentSpanId(span_1.getSpanId).setServiceName("another-service").build()
+
+ val transformer = new OrphanedTraceTransformer()
+ val spanForest = MutableSpanForest(Seq(span_1, span_2, span_3))
+ val spans = transformer.transform(spanForest).getUnderlyingSpans
+ spans.size shouldBe 3
+ spans should contain(span_1)
+ spans should contain(span_2)
+ spans should contain(span_3)
+ }
+
+ it("should return the full list of spans plus a generated root span if there is no root span already") {
+ val span_1 = Span.newBuilder().setTraceId("traceId").setOperationName(SpanMarkers.AUTOGEN_OPERATION_NAME).setServiceName("test-service")
+ .setSpanId("traceId").setStartTime(10000).setDuration(10100)
+ .addTags(Tag.newBuilder().setKey(SpanMarkers.AUTOGEN_REASON_TAG).setVStr(OrphanedTraceTransformerConstants.AUTO_GEN_REASON))
+ .addTags(Tag.newBuilder().setKey(SpanMarkers.AUTOGEN_SPAN_ID_TAG).setVStr("traceId"))
+ .addTags(Tag.newBuilder().setKey(SpanMarkers.AUTOGEN_FLAG_TAG).setVBool(true).setType(Tag.TagType.BOOL)).build()
+ val span_2 = Span.newBuilder().setTraceId("traceId").setSpanId("span_2").setParentSpanId(span_1.getSpanId).setStartTime(10000).setDuration(10).setServiceName("test-service").build()
+ val span_3 = Span.newBuilder().setTraceId("traceId").setSpanId("span_3").setParentSpanId(span_1.getSpanId).setStartTime(20000).setDuration(100).setServiceName("another-service").build()
+
+ val transformer = new OrphanedTraceTransformer()
+ val spanForest = MutableSpanForest(Seq(span_2, span_3))
+ val spans = transformer.transform(spanForest).getUnderlyingSpans
+ spans.size shouldBe 3
+ spans should contain(span_2)
+ spans should contain(span_3)
+ spans should contain(span_1)
+ }
+
+ it("should fail if there are multiple different orphaned parent ids") {
+ val span_1 = Span.newBuilder().setTraceId("traceId").setSpanId("traceId").setServiceName("another-service").build()
+ val span_2 = Span.newBuilder().setTraceId("traceId").setSpanId("span_2").setParentSpanId(span_1.getSpanId).setServiceName("test-service").build()
+ val span_3 = Span.newBuilder().setTraceId("traceId").setSpanId("span_3").setParentSpanId(span_1.getSpanId).setServiceName("another-service").build()
+ val span_4 = Span.newBuilder().setTraceId("traceId").setSpanId("span_4").setParentSpanId(span_1.getSpanId).setServiceName("another-service").build()
+ val span_5 = Span.newBuilder().setTraceId("traceId").setSpanId("span_5").setParentSpanId(span_4.getSpanId).setServiceName("another-service").build()
+
+ val transformer = new OrphanedTraceTransformer()
+ val spanForest = MutableSpanForest(Seq(span_2, span_3, span_5))
+ val spans = transformer.transform(spanForest).getUnderlyingSpans
+ spans.size shouldBe 0
+ }
+
+ it("should fail if there is a missing span in between the root span and orphaned span") {
+ val span_1 = Span.newBuilder().setTraceId("traceId").setSpanId("traceId").setServiceName("another-service").build()
+ val span_4 = Span.newBuilder().setTraceId("traceId").setSpanId("span_4").setParentSpanId(span_1.getSpanId).setServiceName("another-service").build()
+ val span_5 = Span.newBuilder().setTraceId("traceId").setSpanId("span_5").setParentSpanId(span_4.getSpanId).setServiceName("another-service").build()
+
+ val transformer = new OrphanedTraceTransformer()
+ val spanForest = MutableSpanForest(Seq(span_5))
+ val spans = transformer.transform(spanForest).getUnderlyingSpans
+ spans.size shouldBe 0
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/PartialSpanTransformerSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/PartialSpanTransformerSpec.scala
new file mode 100644
index 000000000..37529fad8
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/PartialSpanTransformerSpec.scala
@@ -0,0 +1,262 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.readers.transformers
+
+import com.expedia.open.tracing.{Log, Span, Tag}
+import com.expedia.www.haystack.trace.reader.readers.transformers.PartialSpanTransformer
+import com.expedia.www.haystack.trace.reader.readers.utils.TagExtractors._
+import com.expedia.www.haystack.trace.reader.readers.utils.{AuxiliaryTags, MutableSpanForest}
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+import com.expedia.www.haystack.trace.reader.unit.readers.builders.ValidTraceBuilder
+
+import scala.collection.JavaConverters._
+
+class PartialSpanTransformerSpec extends BaseUnitTestSpec with ValidTraceBuilder {
+
+ private def createSpansWithClientAndServer(timestamp: Long) = {
+ val traceId = "traceId"
+ val partialSpanId = "partialSpanId"
+ val parentSpanId = "parentSpanId"
+ val tag = Tag.newBuilder().setKey("tag").setVBool(true).build()
+
+ val partialClientSpan = Span.newBuilder()
+ .setSpanId(partialSpanId)
+ .setParentSpanId(parentSpanId)
+ .setTraceId(traceId)
+ .setServiceName("clientService")
+ .setStartTime(timestamp)
+ .setDuration(1000)
+ .addTags(tag)
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("cr").build())
+ .build())
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("cs").build())
+ .build())
+ .build()
+
+ val partialServerSpan = Span.newBuilder()
+ .setSpanId(partialSpanId)
+ .setParentSpanId(parentSpanId)
+ .setTraceId(traceId)
+ .setServiceName("serverService")
+ .setStartTime(timestamp + 20)
+ .setDuration(980)
+ .addTags(tag)
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("sr").build())
+ .build())
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("ss").build())
+ .build())
+ .build()
+
+ List(partialServerSpan, partialClientSpan)
+ }
+
+ private def createMultiplePartialSpans(timestamp: Long) = {
+ val traceId = "traceId"
+ val partialSpanId = "partialSpanId"
+ val parentSpanId = "parentSpanId"
+ val tag = Tag.newBuilder().setKey("tag").setVBool(true).build()
+
+ val partialClientSpan = Span.newBuilder()
+ .setSpanId(partialSpanId)
+ .setParentSpanId(parentSpanId)
+ .setTraceId(traceId)
+ .setServiceName("clientService")
+ .setStartTime(timestamp)
+ .setDuration(1000)
+ .addTags(tag)
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("cr").build())
+ .build())
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("cs").build())
+ .build())
+ .build()
+
+ val firstPartialServerSpan = Span.newBuilder()
+ .setSpanId(partialSpanId)
+ .setParentSpanId(parentSpanId)
+ .setTraceId(traceId)
+ .setServiceName("serverService")
+ .setStartTime(timestamp + 20)
+ .setDuration(960)
+ .addTags(tag)
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("sr").build())
+ .build())
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("ss").build())
+ .build())
+ .build()
+
+ val secondPartialServerSpan = Span.newBuilder()
+ .setSpanId(partialSpanId)
+ .setParentSpanId(parentSpanId)
+ .setTraceId(traceId)
+ .setServiceName("serverService")
+ .setStartTime(timestamp + 980)
+ .setDuration(10)
+ .addTags(tag)
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("sr").build())
+ .build())
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("ss").build())
+ .build())
+ .build()
+
+ List(partialClientSpan, secondPartialServerSpan, firstPartialServerSpan)
+ }
+
+ private def createNonPartialSpans(timestamp: Long) = {
+ val traceId = "traceId"
+ val tag = Tag.newBuilder().setKey("tag").setVBool(true).build()
+
+ val span1 = Span.newBuilder()
+ .setSpanId("span1")
+ .setParentSpanId("x")
+ .setTraceId(traceId)
+ .setServiceName("span1Service")
+ .setStartTime(timestamp)
+ .setDuration(1000)
+ .addTags(tag)
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("cr").build())
+ .build())
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("cs").build())
+ .build())
+ .build()
+
+ val span2 = Span.newBuilder()
+ .setSpanId("span2")
+ .setParentSpanId("x")
+ .setTraceId(traceId)
+ .setServiceName("span2Service")
+ .setStartTime(timestamp + 20)
+ .setDuration(980)
+ .addTags(tag)
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("sr").build())
+ .build())
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("ss").build())
+ .build())
+ .build()
+
+ val span3 = Span.newBuilder()
+ .setSpanId("span3")
+ .setParentSpanId("x")
+ .setTraceId(traceId)
+ .setServiceName("span3Service")
+ .setStartTime(timestamp + 980)
+ .setDuration(10)
+ .addTags(tag)
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("sr").build())
+ .build())
+ .addLogs(Log.newBuilder()
+ .setTimestamp(System.currentTimeMillis)
+ .addFields(Tag.newBuilder().setKey("event").setVStr("ss").build())
+ .build())
+ .build()
+
+ List(span1, span2, span3)
+ }
+
+ describe("PartialSpanTransformer") {
+ it("should merge two partial spans with right event sequencing") {
+ Given("trace with partial spans")
+ val timestamp = 150000000000L
+ val spans = createSpansWithClientAndServer(timestamp)
+
+ When("invoking transform")
+ val mergedSpans = new PartialSpanTransformer().transform(MutableSpanForest(spans)).getUnderlyingSpans
+
+ Then("return partial spans merged with server span being primary")
+ mergedSpans.length should be(1)
+ mergedSpans.head.getStartTime should be(timestamp + 20)
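+ // The exact count of 17 bundles the tags of both halves plus the auxiliary
+ // client/server tags added during the merge; the figure depends on the
+ // AuxiliaryTags set and is pinned here as a regression check.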
+ mergedSpans.head.getTagsCount should be(17)
+ mergedSpans.head.getLogsCount should be(4)
+ mergedSpans.head.getServiceName should be("serverService")
+ }
+
+ it("should merge multiple partial spans with first server span as primary") {
+ Given("trace with multiple partial spans")
+ val timestamp = 150000000000L
+ val spans = createMultiplePartialSpans(timestamp)
+
+ When("invoking transform")
+ val mergedSpans = new PartialSpanTransformer().transform(MutableSpanForest(spans)).getUnderlyingSpans
+
+ Then("return partial spans merged with first server span as primary")
+ mergedSpans.length should be(1)
+ mergedSpans.head.getStartTime should be(timestamp + 20)
+ mergedSpans.head.getTagsCount should be(19)
+ mergedSpans.head.getLogsCount should be(6)
+ mergedSpans.head.getServiceName should be("serverService")
+ }
+
+ it("should not merge if there are no partial spans to merge") {
+ Given("trace without partial spans")
+ val timestamp = 150000000000L
+ val spans = createNonPartialSpans(timestamp)
+
+ When("invoking transform")
+ val mergedSpans = new PartialSpanTransformer().transform(MutableSpanForest(spans)).getUnderlyingSpans
+
+ Then("return partial spans merged")
+ mergedSpans.length should be(3)
+ }
+
+ it("should add auxiliary tags") {
+ Given("trace with partial spans")
+ val spans = buildMultiServiceTrace().getChildSpansList.asScala
+
+ When("invoking transform")
+ val mergedSpans = new PartialSpanTransformer().transform(MutableSpanForest(spans)).getUnderlyingSpans
+
+ Then("return partial spans merged with auxiliary tags")
+ mergedSpans.size should be(6)
+ val bSpan = getSpanById(mergedSpans, "b")
+ bSpan.getStartTime should be(startTimestamp + 20)
+ bSpan.getServiceName should be("x")
+
+ extractTagLongValue(bSpan, AuxiliaryTags.NETWORK_DELTA) should be(40)
+ extractTagStringValue(bSpan, AuxiliaryTags.CLIENT_SERVICE_NAME) should be("w")
+ extractTagStringValue(bSpan, AuxiliaryTags.SERVER_SERVICE_NAME) should be("x")
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/ServerClientSpanMergeTransformerSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/ServerClientSpanMergeTransformerSpec.scala
new file mode 100644
index 000000000..a182be0ef
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/ServerClientSpanMergeTransformerSpec.scala
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.readers.transformers
+
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.trace.commons.utils.SpanMarkers
+import com.expedia.www.haystack.trace.reader.readers.transformers.ServerClientSpanMergeTransformer
+import com.expedia.www.haystack.trace.reader.readers.utils.{AuxiliaryTags, MutableSpanForest}
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+import com.expedia.www.haystack.trace.reader.unit.readers.builders.ValidTraceBuilder
+
+import scala.collection.JavaConverters._
+
+class ServerClientSpanMergeTransformerSpec extends BaseUnitTestSpec with ValidTraceBuilder {
+
+ private def createProducerAndConsumerSpanKinds(): List[Span] = {
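+ // Builds a producer -> consumer -> producer chain; messaging span kinds are
+ // expected to stay separate rather than merge like client/server pairs.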
+ val traceId = "traceId"
+
+ val timestamp = System.currentTimeMillis() * 1000
+
+ val producerSpan = Span.newBuilder()
+ .setSpanId("sa")
+ .setTraceId(traceId)
+ .setServiceName("aSvc")
+ .addTags(Tag.newBuilder().setKey("span.kind").setVStr("producer"))
+ .setStartTime(timestamp + 100)
+ .setDuration(1000)
+ .build()
+
+ val consumerASpan = Span.newBuilder()
+ .setSpanId("sb1")
+ .setParentSpanId("sa")
+ .setTraceId(traceId)
+ .setServiceName("bSvc")
+ .addTags(Tag.newBuilder().setKey("span.kind").setVStr("consumer"))
+ .setStartTime(timestamp + 400)
+ .setDuration(1000)
+ .build()
+
+ val consumerBSpan = Span.newBuilder()
+ .setSpanId("sb2")
+ .setParentSpanId("sb1")
+ .setTraceId(traceId)
+ .setServiceName("bSvc")
+ .addTags(Tag.newBuilder().setKey("span.kind").setVStr("producer"))
+ .setStartTime(timestamp + 1000)
+ .setDuration(1000)
+ .build()
+ List(producerSpan, consumerASpan, consumerBSpan)
+ }
+
+ private def createSpansWithClientAndServer(): List[Span] = {
+ val traceId = "traceId"
+
+ val timestamp = System.currentTimeMillis() * 1000
+
+ val serverSpanA = Span.newBuilder()
+ .setSpanId("sa")
+ .setTraceId(traceId)
+ .setServiceName("aSvc")
+ .setStartTime(timestamp + 100)
+ .setDuration(1000)
+ .build()
+
+ val clientSpanA = Span.newBuilder()
+ .setSpanId("ca")
+ .setParentSpanId("sa")
+ .setTraceId(traceId)
+ .setServiceName("aSvc")
+ .setStartTime(timestamp + 100)
+ .setDuration(1000)
+ .build()
+
+ val serverSpanB = Span.newBuilder()
+ .setSpanId("sb")
+ .setParentSpanId("ca")
+ .setServiceName("bSvc")
+ .setTraceId(traceId)
+ .setStartTime(timestamp + 200)
+ .setDuration(100)
+ .build()
+
+ val clientSpanB_1 = Span.newBuilder()
+ .setSpanId("cb1")
+ .setParentSpanId("sb")
+ .setServiceName("bSvc")
+ .setTraceId(traceId)
+ .setStartTime(timestamp + 300)
+ .setDuration(100)
+ .build()
+
+ val clientSpanB_2 = Span.newBuilder()
+ .setSpanId("cb2")
+ .setParentSpanId("sb")
+ .setServiceName("bSvc")
+ .setStartTime(timestamp + 400)
+ .setTraceId(traceId)
+ .setDuration(100)
+ .build()
+
+ val serverSpanC_1 = Span.newBuilder()
+ .setSpanId("sc1")
+ .setParentSpanId("cb1")
+ .setServiceName("cSvc")
+ .setTraceId(traceId)
+ .setStartTime(timestamp + 500)
+ .setDuration(100)
+ .build()
+
+ val serverSpanC_2 = Span.newBuilder()
+ .setSpanId("sc2")
+ .setParentSpanId("cb2")
+ .setServiceName("cSvc")
+ .setTraceId(traceId)
+ .setStartTime(timestamp + 600)
+ .setDuration(100)
+ .build()
+
+ val serverSpanC_3 = Span.newBuilder()
+ .setSpanId("sc3")
+ .setParentSpanId("p1")
+ .setServiceName("cSvc")
+ .addTags(Tag.newBuilder().setKey(SpanMarkers.SPAN_KIND_TAG_KEY).setVStr(SpanMarkers.SERVER_SPAN_KIND))
+ .setTraceId(traceId)
+ .setStartTime(timestamp + 600)
+ .setDuration(100)
+ .build()
+
+ val serverSpanD_1 = Span.newBuilder()
+ .setSpanId("sd1")
+ .setParentSpanId("sc3")
+ .setServiceName("dSvc")
+ .addTags(Tag.newBuilder().setKey(SpanMarkers.SPAN_KIND_TAG_KEY).setVStr(SpanMarkers.SERVER_SPAN_KIND))
+ .setTraceId(traceId)
+ .setStartTime(timestamp + 600)
+ .setDuration(100)
+ .build()
+
+ val serverSpanD_2 = Span.newBuilder()
+ .setSpanId("sd2")
+ .setParentSpanId("sc3")
+ .setServiceName("dSvc")
+ .addTags(Tag.newBuilder().setKey(SpanMarkers.SPAN_KIND_TAG_KEY).setVStr(SpanMarkers.CLIENT_SPAN_KIND))
+ .setTraceId(traceId)
+ .setStartTime(timestamp + 600)
+ .setDuration(100)
+ .build()
+
+ val serverSpanE_1 = Span.newBuilder()
+ .setSpanId("se1")
+ .setParentSpanId("sd2")
+ .setServiceName("eSvc")
+ .addTags(Tag.newBuilder().setKey(SpanMarkers.SPAN_KIND_TAG_KEY).setVStr(SpanMarkers.SERVER_SPAN_KIND))
+ .setTraceId(traceId)
+ .setStartTime(timestamp + 600)
+ .setDuration(100)
+ .build()
+
+ List(serverSpanA, clientSpanA, serverSpanB, clientSpanB_1, clientSpanB_2, serverSpanC_1, serverSpanC_2, serverSpanC_3, serverSpanD_1, serverSpanD_2, serverSpanE_1)
+ }
+
+ describe("ServerClientSpanMergeTransformer") {
+ it("should merge the server client spans") {
+ Given("a sequence of spans of a given trace")
+ val spans = createSpansWithClientAndServer()
+
+ When("invoking transform")
+ val mergedSpans =
+ new ServerClientSpanMergeTransformer().transform(MutableSpanForest(spans))
+
+ val underlyingSpans = mergedSpans.getUnderlyingSpans
+
+ Then("return partial spans merged with server span being primary")
+ underlyingSpans.length should be(7)
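+ // 11 input spans collapse into 7: the four client->server pairs (ca, sb),
+ // (cb1, sc1), (cb2, sc2) and (sd2, se1) each merge into a single span.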
+ underlyingSpans.foreach(span => span.getTraceId shouldBe traceId)
+ underlyingSpans.head.getSpanId shouldBe "sa"
+ underlyingSpans.head.getParentSpanId shouldBe ""
+
+ underlyingSpans.apply(1).getSpanId shouldBe "sb"
+ underlyingSpans.apply(1).getParentSpanId shouldBe "sa"
+ underlyingSpans.apply(1).getServiceName shouldBe "bSvc"
+ getTag(underlyingSpans.apply(1), AuxiliaryTags.IS_MERGED_SPAN).getVBool shouldBe true
+ underlyingSpans.apply(1).getLogsCount shouldBe 4
+
+ underlyingSpans.apply(2).getSpanId shouldBe "sc1"
+ underlyingSpans.apply(2).getParentSpanId shouldBe "sb"
+ underlyingSpans.apply(2).getServiceName shouldBe "cSvc"
+ getTag(underlyingSpans.apply(2), AuxiliaryTags.IS_MERGED_SPAN).getVBool shouldBe true
+ underlyingSpans.apply(2).getLogsCount shouldBe 4
+
+ underlyingSpans.apply(3).getSpanId shouldBe "sc2"
+ underlyingSpans.apply(3).getParentSpanId shouldBe "sb"
+ underlyingSpans.apply(3).getServiceName shouldBe "cSvc"
+ getTag(underlyingSpans.apply(3), AuxiliaryTags.IS_MERGED_SPAN).getVBool shouldBe true
+ underlyingSpans.apply(3).getLogsCount shouldBe 4
+
+ underlyingSpans.apply(4).getSpanId shouldBe "sc3"
+ getTag(underlyingSpans.apply(4), AuxiliaryTags.IS_MERGED_SPAN) shouldBe null
+
+ underlyingSpans.apply(5).getSpanId shouldBe "sd1"
+ getTag(underlyingSpans.apply(5), AuxiliaryTags.IS_MERGED_SPAN) shouldBe null
+
+ underlyingSpans.apply(6).getSpanId shouldBe "se1"
+ underlyingSpans.apply(6).getServiceName shouldBe "eSvc"
+ getTag(underlyingSpans.apply(6), AuxiliaryTags.IS_MERGED_SPAN).getVBool shouldBe true
+ getTag(underlyingSpans.apply(6), AuxiliaryTags.CLIENT_SERVICE_NAME).getVStr shouldBe "dSvc"
+ getTag(underlyingSpans.apply(6), AuxiliaryTags.CLIENT_SPAN_ID).getVStr shouldBe "sd2"
+ getTag(underlyingSpans.apply(6), AuxiliaryTags.CLIENT_OPERATION_NAME).getVStr shouldBe empty
+
+ mergedSpans.countTrees shouldBe 2
+ val spanTree = mergedSpans.getAllTrees.head
+ spanTree.span shouldBe underlyingSpans.head
+ spanTree.children.size shouldBe 1
+ spanTree.children.head.children.size shouldBe 2
+ spanTree.children.head.span shouldBe underlyingSpans.apply(1)
+ spanTree.children.head.children.map(_.span) should contain allOf(underlyingSpans.apply(2), underlyingSpans.apply(3))
+ spanTree.children.head.children.foreach(tree => tree.children.size shouldBe 0)
+ }
+
+ it ("should not merge producer and consumer parent-child spans") {
+ Given("a sequence of spans of a given trace")
+ val spans = createProducerAndConsumerSpanKinds()
+
+ When("invoking transform")
+ val mergedSpans =
+ new ServerClientSpanMergeTransformer().transform(MutableSpanForest(spans))
+
+ val underlyingSpans = mergedSpans.getUnderlyingSpans
+ underlyingSpans.size shouldBe 3
+ underlyingSpans.foreach(sp => getTag(sp, AuxiliaryTags.IS_MERGED_SPAN) shouldBe null)
+ }
+ }
+
+ private def getTag(span: Span, tagKey: String): Tag = {
+ span.getTagsList.asScala.find(tag => tag.getKey.equals(tagKey)).orNull
+ }
+}
\ No newline at end of file
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/SortSpanTransformerSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/SortSpanTransformerSpec.scala
new file mode 100644
index 000000000..729fd5323
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/transformers/SortSpanTransformerSpec.scala
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.readers.transformers
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.trace.reader.readers.transformers.SortSpanTransformer
+import com.expedia.www.haystack.trace.reader.readers.utils.MutableSpanForest
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+class SortSpanTransformerSpec extends BaseUnitTestSpec {
+
+ def createSpans(timestamp: Long): List[Span] = {
+ val traceId = "traceId"
+
+ val spanA = Span.newBuilder()
+ .setSpanId("a")
+ .setTraceId(traceId)
+ .setStartTime(timestamp)
+ .setDuration(1000)
+ .build()
+
+ val spanB = Span.newBuilder()
+ .setSpanId("b")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setStartTime(timestamp + 50)
+ .setDuration(100)
+ .build()
+
+ val spanC = Span.newBuilder()
+ .setSpanId("c")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setStartTime(timestamp + 100)
+ .setDuration(100)
+ .build()
+
+ val spanD = Span.newBuilder()
+ .setSpanId("d")
+ .setParentSpanId("a")
+ .setTraceId(traceId)
+ .setStartTime(timestamp + 200)
+ .setDuration(100)
+ .build()
+
+ val spanE = Span.newBuilder()
+ .setSpanId("e")
+ .setParentSpanId("b")
+ .setTraceId(traceId)
+ .setStartTime(timestamp + 300)
+ .setDuration(100)
+ .build()
+
+ List(spanE, spanB, spanC, spanA, spanD)
+ }
+
+ describe("SortSpanTransformer") {
+ it("should sort spans in natural order") {
+ Given("trace with spans")
+ val timestamp = 150000000000L
+ val spans = createSpans(timestamp)
+
+ When("invoking transform")
+ val transformedSpans = new SortSpanTransformer().transform(MutableSpanForest(spans)).getUnderlyingSpans
+
+ Then("return spans in sorted order")
+ transformedSpans.length should be(5)
+ transformedSpans.head.getSpanId should be("a")
+ transformedSpans(1).getSpanId should be("b")
+ transformedSpans(2).getSpanId should be("c")
+ transformedSpans(3).getSpanId should be("d")
+ transformedSpans(4).getSpanId should be("e")
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/validators/ParentIdValidatorSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/validators/ParentIdValidatorSpec.scala
new file mode 100644
index 000000000..20d8541a2
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/validators/ParentIdValidatorSpec.scala
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.readers.validators
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.api.Trace
+import com.expedia.www.haystack.trace.reader.exceptions.InvalidTraceException
+import com.expedia.www.haystack.trace.reader.readers.validators.ParentIdValidator
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+class ParentIdValidatorSpec extends BaseUnitTestSpec {
+ val TRACE_ID = "traceId"
+
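+ // ParentIdValidator, as exercised below, rejects a trace when a span is its own
+ // parent or when a span's parentSpanId matches no spanId in the trace.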
+ describe("ParentIdValidator") {
+ it("should fail for traces with spans having same id and parent id") {
+ Given("trace with span having same span and parent id")
+ val trace = Trace.newBuilder()
+ .setTraceId(TRACE_ID)
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("rootSpanId"))
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("spanId").setParentSpanId("spanId"))
+ .build()
+
+ When("on validate")
+ val validationResult = new ParentIdValidator().validate(trace)
+
+ Then("fail with InvalidTraceException")
+ val thrown = the[InvalidTraceException] thrownBy validationResult.get
+ thrown.getStatus.getDescription shouldEqual "Invalid Trace: same parent and span id found for one ore more span for traceId=traceId" // expected text (including "ore") must match the validator's message verbatim
+ }
+
+ it("should fail for traces with spans without parents") {
+ Given("trace with empty traceId")
+ val trace = Trace.newBuilder()
+ .setTraceId(TRACE_ID)
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("a"))
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("b").setParentSpanId("x"))
+ .build()
+
+ When("on validate")
+ val validationResult = new ParentIdValidator().validate(trace)
+
+ Then("fail with InvalidTraceException")
+ val thrown = the[InvalidTraceException] thrownBy validationResult.get
+ thrown.getStatus.getDescription shouldEqual "Invalid Trace: spans without valid parent found for traceId=traceId"
+ }
+
+ it("should accept valid traces") {
+ Given("trace with valid spans")
+ val trace = Trace.newBuilder()
+ .setTraceId(TRACE_ID)
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("a"))
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("b").setParentSpanId("a"))
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("c").setParentSpanId("a"))
+ .build()
+
+ When("on validate")
+ val validationResult = new ParentIdValidator().validate(trace)
+
+ Then("accept trace")
+ noException should be thrownBy validationResult.get
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/validators/RootValidatorSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/validators/RootValidatorSpec.scala
new file mode 100644
index 000000000..fcbeabd98
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/validators/RootValidatorSpec.scala
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.readers.validators
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.api.Trace
+import com.expedia.www.haystack.trace.reader.exceptions.InvalidTraceException
+import com.expedia.www.haystack.trace.reader.readers.validators.RootValidator
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+class RootValidatorSpec extends BaseUnitTestSpec {
+ val TRACE_ID = "traceId"
+
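+ // RootValidator, as exercised below, requires exactly one root span (a span with
+ // no parentSpanId); zero roots or multiple roots make the trace invalid.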
+ describe("RootValidator") {
+ it("should fail for traces with multiple spans as root") {
+ Given("trace with empty traceId")
+ val trace = Trace.newBuilder()
+ .setTraceId("traceId")
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("a"))
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("b"))
+ .build()
+
+ When("on validate")
+ val validationResult = new RootValidator().validate(trace)
+
+ Then("fail with InvalidTraceException")
+ val thrown = the[InvalidTraceException] thrownBy validationResult.get
+ thrown.getStatus.getDescription shouldEqual "Invalid Trace: found 2 roots with spanIDs=a,b and traceID=traceId"
+ }
+
+ it("should fail for traces with no root") {
+ Given("trace with empty traceId")
+ val trace = Trace.newBuilder()
+ .setTraceId("traceId")
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("a").setParentSpanId("x"))
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("b").setParentSpanId("x"))
+ .build()
+
+ When("on validate")
+ val validationResult = new RootValidator().validate(trace)
+
+ Then("fail with InvalidTraceException")
+ val thrown = the[InvalidTraceException] thrownBy validationResult.get
+ thrown.getStatus.getDescription shouldEqual "Invalid Trace: found 0 roots with spanIDs= and traceID=traceId"
+ }
+
+ it("should accept valid traces") {
+ Given("trace with valid spans")
+ val trace = Trace.newBuilder()
+ .setTraceId(TRACE_ID)
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("a"))
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("b").setParentSpanId("a"))
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("c").setParentSpanId("a"))
+ .build()
+
+ When("on validate")
+ val validationResult = new RootValidator().validate(trace)
+
+ Then("accept trace")
+ noException should be thrownBy validationResult.get
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/validators/TraceIdValidatorSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/validators/TraceIdValidatorSpec.scala
new file mode 100644
index 000000000..3e22edbe8
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/readers/validators/TraceIdValidatorSpec.scala
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.readers.validators
+
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.api.Trace
+import com.expedia.www.haystack.trace.reader.exceptions.InvalidTraceException
+import com.expedia.www.haystack.trace.reader.readers.validators.TraceIdValidator
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+class TraceIdValidatorSpec extends BaseUnitTestSpec {
+ describe("TraceIdValidator") {
+ val TRACE_ID = "traceId"
+
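+ // TraceIdValidator, as exercised below, requires a non-empty traceId on the trace
+ // and the same traceId on every child span.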
+ it("should fail for traces with empty traceId") {
+ Given("trace with empty traceId")
+ val trace = Trace.newBuilder().build()
+
+ When("on validate")
+ val validationResult = new TraceIdValidator().validate(trace)
+
+ Then("Fail with InvalidTraceException")
+ val thrown = the[InvalidTraceException] thrownBy validationResult.get
+ thrown.getStatus.getDescription should include("invalid traceId")
+ }
+
+ it("should fail for traces with spans having different traceId") {
+ Given("trace with span having different id")
+ val trace = Trace.newBuilder()
+ .setTraceId("traceId")
+ .addChildSpans(Span.newBuilder().setTraceId("dummy").setSpanId("spanId"))
+ .build()
+
+ When("on validate")
+ val validationResult = new TraceIdValidator().validate(trace)
+
+ Then("Fail with InvalidTraceException")
+ val thrown = the[InvalidTraceException] thrownBy validationResult.get
+ thrown.getStatus.getDescription should include("span with different traceId")
+ }
+
+ it("should accept valid traces") {
+ Given("trace with valid spans")
+ val trace = Trace.newBuilder()
+ .setTraceId(TRACE_ID)
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("a"))
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("b").setParentSpanId("a"))
+ .addChildSpans(Span.newBuilder().setTraceId(TRACE_ID).setSpanId("c").setParentSpanId("a"))
+ .build()
+
+ When("on validate")
+ val validationResult = new TraceIdValidator().validate(trace)
+
+ Then("accept trace")
+ noException should be thrownBy validationResult.get
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/ResponseParserSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/ResponseParserSpec.scala
new file mode 100644
index 000000000..7136c4a90
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/ResponseParserSpec.scala
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.stores
+
+import com.expedia.www.haystack.trace.reader.stores.ResponseParser
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+import com.google.gson.{Gson, JsonParser}
+import io.searchbox.core.SearchResult
+import scala.concurrent.ExecutionContext.Implicits.global
+
+class ResponseParserSpec extends BaseUnitTestSpec with ResponseParser {
+ describe("ResponseParserSpec") {
+ it("should be able to parse the search result to trace counts") {
+
+ Given("a trace search response")
+ val result = new SearchResult(new Gson())
+ result.setSucceeded(true)
+ result.setJsonString(getJson())
+ result.setJsonObject(new JsonParser().parse(getJson()).getAsJsonObject)
+
+ When("map search result to trace counts")
+ val traceCounts = mapSearchResultToTraceCounts(result)
+
+ Then("generate a valid query")
+ traceCounts should not be None
+ traceCounts.map(traceCounts => traceCounts.getTraceCountCount shouldEqual 11)
+ }
+ }
+
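+ // Canned Elasticsearch aggregation response: eleven histogram buckets under
+ // spans -> spans -> __count_per_interval, so the parser is expected to yield
+ // eleven trace counts.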
+ def getJson(): String = {
+ """
+ |{
+ | "took": 41810,
+ | "timed_out": false,
+ | "_shards": {
+ | "total": 240,
+ | "successful": 240,
+ | "skipped": 0,
+ | "failed": 0
+ | },
+ | "hits": {
+ | "total": 10052727254,
+ | "max_score": 0.0,
+ | "hits": []
+ | },
+ | "aggregations": {
+ | "spans": {
+ | "doc_count": 23138047525,
+ | "spans": {
+ | "doc_count": 2604513,
+ | "__count_per_interval": {
+ | "buckets": [
+ | {
+ | "key": 1.52690406E15,
+ | "doc_count": 150949
+ | },
+ | {
+ | "key": 1.52690412E15,
+ | "doc_count": 262163
+ | },
+ | {
+ | "key": 1.52690418E15,
+ | "doc_count": 259394
+ | },
+ | {
+ | "key": 1.52690424E15,
+ | "doc_count": 253247
+ | },
+ | {
+ | "key": 1.5269043E15,
+ | "doc_count": 253589
+ | },
+ | {
+ | "key": 1.52690436E15,
+ | "doc_count": 261232
+ | },
+ | {
+ | "key": 1.52690442E15,
+ | "doc_count": 258264
+ | },
+ | {
+ | "key": 1.52690448E15,
+ | "doc_count": 270179
+ | },
+ | {
+ | "key": 1.52690454E15,
+ | "doc_count": 266545
+ | },
+ | {
+ | "key": 1.5269046E15,
+ | "doc_count": 264921
+ | },
+ | {
+ | "key": 1.52690466E15,
+ | "doc_count": 104084
+ | }
+ | ]
+ | }
+ | }
+ | }
+ | }
+ |}
+ """.stripMargin
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/ElasticSearchReadResultListenerSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/ElasticSearchReadResultListenerSpec.scala
new file mode 100644
index 000000000..c9a7107e2
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/ElasticSearchReadResultListenerSpec.scala
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.stores.readers.es.query
+
+import com.codahale.metrics.{Meter, Timer}
+import com.expedia.open.tracing.api.{Field, TracesSearchRequest}
+import com.expedia.www.haystack.trace.commons.config.entities.{IndexFieldType, WhitelistIndexFieldConfiguration}
+import com.expedia.www.haystack.trace.reader.config.entities.SpansIndexConfiguration
+import com.expedia.www.haystack.trace.reader.exceptions.ElasticSearchClientError
+import com.expedia.www.haystack.trace.reader.stores.readers.es.ElasticSearchReadResultListener
+import com.expedia.www.haystack.trace.reader.stores.readers.es.query.TraceSearchQueryGenerator
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+import io.searchbox.core.SearchResult
+import org.easymock.EasyMock
+import org.json4s.ext.EnumNameSerializer
+import org.json4s.{DefaultFormats, Formats}
+
+import scala.concurrent.Promise
+
+class ElasticSearchReadResultListenerSpec extends BaseUnitTestSpec {
+ protected implicit val formats: Formats = DefaultFormats + new EnumNameSerializer(IndexFieldType)
+ val ES_INDEX_HOUR_BUCKET = 6
+ val ES_INDEX_HOUR_TTL = 72
+
+ private val spansIndexConfiguration = SpansIndexConfiguration(
+ indexNamePrefix = "haystack-traces",
+ indexType = "spans",
+ indexHourTtl = ES_INDEX_HOUR_TTL,
+ indexHourBucket = ES_INDEX_HOUR_BUCKET,
+ useRootDocumentStartTime = false)
+
+ private val searchRequest = {
+ val generator = new TraceSearchQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration())
+ val field = Field.newBuilder().setName("serviceName").setValue("expweb").build()
+ generator.generate(TracesSearchRequest.newBuilder().setStartTime(1510469157572000L).setEndTime(1510469161172000L).setLimit(40).addFields(field).build(), true)
+ }
+
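+ // Contract exercised below: completed() with a 2xx response fulfils the promise;
+ // any other status fails it with an ElasticSearchClientError and marks the
+ // failure meter; failed() propagates the underlying exception untouched.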
+ describe("ElasticSearch Read Result Listener") {
+ it("should invoke successful promise with search result") {
+ val promise = mock[Promise[SearchResult]]
+ val timer = mock[Timer.Context]
+ val failureMeter = mock[Meter]
+ val searchResult = mock[SearchResult]
+
+ expecting {
+ timer.close().once()
+ searchResult.getResponseCode.andReturn(200).atLeastOnce()
+ promise.success(searchResult).andReturn(promise).once()
+ }
+
+ whenExecuting(promise, timer, failureMeter, searchResult) {
+ val listener = new ElasticSearchReadResultListener(searchRequest, promise, timer, failureMeter)
+ listener.completed(searchResult)
+ }
+ }
+
+ it("should invoke failed promise with exception object if response code is not 2xx ") {
+ val promise = mock[Promise[SearchResult]]
+ val timer = mock[Timer.Context]
+ val failureMeter = mock[Meter]
+ val searchResult = mock[SearchResult]
+
+ expecting {
+ timer.close().once()
+ searchResult.getResponseCode.andReturn(500).atLeastOnce()
+ searchResult.getJsonString.andReturn("json-string").times(2)
+ failureMeter.mark()
+ promise.failure(EasyMock.anyObject(classOf[ElasticSearchClientError])).andReturn(promise).once()
+ }
+
+ whenExecuting(promise, timer, failureMeter, searchResult) {
+ val listener = new ElasticSearchReadResultListener(searchRequest, promise, timer, failureMeter)
+ listener.completed(searchResult)
+ }
+ }
+
+ it("should invoke failed promise with exception object if failure is generated") {
+ val promise = mock[Promise[SearchResult]]
+ val timer = mock[Timer.Context]
+ val failureMeter = mock[Meter]
+ val expectedException = new Exception
+
+ expecting {
+ timer.close().once()
+ failureMeter.mark()
+ promise.failure(expectedException).andReturn(promise).once()
+ }
+
+ whenExecuting(promise, timer, failureMeter) {
+ val listener = new ElasticSearchReadResultListener(searchRequest, promise, timer, failureMeter)
+ listener.failed(expectedException)
+ }
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/FieldValuesQueryGeneratorSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/FieldValuesQueryGeneratorSpec.scala
new file mode 100644
index 000000000..9141ae7aa
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/FieldValuesQueryGeneratorSpec.scala
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.expedia.www.haystack.trace.reader.unit.stores.readers.es.query
+
+import com.expedia.open.tracing.api.{Field, FieldValuesRequest}
+import com.expedia.www.haystack.trace.commons.config.entities.WhitelistIndexFieldConfiguration
+import com.expedia.www.haystack.trace.reader.config.entities.SpansIndexConfiguration
+import com.expedia.www.haystack.trace.reader.stores.readers.es.ESUtils._
+import com.expedia.www.haystack.trace.reader.stores.readers.es.query.FieldValuesQueryGenerator
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+
+class FieldValuesQueryGeneratorSpec extends BaseUnitTestSpec {
+ private val indexType = "spans"
+ private val spansIndexConfiguration = SpansIndexConfiguration(
+ indexNamePrefix = "haystack-traces",
+ indexType = indexType,
+ indexHourTtl = 72,
+ indexHourBucket = 6,
+ useRootDocumentStartTime = false)
+
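+ // The generator is expected to lowercase field names, keeping queries
+ // caption-independent however callers capitalise "serviceName" or "operationName".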
+ describe("FieldValuesQueryGenerator") {
+ it("should generate valid search queries") {
+ Given("a trace search request")
+ val serviceName = "svcName"
+ val request = FieldValuesRequest
+ .newBuilder()
+ .setFieldName("operationName")
+ .addFilters(Field.newBuilder().setName("serviceName").setValue(serviceName).build())
+ .build()
+ val queryGenerator = new FieldValuesQueryGenerator(spansIndexConfiguration, "spans", new WhitelistIndexFieldConfiguration)
+
+ When("generating query")
+ val query = queryGenerator.generate(request)
+
+ Then("generate a valid query")
+ query.getType should be(indexType)
+ }
+
+ it("should generate caption independent search queries") {
+ Given("a trace search request")
+ val serviceField = "serviceName"
+ val operationField = "operationName"
+ val serviceName = "svcName"
+ val request = FieldValuesRequest
+ .newBuilder()
+ .setFieldName(operationField)
+ .addFilters(Field.newBuilder().setName(serviceField).setValue(serviceName).build())
+ .build()
+ val queryGenerator = new FieldValuesQueryGenerator(spansIndexConfiguration, "spans", new WhitelistIndexFieldConfiguration)
+
+ When("generating query")
+ val query = queryGenerator.generate(request)
+
+ Then("generate a valid query with fields in lowercase")
+ val queryString = query.toJson
+ queryString.contains(serviceField.toLowerCase()) should be(true)
+ queryString.contains(operationField.toLowerCase()) should be(true)
+ }
+ }
+}
\ No newline at end of file
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/ServiceMetadataQueryGeneratorSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/ServiceMetadataQueryGeneratorSpec.scala
new file mode 100644
index 000000000..d2301514d
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/ServiceMetadataQueryGeneratorSpec.scala
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.expedia.www.haystack.trace.reader.unit.stores.readers.es.query
+
+import com.expedia.www.haystack.trace.reader.config.entities.ServiceMetadataIndexConfiguration
+import com.expedia.www.haystack.trace.reader.stores.readers.es.query.ServiceMetadataQueryGenerator
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+import com.google.gson.Gson
+
+class ServiceMetadataQueryGeneratorSpec extends BaseUnitTestSpec {
+ private val indexType = "metadata"
+ private val serviceMetadataIndexConfiguration = ServiceMetadataIndexConfiguration(
+ enabled = true,
+ indexName = "service_metadata",
+ indexType = indexType)
+
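+ // Both queries below are expected to be pure terms aggregations (size 0, no hits):
+ // one over the lowercase "servicename" field and, presumably symmetrically, one
+ // over operation names for a given service.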
+ describe("ServiceMetadataQueryGenerator") {
+ it("should generate valid aggregation queries for service names") {
+ Given("a query generator")
+ val queryGenerator = new ServiceMetadataQueryGenerator(serviceMetadataIndexConfiguration)
+
+ When("asked for aggregated service name")
+ val query = queryGenerator.generateSearchServiceQuery()
+
+ Then("generate a valid query")
+ query.getType should be(indexType)
+ query.getData(new Gson()) shouldEqual "{\n \"size\" : 0,\n \"aggregations\" : {\n \"distinct_services\" : {\n \"terms\" : {\n \"field\" : \"servicename\",\n \"size\" : 10000,\n \"min_doc_count\" : 1,\n \"shard_min_doc_count\" : 0,\n \"show_term_doc_count_error\" : false,\n \"order\" : [\n {\n \"_count\" : \"desc\"\n },\n {\n \"_key\" : \"asc\"\n }\n ]\n }\n }\n }\n}"
+ query.toString shouldEqual "Search{uri=service_metadata/metadata/_search, method=POST}"
+ }
+
+ it("should generate valid aggregation queries for operation names") {
+ Given("a query generator and a service name")
+ val queryGenerator = new ServiceMetadataQueryGenerator(serviceMetadataIndexConfiguration)
+ val serviceName = "test_service"
+ When("asked for aggregated operation names")
+ val query = queryGenerator.generateSearchOperationQuery(serviceName)
+
+ Then("generate a valid query")
+ query.getType should be(indexType)
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/TraceCountsQueryGeneratorSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/TraceCountsQueryGeneratorSpec.scala
new file mode 100644
index 000000000..b19e92473
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/TraceCountsQueryGeneratorSpec.scala
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.stores.readers.es.query
+
+import java.util.concurrent.TimeUnit
+
+import com.expedia.open.tracing.api._
+import com.expedia.www.haystack.trace.commons.clients.es.document.TraceIndexDoc
+import com.expedia.www.haystack.trace.commons.config.entities.WhitelistIndexFieldConfiguration
+import com.expedia.www.haystack.trace.reader.config.entities.SpansIndexConfiguration
+import com.expedia.www.haystack.trace.reader.stores.readers.es.query.TraceCountsQueryGenerator
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+import com.expedia.www.haystack.trace.reader.unit.stores.readers.es.query.helper.ExpressionTreeBuilder._
+import com.google.gson.Gson
+import io.searchbox.core.Search
+
+class TraceCountsQueryGeneratorSpec extends BaseUnitTestSpec {
+ private val ES_INDEX_HOUR_BUCKET = 6
+ private val ES_INDEX_HOUR_TTL = 72
+ private val INDEX_NAME_PREFIX = "haystack-spans"
+ private val interval = TimeUnit.SECONDS.toMicros(60)
+
+ private val spansIndexConfiguration = SpansIndexConfiguration(
+ indexNamePrefix = INDEX_NAME_PREFIX,
+ indexType = "spans",
+ indexHourTtl = ES_INDEX_HOUR_TTL,
+ indexHourBucket = ES_INDEX_HOUR_BUCKET,
+ useRootDocumentStartTime = true)
+
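+ // Index names follow "<prefix>-yyyy-MM-dd-<bucket>" in UTC, where the bucket is the
+ // UTC hour divided by the 6-hour bucket size configured above: e.g. 10:02 AM falls
+ // in bucket 1 (06:00-12:00) and 1:02 PM in bucket 2 (12:00-18:00), as asserted below.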
+ describe("TraceSearchQueryGenerator") {
+ it("should generate valid search queries") {
+ Given("a trace search request")
+ val serviceName = "svcName"
+ val operationName = "opName"
+ val startTime = 1529418475791000L // Tuesday, June 19, 2018 2:27:55.791 PM
+ val endTime = 1529419075791000L // Tuesday, June 19, 2018 2:37:55.791 PM
+ val request = TraceCountsRequest
+ .newBuilder()
+ .addFields(Field.newBuilder().setName(TraceIndexDoc.SERVICE_KEY_NAME).setValue(serviceName).build())
+ .addFields(Field.newBuilder().setName(TraceIndexDoc.OPERATION_KEY_NAME).setValue(operationName).build())
+ .setStartTime(startTime)
+ .setEndTime(endTime)
+ .setInterval(interval)
+ .build()
+ val queryGenerator = new TraceCountsQueryGenerator(spansIndexConfiguration, "spans", new WhitelistIndexFieldConfiguration)
+
+ When("generating query")
+ val query = queryGenerator.generate(request)
+ Then("generate a valid query")
+ query.getData(new Gson()).replaceAll("\n", "").replaceAll(" ", "") shouldEqual "{\"size\":0,\"query\":{\"bool\":{\"must\":[{\"range\":{\"starttime\":{\"from\":1529418475791000,\"to\":1529419075791000,\"include_lower\":true,\"include_upper\":true,\"boost\":1.0}}}],\"filter\":[{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.servicename\":{\"value\":\"svcName\",\"boost\":1.0}}},{\"term\":{\"spans.operationname\":{\"value\":\"opName\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"aggregations\":{\"countagg\":{\"histogram\":{\"field\":\"starttime\",\"interval\":6.0E7,\"offset\":0.0,\"order\":{\"_key\":\"asc\"},\"keyed\":false,\"min_doc_count\":0,\"extended_bounds\":{\"min\":1.529418475791E15,\"max\":1.529419075791E15}}}}}"
+ query.getURI shouldEqual "haystack-spans-2018-06-19-2/spans/_search"
+ }
+
+ it("should generate valid search queries for bucketed search count") {
+ Given("a trace search request")
+ val serviceName = "svcName"
+ val operationName = "opName"
+ val startTimeInMicros = 1
+ val endTimeInMicros = 1527487220L * 1000 * 1000 // May 28, 2018 6:00:20 AM
+ val request = TraceCountsRequest
+ .newBuilder()
+ .addFields(Field.newBuilder().setName(TraceIndexDoc.SERVICE_KEY_NAME).setValue(serviceName).build())
+ .addFields(Field.newBuilder().setName(TraceIndexDoc.OPERATION_KEY_NAME).setValue(operationName).build())
+ .setStartTime(startTimeInMicros)
+ .setEndTime(endTimeInMicros)
+ .setInterval(interval)
+ .build()
+ val queryGenerator = new TraceCountsQueryGenerator(spansIndexConfiguration, "spans", new WhitelistIndexFieldConfiguration)
+
+ When("generating query")
+ val query = queryGenerator.generate(request)
+
+ Then("generate a valid query")
+ query.getURI shouldEqual "haystack-spans/spans/_search"
+ }
+
+ it("should return a valid list of indexes for overlapping time range") {
+ Given("starttime and endtime")
+ val startTimeInMicros = 1527501725L * 1000 * 1000 // Monday, May 28, 2018 10:02:05 AM
+ val endTimeInMicros = 1527512524L * 1000 * 1000 // Monday, May 28, 2018 1:02:04 PM
+ val queryGenerator = new TraceCountsQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration())
+
+ When("retrieving index names")
+ val indexNames = queryGenerator.getESIndexes(startTimeInMicros, endTimeInMicros, INDEX_NAME_PREFIX, ES_INDEX_HOUR_BUCKET, ES_INDEX_HOUR_TTL)
+
+ Then("should get index names")
+ indexNames should not be null
+ indexNames.size shouldEqual 2
+ indexNames should contain allOf("haystack-spans-2018-05-28-1", "haystack-spans-2018-05-28-2")
+ }
+
+ it("should return a valid list of indexes") {
+ Given("starttime and endtime")
+ val startTimeInMicros = 1527487200L * 1000 * 1000 // May 28, 2018 6:00:00 AM
+ val endTimeInMicros = 1527508800L * 1000 * 1000 // May 28, 2018 12:00:00 PM
+ val queryGenerator = new TraceCountsQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration())
+
+ When("retrieving index names")
+ val indexNames = queryGenerator.getESIndexes(startTimeInMicros, endTimeInMicros, INDEX_NAME_PREFIX, ES_INDEX_HOUR_BUCKET, ES_INDEX_HOUR_TTL)
+
+ Then("should get index names")
+ indexNames should not be null
+ indexNames.size shouldEqual 2
+ indexNames should contain allOf("haystack-spans-2018-05-28-1", "haystack-spans-2018-05-28-2")
+ }
+
+ it("should return only a single index name for time range within same bucket") {
+ Given("starttime and endtime")
+ val starttimeInMicros = 1527487100L * 1000 * 1000 // May 28, 2018 5:58:20 AM
+ val endtimeInMicros = 1527487120L * 1000 * 1000 // May 28, 2018 5:58:40 AM
+ val queryGenerator = new TraceCountsQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration())
+
+ When("retrieving index names")
+ val indexNames = queryGenerator.getESIndexes(starttimeInMicros, endtimeInMicros, INDEX_NAME_PREFIX, ES_INDEX_HOUR_BUCKET, ES_INDEX_HOUR_TTL)
+
+ Then("should get index names")
+ indexNames should not be null
+ indexNames.size shouldBe 1
+ indexNames.head shouldEqual "haystack-spans-2018-05-28-0"
+ }
+
+ it("should return index alias (not return specific index) in case endtime minus starttime exceeds index retention") {
+ Given("starttime and endtime")
+ val startTimeInMicros = 0
+ val endTimeInMicros = 1527487220L * 1000 * 1000 // May 28, 2018 6:00:20 AM
+ val queryGenerator = new TraceCountsQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration())
+
+ When("retrieving index names")
+ val indexNames = queryGenerator.getESIndexes(startTimeInMicros, endTimeInMicros, INDEX_NAME_PREFIX, ES_INDEX_HOUR_BUCKET, ES_INDEX_HOUR_TTL)
+
+ Then("should get index names")
+ indexNames should not be null
+ indexNames.size shouldEqual 1
+ indexNames.head shouldEqual INDEX_NAME_PREFIX
+ }
+
+ it("should generate valid count queries for expression tree based search counts") {
+ Given("a trace count request")
+ val startTime = 1529418475791000L // Tuesday, June 19, 2018 2:27:55.791 PM
+ val endTime = 1529419075791000L
+ val request = TraceCountsRequest
+ .newBuilder()
+ .setFilterExpression(operandLevelExpressionTree)
+ .setStartTime(startTime)
+ .setEndTime(endTime)
+ .setInterval(interval)
+ .build()
+
+ val queryGenerator = new TraceCountsQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration())
+
+ When("generating query")
+ val query: Search = queryGenerator.generate(request)
+
+ Then("generate a valid query with fields in lowercase")
+ query.getData(new Gson()).replaceAll("\n", "").replaceAll(" ", "") shouldEqual
+ "{\"size\":0,\"query\":{\"bool\":{\"must\":[{\"range\":{\"starttime\":{\"from\":1529418475791000,\"to\":1529419075791000,\"include_lower\":true,\"include_upper\":true,\"boost\":1.0}}}],\"filter\":[{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.svcname\":{\"value\":\"svcValue\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.1\":{\"value\":\"1\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.2\":{\"value\":\"2\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.3\":{\"value\":\"3\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"aggregations\":{\"countagg\":{\"histogram\":{\"field\":\"starttime\",\"interval\":6.0E7,\"offset\":0.0,\"order\":{\"_key\":\"asc\"},\"keyed\":false,\"min_doc_count\":0,\"extended_bounds\":{\"min\":1.529418475791E15,\"max\":1.529419075791E15}}}}}"
+ query.getURI shouldEqual "haystack-spans-2018-06-19-2/spans/_search"
+ }
+
+ it("should generate valid count query for expression tree based searches with span level searches") {
+ Given("a trace count request")
+ val startTime = 1529418475791000L // Tuesday, June 19, 2018 2:27:55.791 PM
+ val endTime = 1529419075791000L
+ val request = TraceCountsRequest
+ .newBuilder()
+ .setFilterExpression(spanLevelExpressionTree)
+ .setStartTime(startTime)
+ .setEndTime(endTime)
+ .setInterval(interval)
+ .build()
+
+ val queryGenerator = new TraceCountsQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration())
+
+ When("generating query")
+ val query: Search = queryGenerator.generate(request)
+
+ Then("generate a valid query")
+ query.getData(new Gson()).replaceAll("\n", "").replaceAll(" ", "") shouldEqual
+ "{\"size\":0,\"query\":{\"bool\":{\"must\":[{\"range\":{\"starttime\":{\"from\":1529418475791000,\"to\":1529419075791000,\"include_lower\":true,\"include_upper\":true,\"boost\":1.0}}}],\"filter\":[{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.1\":{\"value\":\"1\",\"boost\":1.0}}},{\"term\":{\"spans.2\":{\"value\":\"2\",\"boost\":1.0}}},{\"term\":{\"spans.3\":{\"value\":\"3\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.4\":{\"value\":\"4\",\"boost\":1.0}}},{\"term\":{\"spans.5\":{\"value\":\"5\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.svcname\":{\"value\":\"svcValue\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.0\":{\"value\":\"0\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"aggregations\":{\"countagg\":{\"histogram\":{\"field\":\"starttime\",\"interval\":6.0E7,\"offset\":0.0,\"order\":{\"_key\":\"asc\"},\"keyed\":false,\"min_doc_count\":0,\"extended_bounds\":{\"min\":1.529418475791E15,\"max\":1.529419075791E15}}}}}"
+
+ query.getURI shouldEqual "haystack-spans-2018-06-19-2/spans/_search"
+ }
+
+ it("should generate valid count query for expression tree with duration field types") {
+ Given("a trace count request")
+ val queryGenerator = new TraceCountsQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration())
+ val startTime = 1529418475791000L // Tuesday, June 19, 2018 2:27:55.791 PM
+ val endTime = 1529419075791000L
+
+ val requests = Seq(expressionTreeWithDurationFields, anotherExpressionTreeWithDurationFields, oneMoreExpressionTreeWithDurationFields, expressionTreeWithGreaterThanOperator) map {
+ expression => {
+ TraceCountsRequest
+ .newBuilder()
+ .setFilterExpression(expression)
+ .setStartTime(startTime)
+ .setEndTime(endTime)
+ .setInterval(interval)
+ .build()
+ }
+ }
+ When("generating query")
+ val queries: Seq[Search] = requests.map(req => queryGenerator.generate(req))
+
+ Then("generate a valid query")
+ queries.map(query => query.getData(new Gson()).replaceAll("\n", "").replaceAll(" ", "")) shouldEqual Seq(
+ "{\"size\":0,\"query\":{\"bool\":{\"must\":[{\"range\":{\"starttime\":{\"from\":1529418475791000,\"to\":1529419075791000,\"include_lower\":true,\"include_upper\":true,\"boost\":1.0}}}],\"filter\":[{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.1\":{\"value\":\"1\",\"boost\":1.0}}},{\"term\":{\"spans.2\":{\"value\":\"2\",\"boost\":1.0}}},{\"term\":{\"spans.3\":{\"value\":\"3\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.4\":{\"value\":\"4\",\"boost\":1.0}}},{\"term\":{\"spans.5\":{\"value\":\"5\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.svcname\":{\"value\":\"svcValue\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"range\":{\"spans.duration\":{\"from\":500000,\"to\":null,\"include_lower\":false,\"include_upper\":true,\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"aggregations\":{\"countagg\":{\"histogram\":{\"field\":\"starttime\",\"interval\":6.0E7,\"offset\":0.0,\"order\":{\"_key\":\"asc\"},\"keyed\":false,\"min_doc_count\":0,\"extended_bounds\":{\"min\":1.529418475791E15,\"max\":1.529419075791E15}}}}}",
+ "{\"size\":0,\"query\":{\"bool\":{\"must\":[{\"range\":{\"starttime\":{\"from\":1529418475791000,\"to\":1529419075791000,\"include_lower\":true,\"include_upper\":true,\"boost\":1.0}}}],\"filter\":[{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.1\":{\"value\":\"1\",\"boost\":1.0}}},{\"term\":{\"spans.2\":{\"value\":\"2\",\"boost\":1.0}}},{\"term\":{\"spans.3\":{\"value\":\"3\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.4\":{\"value\":\"4\",\"boost\":1.0}}},{\"term\":{\"spans.5\":{\"value\":\"5\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.svcname\":{\"value\":\"svcValue\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"range\":{\"spans.duration\":{\"from\":null,\"to\":180000000,\"include_lower\":true,\"include_upper\":false,\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"aggregations\":{\"countagg\":{\"histogram\":{\"field\":\"starttime\",\"interval\":6.0E7,\"offset\":0.0,\"order\":{\"_key\":\"asc\"},\"keyed\":false,\"min_doc_count\":0,\"extended_bounds\":{\"min\":1.529418475791E15,\"max\":1.529419075791E15}}}}}",
+ "{\"size\":0,\"query\":{\"bool\":{\"must\":[{\"range\":{\"starttime\":{\"from\":1529418475791000,\"to\":1529419075791000,\"include_lower\":true,\"include_upper\":true,\"boost\":1.0}}}],\"filter\":[{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.1\":{\"value\":\"1\",\"boost\":1.0}}},{\"term\":{\"spans.2\":{\"value\":\"2\",\"boost\":1.0}}},{\"term\":{\"spans.3\":{\"value\":\"3\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.4\":{\"value\":\"4\",\"boost\":1.0}}},{\"term\":{\"spans.5\":{\"value\":\"5\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.svcname\":{\"value\":\"svcValue\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"range\":{\"spans.duration\":{\"from\":null,\"to\":2000000,\"include_lower\":true,\"include_upper\":false,\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"aggregations\":{\"countagg\":{\"histogram\":{\"field\":\"starttime\",\"interval\":6.0E7,\"offset\":0.0,\"order\":{\"_key\":\"asc\"},\"keyed\":false,\"min_doc_count\":0,\"extended_bounds\":{\"min\":1.529418475791E15,\"max\":1.529419075791E15}}}}}",
+ "{\"size\":0,\"query\":{\"bool\":{\"must\":[{\"range\":{\"starttime\":{\"from\":1529418475791000,\"to\":1529419075791000,\"include_lower\":true,\"include_upper\":true,\"boost\":1.0}}}],\"filter\":[{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.1\":{\"value\":\"1\",\"boost\":1.0}}},{\"term\":{\"spans.2\":{\"value\":\"2\",\"boost\":1.0}}},{\"term\":{\"spans.3\":{\"value\":\"3\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.4\":{\"value\":\"4\",\"boost\":1.0}}},{\"term\":{\"spans.5\":{\"value\":\"5\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.svcname\":{\"value\":\"svcValue\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"range\":{\"spans.duration\":{\"from\":240000,\"to\":null,\"include_lower\":false,\"include_upper\":true,\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"aggregations\":{\"countagg\":{\"histogram\":{\"field\":\"starttime\",\"interval\":6.0E7,\"offset\":0.0,\"order\":{\"_key\":\"asc\"},\"keyed\":false,\"min_doc_count\":0,\"extended_bounds\":{\"min\":1.529418475791E15,\"max\":1.529419075791E15}}}}}")
+
+ queries.map(query => query.getURI).toSet shouldEqual Set("haystack-spans-2018-06-19-2/spans/_search")
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/TraceSearchQueryGeneratorSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/TraceSearchQueryGeneratorSpec.scala
new file mode 100644
index 000000000..c49403195
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/TraceSearchQueryGeneratorSpec.scala
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.expedia.www.haystack.trace.reader.unit.stores.readers.es.query
+
+import com.expedia.open.tracing.api.{Field, TracesSearchRequest}
+import com.expedia.www.haystack.trace.commons.clients.es.document.TraceIndexDoc
+import com.expedia.www.haystack.trace.commons.config.entities.WhitelistIndexFieldConfiguration
+import com.expedia.www.haystack.trace.reader.config.entities.SpansIndexConfiguration
+import com.expedia.www.haystack.trace.reader.stores.readers.es.ESUtils._
+import com.expedia.www.haystack.trace.reader.stores.readers.es.query.TraceSearchQueryGenerator
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+import com.expedia.www.haystack.trace.reader.unit.stores.readers.es.query.helper.ExpressionTreeBuilder._
+import com.google.gson.Gson
+import io.searchbox.core.Search
+import org.scalatest.BeforeAndAfterEach
+
+class TraceSearchQueryGeneratorSpec extends BaseUnitTestSpec with BeforeAndAfterEach {
+ private val spansIndexConfiguration = SpansIndexConfiguration(
+ indexNamePrefix = "haystack-traces",
+ indexType = "spans",
+ indexHourTtl = 72,
+ indexHourBucket = 6,
+ useRootDocumentStartTime = false)
+
+ var timezone: String = _
+
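+ // Every test runs under a non-UTC JVM timezone (restored afterwards) to prove
+ // that index resolution is based on UTC rather than the system default.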
+ override def beforeEach(): Unit = {
+ timezone = System.getProperty("user.timezone")
+ System.setProperty("user.timezone", "CST")
+ }
+
+ override def afterEach(): Unit = {
+ System.setProperty("user.timezone", timezone)
+ }
+
+ describe("TraceSearchQueryGenerator") {
+ it("should generate valid search queries") {
+ Given("a trace search request")
+ val serviceName = "svcName"
+ val operationName = "opName"
+ val request = TracesSearchRequest
+ .newBuilder()
+ .addFields(Field.newBuilder().setName(TraceIndexDoc.SERVICE_KEY_NAME).setValue(serviceName).build())
+ .addFields(Field.newBuilder().setName("operation").setValue(operationName).build())
+ .setStartTime(1)
+ .setEndTime(System.currentTimeMillis() * 1000)
+ .setLimit(10)
+ .build()
+ val queryGenerator = new TraceSearchQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration())
+
+ When("generating query")
+ val query = queryGenerator.generate(request)
+
+ Then("generate a valid query")
+ query.getType should be("spans")
+ }
+
+ it("should generate caption independent search queries") {
+ Given("a trace search request")
+ val fieldKey = "svcName"
+ val fieldValue = "opName"
+ val request = TracesSearchRequest
+ .newBuilder()
+ .addFields(Field.newBuilder().setName(fieldKey).setValue(fieldValue).build())
+ .setStartTime(1)
+ .setEndTime(System.currentTimeMillis() * 1000)
+ .setLimit(10)
+ .build()
+ val queryGenerator = new TraceSearchQueryGenerator(spansIndexConfiguration, "spans", new WhitelistIndexFieldConfiguration)
+
+ When("generating query")
+ val query: Search = queryGenerator.generate(request)
+
+ Then("generate a valid query with fields in lowercase")
+ query.toJson.contains(fieldKey.toLowerCase()) should be(true)
+ }
+
+ it("should generate valid search queries for expression tree based searches") {
+ Given("a trace search request")
+
+ val request = TracesSearchRequest
+ .newBuilder()
+ .setFilterExpression(operandLevelExpressionTree)
+ .setStartTime(1)
+ .setEndTime(System.currentTimeMillis() * 1000)
+ .setLimit(10)
+ .build()
+
+ val queryGenerator = new TraceSearchQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration())
+
+ When("generating query")
+ val query: Search = queryGenerator.generate(request)
+
+ Then("generate a valid query with fields in lowercase")
+ query.toJson.contains(fieldKey.toLowerCase()) should be(true)
+ }
+
+ it("should generate valid search queries for expression tree based searches with span level searches") {
+ Given("a trace search request")
+
+ val startTime = 1531454400L * 1000 * 1000 // July 13, 2018 04:00:00 AM (in microSec)
+ val endTime = 1531476000L * 1000 * 1000 // July 13, 2018 10:00:00 AM (in microSec)
+
+ val request = TracesSearchRequest
+ .newBuilder()
+ .setFilterExpression(spanLevelExpressionTree)
+ .setStartTime(startTime)
+ .setEndTime(endTime)
+ .setLimit(10)
+ .build()
+
+ val queryGenerator = new TraceSearchQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration())
+
+ When("generating query")
+ val query: Search = queryGenerator.generate(request)
+
+ Then("generate a valid query with fields in lowercase")
+ query.toJson.contains(fieldKey.toLowerCase()) should be(true)
+ query.getIndex shouldBe "haystack-traces-2018-07-13-0,haystack-traces-2018-07-13-1"
+ }
+
+ it("should use UTC when determining which indexes to read") {
+ Given("the system timezone is NOT UTC")
+ System.setProperty("user.timezone", "CST")
+
+ When("getting the indexes")
+ val esIndexes = new TraceSearchQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration()).getESIndexes(1530806291394000L, 1530820646394000L, "haystack-traces", 4, 24)
+
+ Then("they are correct based off of UTC")
+ esIndexes shouldBe Vector("haystack-traces-2018-07-05-3", "haystack-traces-2018-07-05-4")
+ }
+
+ it("should query the mentioned index rather that calculated one") {
+ Given("a trace search request")
+
+ val request = TracesSearchRequest
+ .newBuilder()
+ .setFilterExpression(spanLevelExpressionTree)
+ .setStartTime(1)
+ .setEndTime(System.currentTimeMillis() * 1000)
+ .setLimit(10)
+ .build()
+
+ val queryGenerator = new TraceSearchQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration())
+
+ When("generating query")
+ val query: Search = queryGenerator.generate(request, useSpecificIndices = false)
+
+ Then("generate a valid query with given index name")
+ query.toJson.contains(fieldKey.toLowerCase()) should be(true)
+ query.getIndex shouldBe "haystack-traces"
+ }
+
+ it("should generate valid count query for expression tree with duration field types") {
+ Given("a trace count request")
+ val queryGenerator = new TraceSearchQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration())
+ val requests = Seq(expressionTreeWithDurationFields) map {
+ expression => {
+ TracesSearchRequest
+ .newBuilder()
+ .setFilterExpression(expression)
+ .setStartTime(1)
+ .setEndTime(1100 * 1000 * 1000)
+ .setLimit(10)
+ .build()
+ }
+ }
+ When("generating query")
+ val queries: Seq[Search] = requests.map(req => queryGenerator.generate(req, useSpecificIndices = false))
+
+ Then("generate a valid query")
+ queries.map(query => query.getData(new Gson()).replaceAll("\n", "").replaceAll(" ", "")) shouldEqual Seq(
+ "{\"size\":10,\"query\":{\"bool\":{\"must\":[{\"nested\":{\"query\":{\"range\":{\"spans.starttime\":{\"from\":1,\"to\":1100000000,\"include_lower\":true,\"include_upper\":true,\"boost\":1.0}}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"filter\":[{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.1\":{\"value\":\"1\",\"boost\":1.0}}},{\"term\":{\"spans.2\":{\"value\":\"2\",\"boost\":1.0}}},{\"term\":{\"spans.3\":{\"value\":\"3\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.4\":{\"value\":\"4\",\"boost\":1.0}}},{\"term\":{\"spans.5\":{\"value\":\"5\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"term\":{\"spans.svcname\":{\"value\":\"svcValue\",\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}},{\"nested\":{\"query\":{\"bool\":{\"filter\":[{\"range\":{\"spans.duration\":{\"from\":500000,\"to\":null,\"include_lower\":false,\"include_upper\":true,\"boost\":1.0}}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"path\":\"spans\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true,\"boost\":1.0}},\"sort\":[{\"spans.starttime\":{\"order\":\"desc\",\"nested_path\":\"spans\"}}]}")
+ }
+ }
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/helper/ExpressionTreeBuilder.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/helper/ExpressionTreeBuilder.scala
new file mode 100644
index 000000000..1789cf4eb
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/es/query/helper/ExpressionTreeBuilder.scala
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.stores.readers.es.query.helper
+
+import com.expedia.open.tracing.api.ExpressionTree.Operator
+import com.expedia.open.tracing.api.{ExpressionTree, Field, Operand}
+
+object ExpressionTreeBuilder {
+
+ val fieldKey = "svcName"
+ val fieldValue = "svcValue"
+
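+ // Building blocks for the query-generator specs: two span-level AND subtrees over
+ // dummy numeric fields, combined into operand-level and span-level trees plus
+ // variants mixing in duration fields with EQUAL / LESS_THAN / GREATER_THAN operators.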
+ private val spanLevelTreeFirst = ExpressionTree
+ .newBuilder()
+ .setOperator(Operator.AND)
+ .setIsSpanLevelExpression(true)
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName("1").setValue("1")))
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName("2").setValue("2")))
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName("3").setValue("3")))
+ .build()
+
+ private val spanLevelTreeSecond = ExpressionTree
+ .newBuilder()
+ .setOperator(Operator.AND)
+ .setIsSpanLevelExpression(true)
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName("4").setValue("4")))
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName("5").setValue("5")))
+ .build()
+
+
+ val operandLevelExpressionTree: ExpressionTree = ExpressionTree
+ .newBuilder()
+ .setOperator(Operator.AND)
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName(fieldKey).setValue(fieldValue)))
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName("1").setValue("1")))
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName("2").setValue("2")))
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName("3").setValue("3")))
+ .build()
+
+ val spanLevelExpressionTree: ExpressionTree = ExpressionTree
+ .newBuilder()
+ .setOperator(Operator.AND)
+ .setIsSpanLevelExpression(true)
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName(fieldKey).setValue(fieldValue)))
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName("0").setValue("0")))
+ .addOperands(Operand.newBuilder().setExpression(spanLevelTreeFirst))
+ .addOperands(Operand.newBuilder().setExpression(spanLevelTreeSecond))
+ .build()
+
+ val expressionTreeWithDurationFields: ExpressionTree = ExpressionTree
+ .newBuilder()
+ .setOperator(Operator.AND)
+ .setIsSpanLevelExpression(true)
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName(fieldKey).setValue(fieldValue)))
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName("duration").setValue("500000").setOperator(Field.Operator.GREATER_THAN)))
+ .addOperands(Operand.newBuilder().setExpression(spanLevelTreeFirst))
+ .addOperands(Operand.newBuilder().setExpression(spanLevelTreeSecond))
+ .build()
+
+ val anotherExpressionTreeWithDurationFields: ExpressionTree = ExpressionTree
+ .newBuilder()
+ .setOperator(Operator.AND)
+ .setIsSpanLevelExpression(true)
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName(fieldKey).setValue(fieldValue)))
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName("duration").setOperator(Field.Operator.LESS_THAN).setValue("180000000")))
+ .addOperands(Operand.newBuilder().setExpression(spanLevelTreeFirst))
+ .addOperands(Operand.newBuilder().setExpression(spanLevelTreeSecond))
+ .build()
+
+ val oneMoreExpressionTreeWithDurationFields: ExpressionTree = ExpressionTree
+ .newBuilder()
+ .setOperator(Operator.AND)
+ .setIsSpanLevelExpression(true)
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName(fieldKey).setValue(fieldValue).setOperator(Field.Operator.EQUAL)))
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName("duration").setOperator(Field.Operator.LESS_THAN).setValue("2000000")))
+ .addOperands(Operand.newBuilder().setExpression(spanLevelTreeFirst))
+ .addOperands(Operand.newBuilder().setExpression(spanLevelTreeSecond))
+ .build()
+
+ val expressionTreeWithGreaterThanOperator: ExpressionTree = ExpressionTree
+ .newBuilder()
+ .setOperator(Operator.AND)
+ .setIsSpanLevelExpression(true)
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName(fieldKey).setValue(fieldValue).setOperator(Field.Operator.EQUAL)))
+ .addOperands(Operand.newBuilder().setField(Field.newBuilder().setName("duration").setOperator(Field.Operator.GREATER_THAN).setValue("240000")))
+ .addOperands(Operand.newBuilder().setExpression(spanLevelTreeFirst))
+ .addOperands(Operand.newBuilder().setExpression(spanLevelTreeSecond))
+ .build()
+}
diff --git a/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/grpc/ReadSpansResponseListenerSpec.scala b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/grpc/ReadSpansResponseListenerSpec.scala
new file mode 100644
index 000000000..569d87b24
--- /dev/null
+++ b/traces/reader/src/test/scala/com/expedia/www/haystack/trace/reader/unit/stores/readers/grpc/ReadSpansResponseListenerSpec.scala
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.expedia.www.haystack.trace.reader.unit.stores.readers.grpc
+
+import java.util.concurrent.Future
+
+import com.codahale.metrics.{Meter, Timer}
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.api.Trace
+import com.expedia.open.tracing.backend.{ReadSpansResponse, TraceRecord}
+import com.expedia.open.tracing.buffer.SpanBuffer
+import com.expedia.www.haystack.trace.commons.packer.NoopPacker
+import com.expedia.www.haystack.trace.reader.stores.readers.grpc.ReadSpansResponseListener
+import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
+import com.google.protobuf.ByteString
+import io.grpc.{Status, StatusException}
+import org.easymock.EasyMock
+
+import scala.collection.JavaConverters._
+import scala.concurrent.Promise
+
+class ReadSpansResponseListenerSpec extends BaseUnitTestSpec {
+ val packer = new NoopPacker[SpanBuffer]
+
+ describe("read span response listener for raw traces") {
+ it("should read the trace-records, de-serialized spans and return the complete trace") {
+ val mockReadResult = mock[Future[ReadSpansResponse]]
+
+ val promise = mock[Promise[Seq[Trace]]]
+ val failureMeter = mock[Meter]
+ val tracesFailures = mock[Meter]
+ val timer = mock[Timer.Context]
+
+ val span_1 = Span.newBuilder().setTraceId("TRACE_ID1").setSpanId("SPAN_ID_1")
+ val spanBuffer_1 = packer.apply(SpanBuffer.newBuilder().setTraceId("TRACE_ID1").addChildSpans(span_1).build())
+ val traceRecord_1 = TraceRecord.newBuilder()
+ .setTraceId("TRACE_ID1")
+ .setTimestamp(System.currentTimeMillis())
+ .setSpans(ByteString.copyFrom(spanBuffer_1.packedDataBytes))
+ .build()
+ val span_2 = Span.newBuilder().setTraceId("TRACE_ID1").setSpanId("SPAN_ID_2")
+ val spanBuffer_2 = packer.apply(SpanBuffer.newBuilder().setTraceId("TRACE_ID1").addChildSpans(span_2).build())
+ val traceRecord_2 = TraceRecord.newBuilder()
+ .setTraceId("TRACE_ID1")
+ .setTimestamp(System.currentTimeMillis())
+ .setSpans(ByteString.copyFrom(spanBuffer_2.packedDataBytes))
+ .build()
+
+ val span_3 = Span.newBuilder().setTraceId("TRACE_ID3").setSpanId("SPAN_ID_3")
+ val spanBuffer_3 = packer.apply(SpanBuffer.newBuilder().setTraceId("TRACE_ID3").addChildSpans(span_3).build())
+ val traceRecord_3 = TraceRecord.newBuilder()
+ .setTraceId("TRACE_ID3")
+ .setTimestamp(System.currentTimeMillis())
+ .setSpans(ByteString.copyFrom(spanBuffer_3.packedDataBytes))
+ .build()
+
+ val readSpanResponse = ReadSpansResponse.newBuilder().addAllRecords(List(traceRecord_1, traceRecord_2, traceRecord_3).asJava).build()
+ val capturedTraces = EasyMock.newCapture[Seq[Trace]]()
+ val capturedMeter = EasyMock.newCapture[Int]()
+ expecting {
+ timer.close()
+ tracesFailures.mark(EasyMock.capture(capturedMeter))
+ mockReadResult.get().andReturn(readSpanResponse)
+ promise.success(EasyMock.capture(capturedTraces)).andReturn(promise)
+ }
+
+ whenExecuting(mockReadResult, promise, tracesFailures, failureMeter, timer) {
+ val listener = new ReadSpansResponseListener(mockReadResult, promise, timer, failureMeter, tracesFailures, 2)
+ listener.run()
+ val traceIdSpansMap: Map[String, Set[String]] = capturedTraces.getValue.map(capturedTrace =>
+ capturedTrace.getTraceId -> capturedTrace.getChildSpansList.asScala.map(_.getSpanId).toSet).toMap
+
+ traceIdSpansMap("TRACE_ID1") shouldEqual Set("SPAN_ID_1", "SPAN_ID_2")
+ traceIdSpansMap("TRACE_ID3") shouldEqual Set("SPAN_ID_3")
+
+ capturedMeter.getValue shouldEqual 0
+ }
+ }
+
+ it("should return an exception for empty traceId") {
+ val mockReadResult = mock[Future[ReadSpansResponse]]
+ val promise = mock[Promise[Seq[Trace]]]
+ val failureMeter = mock[Meter]
+ val tracesFailures = mock[Meter]
+ val timer = mock[Timer.Context]
+ val readSpansResponse = ReadSpansResponse.newBuilder().build()
+ val capturedException = EasyMock.newCapture[StatusException]()
+ val capturedMeter = EasyMock.newCapture[Int]()
+ expecting {
+ timer.close()
+ failureMeter.mark()
+ tracesFailures.mark(EasyMock.capture(capturedMeter))
+ mockReadResult.get().andReturn(readSpansResponse)
+ promise.failure(EasyMock.capture(capturedException)).andReturn(promise)
+ }
+
+ whenExecuting(mockReadResult, promise, failureMeter, tracesFailures, timer) {
+ val listener = new ReadSpansResponseListener(mockReadResult, promise, timer, failureMeter, tracesFailures, 0)
+ listener.run()
+ capturedException.getValue.getStatus.getCode shouldEqual Status.NOT_FOUND.getCode
+ capturedMeter.getValue shouldEqual 0
+ }
+ }
+
+ }
+}
diff --git a/trends/.gitignore b/trends/.gitignore
new file mode 100644
index 000000000..0071b9e08
--- /dev/null
+++ b/trends/.gitignore
@@ -0,0 +1,11 @@
+*.log
+*.ipr
+*.iws
+.classpath
+.project
+target/
+lib/
+logs/
+**/.idea/
+*.iml
+*.DS_Store
diff --git a/trends/.mvn/wrapper/maven-wrapper.jar b/trends/.mvn/wrapper/maven-wrapper.jar
new file mode 100755
index 000000000..08ebbb67f
Binary files /dev/null and b/trends/.mvn/wrapper/maven-wrapper.jar differ
diff --git a/trends/.mvn/wrapper/maven-wrapper.properties b/trends/.mvn/wrapper/maven-wrapper.properties
new file mode 100755
index 000000000..a5fcc1192
--- /dev/null
+++ b/trends/.mvn/wrapper/maven-wrapper.properties
@@ -0,0 +1 @@
+distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.5.2/apache-maven-3.5.2-bin.zip
\ No newline at end of file
diff --git a/trends/.travis.yml b/trends/.travis.yml
new file mode 100644
index 000000000..f86c8adb6
--- /dev/null
+++ b/trends/.travis.yml
@@ -0,0 +1,32 @@
+sudo: required
+dist: trusty
+
+language: java
+
+jdk:
+ - oraclejdk8
+
+services:
+ - docker
+
+env:
+ global:
+ - BRANCH=${TRAVIS_BRANCH}
+ - TAG=${TRAVIS_TAG}
+ - SHA=${TRAVIS_COMMIT}
+
+cache:
+ directories:
+ - $HOME/.m2
+
+script:
+ # build, create docker image
+ # upload to dockerhub only for master (non-PR) builds and for tags
+ - if ([ "$TRAVIS_BRANCH" == "master" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ]) || [ -n "$TRAVIS_TAG" ]; then make release; else make all; fi
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash) || echo 'Codecov failed to upload'
+
+notifications:
+ email:
+ - haystack-notifications@expedia.com
diff --git a/trends/CONTRIBUTING.md b/trends/CONTRIBUTING.md
new file mode 100644
index 000000000..fe9cb7577
--- /dev/null
+++ b/trends/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+## Bugs
+We use GitHub Issues for our bug reporting. Please make sure the bug isn't already listed before opening a new issue.
+
+## Development
+All work on Haystack happens directly on GitHub. Core Haystack team members will review opened pull requests.
+
+## Requests
+If you see a feature that you would like to be added, please open an issue in the respective repository or in the general Haystack repo.
+
+## Contributing to Documentation
+To contribute to documentation, you can directly modify the corresponding .md files in the docs directory under the base haystack repository, and submit a pull request. Once your PR is merged, the documentation is automatically built and deployed to https://expediadotcom.github.io/haystack.
+
+## License
+By contributing to Haystack, you agree that your contributions will be licensed under its Apache License.
diff --git a/trends/LICENSE b/trends/LICENSE
new file mode 100644
index 000000000..9f133f5cd
--- /dev/null
+++ b/trends/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2017 Expedia, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/trends/Makefile b/trends/Makefile
new file mode 100644
index 000000000..8fc31e38d
--- /dev/null
+++ b/trends/Makefile
@@ -0,0 +1,36 @@
+.PHONY: all build_transformer build_aggregator span-timeseries-transformer timeseries-aggregator release
+
+PWD := $(shell pwd)
+MAVEN := ./mvnw
+
+clean:
+ ${MAVEN} clean
+
+build: clean
+ ${MAVEN} install package
+
+all: clean build_transformer build_aggregator span-timeseries-transformer timeseries-aggregator
+
+report-coverage:
+ ${MAVEN} scoverage:report-only
+
+build_transformer:
+ ${MAVEN} package -DfinalName=haystack-span-timeseries-transformer -pl span-timeseries-transformer -am
+
+span-timeseries-transformer:
+ $(MAKE) -C span-timeseries-transformer all
+
+timeseries-aggregator:
+ $(MAKE) -C timeseries-aggregator all
+
+build_aggregator:
+ ${MAVEN} package -DfinalName=haystack-timeseries-aggregator -pl timeseries-aggregator -am
+
+# build all and release
+release: clean build_transformer build_aggregator
+ cd span-timeseries-transformer && $(MAKE) release
+ cd timeseries-aggregator && $(MAKE) release
+ ./.travis/deploy.sh
diff --git a/trends/README.md b/trends/README.md
new file mode 100644
index 000000000..2676525f0
--- /dev/null
+++ b/trends/README.md
@@ -0,0 +1,77 @@
+[](https://travis-ci.org/ExpediaDotCom/haystack-trends)
+[](https://codecov.io/gh/ExpediaDotCom/haystack-trends)
+
+# Haystack Trends
+
+haystack-trends contains the modules required for trending the spans pushed to haystack. We currently plan to compute three trends for each
+combination of `service_name` and `operation_name` contained in the spans (refer to the [span schema](https://github.com/ExpediaDotCom/haystack-idl/blob/master/proto/span.proto) for details of the fields in a span):
+
+1. success_count `[1min, 5min, 15min, 1hour]`
+2. failure_count `[1min, 5min, 15min, 1hour]`
+3. duration `[mean, median, std-dev, 99th percentile, 95th percentile]`
+
+> *Note:* If an error tag is present and has a value of true, then the span will be treated as a failure. In all other scenarios, it will be treated as a success.
+
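+A minimal sketch of that classification rule, assuming the haystack `Span`/`Tag` protobuf accessors (`isFailedSpan` is an illustrative helper, not the actual transformer code):
+
+```scala
+import com.expedia.open.tracing.Span
+import scala.collection.JavaConverters._
+
+// a span counts as a failure only if an "error" tag is present and true;
+// everything else is treated as a success (assumed helper, for illustration)
+def isFailedSpan(span: Span): Boolean =
+  span.getTagsList.asScala.exists { tag =>
+    tag.getKey.equalsIgnoreCase("error") &&
+      (tag.getVBool || tag.getVStr.equalsIgnoreCase("true"))
+  }
+```
+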
+More trends can be computed by adding a [transformer](https://github.com/ExpediaDotCom/haystack-trends/tree/master/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/transformer)
+to create the metric point and adding an [aggregation-rule](https://github.com/ExpediaDotCom/haystack-trends/tree/master/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules) for it.
+
+## Required Reading
+
+To understand haystack-trends, one must be familiar with the [haystack](https://github.com/ExpediaDotCom/haystack) project. It is written in [kafka-streams](http://docs.confluent.io/current/streams/index.html),
+so some prior knowledge of kafka-streams is useful.
+
+## Technical Details
+
+![High Level Block Diagram](documents/diagrams/haystack_trends.png)
+
+Haystack trends is a collection of modules that read spans and push aggregated metric points to kafka. Each module runs as an individual app, and the apps talk to each other via kafka.
+
+* [span-timeseries-transformer](https://github.com/ExpediaDotCom/haystack-trends/tree/master/span-timeseries-transformer) - this app is responsible
+for reading spans, converting them to metric points, and pushing the raw metric points to kafka, partitioned by metric-key.
+
+* [timeseries-aggregator](https://github.com/ExpediaDotCom/haystack-trends/tree/master/timeseries-aggregator) - this app is responsible
+for reading metric points, aggregating them based on rules, and pushing the aggregated metric points to kafka.
+
+The timeseries metric points are metrictank compliant and can be consumed directly by [metrictank](https://github.com/grafana/metrictank). One can write their own serde to push the metrics out in some other timeseries format.
+
+Sample [MetricPoint](https://github.com/ExpediaDotCom/haystack-trends/blob/master/commons/src/main/scala/com/expedia/www/haystack/trends/commons/entities/MetricPoint.scala) :
+```json
+{
+ "type": "count",
+ "metric": "duration",
+ "tags": {
+ "client": "expweb",
+ "operationName": "getOffers"
+ },
+ "epochTimeInSeconds": 1492641000,
+ "value": 420.02
+}
+```
+
+The raw and aggregated metric points share the same json schema but are pushed to different kafka topics.
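+
+As mentioned above, a custom serde can push metrics out in another timeseries format. A minimal sketch, assuming the `MetricPoint` shape shown in the sample and kafka-clients' `Serializer` API (the class and case-class here are illustrative, not the project's actual code):
+
+```scala
+import java.nio.charset.StandardCharsets
+import java.util
+import org.apache.kafka.common.serialization.Serializer
+
+// assumed shape, mirroring the sample json above
+case class MetricPoint(`type`: String, metric: String, tags: Map[String, String],
+                       epochTimeInSeconds: Long, value: Double)
+
+// hypothetical serializer that writes each MetricPoint as a UTF-8 json blob
+class JsonMetricPointSerializer extends Serializer[MetricPoint] {
+  override def configure(configs: util.Map[String, _], isKey: Boolean): Unit = ()
+  override def serialize(topic: String, mp: MetricPoint): Array[Byte] = {
+    val tags = mp.tags.map { case (k, v) => s""""$k":"$v"""" }.mkString(",")
+    val json = s"""{"type":"${mp.`type`}","metric":"${mp.metric}","tags":{$tags},""" +
+      s""""epochTimeInSeconds":${mp.epochTimeInSeconds},"value":${mp.value}}"""
+    json.getBytes(StandardCharsets.UTF_8)
+  }
+  override def close(): Unit = ()
+}
+```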
+
+## Building
+
+#### Prerequisite:
+
+* Make sure you have Java 1.8
+* Make sure you have docker 1.13 or higher
+
+#### Build
+
+You can build the individual subdirectories if you're working on a specific sub-app, but if you are making changes to a contract
+such as the span or metric point, which would affect multiple modules, you should run
+
+```
+make all
+```
+This builds all the individual apps, runs the unit tests, builds the jars and docker images, and runs the integration tests for haystack-trends.
+
+
+#### Integration Test
+
+If you are developing and just want to run integration tests
+```
+make integration_test
+```
diff --git a/trends/Release.md b/trends/Release.md
new file mode 100644
index 000000000..d9ceb9724
--- /dev/null
+++ b/trends/Release.md
@@ -0,0 +1,10 @@
+# Releasing
+Currently we publish releases to Docker Hub and the Nexus central repository.
+
+# How to release and publish
+
+* Git tagging:
+
+```git tag -a 1.x.x -m "Release description..."```
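+
+then push the tag to trigger the CI release build (assuming tags trigger the Travis release, per .travis.yml):
+
+```git push origin 1.x.x```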
+
+Or you can tag via the GitHub UI: https://github.com/ExpediaDotCom/haystack-trends/releases
\ No newline at end of file
diff --git a/trends/deployment/scripts/publish-to-docker-hub.sh b/trends/deployment/scripts/publish-to-docker-hub.sh
new file mode 100755
index 000000000..0ff8e3bf4
--- /dev/null
+++ b/trends/deployment/scripts/publish-to-docker-hub.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+set -e
+
+QUALIFIED_DOCKER_IMAGE_NAME=$DOCKER_ORG/$DOCKER_IMAGE_NAME
+echo "DOCKER_ORG=$DOCKER_ORG, DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME, QUALIFIED_DOCKER_IMAGE_NAME=$QUALIFIED_DOCKER_IMAGE_NAME"
+echo "BRANCH=$BRANCH, TAG=$TAG, SHA=$SHA"
+
+# login
+docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD
+
+# Add tags
+if [[ $TAG =~ ([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
+ echo "releasing semantic versions"
+
+ unset MAJOR MINOR PATCH
+ MAJOR="${BASH_REMATCH[1]}"
+ MINOR="${BASH_REMATCH[2]}"
+ PATCH="${BASH_REMATCH[3]}"
+
+ # for tag, add MAJOR, MAJOR.MINOR, MAJOR.MINOR.PATCH and latest as tag
+ # publish image with tags
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR
+
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR
+
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR.$PATCH
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR.$PATCH
+
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:latest
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME:latest
+
+elif [[ "$BRANCH" == "master" ]]; then
+ echo "releasing master branch"
+
+ # for 'master' branch, add SHA as tags
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$SHA
+
+ # publish image with tags
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME
+fi
diff --git a/trends/deployment/terraform/main.tf b/trends/deployment/terraform/main.tf
new file mode 100644
index 000000000..4469e4333
--- /dev/null
+++ b/trends/deployment/terraform/main.tf
@@ -0,0 +1,74 @@
+locals {
+ external_metric_tank_enabled = "${var.metrictank["external_hostname"] != "" && var.metrictank["external_kafka_broker_hostname"] != "" ? "true" : "false"}"
+}
+
+//metrictank for haystack-apps
+module "metrictank" {
+ source = "metrictank"
+ replicas = "${var.metrictank["instances"]}"
+ cassandra_address = "${var.cassandra_hostname}:${var.cassandra_port}"
+ tag_support = "${var.metrictank["tag_support"]}"
+ kafka_address = "${var.kafka_hostname}:${var.kafka_port}"
+ namespace = "${var.app_namespace}"
+ graphite_address = "${var.graphite_hostname}:${var.graphite_port}"
+ enabled = "${local.external_metric_tank_enabled == "true" ? "false" : "true" }"
+ memory_limit = "${var.metrictank["memory_limit"]}"
+ memory_request = "${var.metrictank["memory_request"]}"
+ cpu_limit = "${var.metrictank["cpu_limit"]}"
+ cpu_request = "${var.metrictank["cpu_request"]}"
+ node_selecter_label = "${var.node_selector_label}"
+ kubectl_executable_name = "${var.kubectl_executable_name}"
+ kubectl_context_name = "${var.kubectl_context_name}"
+ env_vars = "${var.metrictank["environment_overrides"]}"
+
+}
+module "span-timeseries-transformer" {
+ source = "span-timeseries-transformer"
+ image = "expediadotcom/haystack-span-timeseries-transformer:${var.trends["version"]}"
+ replicas = "${var.trends["span_timeseries_transformer_instances"]}"
+ namespace = "${var.app_namespace}"
+ kafka_endpoint = "${var.kafka_hostname}:${var.kafka_port}"
+ graphite_hostname = "${var.graphite_hostname}"
+ graphite_port = "${var.graphite_port}"
+ graphite_enabled = "${var.graphite_enabled}"
+ node_selecter_label = "${var.node_selector_label}"
+ enabled = "${var.trends["enabled"]}"
+ kubectl_executable_name = "${var.kubectl_executable_name}"
+ kubectl_context_name = "${var.kubectl_context_name}"
+ cpu_limit = "${var.trends["span_timeseries_transformer_cpu_limit"]}"
+ cpu_request = "${var.trends["span_timeseries_transformer_cpu_request"]}"
+ memory_limit = "${var.trends["span_timeseries_transformer_memory_limit"]}"
+ memory_request = "${var.trends["span_timeseries_transformer_memory_request"]}"
+ jvm_memory_limit = "${var.trends["span_timeseries_transformer_jvm_memory_limit"]}"
+ env_vars = "${var.trends["span_timeseries_transformer_environment_overrides"]}"
+ kafka_num_stream_threads = "${var.trends["span_timeseries_transformer_kafka_num_stream_threads"]}"
+ metricpoint_encoder_type = "${var.trends["metricpoint_encoder_type"]}"
+}
+module "timeseries-aggregator" {
+ source = "timeseries-aggregator"
+ image = "expediadotcom/haystack-timeseries-aggregator:${var.trends["version"]}"
+ replicas = "${var.trends["timeseries_aggregator_instances"]}"
+ namespace = "${var.app_namespace}"
+ kafka_endpoint = "${var.kafka_hostname}:${var.kafka_port}"
+ graphite_hostname = "${var.graphite_hostname}"
+ graphite_port = "${var.graphite_port}"
+ graphite_enabled = "${var.graphite_enabled}"
+ enable_external_kafka_producer = "${local.external_metric_tank_enabled}"
+ enable_metrics_sink = "${var.trends["timeseries_aggregator_enable_metrics_sink"]}"
+ external_kafka_producer_endpoint = "${var.metrictank["external_kafka_broker_hostname"]}:${var.metrictank["external_kafka_broker_port"]}"
+ node_selecter_label = "${var.node_selector_label}"
+ enabled = "${var.trends["enabled"]}"
+ kubectl_executable_name = "${var.kubectl_executable_name}"
+ kubectl_context_name = "${var.kubectl_context_name}"
+ cpu_limit = "${var.trends["timeseries_aggregator_cpu_limit"]}"
+ cpu_request = "${var.trends["timeseries_aggregator_cpu_request"]}"
+ memory_limit = "${var.trends["timeseries_aggregator_memory_limit"]}"
+ memory_request = "${var.trends["timeseries_aggregator_memory_request"]}"
+ jvm_memory_limit = "${var.trends["timeseries_aggregator_jvm_memory_limit"]}"
+ env_vars = "${var.trends["timeseries_aggregator_environment_overrides"]}"
+ metricpoint_encoder_type = "${var.trends["metricpoint_encoder_type"]}"
+ histogram_max_value = "${var.trends["timeseries_aggregator_histogram_max_value"]}"
+ histogram_precision = "${var.trends["timeseries_aggregator_histogram_precision"]}"
+ histogram_value_unit = "${var.trends["timeseries_aggregator_histogram_value_unit"]}"
+ additionalTags = "${var.trends["timeseries_aggregator_additional_tags"]}"
+}
diff --git a/trends/deployment/terraform/metrictank/main.tf b/trends/deployment/terraform/metrictank/main.tf
new file mode 100644
index 000000000..18dcf9301
--- /dev/null
+++ b/trends/deployment/terraform/metrictank/main.tf
@@ -0,0 +1,48 @@
+locals {
+ app_name = "metrictank"
+ service_port = 6060
+ container_port = 6060
+ deployment_yaml_file_path = "${path.module}/templates/deployment_yaml.tpl"
+ image = "grafana/metrictank:0.10.1"
+ count = "${var.enabled == "true" ? 1:0}"
+
+}
+
+data "template_file" "deployment_yaml" {
+ template = "${file("${local.deployment_yaml_file_path}")}"
+ vars {
+ app_name = "${local.app_name}"
+ namespace = "${var.namespace}"
+ graphite_address = "${var.graphite_address}"
+ node_selecter_label = "${var.node_selecter_label}"
+ kafka_address = "${var.kafka_address}"
+ cassandra_address = "${var.cassandra_address}"
+ tag_support = "${var.tag_support}"
+ replicas = "${var.replicas}"
+ image = "${local.image}"
+ memory_limit = "${var.memory_limit}"
+ memory_request = "${var.memory_request}"
+ cpu_limit = "${var.cpu_limit}"
+ cpu_request = "${var.cpu_request}"
+ service_port = "${local.service_port}"
+ container_port = "${local.container_port}"
+ env_vars = "${indent(9,"${var.env_vars}")}"
+ }
+}
+
+
+resource "null_resource" "kubectl_apply" {
+ triggers {
+ template = "${data.template_file.deployment_yaml.rendered}"
+ }
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} apply -f - --context ${var.kubectl_context_name}"
+ }
+
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} delete -f - --context ${var.kubectl_context_name}"
+ when = "destroy"
+ }
+ count = "${local.count}"
+}
diff --git a/trends/deployment/terraform/metrictank/outputs.tf b/trends/deployment/terraform/metrictank/outputs.tf
new file mode 100644
index 000000000..396c57294
--- /dev/null
+++ b/trends/deployment/terraform/metrictank/outputs.tf
@@ -0,0 +1,7 @@
+output "metrictank_hostname" {
+ value = "${local.app_name}"
+}
+
+output "metrictank_port" {
+ value = "${local.service_port}"
+}
\ No newline at end of file
diff --git a/trends/deployment/terraform/metrictank/templates/deployment_yaml.tpl b/trends/deployment/terraform/metrictank/templates/deployment_yaml.tpl
new file mode 100644
index 000000000..02e3628a4
--- /dev/null
+++ b/trends/deployment/terraform/metrictank/templates/deployment_yaml.tpl
@@ -0,0 +1,66 @@
+# ------------------- Deployment ------------------- #
+
+kind: Deployment
+apiVersion: apps/v1beta2
+metadata:
+ labels:
+ k8s-app: ${app_name}
+ name: ${app_name}
+ namespace: ${namespace}
+spec:
+ replicas: ${replicas}
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: ${app_name}
+ template:
+ metadata:
+ labels:
+ k8s-app: ${app_name}
+ spec:
+ containers:
+ - name: ${app_name}
+ image: ${image}
+ resources:
+ limits:
+ cpu: ${cpu_limit}
+ memory: ${memory_limit}Mi
+ requests:
+ cpu: ${cpu_request}
+ memory: ${memory_request}Mi
+ env:
+ - name: "MT_HTTP_MULTI_TENANT"
+ value: "false"
+ - name: "MT_CARBON_IN_ENABLED"
+ value: "false"
+ - name: "MT_KAFKA_MDM_IN_ENABLED"
+ value: "true"
+ - name: "MT_CASSANDRA_ADDRS"
+ value: "${cassandra_address}"
+ - name: "MT_KAFKA_MDM_IN_BROKERS"
+ value: "${kafka_address}"
+ - name: "MT_CASSANDRA_IDX_HOSTS"
+ value: "${cassandra_address}"
+ - name: "MT_STATS_ADDR"
+ value: "${graphite_address}"
+ - name: "MT_MEMORY_IDX_TAG_SUPPORT"
+ value: "${tag_support}"
+ ${env_vars}
+ nodeSelector:
+ ${node_selecter_label}
+
+# ------------------- Service ------------------- #
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: ${app_name}
+ name: ${app_name}
+ namespace: ${namespace}
+spec:
+ ports:
+ - port: ${service_port}
+ targetPort: ${container_port}
+ selector:
+ k8s-app: ${app_name}
diff --git a/trends/deployment/terraform/metrictank/variables.tf b/trends/deployment/terraform/metrictank/variables.tf
new file mode 100644
index 000000000..464c4fe62
--- /dev/null
+++ b/trends/deployment/terraform/metrictank/variables.tf
@@ -0,0 +1,21 @@
+variable "namespace" {}
+variable "replicas" {}
+variable "cassandra_address" {}
+variable "tag_support" {}
+variable "kafka_address" {}
+variable "graphite_address" {}
+variable "kubectl_executable_name" {}
+variable "kubectl_context_name" {}
+variable "node_selecter_label"{}
+variable "memory_request"{}
+variable "memory_limit"{}
+variable "cpu_request"{}
+variable "cpu_limit"{}
+variable "env_vars" {}
+
+variable "termination_grace_period" {
+ default = 30
+}
+variable "enabled" {}
+
+
diff --git a/trends/deployment/terraform/outputs.tf b/trends/deployment/terraform/outputs.tf
new file mode 100644
index 000000000..56eb82148
--- /dev/null
+++ b/trends/deployment/terraform/outputs.tf
@@ -0,0 +1,7 @@
+output "metrictank_hostname" {
+ value = "${local.external_metric_tank_enabled == "true" ? var.metrictank["external_hostname"] : module.metrictank.metrictank_hostname}"
+}
+
+output "metrictank_port" {
+ value = "${local.external_metric_tank_enabled == "true" ? var.metrictank["external_port"] : module.metrictank.metrictank_port}"
+}
\ No newline at end of file
diff --git a/trends/deployment/terraform/span-timeseries-transformer/main.tf b/trends/deployment/terraform/span-timeseries-transformer/main.tf
new file mode 100644
index 000000000..470917940
--- /dev/null
+++ b/trends/deployment/terraform/span-timeseries-transformer/main.tf
@@ -0,0 +1,75 @@
+locals {
+ app_name = "span-timeseries-transformer"
+ config_file_path = "${path.module}/templates/span-timeseries-transformer_conf.tpl"
+ deployment_yaml_file_path = "${path.module}/templates/deployment_yaml.tpl"
+ count = "${var.enabled?1:0}"
+ checksum = "${sha1("${data.template_file.config_data.rendered}")}"
+ configmap_name = "transformer-${local.checksum}"
+}
+
+resource "kubernetes_config_map" "haystack-config" {
+ metadata {
+ name = "${local.configmap_name}"
+ namespace = "${var.namespace}"
+ }
+ data {
+ "span-timeseries-transformer.conf" = "${data.template_file.config_data.rendered}"
+ }
+ count = "${local.count}"
+
+}
+data "template_file" "config_data" {
+ template = "${file("${local.config_file_path}")}"
+
+ vars {
+ kafka_endpoint = "${var.kafka_endpoint}"
+ metricpoint_encoder_type = "${var.metricpoint_encoder_type}"
+ kafka_num_stream_threads = "${var.kafka_num_stream_threads}"
+ }
+}
+
+
+// using kubectl to create the deployment construct since it's not natively supported by the kubernetes provider
+data "template_file" "deployment_yaml" {
+ template = "${file("${local.deployment_yaml_file_path}")}"
+ vars {
+ app_name = "${local.app_name}"
+ namespace = "${var.namespace}"
+ graphite_port = "${var.graphite_port}"
+ graphite_host = "${var.graphite_hostname}"
+ graphite_enabled = "${var.graphite_enabled}"
+ node_selecter_label = "${var.node_selecter_label}"
+ image = "${var.image}"
+ replicas = "${var.replicas}"
+ memory_limit = "${var.memory_limit}"
+ memory_request = "${var.memory_request}"
+ jvm_memory_limit = "${var.jvm_memory_limit}"
+ cpu_limit = "${var.cpu_limit}"
+ cpu_request = "${var.cpu_request}"
+ configmap_name = "${local.configmap_name}"
+ env_vars = "${indent(9,"${var.env_vars}")}"
+ }
+}
+
+resource "null_resource" "kubectl_apply" {
+
+ triggers {
+ template = "${data.template_file.deployment_yaml.rendered}"
+ }
+
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} apply -f - --context ${var.kubectl_context_name}"
+ }
+
+ count = "${local.count}"
+}
+
+
+resource "null_resource" "kubectl_destroy" {
+
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} delete -f - --context ${var.kubectl_context_name}"
+ when = "destroy"
+ }
+ count = "${local.count}"
+}
diff --git a/trends/deployment/terraform/span-timeseries-transformer/outputs.tf b/trends/deployment/terraform/span-timeseries-transformer/outputs.tf
new file mode 100644
index 000000000..e69de29bb
diff --git a/trends/deployment/terraform/span-timeseries-transformer/templates/deployment_yaml.tpl b/trends/deployment/terraform/span-timeseries-transformer/templates/deployment_yaml.tpl
new file mode 100644
index 000000000..a516ec2e7
--- /dev/null
+++ b/trends/deployment/terraform/span-timeseries-transformer/templates/deployment_yaml.tpl
@@ -0,0 +1,64 @@
+# ------------------- Deployment ------------------- #
+
+kind: Deployment
+apiVersion: apps/v1beta2
+metadata:
+ labels:
+ k8s-app: ${app_name}
+ name: ${app_name}
+ namespace: ${namespace}
+spec:
+ replicas: ${replicas}
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: ${app_name}
+ template:
+ metadata:
+ labels:
+ k8s-app: ${app_name}
+ spec:
+ containers:
+ - name: ${app_name}
+ image: ${image}
+ volumeMounts:
+ # Create on-disk volume to store exec logs
+ - mountPath: /config
+ name: config-volume
+ resources:
+ limits:
+ cpu: ${cpu_limit}
+ memory: ${memory_limit}Mi
+ requests:
+ cpu: ${cpu_request}
+ memory: ${memory_request}Mi
+ env:
+ - name: "HAYSTACK_OVERRIDES_CONFIG_PATH"
+ value: "/config/span-timeseries-transformer.conf"
+ - name: "HAYSTACK_GRAPHITE_HOST"
+ value: "${graphite_host}"
+ - name: "HAYSTACK_GRAPHITE_PORT"
+ value: "${graphite_port}"
+ - name: "HAYSTACK_GRAPHITE_ENABLED"
+ value: "${graphite_enabled}"
+ - name: "JAVA_XMS"
+ value: "${jvm_memory_limit}m"
+ - name: "JAVA_XMX"
+ value: "${jvm_memory_limit}m"
+ ${env_vars}
+ livenessProbe:
+ exec:
+ command:
+ - grep
+ - "true"
+ - /app/isHealthy
+ initialDelaySeconds: 30
+ periodSeconds: 5
+ failureThreshold: 6
+ nodeSelector:
+ ${node_selecter_label}
+ volumes:
+ - name: config-volume
+ configMap:
+ name: ${configmap_name}
+
diff --git a/trends/deployment/terraform/span-timeseries-transformer/templates/span-timeseries-transformer_conf.tpl b/trends/deployment/terraform/span-timeseries-transformer/templates/span-timeseries-transformer_conf.tpl
new file mode 100644
index 000000000..1d4560398
--- /dev/null
+++ b/trends/deployment/terraform/span-timeseries-transformer/templates/span-timeseries-transformer_conf.tpl
@@ -0,0 +1,31 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "span-timeseries-transformer-v2"
+ bootstrap.servers = "${kafka_endpoint}"
+ num.stream.threads = "${kafka_num_stream_threads}"
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ timestamp.extractor = "com.expedia.www.haystack.commons.kstreams.SpanTimestampExtractor"
+ }
+
+ producer {
+ topic = "metric-data-points"
+ }
+
+ consumer {
+ topic = "proto-spans"
+ }
+}
+
+// there are three types of encoders that are used on service and operation names:
+// 1) periodreplacement: replaces all periods with 3 underscores
+// 2) base64: base64 encodes the full name with a padding of _
+// 3) noop: does not perform any encoding
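+// e.g. with periodreplacement, "svc.name" is encoded as "svc___name"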
+metricpoint.encoder.type = "${metricpoint_encoder_type}"
+enable.metricpoint.service.level.generation = false
+
+blacklist.services = []
diff --git a/trends/deployment/terraform/span-timeseries-transformer/variables.tf b/trends/deployment/terraform/span-timeseries-transformer/variables.tf
new file mode 100644
index 000000000..bd7f9b608
--- /dev/null
+++ b/trends/deployment/terraform/span-timeseries-transformer/variables.tf
@@ -0,0 +1,26 @@
+variable "image" {}
+variable "replicas" {}
+variable "namespace" {}
+variable "kafka_endpoint" {}
+variable "graphite_hostname" {}
+variable "graphite_port" {}
+variable "graphite_enabled" {}
+
+variable "enabled" {}
+
+variable "kubectl_executable_name" {}
+variable "kubectl_context_name" {}
+variable "node_selecter_label"{}
+variable "memory_limit"{}
+variable "memory_request"{}
+variable "metricpoint_encoder_type" {}
+variable "jvm_memory_limit"{}
+variable "cpu_limit"{}
+variable "cpu_request"{}
+variable "env_vars" {}
+
+variable "kafka_num_stream_threads" {}
+
+variable "termination_grace_period" {
+ default = 30
+}
diff --git a/trends/deployment/terraform/timeseries-aggregator/main.tf b/trends/deployment/terraform/timeseries-aggregator/main.tf
new file mode 100644
index 000000000..3f177a7f8
--- /dev/null
+++ b/trends/deployment/terraform/timeseries-aggregator/main.tf
@@ -0,0 +1,84 @@
+locals {
+ app_name = "timeseries-aggregator"
+ config_file_path = "${path.module}/templates/timeseries-aggregator_conf.tpl"
+ deployment_yaml_file_path = "${path.module}/templates/deployment_yaml.tpl"
+ count = "${var.enabled?1:0}"
+ checksum = "${sha1("${data.template_file.config_data.rendered}")}"
+ configmap_name = "aggregator-${local.checksum}"
+}
+
+data "template_file" "config_data" {
+ template = "${file("${local.config_file_path}")}"
+
+ vars {
+ kafka_endpoint = "${var.kafka_endpoint}"
+ enable_external_kafka_producer = "${var.enable_external_kafka_producer}"
+ enable_metrics_sink = "${var.enable_metrics_sink?true:false}"
+ external_kafka_producer_endpoint = "${var.external_kafka_producer_endpoint}"
+ metricpoint_encoder_type = "${var.metricpoint_encoder_type}"
+ histogram_max_value = "${var.histogram_max_value}"
+ histogram_precision = "${var.histogram_precision}"
+ histogram_value_unit = "${var.histogram_value_unit}"
+ additionalTags = "${var.additionalTags}"
+ }
+}
+
+
+resource "kubernetes_config_map" "haystack-config" {
+ metadata {
+ name = "${local.configmap_name}"
+ namespace = "${var.namespace}"
+ }
+ data {
+ "timeseries-aggregator.conf" ="${data.template_file.config_data.rendered}"
+ }
+ count = "${local.count}"
+
+}
+
+// using kubectl to create the deployment construct since it's not natively supported by the kubernetes provider
+data "template_file" "deployment_yaml" {
+ template = "${file("${local.deployment_yaml_file_path}")}"
+ vars {
+ app_name = "${local.app_name}"
+ namespace = "${var.namespace}"
+ graphite_port = "${var.graphite_port}"
+ graphite_host = "${var.graphite_hostname}"
+ graphite_enabled = "${var.graphite_enabled}"
+ config = "${data.template_file.config_data.rendered}"
+ node_selecter_label = "${var.node_selecter_label}"
+ image = "${var.image}"
+ replicas = "${var.replicas}"
+ memory_limit = "${var.memory_limit}"
+ memory_request = "${var.memory_request}"
+ jvm_memory_limit = "${var.jvm_memory_limit}"
+ cpu_limit = "${var.cpu_limit}"
+ cpu_request = "${var.cpu_request}"
+ configmap_name = "${local.configmap_name}"
+ env_vars = "${indent(9,"${var.env_vars}")}"
+ }
+}
+
+
+resource "null_resource" "kubectl_apply" {
+
+ triggers {
+ template = "${data.template_file.deployment_yaml.rendered}"
+ }
+
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} apply -f - --context ${var.kubectl_context_name}"
+ }
+
+ count = "${local.count}"
+}
+
+
+resource "null_resource" "kubectl_destroy" {
+
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} delete -f - --context ${var.kubectl_context_name}"
+ when = "destroy"
+ }
+ count = "${local.count}"
+}
diff --git a/trends/deployment/terraform/timeseries-aggregator/outputs.tf b/trends/deployment/terraform/timeseries-aggregator/outputs.tf
new file mode 100644
index 000000000..e69de29bb
diff --git a/trends/deployment/terraform/timeseries-aggregator/templates/deployment_yaml.tpl b/trends/deployment/terraform/timeseries-aggregator/templates/deployment_yaml.tpl
new file mode 100644
index 000000000..8dfca7645
--- /dev/null
+++ b/trends/deployment/terraform/timeseries-aggregator/templates/deployment_yaml.tpl
@@ -0,0 +1,64 @@
+# ------------------- Deployment ------------------- #
+
+kind: Deployment
+apiVersion: apps/v1beta2
+metadata:
+ labels:
+ k8s-app: ${app_name}
+ name: ${app_name}
+ namespace: ${namespace}
+spec:
+ replicas: ${replicas}
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: ${app_name}
+ template:
+ metadata:
+ labels:
+ k8s-app: ${app_name}
+ spec:
+ containers:
+ - name: ${app_name}
+ image: ${image}
+ volumeMounts:
+ # Create on-disk volume to store exec logs
+ - mountPath: /config
+ name: config-volume
+ resources:
+ limits:
+ cpu: ${cpu_limit}
+ memory: ${memory_limit}Mi
+ requests:
+ cpu: ${cpu_request}
+ memory: ${memory_request}Mi
+ env:
+ - name: "HAYSTACK_OVERRIDES_CONFIG_PATH"
+ value: "/config/timeseries-aggregator.conf"
+ - name: "HAYSTACK_GRAPHITE_HOST"
+ value: "${graphite_host}"
+ - name: "HAYSTACK_GRAPHITE_PORT"
+ value: "${graphite_port}"
+ - name: "HAYSTACK_GRAPHITE_ENABLED"
+ value: "${graphite_enabled}"
+ - name: "JAVA_XMS"
+ value: "${jvm_memory_limit}m"
+ - name: "JAVA_XMX"
+ value: "${jvm_memory_limit}m"
+ ${env_vars}
+ livenessProbe:
+ exec:
+ command:
+ - grep
+ - "true"
+ - /app/isHealthy
+ initialDelaySeconds: 30
+ periodSeconds: 5
+ failureThreshold: 6
+ nodeSelector:
+ ${node_selecter_label}
+ volumes:
+ - name: config-volume
+ configMap:
+ name: ${configmap_name}
+
diff --git a/trends/deployment/terraform/timeseries-aggregator/templates/timeseries-aggregator_conf.tpl b/trends/deployment/terraform/timeseries-aggregator/templates/timeseries-aggregator_conf.tpl
new file mode 100644
index 000000000..e53e40860
--- /dev/null
+++ b/trends/deployment/terraform/timeseries-aggregator/templates/timeseries-aggregator_conf.tpl
@@ -0,0 +1,73 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "timeseries-aggregator-v2"
+ bootstrap.servers = "${kafka_endpoint}"
+ num.stream.threads = 2
+ commit.interval.ms = 5000
+ auto.offset.reset = latest
+ timestamp.extractor = "com.expedia.www.haystack.commons.kstreams.MetricDataTimestampExtractor"
+ consumer.heartbeat.interval.ms = 30000
+ consumer.session.timeout.ms = 100000
+ consumer.max.partition.fetch.bytes = 262144
+ }
+
+ // For producing data to an external kafka: set enable.external.kafka.produce to true and uncomment the props.
+ // For producing to the same kafka: set enable.external.kafka.produce to false and comment out the props.
+ producer {
+ topics : [
+ {
+ topic: "metrics"
+ serdeClassName : "com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricDataSerde"
+ enabled: ${enable_metrics_sink}
+ },
+ {
+ topic: "mdm"
+ serdeClassName : "com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricTankSerde"
+ enabled: true
+ }
+ ]
+ enable.external.kafka.produce = ${enable_external_kafka_producer}
+ external.kafka.topic = "mdm"
+ props {
+ bootstrap.servers = "${external_kafka_producer_endpoint}"
+ retries = 50
+ batch.size = 65536
+ linger.ms = 250
+ }
+ }
+
+ consumer {
+ topic = "metric-data-points"
+ }
+}
+
+state.store {
+ enable.logging = true
+ logging.delay.seconds = 60
+
+ // the number of trends kept in memory before flushing to the state store
+ cache.size = 3000
+ changelog.topic {
+ cleanup.policy = "compact,delete"
+ retention.ms = 14400000 // 4Hrs
+ }
+}
+
+
+// there are three types of encoders that are used on service and operation names:
+// 1) periodreplacement: replaces all periods with 3 underscores
+// 2) base64: base64 encodes the full name with a padding of _
+// 3) noop: does not perform any encoding
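+// e.g. with periodreplacement, "svc.name" is encoded as "svc___name"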
+metricpoint.encoder.type = "${metricpoint_encoder_type}"
+
+histogram {
+ max.value = "${histogram_max_value}"
+ precision = "${histogram_precision}"
+ value.unit = "${histogram_value_unit}" // can be micros / millis / seconds
+}
+
+additionalTags = "${additionalTags}"
diff --git a/trends/deployment/terraform/timeseries-aggregator/variables.tf b/trends/deployment/terraform/timeseries-aggregator/variables.tf
new file mode 100644
index 000000000..e32f1833d
--- /dev/null
+++ b/trends/deployment/terraform/timeseries-aggregator/variables.tf
@@ -0,0 +1,36 @@
+variable "image" {}
+variable "replicas" {}
+variable "namespace" {}
+variable "kafka_endpoint" {}
+variable "graphite_hostname" {}
+variable "graphite_port" {}
+variable "graphite_enabled" {}
+
+variable "enabled" {}
+variable "enable_external_kafka_producer" {}
+variable "enable_metrics_sink" {
+ default = true
+}
+variable "external_kafka_producer_endpoint" {}
+variable "metricpoint_encoder_type" {}
+variable "env_vars" {}
+
+variable "kubectl_executable_name" {}
+variable "kubectl_context_name" {}
+variable "node_selecter_label" {}
+variable "memory_request" {}
+variable "memory_limit" {}
+variable "jvm_memory_limit"{}
+
+variable "cpu_request" {}
+variable "cpu_limit" {}
+
+variable "histogram_max_value" {}
+variable "histogram_precision" {}
+variable "histogram_value_unit" {}
+
+variable "additionalTags" {}
+
+variable "termination_grace_period" {
+ default = 30
+}
diff --git a/trends/deployment/terraform/variables.tf b/trends/deployment/terraform/variables.tf
new file mode 100644
index 000000000..9939f4cfd
--- /dev/null
+++ b/trends/deployment/terraform/variables.tf
@@ -0,0 +1,24 @@
+
+variable "kafka_hostname" {}
+variable "kafka_port" {}
+variable "cassandra_hostname" {}
+variable "cassandra_port" {}
+variable "graphite_hostname" {}
+variable "graphite_port" {}
+variable "graphite_enabled" {}
+variable "kubectl_context_name" {}
+variable "kubectl_executable_name" {}
+variable "app_namespace" {}
+variable "node_selector_label"{}
+
+# trends config
+variable "trends" {
+ type = "map"
+}
+
+
+#metrictank
+variable "metrictank" {
+ type = "map"
+}
+
diff --git a/trends/documents/diagrams/haystack_trends.png b/trends/documents/diagrams/haystack_trends.png
new file mode 100644
index 000000000..af8f578a1
Binary files /dev/null and b/trends/documents/diagrams/haystack_trends.png differ
diff --git a/trends/mvnw b/trends/mvnw
new file mode 100755
index 000000000..961a82500
--- /dev/null
+++ b/trends/mvnw
@@ -0,0 +1,286 @@
+#!/bin/sh
+# ----------------------------------------------------------------------------
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ----------------------------------------------------------------------------
+
+# ----------------------------------------------------------------------------
+# Maven2 Start Up Batch script
+#
+# Required ENV vars:
+# ------------------
+# JAVA_HOME - location of a JDK home dir
+#
+# Optional ENV vars
+# -----------------
+# M2_HOME - location of maven2's installed home dir
+# MAVEN_OPTS - parameters passed to the Java VM when running Maven
+# e.g. to debug Maven itself, use
+# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+# ----------------------------------------------------------------------------
+
+if [ -z "$MAVEN_SKIP_RC" ] ; then
+
+ if [ -f /etc/mavenrc ] ; then
+ . /etc/mavenrc
+ fi
+
+ if [ -f "$HOME/.mavenrc" ] ; then
+ . "$HOME/.mavenrc"
+ fi
+
+fi
+
+# OS specific support. $var _must_ be set to either true or false.
+cygwin=false;
+darwin=false;
+mingw=false
+case "`uname`" in
+ CYGWIN*) cygwin=true ;;
+ MINGW*) mingw=true;;
+ Darwin*) darwin=true
+ # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
+ # See https://developer.apple.com/library/mac/qa/qa1170/_index.html
+ if [ -z "$JAVA_HOME" ]; then
+ if [ -x "/usr/libexec/java_home" ]; then
+ export JAVA_HOME="`/usr/libexec/java_home`"
+ else
+ export JAVA_HOME="/Library/Java/Home"
+ fi
+ fi
+ ;;
+esac
+
+if [ -z "$JAVA_HOME" ] ; then
+ if [ -r /etc/gentoo-release ] ; then
+ JAVA_HOME=`java-config --jre-home`
+ fi
+fi
+
+if [ -z "$M2_HOME" ] ; then
+ ## resolve links - $0 may be a link to maven's home
+ PRG="$0"
+
+ # need this for relative symlinks
+ while [ -h "$PRG" ] ; do
+ ls=`ls -ld "$PRG"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG="`dirname "$PRG"`/$link"
+ fi
+ done
+
+ saveddir=`pwd`
+
+ M2_HOME=`dirname "$PRG"`/..
+
+ # make it fully qualified
+ M2_HOME=`cd "$M2_HOME" && pwd`
+
+ cd "$saveddir"
+ # echo Using m2 at $M2_HOME
+fi
+
+# For Cygwin, ensure paths are in UNIX format before anything is touched
+if $cygwin ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --unix "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
+fi
+
+# For Mingw, ensure paths are in UNIX format before anything is touched
+if $mingw ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME="`(cd "$M2_HOME"; pwd)`"
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
+ # TODO classpath?
+fi
+
+if [ -z "$JAVA_HOME" ]; then
+ javaExecutable="`which javac`"
+ if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
+ # readlink(1) is not available as standard on Solaris 10.
+ readLink=`which readlink`
+ if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
+ if $darwin ; then
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
+ else
+ javaExecutable="`readlink -f \"$javaExecutable\"`"
+ fi
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaHome=`expr "$javaHome" : '\(.*\)/bin'`
+ JAVA_HOME="$javaHome"
+ export JAVA_HOME
+ fi
+ fi
+fi
+
+if [ -z "$JAVACMD" ] ; then
+ if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD="$JAVA_HOME/jre/sh/java"
+ else
+ JAVACMD="$JAVA_HOME/bin/java"
+ fi
+ else
+ JAVACMD="`which java`"
+ fi
+fi
+
+if [ ! -x "$JAVACMD" ] ; then
+ echo "Error: JAVA_HOME is not defined correctly." >&2
+ echo " We cannot execute $JAVACMD" >&2
+ exit 1
+fi
+
+if [ -z "$JAVA_HOME" ] ; then
+ echo "Warning: JAVA_HOME environment variable is not set."
+fi
+
+CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
+
+# traverses directory structure from process work directory to filesystem root
+# first directory with .mvn subdirectory is considered project base directory
+find_maven_basedir() {
+
+ if [ -z "$1" ]
+ then
+ echo "Path not specified to find_maven_basedir"
+ return 1
+ fi
+
+ basedir="$1"
+ wdir="$1"
+ while [ "$wdir" != '/' ] ; do
+ if [ -d "$wdir"/.mvn ] ; then
+ basedir=$wdir
+ break
+ fi
+ # workaround for JBEAP-8937 (on Solaris 10/Sparc)
+ if [ -d "${wdir}" ]; then
+ wdir=`cd "$wdir/.."; pwd`
+ fi
+ # end of workaround
+ done
+ echo "${basedir}"
+}
+
+# concatenates all lines of a file
+concat_lines() {
+ if [ -f "$1" ]; then
+ echo "$(tr -s '\n' ' ' < "$1")"
+ fi
+}
+
+BASE_DIR=`find_maven_basedir "$(pwd)"`
+if [ -z "$BASE_DIR" ]; then
+ exit 1;
+fi
+
+##########################################################################################
+# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+# This allows using the maven wrapper in projects that prohibit checking in binary data.
+##########################################################################################
+if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found .mvn/wrapper/maven-wrapper.jar"
+ fi
+else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
+ fi
+ jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.0/maven-wrapper-0.4.0.jar"
+ while IFS="=" read key value; do
+ case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
+ esac
+ done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Downloading from: $jarUrl"
+ fi
+ wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
+
+ if command -v wget > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found wget ... using wget"
+ fi
+ wget "$jarUrl" -O "$wrapperJarPath"
+ elif command -v curl > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found curl ... using curl"
+ fi
+ curl -o "$wrapperJarPath" "$jarUrl"
+ else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Falling back to using Java to download"
+ fi
+ javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
+ if [ -e "$javaClass" ]; then
+ if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Compiling MavenWrapperDownloader.java ..."
+ fi
+ # Compiling the Java class
+ ("$JAVA_HOME/bin/javac" "$javaClass")
+ fi
+ if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ # Running the downloader
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Running MavenWrapperDownloader.java ..."
+ fi
+ ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
+ fi
+ fi
+ fi
+fi
+##########################################################################################
+# End of extension
+##########################################################################################
+
+export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
+if [ "$MVNW_VERBOSE" = true ]; then
+ echo $MAVEN_PROJECTBASEDIR
+fi
+MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
+
+# For Cygwin, switch paths to Windows format before running java
+if $cygwin; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --path --windows "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
+ [ -n "$MAVEN_PROJECTBASEDIR" ] &&
+ MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
+fi
+
+WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+exec "$JAVACMD" \
+ $MAVEN_OPTS \
+ -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
+ "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
+ ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
diff --git a/trends/mvnw.cmd b/trends/mvnw.cmd
new file mode 100755
index 000000000..830073a17
--- /dev/null
+++ b/trends/mvnw.cmd
@@ -0,0 +1,161 @@
+@REM ----------------------------------------------------------------------------
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements. See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership. The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License. You may obtain a copy of the License at
+@REM
+@REM http://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied. See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM ----------------------------------------------------------------------------
+
+@REM ----------------------------------------------------------------------------
+@REM Maven2 Start Up Batch script
+@REM
+@REM Required ENV vars:
+@REM JAVA_HOME - location of a JDK home dir
+@REM
+@REM Optional ENV vars
+@REM M2_HOME - location of maven2's installed home dir
+@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
+@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
+@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
+@REM e.g. to debug Maven itself, use
+@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+@REM ----------------------------------------------------------------------------
+
+@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
+@echo off
+@REM set title of command window
+title %0
+@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
+@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
+
+@REM set %HOME% to equivalent of $HOME
+if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
+
+@REM Execute a user defined script before this one
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
+@REM check for pre script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
+if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
+:skipRcPre
+
+@setlocal
+
+set ERROR_CODE=0
+
+@REM To isolate internal variables from possible post scripts, we use another setlocal
+@setlocal
+
+@REM ==== START VALIDATION ====
+if not "%JAVA_HOME%" == "" goto OkJHome
+
+echo.
+echo Error: JAVA_HOME not found in your environment. >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+:OkJHome
+if exist "%JAVA_HOME%\bin\java.exe" goto init
+
+echo.
+echo Error: JAVA_HOME is set to an invalid directory. >&2
+echo JAVA_HOME = "%JAVA_HOME%" >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+@REM ==== END VALIDATION ====
+
+:init
+
+@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
+@REM Fallback to current working directory if not found.
+
+set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
+IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
+
+set EXEC_DIR=%CD%
+set WDIR=%EXEC_DIR%
+:findBaseDir
+IF EXIST "%WDIR%"\.mvn goto baseDirFound
+cd ..
+IF "%WDIR%"=="%CD%" goto baseDirNotFound
+set WDIR=%CD%
+goto findBaseDir
+
+:baseDirFound
+set MAVEN_PROJECTBASEDIR=%WDIR%
+cd "%EXEC_DIR%"
+goto endDetectBaseDir
+
+:baseDirNotFound
+set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
+cd "%EXEC_DIR%"
+
+:endDetectBaseDir
+
+IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
+
+@setlocal EnableExtensions EnableDelayedExpansion
+for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
+@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
+
+:endReadAdditionalConfig
+
+SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
+set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
+set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.0/maven-wrapper-0.4.0.jar"
+FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO (
+ IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
+)
+
+@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
+if exist %WRAPPER_JAR% (
+ echo Found %WRAPPER_JAR%
+) else (
+ echo Couldn't find %WRAPPER_JAR%, downloading it ...
+ echo Downloading from: %DOWNLOAD_URL%
+ powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"
+ echo Finished downloading %WRAPPER_JAR%
+)
+@REM End of extension
+
+%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
+if ERRORLEVEL 1 goto error
+goto end
+
+:error
+set ERROR_CODE=1
+
+:end
+@endlocal & set ERROR_CODE=%ERROR_CODE%
+
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
+@REM check for post script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
+if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
+:skipRcPost
+
+@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
+if "%MAVEN_BATCH_PAUSE%" == "on" pause
+
+if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
+
+exit /B %ERROR_CODE%
diff --git a/trends/pom.xml b/trends/pom.xml
new file mode 100644
index 000000000..d47bc8751
--- /dev/null
+++ b/trends/pom.xml
@@ -0,0 +1,556 @@
+
+
+
+
+
+ 4.0.0
+
+ com.expedia.www
+ haystack-trends
+ 1.0.0-SNAPSHOT
+ pom
+
+
+ scm:git:git://github.com/ExpediaDotCom/haystack-trends.git
+ scm:git:ssh://github.com/ExpediaDotCom/haystack-trends.git
+ http://github.com/ExpediaDotCom/haystack-trends
+
+
+ ${project.groupId}:${project.artifactId}
+ Code to trend the tuple of serviceName and operationName present in a span
+ https://github.com/ExpediaDotCom/haystack-trends/tree/master
+
+
+
+
+ Apache License, Version 2.0
+ http://www.apache.org/licenses/LICENSE-2.0.txt
+ repo
+
+
+
+
+
+ haystack
+ Haystack Team
+ haystack@expedia.com
+ https://github.com/ExpediaDotCom/haystack
+
+
+
+
+ span-timeseries-transformer
+ timeseries-aggregator
+
+
+
+
+
+ 1.6.0
+ 3.0.3
+ 1.7.25
+ 1.0.0
+ 4.12
+ 1.3.1
+ 1.2.3
+ 3.0.2
+ 3.3.1
+ 0.8.13
+ 1.2.1
+ 1.4
+ 3.4
+ 0.1.12
+ 1.0.61
+ 2.23.0
+
+
+ 1.8
+ 2
+ 12
+ 2
+ ${scala.major.version}.${scala.minor.version}
+ ${scala.major.minor.version}.${scala.maintenance.version}
+
+
+ 3.3.0.1
+ 3.2.1
+ 1.0
+ false
+ 3.6.1
+ 2.6
+ 3.0.0
+ 1.0
+ ${project.basedir}/../scalastyle/scalastyle_config.xml
+ 0.9.0
+ 1.3.0
+ com.expedia.www.haystack.trends.App
+
+ 1.6
+ 3.0.1
+ 1.6.8
+
+ true
+
+
+
+
+
+
+
+ org.scalatest
+ scalatest_${scala.major.minor.version}
+ ${scalatest.version}
+ test
+
+
+
+ org.mockito
+ mockito-core
+ ${mockito-core.version}
+ test
+
+
+
+
+
+
+ org.pegdown
+ pegdown
+ ${pegdown.version}
+ test
+
+
+
+ org.easymock
+ easymock
+ ${easymock.version}
+ test
+
+
+ commons-codec
+ commons-codec
+ ${commons-codec.version}
+
+
+
+ org.apache.kafka
+ kafka-streams
+ ${kafka.version}
+ test
+ test
+
+
+
+ org.apache.kafka
+ kafka-clients
+ ${kafka.version}
+ test
+ test
+
+
+
+ org.apache.kafka
+ kafka_${scala.major.minor.version}
+ ${kafka.version}
+
+
+ org.slf4j
+ slf4j-log4j12
+
+
+ test
+
+
+
+ org.apache.kafka
+ kafka_${scala.major.minor.version}
+ ${kafka.version}
+
+
+ org.slf4j
+ slf4j-log4j12
+
+
+ test
+ test
+
+
+
+ junit
+ junit
+ ${junit.version}
+ test
+
+
+
+
+
+
+
+
+
+ org.scala-lang
+ scala-library
+ ${scala-library.version}
+
+
+
+ org.slf4j
+ slf4j-api
+ ${slf4j.version}
+ compile
+
+
+
+ com.codahale.metrics
+ metrics-core
+ ${metrics-core.version}
+
+
+ com.expedia.www
+ haystack-commons
+ ${haystack.commons.version}
+
+
+ com.expedia.www
+ haystack-logback-metrics-appender
+ ${haystack.logback.metrics.appender.version}
+
+
+
+ org.msgpack
+ msgpack-core
+ ${msgpack.version}
+
+
+
+
+ org.apache.kafka
+ kafka-streams
+ ${kafka.version}
+
+
+
+ ch.qos.logback
+ logback-classic
+ ${logback.version}
+
+
+
+ com.typesafe
+ config
+ ${typesafe-config.version}
+ compile
+
+
+
+ org.hdrhistogram
+ HdrHistogram
+ ${hdrhistogram.version}
+
+
+
+ com.google.protobuf
+ protobuf-java
+ ${protobuf.version}
+
+
+
+
+
+
+
+ ${basedir}/src/main/scala
+
+
+ ${basedir}/src/main/resources
+ true
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-shade-plugin
+ ${maven-shade-plugin.version}
+
+ false
+
+
+ *:*
+
+ META-INF/*.SF
+ META-INF/*.DSA
+ META-INF/*.RSA
+
+
+
+
+
+
+ package
+
+ shade
+
+
+
+
+ ${mainClass}
+
+
+
+
+
+
+
+
+ org.scalatest
+ scalatest-maven-plugin
+ ${maven-scalatest-plugin.version}
+
+
+ test
+
+ test
+
+
+
+ testTopic
+
+ ${featureTestClasses}
+
+
+
+ integration-test
+ integration-test
+
+ test
+
+
+ ${integrationTestClasses}
+ false
+
+
+
+
+
+
+ com.github.os72
+ protoc-jar-maven-plugin
+ ${maven-protobuf-plugin.version}
+
+
+ generate-sources
+
+ run
+
+
+
+ ${project.basedir}/../haystack-idl/proto
+
+
+ ${project.basedir}/../haystack-idl/proto
+
+ ${project.basedir}/target/generated-sources
+
+
+
+
+
+ org.scoverage
+ scoverage-maven-plugin
+ ${scoverage.plugin.version}
+
+
+ 80
+ true
+ true
+ ${scala-library.version}
+ true
+
+
+
+
+ org.scalastyle
+ scalastyle-maven-plugin
+ ${maven-scalastyle-plugin.version}
+
+ false
+ true
+ true
+ false
+ ${project.basedir}/src/main/scala
+ ${project.basedir}/src/test/scala
+ ${scalastyle.config.location}
+ ${project.build.directory}/scalastyle-output.xml
+ UTF-8
+
+
+
+ compile-scalastyle
+
+ check
+
+ compile
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-source-plugin
+ ${maven-source-plugin.version}
+
+
+ attach-sources
+
+ jar-no-fork
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-gpg-plugin
+ ${maven-gpg-plugin.version}
+
+
+ org.sonatype.plugins
+ nexus-staging-maven-plugin
+ ${nexus-staging-maven-plugin.version}
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+
+ ${project.jdk.version}
+ ${project.jdk.version}
+ UTF-8
+
+ ${maven-compiler-plugin.version}
+
+
+
+ net.alchim31.maven
+ scala-maven-plugin
+ ${maven-scala-plugin.version}
+
+
+ scala-compile-first
+ process-resources
+
+ add-source
+ compile
+
+
+
+ scala-test-compile
+ process-test-resources
+
+ testCompile
+
+
+
+ attach-javadocs
+
+ doc-jar
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-source-plugin
+
+
+ attach-sources
+
+ jar-no-fork
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-gpg-plugin
+
+ ${skipGpg}
+
+
+
+ sign-artifacts
+ verify
+
+ sign
+
+
+
+
+
+ org.sonatype.plugins
+ nexus-staging-maven-plugin
+ true
+
+ ossrh
+ https://oss.sonatype.org/
+ true
+
+
+
+
+
+
+
+ ossrh
+ https://oss.sonatype.org/content/repositories/snapshots
+
+
+ ossrh
+ http://oss.sonatype.org/service/local/staging/deploy/maven2/
+
+
+
diff --git a/trends/scalastyle/scalastyle_config.xml b/trends/scalastyle/scalastyle_config.xml
new file mode 100644
index 000000000..e0cd28086
--- /dev/null
+++ b/trends/scalastyle/scalastyle_config.xml
@@ -0,0 +1,136 @@
+ Scalastyle standard configuration
+ [scalastyle XML rule definitions were not preserved in this extraction; only the configuration title survives]
diff --git a/trends/span-timeseries-transformer/Makefile b/trends/span-timeseries-transformer/Makefile
new file mode 100644
index 000000000..102470ff3
--- /dev/null
+++ b/trends/span-timeseries-transformer/Makefile
@@ -0,0 +1,19 @@
+.PHONY: docker_build integration_test release all
+
+MAVEN := ../mvnw
+
+export DOCKER_ORG := expediadotcom
+export DOCKER_IMAGE_NAME := haystack-span-timeseries-transformer
+
+docker_build:
+ docker build -t $(DOCKER_IMAGE_NAME) -f build/docker/Dockerfile .
+
+integration_test:
+ ${MAVEN} scoverage:integration-check
+
+# build jar, docker image and run integration tests
+all: docker_build integration_test
+
+# build all and release
+release: docker_build
+ ../deployment/scripts/publish-to-docker-hub.sh
diff --git a/trends/span-timeseries-transformer/README.md b/trends/span-timeseries-transformer/README.md
new file mode 100644
index 000000000..992cdf563
--- /dev/null
+++ b/trends/span-timeseries-transformer/README.md
@@ -0,0 +1,35 @@
+# Haystack Span Timeseries Transformer
+
+haystack-span-timeseries-transformer is the module that reads spans from Kafka, converts them to timeseries metric points using transformers, and writes the resulting metric points back to Kafka.
+
+Haystack has another app, [timeseries-aggregator](https://github.com/ExpediaDotCom/haystack-trends/tree/master/timeseries-aggregator), which consumes these metric points
+and aggregates them based on predefined rules; the aggregates can be visualized on the [haystack ui](https://github.com/ExpediaDotCom/haystack-ui).
+
+This is a simple public-static-void-main application written in Scala using Kafka Streams. It is designed to be deployed as a Docker container.
+
+
+## Building
+
+#### Prerequisites
+
+* Make sure you have Java 1.8
+* Make sure you have Maven 3.3.9 or higher
+* Make sure you have Docker 1.13 or higher
+
+#### Build
+
+For a full build, including unit tests, the jar and docker image build, and integration tests, run:
+```
+make all
+```
+
+#### Integration Test
+
+If you are developing and just want to run the integration tests:
+```
+make integration_test
+```
\ No newline at end of file
diff --git a/trends/span-timeseries-transformer/build/docker/Dockerfile b/trends/span-timeseries-transformer/build/docker/Dockerfile
new file mode 100644
index 000000000..073e990c1
--- /dev/null
+++ b/trends/span-timeseries-transformer/build/docker/Dockerfile
@@ -0,0 +1,23 @@
+FROM openjdk:8-jre
+MAINTAINER Haystack
+
+ENV APP_NAME haystack-span-timeseries-transformer
+ENV APP_HOME /app/bin
+ENV JMXTRANS_AGENT jmxtrans-agent-1.2.6
+ENV DOCKERIZE_VERSION v0.6.1
+
+ADD https://github.com/jwilder/dockerize/releases/download/${DOCKERIZE_VERSION}/dockerize-alpine-linux-amd64-${DOCKERIZE_VERSION}.tar.gz dockerize.tar.gz
+RUN tar xzf dockerize.tar.gz
+RUN chmod +x dockerize
+
+RUN mkdir -p ${APP_HOME}
+
+COPY target/${APP_NAME}.jar ${APP_HOME}/
+COPY build/docker/start-app.sh ${APP_HOME}/
+COPY build/docker/jmxtrans-agent.xml ${APP_HOME}/
+
+ADD https://github.com/jmxtrans/jmxtrans-agent/releases/download/${JMXTRANS_AGENT}/${JMXTRANS_AGENT}.jar ${APP_HOME}/
+
+WORKDIR ${APP_HOME}
+
+ENTRYPOINT ["./start-app.sh"]
diff --git a/trends/span-timeseries-transformer/build/docker/jmxtrans-agent.xml b/trends/span-timeseries-transformer/build/docker/jmxtrans-agent.xml
new file mode 100644
index 000000000..3504e8e07
--- /dev/null
+++ b/trends/span-timeseries-transformer/build/docker/jmxtrans-agent.xml
@@ -0,0 +1,115 @@
+ [jmxtrans-agent query definitions were not preserved in this extraction; the recoverable output writer settings follow]
+
+ ${HAYSTACK_GRAPHITE_HOST:monitoring-influxdb-graphite.kube-system.svc}
+ ${HAYSTACK_GRAPHITE_PORT:2003}
+ ${HAYSTACK_GRAPHITE_ENABLED:false}
+ haystack.trends.span-transformer.#hostname#.
+
+ 60
diff --git a/trends/span-timeseries-transformer/build/docker/start-app.sh b/trends/span-timeseries-transformer/build/docker/start-app.sh
new file mode 100755
index 000000000..58cf4cd31
--- /dev/null
+++ b/trends/span-timeseries-transformer/build/docker/start-app.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+[ -z "$JAVA_XMS" ] && JAVA_XMS=1024m
+[ -z "$JAVA_XMX" ] && JAVA_XMX=1024m
+
+set -e
+JAVA_OPTS="${JAVA_OPTS} \
+-javaagent:${APP_HOME}/${JMXTRANS_AGENT}.jar=${APP_HOME}/jmxtrans-agent.xml \
+-XX:+UseG1GC \
+-Xloggc:/var/log/gc.log \
+-XX:+PrintGCDetails \
+-XX:+PrintGCDateStamps \
+-XX:+UseGCLogFileRotation \
+-XX:NumberOfGCLogFiles=5 \
+-XX:GCLogFileSize=2M \
+-Xmx${JAVA_XMX} \
+-Xms${JAVA_XMS} \
+-Dcom.sun.management.jmxremote.authenticate=false \
+-Dcom.sun.management.jmxremote.ssl=false \
+-Dcom.sun.management.jmxremote.port=1098 \
+-Dapplication.name=${APP_NAME} \
+-Dapplication.home=${APP_HOME}"
+
+exec java ${JAVA_OPTS} -jar "${APP_HOME}/${APP_NAME}.jar"
diff --git a/trends/span-timeseries-transformer/pom.xml b/trends/span-timeseries-transformer/pom.xml
new file mode 100644
index 000000000..e88a6ba27
--- /dev/null
+++ b/trends/span-timeseries-transformer/pom.xml
@@ -0,0 +1,113 @@
+
+
+
+
+
+ 4.0.0
+
+ haystack-span-timeseries-transformer
+ jar
+ haystack-span-timeseries-transformer
+ scala module which creates timeseries metricpoints for spans
+
+
+ com.expedia.www
+ haystack-trends
+ 1.0.0-SNAPSHOT
+
+
+
+
+ The Apache License, Version 2.0
+ http://www.apache.org/licenses/LICENSE-2.0.txt
+
+
+
+
+ com.expedia.www.haystack.trends.App
+ com.expedia.www.haystack.trends.feature.tests
+ com.expedia.www.haystack.trends.integration.tests
+ ${project.artifactId}-${project.version}
+
+
+
+
+ com.google.protobuf
+ protobuf-java
+ ${protobuf.version}
+
+
+ com.expedia.www
+ haystack-commons
+
+
+ com.expedia.www
+ haystack-logback-metrics-appender
+
+
+ org.apache.kafka
+ kafka-streams
+
+
+ org.msgpack
+ msgpack-core
+
+
+ com.typesafe
+ config
+
+
+ com.codahale.metrics
+ metrics-core
+
+
+
+
+ ${finalName}
+
+
+ src/main/resources
+ true
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-shade-plugin
+
+
+ com.github.os72
+ protoc-jar-maven-plugin
+
+
+ org.scalatest
+ scalatest-maven-plugin
+
+
+ org.scalastyle
+ scalastyle-maven-plugin
+
+
+
+
+
diff --git a/trends/span-timeseries-transformer/src/main/resources/config/base.conf b/trends/span-timeseries-transformer/src/main/resources/config/base.conf
new file mode 100644
index 000000000..48b0f6386
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/main/resources/config/base.conf
@@ -0,0 +1,36 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-span-timeseries-transformer"
+ bootstrap.servers = "kafkasvc:9092"
+ num.stream.threads = 4
+ request.timeout.ms = 60000
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ timestamp.extractor = "com.expedia.www.haystack.commons.kstreams.SpanTimestampExtractor"
+ }
+
+ producer {
+ topic = "metric-data-points"
+ }
+
+ consumer {
+ topic = "proto-spans"
+ }
+}
+
+haystack.graphite.host = "monitoring-influxdb-graphite.kube-system.svc"
+
+// there are three types of encoders that are used on service and operation names:
+// 1) periodreplacement: replaces all periods with 3 underscores
+// 2) base64: base64 encodes the full name with a padding of _
+// 3) noop: does not perform any encoding
+metricpoint.encoder.type = "periodreplacement"
+enable.metricpoint.service.level.generation = true
+
+// List of Regex expressions used to filter out services from generating trends
+blacklist.services = [
+]
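The `metricpoint.encoder.type` setting above controls how service and operation names are sanitized before they are embedded in metric keys. Below is a minimal sketch of the encoding behavior, assuming `EncoderFactory` accepts the same type strings listed in the comment (as `AppConfiguration` does when it calls `EncoderFactory.newInstance`); the service name is a hypothetical example:

```scala
import com.expedia.www.haystack.commons.entities.encoders.EncoderFactory

object EncoderSketch extends App {
  // hypothetical service name containing periods
  val serviceName = "checkout.web.svc"

  // "periodreplacement" turns each period into three underscores so the
  // name stays safe inside period-delimited metric paths
  val encoded = EncoderFactory.newInstance("periodreplacement").encode(serviceName)
  println(encoded) // expected: checkout___web___svc
}
```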
diff --git a/trends/span-timeseries-transformer/src/main/resources/logback.xml b/trends/span-timeseries-transformer/src/main/resources/logback.xml
new file mode 100644
index 000000000..c45f62d7b
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/main/resources/logback.xml
@@ -0,0 +1,27 @@
+ [logback appender XML elements were not preserved in this extraction; the recoverable settings follow]
+
+ true
+ %d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n
+ ${HAYSTACK_LOG_QUEUE_SIZE:-500}
+ ${HAYSTACK_LOG_DISCARD_THRESHOLD:-0}
diff --git a/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/App.scala b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/App.scala
new file mode 100644
index 000000000..c5ad0e83f
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/App.scala
@@ -0,0 +1,72 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends
+
+import java.util.function.Supplier
+
+import com.expedia.www.haystack.commons.health.{HealthStatusController, UpdateHealthStatusFile}
+import com.expedia.www.haystack.commons.kstreams.app.{Main, StateChangeListener, StreamsFactory, StreamsRunner}
+import com.expedia.www.haystack.trends.config.AppConfiguration
+import com.netflix.servo.util.VisibleForTesting
+import org.apache.kafka.streams.Topology
+
+object App extends Main {
+
+ /**
+ * Creates a valid instance of StreamsRunner.
+ *
+ * StreamsRunner is created with a valid StreamsFactory instance and a listener that observes
+ * state changes of the kstreams application.
+ *
+ * StreamsFactory in turn is created with a Topology Supplier and kafka.StreamsConfig. Any failure in
+   * StreamsFactory is gracefully handled by StreamsRunner to shut the application down.
+   *
+   * The core logic of this application is in the `Streams` instance - which is the topology supplier. The
+   * topology of this application is built in that class.
+   *
+   * @return A valid instance of `StreamsRunner`
+   */
+  override def createStreamsRunner(): StreamsRunner = {
+ val appConfiguration = new AppConfiguration()
+
+ val healthStatusController = new HealthStatusController
+ healthStatusController.addListener(new UpdateHealthStatusFile(appConfiguration.healthStatusFilePath))
+
+ val stateChangeListener = new StateChangeListener(healthStatusController)
+
+ createStreamsRunner(appConfiguration, stateChangeListener)
+ }
+
+ @VisibleForTesting
+ private[trends] def createStreamsRunner(appConfiguration: AppConfiguration,
+ stateChangeListener: StateChangeListener): StreamsRunner = {
+ //create the topology provider
+ val kafkaConfig = appConfiguration.kafkaConfig
+ val streams: Supplier[Topology] = new Streams(appConfiguration.kafkaConfig, appConfiguration.transformerConfiguration)
+
+ val streamsFactory = new StreamsFactory(streams, kafkaConfig.streamsConfig, kafkaConfig.consumeTopic)
+
+ new StreamsRunner(streamsFactory, stateChangeListener)
+ }
+}
+
+
+
+
diff --git a/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/MetricDataGenerator.scala b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/MetricDataGenerator.scala
new file mode 100644
index 000000000..12d096ef0
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/MetricDataGenerator.scala
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends
+
+import com.expedia.metrics.MetricData
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.commons.entities.encoders.Encoder
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trends.transformer.MetricDataTransformer
+
+import scala.util.matching.Regex
+
+trait MetricDataGenerator extends MetricsSupport {
+
+ private val SpanValidationErrors = metricRegistry.meter("span.validation.failure")
+ private val BlackListedSpans = metricRegistry.meter("span.validation.black.listed")
+ private val metricPointGenerationTimer = metricRegistry.timer("metricpoint.generation.time")
+
+ /**
+ * This function is responsible for generating all the metric points which can be created given a span
+ *
+ * @param span incoming span
+ * @param transformers list of transformers to be applied
+ * @param encoder encoder object
+ * @param serviceOnlyFlag tells if metric data should be generated for serviceOnly, default is true
+   * @return the metric points produced by applying each transformer to the span
+ */
+ def generateMetricDataList(span: Span,
+ transformers: Seq[MetricDataTransformer],
+ encoder: Encoder,
+ serviceOnlyFlag: Boolean = true): Seq[MetricData] = {
+ val timer = metricPointGenerationTimer.time()
+ val metricPoints = transformers.flatMap(transformer => transformer.mapSpan(span, serviceOnlyFlag, encoder))
+ timer.close()
+ metricPoints
+ }
+
+ /**
+ * This function validates a span and makes sure that the span has the necessary data to generate meaningful metrics
+ * This layer is supposed to do generic validations which would impact all the transformers.
+ * Validation specific to the transformer can be done in the transformer itself
+ *
+ * @param span incoming span
+   * @return true if the span has the fields needed to generate meaningful metrics and its service is not blacklisted, false otherwise
+ */
+ def isValidSpan(span: Span, blackListedServices: List[Regex]): Boolean = {
+ if (span.getServiceName.isEmpty || span.getOperationName.isEmpty) {
+ SpanValidationErrors.mark()
+ return false
+ }
+
+ val isBlacklisted = blackListedServices.exists {
+ regexp =>
+ regexp.pattern.matcher(span.getServiceName).find()
+ }
+
+ if (isBlacklisted) BlackListedSpans.mark()
+ !isBlacklisted
+ }
+}
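For illustration, here is a sketch of how `isValidSpan` interacts with the blacklist; the anonymous trait instance and the regex are assumptions made for this example:

```scala
import com.expedia.open.tracing.Span
import com.expedia.www.haystack.trends.MetricDataGenerator

import scala.util.matching.Regex

object ValidationSketch extends App {
  val generator = new MetricDataGenerator {}

  // hypothetical blacklist entry: drop every service whose name starts with "test-"
  val blacklist = List(new Regex("^test-.*"))

  val span = Span.newBuilder()
    .setServiceName("test-service")
    .setOperationName("checkout")
    .build()

  // prints false: the service name matches a blacklist regex, so no trends are generated for it
  println(generator.isValidSpan(span, blacklist))
}
```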
diff --git a/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/Streams.scala b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/Streams.scala
new file mode 100644
index 000000000..b9a717163
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/Streams.scala
@@ -0,0 +1,69 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends
+
+import java.util.function.Supplier
+
+import com.expedia.metrics.MetricData
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.commons.kstreams.serde.SpanSerde
+import com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricTankSerde
+import com.expedia.www.haystack.commons.util.MetricDefinitionKeyGenerator._
+import com.expedia.www.haystack.trends.config.entities.{KafkaConfiguration, TransformerConfiguration}
+import com.expedia.www.haystack.trends.transformer.MetricDataTransformer.allTransformers
+import org.apache.kafka.common.serialization.Serdes.StringSerde
+import org.apache.kafka.streams._
+import org.apache.kafka.streams.kstream.Produced
+
+import scala.collection.JavaConverters._
+
+class Streams(kafkaConfig: KafkaConfiguration, transformConfig: TransformerConfiguration) extends Supplier[Topology]
+ with MetricDataGenerator {
+
+ private[trends] def initialize(builder: StreamsBuilder): Topology = {
+ val consumed = Consumed.`with`(kafkaConfig.autoOffsetReset)
+ .withKeySerde(new StringSerde)
+ .withValueSerde(new SpanSerde)
+ .withTimestampExtractor(kafkaConfig.timestampExtractor)
+
+ builder
+ .stream(kafkaConfig.consumeTopic, consumed)
+ .filter((_: String, span: Span) => isValidSpan(span, transformConfig.blacklistedServices))
+ .flatMap[String, MetricData]((_: String, span: Span) => mapToMetricDataKeyValue(span))
+ .to(kafkaConfig.produceTopic, Produced.`with`(new StringSerde(), new MetricTankSerde()))
+
+ builder.build()
+ }
+
+ private def mapToMetricDataKeyValue(span: Span): java.lang.Iterable[KeyValue[String, MetricData]] = {
+ val metricData: Seq[MetricData] = generateMetricDataList(span,
+ allTransformers,
+ transformConfig.encoder,
+ transformConfig.enableMetricPointServiceLevelGeneration)
+
+ metricData.map {
+ md => new KeyValue[String, MetricData](generateKey(md.getMetricDefinition), md)
+ }.asJavaCollection
+ }
+
+ override def get(): Topology = {
+ val builder = new StreamsBuilder()
+ initialize(builder)
+ }
+}
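A hedged sketch of how this topology provider is consumed; in production `StreamsFactory` performs the same call, and the configuration here is assumed to come from the `base.conf` above:

```scala
import com.expedia.www.haystack.trends.Streams
import com.expedia.www.haystack.trends.config.AppConfiguration

// source(proto-spans) -> filter(valid, non-blacklisted) -> flatMap(span -> metric points) -> sink(metric-data-points)
val appConfiguration = new AppConfiguration()
val streams = new Streams(appConfiguration.kafkaConfig, appConfiguration.transformerConfiguration)
val topology = streams.get()
println(topology.describe())
```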
diff --git a/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/config/AppConfiguration.scala b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/config/AppConfiguration.scala
new file mode 100644
index 000000000..0172267c9
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/config/AppConfiguration.scala
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.config
+
+import java.util.Properties
+
+import com.expedia.www.haystack.commons.config.ConfigurationLoader
+import com.expedia.www.haystack.commons.entities.encoders.EncoderFactory
+import com.expedia.www.haystack.trends.config.entities.{KafkaConfiguration, TransformerConfiguration}
+import com.typesafe.config.Config
+import org.apache.kafka.streams.StreamsConfig
+import org.apache.kafka.streams.Topology.AutoOffsetReset
+import org.apache.kafka.streams.processor.TimestampExtractor
+
+import scala.collection.JavaConverters._
+import scala.util.matching.Regex
+
+class AppConfiguration {
+ private val config = ConfigurationLoader.loadConfigFileWithEnvOverrides()
+
+ val healthStatusFilePath: String = config.getString("health.status.path")
+
+ /**
+ *
+ * @return transformer related configs
+ */
+ def transformerConfiguration: TransformerConfiguration = {
+ val encoderType = config.getString("metricpoint.encoder.type")
+ TransformerConfiguration(EncoderFactory.newInstance(encoderType),
+ config.getBoolean("enable.metricpoint.service.level.generation"),
+ config.getStringList("blacklist.services").asScala.toList.map(x => new Regex(x))
+ )
+ }
+
+ /**
+ *
+ * @return streams configuration object
+ */
+ def kafkaConfig: KafkaConfiguration = {
+
+ // verify if the applicationId and bootstrap server config are non empty
+ def verifyRequiredProps(props: Properties): Unit = {
+ require(props.getProperty(StreamsConfig.APPLICATION_ID_CONFIG).nonEmpty)
+ require(props.getProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG).nonEmpty)
+ }
+
+ def addProps(config: Config, props: Properties, prefix: (String) => String = identity): Unit = {
+ config.entrySet().asScala.foreach(kv => {
+ val propKeyName = prefix(kv.getKey)
+ props.setProperty(propKeyName, kv.getValue.unwrapped().toString)
+ })
+ }
+
+ val kafka = config.getConfig("kafka")
+ val producerConfig = kafka.getConfig("producer")
+ val consumerConfig = kafka.getConfig("consumer")
+ val streamsConfig = kafka.getConfig("streams")
+
+ val props = new Properties
+
+ // add stream specific properties
+ addProps(streamsConfig, props)
+
+ // validate props
+ verifyRequiredProps(props)
+
+ val timestampExtractor = Class.forName(props.getProperty("timestamp.extractor",
+ "org.apache.kafka.streams.processor.WallclockTimestampExtractor"))
+
+    KafkaConfiguration(
+      streamsConfig = new StreamsConfig(props),
+      produceTopic = producerConfig.getString("topic"),
+      consumeTopic = consumerConfig.getString("topic"),
+      autoOffsetReset =
+        if (streamsConfig.hasPath("auto.offset.reset")) AutoOffsetReset.valueOf(streamsConfig.getString("auto.offset.reset").toUpperCase)
+        else AutoOffsetReset.LATEST,
+      timestampExtractor = timestampExtractor.newInstance().asInstanceOf[TimestampExtractor],
+      closeTimeoutInMs = kafka.getLong("close.timeout.ms"))
+ }
+}
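`ConfigurationLoader.loadConfigFileWithEnvOverrides` lets environment variables override file settings; `ConfigurationLoaderSpec` below exercises the `HAYSTACK_PROP_` prefix convention, under which `HAYSTACK_PROP_KAFKA_PRODUCER_TOPIC` maps to `kafka.producer.topic`. A sketch, with the topic name being a hypothetical value:

```scala
import com.expedia.www.haystack.trends.config.AppConfiguration

// hypothetical: the process was launched with HAYSTACK_PROP_KAFKA_PRODUCER_TOPIC=custom-metric-points
val config = new AppConfiguration()
assert(config.kafkaConfig.produceTopic == "custom-metric-points")
```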
diff --git a/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/config/entities/KafkaConfiguration.scala b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/config/entities/KafkaConfiguration.scala
new file mode 100644
index 000000000..ff2efc96a
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/config/entities/KafkaConfiguration.scala
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.config.entities
+
+import org.apache.kafka.streams.StreamsConfig
+import org.apache.kafka.streams.Topology.AutoOffsetReset
+import org.apache.kafka.streams.processor.TimestampExtractor
+
+/**
+ * @param streamsConfig config object to be used for initializing KafkaStreams
+ * @param produceTopic producer topic
+ * @param consumeTopic consumer topic
+ * @param autoOffsetReset auto offset reset policy
+ * @param timestampExtractor timestamp extractor
+ * @param closeTimeoutInMs timeout for closing kafka streams in ms
+ */
+case class KafkaConfiguration(streamsConfig: StreamsConfig,
+ produceTopic: String,
+ consumeTopic: String,
+ autoOffsetReset: AutoOffsetReset,
+ timestampExtractor: TimestampExtractor,
+ closeTimeoutInMs: Long)
diff --git a/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/config/entities/TransformerConfiguration.scala b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/config/entities/TransformerConfiguration.scala
new file mode 100644
index 000000000..5943178c6
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/config/entities/TransformerConfiguration.scala
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.config.entities
+
+import com.expedia.www.haystack.commons.entities.encoders.Encoder
+
+import scala.util.matching.Regex
+
+/**
+  * @param encoder config for encoder type in metric point key
+  * @param enableMetricPointServiceLevelGeneration config for also generating service level trends
+  * @param blacklistedServices regexes matching services whose spans should not generate trends
+  */
+case class TransformerConfiguration(encoder: Encoder,
+ enableMetricPointServiceLevelGeneration: Boolean,
+ blacklistedServices: List[Regex])
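A sketch of a hand-built configuration, as might be used in tests; the blacklist regex is a hypothetical example:

```scala
import com.expedia.www.haystack.commons.entities.encoders.PeriodReplacementEncoder
import com.expedia.www.haystack.trends.config.entities.TransformerConfiguration

import scala.util.matching.Regex

val transformerConfig = TransformerConfiguration(
  encoder = new PeriodReplacementEncoder,
  enableMetricPointServiceLevelGeneration = true,
  blacklistedServices = List(new Regex("^internal-.*")))
```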
diff --git a/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/transformer/MetricDataTransformer.scala b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/transformer/MetricDataTransformer.scala
new file mode 100644
index 000000000..7caf35839
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/transformer/MetricDataTransformer.scala
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.transformer
+
+import java.util
+
+import com.expedia.metrics.{MetricData, MetricDefinition, TagCollection}
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.commons.entities.TagKeys._
+import com.expedia.www.haystack.commons.entities.encoders.{Encoder, PeriodReplacementEncoder}
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+
+
+trait MetricDataTransformer extends MetricsSupport {
+
+ protected val PRODUCT = "haystack"
+ protected var encoder: Encoder = new PeriodReplacementEncoder
+
+ def mapSpan(span: Span, serviceOnlyFlag: Boolean, encoder: Encoder): List[MetricData] = {
+ this.encoder = encoder
+ mapSpan(span, serviceOnlyFlag)
+ }
+
+ protected def mapSpan(span: Span, serviceOnlyFlag: Boolean): List[MetricData]
+
+  // span start time is expected in microseconds; convert to epoch seconds for the metric point timestamp
+  protected def getDataPointTimestamp(span: Span): Long = span.getStartTime / 1000000
+
+ protected def getMetricData(metricName: String,
+ metricTags: util.LinkedHashMap[String, String],
+ metricType: String,
+ metricUnit: String,
+ value: Double,
+ timestamp: Long): MetricData = {
+ val tags = new util.LinkedHashMap[String, String] {
+ putAll(metricTags)
+ put(MetricDefinition.MTYPE, metricType)
+ put(MetricDefinition.UNIT, metricUnit)
+ put(PRODUCT_KEY, PRODUCT)
+ }
+ val metricDefinition = new MetricDefinition(metricName, new TagCollection(tags), TagCollection.EMPTY)
+ val metricData = new MetricData(metricDefinition, value, timestamp)
+ metricData
+ }
+
+ /**
+ * This function creates the common metric tags from a span object.
+    * Every metric point must have the operationName and serviceName in its tags; the individual transformer
+ * can add more tags to the metricPoint.
+ *
+ * @param span incoming span
+ * @return metric tags in the form of HashMap of string,string
+ */
+ protected def createCommonMetricTags(span: Span): util.LinkedHashMap[String, String] = {
+ new util.LinkedHashMap[String, String] {
+ put(SERVICE_NAME_KEY, encoder.encode(span.getServiceName))
+ put(OPERATION_NAME_KEY, encoder.encode(span.getOperationName))
+ }
+ }
+
+ protected def createServiceOnlyMetricTags(span: Span): util.LinkedHashMap[String, String] = {
+ new util.LinkedHashMap[String, String] {
+ put(SERVICE_NAME_KEY, encoder.encode(span.getServiceName))
+ }
+ }
+}
+
+object MetricDataTransformer {
+ val allTransformers = List(SpanDurationMetricDataTransformer, SpanStatusMetricDataTransformer)
+}
+
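Because transformers are stateless objects enumerated in `allTransformers`, adding a new trend amounts to implementing the trait. A hedged sketch of a hypothetical transformer follows; the metric name, type, and unit are assumptions for illustration:

```scala
import com.expedia.metrics.MetricData
import com.expedia.open.tracing.Span

// hypothetical transformer emitting a constant-value "received-span" point per span
object SpanReceivedMetricDataTransformer extends MetricDataTransformer {
  override protected def mapSpan(span: Span, serviceOnlyFlag: Boolean): List[MetricData] =
    List(getMetricData("received-span", createCommonMetricTags(span), "count", "short", 1, getDataPointTimestamp(span)))
}
```

To take effect, such a transformer would also need to be added to `MetricDataTransformer.allTransformers`.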
diff --git a/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/transformer/SpanDurationMetricDataTransformer.scala b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/transformer/SpanDurationMetricDataTransformer.scala
new file mode 100644
index 000000000..5005b8121
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/transformer/SpanDurationMetricDataTransformer.scala
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.transformer
+
+import com.expedia.metrics.MetricData
+import com.expedia.open.tracing.Span
+
+/**
+  * This transformer reads a span and creates a duration metric point whose value is the span's duration.
+  */
+trait SpanDurationMetricDataTransformer extends MetricDataTransformer {
+
+ private val spanDurationMetricPoints = metricRegistry.meter("metricpoint.span.duration")
+
+ val DURATION_METRIC_NAME = "duration"
+ val MTYPE = "gauge"
+ val UNIT = "microseconds"
+
+ override def mapSpan(span: Span, serviceOnlyFlag: Boolean): List[MetricData] = {
+ spanDurationMetricPoints.mark()
+
+ var metricDataList = List(getMetricData(DURATION_METRIC_NAME, createCommonMetricTags(span), MTYPE, UNIT, span.getDuration, getDataPointTimestamp(span)))
+ if (serviceOnlyFlag) {
+ metricDataList = metricDataList :+
+ getMetricData(DURATION_METRIC_NAME, createServiceOnlyMetricTags(span), MTYPE, UNIT, span.getDuration, getDataPointTimestamp(span))
+ }
+ metricDataList
+ }
+
+}
+
+object SpanDurationMetricDataTransformer extends SpanDurationMetricDataTransformer
+
diff --git a/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/transformer/SpanStatusMetricDataTransformer.scala b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/transformer/SpanStatusMetricDataTransformer.scala
new file mode 100644
index 000000000..e0440ae93
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/main/scala/com/expedia/www/haystack/trends/transformer/SpanStatusMetricDataTransformer.scala
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.transformer
+
+import com.expedia.metrics.MetricData
+import com.expedia.open.tracing.Span
+import com.expedia.open.tracing.Tag.TagType
+import com.expedia.www.haystack.commons.entities.TagKeys
+
+import scala.collection.JavaConverters._
+
+
+/**
+  * This transformer generates a success or a failure metric point based on the span's error tag.
+  */
+trait SpanStatusMetricDataTransformer extends MetricDataTransformer {
+ private val spanSuccessMetricPoints = metricRegistry.meter("metricpoint.span.success")
+ private val spanFailuresMetricPoints = metricRegistry.meter("metricpoint.span.failure")
+
+ val SUCCESS_METRIC_NAME = "success-span"
+ val FAILURE_METRIC_NAME = "failure-span"
+ val MTYPE = "gauge"
+ val UNIT = "short"
+
+ override def mapSpan(span: Span, serviceOnlyFlag: Boolean): List[MetricData] = {
+ var metricName: String = null
+
+ if (isError(span)) {
+ spanFailuresMetricPoints.mark()
+ metricName = FAILURE_METRIC_NAME
+ } else {
+ spanSuccessMetricPoints.mark()
+ metricName = SUCCESS_METRIC_NAME
+ }
+
+ var metricDataList = List(getMetricData(metricName, createCommonMetricTags(span), MTYPE, UNIT, 1, getDataPointTimestamp(span)))
+
+ if (serviceOnlyFlag) {
+ metricDataList = metricDataList :+ getMetricData(metricName, createServiceOnlyMetricTags(span), MTYPE, UNIT, 1, getDataPointTimestamp(span))
+ }
+ metricDataList
+ }
+
+  protected def isError(span: Span): Boolean = {
+    span.getTagsList.asScala
+      .find(tag => tag.getKey.equalsIgnoreCase(TagKeys.ERROR_KEY))
+      .exists { tag =>
+        if (TagType.BOOL == tag.getType) tag.getVBool
+        else if (TagType.STRING == tag.getType) !"false".equalsIgnoreCase(tag.getVStr)
+        else true
+      }
+  }
+}
+
+object SpanStatusMetricDataTransformer extends SpanStatusMetricDataTransformer
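The error handling above treats a boolean `error` tag literally, a string tag as a failure unless it equals "false" (ignoring case), and any other tag type as a failure. A sketch, with the span fields chosen for illustration:

```scala
import com.expedia.open.tracing.Tag.TagType
import com.expedia.open.tracing.{Span, Tag}
import com.expedia.www.haystack.commons.entities.encoders.PeriodReplacementEncoder

val errorTag = Tag.newBuilder().setKey("error").setType(TagType.BOOL).setVBool(true).build()
val span = Span.newBuilder()
  .setServiceName("svc")
  .setOperationName("op")
  .addTags(errorTag)
  .build()

// with serviceOnlyFlag = false this yields a single "failure-span" metric point
val metricData = SpanStatusMetricDataTransformer.mapSpan(span, serviceOnlyFlag = false, new PeriodReplacementEncoder)
```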
diff --git a/trends/span-timeseries-transformer/src/test/resources/config/base.conf b/trends/span-timeseries-transformer/src/test/resources/config/base.conf
new file mode 100644
index 000000000..5ed779efd
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/test/resources/config/base.conf
@@ -0,0 +1,25 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-span-timeseries-transformer-v2"
+ bootstrap.servers = "kafkasvc:9092"
+ num.stream.threads = 4
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ timestamp.extractor = "com.expedia.www.haystack.commons.kstreams.SpanTimestampExtractor"
+ }
+
+ producer {
+ topic = "metric-data-points"
+ }
+
+ consumer {
+ topic = "proto-spans"
+ }
+}
+
+metricpoint.encoder.type = "periodreplacement"
+enable.metricpoint.service.level.generation=true
diff --git a/trends/span-timeseries-transformer/src/test/resources/logback-test.xml b/trends/span-timeseries-transformer/src/test/resources/logback-test.xml
new file mode 100644
index 000000000..adfa02c68
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/test/resources/logback-test.xml
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/FeatureSpec.scala b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/FeatureSpec.scala
new file mode 100644
index 000000000..b91e9087e
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/FeatureSpec.scala
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.feature
+
+import java._
+import java.util.Properties
+
+import com.expedia.metrics.MetricData
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.commons.entities.encoders.Base64Encoder
+import com.expedia.www.haystack.trends.config.AppConfiguration
+import com.expedia.www.haystack.trends.config.entities.{KafkaConfiguration, TransformerConfiguration}
+import org.apache.kafka.streams.StreamsConfig
+import org.easymock.EasyMock
+import org.scalatest.easymock.EasyMockSugar
+import org.scalatest.{FeatureSpecLike, GivenWhenThen, Matchers}
+
+
+trait FeatureSpec extends FeatureSpecLike with GivenWhenThen with Matchers with EasyMockSugar {
+
+ protected val METRIC_TYPE = "gauge"
+
+ def generateTestSpan(duration: Long): Span = {
+ val operationName = "testSpan"
+ val serviceName = "testService"
+ Span.newBuilder()
+ .setDuration(duration)
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .build()
+ }
+
+ protected def mockAppConfig: AppConfiguration = {
+ val kafkaConsumeTopic = "test-consume"
+ val kafkaProduceTopic = "test-produce"
+ val streamsConfig = new Properties()
+ streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, "test-app")
+ streamsConfig.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "test-kafka-broker")
+    val kafkaConfig = KafkaConfiguration(new StreamsConfig(streamsConfig), kafkaProduceTopic, kafkaConsumeTopic, null, null, 0L)
+ val transformerConfig = TransformerConfiguration(new Base64Encoder, enableMetricPointServiceLevelGeneration = true, List())
+ val appConfiguration = mock[AppConfiguration]
+
+ expecting {
+ appConfiguration.kafkaConfig.andReturn(kafkaConfig).anyTimes()
+ appConfiguration.transformerConfiguration.andReturn(transformerConfig).anyTimes()
+ }
+ EasyMock.replay(appConfiguration)
+ appConfiguration
+ }
+
+ protected def getMetricDataTags(metricData : MetricData): util.Map[String, String] = {
+ metricData.getMetricDefinition.getTags.getKv
+ }
+
+}
diff --git a/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/config/ConfigurationLoaderSpec.scala b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/config/ConfigurationLoaderSpec.scala
new file mode 100644
index 000000000..8225382fb
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/config/ConfigurationLoaderSpec.scala
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.feature.tests.config
+
+import com.expedia.www.haystack.commons.entities.encoders.PeriodReplacementEncoder
+import com.expedia.www.haystack.trends.config.AppConfiguration
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+
+class ConfigurationLoaderSpec extends FeatureSpec {
+
+ feature("Configuration loader") {
+
+
+ scenario("should load the health status config from base.conf") {
+
+ Given("A config file at base config file containing config for health status file path")
+ val healthStatusFilePath = "/app/isHealthy"
+
+ When("When the configuration is loaded in app configuration")
+ val projectConfig = new AppConfiguration()
+
+ Then("the healthStatusFilePath should be correct")
+ projectConfig.healthStatusFilePath shouldEqual healthStatusFilePath
+ }
+
+ scenario("should load the metric point enable period replacement config from base.conf") {
+
+ Given("A config file at base config file containing config for enable period replacement")
+ val enableMetricPointServiceLevelGeneration = true
+
+ When("When the configuration is loaded in app configuration")
+ val projectConfig = new AppConfiguration()
+
+ Then("the encoder should be correct")
+ projectConfig.transformerConfiguration.encoder shouldBe a[PeriodReplacementEncoder]
+ projectConfig.transformerConfiguration.enableMetricPointServiceLevelGeneration shouldEqual enableMetricPointServiceLevelGeneration
+ }
+
+ scenario("should load the kafka config from base.conf") {
+
+ Given("A config file at base config file containing kafka ")
+
+ When("When the configuration is loaded in app configuration")
+ val projectConfig = new AppConfiguration()
+
+ Then("It should create the write configuration object based on the file contents")
+ val kafkaConfig = projectConfig.kafkaConfig
+ kafkaConfig.consumeTopic shouldBe "proto-spans"
+ }
+
+
+ scenario("should override configuration based on environment variable") {
+
+
+ Given("A config file at base config file containing config for kafka")
+
+ When("When the configuration is loaded in app configuration")
+ val projectConfig = new AppConfiguration()
+
+ Then("It should override the configuration object based on the environment variable if it exists")
+
+ val kafkaProduceTopic = sys.env.getOrElse("HAYSTACK_PROP_KAFKA_PRODUCER_TOPIC", "metric-data-points")
+ val kafkaConfig = projectConfig.kafkaConfig
+ kafkaConfig.produceTopic shouldBe kafkaProduceTopic
+ }
+
+ }
+}
diff --git a/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/StreamsSpec.scala b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/StreamsSpec.scala
new file mode 100644
index 000000000..751ad7b3c
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/StreamsSpec.scala
@@ -0,0 +1,31 @@
+package com.expedia.www.haystack.trends.feature.tests.kstreams
+
+import com.expedia.www.haystack.trends.Streams
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+import org.apache.kafka.streams.StreamsBuilder
+
+
+class StreamsSpec extends FeatureSpec {
+
+ feature("Streams should build a topology") {
+
+ scenario("a valid kafka configuration") {
+
+ Given("an valid kafka configuration")
+
+ val appConfig = mockAppConfig
+ val streams = new Streams(appConfig.kafkaConfig, appConfig.transformerConfiguration)
+ val streamBuilder = mock[StreamsBuilder]
+
+
+ When("the stream topology is built")
+ val topology = streams.get()
+
+ Then("it should be able to build a successful topology")
+ topology should not be null
+
+ Then("then it should return an empty state store")
+ topology.describe().globalStores().isEmpty shouldBe true
+ }
+ }
+}
diff --git a/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/transformer/MetricDataGeneratorSpec.scala b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/transformer/MetricDataGeneratorSpec.scala
new file mode 100644
index 000000000..43e92a40f
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/transformer/MetricDataGeneratorSpec.scala
@@ -0,0 +1,167 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.feature.tests.transformer
+
+import com.expedia.metrics.MetricDefinition
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.commons.entities.TagKeys
+import com.expedia.www.haystack.commons.entities.encoders.PeriodReplacementEncoder
+import com.expedia.www.haystack.trends.MetricDataGenerator
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+import com.expedia.www.haystack.trends.transformer.{SpanDurationMetricDataTransformer, SpanStatusMetricDataTransformer}
+
+import scala.collection.JavaConverters._
+import scala.util.matching.Regex
+
+
+class MetricDataGeneratorSpec extends FeatureSpec with MetricDataGenerator {
+
+ private def getMetricDataTransformers = {
+ List(SpanDurationMetricDataTransformer, SpanStatusMetricDataTransformer)
+ }
+
+ feature("The metricData generator must generate list of metricData given a span object") {
+
+ scenario("any valid span object") {
+ val operationName = "testSpan"
+ val serviceName = "testService"
+ Given("a valid span")
+ val span = Span.newBuilder()
+ .setDuration(System.currentTimeMillis())
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .setStartTime(System.currentTimeMillis() * 1000) // in micro seconds
+ .addTags(Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setVBool(false))
+ .build()
+ When("its asked to map to metricPoints")
+ val isValid = isValidSpan(span, Nil)
+ val metricDataList = generateMetricDataList(span, getMetricDataTransformers, new PeriodReplacementEncoder)
+
+ Then("the number of metricPoints returned should be equal to the number of metricPoint transformers")
+ metricDataList should not be empty
+ val metricPointTransformers = getMetricDataTransformers
+ metricDataList.size shouldEqual metricPointTransformers.size * 2
+
+ Then("each metricPoint should have the timestamps in seconds and which should equal to the span timestamp")
+ isValid shouldBe true
+ metricDataList.foreach(metricData => {
+ metricData.getTimestamp shouldEqual span.getStartTime / 1000000
+ })
+
+ Then("each metricPoint should have the metric type as Metric")
+ metricDataList.foreach(metricData => {
+ getMetricDataTags(metricData).get(MetricDefinition.MTYPE) shouldEqual METRIC_TYPE
+ })
+
+ }
+
+ scenario("an invalid span object") {
+ val operationName = ""
+ val serviceName = ""
+ Given("an invalid span")
+ val span = Span.newBuilder()
+ .setDuration(System.currentTimeMillis())
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .addTags(Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setVBool(false))
+ .build()
+
+ When("its asked to map to metricPoints")
+ val isValid = isValidSpan(span, Nil)
+ Then("It should return a metricPoint validation exception")
+ isValid shouldBe false
+ metricRegistry.meter("span.validation.failure").getCount shouldBe 1
+ }
+
+ scenario("a span object with a valid service Name") {
+ val operationName = "testSpan"
+ val serviceName = "testService"
+
+ Given("a valid span")
+ val span = Span.newBuilder()
+ .setDuration(System.currentTimeMillis())
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .addTags(Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setVBool(false))
+ .build()
+ val encoder = new PeriodReplacementEncoder
+
+ When("its asked to map to metricPoints")
+ val isValid = isValidSpan(span, Nil)
+ val metricDataList = generateMetricDataList(span, getMetricDataTransformers, encoder)
+
+ Then("it should create metricPoints with service name as one its keys")
+ isValid shouldBe true
+ metricDataList.map(metricData => {
+ val tags = getMetricDataTags(metricData).asScala
+ tags.get(TagKeys.SERVICE_NAME_KEY) should not be None
+ tags.get(TagKeys.SERVICE_NAME_KEY) shouldEqual Some(encoder.encode(serviceName))
+ })
+ }
+
+ scenario("a span object with a blacklisted service Name") {
+ val operationName = "testSpan"
+ val blacklistedServiceName = "testService"
+
+ Given("a valid span with a blacklisted service name")
+ val span = Span.newBuilder()
+ .setDuration(System.currentTimeMillis())
+ .setOperationName(operationName)
+ .setServiceName(blacklistedServiceName)
+ .addTags(Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setVBool(false))
+ .build()
+
+ When("its asked to map to metricPoints")
+ val isValid = isValidSpan(span, List(new Regex(blacklistedServiceName)))
+ Then("It should return a metricPoint validation exception")
+
+ isValid shouldBe false
+ metricRegistry.meter("span.validation.black.listed").getCount shouldBe 1
+ }
+
+ scenario("a span object with a blacklisted regex service Name") {
+ val serviceName = "testservice"
+
+ Given("a valid span with a blacklisted service name")
+ val span = Span.newBuilder().setDuration(System.currentTimeMillis()).setOperationName("testSpan").setServiceName(serviceName)
+ .addTags(Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setVBool(false)).build()
+
+ When("its asked to map to metricPoints")
+ val isValid = isValidSpan(span, List(new Regex("^[a-z]*$")))
+
+ Then("It should return a metricPoint")
+ isValid shouldBe false
+ }
+
+ scenario("a span object with a non-blacklisted regex service Name") {
+ val serviceName = "testService"
+
+ Given("a valid span with a blacklisted service name")
+ val span = Span.newBuilder().setDuration(System.currentTimeMillis()).setOperationName("testSpan").setServiceName(serviceName)
+ .addTags(Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setVBool(false)).build()
+
+ When("its asked to map to metricPoints")
+ val isValid = isValidSpan(span, List(new Regex("^[a-z]*")))
+ val metricDataList = generateMetricDataList(span, getMetricDataTransformers, new PeriodReplacementEncoder, serviceOnlyFlag = false)
+
+ Then("It should return a metricPoint validation exception")
+ isValid shouldBe false
+ metricDataList should not be empty
+ }
+ }
+}
diff --git a/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/transformer/SpanDurationMetricDataTransformerSpec.scala b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/transformer/SpanDurationMetricDataTransformerSpec.scala
new file mode 100644
index 000000000..30e2d447c
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/transformer/SpanDurationMetricDataTransformerSpec.scala
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.feature.tests.transformer
+
+import com.expedia.www.haystack.commons.entities.TagKeys
+import com.expedia.www.haystack.commons.entities.encoders.PeriodReplacementEncoder
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+import com.expedia.www.haystack.trends.transformer.SpanDurationMetricDataTransformer
+
+class SpanDurationMetricDataTransformerSpec extends FeatureSpec with SpanDurationMetricDataTransformer {
+
+ feature("metricData transformer for creating duration metricData") {
+ scenario("should have duration value in metricData for given duration in span " +
+ "and when service level generation is enabled") {
+
+ Given("a valid span object")
+ val duration = System.currentTimeMillis
+ val span = generateTestSpan(duration)
+
+ When("metricPoint is created using transformer")
+ val metricDataList = mapSpan(span, true)
+
+ Then("should only have 2 metricPoint")
+ metricDataList.length shouldEqual 2
+
+ Then("same duration should be in metricPoint value")
+ metricDataList.head.getValue shouldEqual duration
+
+
+ Then("the metric name should be duration")
+ metricDataList.head.getMetricDefinition.getKey shouldEqual DURATION_METRIC_NAME
+
+ Then("returned keys should be as expected")
+ getMetricDataTags(metricDataList.head).get(TagKeys.SERVICE_NAME_KEY) shouldEqual encoder.encode(span.getServiceName)
+ getMetricDataTags(metricDataList.head).get(TagKeys.OPERATION_NAME_KEY) shouldEqual encoder.encode(span.getOperationName)
+ getMetricDataTags(metricDataList.reverse.head).get(TagKeys.SERVICE_NAME_KEY) shouldEqual encoder.encode(span.getServiceName)
+ getMetricDataTags(metricDataList.reverse.head).get(TagKeys.OPERATION_NAME_KEY) shouldEqual null
+
+ }
+
+ scenario("should have duration value in metricPoint for given duration in span " +
+ "and when service level generation is disabled") {
+
+ Given("a valid span object")
+ val duration = System.currentTimeMillis
+ val span = generateTestSpan(duration)
+
+ When("metricData is created using transformer")
+ val metricDataList = mapSpan(span, false)
+
+ Then("should only have 1 metricPoint")
+ metricDataList.length shouldEqual 1
+
+ Then("same duration should be in metricPoint value")
+ metricDataList.head.getValue shouldEqual duration
+
+
+ Then("the metric name should be duration")
+ metricDataList.head.getMetricDefinition.getKey shouldEqual DURATION_METRIC_NAME
+
+ Then("returned keys should be as expected")
+ getMetricDataTags(metricDataList.head).get(TagKeys.SERVICE_NAME_KEY) shouldEqual encoder.encode(span.getServiceName)
+ getMetricDataTags(metricDataList.head).get(TagKeys.OPERATION_NAME_KEY) shouldEqual encoder.encode(span.getOperationName)
+ }
+ }
+}
diff --git a/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/transformer/SpanStatusMetricDataTransformerSpec.scala b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/transformer/SpanStatusMetricDataTransformerSpec.scala
new file mode 100644
index 000000000..7e8e76786
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/feature/tests/transformer/SpanStatusMetricDataTransformerSpec.scala
@@ -0,0 +1,248 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.feature.tests.transformer
+
+import com.expedia.open.tracing.Tag.TagType
+import com.expedia.open.tracing.{Span, Tag}
+import com.expedia.www.haystack.commons.entities.TagKeys
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+import com.expedia.www.haystack.trends.transformer.SpanStatusMetricDataTransformer
+
+class SpanStatusMetricDataTransformerSpec extends FeatureSpec with SpanStatusMetricDataTransformer {
+
+ feature("metricData transformer for creating status count metricData") {
+
+ scenario("should have a success-spans metricData given span which is successful " +
+ "and when service level generation is enabled") {
+
+ Given("a successful span object")
+ val operationName = "testSpan"
+ val serviceName = "testService"
+ val duration = System.currentTimeMillis
+ val span = Span.newBuilder()
+ .setDuration(duration)
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .addTags(Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setType(TagType.BOOL).setVBool(false))
+ .build()
+
+ When("metricData is created using the transformer")
+ val metricDataList = mapSpan(span, true)
+
+ Then("should only have 2 metricData")
+ metricDataList.length shouldEqual 2
+
+ Then("the metricData value should be 1")
+ metricDataList(0).getValue shouldEqual 1
+ metricDataList(1).getValue shouldEqual 1
+
+ Then("metric name should be success-spans")
+ metricDataList(0).getMetricDefinition.getKey shouldEqual SUCCESS_METRIC_NAME
+
+ Then("returned keys should be as expected")
+ getMetricDataTags(metricDataList.head).get(TagKeys.SERVICE_NAME_KEY) shouldEqual encoder.encode(span.getServiceName)
+ getMetricDataTags(metricDataList.head).get(TagKeys.OPERATION_NAME_KEY) shouldEqual encoder.encode(span.getOperationName)
+ getMetricDataTags(metricDataList.reverse.head).get(TagKeys.SERVICE_NAME_KEY) shouldEqual encoder.encode(span.getServiceName)
+ getMetricDataTags(metricDataList.reverse.head).get(TagKeys.OPERATION_NAME_KEY) shouldEqual null
+ }
+
+ scenario("should have a failure-spans metricData given span which is erroneous " +
+ "and when service level generation is enabled") {
+
+ Given("a erroneous span object")
+ val operationName = "testSpan"
+ val serviceName = "testService"
+ val duration = System.currentTimeMillis
+ val span = Span.newBuilder()
+ .setDuration(duration)
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .addTags(Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setType(TagType.BOOL).setVBool(true))
+ .build()
+
+ When("metricData is created using transformer")
+ val metricDataList = mapSpan(span, true)
+
+ Then("should only have 2 metricData")
+ metricDataList.length shouldEqual 2
+
+ Then("the metricData value should be 1")
+ metricDataList(0).getValue shouldEqual 1
+ metricDataList(1).getValue shouldEqual 1
+
+ Then("metric name should be failure-spans")
+ metricDataList(0).getMetricDefinition.getKey shouldEqual FAILURE_METRIC_NAME
+ }
+
+ scenario("should have a failure-span metricData if the error tag is a true string") {
+ Given("a failure span object")
+ val operationName = "testSpan"
+ val serviceName = "testService"
+ val duration = System.currentTimeMillis
+ val span = Span.newBuilder()
+ .setDuration(duration)
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .addTags(Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setVStr("true"))
+ .build()
+
+ When("metricData is created using transformer")
+ val metricDataList = mapSpan(span, true)
+
+ Then("metric name should be failure-spans")
+ metricDataList(0).getMetricDefinition.getKey shouldEqual FAILURE_METRIC_NAME
+ }
+
+ scenario("should have a failure-span metricData if the error tag not a false string") {
+ Given("a failure span object")
+ val operationName = "testSpan"
+ val serviceName = "testService"
+ val duration = System.currentTimeMillis
+ val span = Span.newBuilder()
+ .setDuration(duration)
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .addTags(Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setVStr("500"))
+ .build()
+
+ When("metricData is created using transformer")
+ val metricDataList = mapSpan(span, true)
+
+ Then("metric name should be failure-spans")
+ metricDataList(0).getMetricDefinition.getKey shouldEqual FAILURE_METRIC_NAME
+ }
+
+ scenario("should have a failure-span metricData if the error tag exists but is not a boolean or string") {
+ Given("a failure span object")
+ val operationName = "testSpan"
+ val serviceName = "testService"
+ val duration = System.currentTimeMillis
+ val span = Span.newBuilder()
+ .setDuration(duration)
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .addTags(Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setType(TagType.LONG).setVLong(100L))
+ .build()
+
+ When("metricData is created using transformer")
+ val metricDataList = mapSpan(span, true)
+
+ Then("metric name should be failure-spans")
+ metricDataList(0).getMetricDefinition.getKey shouldEqual FAILURE_METRIC_NAME
+ }
+
+ scenario("should return a success span when error key is missing in span tags and when service level generation is enabled") {
+
+ Given("a span object which missing error tag")
+ val operationName = "testSpan"
+ val serviceName = "testService"
+ val duration = System.currentTimeMillis
+ val span = Span.newBuilder()
+ .setDuration(duration)
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .build()
+
+ When("metricData is created using transformer")
+ val metricDataList = mapSpan(span, true)
+
+ Then("should return metricData List")
+ metricDataList.length shouldEqual 2
+ metricDataList(0).getMetricDefinition.getKey shouldEqual SUCCESS_METRIC_NAME
+ metricDataList(1).getMetricDefinition.getKey shouldEqual SUCCESS_METRIC_NAME
+ }
+
+ scenario("should have a success-spans metricData given span which is successful " +
+ "and when service level generation is disabled") {
+
+ Given("a successful span object")
+ val operationName = "testSpan"
+ val serviceName = "testService"
+ val duration = System.currentTimeMillis
+ val span = Span.newBuilder()
+ .setDuration(duration)
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .addTags(Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setType(TagType.BOOL).setVBool(false))
+ .build()
+
+ When("metricData is created using the transformer")
+ val metricDataList = mapSpan(span, false)
+
+ Then("should only have 1 metricData")
+ metricDataList.length shouldEqual 1
+
+ Then("the metricData value should be 1")
+ metricDataList(0).getValue shouldEqual 1
+
+ Then("metric name should be success-spans")
+ metricDataList(0).getMetricDefinition.getKey shouldEqual SUCCESS_METRIC_NAME
+
+ Then("returned keys should be as expected")
+ getMetricDataTags(metricDataList.head).get(TagKeys.SERVICE_NAME_KEY) shouldEqual encoder.encode(span.getServiceName)
+ getMetricDataTags(metricDataList.head).get(TagKeys.OPERATION_NAME_KEY) shouldEqual encoder.encode(span.getOperationName)
+ }
+
+ scenario("should have a failure-spans metricData given span which is erroneous " +
+ "and when service level generation is disabled") {
+
+ Given("a erroneous span object")
+ val operationName = "testSpan"
+ val serviceName = "testService"
+ val duration = System.currentTimeMillis
+ val span = Span.newBuilder()
+ .setDuration(duration)
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .addTags(Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setType(TagType.BOOL).setVBool(true))
+ .build()
+
+ When("metricData is created using transformer")
+ val metricDataList = mapSpan(span, false)
+
+ Then("should only have 1 metricData")
+ metricDataList.length shouldEqual 1
+
+ Then("the metricData value should be 1")
+ metricDataList(0).getValue shouldEqual 1
+
+ Then("metric name should be failure-spans")
+ metricDataList(0).getMetricDefinition.getKey shouldEqual FAILURE_METRIC_NAME
+ }
+
+ scenario("should return a success span when error key is missing in span tags and when service level generation is disabled") {
+
+ Given("a span object which missing error tag")
+ val operationName = "testSpan"
+ val serviceName = "testService"
+ val duration = System.currentTimeMillis
+ val span = Span.newBuilder()
+ .setDuration(duration)
+ .setOperationName(operationName)
+ .setServiceName(serviceName)
+ .build()
+
+ When("metricData is created using transformer")
+ val metricPoints = mapSpan(span, false)
+
+ Then("should return metricData")
+ metricPoints.length shouldEqual 1
+ metricPoints(0).getMetricDefinition.getKey shouldEqual SUCCESS_METRIC_NAME
+ }
+ }
+}
diff --git a/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/integration/IntegrationTestSpec.scala b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/integration/IntegrationTestSpec.scala
new file mode 100644
index 000000000..74c26e5e2
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/integration/IntegrationTestSpec.scala
@@ -0,0 +1,120 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.integration
+
+
+import java.util.Properties
+import java.util.concurrent.{Executors, ScheduledExecutorService, ScheduledFuture, TimeUnit}
+
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.commons.kstreams.serde.SpanSerde
+import com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricTankSerde
+import org.apache.kafka.clients.consumer.ConsumerConfig
+import org.apache.kafka.clients.producer.ProducerConfig
+import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
+import org.apache.kafka.streams.integration.utils.{EmbeddedKafkaCluster, IntegrationTestUtils}
+import org.apache.kafka.streams.{KeyValue, StreamsConfig}
+import org.scalatest._
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration.FiniteDuration
+
+object EmbeddedKafka {
+ val CLUSTER = new EmbeddedKafkaCluster(1)
+ CLUSTER.start()
+}
+
+class IntegrationTestSpec extends WordSpec with GivenWhenThen with Matchers with BeforeAndAfterAll with BeforeAndAfterEach {
+
+ protected val PUNCTUATE_INTERVAL_MS = 2000
+ protected val PRODUCER_CONFIG = new Properties()
+ protected val RESULT_CONSUMER_CONFIG = new Properties()
+ protected val STREAMS_CONFIG = new Properties()
+ protected val scheduledJobFuture: ScheduledFuture[_] = null
+ protected val INPUT_TOPIC = "spans"
+ protected val OUTPUT_TOPIC = "metricpoints"
+ protected var scheduler: ScheduledExecutorService = _
+ protected var APP_ID = "haystack-trends"
+ protected val METRIC_TYPE = "gauge"
+ protected var CHANGELOG_TOPIC = ""
+ protected var KAFKA_ENDPOINT = "192.168.99.100:9092"
+
+
+ override def beforeAll() {
+ scheduler = Executors.newSingleThreadScheduledExecutor()
+ }
+
+ override def afterAll(): Unit = {
+ scheduler.shutdownNow()
+ }
+
+ override def beforeEach() {
+ val metricTankSerde = new MetricTankSerde()
+
+ EmbeddedKafka.CLUSTER.createTopic(INPUT_TOPIC)
+ EmbeddedKafka.CLUSTER.createTopic(OUTPUT_TOPIC)
+
+ PRODUCER_CONFIG.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, EmbeddedKafka.CLUSTER.bootstrapServers)
+ PRODUCER_CONFIG.put(ProducerConfig.ACKS_CONFIG, "all")
+ PRODUCER_CONFIG.put(ProducerConfig.RETRIES_CONFIG, "0")
+ PRODUCER_CONFIG.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
+ PRODUCER_CONFIG.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, new SpanSerde().serializer().getClass)
+
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, EmbeddedKafka.CLUSTER.bootstrapServers)
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.GROUP_ID_CONFIG, APP_ID + "-result-consumer")
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer])
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, metricTankSerde.deserializer().getClass)
+
+ STREAMS_CONFIG.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, EmbeddedKafka.CLUSTER.bootstrapServers)
+ STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID)
+ STREAMS_CONFIG.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
+ STREAMS_CONFIG.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, "0")
+ STREAMS_CONFIG.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, "300")
+ STREAMS_CONFIG.put(StreamsConfig.STATE_DIR_CONFIG, "/tmp/kafka-streams")
+
+ IntegrationTestUtils.purgeLocalStreamsState(STREAMS_CONFIG)
+
+ CHANGELOG_TOPIC = s"$APP_ID-AggregatedMetricPointStore-changelog"
+ }
+
+ override def afterEach(): Unit = {
+ EmbeddedKafka.CLUSTER.deleteTopic(INPUT_TOPIC)
+ EmbeddedKafka.CLUSTER.deleteTopic(OUTPUT_TOPIC)
+ }
+
+ protected def produceSpansAsync(produceInterval: FiniteDuration,
+ spans: List[Span]): Unit = {
+ var currentTime = System.currentTimeMillis()
+ var idx = 0
+ scheduler.scheduleWithFixedDelay(() => {
+ if (idx < spans.size) {
+ currentTime = currentTime + ((idx * PUNCTUATE_INTERVAL_MS) / (spans.size - 1))
+ val span = spans.apply(idx)
+ val records = List(new KeyValue[String, Span](span.getTraceId, span)).asJava
+ IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(
+ INPUT_TOPIC,
+ records,
+ PRODUCER_CONFIG,
+ currentTime)
+ }
+ idx = idx + 1
+ }, 0, produceInterval.toMillis, TimeUnit.MILLISECONDS)
+ }
+}
diff --git a/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/integration/tests/TimeSeriesTransformerTopologySpec.scala b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/integration/tests/TimeSeriesTransformerTopologySpec.scala
new file mode 100644
index 000000000..491337d0b
--- /dev/null
+++ b/trends/span-timeseries-transformer/src/test/scala/com/expedia/www/haystack/trends/integration/tests/TimeSeriesTransformerTopologySpec.scala
@@ -0,0 +1,125 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.integration.tests
+
+import java.util.UUID
+
+import com.expedia.metrics.{MetricData, MetricDefinition}
+import com.expedia.open.tracing.Span
+import com.expedia.www.haystack.commons.entities.TagKeys
+import com.expedia.www.haystack.commons.entities.encoders.PeriodReplacementEncoder
+import com.expedia.www.haystack.commons.health.HealthStatusController
+import com.expedia.www.haystack.commons.kstreams.app.{StateChangeListener, StreamsFactory, StreamsRunner}
+import com.expedia.www.haystack.commons.util.MetricDefinitionKeyGenerator
+import com.expedia.www.haystack.trends.config.entities.{KafkaConfiguration, TransformerConfiguration}
+import com.expedia.www.haystack.trends.integration.IntegrationTestSpec
+import com.expedia.www.haystack.trends.transformer.MetricDataTransformer
+import com.expedia.www.haystack.trends.{MetricDataGenerator, Streams}
+import org.apache.kafka.clients.admin.AdminClient
+import org.apache.kafka.streams.Topology.AutoOffsetReset
+import org.apache.kafka.streams.integration.utils.IntegrationTestUtils
+import org.apache.kafka.streams.processor.WallclockTimestampExtractor
+import org.apache.kafka.streams.{KeyValue, StreamsConfig}
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration._
+
+class TimeSeriesTransformerTopologySpec extends IntegrationTestSpec with MetricDataGenerator {
+
+ "TimeSeries Transformer Topology" should {
+
+ "consume spans from input topic and transform them to metric data list based on available transformers" in {
+
+ Given("a set of spans and kafka specific configurations")
+ val traceId = "trace-id-dummy"
+ val spanId = "span-id-dummy"
+ val duration = 3
+ val errorFlag = false
+ val spans = generateSpans(traceId, spanId, duration, errorFlag, 10000, 8)
+ val kafkaConfig = KafkaConfiguration(new StreamsConfig(STREAMS_CONFIG), OUTPUT_TOPIC, INPUT_TOPIC, AutoOffsetReset.EARLIEST, new WallclockTimestampExtractor, 30000)
+ val transformerConfig = TransformerConfiguration(encoder = new PeriodReplacementEncoder, enableMetricPointServiceLevelGeneration = true, List())
+ val streams = new Streams(kafkaConfig, transformerConfig)
+ val factory = new StreamsFactory(streams, kafkaConfig.streamsConfig, kafkaConfig.consumeTopic)
+ val streamsRunner = new StreamsRunner(factory, new StateChangeListener(new HealthStatusController))
+
+
+ When("spans with duration and error=false are produced in 'input' topic, and kafka-streams topology is started")
+ produceSpansAsync(10.millis, spans)
+ streamsRunner.start()
+
+ Then("we should write transformed metricPoints to the 'output' topic")
+ val metricDataList: List[MetricData] = spans.flatMap(span => generateMetricDataList(span, MetricDataTransformer.allTransformers, new PeriodReplacementEncoder)) // directly call transformers to get metricPoints
+ metricDataList.size shouldBe (spans.size * MetricDataTransformer.allTransformers.size * 2) // two times because of service only metric points
+
+ val records: List[KeyValue[String, MetricData]] =
+ IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived[String, MetricData](RESULT_CONSUMER_CONFIG, OUTPUT_TOPIC, metricDataList.size, 15000).asScala.toList // get metricPoints from Kafka's output topic
+ records.map(record => {
+ record.value.getMetricDefinition.getTags.getKv.get(MetricDefinition.MTYPE) shouldEqual METRIC_TYPE
+ })
+
+ Then("same metricPoints should be created as that from transformers")
+
+ val metricDataSetTransformer: Set[MetricData] = metricDataList.toSet
+ val metricDataSetKafka: Set[MetricData] = records.map(metricDataKv => metricDataKv.value).toSet
+
+ val diffSetMetricPoint: Set[MetricData] = metricDataSetTransformer.diff(metricDataSetKafka)
+
+ metricDataList.size shouldEqual records.size
+ diffSetMetricPoint.isEmpty shouldEqual true
+
+ Then("same keys / partition should be created as that from transformers")
+ val keySetTransformer: Set[String] = metricDataList.map(metricData => MetricDefinitionKeyGenerator.generateKey(metricData.getMetricDefinition)).toSet
+ val keySetKafka: Set[String] = records.map(metricDataKv => metricDataKv.key).toSet
+
+ val diffSetKey: Set[String] = keySetTransformer.diff(keySetKafka)
+
+ keySetTransformer.size shouldEqual keySetKafka.size
+ diffSetKey.isEmpty shouldEqual true
+
+ Then("no other intermediate partitions are created after as a result of topology")
+ val adminClient: AdminClient = AdminClient.create(STREAMS_CONFIG)
+ val topicNames: Iterable[String] = adminClient.listTopics.listings().get().asScala
+ .map(topicListing => topicListing.name)
+
+ topicNames.size shouldEqual 2
+ topicNames.toSet.contains(INPUT_TOPIC) shouldEqual true
+ topicNames.toSet.contains(OUTPUT_TOPIC) shouldEqual true
+ }
+ }
+
+ private def generateSpans(traceId: String, spanId: String, duration: Int, errorFlag: Boolean, spanIntervalInMs: Long, spanCount: Int): List[Span] = {
+
+ var currentTime = System.currentTimeMillis()
+ for (i <- 1 to spanCount) yield {
+ currentTime = currentTime + i * spanIntervalInMs
+
+ val span = Span.newBuilder()
+ .setTraceId(traceId)
+ .setParentSpanId(UUID.randomUUID().toString)
+ .setSpanId(spanId)
+ .setOperationName("some-op")
+ .setStartTime(currentTime)
+ .setDuration(duration)
+ .setServiceName("some-service")
+ .addTags(com.expedia.open.tracing.Tag.newBuilder().setKey(TagKeys.ERROR_KEY).setVStr("some-error"))
+ .build()
+ span
+ }
+ }.toList
+}
+
diff --git a/trends/timeseries-aggregator/Makefile b/trends/timeseries-aggregator/Makefile
new file mode 100644
index 000000000..00051d8de
--- /dev/null
+++ b/trends/timeseries-aggregator/Makefile
@@ -0,0 +1,19 @@
+.PHONY: docker_build integration_test release all
+
+MAVEN := ../mvnw
+
+export DOCKER_ORG := expediadotcom
+export DOCKER_IMAGE_NAME := haystack-timeseries-aggregator
+
+docker_build:
+ docker build -t $(DOCKER_IMAGE_NAME) -f build/docker/Dockerfile .
+
+integration_test:
+ ${MAVEN} scoverage:integration-check
+
+# build jar, docker image and run integration tests
+all: docker_build integration_test
+
+# build all and release
+release: docker_build
+ ../deployment/scripts/publish-to-docker-hub.sh
diff --git a/trends/timeseries-aggregator/README.md b/trends/timeseries-aggregator/README.md
new file mode 100644
index 000000000..9f99b1ffd
--- /dev/null
+++ b/trends/timeseries-aggregator/README.md
@@ -0,0 +1,35 @@
+# Haystack Timeseries Aggregator
+
+haystack-timeseries-aggregator is the module which reads metric points from kafka, aggregates them based on predefined rules, and pushes the aggregated metric points back to kafka.
+
+These aggregated metric points are stored in a time-series database and can be visualized on the [haystack ui](https://github.com/ExpediaDotCom/haystack-ui).
+
+
+Haystack has another app, [span-timeseries-transformer](https://github.com/ExpediaDotCom/haystack-trends/tree/master/span-timeseries-transformer),
+which is responsible for reading spans and creating the raw metric points for aggregation.
+
+This is a simple public-static-void-main application, written in scala on top of kafka-streams, and designed to be deployed as a docker container.
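+
+The core flow can be pictured as a small kafka-streams topology. The sketch below is illustrative only: the topic names, the use of string serdes and the `aggregate` helper are assumptions, and the real aggregation is stateful and windowed rather than a plain `mapValues`.
+
+```scala
+import org.apache.kafka.streams.{StreamsBuilder, Topology}
+
+// placeholder for the rule-based aggregation step
+def aggregate(metricPoint: String): String = metricPoint
+
+def buildTopology(consumeTopic: String, produceTopic: String): Topology = {
+  val builder = new StreamsBuilder()
+  builder.stream[String, String](consumeTopic) // raw metric points in
+    .mapValues(point => aggregate(point))      // aggregate based on rules
+    .to(produceTopic)                          // aggregated metric points out
+  builder.build()
+}
+```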
+
+
+## Building
+
+#### Prerequisites:
+
+* Make sure you have Java 1.8
+* Make sure you have maven 3.3.9 or higher
+* Make sure you have docker 1.13 or higher
+
+#### Build
+
+For a full build, including unit tests, jar + docker image build and integration tests, you can run -
+```
+make all
+```
+
+#### Integration Test
+
+If you are developing and just want to run integration tests
+```
+make integration_test
+
+```
\ No newline at end of file
diff --git a/trends/timeseries-aggregator/build/docker/Dockerfile b/trends/timeseries-aggregator/build/docker/Dockerfile
new file mode 100644
index 000000000..af451d9a3
--- /dev/null
+++ b/trends/timeseries-aggregator/build/docker/Dockerfile
@@ -0,0 +1,23 @@
+FROM openjdk:8-jre
+LABEL maintainer="Haystack"
+
+ENV APP_NAME haystack-timeseries-aggregator
+ENV APP_HOME /app/bin
+ENV JMXTRANS_AGENT jmxtrans-agent-1.2.6
+ENV DOCKERIZE_VERSION v0.6.1
+
+ADD https://github.com/jwilder/dockerize/releases/download/${DOCKERIZE_VERSION}/dockerize-alpine-linux-amd64-${DOCKERIZE_VERSION}.tar.gz dockerize.tar.gz
+RUN tar xzf dockerize.tar.gz
+RUN chmod +x dockerize
+
+RUN mkdir -p ${APP_HOME}
+
+COPY target/${APP_NAME}.jar ${APP_HOME}/
+COPY build/docker/start-app.sh ${APP_HOME}/
+COPY build/docker/jmxtrans-agent.xml ${APP_HOME}/
+
+ADD https://github.com/jmxtrans/jmxtrans-agent/releases/download/${JMXTRANS_AGENT}/${JMXTRANS_AGENT}.jar ${APP_HOME}/
+
+WORKDIR ${APP_HOME}
+
+ENTRYPOINT ["./start-app.sh"]
diff --git a/trends/timeseries-aggregator/build/docker/jmxtrans-agent.xml b/trends/timeseries-aggregator/build/docker/jmxtrans-agent.xml
new file mode 100644
index 000000000..7b06786f5
--- /dev/null
+++ b/trends/timeseries-aggregator/build/docker/jmxtrans-agent.xml
@@ -0,0 +1,120 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<jmxtrans-agent>
+    <queries>
+        <!-- per-metric JMX queries go here -->
+    </queries>
+    <outputWriter class="org.jmxtrans.agent.GraphitePlainTextTcpOutputWriter">
+        <host>${HAYSTACK_GRAPHITE_HOST:monitoring-influxdb-graphite.kube-system.svc}</host>
+        <port>${HAYSTACK_GRAPHITE_PORT:2003}</port>
+        <enabled>${HAYSTACK_GRAPHITE_ENABLED:false}</enabled>
+        <namePrefix>haystack.trends.timeseries-aggregator.#hostname#.</namePrefix>
+    </outputWriter>
+    <collectIntervalInSeconds>60</collectIntervalInSeconds>
+</jmxtrans-agent>
diff --git a/trends/timeseries-aggregator/build/docker/start-app.sh b/trends/timeseries-aggregator/build/docker/start-app.sh
new file mode 100755
index 000000000..1988f0624
--- /dev/null
+++ b/trends/timeseries-aggregator/build/docker/start-app.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+[ -z "$JAVA_XMS" ] && JAVA_XMS=1024m
+[ -z "$JAVA_XMX" ] && JAVA_XMX=1024m
+
+set -e
+JAVA_OPTS="${JAVA_OPTS} \
+-javaagent:${APP_HOME}/${JMXTRANS_AGENT}.jar=${APP_HOME}/jmxtrans-agent.xml \
+-XX:+UseG1GC \
+-Xloggc:/var/log/gc.log \
+-XX:+PrintGCDetails \
+-XX:+PrintGCDateStamps \
+-XX:+UseGCLogFileRotation \
+-XX:NumberOfGCLogFiles=5 \
+-XX:GCLogFileSize=2M \
+-Xmx${JAVA_XMX} \
+-Xms${JAVA_XMS} \
+-Dapplication.name=${APP_NAME} \
+-Dcom.sun.management.jmxremote.authenticate=false \
+-Dcom.sun.management.jmxremote.ssl=false \
+-Dcom.sun.management.jmxremote.port=1098 \
+-Dapplication.home=${APP_HOME}"
+
+exec java ${JAVA_OPTS} -jar "${APP_HOME}/${APP_NAME}.jar"
diff --git a/trends/timeseries-aggregator/pom.xml b/trends/timeseries-aggregator/pom.xml
new file mode 100644
index 000000000..bc8fc0d97
--- /dev/null
+++ b/trends/timeseries-aggregator/pom.xml
@@ -0,0 +1,107 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>haystack-timeseries-aggregator</artifactId>
+    <packaging>jar</packaging>
+    <name>haystack-timeseries-aggregator</name>
+    <description>scala module which aggregates timeseries metricpoints based on predefined rules</description>
+
+    <parent>
+        <groupId>com.expedia.www</groupId>
+        <artifactId>haystack-trends</artifactId>
+        <version>1.0.0-SNAPSHOT</version>
+    </parent>
+
+    <licenses>
+        <license>
+            <name>The Apache License, Version 2.0</name>
+            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+        </license>
+    </licenses>
+
+    <properties>
+        <mainClass>com.expedia.www.haystack.trends.App</mainClass>
+        <wildcardSuites>com.expedia.www.haystack.trends.feature.tests,com.expedia.www.haystack.trends.unit.tests</wildcardSuites>
+        <integrationWildcardSuites>com.expedia.www.haystack.trends.integration.tests</integrationWildcardSuites>
+        <finalName>${project.artifactId}-${project.version}</finalName>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.hdrhistogram</groupId>
+            <artifactId>HdrHistogram</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.expedia.www</groupId>
+            <artifactId>haystack-commons</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.expedia.www</groupId>
+            <artifactId>haystack-logback-metrics-appender</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-streams</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.msgpack</groupId>
+            <artifactId>msgpack-core</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.typesafe</groupId>
+            <artifactId>config</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.codahale.metrics</groupId>
+            <artifactId>metrics-core</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <finalName>${finalName}</finalName>
+        <resources>
+            <resource>
+                <directory>src/main/resources</directory>
+                <filtering>true</filtering>
+            </resource>
+        </resources>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.scalatest</groupId>
+                <artifactId>scalatest-maven-plugin</artifactId>
+            </plugin>
+            <plugin>
+                <groupId>org.scalastyle</groupId>
+                <artifactId>scalastyle-maven-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/trends/timeseries-aggregator/src/main/resources/config/base.conf b/trends/timeseries-aggregator/src/main/resources/config/base.conf
new file mode 100644
index 000000000..052e652a2
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/resources/config/base.conf
@@ -0,0 +1,74 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-timeseries-aggregator-dev-v2"
+ bootstrap.servers = "kafkasvc:9092"
+ num.stream.threads = 1
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ producer.retries = 50
+ producer.batch.size = 65536
+ producer.linger.ms = 250
+ metrics.recording.level = DEBUG
+ timestamp.extractor = "com.expedia.www.haystack.commons.kstreams.MetricDataTimestampExtractor"
+ }
+
+ // To produce data to both external and internal kafka: set enable.external.kafka.produce to true and uncomment the props block below.
+ // To produce only to the same (internal) kafka: set enable.external.kafka.produce to false and leave the props commented out.
+ producer {
+ topics : [
+ {
+ topic: "metrics"
+ serdeClassName : "com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricDataSerde"
+ enabled: true
+ },
+ {
+ topic: "mdm"
+ serdeClassName : "com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricTankSerde"
+ enabled: true
+ }
+ ]
+ enable.external.kafka.produce = false
+ external.kafka.topic = "mdm"
+ // props {
+ // bootstrap.servers = "kafkasvc:9092"
+ // }
+ }
+
+ consumer {
+ topic = "metric-data-points"
+ }
+}
+
+state.store {
+ enable.logging = true
+ logging.delay.seconds = 60
+ // capacity: the number of trends kept in memory before being flushed to the state store
+ cache.size = 3000
+ changelog.topic {
+ cleanup.policy = "compact,delete"
+ retention.ms = 14400000 // 4Hrs
+ }
+}
+
+// there are three types of encoders that are used on service and operation names:
+// 1) periodreplacement: replaces all periods with 3 underscores
+// 2) base64: base64 encodes the full name with a padding of _
+// 3) noop: does not perform any encoding
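+// illustrative examples, assuming a service name "foo.bar":
+//   periodreplacement -> "foo___bar"
+//   base64            -> "Zm9vLmJhcg__" (standard base64 "Zm9vLmJhcg==" with '=' padding replaced by '_')
+//   noop              -> "foo.bar"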
+metricpoint.encoder.type = "periodreplacement"
+
+histogram {
+ max.value = 1800000 // 30 mins
+ precision = 2
+ value.unit = "millis" // can be micros / millis / seconds
+}
+
+// additional tags to be passed as part of metric data
+// It can be of format hocon config such as
+// additionalTags = {key: "value", key2:"value2"}
+// or json such as
+// additionalTags = """{"key": "value", "key2":"value2"}"""
+additionalTags = {}
diff --git a/trends/timeseries-aggregator/src/main/resources/logback.xml b/trends/timeseries-aggregator/src/main/resources/logback.xml
new file mode 100644
index 000000000..31cc8523a
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/resources/logback.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+        <withJansi>true</withJansi>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss:SSS} %thread, %level, %logger{70}, "%msg" %replace(%ex){'[\n]+', '\\n'}%nopex%n</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="async" class="ch.qos.logback.classic.AsyncAppender">
+        <queueSize>${HAYSTACK_LOG_QUEUE_SIZE:-500}</queueSize>
+        <discardingThreshold>${HAYSTACK_LOG_DISCARD_THRESHOLD:-0}</discardingThreshold>
+        <appender-ref ref="console"/>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="async"/>
+    </root>
+</configuration>
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/App.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/App.scala
new file mode 100644
index 000000000..7375d7001
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/App.scala
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends
+
+import java.util.function.Supplier
+
+import com.expedia.www.haystack.commons.health.{HealthStatusController, UpdateHealthStatusFile}
+import com.expedia.www.haystack.commons.kstreams.app.{Main, StateChangeListener, StreamsFactory, StreamsRunner}
+import com.expedia.www.haystack.trends.config.AppConfiguration
+import com.expedia.www.haystack.trends.kstream.Streams
+import com.netflix.servo.util.VisibleForTesting
+import org.apache.kafka.streams.Topology
+
+
+object App extends Main {
+
+ /**
+ * Creates a valid instance of StreamsRunner.
+ *
+ * StreamsRunner is created with a valid StreamsFactory instance and a listener that observes
+ * state changes of the kstreams application.
+ *
+ * StreamsFactory in turn is created with a Topology Supplier and kafka.StreamsConfig. Any failure in
+ * StreamsFactory is handled gracefully by StreamsRunner, shutting the application down
+ *
+ * Core logic of this application is in the `Streams` instance - which is a topology supplier. The
+ * topology of this application is built in this class.
+ *
+ * @return A valid instance of `StreamsRunner`
+ */
+
+ override def createStreamsRunner(): StreamsRunner = {
+ val appConfiguration = new AppConfiguration()
+
+ val healthStatusController = new HealthStatusController
+ healthStatusController.addListener(new UpdateHealthStatusFile(appConfiguration.healthStatusFilePath))
+
+ val stateChangeListener = new StateChangeListener(healthStatusController)
+
+ createStreamsRunner(appConfiguration, stateChangeListener)
+ }
+
+ @VisibleForTesting
+ private[trends] def createStreamsRunner(appConfiguration: AppConfiguration,
+ stateChangeListener: StateChangeListener): StreamsRunner = {
+ // create the topology provider
+ val kafkaConfig = appConfiguration.kafkaConfig
+ val streams: Supplier[Topology] = new Streams(appConfiguration)
+
+ val streamsFactory = new StreamsFactory(streams, kafkaConfig.streamsConfig, kafkaConfig.consumeTopic)
+
+ new StreamsRunner(streamsFactory, stateChangeListener)
+ }
+}
+
+
+
+
+
+
+
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/TrendHdrHistogram.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/TrendHdrHistogram.scala
new file mode 100644
index 000000000..28a5116b9
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/TrendHdrHistogram.scala
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2019 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.aggregation
+
+import java.nio.ByteBuffer
+import java.util.concurrent.TimeUnit
+
+import com.expedia.www.haystack.trends.config.entities.HistogramUnit.HistogramUnit
+import com.expedia.www.haystack.trends.config.entities.{HistogramMetricConfiguration, HistogramUnit}
+import org.HdrHistogram.Histogram
+
+/**
+ * Wrapper over hdr Histogram. Takes care of unit mismatch of histogram and the other systems.
+ *
+ * @param hdrHistogram : instance of hdr Histogram
+ * @param unit : unit of the recorded values, can be millis, micros or seconds
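+ *
+ * Illustrative usage (a sketch; the configuration values below are assumptions):
+ * {{{
+ * val histogram = new TrendHdrHistogram(HistogramMetricConfiguration(maxValue = 1800000, precision = 2, unit = HistogramUnit.MILLIS))
+ * histogram.recordValue(1500000)     // value supplied in micros, stored internally as 1500 millis
+ * histogram.getValueAtPercentile(99) // read back in micros
+ * }}}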
+ */
+case class TrendHdrHistogram(private val hdrHistogram: Histogram, unit: HistogramUnit) {
+
+ def this(histogramConfig: HistogramMetricConfiguration) = this(
+ new Histogram(histogramConfig.maxValue, histogramConfig.precision), histogramConfig.unit)
+
+ def recordValue(valInMicros: Long): Unit = {
+ val metricDataValue = fromMicros(valInMicros)
+ hdrHistogram.recordValue(metricDataValue)
+ }
+
+ def getMinValue: Long = toMicros(hdrHistogram.getMinValue)
+
+ def getMaxValue: Long = toMicros(hdrHistogram.getMaxValue)
+
+ def getMean: Long = toMicros(hdrHistogram.getMean.toLong)
+
+ def getStdDeviation: Long = toMicros(hdrHistogram.getStdDeviation.toLong)
+
+ def getTotalCount: Long = hdrHistogram.getTotalCount
+
+ def getHighestTrackableValue: Long = hdrHistogram.getHighestTrackableValue
+
+ def getHighesTrackableValueInMicros: Long = toMicros(hdrHistogram.getHighestTrackableValue)
+
+ def getValueAtPercentile(percentile: Double): Long = toMicros(hdrHistogram.getValueAtPercentile(percentile))
+
+ def getEstimatedFootprintInBytes: Int = hdrHistogram.getEstimatedFootprintInBytes
+
+ def encodeIntoByteBuffer(buffer: ByteBuffer): Int = {
+ hdrHistogram.encodeIntoByteBuffer(buffer)
+ }
+
+ private def fromMicros(value: Long): Long = {
+ unit match {
+ case HistogramUnit.MILLIS => TimeUnit.MICROSECONDS.toMillis(value)
+ case HistogramUnit.SECONDS => TimeUnit.MICROSECONDS.toSeconds(value)
+ case _ => value
+ }
+ }
+
+ private def toMicros(value: Long): Long = {
+ unit match {
+ case HistogramUnit.MILLIS => TimeUnit.MILLISECONDS.toMicros(value)
+ case HistogramUnit.SECONDS => TimeUnit.SECONDS.toMicros(value)
+ case _ => value
+ }
+ }
+}
\ No newline at end of file
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/TrendMetric.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/TrendMetric.scala
new file mode 100644
index 000000000..ac6296b0b
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/TrendMetric.scala
@@ -0,0 +1,140 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.aggregation
+
+import com.codahale.metrics.{Meter, Timer}
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.commons.entities.Interval
+import com.expedia.www.haystack.commons.entities.Interval.Interval
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trends.aggregation.TrendMetric._
+import com.expedia.www.haystack.trends.aggregation.metrics.MetricFactory
+import com.expedia.www.haystack.trends.config.AppConfiguration
+import org.slf4j.LoggerFactory
+
+import scala.util.Try
+
+/**
+ * This class contains a WindowedMetric for each interval being computed, so the number of WindowedMetrics equals the
+ * number of intervals; the time windows inside each depend on the interval, numberOfWatermarkWindows and the timeWindow in which the incoming metric lies.
+ *
+ * @param trendMetricsMap map containing intervals and windowedMetrics
+ * @param metricFactory factory which is used to create new metrics when required
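+ *
+ * For example, with intervals ONE_MINUTE and FIVE_MINUTE configured, each incoming MetricData
+ * is fed into two WindowedMetric instances: one bucketing values into 1-minute windows and
+ * one into 5-minute windows.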
+ */
+class TrendMetric private(var trendMetricsMap: Map[Interval, WindowedMetric], metricFactory: MetricFactory) extends MetricsSupport {
+
+ private val trendMetricComputeTimer: Timer = metricRegistry.timer("trendmetric.compute.time")
+ private val metricPointComputeFailureMeter: Meter = metricRegistry.meter("metricpoints.compute.failure")
+ private var currentEpochTimeInSec: Long = 0
+ private var shouldLog = true
+
+ def getMetricFactory: MetricFactory = {
+ metricFactory
+ }
+
+ /**
+ * function to compute the incoming metric data
+ * it updates all the metrics for the windows within which the incoming metric point lies
+ *
+ * @param incomingMetricData - incoming metric data
+ */
+ def compute(incomingMetricData: MetricData): Unit = {
+ val timerContext = trendMetricComputeTimer.time()
+ Try {
+ // update the metric for every configured interval's windows
+ trendMetricsMap.foreach(trendMetrics => {
+ val windowedMetric = trendMetrics._2
+ windowedMetric.compute(incomingMetricData)
+ })
+ }.recover {
+ case failure: Throwable =>
+ metricPointComputeFailureMeter.mark()
+ LOGGER.error(s"Failed to compute metricpoint : $incomingMetricData with exception ", failure)
+ failure
+ }
+
+ // check whether it is time to log to the state store
+ if ((incomingMetricData.getTimestamp - currentEpochTimeInSec) > AppConfiguration.stateStoreConfig.changeLogDelayInSecs) {
+ currentEpochTimeInSec = incomingMetricData.getTimestamp
+ shouldLog = true
+ }
+
+ timerContext.close()
+ }
+
+ /**
+ * returns the list of metricPoints that have been evicted because their window is closed
+ *
+ * @return list of evicted metricPoints
+ */
+ def getComputedMetricPoints(incomingMetricData: MetricData): List[MetricData] = {
+ List(trendMetricsMap.flatMap {
+ case (_, windowedMetric) =>
+ windowedMetric.getComputedMetricDataList(incomingMetricData)
+ }).flatten
+ }
+
+ /**
+ * flag to tell whether we need to log to state store
+ *
+ * @return flag to indicate should we log
+ */
+ def shouldLogToStateStore: Boolean = {
+ if (shouldLog) {
+ shouldLog = false
+ return true
+ }
+ false
+ }
+}
+
+object TrendMetric {
+
+ private val LOGGER = LoggerFactory.getLogger(this.getClass)
+
+
+ // per-interval config: (number of watermarked windows, ticks per interval)
+ val trendMetricConfig = Map(
+ Interval.ONE_MINUTE -> (1, 1),
+ Interval.FIVE_MINUTE -> (1, 1),
+ Interval.FIFTEEN_MINUTE -> (0, 1),
+ Interval.ONE_HOUR -> (0, 1))
+
+ def createTrendMetric(intervals: List[Interval],
+ firstMetricData: MetricData,
+ metricFactory: MetricFactory): TrendMetric = {
+ // this enables logging data to the state store for the very first time
+ val trendMetricMap = createMetricsForEachInterval(intervals, firstMetricData, metricFactory)
+ new TrendMetric(trendMetricMap, metricFactory)
+ }
+
+ def restoreTrendMetric(trendMetricMap: Map[Interval, WindowedMetric],
+ metricFactory: MetricFactory): TrendMetric = {
+ new TrendMetric(trendMetricMap, metricFactory)
+ }
+
+ private def createMetricsForEachInterval(intervals: List[Interval],
+ metricData: MetricData,
+ metricFactory: MetricFactory): Map[Interval, WindowedMetric] = {
+ intervals.map(interval => {
+ val windowedMetric = WindowedMetric.createWindowedMetric(metricData, metricFactory, trendMetricConfig(interval)._1, interval)
+ interval -> windowedMetric
+ }).toMap
+ }
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/WindowedMetric.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/WindowedMetric.scala
new file mode 100644
index 000000000..6fb905199
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/WindowedMetric.scala
@@ -0,0 +1,126 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.aggregation
+
+import java.util.concurrent.TimeUnit
+
+import com.codahale.metrics.{Histogram, Meter}
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.commons.entities.Interval.Interval
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trends.aggregation.entities.TimeWindow
+import com.expedia.www.haystack.trends.aggregation.metrics.{Metric, MetricFactory}
+import org.slf4j.LoggerFactory
+
+import scala.collection.mutable
+import scala.util.Try
+
+/**
+ * This class contains a metric for each time window being computed for a single interval
+ *
+ * @param windowedMetricsMap map containing sorted timewindows and metrics for an Interval
+ * @param metricFactory factory which is used to create new metrics when required
+ */
+class WindowedMetric private(var windowedMetricsMap: mutable.TreeMap[TimeWindow, Metric], metricFactory: MetricFactory, numberOfWatermarkedWindows: Int, interval: Interval) extends MetricsSupport {
+
+ private val disorderedMetricPointMeter: Meter = metricRegistry.meter("metricpoints.disordered")
+ private val timeInTopicMetricPointHistogram: Histogram = metricRegistry.histogram("metricpoints.timeInTopic")
+ private var computedMetrics = List[(Long, Metric)]()
+ private val LOGGER = LoggerFactory.getLogger(this.getClass)
+
+ def getMetricFactory: MetricFactory = {
+ metricFactory
+ }
+
+ /**
+ * function to compute the incoming metric data
+ * it updates all the metrics for the windows within which the incoming metric data lies for an interval
+ *
+ * @param incomingMetricData - incoming metric data
+ */
+ def compute(incomingMetricData: MetricData): Unit = {
+ timeInTopicMetricPointHistogram.update(System.currentTimeMillis() - TimeUnit.SECONDS.toMillis(incomingMetricData.getTimestamp))
+
+ val incomingMetricPointTimeWindow = TimeWindow.apply(incomingMetricData.getTimestamp, interval)
+
+ val matchedWindowedMetric = windowedMetricsMap.get(incomingMetricPointTimeWindow)
+
+ if (matchedWindowedMetric.isDefined) {
+ // an existing metric
+ matchedWindowedMetric.get.compute(incomingMetricData)
+ } else {
+ // incoming metric is a new metric
+ if (incomingMetricPointTimeWindow.compare(windowedMetricsMap.firstKey) > 0) {
+ // incoming metric's time is later than the minimum (first) time window
+ createNewMetric(incomingMetricPointTimeWindow, incomingMetricData)
+ evictMetric()
+ } else {
+ // disordered metric
+ disorderedMetricPointMeter.mark()
+ }
+ }
+ }
+
+ private def createNewMetric(incomingMetricPointTimeWindow: TimeWindow, incomingMetricData: MetricData) = {
+ val newMetric = metricFactory.createMetric(interval)
+ newMetric.compute(incomingMetricData)
+ windowedMetricsMap.put(incomingMetricPointTimeWindow, newMetric)
+ }
+
+ private def evictMetric() = {
+ if (windowedMetricsMap.size > (numberOfWatermarkedWindows + 1)) {
+ val evictInterval = windowedMetricsMap.firstKey
+ windowedMetricsMap.remove(evictInterval).foreach { evictedMetric =>
+ computedMetrics = (evictInterval.endTime, evictedMetric) :: computedMetrics
+ }
+ }
+ }
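+
+ // A hedged sketch of eviction (assuming numberOfWatermarkedWindows = 1): the map
+ // holds at most 2 windows, so when a point opens a third, newer window the oldest
+ // one is evicted and queued with its endTime as the publish timestamp:
+ //   windows before: [00:00-00:01, 00:01-00:02]; a point at 00:02:30 arrives
+ //   windows after : [00:01-00:02, 00:02-00:03]; the 00:00-00:01 metric is queued
+ //   for the next getComputedMetricDataList call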
+
+ /**
+ * Returns the list of metric data whose windows are closed and have been evicted, clearing the internal queue.
+ *
+ * @return list of evicted metricData
+ */
+ def getComputedMetricDataList(incomingMetricData: MetricData): List[MetricData] = {
+ val metricDataList = computedMetrics.flatMap {
+ case (publishTime, metric) =>
+ metric.mapToMetricDataList(incomingMetricData.getMetricDefinition.getKey, incomingMetricData.getMetricDefinition.getTags.getKv, publishTime)
+ }
+ computedMetrics = List[(Long, Metric)]()
+ metricDataList
+ }
+}
+
+/**
+ * Windowed metric factory which can create a new windowed metric or restore an existing windowed metric for an interval
+ */
+object WindowedMetric {
+
+ private val LOGGER = LoggerFactory.getLogger(this.getClass)
+
+ def createWindowedMetric(firstMetricData: MetricData, metricFactory: MetricFactory, watermarkedWindows: Int, interval: Interval): WindowedMetric = {
+ val windowedMetricMap = mutable.TreeMap[TimeWindow, Metric]()
+ windowedMetricMap.put(TimeWindow.apply(firstMetricData.getTimestamp, interval), metricFactory.createMetric(interval))
+ new WindowedMetric(windowedMetricMap, metricFactory, watermarkedWindows, interval)
+ }
+
+ def restoreWindowedMetric(windowedMetricsMap: mutable.TreeMap[TimeWindow, Metric], metricFactory: MetricFactory, watermarkedWindows: Int, interval: Interval): WindowedMetric = {
+ new WindowedMetric(windowedMetricsMap, metricFactory, watermarkedWindows, interval)
+ }
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/entities/StatValue.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/entities/StatValue.scala
new file mode 100644
index 000000000..70675f934
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/entities/StatValue.scala
@@ -0,0 +1,18 @@
+package com.expedia.www.haystack.trends.aggregation.entities
+
+/**
+ * This enumeration contains all the supported statistics we want to emit for a given histogram metric
+ */
+object StatValue extends Enumeration {
+ type StatValue = Value
+
+ val MEAN = Value("mean")
+ val MAX = Value("max")
+ val MIN = Value("min")
+ val COUNT = Value("count")
+ val STDDEV = Value("std")
+ val PERCENTILE_95 = Value("*_95")
+ val PERCENTILE_99 = Value("*_99")
+ val MEDIAN = Value("*_50")
+
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/entities/TimeWindow.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/entities/TimeWindow.scala
new file mode 100644
index 000000000..163cf7758
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/entities/TimeWindow.scala
@@ -0,0 +1,40 @@
+package com.expedia.www.haystack.trends.aggregation.entities
+
+import com.expedia.www.haystack.commons.entities.Interval.Interval
+
+
+/**
+ * This class encapsulates a time window which contains a start time and an end-time
+ */
+case class TimeWindow(startTime: Long, endTime: Long) extends Ordered[TimeWindow] {
+
+ override def compare(that: TimeWindow): Int = {
+ this.startTime.compare(that.startTime)
+ }
+
+ override def hashCode(): Int = {
+ this.startTime.hashCode()
+ }
+
+ override def equals(that: Any): Boolean = that match {
+ case other: TimeWindow => this.startTime == other.startTime && this.endTime == other.endTime
+ case _ => false
+ }
+}
+
+object TimeWindow {
+
+ /**
+ * Creates the time window for the given time in seconds and the interval of the window.
+ * E.g., given a timestamp of 145 seconds and an interval of 1 minute, the window is 120 seconds - 180 seconds.
+ *
+ * @param timestamp given time in seconds
+ * @param interval interval for which the window is created
+ * @return time window
+ */
+ def apply(timestamp: Long, interval: Interval): TimeWindow = {
+ val intervalTimeInSeconds = interval.timeInSeconds
+ val windowStart = (timestamp / intervalTimeInSeconds) * intervalTimeInSeconds
+ val windowEnd = windowStart + intervalTimeInSeconds
+ TimeWindow(windowStart, windowEnd)
+ }
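+
+ // A minimal usage sketch (values from the scaladoc example above):
+ //   TimeWindow(145, Interval.ONE_MINUTE) == TimeWindow(startTime = 120, endTime = 180)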
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/metrics/CountMetric.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/metrics/CountMetric.scala
new file mode 100644
index 000000000..16948b9b5
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/metrics/CountMetric.scala
@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.aggregation.metrics
+
+import java.util
+
+import com.codahale.metrics.Timer
+import com.expedia.metrics.{MetricData, MetricDefinition, TagCollection}
+import com.expedia.www.haystack.commons.entities.Interval.Interval
+import com.expedia.www.haystack.trends.aggregation.entities.StatValue
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType.AggregationType
+import com.expedia.www.haystack.trends.kstream.serde.metric.{CountMetricSerde, MetricSerde}
+
+/**
+ * This is a base metric which can compute the count of the given events
+ *
+ * @param interval : interval for the metric
+ * @param currentCount : current count; 0 for a new metric, but a saved count can be passed to restore the metric after an application crash
+ */
+
+class CountMetric(interval: Interval, var currentCount: Long) extends Metric(interval) {
+
+ def this(interval: Interval) = this(interval, 0)
+
+ private val countMetricComputeTimer: Timer = metricRegistry.timer("count.metric.compute.time")
+
+
+ override def mapToMetricDataList(metricKey: String, tags: util.Map[String, String], publishingTimestamp: Long): List[MetricData] = {
+ val tagCollection = new TagCollection(appendTags(tags, interval, StatValue.COUNT))
+ val metricDefinition = new MetricDefinition(metricKey, tagCollection, TagCollection.EMPTY)
+ val metricData = new MetricData(metricDefinition, currentCount, publishingTimestamp)
+ List(metricData)
+ }
+
+ def getCurrentCount: Long = {
+ currentCount
+ }
+
+
+ override def compute(metricData: MetricData): CountMetric = {
+ val timerContext = countMetricComputeTimer.time()
+ currentCount += metricData.getValue.toLong
+ timerContext.close()
+ this
+ }
+}
+
+object CountMetricFactory extends MetricFactory {
+ override def createMetric(interval: Interval): CountMetric = new CountMetric(interval)
+
+ override def getAggregationType: AggregationType = AggregationType.Count
+
+ override def getMetricSerde: MetricSerde = CountMetricSerde
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/metrics/HistogramMetric.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/metrics/HistogramMetric.scala
new file mode 100644
index 000000000..f7f70759f
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/metrics/HistogramMetric.scala
@@ -0,0 +1,90 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.aggregation.metrics
+
+import com.codahale.metrics.Timer
+import com.expedia.metrics.{MetricData, MetricDefinition, TagCollection}
+import com.expedia.www.haystack.commons.entities.Interval.Interval
+import com.expedia.www.haystack.trends.aggregation.TrendHdrHistogram
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType.AggregationType
+import com.expedia.www.haystack.trends.config.AppConfiguration
+import com.expedia.www.haystack.trends.kstream.serde.metric.{HistogramMetricSerde, MetricSerde}
+
+
+/**
+ * This is a base metric which can compute the histogram of the given events. It uses HdrHistogram (https://github.com/HdrHistogram/HdrHistogram) internally.
+ *
+ * @param interval : interval for the metric
+ * @param histogram : current histogram; a fresh histogram for a new metric, but a saved histogram can be passed to restore the metric after an application crash
+ */
+class HistogramMetric(interval: Interval, histogram: TrendHdrHistogram) extends Metric(interval) {
+
+ private val histogramMetricComputeTimer: Timer = metricRegistry.timer("histogram.metric.compute.time")
+
+ def this(interval: Interval) = this(interval, new TrendHdrHistogram(AppConfiguration.histogramMetricConfiguration))
+
+
+ override def mapToMetricDataList(metricKey: String, tags: java.util.Map[String, String], publishingTimestamp: Long): List[MetricData] = {
+ import com.expedia.www.haystack.trends.aggregation.entities.StatValue._
+ histogram.getTotalCount match {
+ case 0 => List()
+ case _ => val result = Map(
+ MEAN -> histogram.getMean,
+ MIN -> histogram.getMinValue,
+ PERCENTILE_95 -> histogram.getValueAtPercentile(95),
+ PERCENTILE_99 -> histogram.getValueAtPercentile(99),
+ STDDEV -> histogram.getStdDeviation,
+ MEDIAN -> histogram.getValueAtPercentile(50),
+ MAX -> histogram.getMaxValue
+ ).map {
+ case (stat, value) =>
+ val tagCollection = new TagCollection(appendTags(tags, interval, stat))
+ val metricDefinition = new MetricDefinition(metricKey, tagCollection, TagCollection.EMPTY)
+ new MetricData(metricDefinition, value, publishingTimestamp)
+ }
+ result.toList
+ }
+ }
+
+ def getRunningHistogram: TrendHdrHistogram = histogram
+
+ override def compute(metricData: MetricData): HistogramMetric = {
+ // the incoming value is expected in microseconds; values above the highest
+ // trackable value are clamped to it so the histogram does not reject them
+ val timerContext = histogramMetricComputeTimer.time()
+ histogram.recordValue(Math.min(metricData.getValue.toLong, histogram.getHighesTrackableValueInMicros))
+ timerContext.close()
+ this
+ }
+}
+
+object HistogramMetricFactory extends MetricFactory {
+
+ override def createMetric(interval: Interval): HistogramMetric = new HistogramMetric(interval)
+
+ override def getAggregationType: AggregationType = AggregationType.Histogram
+
+ override def getMetricSerde: MetricSerde = HistogramMetricSerde
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/metrics/Metric.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/metrics/Metric.scala
new file mode 100644
index 000000000..097ed1565
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/metrics/Metric.scala
@@ -0,0 +1,84 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.aggregation.metrics
+
+import java.util
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.commons.entities.Interval.Interval
+import com.expedia.www.haystack.commons.entities.TagKeys
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trends.aggregation.entities.StatValue.StatValue
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType.AggregationType
+import com.expedia.www.haystack.trends.kstream.serde.metric.MetricSerde
+
+abstract class Metric(interval: Interval) extends MetricsSupport {
+
+ /**
+ * Computes the incoming metric data.
+ *
+ * @param value - incoming metric data
+ * @return the metric; in most cases this is the same object (this), but returning a metric lets an implementation produce an immutable metric instead
+ */
+ def compute(value: MetricData): Metric
+
+ def getMetricInterval: Interval = {
+ interval
+ }
+
+
+ /**
+ * Returns the metric data points which contain the current snapshot of the metric
+ *
+ * @param publishingTimestamp : timestamp in seconds to be used as the timestamp of the published metric data
+ * @param metricKey : the name of the metricData to be generated
+ * @param tags : tags to be associated with the metricData
+ * @return list of published metric data
+ */
+ def mapToMetricDataList(metricKey: String, tags: util.Map[String, String], publishingTimestamp: Long): List[MetricData]
+
+ protected def appendTags(tags: util.Map[String, String], interval: Interval, statValue: StatValue): util.Map[String, String] = {
+ new util.LinkedHashMap[String, String] {
+ putAll(tags)
+ put(TagKeys.INTERVAL_KEY, interval.name)
+ put(TagKeys.STATS_KEY, statValue.toString)
+ }
+ }
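+
+ // A hedged sketch (the input map is hypothetical): the original tags are preserved
+ // and two tags are appended, keyed by TagKeys.INTERVAL_KEY and TagKeys.STATS_KEY:
+ //   appendTags(tags, Interval.ONE_MINUTE, StatValue.COUNT)
+ //   // => tags + (TagKeys.INTERVAL_KEY -> interval.name) + (TagKeys.STATS_KEY -> "count")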
+
+}
+
+/**
+ * This enum contains the supported aggregation types, currently Count and Histogram
+ */
+object AggregationType extends Enumeration {
+ type AggregationType = Value
+ val Count, Histogram = Value
+}
+
+
+/**
+ * This trait is implemented by a factory for every metric class, letting callers create the metric whenever required
+ */
+trait MetricFactory {
+ def createMetric(interval: Interval): Metric
+
+ def getAggregationType: AggregationType
+
+ def getMetricSerde: MetricSerde
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/DurationMetricRule.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/DurationMetricRule.scala
new file mode 100644
index 000000000..924ece4eb
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/DurationMetricRule.scala
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.aggregation.rules
+
+import com.expedia.metrics.{MetricData, MetricDefinition}
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType.AggregationType
+
+/**
+ * This rule applies the Histogram aggregation type when the incoming metric point's name contains "duration" and it is of type gauge
+ */
+trait DurationMetricRule extends MetricRule {
+ override def isMatched(metricData: MetricData): Option[AggregationType] = {
+ if (metricData.getMetricDefinition.getKey.toLowerCase.contains("duration") && containsTag(metricData,MetricDefinition.MTYPE, MTYPE_GAUGE)) {
+ Some(AggregationType.Histogram)
+ } else {
+ super.isMatched(metricData)
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/FailureMetricRule.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/FailureMetricRule.scala
new file mode 100644
index 000000000..10d950726
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/FailureMetricRule.scala
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.aggregation.rules
+
+import com.expedia.metrics.{MetricData, MetricDefinition}
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType.AggregationType
+
+/**
+ * This rule applies the Count aggregation type when the incoming metric point's name contains "failure-span" and it is of type gauge
+ */
+trait FailureMetricRule extends MetricRule {
+ override def isMatched(metricData: MetricData): Option[AggregationType] = {
+ if (metricData.getMetricDefinition.getKey.toLowerCase.contains("failure-span") && containsTag(metricData, MetricDefinition.MTYPE, MTYPE_GAUGE)) {
+ Some(AggregationType.Count)
+ } else {
+ super.isMatched(metricData)
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/LatencyMetricRule.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/LatencyMetricRule.scala
new file mode 100644
index 000000000..a388577de
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/LatencyMetricRule.scala
@@ -0,0 +1,37 @@
+/*
+ *
+ * Copyright 2018 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.aggregation.rules
+
+import com.expedia.metrics.{MetricData, MetricDefinition}
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType.AggregationType
+
+/**
+ * This rule applies the Histogram aggregation type when the incoming metric point's name contains "latency" and it is of type gauge
+ */
+trait LatencyMetricRule extends MetricRule {
+
+ override def isMatched(metricData: MetricData): Option[AggregationType] = {
+ if (metricData.getMetricDefinition.getKey.toLowerCase.contains("latency") && containsTag(metricData,MetricDefinition.MTYPE, MTYPE_GAUGE)) {
+ Some(AggregationType.Histogram)
+ } else {
+ super.isMatched(metricData)
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/MetricRule.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/MetricRule.scala
new file mode 100644
index 000000000..f3b9144e1
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/MetricRule.scala
@@ -0,0 +1,39 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.aggregation.rules
+
+import java.util
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType.AggregationType
+
+trait MetricRule {
+ val MTYPE_GAUGE = "gauge"
+ def isMatched(metricData: MetricData): Option[AggregationType] = None
+
+ def containsTag(metricData: MetricData, tagKey: String, tagValue: String): Boolean = {
+ val tags = getTags(metricData)
+ tags.containsKey(tagKey) && tags.get(tagKey).equalsIgnoreCase(tagValue)
+ }
+
+ def getTags(metricData: MetricData): util.Map[String, String] = {
+ metricData.getMetricDefinition.getTags.getKv
+ }
+}
+
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/MetricRuleEngine.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/MetricRuleEngine.scala
new file mode 100644
index 000000000..90c73108b
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/MetricRuleEngine.scala
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.aggregation.rules
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType.AggregationType
+
+
+/**
+ * This metric rule engine applies all the metric rules it extends, from right to left (http://jim-mcbeath.blogspot.in/2009/08/scala-class-linearization.html).
+ * It returns None if none of the rules are applicable.
+ * To add another rule, create a rule trait and add it to the with clause of this engine.
+ * If multiple rules match, the rightmost rule is applied.
+ */
+trait MetricRuleEngine extends LatencyMetricRule with DurationMetricRule with FailureMetricRule with SuccessMetricRule {
+
+ def findMatchingMetric(metricData: MetricData): Option[AggregationType] = {
+ isMatched(metricData)
+ }
+}
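+
+// A hedged sketch of the rule ordering (per the linearization note above): for a gauge
+// metric named "success-span-latency", SuccessMetricRule.isMatched runs first and returns
+// Some(AggregationType.Count) before LatencyMetricRule is ever consulted; a metric matching
+// no rule falls through to the base MetricRule and findMatchingMetric yields None.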
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/SuccessMetricRule.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/SuccessMetricRule.scala
new file mode 100644
index 000000000..5afff8e69
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/aggregation/rules/SuccessMetricRule.scala
@@ -0,0 +1,37 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.aggregation.rules
+
+import com.expedia.metrics.{MetricData, MetricDefinition}
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType.AggregationType
+
+/**
+ * This rule applies the Count aggregation type when the incoming metric point's name contains "success-span" and it is of type gauge
+ */
+trait SuccessMetricRule extends MetricRule {
+
+ override def isMatched(metricData: MetricData): Option[AggregationType] = {
+ if (metricData.getMetricDefinition.getKey.toLowerCase.contains("success-span") && containsTag(metricData, MetricDefinition.MTYPE, MTYPE_GAUGE)) {
+ Some(AggregationType.Count)
+ } else {
+ super.isMatched(metricData)
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/AppConfiguration.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/AppConfiguration.scala
new file mode 100644
index 000000000..2f1f73e85
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/AppConfiguration.scala
@@ -0,0 +1,189 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.config
+
+import java.util.Properties
+
+import com.expedia.www.haystack.commons.config.ConfigurationLoader
+import com.expedia.www.haystack.commons.entities.encoders.{Encoder, EncoderFactory}
+import com.expedia.www.haystack.commons.kstreams.MetricDataTimestampExtractor
+import com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricDataSerializer
+import com.expedia.www.haystack.trends.config.entities._
+import com.typesafe.config.{Config, ConfigFactory, ConfigValueType}
+import org.apache.kafka.clients.producer.ProducerConfig
+import org.apache.kafka.clients.producer.ProducerConfig.{KEY_SERIALIZER_CLASS_CONFIG, VALUE_SERIALIZER_CLASS_CONFIG}
+import org.apache.kafka.streams.StreamsConfig
+import org.apache.kafka.streams.Topology.AutoOffsetReset
+import org.apache.kafka.streams.processor.TimestampExtractor
+
+import scala.collection.JavaConverters._
+
+class AppConfiguration {
+ private val config = ConfigurationLoader.loadConfigFileWithEnvOverrides()
+
+ val healthStatusFilePath: String = config.getString("health.status.path")
+ private val kafka = config.getConfig("kafka")
+ private val producerConfig = kafka.getConfig("producer")
+ private val consumerConfig = kafka.getConfig("consumer")
+ private val streamsConfig = kafka.getConfig("streams")
+
+
+
+ /**
+ *
+ * @return type of encoder to use on metricpoint key names
+ */
+ def encoder: Encoder = {
+ val encoderType = config.getString("metricpoint.encoder.type")
+ EncoderFactory.newInstance(encoderType)
+ }
+
+
+ /**
+ *
+ * @return configurations specific to creating HDR histogram objects
+ */
+ def histogramMetricConfiguration: HistogramMetricConfiguration = {
+ val histCfg = config.getConfig("histogram")
+
+ HistogramMetricConfiguration(
+ histCfg.getInt("precision"),
+ histCfg.getInt("max.value"),
+ HistogramUnit.from(histCfg.getString("value.unit")))
+ }
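+
+ // A hedged example of the expected config block (the key names are read above; the
+ // values here are illustrative only):
+ //   histogram {
+ //     precision = 2
+ //     max.value = 1800000
+ //     value.unit = "micros"
+ //   }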
+
+ /**
+ *
+ * @return state store stream config while aggregating
+ */
+ def stateStoreConfig: StateStoreConfiguration = {
+ val stateStoreConfigs = config.getConfig("state.store")
+
+ val cacheSize = stateStoreConfigs.getInt("cache.size")
+ val enableChangeLog = stateStoreConfigs.getBoolean("enable.logging")
+ val changeLogDelayInSecs = stateStoreConfigs.getInt("logging.delay.seconds")
+
+ val changeLogTopicConfiguration = if (stateStoreConfigs.getConfig("changelog.topic").isEmpty) {
+ Map[String, String]()
+ } else {
+ stateStoreConfigs.getConfig("changelog.topic").entrySet().asScala.map(entry => entry.getKey -> entry.getValue.unwrapped().toString).toMap
+ }
+
+ StateStoreConfiguration(cacheSize, enableChangeLog, changeLogDelayInSecs, changeLogTopicConfiguration)
+ }
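+
+ // A hedged example of the expected config block (the key names are read above;
+ // values are illustrative only):
+ //   state.store {
+ //     cache.size = 30000
+ //     enable.logging = true
+ //     logging.delay.seconds = 60
+ //     changelog.topic {
+ //       cleanup.policy = "compact"
+ //     }
+ //   }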
+
+ /**
+ *
+ * @return streams configuration object
+ */
+ def kafkaConfig: KafkaConfiguration = {
+
+ // verify that the applicationId and bootstrap server configs are non-empty
+ def verifyRequiredProps(props: Properties): Unit = {
+ require(props.getProperty(StreamsConfig.APPLICATION_ID_CONFIG).nonEmpty)
+ require(props.getProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG).nonEmpty)
+ }
+
+ def addProps(config: Config, props: Properties, prefix: (String) => String = identity): Unit = {
+ config.entrySet().asScala.foreach(kv => {
+ val propKeyName = prefix(kv.getKey)
+ props.setProperty(propKeyName, kv.getValue.unwrapped().toString)
+ })
+ }
+
+ def getExternalKafkaProps(producerConfig: Config): Option[Properties] = {
+
+ if (producerConfig.getBoolean("enable.external.kafka.produce")) {
+ val props = new Properties()
+ val kafkaProducerProps = producerConfig.getConfig("props")
+
+ kafkaProducerProps.entrySet() forEach {
+ kv => {
+ props.setProperty(kv.getKey, kv.getValue.unwrapped().toString)
+ }
+ }
+
+ props.put(KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
+ props.put(VALUE_SERIALIZER_CLASS_CONFIG, classOf[MetricDataSerializer].getCanonicalName)
+
+ require(props.getProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG).nonEmpty)
+ Option(props)
+ } else {
+ Option.empty
+ }
+ }
+
+ /**
+ *
+ * @return the kafka auto offset reset configuration
+ */
+ def getKafkaAutoReset: AutoOffsetReset = {
+ if (streamsConfig.hasPath("auto.offset.reset")) AutoOffsetReset.valueOf(streamsConfig.getString("auto.offset.reset").toUpperCase)
+ else AutoOffsetReset.LATEST
+ }
+
+
+ val props = new Properties
+ // add stream specific properties
+ addProps(streamsConfig, props)
+ // validate props
+ verifyRequiredProps(props)
+
+ val timestampExtractor = Option(props.getProperty("timestamp.extractor")) match {
+ case Some(timeStampExtractorClass) =>
+ Class.forName(timeStampExtractorClass).newInstance().asInstanceOf[TimestampExtractor]
+ case None =>
+ new MetricDataTimestampExtractor
+ }
+
+ //set timestamp extractor
+ props.setProperty("timestamp.extractor", timestampExtractor.getClass.getName)
+
+ val kafkaSinkTopicConfig = producerConfig.getConfigList("topics").asScala
+
+ val kafkaSinkTopics = kafkaSinkTopicConfig.map(sinkTopic =>
+ KafkaSinkTopic(sinkTopic.getString("topic"), sinkTopic.getString("serdeClassName"),
+ sinkTopic.getBoolean("enabled")))
+
+ KafkaConfiguration(
+ new StreamsConfig(props),
+ producerConfig = KafkaProduceConfiguration(kafkaSinkTopics.toList, getExternalKafkaProps(producerConfig),
+ producerConfig.getString("external.kafka.topic"), producerConfig.getBoolean("enable.external.kafka.produce")),
+ consumeTopic = consumerConfig.getString("topic"),
+ getKafkaAutoReset,
+ timestampExtractor,
+ kafka.getLong("close.timeout.ms"))
+ }
+
+ def additionalTags: Map[String, String] = {
+ val additionalTagsConfig = config.getValue("additionalTags").valueType() match {
+ case ConfigValueType.OBJECT => config.getConfig("additionalTags")
+ case _ => ConfigFactory.parseString(config.getString("additionalTags"))
+ }
+ additionalTagsConfig.entrySet().asScala.map(entry => entry.getKey -> entry.getValue.unwrapped().toString).toMap
+ }
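+
+ // A hedged example (illustrative values): additionalTags accepts either a config
+ // object or a HOCON string, and both of these parse to
+ // Map("env" -> "prod", "region" -> "us-west-2"):
+ //   additionalTags { env = "prod", region = "us-west-2" }
+ //   additionalTags = "env=prod, region=us-west-2"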
+
+}
+
+object AppConfiguration extends AppConfiguration
+
+
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/entities/HistogramMetricConfiguration.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/entities/HistogramMetricConfiguration.scala
new file mode 100644
index 000000000..27026ac59
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/entities/HistogramMetricConfiguration.scala
@@ -0,0 +1,47 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.config.entities
+
+import com.expedia.www.haystack.trends.config.entities.HistogramUnit.HistogramUnit
+
+
+/**
+ * This configuration helps create the HistogramMetric.
+ *
+ * @param precision - decimal precision required for the histogram; the allowable precision is 0 <= value <= 5
+ * @param maxValue - maximum value for the incoming metric (should always be greater than the maximum value you expect for a metric point)
+ * @param unit - unit of the value given to the histogram (can be micros, millis, or seconds)
+ */
+case class HistogramMetricConfiguration(precision: Int, maxValue: Int, unit: HistogramUnit)
+
+object HistogramUnit extends Enumeration {
+ type HistogramUnit = Value
+ val MILLIS, MICROS, SECONDS = Value
+
+ def from(unit: String): HistogramUnit = {
+ unit.toLowerCase() match {
+ case "millis" => HistogramUnit.MILLIS
+ case "micros" => HistogramUnit.MICROS
+ case "seconds" => HistogramUnit.SECONDS
+ case _ => throw new RuntimeException(
+ String.format("Fail to understand the histogram unit %s, should be one of [millis, micros or seconds]", unit))
+ }
+ }
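+
+ // Illustrative usage: from("millis") == HistogramUnit.MILLIS and from("Micros") ==
+ // HistogramUnit.MICROS (matching is case-insensitive); any other string throws.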
+
+ def default: HistogramUnit = HistogramUnit.MICROS
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/entities/KafkaConfiguration.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/entities/KafkaConfiguration.scala
new file mode 100644
index 000000000..67b104738
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/entities/KafkaConfiguration.scala
@@ -0,0 +1,37 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.config.entities
+
+import org.apache.kafka.streams.StreamsConfig
+import org.apache.kafka.streams.Topology.AutoOffsetReset
+import org.apache.kafka.streams.processor.TimestampExtractor
+
+/**
+ * @param streamsConfig config object to be used for initializing KafkaStreams
+ * @param producerConfig producer config
+ * @param consumeTopic consumer topic
+ * @param autoOffsetReset auto offset reset policy
+ * @param timestampExtractor timestamp extractor
+ * @param closeTimeoutInMs timeout for closing kafka streams in ms
+ */
+case class KafkaConfiguration(streamsConfig: StreamsConfig,
+ producerConfig: KafkaProduceConfiguration,
+ consumeTopic: String,
+ autoOffsetReset: AutoOffsetReset,
+ timestampExtractor: TimestampExtractor,
+ closeTimeoutInMs: Long)
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/entities/KafkaProduceConfiguration.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/entities/KafkaProduceConfiguration.scala
new file mode 100644
index 000000000..4fccffae2
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/entities/KafkaProduceConfiguration.scala
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.config.entities
+
+import java.util.Properties
+
+/**
+ * This configuration specifies whether the stream topology writes the aggregated metrics to an external kafka cluster
+ *
+ * @param kafkaSinkTopics - list of all sink topics along with their serdes
+ * @param props - kafka producer configuration
+ * @param externalKafkaTopic - topic on the external kafka cluster to which aggregated metrics are produced
+ * @param enableExternalKafka - enable/disable the external kafka sink
+ */
+case class KafkaProduceConfiguration(kafkaSinkTopics: List[KafkaSinkTopic], props: Option[Properties], externalKafkaTopic: String, enableExternalKafka: Boolean)
+
+case class KafkaSinkTopic(topic: String, serdeClassName:String, enabled: Boolean)
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/entities/StateStoreConfiguration.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/entities/StateStoreConfiguration.scala
new file mode 100644
index 000000000..d6c238006
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/config/entities/StateStoreConfiguration.scala
@@ -0,0 +1,30 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.config.entities
+
+
+/**
+ * This class contains configurations specific to the kafka streams state store which keeps the trend metrics being computed
+ *
+ * @param stateStoreCacheSize - max number of trends which can be computed by a single state store (this number * number of stream tasks * in-memory size of a trendMetric should be less than the heap of the process)
+ * @param enableChangeLogging - enable/disable changelogging; this helps recreate the state after an app crash or partition reassignment
+ * @param changeLogDelayInSecs - interval at which the state should be checkpointed to the changelog topic in kafka
+ * @param changeLogTopicConfiguration - configuration specific to the kafka changelog topic
+ */
+
+case class StateStoreConfiguration(stateStoreCacheSize: Int, enableChangeLogging: Boolean, changeLogDelayInSecs: Int, changeLogTopicConfiguration: Map[String, String])
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/Streams.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/Streams.scala
new file mode 100644
index 000000000..483371eb1
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/Streams.scala
@@ -0,0 +1,119 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.kstream
+
+import java.util.function.Supplier
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.commons.kstreams.serde.metricdata.{MetricDataSerde, MetricTankSerde}
+import com.expedia.www.haystack.trends.aggregation.TrendMetric
+import com.expedia.www.haystack.trends.config.AppConfiguration
+import com.expedia.www.haystack.trends.kstream.processor.{AdditionalTagsProcessorSupplier, ExternalKafkaProcessorSupplier, MetricAggProcessorSupplier}
+import com.expedia.www.haystack.trends.kstream.store.HaystackStoreBuilder
+import org.apache.kafka.common.serialization.{Serde, StringDeserializer, StringSerializer}
+import org.apache.kafka.streams.Topology
+import org.apache.kafka.streams.state.{KeyValueStore, StoreBuilder}
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters
+
+class Streams(appConfiguration: AppConfiguration) extends Supplier[Topology] {
+
+ private val LOGGER = LoggerFactory.getLogger(classOf[Streams])
+ private val TOPOLOGY_SOURCE_NAME = "metricpoint-source"
+ private val TOPOLOGY_EXTERNAL_SINK_NAME = "metricpoint-aggregated-sink-external"
+ private val TOPOLOGY_INTERNAL_SINK_NAME = "metric-data-aggregated-sink-internal"
+ private val TOPOLOGY_AGGREGATOR_PROCESSOR_NAME = "metricpoint-aggregator-process"
+ private val TOPOLOGY_ADDITIONAL_TAGS_PROCESSOR_NAME = "additional-tags-process"
+ private val TOPOLOGY_AGGREGATOR_TREND_METRIC_STORE_NAME = "trend-metric-store"
+ private val kafkaConfig = appConfiguration.kafkaConfig
+
+ private def initialize(topology: Topology): Topology = {
+
+ //add source - topic where the raw metricpoints are pushed by the span-timeseries-transformer
+ topology.addSource(
+ kafkaConfig.autoOffsetReset,
+ TOPOLOGY_SOURCE_NAME,
+ kafkaConfig.timestampExtractor,
+ new StringDeserializer,
+ new MetricTankSerde().deserializer(),
+ kafkaConfig.consumeTopic)
+
+
+ //The processor which performs aggregations on the metrics
+ topology.addProcessor(
+ TOPOLOGY_AGGREGATOR_PROCESSOR_NAME,
+ new MetricAggProcessorSupplier(TOPOLOGY_AGGREGATOR_TREND_METRIC_STORE_NAME, appConfiguration.encoder),
+ TOPOLOGY_SOURCE_NAME)
+
+
+ //key-value, state store associated with each kstreams task(partition)
+ // which keeps the trend-metrics which are currently being computed in memory
+ topology.addStateStore(createTrendMetricStateStore(), TOPOLOGY_AGGREGATOR_PROCESSOR_NAME)
+
+ // processor which appends additional tags, if any are configured
+ topology.addProcessor(TOPOLOGY_ADDITIONAL_TAGS_PROCESSOR_NAME, new AdditionalTagsProcessorSupplier(appConfiguration.additionalTags), TOPOLOGY_AGGREGATOR_PROCESSOR_NAME)
+
+ if (appConfiguration.kafkaConfig.producerConfig.enableExternalKafka) {
+ topology.addProcessor(
+ TOPOLOGY_EXTERNAL_SINK_NAME,
+ new ExternalKafkaProcessorSupplier(appConfiguration.kafkaConfig.producerConfig),
+ TOPOLOGY_ADDITIONAL_TAGS_PROCESSOR_NAME
+ )
+ }
+
+ // adding sinks
+ appConfiguration.kafkaConfig.producerConfig.kafkaSinkTopics.foreach(sinkTopic => {
+ if (sinkTopic.enabled) {
+ val serde = Class.forName(sinkTopic.serdeClassName).newInstance().asInstanceOf[Serde[MetricData]]
+ topology.addSink(
+ s"${TOPOLOGY_INTERNAL_SINK_NAME}-${sinkTopic.topic}",
+ sinkTopic.topic,
+ new StringSerializer,
+ serde.serializer(),
+ TOPOLOGY_ADDITIONAL_TAGS_PROCESSOR_NAME)
+ }
+ })
+
+ topology
+ }
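+
+ // A sketch of the wiring built above (internal sink names are suffixed per topic):
+ //   metricpoint-source
+ //     -> metricpoint-aggregator-process (backed by trend-metric-store)
+ //       -> additional-tags-process
+ //         -> metricpoint-aggregated-sink-external (only if external produce is enabled)
+ //         -> metric-data-aggregated-sink-internal-<topic> (one per enabled sink topic)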
+
+
+ private def createTrendMetricStateStore(): StoreBuilder[KeyValueStore[String, TrendMetric]] = {
+
+ val stateStoreConfiguration = appConfiguration.stateStoreConfig
+
+ val storeBuilder = new HaystackStoreBuilder(TOPOLOGY_AGGREGATOR_TREND_METRIC_STORE_NAME, stateStoreConfiguration.stateStoreCacheSize)
+
+ if (stateStoreConfiguration.enableChangeLogging) {
+ storeBuilder
+ .withLoggingEnabled(JavaConverters.mapAsJavaMap(stateStoreConfiguration.changeLogTopicConfiguration))
+
+ } else {
+ storeBuilder
+ .withLoggingDisabled()
+ }
+ }
+
+
+ override def get(): Topology = {
+ val topology = new Topology
+ initialize(topology)
+ }
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/processor/AdditionalTagsProcessorSupplier.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/processor/AdditionalTagsProcessorSupplier.scala
new file mode 100644
index 000000000..1aa6abe80
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/processor/AdditionalTagsProcessorSupplier.scala
@@ -0,0 +1,32 @@
+package com.expedia.www.haystack.trends.kstream.processor
+
+import java.util
+
+import com.expedia.metrics.{MetricData, MetricDefinition, TagCollection}
+import com.expedia.www.haystack.commons.util.MetricDefinitionKeyGenerator.generateKey
+import org.apache.kafka.streams.processor._
+
+import scala.collection.JavaConverters._
+
+class AdditionalTagsProcessorSupplier(additionalTags: Map[String, String]) extends ProcessorSupplier[String, MetricData] {
+ override def get(): Processor[String, MetricData] = new AdditionalTagsProcessor(additionalTags)
+}
+
+
+class AdditionalTagsProcessor(additionalTags: Map[String, String]) extends AbstractProcessor[String, MetricData] {
+
+ override def process(key: String, value: MetricData): Unit = {
+ if (additionalTags.isEmpty) {
+ context().forward(key, value)
+ }
+ else {
+ val tags = new util.LinkedHashMap[String, String] {
+ putAll(value.getMetricDefinition.getTags.getKv)
+ putAll(additionalTags.asJava)
+ }
+ val metricDefinition = new MetricDefinition(value.getMetricDefinition.getKey, new TagCollection(tags), TagCollection.EMPTY)
+ val metricData = new MetricData(metricDefinition, value.getValue, value.getTimestamp)
+ context.forward(generateKey(metricData.getMetricDefinition), metricData)
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/processor/ExternalKafkaProcessorSupplier.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/processor/ExternalKafkaProcessorSupplier.scala
new file mode 100644
index 000000000..2828b5d6b
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/processor/ExternalKafkaProcessorSupplier.scala
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.kstream.processor
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.trends.config.entities.KafkaProduceConfiguration
+import com.expedia.www.haystack.trends.kstream.serde.TrendMetricSerde.metricRegistry
+import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}
+import org.apache.kafka.streams.processor.{AbstractProcessor, Processor, ProcessorContext, ProcessorSupplier}
+import org.slf4j.LoggerFactory
+
+class ExternalKafkaProcessorSupplier(kafkaProduceConfig: KafkaProduceConfiguration) extends ProcessorSupplier[String, MetricData] {
+
+ private val LOGGER = LoggerFactory.getLogger(this.getClass)
+ private val metricPointExternalKafkaSuccessMeter = metricRegistry.meter("metricpoint.kafka-external.success")
+ private val metricPointExternalKafkaFailureMeter = metricRegistry.meter("metricpoint.kafka-external.failure")
+
+ def get: Processor[String, MetricData] = {
+ new ExternalKafkaProcessor(kafkaProduceConfig: KafkaProduceConfiguration)
+ }
+
+ /**
+ * This is the Processor which produces every incoming aggregated metric data record to a topic
+ * on an external kafka cluster, using a producer built from the supplied configuration
+ *
+ * @param kafkaProduceConfig - configuration to create kafka producer
+ */
+ private class ExternalKafkaProcessor(kafkaProduceConfig: KafkaProduceConfiguration) extends AbstractProcessor[String, MetricData] {
+
+ private val kafkaProducer: KafkaProducer[String, MetricData] = new KafkaProducer[String, MetricData](kafkaProduceConfig.props.get)
+ private val kafkaProduceTopic = kafkaProduceConfig.externalKafkaTopic
+
+ @SuppressWarnings(Array("unchecked"))
+ override def init(context: ProcessorContext) {
+ super.init(context)
+ }
+
+ /**
+ * Produces the metric data record to the external kafka topic asynchronously, marking a success or failure meter when the send completes
+ *
+ * @param key - key in the kafka record - should be MetricDefinitionKeyGenerator.generateKey(metricData.getMetricDefinition)
+ * @param value - metricData
+ */
+ def process(key: String, value: MetricData): Unit = {
+
+ val kafkaMessage = new ProducerRecord(kafkaProduceTopic, key, value)
+ kafkaProducer.send(kafkaMessage, new Callback {
+ override def onCompletion(recordMetadata: RecordMetadata, e: Exception): Unit = {
+ if (e != null) {
+ LOGGER.error(s"Failed to produce the message to kafka for topic=$kafkaProduceTopic, with reason=", e)
+ metricPointExternalKafkaFailureMeter.mark()
+ } else {
+ metricPointExternalKafkaSuccessMeter.mark()
+ }
+ }
+ })
+ }
+ }
+}
+
+
+
+
+
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/processor/MetricAggProcessorSupplier.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/processor/MetricAggProcessorSupplier.scala
new file mode 100644
index 000000000..ccb464096
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/processor/MetricAggProcessorSupplier.scala
@@ -0,0 +1,132 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.kstream.processor
+
+import com.codahale.metrics.{Counter, Meter}
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.commons.entities.encoders.Encoder
+import com.expedia.www.haystack.commons.entities.Interval
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.commons.util.MetricDefinitionKeyGenerator.generateKey
+import com.expedia.www.haystack.trends.aggregation.TrendMetric
+import com.expedia.www.haystack.trends.aggregation.metrics._
+import com.expedia.www.haystack.trends.aggregation.rules.MetricRuleEngine
+import org.apache.kafka.streams.kstream.internals._
+import org.apache.kafka.streams.processor.{AbstractProcessor, Processor, ProcessorContext}
+import org.apache.kafka.streams.state.KeyValueStore
+import org.slf4j.LoggerFactory
+
+class MetricAggProcessorSupplier(trendMetricStoreName: String, encoder: Encoder) extends KStreamAggProcessorSupplier[String, String, MetricData, TrendMetric] with MetricRuleEngine with MetricsSupport {
+
+ private var sendOldValues: Boolean = false
+ private val LOGGER = LoggerFactory.getLogger(this.getClass)
+
+ def get: Processor[String, MetricData] = {
+ new MetricAggProcessor(trendMetricStoreName)
+ }
+
+
+ def enableSendingOldValues() {
+ sendOldValues = true
+ }
+
+ override def view(): KTableValueGetterSupplier[String, TrendMetric] = new KTableValueGetterSupplier[String, TrendMetric]() {
+
+ override def get(): KTableValueGetter[String, TrendMetric] = new TrendMetricAggregateValueGetter()
+
+ override def storeNames(): Array[String] = Array[String](trendMetricStoreName)
+
+ private class TrendMetricAggregateValueGetter extends KTableValueGetter[String, TrendMetric] {
+
+ private var store: KeyValueStore[String, TrendMetric] = _
+
+ @SuppressWarnings(Array("unchecked")) def init(context: ProcessorContext) {
+ store = context.getStateStore(trendMetricStoreName).asInstanceOf[KeyValueStore[String, TrendMetric]]
+ }
+
+ def get(key: String): TrendMetric = store.get(key)
+ }
+ }
+
+ /**
+ * This is the processor which maintains the map of unique trends consumed from the assigned partition and the corresponding trend metric for each trend.
+ * Each trend is uniquely identified by its metric key - a combination of the metric name and its list of tags. It is backed by a state store which keeps this map and has the
+ * ability to restore the map if/when the app restarts or when the assigned kafka partitions change.
+ *
+ * @param trendMetricStoreName - name of the key-value state store
+ */
+ private class MetricAggProcessor(trendMetricStoreName: String) extends AbstractProcessor[String, MetricData] {
+ private var trendMetricStore: KeyValueStore[String, TrendMetric] = _
+
+
+ private var trendsCount: Counter = _
+ private val invalidMetricPointMeter: Meter = metricRegistry.meter("metricprocessor.invalid")
+
+ @SuppressWarnings(Array("unchecked"))
+ override def init(context: ProcessorContext) {
+ super.init(context)
+ trendsCount = metricRegistry.counter(s"metricprocessor.trendcount.${context.taskId()}")
+ trendMetricStore = context.getStateStore(trendMetricStoreName).asInstanceOf[KeyValueStore[String, TrendMetric]]
+ trendsCount.dec(trendsCount.getCount)
+ trendsCount.inc(trendMetricStore.approximateNumEntries())
+ LOGGER.info(s"Triggering init for metric agg processor for task id ${context.taskId()}")
+ }
+
+ /**
+ * fetches the trend metric for the given key and updates it if it exists; otherwise creates a new trend metric and adds it to the store
+ *
+ * @param key - key in the kafka record - should be MetricDefinitionKeyGenerator.generateKey(metricData.getMetricDefinition)
+ * @param metricData - metricData
+ */
+ def process(key: String, metricData: MetricData): Unit = {
+ if (key != null && metricData.getValue > 0) {
+
+ // first get the matching windows
+ Option(trendMetricStore.get(key)).orElse(createTrendMetric(metricData)).foreach(trendMetric => {
+ trendMetric.compute(metricData)
+
+ /*
+ we put the updated trend metric back into the store since we want the state store's changelog to carry the latest state of the trend metric. If we only mutated the
+ metric without putting it back, kstreams would not capture the change and the app would not be able to restore to the same state when it comes back up again.
+ */
+ if (trendMetric.shouldLogToStateStore) {
+ trendMetricStore.put(key, trendMetric)
+ }
+
+ // retrieve the computed metrics and push them to the kafka topic.
+ trendMetric.getComputedMetricPoints(metricData).foreach(metricPoint => {
+ context().forward(generateKey(metricData.getMetricDefinition), metricPoint)
+ })
+ })
+ } else {
+ invalidMetricPointMeter.mark()
+ }
+ }
+
+ private def createTrendMetric(value: MetricData): Option[TrendMetric] = {
+ findMatchingMetric(value).map {
+ case AggregationType.Histogram =>
+ trendsCount.inc()
+ TrendMetric.createTrendMetric(Interval.all, value, HistogramMetricFactory)
+ case AggregationType.Count =>
+ trendsCount.inc()
+ TrendMetric.createTrendMetric(Interval.all, value, CountMetricFactory)
+ }
+ }
+ }
+}
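A hedged sketch of how this supplier and its backing store might be wired into a Processor API topology; the node names, source topic, and cache size are illustrative rather than values fixed by this PR. Connecting the store to the processor is what makes context.getStateStore(storeName) resolvable inside init().

```scala
import com.expedia.www.haystack.commons.entities.encoders.PeriodReplacementEncoder
import com.expedia.www.haystack.trends.kstream.processor.MetricAggProcessorSupplier
import com.expedia.www.haystack.trends.kstream.store.HaystackStoreBuilder
import org.apache.kafka.streams.Topology

object AggTopologySketch {
  def build(): Topology = {
    val storeName = "trend-metric-store" // illustrative store name
    val topology = new Topology()
    // key/value deserializers for the source are assumed to come from StreamsConfig defaults
    topology.addSource("metricDataSource", "metric-data-points")
    topology.addProcessor("metricAggProcessor",
      new MetricAggProcessorSupplier(storeName, new PeriodReplacementEncoder),
      "metricDataSource")
    // attach the state store to the processor node that reads it
    topology.addStateStore(new HaystackStoreBuilder(storeName, 5000), "metricAggProcessor")
    topology
  }
}
```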
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/TrendMetricSerde.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/TrendMetricSerde.scala
new file mode 100644
index 000000000..6777bae59
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/TrendMetricSerde.scala
@@ -0,0 +1,145 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.kstream.serde
+
+import java.util
+
+import com.expedia.www.haystack.commons.entities.Interval
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trends.aggregation.TrendMetric
+import com.expedia.www.haystack.trends.aggregation.metrics.{AggregationType, CountMetricFactory, HistogramMetricFactory}
+import org.apache.kafka.common.serialization.{Deserializer, Serde, Serializer}
+import org.msgpack.core.MessagePack
+import org.msgpack.value.ValueFactory
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+import scala.util.Try
+
+object TrendMetricSerde extends Serde[TrendMetric] with MetricsSupport {
+
+ private val LOGGER = LoggerFactory.getLogger(this.getClass)
+
+ private val trendMetricStatsDeserFailureMeter = metricRegistry.meter("trendmetric.deser.failure")
+ private val trendMetricStatsSerSuccessMeter = metricRegistry.meter("trendmetric.ser.success")
+ private val trendMetricStatsDeserSuccessMeter = metricRegistry.meter("trendmetric.deser.success")
+ private val INTERVAL_KEY: String = "interval"
+ private val TREND_METRIC_KEY: String = "trendMetric"
+ private val AGGREGATION_TYPE_KEY = "aggregationType"
+ private val METRICS_KEY = "metrics"
+
+ override def deserializer(): Deserializer[TrendMetric] = {
+ new Deserializer[TrendMetric] {
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+
+ /**
+ * converts the messagepack-encoded bytes into a trendMetric object
+ *
+ * @param topic topic associated with data
+ * @param data serialized bytes of trendMetric
+ * @return
+ */
+ override def deserialize(topic: String, data: Array[Byte]): TrendMetric = {
+ Try {
+ val unpacker = MessagePack.newDefaultUnpacker(data)
+ val serializedWindowedMetric = unpacker.unpackValue().asMapValue().map()
+ val aggregationType = AggregationType.withName(serializedWindowedMetric.get(ValueFactory.newString(AGGREGATION_TYPE_KEY)).asStringValue().toString)
+
+ val metricFactory = aggregationType match {
+ case AggregationType.Histogram => HistogramMetricFactory
+ case AggregationType.Count => CountMetricFactory
+ }
+
+ val trendMetricMap = serializedWindowedMetric.get(ValueFactory.newString(METRICS_KEY)).asArrayValue().asScala.map(mapValue => {
+ val map = mapValue.asMapValue().map()
+ val intervalVal = map.get(ValueFactory.newString(INTERVAL_KEY)).asIntegerValue().asLong()
+ val interval = Interval.fromVal(intervalVal)
+
+ val windowedMetricByteArray = map.get(ValueFactory.newString(TREND_METRIC_KEY)).asBinaryValue().asByteArray()
+ val windowedMetric = WindowedMetricSerde.deserializer().deserialize(topic, windowedMetricByteArray)
+
+ interval -> windowedMetric
+ }).toMap
+
+ val metric = TrendMetric.restoreTrendMetric(trendMetricMap, metricFactory)
+ trendMetricStatsDeserSuccessMeter.mark()
+ metric
+
+ }.recover {
+ case ex: Exception =>
+ LOGGER.error("failed to deserialize trend metric with exception", ex)
+ trendMetricStatsDeserFailureMeter.mark()
+ throw ex
+ }.toOption.orNull
+ }
+ }
+ }
+
+ override def serializer(): Serializer[TrendMetric] = {
+ new Serializer[TrendMetric] {
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ /**
+ * converts the trendMetric object to encoded bytes
+ *
+ * @param topic topic associated with data
+ * @param trendMetric trendMetric object
+ * @return
+ */
+ override def serialize(topic: String, trendMetric: TrendMetric): Array[Byte] = {
+
+ val packer = MessagePack.newDefaultBufferPacker()
+
+ if (trendMetric == null) {
+ LOGGER.error("TrendMetric is null")
+ null
+ } else if (trendMetric.trendMetricsMap == null) {
+ LOGGER.error("TrendMetric map is null")
+ null
+ }
+ else {
+ val serializedTrendMetric = trendMetric.trendMetricsMap.map {
+ case (interval, windowedMetric) =>
+ ValueFactory.newMap(Map(
+ ValueFactory.newString(INTERVAL_KEY) -> ValueFactory.newInteger(interval.timeInSeconds),
+ ValueFactory.newString(TREND_METRIC_KEY) -> ValueFactory.newBinary(WindowedMetricSerde.serializer().serialize(topic, windowedMetric))
+ ).asJava)
+ }
+
+ val windowedMetricMessagePack = Map(
+ ValueFactory.newString(METRICS_KEY) -> ValueFactory.newArray(serializedTrendMetric.toList.asJava),
+ ValueFactory.newString(AGGREGATION_TYPE_KEY) -> ValueFactory.newString(trendMetric.getMetricFactory.getAggregationType.toString)
+ )
+ packer.packValue(ValueFactory.newMap(windowedMetricMessagePack.asJava))
+ val data = packer.toByteArray
+ trendMetricStatsSerSuccessMeter.mark()
+ data
+ }
+ }
+
+ override def close(): Unit = ()
+ }
+ }
+
+ override def close(): Unit = ()
+
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+}
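A small round-trip sketch for this serde, assuming a TrendMetric created elsewhere (for example via TrendMetric.createTrendMetric); the changelog topic name is a placeholder.

```scala
import com.expedia.www.haystack.trends.aggregation.TrendMetric
import com.expedia.www.haystack.trends.kstream.serde.TrendMetricSerde

object TrendMetricSerdeSketch {
  // the topic argument is informational for this serde; the name here is a placeholder
  def roundTrip(metric: TrendMetric): TrendMetric = {
    val bytes = TrendMetricSerde.serializer().serialize("trend-metric-store-changelog", metric)
    // deserialize returns null (after marking the failure meter) if the payload cannot be decoded
    TrendMetricSerde.deserializer().deserialize("trend-metric-store-changelog", bytes)
  }
}
```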
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/WindowedMetricSerde.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/WindowedMetricSerde.scala
new file mode 100644
index 000000000..2a77daf42
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/WindowedMetricSerde.scala
@@ -0,0 +1,125 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.kstream.serde
+
+import java.util
+
+import com.expedia.www.haystack.commons.entities.Interval
+import com.expedia.www.haystack.commons.metrics.MetricsSupport
+import com.expedia.www.haystack.trends.aggregation.metrics.{AggregationType, CountMetricFactory, HistogramMetricFactory, Metric}
+import com.expedia.www.haystack.trends.aggregation.{TrendMetric, WindowedMetric}
+import com.expedia.www.haystack.trends.aggregation.entities.TimeWindow
+import org.apache.kafka.common.serialization.{Deserializer, Serde, Serializer}
+import org.msgpack.core.MessagePack
+import org.msgpack.value.ValueFactory
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+
+
+object WindowedMetricSerde extends Serde[WindowedMetric] with MetricsSupport {
+
+ private val SERIALIZED_METRIC_KEY = "serializedMetric"
+ private val START_TIME_KEY = "startTime"
+ private val END_TIME_KEY = "endTime"
+
+ private val aggregationTypeKey = "aggregationType"
+ private val metricsKey = "metrics"
+
+ override def close(): Unit = ()
+
+ override def deserializer(): Deserializer[WindowedMetric] = {
+ new Deserializer[WindowedMetric] {
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ override def close(): Unit = ()
+
+ /**
+ * converts the messagepack-encoded bytes into a windowedMetric object
+ *
+ * @param data serialized bytes of windowedMetric
+ * @return
+ */
+ override def deserialize(topic: String, data: Array[Byte]): WindowedMetric = {
+ val unpacker = MessagePack.newDefaultUnpacker(data)
+ val serializedWindowedMetric = unpacker.unpackValue().asMapValue().map()
+ val aggregationType = AggregationType.withName(serializedWindowedMetric.get(ValueFactory.newString(aggregationTypeKey)).asStringValue().toString)
+
+ val metricFactory = aggregationType match {
+ case AggregationType.Histogram => HistogramMetricFactory
+ case AggregationType.Count => CountMetricFactory
+ }
+
+ val windowedMetricMap = mutable.TreeMap[TimeWindow, Metric]()
+ serializedWindowedMetric.get(ValueFactory.newString(metricsKey)).asArrayValue().asScala.map(mapValue => {
+ val map = mapValue.asMapValue().map()
+ val startTime = map.get(ValueFactory.newString(START_TIME_KEY)).asIntegerValue().asLong()
+ val endTime = map.get(ValueFactory.newString(END_TIME_KEY)).asIntegerValue().asLong()
+ val window = TimeWindow(startTime, endTime)
+ val metric = metricFactory.getMetricSerde.deserialize(map.get(ValueFactory.newString(SERIALIZED_METRIC_KEY)).asBinaryValue().asByteArray())
+ windowedMetricMap.put(window, metric)
+ })
+
+ val intervalVal = windowedMetricMap.firstKey.endTime - windowedMetricMap.firstKey.startTime
+ val interval = Interval.fromVal(intervalVal)
+ val metric = WindowedMetric.restoreWindowedMetric(windowedMetricMap, metricFactory, TrendMetric.trendMetricConfig(interval)._1, interval)
+ metric
+ }
+ }
+ }
+
+
+ override def serializer(): Serializer[WindowedMetric] = {
+ new Serializer[WindowedMetric] {
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+
+ /**
+ * converts the windowedMetric object to encoded bytes
+ *
+ * @param topic topic associated with data
+ * @param windowedMetric windowedMetric object
+ * @return
+ */
+ override def serialize(topic: String, windowedMetric: WindowedMetric): Array[Byte] = {
+
+ val packer = MessagePack.newDefaultBufferPacker()
+
+ val serializedMetrics = windowedMetric.windowedMetricsMap.map {
+ case (timeWindow, metric) =>
+ ValueFactory.newMap(Map(
+ ValueFactory.newString(START_TIME_KEY) -> ValueFactory.newInteger(timeWindow.startTime),
+ ValueFactory.newString(END_TIME_KEY) -> ValueFactory.newInteger(timeWindow.endTime),
+ ValueFactory.newString(SERIALIZED_METRIC_KEY) -> ValueFactory.newBinary(windowedMetric.getMetricFactory.getMetricSerde.serialize(metric))
+ ).asJava)
+ }
+ val windowedMetricMessagePack = Map(
+ ValueFactory.newString(metricsKey) -> ValueFactory.newArray(serializedMetrics.toList.asJava),
+ ValueFactory.newString(aggregationTypeKey) -> ValueFactory.newString(windowedMetric.getMetricFactory.getAggregationType.toString)
+ )
+ packer.packValue(ValueFactory.newMap(windowedMetricMessagePack.asJava))
+ val data = packer.toByteArray
+ data
+ }
+
+ override def close(): Unit = ()
+ }
+ }
+
+ override def configure(map: util.Map[String, _], b: Boolean): Unit = ()
+}
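Both serdes above share the same MessagePack map-of-values layout; the following standalone sketch shows that pack/unpack pattern with toy timestamps in place of real metric payloads.

```scala
import org.msgpack.core.MessagePack
import org.msgpack.value.{Value, ValueFactory}

import scala.collection.JavaConverters._

object MsgPackMapSketch extends App {
  // pack a {startTime, endTime} map, mirroring the per-window entries above (toy values)
  val packer = MessagePack.newDefaultBufferPacker()
  val payload = Map[Value, Value](
    ValueFactory.newString("startTime") -> ValueFactory.newInteger(1514764800L),
    ValueFactory.newString("endTime") -> ValueFactory.newInteger(1514764860L)
  )
  packer.packValue(ValueFactory.newMap(payload.asJava))
  val bytes = packer.toByteArray

  // unpack and read a field back, exactly as the deserializers above do
  val unpacked = MessagePack.newDefaultUnpacker(bytes).unpackValue().asMapValue().map()
  val startTime = unpacked.get(ValueFactory.newString("startTime")).asIntegerValue().asLong()
  println(s"startTime=$startTime") // 1514764800
}
```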
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/metric/CountMetricSerde.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/metric/CountMetricSerde.scala
new file mode 100644
index 000000000..3a2851f28
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/metric/CountMetricSerde.scala
@@ -0,0 +1,41 @@
+package com.expedia.www.haystack.trends.kstream.serde.metric
+
+import com.expedia.www.haystack.commons.entities.Interval
+import com.expedia.www.haystack.commons.entities.Interval.Interval
+import com.expedia.www.haystack.trends.aggregation.metrics.{CountMetric, Metric}
+import org.msgpack.core.MessagePack
+import org.msgpack.value.{Value, ValueFactory}
+
+import scala.collection.JavaConverters._
+
+/**
+ * Serde which lets us serialize and deserialize the count metric. It is used when we serialize/deserialize the windowedMetric, which can internally contain a count or histogram metric.
+ * It uses messagepack to pack the object into bytes.
+ */
+object CountMetricSerde extends MetricSerde {
+
+ private val currentCountKey = "currentCount"
+ private val intervalKey = "interval"
+
+
+ override def serialize(metric: Metric): Array[Byte] = {
+
+ val countMetric = metric.asInstanceOf[CountMetric]
+ val packer = MessagePack.newDefaultBufferPacker()
+ val metricData = Map[Value, Value](
+ ValueFactory.newString(currentCountKey) -> ValueFactory.newInteger(countMetric.getCurrentCount),
+ ValueFactory.newString(intervalKey) -> ValueFactory.newString(countMetric.getMetricInterval.name)
+ )
+ packer.packValue(ValueFactory.newMap(metricData.asJava))
+ packer.toByteArray
+ }
+
+ override def deserialize(data: Array[Byte]): Metric = {
+ val metric = MessagePack.newDefaultUnpacker(data).unpackValue().asMapValue().map()
+ val currentCount: Long = metric.get(ValueFactory.newString(currentCountKey)).asIntegerValue().asLong()
+ val interval: Interval = Interval.fromName(metric.get(ValueFactory.newString(intervalKey)).asStringValue().toString)
+ new CountMetric(interval, currentCount)
+ }
+
+
+}
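A quick, hedged round-trip check for this serde, using the two-argument CountMetric constructor and getters that the serde itself relies on.

```scala
import com.expedia.www.haystack.commons.entities.Interval
import com.expedia.www.haystack.trends.aggregation.metrics.CountMetric
import com.expedia.www.haystack.trends.kstream.serde.metric.CountMetricSerde

object CountSerdeSketch extends App {
  val original = new CountMetric(Interval.ONE_MINUTE, 42L)
  val restored = CountMetricSerde.deserialize(CountMetricSerde.serialize(original))
  // the restored metric should report the same interval and running count as the original
  println(restored.getMetricInterval.name) // prints the interval name
}
```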
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/metric/HistogramMetricSerde.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/metric/HistogramMetricSerde.scala
new file mode 100644
index 000000000..d429f87b5
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/metric/HistogramMetricSerde.scala
@@ -0,0 +1,70 @@
+package com.expedia.www.haystack.trends.kstream.serde.metric
+
+import java.nio.ByteBuffer
+
+import com.expedia.www.haystack.commons.entities.Interval
+import com.expedia.www.haystack.commons.entities.Interval.Interval
+import com.expedia.www.haystack.trends.aggregation.TrendHdrHistogram
+import com.expedia.www.haystack.trends.aggregation.metrics.{HistogramMetric, Metric}
+import com.expedia.www.haystack.trends.config.AppConfiguration
+import com.expedia.www.haystack.trends.config.entities.HistogramUnit
+import org.HdrHistogram.Histogram
+import org.msgpack.core.MessagePack
+import org.msgpack.value.{Value, ValueFactory}
+import org.slf4j.LoggerFactory
+
+import scala.collection.JavaConverters._
+
+/**
+ * Serde which lets us serialize and deserialize the histogram metric. It is used when we serialize/deserialize the windowedMetric, which can internally contain a count or histogram metric.
+ * It uses messagepack to pack the object into bytes.
+ */
+object HistogramMetricSerde extends MetricSerde {
+ private val LOGGER = LoggerFactory.getLogger(HistogramMetricSerde.getClass)
+
+ private val intHistogramKey = "intHistogram"
+ private val intervalKey = "interval"
+ private val unitKey = "unit"
+ private val highestTrackableValKey = "highestTrackableVal"
+
+ override def serialize(metric: Metric): Array[Byte] = {
+
+ val histogramMetric = metric.asInstanceOf[HistogramMetric]
+ val packer = MessagePack.newDefaultBufferPacker()
+ val runningHistogram = histogramMetric.getRunningHistogram
+ val serializedHistogram = ByteBuffer.allocate(runningHistogram.getEstimatedFootprintInBytes)
+ runningHistogram.encodeIntoByteBuffer(serializedHistogram)
+ val metricData = Map[Value, Value](
+ ValueFactory.newString(intHistogramKey) -> ValueFactory.newBinary(serializedHistogram.array()),
+ ValueFactory.newString(intervalKey) -> ValueFactory.newString(metric.getMetricInterval.name),
+ ValueFactory.newString(unitKey) -> ValueFactory.newString(histogramMetric.getRunningHistogram.unit.toString),
+ ValueFactory.newString(highestTrackableValKey) -> ValueFactory.newInteger(histogramMetric.getRunningHistogram.getHighestTrackableValue)
+ )
+ packer.packValue(ValueFactory.newMap(metricData.asJava))
+ packer.toByteArray
+
+ }
+
+ override def deserialize(data: Array[Byte]): HistogramMetric = {
+ val metric = MessagePack.newDefaultUnpacker(data).unpackValue().asMapValue().map()
+ val serializedHistogram = metric.get(ValueFactory.newString(intHistogramKey)).asBinaryValue().asByteArray
+ val interval: Interval = Interval.fromName(metric.get(ValueFactory.newString(intervalKey)).asStringValue().toString)
+
+ // before the unit concept was introduced, the default unit for recorded histogram values was micros
+ val unitValue = metric.get(ValueFactory.newString(unitKey))
+ val histogramUnit = if(unitValue == null) HistogramUnit.default else HistogramUnit.from(unitValue.asStringValue().toString)
+
+ val highestTrackableVal = metric.get(ValueFactory.newString(highestTrackableValKey))
+ val maxTrackableHistogramVal = if (highestTrackableVal == null) Int.MaxValue else highestTrackableVal.asIntegerValue().toInt
+
+ try {
+ val hdrHistogram = Histogram.decodeFromByteBuffer(ByteBuffer.wrap(serializedHistogram), maxTrackableHistogramVal)
+ new HistogramMetric(interval, TrendHdrHistogram(hdrHistogram, histogramUnit))
+ } catch {
+ case ex: Exception =>
+ LOGGER.error("Fail to deserialize the hdr histogram with error", ex)
+ // create a default hdr histogram using the config
+ new HistogramMetric(interval, new TrendHdrHistogram(AppConfiguration.histogramMetricConfiguration))
+ }
+ }
+}
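The HdrHistogram byte-buffer round trip at the heart of this serde, isolated into a sketch; the histogram bounds mirror the histogram block of the test config further below but are assumptions here.

```scala
import java.nio.ByteBuffer

import org.HdrHistogram.Histogram

object HdrRoundTripSketch extends App {
  // highestTrackableValue (30 min in millis) and 2 significant digits are assumed bounds
  val histogram = new Histogram(1800000L, 2)
  histogram.recordValue(42)
  histogram.recordValue(14000)

  // encode from position 0 and keep the whole backing array, as the serde above does
  val buffer = ByteBuffer.allocate(histogram.getEstimatedFootprintInBytes)
  histogram.encodeIntoByteBuffer(buffer)
  val bytes = buffer.array()

  val decoded = Histogram.decodeFromByteBuffer(ByteBuffer.wrap(bytes), 1800000L)
  println(decoded.getMaxValue) // ~14000, up to the histogram's precision
}
```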
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/metric/MetricSerde.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/metric/MetricSerde.scala
new file mode 100644
index 000000000..f79ec3798
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/serde/metric/MetricSerde.scala
@@ -0,0 +1,8 @@
+package com.expedia.www.haystack.trends.kstream.serde.metric
+
+import com.expedia.www.haystack.trends.aggregation.metrics.Metric
+
+trait MetricSerde {
+  def serialize(metric: Metric): Array[Byte]
+  def deserialize(data: Array[Byte]): Metric
+}
diff --git a/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/store/HaystackStoreBuilder.scala b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/store/HaystackStoreBuilder.scala
new file mode 100644
index 000000000..f5f0c358b
--- /dev/null
+++ b/trends/timeseries-aggregator/src/main/scala/com/expedia/www/haystack/trends/kstream/store/HaystackStoreBuilder.scala
@@ -0,0 +1,52 @@
+package com.expedia.www.haystack.trends.kstream.store
+
+import java.util
+
+import com.expedia.www.haystack.trends.aggregation.TrendMetric
+import com.expedia.www.haystack.trends.kstream.serde.TrendMetricSerde
+import org.apache.kafka.common.serialization.Serdes.StringSerde
+import org.apache.kafka.streams.state.internals.InMemoryLRUCacheStoreSupplier
+import org.apache.kafka.streams.state.{KeyValueStore, StoreBuilder}
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+
+
+class HaystackStoreBuilder(storeName: String, maxCacheSize: Int) extends StoreBuilder[KeyValueStore[String, TrendMetric]] {
+
+ private var changeLogEnabled = false
+ private var changeLogProperties = mutable.Map[String, String]()
+
+ override def loggingEnabled(): Boolean = {
+ changeLogEnabled
+ }
+
+ override def withLoggingEnabled(config: util.Map[String, String]): StoreBuilder[KeyValueStore[String, TrendMetric]] = {
+ changeLogEnabled = true
+ changeLogProperties = config.asScala
+ this
+ }
+
+ override def logConfig(): util.Map[String, String] = changeLogProperties.asJava
+
+ override def name(): String = {
+ storeName
+ }
+
+ override def withCachingEnabled(): StoreBuilder[KeyValueStore[String, TrendMetric]] = {
+ changeLogEnabled = true
+ this
+ }
+
+ override def build(): KeyValueStore[String, TrendMetric] = {
+ val lRUCacheStoreSupplier = new InMemoryLRUCacheStoreSupplier[String, TrendMetric](storeName, maxCacheSize, new StringSerde, TrendMetricSerde, loggingEnabled(), logConfig())
+ lRUCacheStoreSupplier.get().asInstanceOf[KeyValueStore[String, TrendMetric]]
+ }
+
+
+ override def withLoggingDisabled(): StoreBuilder[KeyValueStore[String, TrendMetric]] = {
+ changeLogEnabled = false
+ changeLogProperties.clear()
+ this
+ }
+}
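A hedged sketch of building the store directly; the store name and cache size are illustrative, and the changelog settings echo the state.store block of the test config that follows.

```scala
import scala.collection.JavaConverters._

import com.expedia.www.haystack.trends.kstream.store.HaystackStoreBuilder

object StoreBuilderSketch extends App {
  // changelog topic settings mirroring the state.store block in the test config (example values)
  val changelogConfig = Map(
    "cleanup.policy" -> "compact,delete",
    "retention.ms" -> "14400000"
  ).asJava

  val builder = new HaystackStoreBuilder("trend-metric-store", 5000)
    .withLoggingEnabled(changelogConfig)

  val store = builder.build() // an in-memory LRU KeyValueStore[String, TrendMetric]
  println(store.name())
}
```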
diff --git a/trends/timeseries-aggregator/src/test/resources/config/base.conf b/trends/timeseries-aggregator/src/test/resources/config/base.conf
new file mode 100644
index 000000000..0bb8be6b0
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/resources/config/base.conf
@@ -0,0 +1,66 @@
+health.status.path = "/app/isHealthy"
+
+kafka {
+ close.timeout.ms = 30000
+
+ streams {
+ application.id = "haystack-timeseries-aggregator-dev"
+ bootstrap.servers = "192.168.99.100:9092"
+ num.stream.threads = 1
+ commit.interval.ms = 3000
+ auto.offset.reset = latest
+ timestamp.extractor = "com.expedia.www.haystack.commons.kstreams.MetricDataTimestampExtractor"
+ }
+
+
+ // To produce data to both the external and the internal kafka: set enable.external.kafka.produce to true and uncomment the props.
+ // To produce only to the same (internal) kafka: set enable.external.kafka.produce to false and comment out the props.
+ producer {
+ topics : [
+ {
+ topic: "metrics"
+ serdeClassName : "com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricDataSerde"
+ enabled: true
+ },
+ {
+ topic: "mdm"
+ serdeClassName : "com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricTankSerde"
+ enabled: true
+ }
+ ]
+ enable.external.kafka.produce = true
+ external.kafka.topic = "mdm"
+ props {
+ bootstrap.servers = "kafkasvc:9092"
+ }
+ }
+
+ consumer {
+ topic = "metric-data-points"
+ }
+}
+
+state.store {
+ cleanup.policy = "compact,delete"
+ retention.ms = 14400000 // 4Hrs
+}
+
+statestore {
+ enable.logging = true
+ logging.delay.seconds = 60
+}
+
+metricpoint.encoder.type = "periodreplacement"
+
+histogram {
+ max.value = 1800000 // 30 mins
+ precision = 2
+ value.unit = "millis" // can be micros / millis / seconds
+}
+
+// additional tags to be passed as part of metric data
+// It can be of format typesafe hocon config such as
+// additionalTags = {key: "value", key2:"value2"}
+// or json such as
+// additionalTags = """{"key": "value", "key2":"value2"}"""
+additionalTags = """{"key1": "value1", "key2":"value2"}"""
diff --git a/trends/timeseries-aggregator/src/test/resources/logback-test.xml b/trends/timeseries-aggregator/src/test/resources/logback-test.xml
new file mode 100644
index 000000000..adfa02c68
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/resources/logback-test.xml
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/FeatureSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/FeatureSpec.scala
new file mode 100644
index 000000000..6d88865f5
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/FeatureSpec.scala
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.feature
+
+import java.util
+import java.util.Properties
+
+import com.expedia.metrics.{MetricData, MetricDefinition, TagCollection}
+import com.expedia.www.haystack.commons.entities.encoders.PeriodReplacementEncoder
+import com.expedia.www.haystack.trends.config.AppConfiguration
+import com.expedia.www.haystack.trends.config.entities.{KafkaConfiguration, KafkaProduceConfiguration, KafkaSinkTopic, StateStoreConfiguration}
+import org.apache.kafka.streams.StreamsConfig
+import org.apache.kafka.streams.Topology.AutoOffsetReset
+import org.apache.kafka.streams.processor.WallclockTimestampExtractor
+import org.easymock.EasyMock
+import org.scalatest._
+import org.scalatest.easymock.EasyMockSugar
+
+import scala.collection.JavaConverters._
+
+
+trait FeatureSpec extends FeatureSpecLike with GivenWhenThen with Matchers with EasyMockSugar {
+
+ def currentTimeInSecs: Long = {
+ System.currentTimeMillis() / 1000L
+ }
+
+ protected def mockAppConfig: AppConfiguration = {
+ val kafkaConsumeTopic = "test-consume"
+ val kafkaProduceTopic = "test-produce"
+ val kafkaMetricTankProduceTopic = "test-mdm-produce"
+ val streamsConfig = new Properties()
+ streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, "test-app")
+ streamsConfig.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "test-kafka-broker")
+
+ val kafkaSinkTopics = List(KafkaSinkTopic("metrics","com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricDataSerde",true), KafkaSinkTopic("mdm","com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricTankSerde",true))
+ val kafkaConfig = KafkaConfiguration(new StreamsConfig(streamsConfig), KafkaProduceConfiguration(kafkaSinkTopics, None, "mdm", false), kafkaConsumeTopic, AutoOffsetReset.EARLIEST, new WallclockTimestampExtractor, 30000)
+ val projectConfiguration = mock[AppConfiguration]
+
+ expecting {
+ projectConfiguration.kafkaConfig.andReturn(kafkaConfig).anyTimes()
+ projectConfiguration.encoder.andReturn(new PeriodReplacementEncoder).anyTimes()
+ projectConfiguration.stateStoreConfig.andReturn(StateStoreConfiguration(128, false, 60, Map())).anyTimes()
+ projectConfiguration.additionalTags.andReturn(Map("k1"->"v1", "k2"-> "v2")).anyTimes()
+ }
+ EasyMock.replay(projectConfiguration)
+ projectConfiguration
+ }
+
+ protected def getMetricData(metricKey: String, tags: Map[String, String], value: Double, timeStamp: Long): MetricData = {
+
+ val tagsMap = new java.util.LinkedHashMap[String, String] {
+ if (tags != null) putAll(tags.asJava)
+ put(MetricDefinition.MTYPE, "gauge")
+ put(MetricDefinition.UNIT, "short")
+ }
+ val metricDefinition = new MetricDefinition(metricKey, new TagCollection(tagsMap), TagCollection.EMPTY)
+ new MetricData(metricDefinition, value, timeStamp)
+ }
+
+ protected def containsTagInMetricData(metricData: MetricData, tagKey: String, tagValue: String): Boolean = {
+ val tags = getTagsFromMetricData(metricData)
+ tags.containsKey(tagKey) && tags.get(tagKey).equalsIgnoreCase(tagValue)
+ }
+
+ protected def getTagsFromMetricData(metricData: MetricData): util.Map[String, String] = {
+ metricData.getMetricDefinition.getTags.getKv
+ }
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/TrendMetricSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/TrendMetricSpec.scala
new file mode 100644
index 000000000..63b488290
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/TrendMetricSpec.scala
@@ -0,0 +1,151 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.feature.tests.aggregation
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.commons.entities.Interval.Interval
+import com.expedia.www.haystack.commons.entities.{Interval, TagKeys}
+import com.expedia.www.haystack.trends.aggregation.TrendMetric
+import com.expedia.www.haystack.trends.aggregation.entities.TimeWindow
+import com.expedia.www.haystack.trends.aggregation.metrics.{CountMetric, CountMetricFactory, HistogramMetric, HistogramMetricFactory}
+import com.expedia.www.haystack.trends.config.AppConfiguration
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+
+class TrendMetricSpec extends FeatureSpec {
+
+ val SERVICE_NAME = "dummy_service"
+ val OPERATION_NAME = "dummy_operation"
+ val keys = Map(TagKeys.OPERATION_NAME_KEY -> OPERATION_NAME,
+ TagKeys.SERVICE_NAME_KEY -> SERVICE_NAME)
+
+ val alternateMetricKeys = Map(TagKeys.OPERATION_NAME_KEY -> OPERATION_NAME.concat("_2"),
+ TagKeys.SERVICE_NAME_KEY -> SERVICE_NAME)
+
+
+ feature("Creating a TrendMetric") {
+
+ scenario("should get Histogram aggregated MetricPoints post watermarked metrics") {
+ val DURATION_METRIC_NAME = "duration"
+
+ Given("some duration MetricData points")
+ val intervals: List[Interval] = List(Interval.ONE_MINUTE)
+ val currentTime = 1
+ val expectedMetric: HistogramMetric = new HistogramMetric(Interval.ONE_MINUTE)
+ val firstMetricData: MetricData = getMetricData(DURATION_METRIC_NAME, keys, 1, currentTime)
+
+ When("creating a WindowedMetric and passing first MetricData")
+ val trendMetric: TrendMetric = TrendMetric.createTrendMetric(intervals, firstMetricData, HistogramMetricFactory)
+ trendMetric.compute(firstMetricData)
+ expectedMetric.compute(firstMetricData)
+
+ Then("should return 0 MetricData points if we try to get within (watermark + 1) metrics")
+ trendMetric.getComputedMetricPoints(firstMetricData).size shouldBe 0
+ trendMetric.shouldLogToStateStore shouldBe true
+ var i = TrendMetric.trendMetricConfig(intervals.head)._1
+ while (i > 0) {
+ val secondMetricData: MetricData = getMetricData(DURATION_METRIC_NAME, keys, 2, currentTime + intervals.head.timeInSeconds * i)
+ trendMetric.compute(secondMetricData)
+ trendMetric.shouldLogToStateStore shouldBe true
+ trendMetric.getComputedMetricPoints(secondMetricData).size shouldEqual 0
+ i = i - 1
+ }
+
+ When("adding another MetricData after watermark")
+ val metricDataAfterWatermark: MetricData = getMetricData(DURATION_METRIC_NAME, keys, 10, currentTime + intervals.head.timeInSeconds * (TrendMetric.trendMetricConfig(intervals.head)._1 + 1))
+ trendMetric.compute(metricDataAfterWatermark)
+ val aggMetrics = trendMetric.getComputedMetricPoints(metricDataAfterWatermark)
+ aggMetrics.size shouldEqual 1 * 7 // 1 interval * 7 histogram stats (mean, max, min, median, stddev, *_95, *_99)
+
+ Then("values for histogram should same as expected")
+ expectedMetric.getRunningHistogram.getMean shouldEqual aggMetrics.find(metricData => containsTagInMetricData(metricData, TagKeys.STATS_KEY, "mean")).get.getValue
+ expectedMetric.getRunningHistogram.getMaxValue shouldEqual aggMetrics.find(metricData => containsTagInMetricData(metricData, TagKeys.STATS_KEY, "max")).get.getValue
+ expectedMetric.getRunningHistogram.getMinValue shouldEqual aggMetrics.find(metricData => containsTagInMetricData(metricData, TagKeys.STATS_KEY, "min")).get.getValue
+ expectedMetric.getRunningHistogram.getValueAtPercentile(99) shouldEqual aggMetrics.find(metricData => containsTagInMetricData(metricData, TagKeys.STATS_KEY, "*_99")).get.getValue
+ expectedMetric.getRunningHistogram.getValueAtPercentile(95) shouldEqual aggMetrics.find(metricData => containsTagInMetricData(metricData, TagKeys.STATS_KEY, "*_95")).get.getValue
+ expectedMetric.getRunningHistogram.getValueAtPercentile(50) shouldEqual aggMetrics.find(metricData => containsTagInMetricData(metricData, TagKeys.STATS_KEY, "*_50")).get.getValue
+
+ Then("timestamp of the evicted metric should equal the endtime of that window")
+ aggMetrics.map(metricPoint => {
+ metricPoint.getTimestamp shouldEqual TimeWindow(firstMetricData.getTimestamp, intervals.head).endTime
+ })
+ }
+
+ scenario("should get count aggregated MetricPoint post watermarked metrics") {
+ val COUNT_METRIC_NAME = "span-received"
+
+ Given("some count MetricPoints")
+ val intervals: List[Interval] = List(Interval.ONE_MINUTE, Interval.FIVE_MINUTE)
+ val currentTime = 1
+
+ val firstMetricData: MetricData = getMetricData(COUNT_METRIC_NAME, keys, 1, currentTime)
+ val trendMetric = TrendMetric.createTrendMetric(intervals, firstMetricData, CountMetricFactory)
+ trendMetric.compute(firstMetricData)
+ val expectedMetric: CountMetric = new CountMetric(Interval.FIVE_MINUTE)
+ expectedMetric.compute(firstMetricData)
+
+ var i = TrendMetric.trendMetricConfig(intervals.last)._1
+ while (i > 0) {
+ val secondMetricData: MetricData = getMetricData(COUNT_METRIC_NAME, keys, 1, currentTime + intervals.last.timeInSeconds * i)
+ trendMetric.compute(secondMetricData)
+ i = i - 1
+ }
+
+ When("adding another MetricPoint after watermark")
+ val metricDataAfterWatermark: MetricData = getMetricData(COUNT_METRIC_NAME, keys, 10, currentTime + intervals.last.timeInSeconds * (TrendMetric.trendMetricConfig(intervals.head)._1 + 1))
+ trendMetric.compute(metricDataAfterWatermark)
+ val aggMetrics = trendMetric.getComputedMetricPoints(metricDataAfterWatermark)
+
+ Then("values for count should same as expected")
+ expectedMetric.getCurrentCount shouldEqual aggMetrics.find(metricData => containsTagInMetricData(metricData, TagKeys.INTERVAL_KEY, "FiveMinute")).get.getValue
+ }
+
+ scenario("should log to state store for different metrics based on timestamp") {
+ val COUNT_METRIC_NAME = "span-received"
+
+ Given("multiple metricPoints for different operations")
+ val intervals: List[Interval] = List(Interval.ONE_MINUTE, Interval.FIVE_MINUTE)
+ val currentTime = 1
+
+ val firstMetricData: MetricData = getMetricData(COUNT_METRIC_NAME, keys, 1, currentTime)
+ val anotherMetricData: MetricData = getMetricData(COUNT_METRIC_NAME, alternateMetricKeys, 1, currentTime)
+ val trendMetric = TrendMetric.createTrendMetric(intervals, firstMetricData, CountMetricFactory)
+ val anotherTrendMetric = TrendMetric.createTrendMetric(intervals, anotherMetricData, CountMetricFactory)
+ trendMetric.compute(firstMetricData)
+ trendMetric.shouldLogToStateStore shouldBe true
+
+ anotherTrendMetric.compute(anotherMetricData)
+ anotherTrendMetric.shouldLogToStateStore shouldBe true
+
+ When("metricpoints are added to multiple trend metrics")
+ val secondMetricData: MetricData = getMetricData(COUNT_METRIC_NAME, keys, 1, currentTime + 1 + AppConfiguration.stateStoreConfig.changeLogDelayInSecs)
+ trendMetric.compute(secondMetricData)
+ val secondAnotherMetricData = getMetricData(COUNT_METRIC_NAME, alternateMetricKeys, 1, currentTime + 1 + AppConfiguration.stateStoreConfig.changeLogDelayInSecs)
+ anotherTrendMetric.compute(secondAnotherMetricData)
+
+ Then("trend metric should log to state store")
+ trendMetric.shouldLogToStateStore shouldBe true
+ anotherTrendMetric.shouldLogToStateStore shouldBe true
+
+ }
+
+
+ }
+
+
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/WindowedMetricSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/WindowedMetricSpec.scala
new file mode 100644
index 000000000..8fb7dee83
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/WindowedMetricSpec.scala
@@ -0,0 +1,163 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.feature.tests.aggregation
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.commons.entities.Interval
+import com.expedia.www.haystack.commons.entities.Interval.Interval
+import com.expedia.www.haystack.trends.aggregation.WindowedMetric
+import com.expedia.www.haystack.trends.aggregation.metrics.{CountMetric, HistogramMetric, HistogramMetricFactory}
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+
+class WindowedMetricSpec extends FeatureSpec {
+
+ val DURATION_METRIC_NAME = "duration"
+ val SERVICE_NAME = "dummy_service"
+ val OPERATION_NAME = "dummy_operation"
+ val keys = Map(TagKeys.OPERATION_NAME_KEY -> OPERATION_NAME,
+ TagKeys.SERVICE_NAME_KEY -> SERVICE_NAME)
+
+ object TagKeys {
+ val OPERATION_NAME_KEY = "operationName"
+ val SERVICE_NAME_KEY = "serviceName"
+ }
+
+ feature("Creating a WindowedMetric") {
+
+ scenario("should get aggregated MetricData List post watermarked metrics") {
+
+ Given("some duration MetricData")
+ val durations: List[Long] = List(10, 140)
+ val intervals: List[Interval] = List(Interval.ONE_MINUTE, Interval.FIFTEEN_MINUTE)
+
+ val metricDataList: List[MetricData] = durations.map(duration => getMetricData(DURATION_METRIC_NAME, keys, duration, currentTimeInSecs))
+
+ When("creating a WindowedMetric and passing some MetricData and aggregation type as Histogram")
+ val windowedMetric: WindowedMetric = WindowedMetric.createWindowedMetric(metricDataList.head, HistogramMetricFactory, watermarkedWindows = 1, Interval.ONE_MINUTE)
+
+ metricDataList.indices.foreach(i => if (i > 0) {
+ windowedMetric.compute(metricDataList(i))
+ })
+
+ val expectedMetric: HistogramMetric = new HistogramMetric(Interval.ONE_MINUTE)
+ metricDataList.foreach(metricData => expectedMetric.compute(metricData))
+
+ Then("should return 0 Metric Data Points if we try to get it before interval")
+ val aggregatedMetricPointsBefore: List[MetricData] = windowedMetric.getComputedMetricDataList(metricDataList.last)
+ aggregatedMetricPointsBefore.size shouldBe 0
+
+ When("adding a MetricData outside of first Interval")
+ val newMetricPointAfterFirstInterval: MetricData = getMetricData(DURATION_METRIC_NAME, keys, 80, currentTimeInSecs + intervals.head.timeInSeconds)
+
+ windowedMetric.compute(newMetricPointAfterFirstInterval)
+
+ val aggregatedMetricPointsAfterFirstInterval: List[MetricData] = windowedMetric.getComputedMetricDataList(metricDataList.last)
+
+ // TODO: fix dev code and then add all the validation tests
+ Then("should return the metric data for the previous interval")
+
+
+ When("adding a MetricData outside of second interval now")
+ expectedMetric.compute(newMetricPointAfterFirstInterval)
+ val newMetricPointAfterSecondInterval: MetricData = getMetricData(DURATION_METRIC_NAME, keys, 80, currentTimeInSecs + intervals(1).timeInSeconds)
+ windowedMetric.compute(newMetricPointAfterSecondInterval)
+ val aggregatedMetricPointsAfterSecondInterval: List[MetricData] = windowedMetric.getComputedMetricDataList(metricDataList.last)
+
+ // TODO: fix dev code and then add all the validation tests
+ Then("should return the metric points for the second interval")
+ }
+
+ scenario("should skip aggregated MetricData List for duration values greater than permissible value post watermarked metrics") {
+
+ Given("duration MetricData with duration values greater than permissible value")
+ val durations: List[Double] = List(4.576661E9, 5.57661E9)
+ val intervals: List[Interval] = List(Interval.ONE_MINUTE, Interval.FIFTEEN_MINUTE)
+
+ val metricDataList: List[MetricData] = durations.map(duration => getMetricData(DURATION_METRIC_NAME, keys, duration, currentTimeInSecs))
+
+ When("creating a WindowedMetric and passing some MetricData and aggregation type as Histogram")
+ val windowedMetric: WindowedMetric = WindowedMetric.createWindowedMetric(metricDataList.head, HistogramMetricFactory, watermarkedWindows = 1, Interval.ONE_MINUTE)
+
+ metricDataList.indices.foreach(i => if (i > 0) {
+ windowedMetric.compute(metricDataList(i))
+ })
+
+ val expectedMetric: HistogramMetric = new HistogramMetric(Interval.ONE_MINUTE)
+ metricDataList.foreach(metricData => expectedMetric.compute(metricData))
+
+ Then("should return 0 Metric Data Points if we try to get it before interval")
+ val aggregatedMetricPointsBefore: List[MetricData] = windowedMetric.getComputedMetricDataList(metricDataList.last)
+ aggregatedMetricPointsBefore.size shouldBe 0
+
+ When("adding a MetricData outside of first Interval")
+ val newMetricPointAfterFirstInterval: MetricData = getMetricData(DURATION_METRIC_NAME, keys, 80, currentTimeInSecs + intervals.head.timeInSeconds)
+
+ windowedMetric.compute(newMetricPointAfterFirstInterval)
+
+ val aggregatedMetricPointsAfterFirstInterval: List[MetricData] = windowedMetric.getComputedMetricDataList(metricDataList.last)
+
+ Then("should return the empty metric data for the previous interval")
+ aggregatedMetricPointsAfterFirstInterval.length shouldBe 0
+
+
+ When("adding a MetricData outside of second interval now")
+ expectedMetric.compute(newMetricPointAfterFirstInterval)
+ val newMetricPointAfterSecondInterval: MetricData = getMetricData(DURATION_METRIC_NAME, keys, 80, currentTimeInSecs + intervals(1).timeInSeconds)
+ windowedMetric.compute(newMetricPointAfterSecondInterval)
+ val aggregatedMetricPointsAfterSecondInterval: List[MetricData] = windowedMetric.getComputedMetricDataList(metricDataList.last)
+
+ // TODO: fix dev code and then add all the validation tests
+ Then("should return the metric points for the second interval")
+ }
+
+ scenario("should get aggregated MetricData points post maximum Interval") {
+
+ Given("some duration MetricData points")
+ val durations: List[Long] = List(10, 140, 250)
+ val intervals: List[Interval] = List(Interval.ONE_MINUTE, Interval.FIFTEEN_MINUTE, Interval.ONE_HOUR)
+
+ val metricDataList: List[MetricData] = durations.map(duration => getMetricData(DURATION_METRIC_NAME, keys, duration, currentTimeInSecs))
+
+
+ When("creating a WindowedMetric and passing some MetricData points")
+ val windowedMetric: WindowedMetric = WindowedMetric.createWindowedMetric(metricDataList.head, HistogramMetricFactory, watermarkedWindows = 1, Interval.ONE_MINUTE)
+
+ metricDataList.indices.foreach(i => if (i > 0) {
+ windowedMetric.compute(metricDataList(i))
+ })
+
+ When("adding a MetricData point outside of max Interval")
+ val newMetricPointAfterMaxInterval: MetricData = getMetricData(DURATION_METRIC_NAME, keys, 80, currentTimeInSecs + intervals.last.timeInSeconds)
+ windowedMetric.compute(newMetricPointAfterMaxInterval)
+ val aggregatedMetricDataPointsAfterMaxInterval: List[MetricData] = windowedMetric.getComputedMetricDataList(metricDataList.last)
+
+ Then("should return valid values for all count intervals")
+
+ val expectedOneMinuteMetric: CountMetric = new CountMetric(Interval.ONE_MINUTE)
+ metricDataList.foreach(metricPoint => expectedOneMinuteMetric.compute(metricPoint))
+
+ val expectedFifteenMinuteMetric: CountMetric = new CountMetric(Interval.FIFTEEN_MINUTE)
+ metricDataList.foreach(metricPoint => expectedFifteenMinuteMetric.compute(metricPoint))
+
+ val expectedOneHourMetric: CountMetric = new CountMetric(Interval.ONE_HOUR)
+ metricDataList.foreach(metricPoint => expectedOneHourMetric.compute(metricPoint))
+ }
+ }
+
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/metrics/CountMetricSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/metrics/CountMetricSpec.scala
new file mode 100644
index 000000000..336e79fb6
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/metrics/CountMetricSpec.scala
@@ -0,0 +1,83 @@
+package com.expedia.www.haystack.trends.feature.tests.aggregation.metrics
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.commons.entities.Interval.Interval
+import com.expedia.www.haystack.commons.entities.{Interval, TagKeys}
+import com.expedia.www.haystack.trends.aggregation.metrics.{CountMetric, Metric}
+import com.expedia.www.haystack.trends.aggregation.entities._
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+import scala.collection.JavaConverters._
+
+class CountMetricSpec extends FeatureSpec {
+
+ val DURATION_METRIC_NAME = "duration"
+ val SUCCESS_METRIC_NAME = "success-spans"
+ val INVALID_METRIC_NAME = "invalid_metric"
+ val SERVICE_NAME = "dummy_service"
+ val OPERATION_NAME = "dummy_operation"
+
+ val keys = Map(TagKeys.OPERATION_NAME_KEY -> OPERATION_NAME,
+ TagKeys.SERVICE_NAME_KEY -> SERVICE_NAME)
+
+ scenario("should compute the correct count for valid similar metric data points") {
+
+ Given("some 'total-spans' metric data points")
+ val interval: Interval = Interval.FIFTEEN_MINUTE
+
+ val metricDataList = List(
+ getMetricData(SUCCESS_METRIC_NAME, keys, 2, currentTimeInSecs),
+ getMetricData(SUCCESS_METRIC_NAME, keys, 4, currentTimeInSecs),
+ getMetricData(SUCCESS_METRIC_NAME, keys, 5, currentTimeInSecs))
+
+
+ When("get metric is constructed")
+ val metric: Metric = new CountMetric(interval)
+
+ When("MetricData are processed")
+ metricDataList.map(metricData => metric.compute(metricData))
+
+ val countMetricDataList: List[MetricData] = metric.mapToMetricDataList(metricDataList.last.getMetricDefinition.getKey, getTagsFromMetricData(metricDataList.last), metricDataList.last.getTimestamp)
+
+
+ Then("it should return a single aggregated metric data")
+ countMetricDataList.size shouldBe 1
+ val countMetric = countMetricDataList.head
+
+ Then("aggregated metric name be the original metric name")
+ metricDataList.foreach(metricData => {
+ countMetric.getMetricDefinition.getKey shouldEqual metricData.getMetricDefinition.getKey
+ })
+
+ Then("aggregated metric should contain of original metric tags")
+ metricDataList.foreach(metricData => {
+ getTagsFromMetricData(metricData).asScala.foreach(tag => {
+ val aggregatedMetricTag = countMetric.getMetricDefinition.getTags.getKv.get(tag._1)
+ aggregatedMetricTag should not be None
+ aggregatedMetricTag shouldBe tag._2
+ })
+ })
+
+ Then("aggregated metric name should be the same as the metricpoint name")
+ countMetricDataList
+ .map(countMetricPoint =>
+ countMetricPoint.getMetricDefinition.getKey shouldEqual countMetric.getMetricDefinition.getKey)
+
+ Then("aggregated metric should count metric type in tags")
+ getTagsFromMetricData(countMetric).get(TagKeys.STATS_KEY) should not be None
+ getTagsFromMetricData(countMetric).get(TagKeys.STATS_KEY) shouldEqual StatValue.COUNT.toString
+
+ Then("aggregated metric should contain the correct interval name in tags")
+ getTagsFromMetricData(countMetric).get(TagKeys.INTERVAL_KEY) should not be None
+ getTagsFromMetricData(countMetric).get(TagKeys.INTERVAL_KEY) shouldEqual interval.name
+
+ Then("should return valid aggregated value for count")
+ val totalSum = metricDataList.foldLeft(0f)((currentValue, point) => {
+ currentValue + point.getValue.toFloat
+ })
+ totalSum shouldEqual countMetric.getValue
+
+
+ }
+
+
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/metrics/HistogramMetricSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/metrics/HistogramMetricSpec.scala
new file mode 100644
index 000000000..c1ac9cd2b
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/metrics/HistogramMetricSpec.scala
@@ -0,0 +1,126 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.feature.tests.aggregation.metrics
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.commons.entities.Interval.Interval
+import com.expedia.www.haystack.commons.entities.{Interval, TagKeys}
+import com.expedia.www.haystack.trends.aggregation.TrendHdrHistogram
+import com.expedia.www.haystack.trends.aggregation.entities._
+import com.expedia.www.haystack.trends.aggregation.metrics.HistogramMetric
+import com.expedia.www.haystack.trends.config.AppConfiguration
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+
+class HistogramMetricSpec extends FeatureSpec {
+
+ val DURATION_METRIC_NAME = "duration"
+ val SUCCESS_METRIC_NAME = "success-spans"
+ val INVALID_METRIC_NAME = "invalid_metric"
+ val SERVICE_NAME = "dummy_service"
+ val OPERATION_NAME = "dummy_operation"
+
+ val keys = Map(TagKeys.OPERATION_NAME_KEY -> OPERATION_NAME,
+ TagKeys.SERVICE_NAME_KEY -> SERVICE_NAME)
+
+ feature("Creating a histogram metric") {
+ scenario("should get gauge metric type and stats for valid duration points") {
+
+ Given("some duration Metric Data points")
+ val durations = List(10000000, 140000000) // in micros
+ val interval: Interval = Interval.ONE_MINUTE
+
+ val metricDataList: List[MetricData] = durations.map(duration => getMetricData(DURATION_METRIC_NAME, keys, duration, currentTimeInSecs))
+
+ When("get metric is constructed")
+ val metric = new HistogramMetric(interval)
+
+ When("MetricData points are processed")
+ metricDataList.map(metricData => metric.compute(metricData))
+ val histMetricDataList: List[MetricData] = metric.mapToMetricDataList(metricDataList.last.getMetricDefinition.getKey, getTagsFromMetricData(metricDataList.last), metricDataList.last.getTimestamp)
+
+ Then("aggregated metric name should be the same as the MetricData points name")
+ histMetricDataList
+ .map(histMetricData =>
+ histMetricData.getMetricDefinition.getKey shouldEqual metricDataList.head.getMetricDefinition.getKey)
+
+ Then("aggregated metric should contain of original metric tags")
+ histMetricDataList.foreach(histogramMetricData => {
+ val tags = histogramMetricData.getMetricDefinition.getTags.getKv
+
+ keys.foreach(IncomingMetricPointTag => {
+ tags.get(IncomingMetricPointTag._1) should not be None
+ tags.get(IncomingMetricPointTag._1) shouldEqual IncomingMetricPointTag._2
+ })
+
+ })
+
+ Then("aggregated metric should contain the correct interval name in tags")
+ histMetricDataList.map(histMetricData => {
+ getTagsFromMetricData(histMetricData).get(TagKeys.INTERVAL_KEY) should not be null
+ getTagsFromMetricData(histMetricData).get(TagKeys.INTERVAL_KEY) shouldEqual interval.name
+ })
+
+ Then("should return valid values for all stats types")
+ val expectedHistogram = new TrendHdrHistogram(AppConfiguration.histogramMetricConfiguration)
+
+ metricDataList.foreach(metricPoint => {
+ expectedHistogram.recordValue(metricPoint.getValue.toLong)
+ })
+ verifyHistogramMetricValues(histMetricDataList, expectedHistogram)
+ }
+
+ scenario("should return nearest point to the maxTrackableValue as per the precision if point is larger than the Histogram maxValue") {
+
+ Given("some duration Metric points")
+
+ val maxTrackableValueInMillis = AppConfiguration.histogramMetricConfiguration.maxValue.toLong
+ val maxTrackableValueInMicros = maxTrackableValueInMillis * 1000
+ val durations = List(10000, maxTrackableValueInMicros + 100000) // in micros
+ val interval: Interval = Interval.ONE_MINUTE
+
+ val metricDataList: List[MetricData] = durations.map(duration => getMetricData(DURATION_METRIC_NAME, keys, duration, currentTimeInSecs))
+
+ When("get metric is constructed")
+ val metric = new HistogramMetric(interval)
+
+ When("MetricData points are processed")
+ metricDataList.map(metricData => metric.compute(metricData))
+ val histMetricDataList: List[MetricData] = metric.mapToMetricDataList(metricDataList.last.getMetricDefinition.getKey, getTagsFromMetricData(metricDataList.last), metricDataList.last.getTimestamp)
+
+
+ Then("the max should be the maxTrackableValue that was in the histogram boundaries")
+ histMetricDataList.filter(m => "max".equals(getTagsFromMetricData(m).get("stat").toString)).head.getValue shouldEqual 1794048000
+ histMetricDataList.filter(m => "mean".equals(getTagsFromMetricData(m).get("stat").toString)).head.getValue shouldEqual 899077000
+ histMetricDataList.filter(m => "*_95".equals(getTagsFromMetricData(m).get("stat").toString)).head.getValue shouldEqual 1794048000
+ }
+
+ def verifyHistogramMetricValues(resultingMetricPoints: List[MetricData], expectedHistogram: TrendHdrHistogram) = {
+ val resultingMetricPointsMap: Map[String, Float] =
+ resultingMetricPoints.map(resultingMetricPoint => getTagsFromMetricData(resultingMetricPoint).get(TagKeys.STATS_KEY) -> resultingMetricPoint.getValue.toFloat).toMap
+
+ resultingMetricPointsMap(StatValue.MEAN.toString) shouldEqual expectedHistogram.getMean
+ resultingMetricPointsMap(StatValue.MAX.toString) shouldEqual expectedHistogram.getMaxValue
+ resultingMetricPointsMap(StatValue.MIN.toString) shouldEqual expectedHistogram.getMinValue
+ resultingMetricPointsMap(StatValue.PERCENTILE_95.toString) shouldEqual expectedHistogram.getValueAtPercentile(95)
+ resultingMetricPointsMap(StatValue.PERCENTILE_99.toString) shouldEqual expectedHistogram.getValueAtPercentile(99)
+ resultingMetricPointsMap(StatValue.STDDEV.toString) shouldEqual expectedHistogram.getStdDeviation
+ resultingMetricPointsMap(StatValue.MEDIAN.toString) shouldEqual expectedHistogram.getValueAtPercentile(50)
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/rules/DurationMetricRuleSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/rules/DurationMetricRuleSpec.scala
new file mode 100644
index 000000000..f25b82f96
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/rules/DurationMetricRuleSpec.scala
@@ -0,0 +1,55 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.feature.tests.aggregation.rules
+
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType
+import com.expedia.www.haystack.trends.aggregation.rules.DurationMetricRule
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+
+class DurationMetricRuleSpec extends FeatureSpec with DurationMetricRule {
+
+ val DURATION_METRIC_NAME = "duration"
+ val SERVICE_NAME = "dummy_service"
+ val OPERATION_NAME = "dummy_operation"
+
+ object TagKeys {
+ val OPERATION_NAME_KEY = "operationName"
+ val SERVICE_NAME_KEY = "serviceName"
+ }
+
+ feature("DurationMetricRule for identifying MetricRule") {
+
+ scenario("should get Histogram AggregationType for duration MetricData") {
+
+ Given("a duration MetricPoint")
+ val keys = Map(TagKeys.OPERATION_NAME_KEY -> OPERATION_NAME,
+ TagKeys.SERVICE_NAME_KEY -> SERVICE_NAME)
+ val duration = 10
+ val startTime = currentTimeInSecs
+
+ val metricData = getMetricData(DURATION_METRIC_NAME, keys, duration, startTime)
+
+ When("trying to find matching AggregationType")
+ val aggregationType = isMatched(metricData)
+
+ Then("should get Histogram AggregationType")
+ aggregationType shouldEqual Some(AggregationType.Histogram)
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/rules/FailureMetricRuleSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/rules/FailureMetricRuleSpec.scala
new file mode 100644
index 000000000..e0ce90a32
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/rules/FailureMetricRuleSpec.scala
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.feature.tests.aggregation.rules
+
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType
+import com.expedia.www.haystack.trends.aggregation.rules.FailureMetricRule
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+
+class FailureMetricRuleSpec extends FeatureSpec with FailureMetricRule {
+
+ val FAILURE_METRIC_NAME = "failure-spans"
+ val SERVICE_NAME = "dummy_service"
+ val OPERATION_NAME = "dummy_operation"
+
+ object TagKeys {
+ val OPERATION_NAME_KEY = "operationName"
+ val SERVICE_NAME_KEY = "serviceName"
+ }
+
+ feature("DurationMetricRule for identifying MetricRule") {
+
+ scenario("should get Aggregate AggregationType for Failure MetricData") {
+
+ Given("a failure MetricPoint")
+ val keys = Map(TagKeys.OPERATION_NAME_KEY -> OPERATION_NAME,
+ TagKeys.SERVICE_NAME_KEY -> SERVICE_NAME)
+ val startTime = currentTimeInSecs
+
+ val metricData = getMetricData(FAILURE_METRIC_NAME, keys, 1, startTime)
+
+ When("trying to find matching AggregationType")
+ val aggregationType = isMatched(metricData)
+
+ Then("should get Aggregate AggregationType")
+ aggregationType shouldEqual Some(AggregationType.Count)
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/rules/LatencyMetricRuleSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/rules/LatencyMetricRuleSpec.scala
new file mode 100644
index 000000000..e6199da0e
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/rules/LatencyMetricRuleSpec.scala
@@ -0,0 +1,55 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.feature.tests.aggregation.rules
+
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType
+import com.expedia.www.haystack.trends.aggregation.rules.LatencyMetricRule
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+
+class LatencyMetricRuleSpec extends FeatureSpec with LatencyMetricRule {
+
+ val LATENCY_METRIC_NAME = "latency"
+ val SERVICE_NAME = "dummy_service"
+ val OPERATION_NAME = "dummy_operation"
+
+ object TagKeys {
+ val OPERATION_NAME_KEY = "operationName"
+ val SERVICE_NAME_KEY = "serviceName"
+ }
+
+ feature("LatencyMetricRule for identifying MetricRule") {
+
+ scenario("should get Aggregate AggregationType for Latency MetricData") {
+
+ Given("a Latency MetricPoint")
+ val keys = Map(TagKeys.OPERATION_NAME_KEY -> OPERATION_NAME,
+ TagKeys.SERVICE_NAME_KEY -> SERVICE_NAME)
+ val startTime = currentTimeInSecs
+
+ val metricData = getMetricData(LATENCY_METRIC_NAME, keys, 1, startTime)
+
+ When("trying to find matching AggregationType")
+ val aggregationType = isMatched(metricData)
+
+ Then("should get Aggregate AggregationType")
+ aggregationType shouldEqual Some(AggregationType.Histogram)
+ }
+
+ }
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/rules/SuccessMetricRuleSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/rules/SuccessMetricRuleSpec.scala
new file mode 100644
index 000000000..fd0eb9bf1
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/aggregation/rules/SuccessMetricRuleSpec.scala
@@ -0,0 +1,55 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.feature.tests.aggregation.rules
+
+import com.expedia.www.haystack.trends.aggregation.metrics.AggregationType
+import com.expedia.www.haystack.trends.aggregation.rules.SuccessMetricRule
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+
+class SuccessMetricRuleSpec extends FeatureSpec with SuccessMetricRule {
+
+ val SUCCESS_METRIC_NAME = "success-spans"
+ val SERVICE_NAME = "dummy_service"
+ val OPERATION_NAME = "dummy_operation"
+
+ object TagKeys {
+ val OPERATION_NAME_KEY = "operationName"
+ val SERVICE_NAME_KEY = "serviceName"
+ }
+
+ feature("SuccessMetricRule for identifying MetricRule") {
+
+
+ scenario("should get Aggregate AggregationType for Success MetricData") {
+
+ Given("a success MetricPoint")
+ val keys = Map(TagKeys.OPERATION_NAME_KEY -> OPERATION_NAME,
+ TagKeys.SERVICE_NAME_KEY -> SERVICE_NAME)
+ val startTime = currentTimeInSecs
+
+ val metricData = getMetricData(SUCCESS_METRIC_NAME, keys, 1, startTime)
+
+ When("trying to find matching AggregationType")
+ val aggregationType = isMatched(metricData)
+
+ Then("should get Aggregate AggregationType")
+ aggregationType shouldEqual Some(AggregationType.Count)
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/config/ConfigurationLoaderSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/config/ConfigurationLoaderSpec.scala
new file mode 100644
index 000000000..3dc36b10d
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/config/ConfigurationLoaderSpec.scala
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.expedia.www.haystack.trends.feature.tests.config
+
+import com.expedia.www.haystack.commons.entities.encoders.PeriodReplacementEncoder
+import com.expedia.www.haystack.trends.config.AppConfiguration
+import com.expedia.www.haystack.trends.config.entities.HistogramUnit
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+
+class ConfigurationLoaderSpec extends FeatureSpec {
+
+ feature("Configuration loader") {
+
+ scenario("should load the health status config from base.conf") {
+
+ Given("A config file at base config file containing config for health status file path")
+ val healthStatusFilePath = "/app/isHealthy"
+
+ When("When the configuration is loaded in project configuration")
+ val projectConfig = new AppConfiguration()
+
+ Then("the healthStatusFilePath should be correct")
+ projectConfig.healthStatusFilePath shouldEqual healthStatusFilePath
+ }
+
+ scenario("should load the metric point enable period replacement config from base.conf") {
+
+ Given("A config file at base config file containing config for enable period replacement")
+ val enableMetricPointPeriodReplacement = true
+
+ When("When the configuration is loaded in project configuration")
+ val projectConfig = new AppConfiguration()
+
+ Then("the encoder should be correct")
+ projectConfig.encoder shouldBe a[PeriodReplacementEncoder]
+ }
+
+ scenario("should load the kafka config from base.conf") {
+
+ Given("A config file at base config file containing kafka ")
+
+ When("When the configuration is loaded in project configuration")
+ val projectConfig = new AppConfiguration()
+
+ Then("It should create the write configuration object based on the file contents")
+ val kafkaConfig = projectConfig.kafkaConfig
+ kafkaConfig.consumeTopic shouldBe "metric-data-points"
+ }
+
+ scenario("should load additional tags config from base.conf") {
+ Given("A config file at base config file containing additionalTags ")
+
+ When("When the configuration is loaded in project configuration")
+ val projectConfig = new AppConfiguration()
+
+ Then("It should create the addtionalTags map based on the file contents")
+ val additionalTags = projectConfig.additionalTags
+ additionalTags.keySet.size shouldEqual 2
+ additionalTags("key1") shouldEqual "value1"
+ additionalTags("key2") shouldEqual "value2"
+
+ }
+
+ scenario("should override configuration based on environment variable") {
+
+
+ Given("A config file at base config file containing config for kafka")
+
+ When("When the configuration is loaded in project configuration")
+ val projectConfig = new AppConfiguration()
+
+ Then("It should override the configuration object based on the environment variable if it exists")
+
+ val kafkaProduceTopic = sys.env.getOrElse("HAYSTACK_PROP_KAFKA_PRODUCER_TOPIC", """{
+ | topic: "metrics"
+ | serdeClassName : "com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricDataSerde"
+ | enabled: true
+ | },
+ | {
+ | topic: "mdm"
+ | serdeClassName : "com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricTankSerde"
+ | enabled: true
+ | }""")
+ val kafkaConfig = projectConfig.kafkaConfig
+ kafkaConfig.producerConfig.kafkaSinkTopics.head.topic shouldBe "metrics"
+ }
+
+ scenario("should load the state store configs from base.conf") {
+
+ Given("A config file at base config file containing kafka ")
+
+ When("When the configuration is loaded in project configuration")
+ val projectConfig = new AppConfiguration()
+
+ Then("It should create the write configuration object based on the file contents")
+ val stateStoreConfigs = projectConfig.stateStoreConfig.changeLogTopicConfiguration
+ projectConfig.stateStoreConfig.enableChangeLogging shouldBe true
+ projectConfig.stateStoreConfig.changeLogDelayInSecs shouldBe 60
+ stateStoreConfigs("cleanup.policy") shouldBe "compact,delete"
+ stateStoreConfigs("retention.ms") shouldBe "14400000"
+ }
+
+ scenario("should load the external kafka configs from base.conf") {
+
+ Given("A config file at base config file containing kafka ")
+
+ When("When the configuration is loaded in project configuration")
+ val projectConfig = new AppConfiguration()
+
+ Then("It should create the write configuration object based on the file contents")
+ projectConfig.kafkaConfig.producerConfig.enableExternalKafka shouldBe true
+ projectConfig.kafkaConfig.producerConfig.kafkaSinkTopics.length shouldBe 2
+ projectConfig.kafkaConfig.producerConfig.kafkaSinkTopics.head.topic shouldBe "metrics"
+ projectConfig.kafkaConfig.producerConfig.props.get.getProperty("bootstrap.servers") shouldBe "kafkasvc:9092"
+ }
+
+ scenario("should load the histogram configs from base.conf") {
+
+ Given("A config file at base config file containing kafka ")
+
+ When("When the configuration is loaded in project configuration")
+ val projectConfig = new AppConfiguration()
+
+ Then("It should create the write configuration object based on the file contents")
+ projectConfig.histogramMetricConfiguration.maxValue shouldBe 1800000
+ projectConfig.histogramMetricConfiguration.unit shouldBe HistogramUnit.MILLIS
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/StreamsSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/StreamsSpec.scala
new file mode 100644
index 000000000..64b232998
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/StreamsSpec.scala
@@ -0,0 +1,29 @@
+package com.expedia.www.haystack.trends.feature.tests.kstreams
+
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+import com.expedia.www.haystack.trends.kstream.Streams
+
+class StreamsSpec extends FeatureSpec {
+
+ feature("Streams should build a topology") {
+
+ scenario("a valid kafka configuration") {
+
+ Given("an valid kafka configuration")
+
+ val appConfig = mockAppConfig
+ val streams = new Streams(appConfig)
+
+
+ When("the stream topology is built")
+ val topology = streams.get()
+
+ Then("it should be able to build a successful topology")
+ topology should not be null
+
+ Then("it should create a state store")
+ topology.describe().globalStores().isEmpty shouldBe true
+ }
+ }
+
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/processor/AdditionalTagsProcessorSupplierSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/processor/AdditionalTagsProcessorSupplierSpec.scala
new file mode 100644
index 000000000..9be62b977
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/processor/AdditionalTagsProcessorSupplierSpec.scala
@@ -0,0 +1,79 @@
+package com.expedia.www.haystack.trends.feature.tests.kstreams.processor
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+import com.expedia.www.haystack.trends.kstream.processor.AdditionalTagsProcessor
+import org.apache.kafka.streams.processor.ProcessorContext
+import org.mockito.ArgumentMatchers._
+import org.mockito.Mockito
+import org.mockito.Mockito._
+import org.mockito.invocation.InvocationOnMock
+
+import scala.collection.mutable.ListBuffer
+
+class AdditionalTagsProcessorSupplierSpec extends FeatureSpec {
+
+ feature("Additional Tags processor supplier should add additional tags") {
+
+ val appConfiguration = mockAppConfig
+
+ scenario("should add additional tags if any and forward metricData") {
+
+ Given("an additional tags processor")
+
+ val keys = Map("product" -> "haystack")
+ val listBuffer = ListBuffer[MetricData]()
+ val metricData = getMetricData("success-count", keys, 10, System.currentTimeMillis())
+ val additionalTagsProcessor = new AdditionalTagsProcessor(appConfiguration.additionalTags)
+ val processorContext = Mockito.mock(classOf[ProcessorContext])
+ additionalTagsProcessor.init(processorContext)
+
+ when(processorContext.forward(anyString(), any(classOf[MetricData]))).thenAnswer((invocationOnMock: InvocationOnMock) => {
+ listBuffer += invocationOnMock.getArgument[MetricData](1)
+ })
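+ // The stubbed forward(...) captures every record the processor emits so the assertions below can inspect it.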
+
+
+ When("additional tags processor is passed with metric data")
+ additionalTagsProcessor.process("abc", metricData)
+
+
+ Then("additional tags are added to metric data")
+ val metricDataForwarded = listBuffer.toList
+ metricDataForwarded.length shouldEqual 1
+ metricDataForwarded.head.getMetricDefinition.getTags.getKv.containsKey("k1") shouldEqual true
+ metricDataForwarded.head.getMetricDefinition.getTags.getKv.containsKey("k2") shouldEqual true
+
+ }
+
+
+ scenario("should not add additional tags if additional tags and forward metricData") {
+
+ Given("an additional tags processor")
+
+ val keys = Map("product" -> "haystack")
+ val listBuffer = ListBuffer[MetricData]()
+ val metricData = getMetricData("success-count", keys, 10, System.currentTimeMillis())
+ val additionalTagsProcessor = new AdditionalTagsProcessor(Map())
+ val processorContext = Mockito.mock(classOf[ProcessorContext])
+ additionalTagsProcessor.init(processorContext)
+
+ when(processorContext.forward(anyString(), any(classOf[MetricData]))).thenAnswer((invocationOnMock: InvocationOnMock) => {
+ listBuffer += invocationOnMock.getArgument[MetricData](1)
+ })
+
+
+ When("additional tags processor is passed with metric data")
+ additionalTagsProcessor.process("abc", metricData)
+
+
+ Then("additional tags are added to metric data")
+ val metricDataForwaded = listBuffer.toList
+ metricDataForwaded.length shouldEqual 1
+ metricDataForwaded.head shouldEqual metricData
+
+ }
+
+ }
+
+
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/processor/MetricAggProcessorSupplierSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/processor/MetricAggProcessorSupplierSpec.scala
new file mode 100644
index 000000000..8ab0e6e08
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/processor/MetricAggProcessorSupplierSpec.scala
@@ -0,0 +1,82 @@
+package com.expedia.www.haystack.trends.feature.tests.kstreams.processor
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.commons.entities.Interval
+import com.expedia.www.haystack.commons.entities.Interval.Interval
+import com.expedia.www.haystack.commons.entities.encoders.PeriodReplacementEncoder
+import com.expedia.www.haystack.commons.metrics.MetricsRegistries
+import com.expedia.www.haystack.trends.aggregation.TrendMetric
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+import com.expedia.www.haystack.trends.kstream.processor.MetricAggProcessorSupplier
+import org.apache.kafka.streams.kstream.internals.KTableValueGetter
+import org.apache.kafka.streams.processor.ProcessorContext
+import org.apache.kafka.streams.state.KeyValueStore
+import org.easymock.EasyMock
+
+class MetricAggProcessorSupplierSpec extends FeatureSpec {
+
+ feature("Metric aggregator processor supplier should return windowed metric from store") {
+
+ val windowedMetricStoreName = "dummy-windowed-metric-store"
+
+ scenario("should return windowed metric for a given key") {
+
+ Given("a metric aggregator supplier and metric processor")
+ val trendMetric = mock[TrendMetric]
+ val metricAggProcessorSupplier = new MetricAggProcessorSupplier(windowedMetricStoreName, new PeriodReplacementEncoder)
+ val keyValueStore: KeyValueStore[String, TrendMetric] = mock[KeyValueStore[String, TrendMetric]]
+ val processorContext = mock[ProcessorContext]
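+ // EasyMock pattern: record the expected calls inside expecting { ... }, then replay() the mocks before exercising the code under test.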
+ expecting {
+ keyValueStore.get("metrics").andReturn(trendMetric)
+ processorContext.getStateStore(windowedMetricStoreName).andReturn(keyValueStore)
+ }
+ EasyMock.replay(keyValueStore)
+ EasyMock.replay(processorContext)
+
+ When("metric processor is initialised with processor context")
+ val kTableValueGetter: KTableValueGetter[String, TrendMetric] = metricAggProcessorSupplier.view().get()
+ kTableValueGetter.init(processorContext)
+
+ Then("same windowed metric should be retrieved with the given key")
+ kTableValueGetter.get("metrics") shouldBe trendMetric
+ }
+
+ scenario("should not return any AggregationType for invalid MetricData") {
+
+ Given("a metric aggregator supplier and an invalid metric data")
+ val metricData = getMetricData("invalid-metric", null, 80, currentTimeInSecs)
+ val metricAggProcessorSupplier = new MetricAggProcessorSupplier(windowedMetricStoreName, new PeriodReplacementEncoder)
+
+ When("find the AggregationType for the metric point")
+ val aggregationType = metricAggProcessorSupplier.findMatchingMetric(metricData)
+
+ Then("no AggregationType should be returned")
+ aggregationType shouldEqual None
+ }
+
+ scenario("jmx metric (metricpoints.invalid) should be set for invalid MetricPoints") {
+ val DURATION_METRIC_NAME = "duration"
+ val validMetricPoint: MetricData = getMetricData(DURATION_METRIC_NAME, null, 10, currentTimeInSecs)
+ val intervals: List[Interval] = List(Interval.ONE_MINUTE, Interval.FIFTEEN_MINUTE)
+ val metricAggProcessor = new MetricAggProcessorSupplier(windowedMetricStoreName, new PeriodReplacementEncoder).get
+ val metricsRegistry = MetricsRegistries.metricRegistry
+
+ Given("metric points with invalid values")
+ val negativeValueMetricPoint: MetricData = getMetricData(DURATION_METRIC_NAME, null, -1, currentTimeInSecs)
+ val zeroValueMetricPoint: MetricData = getMetricData(DURATION_METRIC_NAME, null, 0, currentTimeInSecs)
+
+ When("computing a negative value MetricPoint")
+ metricAggProcessor.process(negativeValueMetricPoint.getMetricDefinition.getKey, negativeValueMetricPoint)
+
+ Then("metric for invalid value should get incremented")
+ metricsRegistry.getMeters.get("metricprocessor.invalid").getCount shouldEqual 1
+
+ When("computing a zero value MetricPoint")
+ metricAggProcessor.process(zeroValueMetricPoint.getMetricDefinition.getKey, zeroValueMetricPoint)
+
+ Then("metric for invalid value should get incremented")
+ metricsRegistry.getMeters.get("metricprocessor.invalid").getCount shouldEqual 2
+ }
+ }
+
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/serde/TrendMetricSerdeSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/serde/TrendMetricSerdeSpec.scala
new file mode 100644
index 000000000..ed24ed1e2
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/serde/TrendMetricSerdeSpec.scala
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.feature.tests.kstreams.serde
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.commons.entities.Interval.Interval
+import com.expedia.www.haystack.commons.entities.{Interval, TagKeys}
+import com.expedia.www.haystack.trends.aggregation.TrendMetric
+import com.expedia.www.haystack.trends.aggregation.metrics.{CountMetric, CountMetricFactory, HistogramMetric, HistogramMetricFactory}
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+import com.expedia.www.haystack.trends.kstream.serde.TrendMetricSerde
+
+class TrendMetricSerdeSpec extends FeatureSpec {
+
+ val DURATION_METRIC_NAME = "duration"
+ val SUCCESS_METRIC_NAME = "success-spans"
+ val SERVICE_NAME = "dummy_service"
+ val TOPIC_NAME = "dummy"
+ val OPERATION_NAME = "dummy_operation"
+ val keys = Map(TagKeys.OPERATION_NAME_KEY -> OPERATION_NAME,
+ TagKeys.SERVICE_NAME_KEY -> SERVICE_NAME)
+ val currentTime = 0
+
+ feature("Serializing/Deserializing Trend Metric") {
+
+ scenario("should be able to serialize and deserialize a valid trend metric computing histograms") {
+ val durations: List[Long] = List(10, 140)
+ val intervals: List[Interval] = List(Interval.ONE_MINUTE, Interval.FIFTEEN_MINUTE)
+
+ val metricPoints: List[MetricData] = durations.map(duration => getMetricData(DURATION_METRIC_NAME, keys, duration, currentTime))
+
+ When("creating a TrendMetric and passing some MetricPoints and aggregation type as Histogram")
+ val trendMetric: TrendMetric = TrendMetric.createTrendMetric(intervals, metricPoints.head, HistogramMetricFactory)
+ metricPoints.tail.foreach(trendMetric.compute)
+
+ When("the trend metric is serialized and then deserialized back")
+ val serializedMetric = TrendMetricSerde.serializer().serialize(TOPIC_NAME, trendMetric)
+ val deserializedMetric = TrendMetricSerde.deserializer().deserialize(TOPIC_NAME, serializedMetric)
+
+ Then("Then it should deserialize the metric back in the same state")
+ deserializedMetric should not be null
+ trendMetric.trendMetricsMap.foreach {
+ case (interval, windowedMetric) =>
+ deserializedMetric.trendMetricsMap.get(interval) should not be None
+ windowedMetric.windowedMetricsMap.foreach {
+ case (timeWindow, metric) =>
+ val histogram = metric.asInstanceOf[HistogramMetric]
+ val deserializedHistogram = deserializedMetric.trendMetricsMap(interval).windowedMetricsMap(timeWindow).asInstanceOf[HistogramMetric]
+ histogram.getMetricInterval shouldEqual deserializedHistogram.getMetricInterval
+ histogram.getRunningHistogram.getTotalCount shouldEqual deserializedHistogram.getRunningHistogram.getTotalCount
+ histogram.getRunningHistogram.getMaxValue shouldEqual deserializedHistogram.getRunningHistogram.getMaxValue
+ histogram.getRunningHistogram.getValueAtPercentile(50) shouldEqual deserializedHistogram.getRunningHistogram.getValueAtPercentile(50)
+ }
+ }
+ }
+
+ scenario("should be able to serialize and deserialize a valid trend metric computing counts") {
+
+ Given("some count Metric points")
+ val counts: List[Long] = List(10, 140)
+ val intervals: List[Interval] = List(Interval.ONE_MINUTE, Interval.FIFTEEN_MINUTE)
+ val metricPoints: List[MetricData] = counts.map(count => getMetricData(SUCCESS_METRIC_NAME, keys, count, currentTime))
+
+
+ When("creating a TrendMetric and passing some MetricPoints and aggregation type as Count")
+ val trendMetric: TrendMetric = TrendMetric.createTrendMetric(intervals, metricPoints.head, CountMetricFactory)
+ metricPoints.tail.foreach(trendMetric.compute)
+
+ When("the trend metric is serialized and then deserialized back")
+ val serializer = TrendMetricSerde.serializer()
+ val deserializer = TrendMetricSerde.deserializer()
+ val serializedMetric = serializer.serialize(TOPIC_NAME, trendMetric)
+ val deserializedMetric = deserializer.deserialize(TOPIC_NAME, serializedMetric)
+
+
+ Then("Then it should deserialize the metric back in the same state")
+ deserializedMetric should not be null
+ trendMetric.trendMetricsMap.foreach {
+ case (interval, windowedMetric) =>
+ deserializedMetric.trendMetricsMap.get(interval) should not be None
+ windowedMetric.windowedMetricsMap.foreach {
+ case (timeWindow, metric) =>
+
+ val countMetric = metric.asInstanceOf[CountMetric]
+ val deserializedCountMetric = deserializedMetric.trendMetricsMap(interval).windowedMetricsMap(timeWindow).asInstanceOf[CountMetric]
+ countMetric.getMetricInterval shouldEqual deserializedCountMetric.getMetricInterval
+ countMetric.getCurrentCount shouldEqual deserializedCountMetric.getCurrentCount
+ }
+ }
+ serializer.close()
+ deserializer.close()
+ TrendMetricSerde.close()
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/serde/WindowedMetricSerdeSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/serde/WindowedMetricSerdeSpec.scala
new file mode 100644
index 000000000..ff8754c3f
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/serde/WindowedMetricSerdeSpec.scala
@@ -0,0 +1,88 @@
+package com.expedia.www.haystack.trends.feature.tests.kstreams.serde
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.commons.entities.{Interval, TagKeys}
+import com.expedia.www.haystack.trends.aggregation.WindowedMetric
+import com.expedia.www.haystack.trends.aggregation.metrics.{CountMetric, CountMetricFactory, HistogramMetric, HistogramMetricFactory}
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+import com.expedia.www.haystack.trends.kstream.serde.WindowedMetricSerde
+
+class WindowedMetricSerdeSpec extends FeatureSpec {
+
+ val DURATION_METRIC_NAME = "duration"
+ val SUCCESS_METRIC_NAME = "success-spans"
+ val SERVICE_NAME = "dummy_service"
+ val TOPIC_NAME = "dummy"
+ val OPERATION_NAME = "dummy_operation"
+ val keys = Map(TagKeys.OPERATION_NAME_KEY -> OPERATION_NAME,
+ TagKeys.SERVICE_NAME_KEY -> SERVICE_NAME)
+
+ feature("Serializing/Deserializing Windowed Metric") {
+
+ scenario("should be able to serialize and deserialize a valid windowed metric computing histograms") {
+ val durations: List[Long] = List(10, 140)
+ val metricPoints: List[MetricData] = durations.map(duration => getMetricData(DURATION_METRIC_NAME, keys, duration, currentTimeInSecs))
+
+ When("creating a WindowedMetric and passing some MetricPoints and aggregation type as Histogram")
+ val windowedMetric: WindowedMetric = WindowedMetric.createWindowedMetric(metricPoints.head, HistogramMetricFactory, 1, Interval.ONE_MINUTE)
+ metricPoints.tail.foreach(windowedMetric.compute)
+
+ When("the windowed metric is serialized and then deserialized back")
+ val serializedMetric = WindowedMetricSerde.serializer().serialize(TOPIC_NAME, windowedMetric)
+ val deserializedMetric = WindowedMetricSerde.deserializer().deserialize(TOPIC_NAME, serializedMetric)
+
+ Then("Then it should deserialize the metric back in the same state")
+ deserializedMetric should not be null
+ windowedMetric.windowedMetricsMap.foreach {
+ case (window, metric) =>
+ deserializedMetric.windowedMetricsMap.get(window) should not be None
+
+ val histogram = metric.asInstanceOf[HistogramMetric]
+ val deserializedHistogram = deserializedMetric.windowedMetricsMap(window).asInstanceOf[HistogramMetric]
+ histogram.getMetricInterval shouldEqual deserializedHistogram.getMetricInterval
+ histogram.getRunningHistogram.getTotalCount shouldEqual deserializedHistogram.getRunningHistogram.getTotalCount
+ histogram.getRunningHistogram.getMaxValue shouldEqual deserializedHistogram.getRunningHistogram.getMaxValue
+ histogram.getRunningHistogram.getMinValue shouldEqual deserializedHistogram.getRunningHistogram.getMinValue
+ histogram.getRunningHistogram.getValueAtPercentile(99) shouldEqual deserializedHistogram.getRunningHistogram.getValueAtPercentile(99)
+ }
+ }
+
+ scenario("should be able to serialize and deserialize a valid windowed metric computing counts") {
+
+ Given("some count Metric points")
+ val counts: List[Long] = List(10, 140)
+ val metricPoints: List[MetricData] = counts.map(count => getMetricData(SUCCESS_METRIC_NAME, keys, count, currentTimeInSecs))
+
+
+ When("creating a WindowedMetric and passing some MetricPoints and aggregation type as Count")
+ val windowedMetric: WindowedMetric = WindowedMetric.createWindowedMetric(metricPoints.head, CountMetricFactory, 1, Interval.ONE_MINUTE)
+ metricPoints.tail.foreach(windowedMetric.compute)
+
+ When("the windowed metric is serialized and then deserialized back")
+ val serializer = WindowedMetricSerde.serializer()
+ val deserializer = WindowedMetricSerde.deserializer()
+ val serializedMetric = serializer.serialize(TOPIC_NAME, windowedMetric)
+ val deserializedMetric = deserializer.deserialize(TOPIC_NAME, serializedMetric)
+
+
+ Then("Then it should deserialize the metric back in the same state")
+ deserializedMetric should not be null
+ windowedMetric.windowedMetricsMap.foreach {
+ case (window, metric) =>
+ deserializedMetric.windowedMetricsMap.get(window) should not be None
+
+ val countMetric = metric.asInstanceOf[CountMetric]
+ val deserializedCountMetric = deserializedMetric.windowedMetricsMap(window).asInstanceOf[CountMetric]
+ countMetric.getMetricInterval shouldEqual deserializedCountMetric.getMetricInterval
+ countMetric.getCurrentCount shouldEqual deserializedCountMetric.getCurrentCount
+ }
+ serializer.close()
+ deserializer.close()
+ WindowedMetricSerde.close()
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/store/HaystackStoreBuilderSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/store/HaystackStoreBuilderSpec.scala
new file mode 100644
index 000000000..c45d85745
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/feature/tests/kstreams/store/HaystackStoreBuilderSpec.scala
@@ -0,0 +1,27 @@
+package com.expedia.www.haystack.trends.feature.tests.kstreams.store
+
+import com.expedia.www.haystack.trends.aggregation.TrendMetric
+import com.expedia.www.haystack.trends.feature.FeatureSpec
+import com.expedia.www.haystack.trends.kstream.store.HaystackStoreBuilder
+import org.apache.kafka.streams.state.internals.MeteredKeyValueStore
+
+class HaystackStoreBuilderSpec extends FeatureSpec {
+
+ feature("Haystack Store Builder should build appropriate store for haystack metrics") {
+
+ scenario("build store with changelog enabled") {
+
+ Given("a haystack store builder")
+ val storeName = "test-store"
+ val cacheSize = 100
+ val storeBuilder = new HaystackStoreBuilder(storeName, cacheSize)
+
+ When("change logging is enabled")
+ storeBuilder.withCachingEnabled()
+ val store = storeBuilder.build()
+
+ Then("it should build a metered lru-cache based changelogging store")
+ store.isInstanceOf[MeteredKeyValueStore[String, TrendMetric]] shouldBe true
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/IntegrationTestSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/IntegrationTestSpec.scala
new file mode 100644
index 000000000..422eac08d
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/IntegrationTestSpec.scala
@@ -0,0 +1,227 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.integration
+
+import java.util
+import java.util.Properties
+import java.util.concurrent.{Executors, ScheduledExecutorService, ScheduledFuture, TimeUnit}
+
+import com.expedia.metrics.{MetricData, MetricDefinition, TagCollection}
+import com.expedia.www.haystack.commons.entities.Interval
+import com.expedia.www.haystack.commons.entities.encoders.PeriodReplacementEncoder
+import com.expedia.www.haystack.commons.health.HealthStatusController
+import com.expedia.www.haystack.commons.kstreams.app.{StateChangeListener, StreamsFactory, StreamsRunner}
+import com.expedia.www.haystack.commons.kstreams.serde.metricdata.{MetricDataSerde, MetricTankSerde}
+import com.expedia.www.haystack.commons.util.MetricDefinitionKeyGenerator
+import com.expedia.www.haystack.commons.util.MetricDefinitionKeyGenerator._
+import com.expedia.www.haystack.trends.config.AppConfiguration
+import com.expedia.www.haystack.trends.config.entities.{KafkaConfiguration, KafkaProduceConfiguration, KafkaSinkTopic, StateStoreConfiguration}
+import com.expedia.www.haystack.trends.kstream.Streams
+import org.apache.kafka.clients.consumer.ConsumerConfig
+import org.apache.kafka.clients.producer.ProducerConfig
+import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
+import org.apache.kafka.streams.Topology.AutoOffsetReset
+import org.apache.kafka.streams.integration.utils.{EmbeddedKafkaCluster, IntegrationTestUtils}
+import org.apache.kafka.streams.processor.WallclockTimestampExtractor
+import org.apache.kafka.streams.{KeyValue, StreamsConfig}
+import org.easymock.EasyMock
+import org.scalatest._
+import org.scalatest.easymock.EasyMockSugar
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration.FiniteDuration
+import scala.util.Random
+
+class IntegrationTestSpec extends WordSpec with GivenWhenThen with Matchers with BeforeAndAfterAll with BeforeAndAfterEach with EasyMockSugar {
+
+ protected val PUNCTUATE_INTERVAL_SEC = 2000
+ protected val PRODUCER_CONFIG = new Properties()
+ protected val RESULT_CONSUMER_CONFIG = new Properties()
+ protected val STREAMS_CONFIG = new Properties()
+ protected val scheduledJobFuture: ScheduledFuture[_] = null
+ protected val INPUT_TOPIC = "metric-data-points"
+ protected val OUTPUT_TOPIC = "metrics"
+ protected val OUTPUT_METRICTANK_TOPIC = "mdm"
+ protected var scheduler: ScheduledExecutorService = _
+ protected var APP_ID = "haystack-trends"
+ protected var CHANGELOG_TOPIC = s"$APP_ID-trend-metric-store-changelog"
+ protected var embeddedKafkaCluster: EmbeddedKafkaCluster = _
+
+ override def beforeAll(): Unit = {
+ scheduler = Executors.newScheduledThreadPool(1)
+ }
+
+ override def afterAll(): Unit = {
+ scheduler.shutdownNow()
+ }
+
+ override def beforeEach() {
+ val metricDataSerde = new MetricDataSerde()
+
+ embeddedKafkaCluster = new EmbeddedKafkaCluster(1)
+ embeddedKafkaCluster.start()
+ embeddedKafkaCluster.createTopic(INPUT_TOPIC, 1, 1)
+ embeddedKafkaCluster.createTopic(OUTPUT_TOPIC, 1, 1)
+
+ PRODUCER_CONFIG.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafkaCluster.bootstrapServers)
+ PRODUCER_CONFIG.put(ProducerConfig.ACKS_CONFIG, "all")
+ PRODUCER_CONFIG.put(ProducerConfig.RETRIES_CONFIG, "0")
+ PRODUCER_CONFIG.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
+ PRODUCER_CONFIG.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, metricDataSerde.serializer().getClass)
+
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafkaCluster.bootstrapServers)
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.GROUP_ID_CONFIG, APP_ID + "-result-consumer")
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer])
+ RESULT_CONSUMER_CONFIG.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, metricDataSerde.deserializer().getClass)
+
+ STREAMS_CONFIG.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafkaCluster.bootstrapServers)
+ STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID)
+ STREAMS_CONFIG.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
+ STREAMS_CONFIG.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, "0")
+ STREAMS_CONFIG.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, "1")
+ STREAMS_CONFIG.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, "300")
+ STREAMS_CONFIG.put(StreamsConfig.REPLICATION_FACTOR_CONFIG, "1")
+ STREAMS_CONFIG.put(StreamsConfig.STATE_DIR_CONFIG, "/tmp/kafka-streams")
+
+ IntegrationTestUtils.purgeLocalStreamsState(STREAMS_CONFIG)
+ }
+
+ override def afterEach(): Unit = {
+ embeddedKafkaCluster.deleteTopics(INPUT_TOPIC, OUTPUT_TOPIC)
+ }
+
+ def currentTimeInSecs: Long = {
+ System.currentTimeMillis() / 1000L
+ }
+
+ protected val stateStoreConfigs = Map("cleanup.policy" -> "compact,delete")
+
+
+ protected def mockAppConfig: AppConfiguration = {
+ val kafkaSinkTopics = List(KafkaSinkTopic("metrics","com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricDataSerde",true), KafkaSinkTopic("mdm","com.expedia.www.haystack.commons.kstreams.serde.metricdata.MetricTankSerde",true))
+ val kafkaConfig = KafkaConfiguration(new StreamsConfig(STREAMS_CONFIG),KafkaProduceConfiguration(kafkaSinkTopics, None, "mdm", false), INPUT_TOPIC, AutoOffsetReset.EARLIEST, new WallclockTimestampExtractor, 30000)
+ val projectConfiguration = mock[AppConfiguration]
+
+ expecting {
+ projectConfiguration.kafkaConfig.andReturn(kafkaConfig).anyTimes()
+ projectConfiguration.stateStoreConfig.andReturn(StateStoreConfiguration(128, enableChangeLogging = true, 60, stateStoreConfigs)).anyTimes()
+ projectConfiguration.encoder.andReturn(new PeriodReplacementEncoder).anyTimes()
+ projectConfiguration.additionalTags.andReturn(Map("k1"->"v1", "k2"-> "v2")).anyTimes()
+ }
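+ // anyTimes() lets the topology read these settings as often as it needs; replay() below switches the mock from record mode to replay mode.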
+ EasyMock.replay(projectConfiguration)
+ projectConfiguration
+ }
+
+ protected def validateAggregatedMetricPoints(producedRecords: List[KeyValue[String, MetricData]],
+ expectedOneMinAggregatedPoints: Int,
+ expectedFiveMinAggregatedPoints: Int,
+ expectedFifteenMinAggregatedPoints: Int,
+ expectedOneHourAggregatedPoints: Int): Unit = {
+
+ val oneMinAggMetricPoints = producedRecords.filter(record => getTags(record.value).get("interval").equals(Interval.ONE_MINUTE.toString()))
+ val fiveMinAggMetricPoints = producedRecords.filter(record => getTags(record.value).get("interval").equals(Interval.FIVE_MINUTE.toString()))
+ val fifteenMinAggMetricPoints = producedRecords.filter(record => getTags(record.value).get("interval").equals(Interval.FIFTEEN_MINUTE.toString()))
+ val oneHourAggMetricPoints = producedRecords.filter(record => getTags(record.value).get("interval").equals(Interval.ONE_HOUR.toString()))
+
+ oneMinAggMetricPoints.size shouldEqual expectedOneMinAggregatedPoints
+ fiveMinAggMetricPoints.size shouldEqual expectedFiveMinAggregatedPoints
+ fifteenMinAggMetricPoints.size shouldEqual expectedFifteenMinAggregatedPoints
+ oneHourAggMetricPoints.size shouldEqual expectedOneHourAggregatedPoints
+ validateAdditionalTags(List(oneMinAggMetricPoints, fiveMinAggMetricPoints, fifteenMinAggMetricPoints, oneHourAggMetricPoints).flatten)
+ }
+
+ protected def validateAdditionalTags(kvPair: List[KeyValue[String, MetricData]]): Unit = {
+ val additionalTags = mockAppConfig.additionalTags
+ kvPair.foreach(kv => {
+ val tags = kv.value.getMetricDefinition.getTags.getKv.asScala
+ (additionalTags.toSet subsetOf tags.toSet) shouldEqual true
+ })
+ }
+
+ protected def produceMetricPointsAsync(maxMetricPoints: Int,
+ produceInterval: FiniteDuration,
+ metricName: String,
+ totalIntervalInSecs: Long = PUNCTUATE_INTERVAL_SEC
+ ): Unit = {
+ var epochTimeInSecs = 0L
+ var idx = 0
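+ // Each tick advances event time by totalIntervalInSecs / (maxMetricPoints - 1) seconds, so the maxMetricPoints points span the whole interval in event time while being produced every produceInterval in wall-clock time.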
+ scheduler.scheduleWithFixedDelay(() => {
+ if (idx < maxMetricPoints) {
+ val metricData = randomMetricData(metricName = metricName, timestamp = epochTimeInSecs)
+ val keyValue = List(new KeyValue[String, MetricData](generateKey(metricData.getMetricDefinition), metricData)).asJava
+ IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(
+ INPUT_TOPIC,
+ keyValue,
+ PRODUCER_CONFIG,
+ epochTimeInSecs)
+ epochTimeInSecs = epochTimeInSecs + (totalIntervalInSecs / (maxMetricPoints - 1))
+ }
+ idx = idx + 1
+
+ }, 0, produceInterval.toMillis, TimeUnit.MILLISECONDS)
+ }
+
+ protected def produceMetricData(metricName: String,
+ epochTimeInSecs: Long,
+ produceTimeInSecs: Long
+ ): Unit = {
+ val metricPoint = randomMetricData(metricName = metricName, timestamp = epochTimeInSecs)
+ val keyValue = List(new KeyValue[String, MetricData](generateKey(metricPoint.getMetricDefinition), metricPoint)).asJava
+ IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(
+ INPUT_TOPIC,
+ keyValue,
+ PRODUCER_CONFIG,
+ produceTimeInSecs)
+ }
+
+ def randomMetricData(metricName: String,
+ value: Long = Math.abs(Random.nextInt()),
+ timestamp: Long = currentTimeInSecs): MetricData = {
+ getMetricData(metricName, Map[String, String](), value, timestamp)
+ }
+
+ protected def createStreamRunner(): StreamsRunner = {
+ val appConfig = mockAppConfig
+ val streams = new Streams(appConfig)
+ val factory = new StreamsFactory(streams, appConfig.kafkaConfig.streamsConfig, appConfig.kafkaConfig.consumeTopic)
+ new StreamsRunner(factory, new StateChangeListener(new HealthStatusController))
+ }
+
+ protected def getMetricData(metricKey: String, tags: Map[String, String], value: Double, timeStamp: Long): MetricData = {
+
+ val tagsMap = new java.util.LinkedHashMap[String, String] {
+ putAll(tags.asJava)
+ put(MetricDefinition.MTYPE, "gauge")
+ put(MetricDefinition.UNIT, "short")
+ }
+ val metricDefinition = new MetricDefinition(metricKey, new TagCollection(tagsMap), TagCollection.EMPTY)
+ new MetricData(metricDefinition, value, timeStamp)
+ }
+
+ protected def containsTag(metricData: MetricData, tagKey: String, tagValue: String): Boolean = {
+ val tags = getTags(metricData)
+ tags.containsKey(tagKey) && tags.get(tagKey).equalsIgnoreCase(tagValue)
+ }
+
+ protected def getTags(metricData: MetricData): util.Map[String, String] = {
+ metricData.getMetricDefinition.getTags.getKv
+ }
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/tests/CountTrendsSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/tests/CountTrendsSpec.scala
new file mode 100644
index 000000000..4569a6465
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/tests/CountTrendsSpec.scala
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.integration.tests
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.trends.integration.IntegrationTestSpec
+import org.apache.kafka.streams.KeyValue
+import org.apache.kafka.streams.integration.utils.IntegrationTestUtils
+import org.scalatest.Sequential
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration._
+
+@Sequential
+class CountTrendsSpec extends IntegrationTestSpec {
+
+ private val MAX_METRICPOINTS = 62
+ private val numberOfWatermarkedWindows = 1
+
+ "TimeSeriesAggregatorTopology" should {
+
+ "aggregate count type metricPoints from input topic based on rules" in {
+
+ Given("a set of metricPoints with type metric and kafka specific configurations")
+ val METRIC_NAME = "success-span"
+ // CountMetric
+ val expectedOneMinAggregatedPoints: Int = MAX_METRICPOINTS - numberOfWatermarkedWindows - 1
+ // one less because no aggregate is produced for the final (MAX_METRICPOINTS * 60)th-second metric point
+ val expectedFiveMinAggregatedPoints: Int = (MAX_METRICPOINTS / 5) - numberOfWatermarkedWindows
+ val expectedFifteenMinAggregatedPoints: Int = (MAX_METRICPOINTS / 15)
+ val expectedOneHourAggregatedPoints: Int = (MAX_METRICPOINTS / 60)
+ val expectedTotalAggregatedPoints: Int = expectedOneMinAggregatedPoints + expectedFiveMinAggregatedPoints + expectedFifteenMinAggregatedPoints + expectedOneHourAggregatedPoints
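+ // The expected counts assume one aggregate per fully closed window: 62 points spaced ~60s apart, minus the watermarked window(s) still held back per interval.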
+ val streamsRunner = createStreamRunner()
+
+ When("metricPoints are produced in 'input' topic async, and kafka-streams topology is started")
+ produceMetricPointsAsync(MAX_METRICPOINTS, 10.milli, METRIC_NAME, MAX_METRICPOINTS * 60)
+ streamsRunner.start()
+
+ Then("we should read all aggregated metricData from 'output' topic")
+ val waitTimeMs = 15000
+ val result: List[KeyValue[String, MetricData]] =
+ IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived[String, MetricData](RESULT_CONSUMER_CONFIG, OUTPUT_TOPIC, expectedTotalAggregatedPoints, waitTimeMs).asScala.toList
+ validateAggregatedMetricPoints(result, expectedOneMinAggregatedPoints, expectedFiveMinAggregatedPoints, expectedFifteenMinAggregatedPoints, expectedOneHourAggregatedPoints)
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/tests/HistogramTrendsSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/tests/HistogramTrendsSpec.scala
new file mode 100644
index 000000000..d904a9426
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/tests/HistogramTrendsSpec.scala
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.integration.tests
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.trends.integration.IntegrationTestSpec
+import org.apache.kafka.streams.KeyValue
+import org.apache.kafka.streams.integration.utils.IntegrationTestUtils
+import org.scalatest.{Ignore, Sequential}
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration._
+
+@Ignore
+@Sequential
+class HistogramTrendsSpec extends IntegrationTestSpec {
+
+ private val MAX_METRICPOINTS = 62
+ private val numberOfWatermarkedWindows = 1
+
+ "TimeSeriesAggregatorTopology" should {
+
+ "aggregate histogram type metricPoints from input topic based on rules" in {
+ Given("a set of metricPoints with type metric and kafka specific configurations")
+ val METRIC_NAME = "duration"
+ //HistogramMetric
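+ // Each histogram window emits seven stats (mean, min, max, median, stddev, p95, p99), hence the * 7 multiplier below.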
+ val expectedOneMinAggregatedPoints: Int = (MAX_METRICPOINTS - 1 - numberOfWatermarkedWindows) * 7
+ // one less because no aggregate is produced for the final (MAX_METRICPOINTS * 60)th-second metric point
+ val expectedFiveMinAggregatedPoints: Int = (MAX_METRICPOINTS / 5 - numberOfWatermarkedWindows) * 7
+ val expectedFifteenMinAggregatedPoints: Int = (MAX_METRICPOINTS / 15) * 7
+ val expectedOneHourAggregatedPoints: Int = (MAX_METRICPOINTS / 60) * 7
+ val expectedTotalAggregatedPoints: Int = expectedOneMinAggregatedPoints + expectedFiveMinAggregatedPoints + expectedFifteenMinAggregatedPoints + expectedOneHourAggregatedPoints
+ val streamsRunner = createStreamRunner()
+
+ When("metricPoints are produced in 'input' topic async, and kafka-streams topology is started")
+ produceMetricPointsAsync(MAX_METRICPOINTS, 10.milli, METRIC_NAME, MAX_METRICPOINTS * 60)
+ streamsRunner.start()
+
+ Then("we should read all aggregated metricData from 'output' topic")
+ val waitTimeMs = 15000
+ val result: List[KeyValue[String, MetricData]] =
+ IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived[String, MetricData](RESULT_CONSUMER_CONFIG, OUTPUT_TOPIC, expectedTotalAggregatedPoints, waitTimeMs).asScala.toList
+ validateAggregatedMetricPoints(result, expectedOneMinAggregatedPoints, expectedFiveMinAggregatedPoints, expectedFifteenMinAggregatedPoints, expectedOneHourAggregatedPoints)
+ }
+ }
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/tests/StateStoreSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/tests/StateStoreSpec.scala
new file mode 100644
index 000000000..7c1083ec2
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/tests/StateStoreSpec.scala
@@ -0,0 +1,59 @@
+/*
+ *
+ * Copyright 2017 Expedia, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package com.expedia.www.haystack.trends.integration.tests
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.trends.integration.IntegrationTestSpec
+import org.apache.kafka.clients.admin.{AdminClient, Config}
+import org.apache.kafka.common.config.ConfigResource
+import org.apache.kafka.streams.integration.utils.IntegrationTestUtils
+import org.scalatest.Sequential
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration._
+
+@Sequential
+class StateStoreSpec extends IntegrationTestSpec {
+
+ private val MAX_METRICPOINTS = 62
+ private val numberOfWatermarkedWindows = 1
+
+ "TimeSeriesAggregatorTopology" should {
+
+ "have state store (change log) configuration be set by the topology" in {
+ Given("a set of metricPoints with type metric and state store specific configurations")
+ val METRIC_NAME = "success-span"
+ // CountMetric
+ val streamsRunner = createStreamRunner()
+
+ When("metricPoints are produced in 'input' topic async, and kafka-streams topology is started")
+ produceMetricPointsAsync(3, 10.milli, METRIC_NAME, 3 * 60)
+ streamsRunner.start()
+
+ Then("we should see the state store topic created with specified properties")
+ val waitTimeMs = 15000
+ IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived[String, MetricData](RESULT_CONSUMER_CONFIG, OUTPUT_TOPIC, 1, waitTimeMs).asScala.toList
+ val adminClient = AdminClient.create(STREAMS_CONFIG)
+ val configResource = new ConfigResource(ConfigResource.Type.TOPIC, CHANGELOG_TOPIC)
+ val describeConfigResult: java.util.Map[ConfigResource, Config] = adminClient.describeConfigs(java.util.Arrays.asList(configResource)).all().get()
+ describeConfigResult.get(configResource).get(stateStoreConfigs.head._1).value() shouldBe stateStoreConfigs.head._2
+ }
+ }
+
+}
diff --git a/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/tests/WatermarkingSpec.scala b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/tests/WatermarkingSpec.scala
new file mode 100644
index 000000000..43e0a0864
--- /dev/null
+++ b/trends/timeseries-aggregator/src/test/scala/com/expedia/www/haystack/trends/integration/tests/WatermarkingSpec.scala
@@ -0,0 +1,42 @@
+package com.expedia.www.haystack.trends.integration.tests
+
+import com.expedia.metrics.MetricData
+import com.expedia.www.haystack.trends.integration.IntegrationTestSpec
+import org.apache.kafka.streams.KeyValue
+import org.apache.kafka.streams.integration.utils.IntegrationTestUtils
+
+import scala.collection.JavaConverters._
+
+class WatermarkingSpec extends IntegrationTestSpec {
+
+ "TimeSeriesAggregatorTopology" should {
+ "watermark metrics for aggregate count type metricPoints from input topic" in {
+ Given("a set of metricPoints with type metric and kafka specific configurations")
+ val METRIC_NAME = "success-span"
+ // CountMetric
+ val expectedOneMinAggregatedPoints: Int = 3
+ val expectedFiveMinAggregatedPoints: Int = 1
+ val expectedFifteenMinAggregatedPoints: Int = 0
+ val expectedOneHourAggregatedPoints: Int = 0
+ val expectedTotalAggregatedPoints: Int = expectedOneMinAggregatedPoints + expectedFiveMinAggregatedPoints + expectedFifteenMinAggregatedPoints + expectedOneHourAggregatedPoints
+ val streamsRunner = createStreamRunner()
+
+ When("metricPoints are produced in 'input' topic async, and kafka-streams topology is started")
+ produceMetricData(METRIC_NAME, 1L, 1L)
+ produceMetricData(METRIC_NAME, 65L, 2L)
+ produceMetricData(METRIC_NAME, 2L, 3L)
+ produceMetricData(METRIC_NAME, 130L, 4L)
+ produceMetricData(METRIC_NAME, 310L, 5L)
+ produceMetricData(METRIC_NAME, 610L, 6L)
+ streamsRunner.start()
+
+ Then("we should read all aggregated metricData from 'output' topic")
+ val waitTimeMs = 15000
+ val result: List[KeyValue[String, MetricData]] =
+ IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived[String, MetricData](RESULT_CONSUMER_CONFIG, OUTPUT_TOPIC, expectedTotalAggregatedPoints, waitTimeMs).asScala.toList
+ validateAggregatedMetricPoints(result, expectedOneMinAggregatedPoints, expectedFiveMinAggregatedPoints, expectedFifteenMinAggregatedPoints, expectedOneHourAggregatedPoints)
+ }
+ }
+}
diff --git a/ui/.babelrc b/ui/.babelrc
new file mode 100644
index 000000000..8ca3d4e31
--- /dev/null
+++ b/ui/.babelrc
@@ -0,0 +1,26 @@
+{
+ "plugins": [
+ "lodash",
+ [
+ "@babel/plugin-proposal-decorators",
+ {
+ "legacy": true
+ }
+ ],
+ [
+ "@babel/plugin-proposal-class-properties",
+ {
+ "loose": true
+ }
+ ],
+ "dynamic-import-node"
+ ],
+ "presets": [
+ ["@babel/preset-env", {
+ "targets": {
+ "node": "current"
+ }
+ }],
+ "@babel/preset-react"
+ ]
+}
diff --git a/ui/.coveralls.yml b/ui/.coveralls.yml
new file mode 100644
index 000000000..346a7f8cb
--- /dev/null
+++ b/ui/.coveralls.yml
@@ -0,0 +1,2 @@
+service_name: travis-ci
+repo_token: rmv39G8JKUPr9W8whITKKlHLl3x4CEi50
\ No newline at end of file
diff --git a/ui/.dockerignore b/ui/.dockerignore
new file mode 100644
index 000000000..b512c09d4
--- /dev/null
+++ b/ui/.dockerignore
@@ -0,0 +1 @@
+node_modules
\ No newline at end of file
diff --git a/ui/.eslintignore b/ui/.eslintignore
new file mode 100644
index 000000000..3a298b383
--- /dev/null
+++ b/ui/.eslintignore
@@ -0,0 +1,4 @@
+public/*
+haystack-idl/*
+static_codegen/*
+coverage/*
\ No newline at end of file
diff --git a/ui/.eslintrc b/ui/.eslintrc
new file mode 100644
index 000000000..03b171cd5
--- /dev/null
+++ b/ui/.eslintrc
@@ -0,0 +1,75 @@
+{
+ "root": true,
+ "parser": "babel-eslint",
+ "extends": "airbnb",
+ "plugins": ["json", "prettier", "import", "jsx-a11y", "react"],
+ "parserOptions": {
+ "ecmaVersion": 7,
+ "sourceType": "module",
+ "ecmaFeatures": {
+ "legacyDecorators": true
+ }
+ },
+ "env": {
+ "node": true,
+ "browser": true,
+ "mocha": true,
+ "jquery": true
+ },
+ "rules": {
+ "no-unused-vars": ["error"],
+ "import/no-extraneous-dependencies": [
+ "error",
+ {
+ "devDependencies": true
+ }
+ ],
+ "arrow-parens": 0,
+ "comma-dangle": ["error", "never"],
+ "comma-style": 0,
+ "function-paren-newline": 0,
+ "implicit-arrow-linebreak": 0,
+ "import/no-cycle": 0,
+ "import/no-named-as-default": 0,
+ "import/no-useless-path-segments": 0,
+ "import/order": 0,
+ "indent": 0,
+ "jsx-a11y/anchor-is-valid": 0,
+ "jsx-a11y/click-events-have-key-events": 0,
+ "jsx-a11y/href-no-hash": 0,
+ "lines-between-class-members": 0,
+ "max-len": ["error", 250],
+ "no-confusing-arrow": 0,
+ "no-else-return": 0,
+ "no-mixed-operators": 0,
+ "no-multi-spaces": 0,
+ "no-multiple-empty-lines": 0,
+ "no-param-reassign": 0,
+ "no-plusplus": 0,
+ "no-restricted-globals": 0,
+ "no-trailing-spaces": 0,
+ "object-curly-newline": 0,
+ "object-curly-spacing": [0, "never"],
+ "operator-linebreak": 0,
+ "prefer-destructuring": 0,
+ "react/button-has-type": 0,
+ "react/default-props-match-prop-types": 0,
+ "react/destructuring-assignment": 0,
+ "react/forbid-prop-types": 0,
+ "react/jsx-closing-tag-location": 0,
+ "react/jsx-curly-brace-presence": 0,
+ "react/jsx-indent-props": 0,
+ "react/jsx-indent": 0,
+ "react/jsx-no-target-blank": 0,
+ "react/jsx-one-expression-per-line": 0,
+ "react/jsx-props-no-multi-spaces": 0,
+ "react/jsx-tag-spacing": 0,
+ "react/jsx-wrap-multilines": 0,
+ "react/no-access-state-in-setstate": 0,
+ "react/no-string-refs": 0,
+ "react/no-unescaped-entities": 0,
+ "react/no-unused-state": 0,
+ "react/require-default-props": 0,
+ "react/sort-comp": 0
+ }
+}
diff --git a/ui/.gitignore b/ui/.gitignore
new file mode 100644
index 000000000..1d48cce82
--- /dev/null
+++ b/ui/.gitignore
@@ -0,0 +1,31 @@
+# Logs
+logs
+sample
+*.log
+
+# Mac Trash directories
+*.DS_Store
+
+# Dependency directory
+node_modules
+
+#intellij
+.idea/
+*.iml
+*.ipr
+*.iws
+
+# generated scripts/css in public should not be committed
+public/bundles
+static_codegen
+
+# Code coverage reports
+.nyc_output/
+coverage
+
+# HTTPS Certs
+server.cert
+server.key
+
+# Zipkin runner
+zipkin-workspace
\ No newline at end of file
diff --git a/ui/.gitmodules b/ui/.gitmodules
new file mode 100644
index 000000000..2a8a7b89a
--- /dev/null
+++ b/ui/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "haystack-idl"]
+ path = haystack-idl
+ url = https://github.com/ExpediaDotCom/haystack-idl
diff --git a/ui/.npmrc b/ui/.npmrc
new file mode 100644
index 000000000..5660f81af
--- /dev/null
+++ b/ui/.npmrc
@@ -0,0 +1 @@
+registry=https://registry.npmjs.org/
\ No newline at end of file
diff --git a/ui/.prettierignore b/ui/.prettierignore
new file mode 100644
index 000000000..515bcd4f5
--- /dev/null
+++ b/ui/.prettierignore
@@ -0,0 +1,2 @@
+package.json
+package-lock.json
\ No newline at end of file
diff --git a/ui/.prettierrc b/ui/.prettierrc
new file mode 100644
index 000000000..22348c05a
--- /dev/null
+++ b/ui/.prettierrc
@@ -0,0 +1,7 @@
+{
+ "tabWidth": 4,
+ "printWidth": 150,
+ "singleQuote": true,
+ "bracketSpacing": false,
+ "arrowParens": "always"
+}
diff --git a/ui/.travis.yml b/ui/.travis.yml
new file mode 100644
index 000000000..1f3a04b37
--- /dev/null
+++ b/ui/.travis.yml
@@ -0,0 +1,29 @@
+language: node_js
+
+node_js:
+ - "12"
+
+services:
+ - docker
+
+dist: xenial
+
+env:
+ global:
+ - BRANCH=${TRAVIS_BRANCH}
+ - TAG=${TRAVIS_TAG}
+ - SHA=${TRAVIS_COMMIT}
+
+before_install:
+ - sudo apt-get update
+ - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
+ - sudo apt-get install libcairo2-dev libjpeg8-dev libpango1.0-dev libgif-dev build-essential g++
+
+script:
+ # build, create docker image
+ # upload to dockerhub only for master(non PR) and tag scenario
+ - if ([ "$TRAVIS_BRANCH" == "master" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ]) || [ -n "$TRAVIS_TAG" ]; then make release; else make all; fi
+
+notifications:
+ email:
+ - haystack-notifications@expedia.com
diff --git a/ui/.vscode/settings.json b/ui/.vscode/settings.json
new file mode 100644
index 000000000..a820ff1bd
--- /dev/null
+++ b/ui/.vscode/settings.json
@@ -0,0 +1,8 @@
+// Place your settings in this file to overwrite default and user settings.
+{
+ // Prettier
+ "editor.formatOnSave": true,
+ "[less]": {
+ "editor.formatOnSave": true
+ }
+}
diff --git a/ui/CONTRIBUTING.md b/ui/CONTRIBUTING.md
new file mode 100644
index 000000000..317757128
--- /dev/null
+++ b/ui/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+## Bugs
+We use GitHub Issues for our bug reporting. Please make sure the bug isn't already listed before opening a new issue.
+
+## Development
+All work on Haystack happens directly on GitHub. Core Haystack team members will review opened pull requests.
+
+## Requests
+If you see a feature that you would like added, please open an issue in the respective repository or in the general Haystack repo.
+
+## Contributing to Documentation
+To contribute to documentation, directly modify the corresponding .md files in the docs directory under the base haystack repository and submit a pull request. Once your PR is merged, the documentation is automatically built and deployed to https://expediadotcom.github.io/haystack.
+
+## License
+By contributing to Haystack, you agree that your contributions will be licensed under its Apache License.
\ No newline at end of file
diff --git a/ui/LICENSE b/ui/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/ui/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/ui/Makefile b/ui/Makefile
new file mode 100644
index 000000000..3af339e21
--- /dev/null
+++ b/ui/Makefile
@@ -0,0 +1,23 @@
+.PHONY: clean install build docker_build all release
+
+# docker namespace
+export DOCKER_ORG := expediadotcom
+export DOCKER_IMAGE_NAME := haystack-ui
+
+clean:
+ npm run clean
+
+install:
+ npm install
+
+build: clean install
+ npm run build
+
+docker_build:
+ docker build -t $(DOCKER_IMAGE_NAME) -f build/docker/Dockerfile .
+
+all: build docker_build
+
+# build all and release
+release: all
+ ./build/docker/publish-to-docker-hub.sh
diff --git a/ui/README.md b/ui/README.md
new file mode 100644
index 000000000..e0abf792c
--- /dev/null
+++ b/ui/README.md
@@ -0,0 +1,122 @@
+[Build Status](https://travis-ci.org/ExpediaDotCom/haystack-ui)
+[Coverage Status](https://coveralls.io/github/ExpediaDotCom/haystack-ui?branch=master&service=github)
+
+# Haystack-UI
+
+Haystack-ui is the web UI for haystack. It is the central place for visualizing processed data from various haystack sub-systems.
+Visualization tools in haystack-ui include -
+
+| Traces |
+| :------------------------------------------------------------: |
+| Distributed tracing visualization for easy root cause analysis |
+
+| Trends |
+| :---------------------------------------------------: |
+| Visualization of vital service and operation trending |
+
+| Service Graph |
+| :----------------------------------------------------------------: |
+| Real time dependency graph with health and connectivity indicators |
+
+| Alerts and Anomaly Detection |
+| :-------------------------------------------------------: |
+| UI for displaying, configuring, and subscribing to alerts |
+
+| Universal Search |
+| :--------------------------------------------------------------: |
+| Intuitive, sandbox-style searching for accurate results. |
+
+## Development
+
+It is an Express.js-based single-page client-side app written in ES6 + React, using MobX for data flow.
+
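+A minimal sketch of that data-flow pattern (illustrative only, not actual haystack-ui code; `TraceStore` and the fetch endpoint are made-up names):
+
+```
+// store holds observable state; observer components re-render when it changes
+import React from 'react';
+import {observable, action} from 'mobx';
+import {observer} from 'mobx-react';
+
+class TraceStore {
+    @observable traces = [];
+
+    @action fetchTraces(query) {
+        // hypothetical endpoint, for illustration only
+        return fetch(`/api/traces?${query}`)
+            .then((res) => res.json())
+            .then(action((traces) => { this.traces = traces; }));
+    }
+}
+
+const traceStore = new TraceStore();
+
+const TraceList = observer(() => (
+    <ul>{traceStore.traces.map((t) => <li key={t.traceId}>{t.traceId}</li>)}</ul>
+));
+```
+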
+### Pre-requisites
+
+Ensure you have `node >= 10.0` and `npm >= 6.0` installed.
+
+Clone the repository including recursive submodules:
+
+```
+$ git clone --recurse-submodules https://github.com/ExpediaDotCom/haystack-ui.git
+$ cd haystack-ui
+```
+
+If you have already cloned the repository, initialize and update submodules with `git submodule update --init --recursive`.
+
+### Build and Run
+
+This application uses [webpack](https://webpack.github.io/) for building + bundling resources. To run in developer mode with client and server side hotloading, use:
+
+```
+$ npm install # install dependencies
+$ npm run start:dev # start server in dev mode with hotloading
+```
+
+Once the server starts successfully, you can visit [http://localhost:8080/](http://localhost:8080/).
+
+For running in production mode, use:
+
+```
+$ npm install # install dependencies
+$ npm run build # run tests (with coverage), build client-side code and emit production-optimized bundles
+$ npm start # start node server
+```
+
+#### Autoformatting in your favorite IDE with Prettier Integration
+
+This project supports auto-formatting of source code! Find your favorite IDE in the list at https://prettier.io/docs/en/editors.html
+
+For VSCode support, perform the following steps:
+
+- Launch VS Code Quick Open (Ctrl+P)
+- Paste the following command, and press enter:
+
+```
+ext install esbenp.prettier-vscode
+```
+
+This project has a pre-configured `.vscode/settings.json` that enables format on save, so auto-formatting runs every time you save a file.
+
+Prettier is also configured to run in a pre-commit hook, making it easy to enforce consistent source formatting across developers.
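+
+One common way to wire up such a hook (a sketch assuming husky + lint-staged in `package.json`; the repo's actual hook configuration may differ):
+
+```
+{
+    "husky": {
+        "hooks": {
+            "pre-commit": "lint-staged"
+        }
+    },
+    "lint-staged": {
+        "*.{js,jsx,json,less}": ["prettier --write"]
+    }
+}
+```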
+
+## Testing
+
+Haystack-ui uses [Mocha](https://github.com/mochajs/mocha) as its testing framework, with [Chai](https://github.com/chaijs/chai) as the assertion library, [Enzyme](https://github.com/airbnb/enzyme) for test utilities, and [JSDOM](https://github.com/tmpvar/jsdom) as a headless browser for rendering React components.
+[ESLint](https://github.com/eslint/eslint) is used as a linter to ensure code quality.
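+
+For instance, a spec in this stack looks roughly like the following (a hypothetical component and test, not taken from the repo):
+
+```
+import React from 'react';
+import {expect} from 'chai';
+import {shallow} from 'enzyme';
+
+// hypothetical component under test, defined inline to keep the sketch self-contained
+const TraceRow = ({trace}) => <li>{trace.traceId}</li>;
+
+describe('<TraceRow />', () => {
+    it('renders the traceId', () => {
+        const wrapper = shallow(<TraceRow trace={{traceId: 'abc'}} />);
+        expect(wrapper.text()).to.equal('abc');
+    });
+});
+```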
+
+To run the test suite, enter the command `npm test`.
+
+To check code coverage, run `npm run coverage` and open the generated index.html in the created coverage folder.
+
+**Note**-
+You may have to install Cairo dependencies separately for tests to work.
+
+- **OS X Users** : `brew install pkg-config cairo pango libpng jpeg giflib`
+ - _NOTE_: If you run into `Package libffi was not found in the pkg-config search path.` errors while running `npm install`, you will need to additionally run the following command: `export PKG_CONFIG_PATH="${PKG_CONFIG_PATH}:/usr/local/opt/libffi/lib/pkgconfig"`
+- **Others**: Refer [https://www.npmjs.com/package/canvas#installation](https://www.npmjs.com/package/canvas#installation)
+
+### Docker
+
+We have provided `make` commands to facilitate building. To create the docker image, use:
+
+```
+$ make all
+```
+
+## Configuration
+
+Haystack UI can be configured to use one or more stores, each providing the user interface for one subsystem in Haystack. Based on which subsystems are available in your haystack cluster, you can configure the corresponding stores, and the UI will adapt to show interfaces only for the configured subsystems.
+For more details, refer to [https://github.com/ExpediaDotCom/haystack-ui/wiki/Configuring-Subsystem-Connectors](https://github.com/ExpediaDotCom/haystack-ui/wiki/Configuring-Subsystem-Connectors)
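+
+As an example, the Zipkin quickstart config in `build/zipkin/base.json` (included in this repo) enables just two stores, both backed by the Zipkin connector:
+
+```
+{
+    "connectors": {
+        "traces": {
+            "connectorName": "zipkin",
+            "zipkinUrl": "http://localhost:9411/api/v2"
+        },
+        "serviceGraph": {
+            "connectorName": "zipkin",
+            "zipkinUrl": "http://localhost:9411/api/v2"
+        }
+    }
+}
+```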
+
+## Haystack-ui as drop-in replacement for Zipkin UI
+
+If you have an existing Zipkin cluster, you can use Haystack UI as a drop-in replacement for Zipkin's UI.
+For more details, refer to [https://github.com/ExpediaDotCom/haystack-ui/wiki/Configuring-Subsystem-Connectors#using-haystack-ui-as-replacement-for-zipkin-ui](https://github.com/ExpediaDotCom/haystack-ui/wiki/Configuring-Subsystem-Connectors#using-haystack-ui-as-replacement-for-zipkin-ui)
diff --git a/ui/Release.md b/ui/Release.md
new file mode 100644
index 000000000..f231bf6f2
--- /dev/null
+++ b/ui/Release.md
@@ -0,0 +1,10 @@
+## Releasing
+Currently we publish this repo only to Docker Hub. We don't publish it to the Nexus central repository since it's an npm module.
+
+## How to release and publish
+
+* Git tagging:
+
+```git tag -a 1.x.x -m "Release description..."```
+
+Or you can tag using the GitHub UI: https://github.com/ExpediaDotCom/haystack-ui/releases
\ No newline at end of file
diff --git a/ui/build/docker/Dockerfile b/ui/build/docker/Dockerfile
new file mode 100644
index 000000000..e318e2112
--- /dev/null
+++ b/ui/build/docker/Dockerfile
@@ -0,0 +1,28 @@
+FROM node:12 AS base
+
+ENV APP_HOME /app
+ENV PUBLIC_PATH ${APP_HOME}/public
+ENV SERVER_PATH ${APP_HOME}/server
+ENV UNIVERSAL_PATH ${APP_HOME}/universal
+ENV NODE_MODULES_PATH ${APP_HOME}/node_modules
+ENV STATIC_CODEGEN_PATH ${APP_HOME}/static_codegen
+ENV PACKAGE_JSON_PATH ${APP_HOME}/package.json
+WORKDIR ${APP_HOME}
+
+# generating proto code, building bundles and running tests
+FROM base AS builder
+COPY . .
+RUN npm -q install
+RUN npm -q run build
+
+# creating release image
+FROM base AS release
+COPY --from=builder ${PUBLIC_PATH} ${PUBLIC_PATH}
+COPY --from=builder ${SERVER_PATH} ${SERVER_PATH}
+COPY --from=builder ${UNIVERSAL_PATH} ${UNIVERSAL_PATH}
+COPY --from=builder ${STATIC_CODEGEN_PATH} ${STATIC_CODEGEN_PATH}
+COPY --from=builder ${PACKAGE_JSON_PATH} ${PACKAGE_JSON_PATH}
+RUN npm -q install --only=prod
+
+EXPOSE 8080
+CMD node server/start.js
diff --git a/ui/build/docker/publish-to-docker-hub.sh b/ui/build/docker/publish-to-docker-hub.sh
new file mode 100755
index 000000000..473b61c41
--- /dev/null
+++ b/ui/build/docker/publish-to-docker-hub.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+set -e
+
+QUALIFIED_DOCKER_IMAGE_NAME=$DOCKER_ORG/$DOCKER_IMAGE_NAME
+echo "DOCKER_ORG=$DOCKER_ORG, DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME, QUALIFIED_DOCKER_IMAGE_NAME=$QUALIFIED_DOCKER_IMAGE_NAME"
+echo "BRANCH=$BRANCH, TAG=$TAG, SHA=$SHA"
+
+# login
+docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD
+
+# Add tags
+if [[ $TAG =~ ([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
+ echo "releasing semantic versions"
+
+ unset MAJOR MINOR PATCH
+ MAJOR="${BASH_REMATCH[1]}"
+ MINOR="${BASH_REMATCH[2]}"
+ PATCH="${BASH_REMATCH[3]}"
+
+ # for tag, add MAJOR, MAJOR.MINOR, MAJOR.MINOR.PATCH and latest as tag
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$MAJOR.$MINOR.$PATCH
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:latest
+
+ # publish image with tags
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME
+
+elif [[ "$BRANCH" == "master" ]]; then
+ echo "releasing master branch"
+
+ # for 'master' branch, add SHA as tags
+ docker tag $DOCKER_IMAGE_NAME $QUALIFIED_DOCKER_IMAGE_NAME:$SHA
+
+ # publish image with tags
+ docker push $QUALIFIED_DOCKER_IMAGE_NAME
+fi
diff --git a/ui/build/zipkin/README.md b/ui/build/zipkin/README.md
new file mode 100644
index 000000000..5fc8ba6da
--- /dev/null
+++ b/ui/build/zipkin/README.md
@@ -0,0 +1,26 @@
+
+# Zipkin haystack-ui Quickstart Utility
+
+Utility script to run haystack-ui with a Zipkin instance as the backend for traces. It spins up the sleuth-webmvc-example services to feed traces into the Zipkin cluster and generates some example traces. It configures haystack-ui to point to the Zipkin V2 API, with only the Zipkin-backed subsystems (traces and service graph) enabled.
+
+
+### PREREQUISITES
+
+- Assumes that you have mvn and git available on your machine.
+- haystack-ui must already be installed (`npm install`) and built (`npm run build`); if not, please install and build before running this script
+
+
+### USAGE
+
+```> ./zipkin-quickstart.sh```
+
+Wait a couple of minutes until you see the `Express server listening : 8080` message. Then you can hit [http://localhost:8080/search?serviceName=backend](http://localhost:8080/search?serviceName=backend) to use haystack-ui. Search for `serviceName=backend` to see the pre-fed traces coming from the Zipkin backend.
+
+
+### OPTIONS
+
+```
+-h help
+-d debug mode, will emit out all logs from zipkin and sleuth-webmvc-example
+```
+
diff --git a/ui/build/zipkin/base.json b/ui/build/zipkin/base.json
new file mode 100644
index 000000000..f94fe1d72
--- /dev/null
+++ b/ui/build/zipkin/base.json
@@ -0,0 +1,15 @@
+{
+ "port": 8080,
+ "cluster": false,
+ "upstreamTimeout": 30000,
+ "connectors": {
+ "traces": {
+ "connectorName": "zipkin",
+ "zipkinUrl": "http://localhost:9411/api/v2"
+ },
+ "serviceGraph": {
+ "connectorName": "zipkin",
+ "zipkinUrl": "http://localhost:9411/api/v2"
+ }
+ }
+}
diff --git a/ui/build/zipkin/zipkin-quickstart.sh b/ui/build/zipkin/zipkin-quickstart.sh
new file mode 100755
index 000000000..f65bab107
--- /dev/null
+++ b/ui/build/zipkin/zipkin-quickstart.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+
+usage() {
+ cat <<EOF
+usage: ./zipkin-quickstart.sh [-h] [-d]
+ -h help
+ -d debug mode, will emit out all logs from zipkin and sleuth-webmvc-example
+EOF
+}
+
+fetchSleuthExample() {
+ printf '\n=> downloading sleuth webmvc example\n\n'
+ git clone https://github.com/openzipkin/sleuth-webmvc-example.git
+}
+
+runSleuthExample() {
+ printf '\n=> %s\n\n' "running sleuth webmvc ${1}"
+ if [[ "$2" = '-d' ]]; then
+ mvn compile exec:java -Dexec.mainClass=sleuth.webmvc.${1} &
+ else
+ mvn compile exec:java -Dexec.mainClass=sleuth.webmvc.${1} > /dev/null 2>&1 &
+ fi
+}
+
+downloadAndRunZipkin() {
+ printf '\n=> downloading and running zipkin\n\n'
+ curl -sSL https://zipkin.io/quickstart.sh | bash -s
+
+ if [[ "$1" = '-d' ]]; then
+ java -jar zipkin.jar &
+ else
+ java -jar zipkin.jar > /dev/null 2>&1 &
+ fi
+}
+
+feedTraces() {
+ printf '\n=> waiting for zipkin to start and pushing sample traces\n\n'
+ sleep 60
+ curl http://localhost:8081
+ curl http://localhost:8081
+ curl http://localhost:8081
+ curl http://localhost:8081
+ curl http://localhost:8081
+ curl http://localhost:8081
+ curl http://localhost:9000/api
+ curl http://localhost:9000/api
+ curl http://localhost:9000/api
+ curl http://localhost:9000/api
+ sleep 60
+}
+
+startHaystackUi() {
+ printf '\n=> starting haystack-ui\n\n'
+ cd ../../../
+ HAYSTACK_OVERRIDES_CONFIG_PATH=../../build/zipkin/base.json npm start
+}
+
+main() {
+ if [[ "$1" = '-h' || "$1" = '--help' ]]; then
+ usage
+ exit
+ fi
+
+ # execution directory
+ local WORKSPACE=./zipkin-workspace
+ rm -rf $WORKSPACE
+ mkdir $WORKSPACE
+ cd $WORKSPACE
+
+ # download and run zipkin
+ downloadAndRunZipkin "$1"
+ ZIPKIN_PROC_ID=$!
+ printf '\n=> zipkin proc id %s\n\n' "${ZIPKIN_PROC_ID}"
+
+ # download and run sleuth example backend and frontend
+ fetchSleuthExample
+ cd sleuth-webmvc-example
+ runSleuthExample "Backend" "$1"
+ SLEUTH_BACKEND_PROC_ID=$!
+ printf '\n=> backend proc id %s\n\n' "${SLEUTH_BACKEND_PROC_ID}"
+ runSleuthExample "Frontend" "$1"
+ SLEUTH_FRONTEND_PROC_ID=$!
+ printf '\n=> frontend proc id %s\n\n' "${SLEUTH_FRONTEND_PROC_ID}"
+ cd ..
+
+ # feed traces to zipkin
+ feedTraces
+
+ # run haystack ui
+ startHaystackUi
+
+ # teardown services
+ printf '\n=> tearing down\n\n'
+ kill $SLEUTH_BACKEND_PROC_ID
+ kill $SLEUTH_FRONTEND_PROC_ID
+ kill $ZIPKIN_PROC_ID
+}
+
+main "$@"
diff --git a/ui/deployment/terraform/main.tf b/ui/deployment/terraform/main.tf
new file mode 100644
index 000000000..2a34dc082
--- /dev/null
+++ b/ui/deployment/terraform/main.tf
@@ -0,0 +1,79 @@
+locals {
+ app_name = "haystack-ui"
+ count = "${var.enabled?1:0}"
+ config_file_path = "${path.module}/templates/haystack-ui_json.tpl"
+ container_config_path = "/config/haystack-ui.json"
+ deployment_yaml_file_path = "${path.module}/templates/deployment_yaml.tpl"
+ checksum = "${sha1("${data.template_file.config_data.rendered}")}"
+ configmap_name = "ui-${local.checksum}"
+}
+
+
+resource "kubernetes_config_map" "haystack-config" {
+ metadata {
+ name = "${local.configmap_name}"
+ namespace = "${var.namespace}"
+ }
+ data {
+ "haystack-ui.json" = "${data.template_file.config_data.rendered}"
+ }
+ count = "${local.count}"
+}
+
+data "template_file" "config_data" {
+ template = "${file("${local.config_file_path}")}"
+
+ vars {
+ trace_reader_hostname = "${var.trace_reader_hostname}"
+ trace_reader_service_port = "${var.trace_reader_service_port}"
+ metrictank_hostname = "${var.metrictank_hostname}"
+ metrictank_port = "${var.metrictank_port}"
+ graphite_port = "${var.graphite_port}"
+ graphite_hostname = "${var.graphite_hostname}"
+ whitelisted_fields = "${var.whitelisted_fields}"
+ ui_enable_sso = "${var.ui_enable_sso}"
+ ui_saml_callback_url = "${var.ui_saml_callback_url}"
+ ui_saml_entry_point = "${var.ui_saml_entry_point}"
+ ui_saml_issuer = "${var.ui_saml_issuer}"
+ ui_session_secret = "${var.ui_session_secret}"
+ encoder_type = "${var.encoder_type}"
+ }
+}
+
+data "template_file" "deployment_yaml" {
+ template = "${file("${local.deployment_yaml_file_path}")}"
+ vars {
+ app_name = "${local.app_name}"
+ namespace = "${var.namespace}"
+ node_selecter_label = "${var.node_selecter_label}"
+ image = "${var.image}"
+ replicas = "${var.replicas}"
+ memory_limit = "${var.memory_limit}"
+ memory_request = "${var.memory_request}"
+ cpu_limit = "${var.cpu_limit}"
+ cpu_request = "${var.cpu_request}"
+ service_port = "${var.service_port}"
+ container_port = "${var.container_port}"
+ configmap_name = "${local.configmap_name}"
+ }
+}
+
+resource "null_resource" "kubectl_apply" {
+ triggers {
+ template = "${data.template_file.deployment_yaml.rendered}"
+ }
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} apply -f - --context ${var.kubectl_context_name}"
+ }
+ count = "${local.count}"
+}
+
+
+resource "null_resource" "kubectl_destroy" {
+
+ provisioner "local-exec" {
+ command = "echo '${data.template_file.deployment_yaml.rendered}' | ${var.kubectl_executable_name} delete -f - --context ${var.kubectl_context_name}"
+ when = "destroy"
+ }
+ count = "${local.count}"
+}
diff --git a/ui/deployment/terraform/outputs.tf b/ui/deployment/terraform/outputs.tf
new file mode 100644
index 000000000..e69de29bb
diff --git a/ui/deployment/terraform/templates/deployment_yaml.tpl b/ui/deployment/terraform/templates/deployment_yaml.tpl
new file mode 100644
index 000000000..619501734
--- /dev/null
+++ b/ui/deployment/terraform/templates/deployment_yaml.tpl
@@ -0,0 +1,59 @@
+# ------------------- Deployment ------------------- #
+
+kind: Deployment
+apiVersion: apps/v1beta2
+metadata:
+ labels:
+ k8s-app: ${app_name}
+ name: ${app_name}
+ namespace: ${namespace}
+spec:
+ replicas: ${replicas}
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: ${app_name}
+ template:
+ metadata:
+ labels:
+ k8s-app: ${app_name}
+ spec:
+ containers:
+ - name: ${app_name}
+ image: ${image}
+ volumeMounts:
+ # Create on-disk volume to store exec logs
+ - mountPath: /config
+ name: config-volume
+ resources:
+ limits:
+ cpu: ${cpu_limit}
+ memory: ${memory_limit}Mi
+ requests:
+ cpu: ${cpu_request}
+ memory: ${memory_request}Mi
+ env:
+ - name: "HAYSTACK_OVERRIDES_CONFIG_PATH"
+ value: "/config/haystack-ui.json"
+ nodeSelector:
+ ${node_selecter_label}
+ volumes:
+ - name: config-volume
+ configMap:
+ name: ${configmap_name}
+
+# ------------------- Service ------------------- #
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: ${app_name}
+ name: ${app_name}
+ namespace: ${namespace}
+spec:
+ ports:
+ - port: ${service_port}
+ targetPort: ${container_port}
+ selector:
+ k8s-app: ${app_name}
diff --git a/ui/deployment/terraform/templates/haystack-ui_json.tpl b/ui/deployment/terraform/templates/haystack-ui_json.tpl
new file mode 100644
index 000000000..92910c485
--- /dev/null
+++ b/ui/deployment/terraform/templates/haystack-ui_json.tpl
@@ -0,0 +1,47 @@
+{
+ "port": 8080,
+ "cluster": true,
+ "upstreamTimeout": 30000,
+ "encoder": "${encoder_type}",
+ "enableServicePerformance": false,
+ "enableServiceLevelTrends": false,
+ "enableLatencyCostViewer": true,
+ "graphite": {
+ "host": "${graphite_hostname}",
+ "port": ${graphite_port}
+ },
+ "grpcOptions": {
+ "grpc.max_receive_message_length": 52428800
+ },
+ "connectors": {
+ "traces": {
+ "connectorName": "haystack",
+ "haystackHost": "${trace_reader_hostname}",
+ "haystackPort": ${trace_reader_service_port},
+ "serviceRefreshIntervalInSecs": 60,
+ "fieldKeys": [${whitelisted_fields}]
+ },
+ "trends": {
+ "connectorName": "haystack",
+ "metricTankUrl": "http://${metrictank_hostname}:${metrictank_port}"
+ },
+ "alerts": {
+ "connectorName": "haystack",
+ "metricTankUrl": "http://${metrictank_hostname}:${metrictank_port}",
+ "alertFreqInSec": 300,
+ "alertMergeBufferTimeInSec": 60,
+ "subscriptions": {
+ "connectorName": "stub",
+ "enabled": false
+ }
+ }
+ },
+ "enableSSO": ${ui_enable_sso},
+ "saml": {
+ "callbackUrl": "${ui_saml_callback_url}",
+ "entry_point": "${ui_saml_entry_point}",
+ "issuer": "${ui_saml_issuer}"
+ },
+ "sessionTimeout": 3600000,
+ "sessionSecret": "${ui_session_secret}"
+}
diff --git a/ui/deployment/terraform/variables.tf b/ui/deployment/terraform/variables.tf
new file mode 100644
index 000000000..2a1bf3f83
--- /dev/null
+++ b/ui/deployment/terraform/variables.tf
@@ -0,0 +1,51 @@
+variable "enabled" {
+ default = true
+}
+
+variable "image" {}
+variable "replicas" {}
+variable "namespace" {}
+variable "kubectl_executable_name" {}
+variable "kubectl_context_name" {}
+variable "node_selecter_label"{}
+variable "memory_request"{}
+variable "memory_limit"{}
+variable "cpu_request"{}
+variable "cpu_limit"{}
+variable "graphite_hostname" {}
+variable "graphite_port" {}
+variable "encoder_type" {}
+
+variable "termination_grace_period" {
+ default = 30
+}
+variable "service_port" {
+ default = 80
+}
+variable "container_port" {
+ default = 8080
+}
+
+variable "k8s_cluster_name" {}
+
+variable "trace_reader_hostname" {}
+
+variable "trace_reader_service_port" {}
+
+variable "metrictank_hostname" {}
+
+variable "metrictank_port" {}
+
+variable "whitelisted_fields" {}
+
+variable "ui_enable_sso" {
+ default = false
+}
+
+variable "ui_saml_callback_url" {}
+
+variable "ui_saml_entry_point" {}
+
+variable "ui_saml_issuer" {}
+
+variable "ui_session_secret" {}
diff --git a/ui/package-lock.json b/ui/package-lock.json
new file mode 100644
index 000000000..56e9b40d8
--- /dev/null
+++ b/ui/package-lock.json
@@ -0,0 +1,14057 @@
+{
+ "name": "haystack-ui",
+ "version": "1.0.0",
+ "lockfileVersion": 1,
+ "requires": true,
+ "dependencies": {
+ "@babel/cli": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/cli/-/cli-7.8.4.tgz",
+ "integrity": "sha512-XXLgAm6LBbaNxaGhMAznXXaxtCWfuv6PIDJ9Alsy9JYTOh+j2jJz+L/162kkfU1j/pTSxK1xGmlwI4pdIMkoag==",
+ "dev": true,
+ "requires": {
+ "chokidar": "^2.1.8",
+ "commander": "^4.0.1",
+ "convert-source-map": "^1.1.0",
+ "fs-readdir-recursive": "^1.1.0",
+ "glob": "^7.0.0",
+ "lodash": "^4.17.13",
+ "make-dir": "^2.1.0",
+ "slash": "^2.0.0",
+ "source-map": "^0.5.0"
+ },
+ "dependencies": {
+ "source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
+ "dev": true
+ }
+ }
+ },
+ "@babel/code-frame": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.8.3.tgz",
+ "integrity": "sha512-a9gxpmdXtZEInkCSHUJDLHZVBgb1QS0jhss4cPP93EW7s+uC5bikET2twEF3KV+7rDblJcmNvTR7VJejqd2C2g==",
+ "dev": true,
+ "requires": {
+ "@babel/highlight": "^7.8.3"
+ }
+ },
+ "@babel/compat-data": {
+ "version": "7.8.5",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.8.5.tgz",
+ "integrity": "sha512-jWYUqQX/ObOhG1UiEkbH5SANsE/8oKXiQWjj7p7xgj9Zmnt//aUvyz4dBkK0HNsS8/cbyC5NmmH87VekW+mXFg==",
+ "dev": true,
+ "requires": {
+ "browserslist": "^4.8.5",
+ "invariant": "^2.2.4",
+ "semver": "^5.5.0"
+ }
+ },
+ "@babel/core": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.8.4.tgz",
+ "integrity": "sha512-0LiLrB2PwrVI+a2/IEskBopDYSd8BCb3rOvH7D5tzoWd696TBEduBvuLVm4Nx6rltrLZqvI3MCalB2K2aVzQjA==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.8.3",
+ "@babel/generator": "^7.8.4",
+ "@babel/helpers": "^7.8.4",
+ "@babel/parser": "^7.8.4",
+ "@babel/template": "^7.8.3",
+ "@babel/traverse": "^7.8.4",
+ "@babel/types": "^7.8.3",
+ "convert-source-map": "^1.7.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.1",
+ "json5": "^2.1.0",
+ "lodash": "^4.17.13",
+ "resolve": "^1.3.2",
+ "semver": "^5.4.1",
+ "source-map": "^0.5.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
+ "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
+ "dev": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ },
+ "source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
+ "dev": true
+ }
+ }
+ },
+ "@babel/generator": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.8.4.tgz",
+ "integrity": "sha512-PwhclGdRpNAf3IxZb0YVuITPZmmrXz9zf6fH8lT4XbrmfQKr6ryBzhv593P5C6poJRciFCL/eHGW2NuGrgEyxA==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.8.3",
+ "jsesc": "^2.5.1",
+ "lodash": "^4.17.13",
+ "source-map": "^0.5.0"
+ },
+ "dependencies": {
+ "source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
+ "dev": true
+ }
+ }
+ },
+ "@babel/helper-annotate-as-pure": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.8.3.tgz",
+ "integrity": "sha512-6o+mJrZBxOoEX77Ezv9zwW7WV8DdluouRKNY/IR5u/YTMuKHgugHOzYWlYvYLpLA9nPsQCAAASpCIbjI9Mv+Uw==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helper-builder-binary-assignment-operator-visitor": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.8.3.tgz",
+ "integrity": "sha512-5eFOm2SyFPK4Rh3XMMRDjN7lBH0orh3ss0g3rTYZnBQ+r6YPj7lgDyCvPphynHvUrobJmeMignBr6Acw9mAPlw==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-explode-assignable-expression": "^7.8.3",
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helper-builder-react-jsx": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-builder-react-jsx/-/helper-builder-react-jsx-7.8.3.tgz",
+ "integrity": "sha512-JT8mfnpTkKNCboTqZsQTdGo3l3Ik3l7QIt9hh0O9DYiwVel37VoJpILKM4YFbP2euF32nkQSb+F9cUk9b7DDXQ==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.8.3",
+ "esutils": "^2.0.0"
+ }
+ },
+ "@babel/helper-call-delegate": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-call-delegate/-/helper-call-delegate-7.8.3.tgz",
+ "integrity": "sha512-6Q05px0Eb+N4/GTyKPPvnkig7Lylw+QzihMpws9iiZQv7ZImf84ZsZpQH7QoWN4n4tm81SnSzPgHw2qtO0Zf3A==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-hoist-variables": "^7.8.3",
+ "@babel/traverse": "^7.8.3",
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helper-compilation-targets": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.8.4.tgz",
+ "integrity": "sha512-3k3BsKMvPp5bjxgMdrFyq0UaEO48HciVrOVF0+lon8pp95cyJ2ujAh0TrBHNMnJGT2rr0iKOJPFFbSqjDyf/Pg==",
+ "dev": true,
+ "requires": {
+ "@babel/compat-data": "^7.8.4",
+ "browserslist": "^4.8.5",
+ "invariant": "^2.2.4",
+ "levenary": "^1.1.1",
+ "semver": "^5.5.0"
+ }
+ },
+ "@babel/helper-create-class-features-plugin": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.8.3.tgz",
+ "integrity": "sha512-qmp4pD7zeTxsv0JNecSBsEmG1ei2MqwJq4YQcK3ZWm/0t07QstWfvuV/vm3Qt5xNMFETn2SZqpMx2MQzbtq+KA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-function-name": "^7.8.3",
+ "@babel/helper-member-expression-to-functions": "^7.8.3",
+ "@babel/helper-optimise-call-expression": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/helper-replace-supers": "^7.8.3",
+ "@babel/helper-split-export-declaration": "^7.8.3"
+ }
+ },
+ "@babel/helper-create-regexp-features-plugin": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.8.3.tgz",
+ "integrity": "sha512-Gcsm1OHCUr9o9TcJln57xhWHtdXbA2pgQ58S0Lxlks0WMGNXuki4+GLfX0p+L2ZkINUGZvfkz8rzoqJQSthI+Q==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-regex": "^7.8.3",
+ "regexpu-core": "^4.6.0"
+ }
+ },
+ "@babel/helper-define-map": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-define-map/-/helper-define-map-7.8.3.tgz",
+ "integrity": "sha512-PoeBYtxoZGtct3md6xZOCWPcKuMuk3IHhgxsRRNtnNShebf4C8YonTSblsK4tvDbm+eJAw2HAPOfCr+Q/YRG/g==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-function-name": "^7.8.3",
+ "@babel/types": "^7.8.3",
+ "lodash": "^4.17.13"
+ }
+ },
+ "@babel/helper-explode-assignable-expression": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.8.3.tgz",
+ "integrity": "sha512-N+8eW86/Kj147bO9G2uclsg5pwfs/fqqY5rwgIL7eTBklgXjcOJ3btzS5iM6AitJcftnY7pm2lGsrJVYLGjzIw==",
+ "dev": true,
+ "requires": {
+ "@babel/traverse": "^7.8.3",
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helper-function-name": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.8.3.tgz",
+ "integrity": "sha512-BCxgX1BC2hD/oBlIFUgOCQDOPV8nSINxCwM3o93xP4P9Fq6aV5sgv2cOOITDMtCfQ+3PvHp3l689XZvAM9QyOA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-get-function-arity": "^7.8.3",
+ "@babel/template": "^7.8.3",
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helper-get-function-arity": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.8.3.tgz",
+ "integrity": "sha512-FVDR+Gd9iLjUMY1fzE2SR0IuaJToR4RkCDARVfsBBPSP53GEqSFjD8gNyxg246VUyc/ALRxFaAK8rVG7UT7xRA==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helper-hoist-variables": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.8.3.tgz",
+ "integrity": "sha512-ky1JLOjcDUtSc+xkt0xhYff7Z6ILTAHKmZLHPxAhOP0Nd77O+3nCsd6uSVYur6nJnCI029CrNbYlc0LoPfAPQg==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helper-member-expression-to-functions": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.8.3.tgz",
+ "integrity": "sha512-fO4Egq88utkQFjbPrSHGmGLFqmrshs11d46WI+WZDESt7Wu7wN2G2Iu+NMMZJFDOVRHAMIkB5SNh30NtwCA7RA==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helper-module-imports": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.8.3.tgz",
+ "integrity": "sha512-R0Bx3jippsbAEtzkpZ/6FIiuzOURPcMjHp+Z6xPe6DtApDJx+w7UYyOLanZqO8+wKR9G10s/FmHXvxaMd9s6Kg==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helper-module-transforms": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.8.3.tgz",
+ "integrity": "sha512-C7NG6B7vfBa/pwCOshpMbOYUmrYQDfCpVL/JCRu0ek8B5p8kue1+BCXpg2vOYs7w5ACB9GTOBYQ5U6NwrMg+3Q==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-module-imports": "^7.8.3",
+ "@babel/helper-simple-access": "^7.8.3",
+ "@babel/helper-split-export-declaration": "^7.8.3",
+ "@babel/template": "^7.8.3",
+ "@babel/types": "^7.8.3",
+ "lodash": "^4.17.13"
+ }
+ },
+ "@babel/helper-optimise-call-expression": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.8.3.tgz",
+ "integrity": "sha512-Kag20n86cbO2AvHca6EJsvqAd82gc6VMGule4HwebwMlwkpXuVqrNRj6CkCV2sKxgi9MyAUnZVnZ6lJ1/vKhHQ==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helper-plugin-utils": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.8.3.tgz",
+ "integrity": "sha512-j+fq49Xds2smCUNYmEHF9kGNkhbet6yVIBp4e6oeQpH1RUs/Ir06xUKzDjDkGcaaokPiTNs2JBWHjaE4csUkZQ==",
+ "dev": true
+ },
+ "@babel/helper-regex": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-regex/-/helper-regex-7.8.3.tgz",
+ "integrity": "sha512-BWt0QtYv/cg/NecOAZMdcn/waj/5P26DR4mVLXfFtDokSR6fyuG0Pj+e2FqtSME+MqED1khnSMulkmGl8qWiUQ==",
+ "dev": true,
+ "requires": {
+ "lodash": "^4.17.13"
+ }
+ },
+ "@babel/helper-remap-async-to-generator": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.8.3.tgz",
+ "integrity": "sha512-kgwDmw4fCg7AVgS4DukQR/roGp+jP+XluJE5hsRZwxCYGg+Rv9wSGErDWhlI90FODdYfd4xG4AQRiMDjjN0GzA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.8.3",
+ "@babel/helper-wrap-function": "^7.8.3",
+ "@babel/template": "^7.8.3",
+ "@babel/traverse": "^7.8.3",
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helper-replace-supers": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.8.3.tgz",
+ "integrity": "sha512-xOUssL6ho41U81etpLoT2RTdvdus4VfHamCuAm4AHxGr+0it5fnwoVdwUJ7GFEqCsQYzJUhcbsN9wB9apcYKFA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-member-expression-to-functions": "^7.8.3",
+ "@babel/helper-optimise-call-expression": "^7.8.3",
+ "@babel/traverse": "^7.8.3",
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helper-simple-access": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.8.3.tgz",
+ "integrity": "sha512-VNGUDjx5cCWg4vvCTR8qQ7YJYZ+HBjxOgXEl7ounz+4Sn7+LMD3CFrCTEU6/qXKbA2nKg21CwhhBzO0RpRbdCw==",
+ "dev": true,
+ "requires": {
+ "@babel/template": "^7.8.3",
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helper-split-export-declaration": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.8.3.tgz",
+ "integrity": "sha512-3x3yOeyBhW851hroze7ElzdkeRXQYQbFIb7gLK1WQYsw2GWDay5gAJNw1sWJ0VFP6z5J1whqeXH/WCdCjZv6dA==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helper-wrap-function": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.8.3.tgz",
+ "integrity": "sha512-LACJrbUET9cQDzb6kG7EeD7+7doC3JNvUgTEQOx2qaO1fKlzE/Bf05qs9w1oXQMmXlPO65lC3Tq9S6gZpTErEQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-function-name": "^7.8.3",
+ "@babel/template": "^7.8.3",
+ "@babel/traverse": "^7.8.3",
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/helpers": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.8.4.tgz",
+ "integrity": "sha512-VPbe7wcQ4chu4TDQjimHv/5tj73qz88o12EPkO2ValS2QiQS/1F2SsjyIGNnAD0vF/nZS6Cf9i+vW6HIlnaR8w==",
+ "dev": true,
+ "requires": {
+ "@babel/template": "^7.8.3",
+ "@babel/traverse": "^7.8.4",
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/highlight": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.8.3.tgz",
+ "integrity": "sha512-PX4y5xQUvy0fnEVHrYOarRPXVWafSjTW9T0Hab8gVIawpl2Sj0ORyrygANq+KjcNlSSTw0YCLSNA8OyZ1I4yEg==",
+ "dev": true,
+ "requires": {
+ "chalk": "^2.0.0",
+ "esutils": "^2.0.2",
+ "js-tokens": "^4.0.0"
+ }
+ },
+ "@babel/parser": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.8.4.tgz",
+ "integrity": "sha512-0fKu/QqildpXmPVaRBoXOlyBb3MC+J0A66x97qEfLOMkn3u6nfY5esWogQwi/K0BjASYy4DbnsEWnpNL6qT5Mw==",
+ "dev": true
+ },
+ "@babel/plugin-proposal-async-generator-functions": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.8.3.tgz",
+ "integrity": "sha512-NZ9zLv848JsV3hs8ryEh7Uaz/0KsmPLqv0+PdkDJL1cJy0K4kOCFa8zc1E3mp+RHPQcpdfb/6GovEsW4VDrOMw==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/helper-remap-async-to-generator": "^7.8.3",
+ "@babel/plugin-syntax-async-generators": "^7.8.0"
+ }
+ },
+ "@babel/plugin-proposal-class-properties": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.8.3.tgz",
+ "integrity": "sha512-EqFhbo7IosdgPgZggHaNObkmO1kNUe3slaKu54d5OWvy+p9QIKOzK1GAEpAIsZtWVtPXUHSMcT4smvDrCfY4AA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-create-class-features-plugin": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-proposal-decorators": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.8.3.tgz",
+ "integrity": "sha512-e3RvdvS4qPJVTe288DlXjwKflpfy1hr0j5dz5WpIYYeP7vQZg2WfAEIp8k5/Lwis/m5REXEteIz6rrcDtXXG7w==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-create-class-features-plugin": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/plugin-syntax-decorators": "^7.8.3"
+ }
+ },
+ "@babel/plugin-proposal-dynamic-import": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.8.3.tgz",
+ "integrity": "sha512-NyaBbyLFXFLT9FP+zk0kYlUlA8XtCUbehs67F0nnEg7KICgMc2mNkIeu9TYhKzyXMkrapZFwAhXLdnt4IYHy1w==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.0"
+ }
+ },
+ "@babel/plugin-proposal-json-strings": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.8.3.tgz",
+ "integrity": "sha512-KGhQNZ3TVCQG/MjRbAUwuH+14y9q0tpxs1nWWs3pbSleRdDro9SAMMDyye8HhY1gqZ7/NqIc8SKhya0wRDgP1Q==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/plugin-syntax-json-strings": "^7.8.0"
+ }
+ },
+ "@babel/plugin-proposal-nullish-coalescing-operator": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.8.3.tgz",
+ "integrity": "sha512-TS9MlfzXpXKt6YYomudb/KU7nQI6/xnapG6in1uZxoxDghuSMZsPb6D2fyUwNYSAp4l1iR7QtFOjkqcRYcUsfw==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.0"
+ }
+ },
+ "@babel/plugin-proposal-object-rest-spread": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.8.3.tgz",
+ "integrity": "sha512-8qvuPwU/xxUCt78HocNlv0mXXo0wdh9VT1R04WU8HGOfaOob26pF+9P5/lYjN/q7DHOX1bvX60hnhOvuQUJdbA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.0"
+ }
+ },
+ "@babel/plugin-proposal-optional-catch-binding": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.8.3.tgz",
+ "integrity": "sha512-0gkX7J7E+AtAw9fcwlVQj8peP61qhdg/89D5swOkjYbkboA2CVckn3kiyum1DE0wskGb7KJJxBdyEBApDLLVdw==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.0"
+ }
+ },
+ "@babel/plugin-proposal-optional-chaining": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.8.3.tgz",
+ "integrity": "sha512-QIoIR9abkVn+seDE3OjA08jWcs3eZ9+wJCKSRgo3WdEU2csFYgdScb+8qHB3+WXsGJD55u+5hWCISI7ejXS+kg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.0"
+ }
+ },
+ "@babel/plugin-proposal-unicode-property-regex": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.8.3.tgz",
+ "integrity": "sha512-1/1/rEZv2XGweRwwSkLpY+s60za9OZ1hJs4YDqFHCw0kYWYwL5IFljVY1MYBL+weT1l9pokDO2uhSTLVxzoHkQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-syntax-async-generators": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz",
+ "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-decorators": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.8.3.tgz",
+ "integrity": "sha512-8Hg4dNNT9/LcA1zQlfwuKR8BUc/if7Q7NkTam9sGTcJphLwpf2g4S42uhspQrIrR+dpzE0dtTqBVFoHl8GtnnQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-syntax-dynamic-import": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz",
+ "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-json-strings": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz",
+ "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-jsx": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.8.3.tgz",
+ "integrity": "sha512-WxdW9xyLgBdefoo0Ynn3MRSkhe5tFVxxKNVdnZSh318WrG2e2jH+E9wd/++JsqcLJZPfz87njQJ8j2Upjm0M0A==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-syntax-nullish-coalescing-operator": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz",
+ "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-object-rest-spread": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz",
+ "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-optional-catch-binding": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz",
+ "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-optional-chaining": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz",
+ "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-top-level-await": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.8.3.tgz",
+ "integrity": "sha512-kwj1j9lL/6Wd0hROD3b/OZZ7MSrZLqqn9RAZ5+cYYsflQ9HZBIKCUkr3+uL1MEJ1NePiUbf98jjiMQSv0NMR9g==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-arrow-functions": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.8.3.tgz",
+ "integrity": "sha512-0MRF+KC8EqH4dbuITCWwPSzsyO3HIWWlm30v8BbbpOrS1B++isGxPnnuq/IZvOX5J2D/p7DQalQm+/2PnlKGxg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-async-to-generator": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.8.3.tgz",
+ "integrity": "sha512-imt9tFLD9ogt56Dd5CI/6XgpukMwd/fLGSrix2httihVe7LOGVPhyhMh1BU5kDM7iHD08i8uUtmV2sWaBFlHVQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-module-imports": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/helper-remap-async-to-generator": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-block-scoped-functions": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.8.3.tgz",
+ "integrity": "sha512-vo4F2OewqjbB1+yaJ7k2EJFHlTP3jR634Z9Cj9itpqNjuLXvhlVxgnjsHsdRgASR8xYDrx6onw4vW5H6We0Jmg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-block-scoping": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.8.3.tgz",
+ "integrity": "sha512-pGnYfm7RNRgYRi7bids5bHluENHqJhrV4bCZRwc5GamaWIIs07N4rZECcmJL6ZClwjDz1GbdMZFtPs27hTB06w==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "lodash": "^4.17.13"
+ }
+ },
+ "@babel/plugin-transform-classes": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.8.3.tgz",
+ "integrity": "sha512-SjT0cwFJ+7Rbr1vQsvphAHwUHvSUPmMjMU/0P59G8U2HLFqSa082JO7zkbDNWs9kH/IUqpHI6xWNesGf8haF1w==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.8.3",
+ "@babel/helper-define-map": "^7.8.3",
+ "@babel/helper-function-name": "^7.8.3",
+ "@babel/helper-optimise-call-expression": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/helper-replace-supers": "^7.8.3",
+ "@babel/helper-split-export-declaration": "^7.8.3",
+ "globals": "^11.1.0"
+ }
+ },
+ "@babel/plugin-transform-computed-properties": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.8.3.tgz",
+ "integrity": "sha512-O5hiIpSyOGdrQZRQ2ccwtTVkgUDBBiCuK//4RJ6UfePllUTCENOzKxfh6ulckXKc0DixTFLCfb2HVkNA7aDpzA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-destructuring": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.8.3.tgz",
+ "integrity": "sha512-H4X646nCkiEcHZUZaRkhE2XVsoz0J/1x3VVujnn96pSoGCtKPA99ZZA+va+gK+92Zycd6OBKCD8tDb/731bhgQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-dotall-regex": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.8.3.tgz",
+ "integrity": "sha512-kLs1j9Nn4MQoBYdRXH6AeaXMbEJFaFu/v1nQkvib6QzTj8MZI5OQzqmD83/2jEM1z0DLilra5aWO5YpyC0ALIw==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-duplicate-keys": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.8.3.tgz",
+ "integrity": "sha512-s8dHiBUbcbSgipS4SMFuWGqCvyge5V2ZeAWzR6INTVC3Ltjig/Vw1G2Gztv0vU/hRG9X8IvKvYdoksnUfgXOEQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-exponentiation-operator": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.8.3.tgz",
+ "integrity": "sha512-zwIpuIymb3ACcInbksHaNcR12S++0MDLKkiqXHl3AzpgdKlFNhog+z/K0+TGW+b0w5pgTq4H6IwV/WhxbGYSjQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-builder-binary-assignment-operator-visitor": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-for-of": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.8.4.tgz",
+ "integrity": "sha512-iAXNlOWvcYUYoV8YIxwS7TxGRJcxyl8eQCfT+A5j8sKUzRFvJdcyjp97jL2IghWSRDaL2PU2O2tX8Cu9dTBq5A==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-function-name": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.8.3.tgz",
+ "integrity": "sha512-rO/OnDS78Eifbjn5Py9v8y0aR+aSYhDhqAwVfsTl0ERuMZyr05L1aFSCJnbv2mmsLkit/4ReeQ9N2BgLnOcPCQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-function-name": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-literals": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.8.3.tgz",
+ "integrity": "sha512-3Tqf8JJ/qB7TeldGl+TT55+uQei9JfYaregDcEAyBZ7akutriFrt6C/wLYIer6OYhleVQvH/ntEhjE/xMmy10A==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-member-expression-literals": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.8.3.tgz",
+ "integrity": "sha512-3Wk2EXhnw+rP+IDkK6BdtPKsUE5IeZ6QOGrPYvw52NwBStw9V1ZVzxgK6fSKSxqUvH9eQPR3tm3cOq79HlsKYA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-modules-amd": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.8.3.tgz",
+ "integrity": "sha512-MadJiU3rLKclzT5kBH4yxdry96odTUwuqrZM+GllFI/VhxfPz+k9MshJM+MwhfkCdxxclSbSBbUGciBngR+kEQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-module-transforms": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "babel-plugin-dynamic-import-node": "^2.3.0"
+ },
+ "dependencies": {
+ "babel-plugin-dynamic-import-node": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.0.tgz",
+ "integrity": "sha512-o6qFkpeQEBxcqt0XYlWzAVxNCSCZdUgcR8IRlhD/8DylxjjO4foPcvTW0GGKa/cVt3rvxZ7o5ippJ+/0nvLhlQ==",
+ "dev": true,
+ "requires": {
+ "object.assign": "^4.1.0"
+ }
+ }
+ }
+ },
+ "@babel/plugin-transform-modules-commonjs": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.8.3.tgz",
+ "integrity": "sha512-JpdMEfA15HZ/1gNuB9XEDlZM1h/gF/YOH7zaZzQu2xCFRfwc01NXBMHHSTT6hRjlXJJs5x/bfODM3LiCk94Sxg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-module-transforms": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/helper-simple-access": "^7.8.3",
+ "babel-plugin-dynamic-import-node": "^2.3.0"
+ },
+ "dependencies": {
+ "babel-plugin-dynamic-import-node": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.0.tgz",
+ "integrity": "sha512-o6qFkpeQEBxcqt0XYlWzAVxNCSCZdUgcR8IRlhD/8DylxjjO4foPcvTW0GGKa/cVt3rvxZ7o5ippJ+/0nvLhlQ==",
+ "dev": true,
+ "requires": {
+ "object.assign": "^4.1.0"
+ }
+ }
+ }
+ },
+ "@babel/plugin-transform-modules-systemjs": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.8.3.tgz",
+ "integrity": "sha512-8cESMCJjmArMYqa9AO5YuMEkE4ds28tMpZcGZB/jl3n0ZzlsxOAi3mC+SKypTfT8gjMupCnd3YiXCkMjj2jfOg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-hoist-variables": "^7.8.3",
+ "@babel/helper-module-transforms": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "babel-plugin-dynamic-import-node": "^2.3.0"
+ },
+ "dependencies": {
+ "babel-plugin-dynamic-import-node": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.0.tgz",
+ "integrity": "sha512-o6qFkpeQEBxcqt0XYlWzAVxNCSCZdUgcR8IRlhD/8DylxjjO4foPcvTW0GGKa/cVt3rvxZ7o5ippJ+/0nvLhlQ==",
+ "dev": true,
+ "requires": {
+ "object.assign": "^4.1.0"
+ }
+ }
+ }
+ },
+ "@babel/plugin-transform-modules-umd": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.8.3.tgz",
+ "integrity": "sha512-evhTyWhbwbI3/U6dZAnx/ePoV7H6OUG+OjiJFHmhr9FPn0VShjwC2kdxqIuQ/+1P50TMrneGzMeyMTFOjKSnAw==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-module-transforms": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-named-capturing-groups-regex": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.8.3.tgz",
+ "integrity": "sha512-f+tF/8UVPU86TrCb06JoPWIdDpTNSGGcAtaD9mLP0aYGA0OS0j7j7DHJR0GTFrUZPUU6loZhbsVZgTh0N+Qdnw==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-new-target": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.8.3.tgz",
+ "integrity": "sha512-QuSGysibQpyxexRyui2vca+Cmbljo8bcRckgzYV4kRIsHpVeyeC3JDO63pY+xFZ6bWOBn7pfKZTqV4o/ix9sFw==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-object-super": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.8.3.tgz",
+ "integrity": "sha512-57FXk+gItG/GejofIyLIgBKTas4+pEU47IXKDBWFTxdPd7F80H8zybyAY7UoblVfBhBGs2EKM+bJUu2+iUYPDQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/helper-replace-supers": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-parameters": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.8.4.tgz",
+ "integrity": "sha512-IsS3oTxeTsZlE5KqzTbcC2sV0P9pXdec53SU+Yxv7o/6dvGM5AkTotQKhoSffhNgZ/dftsSiOoxy7evCYJXzVA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-call-delegate": "^7.8.3",
+ "@babel/helper-get-function-arity": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-property-literals": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.8.3.tgz",
+ "integrity": "sha512-uGiiXAZMqEoQhRWMK17VospMZh5sXWg+dlh2soffpkAl96KAm+WZuJfa6lcELotSRmooLqg0MWdH6UUq85nmmg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-react-display-name": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.8.3.tgz",
+ "integrity": "sha512-3Jy/PCw8Fe6uBKtEgz3M82ljt+lTg+xJaM4og+eyu83qLT87ZUSckn0wy7r31jflURWLO83TW6Ylf7lyXj3m5A==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-react-jsx": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.8.3.tgz",
+ "integrity": "sha512-r0h+mUiyL595ikykci+fbwm9YzmuOrUBi0b+FDIKmi3fPQyFokWVEMJnRWHJPPQEjyFJyna9WZC6Viv6UHSv1g==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-builder-react-jsx": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/plugin-syntax-jsx": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-react-jsx-self": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.8.3.tgz",
+ "integrity": "sha512-01OT7s5oa0XTLf2I8XGsL8+KqV9lx3EZV+jxn/L2LQ97CGKila2YMroTkCEIE0HV/FF7CMSRsIAybopdN9NTdg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/plugin-syntax-jsx": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-react-jsx-source": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.8.3.tgz",
+ "integrity": "sha512-PLMgdMGuVDtRS/SzjNEQYUT8f4z1xb2BAT54vM1X5efkVuYBf5WyGUMbpmARcfq3NaglIwz08UVQK4HHHbC6ag==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/plugin-syntax-jsx": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-regenerator": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.8.3.tgz",
+ "integrity": "sha512-qt/kcur/FxrQrzFR432FGZznkVAjiyFtCOANjkAKwCbt465L6ZCiUQh2oMYGU3Wo8LRFJxNDFwWn106S5wVUNA==",
+ "dev": true,
+ "requires": {
+ "regenerator-transform": "^0.14.0"
+ }
+ },
+ "@babel/plugin-transform-reserved-words": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.8.3.tgz",
+ "integrity": "sha512-mwMxcycN3omKFDjDQUl+8zyMsBfjRFr0Zn/64I41pmjv4NJuqcYlEtezwYtw9TFd9WR1vN5kiM+O0gMZzO6L0A==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-shorthand-properties": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.8.3.tgz",
+ "integrity": "sha512-I9DI6Odg0JJwxCHzbzW08ggMdCezoWcuQRz3ptdudgwaHxTjxw5HgdFJmZIkIMlRymL6YiZcped4TTCB0JcC8w==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-spread": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.8.3.tgz",
+ "integrity": "sha512-CkuTU9mbmAoFOI1tklFWYYbzX5qCIZVXPVy0jpXgGwkplCndQAa58s2jr66fTeQnA64bDox0HL4U56CFYoyC7g==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-sticky-regex": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.8.3.tgz",
+ "integrity": "sha512-9Spq0vGCD5Bb4Z/ZXXSK5wbbLFMG085qd2vhL1JYu1WcQ5bXqZBAYRzU1d+p79GcHs2szYv5pVQCX13QgldaWw==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/helper-regex": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-template-literals": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.8.3.tgz",
+ "integrity": "sha512-820QBtykIQOLFT8NZOcTRJ1UNuztIELe4p9DCgvj4NK+PwluSJ49we7s9FB1HIGNIYT7wFUJ0ar2QpCDj0escQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-typeof-symbol": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.8.4.tgz",
+ "integrity": "sha512-2QKyfjGdvuNfHsb7qnBBlKclbD4CfshH2KvDabiijLMGXPHJXGxtDzwIF7bQP+T0ysw8fYTtxPafgfs/c1Lrqg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-unicode-regex": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.8.3.tgz",
+ "integrity": "sha512-+ufgJjYdmWfSQ+6NS9VGUR2ns8cjJjYbrbi11mZBTaWm+Fui/ncTLFF28Ei1okavY+xkojGr1eJxNsWYeA5aZw==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/preset-env": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.8.4.tgz",
+ "integrity": "sha512-HihCgpr45AnSOHRbS5cWNTINs0TwaR8BS8xIIH+QwiW8cKL0llV91njQMpeMReEPVs+1Ao0x3RLEBLtt1hOq4w==",
+ "dev": true,
+ "requires": {
+ "@babel/compat-data": "^7.8.4",
+ "@babel/helper-compilation-targets": "^7.8.4",
+ "@babel/helper-module-imports": "^7.8.3",
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/plugin-proposal-async-generator-functions": "^7.8.3",
+ "@babel/plugin-proposal-dynamic-import": "^7.8.3",
+ "@babel/plugin-proposal-json-strings": "^7.8.3",
+ "@babel/plugin-proposal-nullish-coalescing-operator": "^7.8.3",
+ "@babel/plugin-proposal-object-rest-spread": "^7.8.3",
+ "@babel/plugin-proposal-optional-catch-binding": "^7.8.3",
+ "@babel/plugin-proposal-optional-chaining": "^7.8.3",
+ "@babel/plugin-proposal-unicode-property-regex": "^7.8.3",
+ "@babel/plugin-syntax-async-generators": "^7.8.0",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.0",
+ "@babel/plugin-syntax-json-strings": "^7.8.0",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.0",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.0",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.0",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.0",
+ "@babel/plugin-syntax-top-level-await": "^7.8.3",
+ "@babel/plugin-transform-arrow-functions": "^7.8.3",
+ "@babel/plugin-transform-async-to-generator": "^7.8.3",
+ "@babel/plugin-transform-block-scoped-functions": "^7.8.3",
+ "@babel/plugin-transform-block-scoping": "^7.8.3",
+ "@babel/plugin-transform-classes": "^7.8.3",
+ "@babel/plugin-transform-computed-properties": "^7.8.3",
+ "@babel/plugin-transform-destructuring": "^7.8.3",
+ "@babel/plugin-transform-dotall-regex": "^7.8.3",
+ "@babel/plugin-transform-duplicate-keys": "^7.8.3",
+ "@babel/plugin-transform-exponentiation-operator": "^7.8.3",
+ "@babel/plugin-transform-for-of": "^7.8.4",
+ "@babel/plugin-transform-function-name": "^7.8.3",
+ "@babel/plugin-transform-literals": "^7.8.3",
+ "@babel/plugin-transform-member-expression-literals": "^7.8.3",
+ "@babel/plugin-transform-modules-amd": "^7.8.3",
+ "@babel/plugin-transform-modules-commonjs": "^7.8.3",
+ "@babel/plugin-transform-modules-systemjs": "^7.8.3",
+ "@babel/plugin-transform-modules-umd": "^7.8.3",
+ "@babel/plugin-transform-named-capturing-groups-regex": "^7.8.3",
+ "@babel/plugin-transform-new-target": "^7.8.3",
+ "@babel/plugin-transform-object-super": "^7.8.3",
+ "@babel/plugin-transform-parameters": "^7.8.4",
+ "@babel/plugin-transform-property-literals": "^7.8.3",
+ "@babel/plugin-transform-regenerator": "^7.8.3",
+ "@babel/plugin-transform-reserved-words": "^7.8.3",
+ "@babel/plugin-transform-shorthand-properties": "^7.8.3",
+ "@babel/plugin-transform-spread": "^7.8.3",
+ "@babel/plugin-transform-sticky-regex": "^7.8.3",
+ "@babel/plugin-transform-template-literals": "^7.8.3",
+ "@babel/plugin-transform-typeof-symbol": "^7.8.4",
+ "@babel/plugin-transform-unicode-regex": "^7.8.3",
+ "@babel/types": "^7.8.3",
+ "browserslist": "^4.8.5",
+ "core-js-compat": "^3.6.2",
+ "invariant": "^2.2.2",
+ "levenary": "^1.1.1",
+ "semver": "^5.5.0"
+ }
+ },
+ "@babel/preset-react": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.8.3.tgz",
+ "integrity": "sha512-9hx0CwZg92jGb7iHYQVgi0tOEHP/kM60CtWJQnmbATSPIQQ2xYzfoCI3EdqAhFBeeJwYMdWQuDUHMsuDbH9hyQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3",
+ "@babel/plugin-transform-react-display-name": "^7.8.3",
+ "@babel/plugin-transform-react-jsx": "^7.8.3",
+ "@babel/plugin-transform-react-jsx-self": "^7.8.3",
+ "@babel/plugin-transform-react-jsx-source": "^7.8.3"
+ }
+ },
+ "@babel/register": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/register/-/register-7.8.3.tgz",
+ "integrity": "sha512-t7UqebaWwo9nXWClIPLPloa5pN33A2leVs8Hf0e9g9YwUP8/H9NeR7DJU+4CXo23QtjChQv5a3DjEtT83ih1rg==",
+ "dev": true,
+ "requires": {
+ "find-cache-dir": "^2.0.0",
+ "lodash": "^4.17.13",
+ "make-dir": "^2.1.0",
+ "pirates": "^4.0.0",
+ "source-map-support": "^0.5.16"
+ }
+ },
+ "@babel/runtime": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.8.4.tgz",
+ "integrity": "sha512-neAp3zt80trRVBI1x0azq6c57aNBqYZH8KhMm3TaB7wEI5Q4A2SHfBHE8w9gOhI/lrqxtEbXZgQIrHP+wvSGwQ==",
+ "dev": true,
+ "requires": {
+ "regenerator-runtime": "^0.13.2"
+ },
+ "dependencies": {
+ "regenerator-runtime": {
+ "version": "0.13.3",
+ "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.3.tgz",
+ "integrity": "sha512-naKIZz2GQ8JWh///G7L3X6LaQUAMp2lvb1rvwwsURe/VXwD6VMfr+/1NuNw3ag8v2kY1aQ/go5SNn79O9JU7yw==",
+ "dev": true
+ }
+ }
+ },
+ "@babel/runtime-corejs3": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.8.4.tgz",
+ "integrity": "sha512-+wpLqy5+fbQhvbllvlJEVRIpYj+COUWnnsm+I4jZlA8Lo7/MJmBhGTCHyk1/RWfOqBRJ2MbadddG6QltTKTlrg==",
+ "dev": true,
+ "requires": {
+ "core-js-pure": "^3.0.0",
+ "regenerator-runtime": "^0.13.2"
+ },
+ "dependencies": {
+ "regenerator-runtime": {
+ "version": "0.13.3",
+ "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.3.tgz",
+ "integrity": "sha512-naKIZz2GQ8JWh///G7L3X6LaQUAMp2lvb1rvwwsURe/VXwD6VMfr+/1NuNw3ag8v2kY1aQ/go5SNn79O9JU7yw==",
+ "dev": true
+ }
+ }
+ },
+ "@babel/template": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.8.3.tgz",
+ "integrity": "sha512-04m87AcQgAFdvuoyiQ2kgELr2tV8B4fP/xJAVUL3Yb3bkNdMedD3d0rlSQr3PegP0cms3eHjl1F7PWlvWbU8FQ==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.8.3",
+ "@babel/parser": "^7.8.3",
+ "@babel/types": "^7.8.3"
+ }
+ },
+ "@babel/traverse": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.8.4.tgz",
+ "integrity": "sha512-NGLJPZwnVEyBPLI+bl9y9aSnxMhsKz42so7ApAv9D+b4vAFPpY013FTS9LdKxcABoIYFU52HcYga1pPlx454mg==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.8.3",
+ "@babel/generator": "^7.8.4",
+ "@babel/helper-function-name": "^7.8.3",
+ "@babel/helper-split-export-declaration": "^7.8.3",
+ "@babel/parser": "^7.8.4",
+ "@babel/types": "^7.8.3",
+ "debug": "^4.1.0",
+ "globals": "^11.1.0",
+ "lodash": "^4.17.13"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
+ "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
+ "dev": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ }
+ }
+ },
+ "@babel/types": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.8.3.tgz",
+ "integrity": "sha512-jBD+G8+LWpMBBWvVcdr4QysjUE4mU/syrhN17o1u3gx0/WzJB1kwiVZAXRtWbsIPOwW8pF/YJV5+nmetPzepXg==",
+ "dev": true,
+ "requires": {
+ "esutils": "^2.0.2",
+ "lodash": "^4.17.13",
+ "to-fast-properties": "^2.0.0"
+ },
+ "dependencies": {
+ "to-fast-properties": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
+ "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=",
+ "dev": true
+ }
+ }
+ },
+ "@tweenjs/tween.js": {
+ "version": "16.11.0",
+ "resolved": "https://registry.npmjs.org/@tweenjs/tween.js/-/tween.js-16.11.0.tgz",
+ "integrity": "sha1-bnqKPWx4oFfs1WBQh5MEBtTgWAA=",
+ "dev": true
+ },
+ "@types/babel-types": {
+ "version": "7.0.7",
+ "resolved": "https://registry.npmjs.org/@types/babel-types/-/babel-types-7.0.7.tgz",
+ "integrity": "sha512-dBtBbrc+qTHy1WdfHYjBwRln4+LWqASWakLHsWHR2NWHIFkv4W3O070IGoGLEBrJBvct3r0L1BUPuvURi7kYUQ=="
+ },
+ "@types/babylon": {
+ "version": "6.16.5",
+ "resolved": "https://registry.npmjs.org/@types/babylon/-/babylon-6.16.5.tgz",
+ "integrity": "sha512-xH2e58elpj1X4ynnKp9qSnWlsRTIs6n3tgLGNfwAGHwePw0mulHQllV34n0T25uYSu1k0hRKkWXF890B1yS47w==",
+ "requires": {
+ "@types/babel-types": "*"
+ }
+ },
+ "@types/bytebuffer": {
+ "version": "5.0.40",
+ "resolved": "https://registry.npmjs.org/@types/bytebuffer/-/bytebuffer-5.0.40.tgz",
+ "integrity": "sha512-h48dyzZrPMz25K6Q4+NCwWaxwXany2FhQg/ErOcdZS1ZpsaDnDMZg8JYLMTGz7uvXKrcKGJUZJlZObyfgdaN9g==",
+ "requires": {
+ "@types/long": "*",
+ "@types/node": "*"
+ }
+ },
+ "@types/jquery": {
+ "version": "2.0.54",
+ "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-2.0.54.tgz",
+ "integrity": "sha512-D/PomKwNkDfSKD13DEVQT/pq2TUjN54c6uB341fEZanIzkjfGe7UaFuuaLZbpEiS5j7Wk2MUHAZqZIoECw29lg==",
+ "dev": true
+ },
+ "@types/long": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.1.tgz",
+ "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w=="
+ },
+ "@types/node": {
+ "version": "13.7.0",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-13.7.0.tgz",
+ "integrity": "sha512-GnZbirvmqZUzMgkFn70c74OQpTTUcCzlhQliTzYjQMqg+hVKcDnxdL19Ne3UdYzdMA/+W3eb646FWn/ZaT1NfQ=="
+ },
+ "@types/normalize-package-data": {
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.0.tgz",
+ "integrity": "sha512-f5j5b/Gf71L+dbqxIpQ4Z2WlmI/mPJ0fOkGGmFgtb6sAu97EPczzbS3/tJKxmcYDj55OX6ssqwDAWOHIYDRDGA==",
+ "dev": true
+ },
+ "@webassemblyjs/ast": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.8.5.tgz",
+ "integrity": "sha512-aJMfngIZ65+t71C3y2nBBg5FFG0Okt9m0XEgWZ7Ywgn1oMAT8cNwx00Uv1cQyHtidq0Xn94R4TAywO+LCQ+ZAQ==",
+ "dev": true,
+ "requires": {
+ "@webassemblyjs/helper-module-context": "1.8.5",
+ "@webassemblyjs/helper-wasm-bytecode": "1.8.5",
+ "@webassemblyjs/wast-parser": "1.8.5"
+ }
+ },
+ "@webassemblyjs/floating-point-hex-parser": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.8.5.tgz",
+ "integrity": "sha512-9p+79WHru1oqBh9ewP9zW95E3XAo+90oth7S5Re3eQnECGq59ly1Ri5tsIipKGpiStHsUYmY3zMLqtk3gTcOtQ==",
+ "dev": true
+ },
+ "@webassemblyjs/helper-api-error": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.8.5.tgz",
+ "integrity": "sha512-Za/tnzsvnqdaSPOUXHyKJ2XI7PDX64kWtURyGiJJZKVEdFOsdKUCPTNEVFZq3zJ2R0G5wc2PZ5gvdTRFgm81zA==",
+ "dev": true
+ },
+ "@webassemblyjs/helper-buffer": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.8.5.tgz",
+ "integrity": "sha512-Ri2R8nOS0U6G49Q86goFIPNgjyl6+oE1abW1pS84BuhP1Qcr5JqMwRFT3Ah3ADDDYGEgGs1iyb1DGX+kAi/c/Q==",
+ "dev": true
+ },
+ "@webassemblyjs/helper-code-frame": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.8.5.tgz",
+ "integrity": "sha512-VQAadSubZIhNpH46IR3yWO4kZZjMxN1opDrzePLdVKAZ+DFjkGD/rf4v1jap744uPVU6yjL/smZbRIIJTOUnKQ==",
+ "dev": true,
+ "requires": {
+ "@webassemblyjs/wast-printer": "1.8.5"
+ }
+ },
+ "@webassemblyjs/helper-fsm": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-fsm/-/helper-fsm-1.8.5.tgz",
+ "integrity": "sha512-kRuX/saORcg8se/ft6Q2UbRpZwP4y7YrWsLXPbbmtepKr22i8Z4O3V5QE9DbZK908dh5Xya4Un57SDIKwB9eow==",
+ "dev": true
+ },
+ "@webassemblyjs/helper-module-context": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-module-context/-/helper-module-context-1.8.5.tgz",
+ "integrity": "sha512-/O1B236mN7UNEU4t9X7Pj38i4VoU8CcMHyy3l2cV/kIF4U5KoHXDVqcDuOs1ltkac90IM4vZdHc52t1x8Yfs3g==",
+ "dev": true,
+ "requires": {
+ "@webassemblyjs/ast": "1.8.5",
+ "mamacro": "^0.0.3"
+ }
+ },
+ "@webassemblyjs/helper-wasm-bytecode": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.8.5.tgz",
+ "integrity": "sha512-Cu4YMYG3Ddl72CbmpjU/wbP6SACcOPVbHN1dI4VJNJVgFwaKf1ppeFJrwydOG3NDHxVGuCfPlLZNyEdIYlQ6QQ==",
+ "dev": true
+ },
+ "@webassemblyjs/helper-wasm-section": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.8.5.tgz",
+ "integrity": "sha512-VV083zwR+VTrIWWtgIUpqfvVdK4ff38loRmrdDBgBT8ADXYsEZ5mPQ4Nde90N3UYatHdYoDIFb7oHzMncI02tA==",
+ "dev": true,
+ "requires": {
+ "@webassemblyjs/ast": "1.8.5",
+ "@webassemblyjs/helper-buffer": "1.8.5",
+ "@webassemblyjs/helper-wasm-bytecode": "1.8.5",
+ "@webassemblyjs/wasm-gen": "1.8.5"
+ }
+ },
+ "@webassemblyjs/ieee754": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.8.5.tgz",
+ "integrity": "sha512-aaCvQYrvKbY/n6wKHb/ylAJr27GglahUO89CcGXMItrOBqRarUMxWLJgxm9PJNuKULwN5n1csT9bYoMeZOGF3g==",
+ "dev": true,
+ "requires": {
+ "@xtuc/ieee754": "^1.2.0"
+ }
+ },
+ "@webassemblyjs/leb128": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.8.5.tgz",
+ "integrity": "sha512-plYUuUwleLIziknvlP8VpTgO4kqNaH57Y3JnNa6DLpu/sGcP6hbVdfdX5aHAV716pQBKrfuU26BJK29qY37J7A==",
+ "dev": true,
+ "requires": {
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "@webassemblyjs/utf8": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.8.5.tgz",
+ "integrity": "sha512-U7zgftmQriw37tfD934UNInokz6yTmn29inT2cAetAsaU9YeVCveWEwhKL1Mg4yS7q//NGdzy79nlXh3bT8Kjw==",
+ "dev": true
+ },
+ "@webassemblyjs/wasm-edit": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.8.5.tgz",
+ "integrity": "sha512-A41EMy8MWw5yvqj7MQzkDjU29K7UJq1VrX2vWLzfpRHt3ISftOXqrtojn7nlPsZ9Ijhp5NwuODuycSvfAO/26Q==",
+ "dev": true,
+ "requires": {
+ "@webassemblyjs/ast": "1.8.5",
+ "@webassemblyjs/helper-buffer": "1.8.5",
+ "@webassemblyjs/helper-wasm-bytecode": "1.8.5",
+ "@webassemblyjs/helper-wasm-section": "1.8.5",
+ "@webassemblyjs/wasm-gen": "1.8.5",
+ "@webassemblyjs/wasm-opt": "1.8.5",
+ "@webassemblyjs/wasm-parser": "1.8.5",
+ "@webassemblyjs/wast-printer": "1.8.5"
+ }
+ },
+ "@webassemblyjs/wasm-gen": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.8.5.tgz",
+ "integrity": "sha512-BCZBT0LURC0CXDzj5FXSc2FPTsxwp3nWcqXQdOZE4U7h7i8FqtFK5Egia6f9raQLpEKT1VL7zr4r3+QX6zArWg==",
+ "dev": true,
+ "requires": {
+ "@webassemblyjs/ast": "1.8.5",
+ "@webassemblyjs/helper-wasm-bytecode": "1.8.5",
+ "@webassemblyjs/ieee754": "1.8.5",
+ "@webassemblyjs/leb128": "1.8.5",
+ "@webassemblyjs/utf8": "1.8.5"
+ }
+ },
+ "@webassemblyjs/wasm-opt": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.8.5.tgz",
+ "integrity": "sha512-HKo2mO/Uh9A6ojzu7cjslGaHaUU14LdLbGEKqTR7PBKwT6LdPtLLh9fPY33rmr5wcOMrsWDbbdCHq4hQUdd37Q==",
+ "dev": true,
+ "requires": {
+ "@webassemblyjs/ast": "1.8.5",
+ "@webassemblyjs/helper-buffer": "1.8.5",
+ "@webassemblyjs/wasm-gen": "1.8.5",
+ "@webassemblyjs/wasm-parser": "1.8.5"
+ }
+ },
+ "@webassemblyjs/wasm-parser": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.8.5.tgz",
+ "integrity": "sha512-pi0SYE9T6tfcMkthwcgCpL0cM9nRYr6/6fjgDtL6q/ZqKHdMWvxitRi5JcZ7RI4SNJJYnYNaWy5UUrHQy998lw==",
+ "dev": true,
+ "requires": {
+ "@webassemblyjs/ast": "1.8.5",
+ "@webassemblyjs/helper-api-error": "1.8.5",
+ "@webassemblyjs/helper-wasm-bytecode": "1.8.5",
+ "@webassemblyjs/ieee754": "1.8.5",
+ "@webassemblyjs/leb128": "1.8.5",
+ "@webassemblyjs/utf8": "1.8.5"
+ }
+ },
+ "@webassemblyjs/wast-parser": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-parser/-/wast-parser-1.8.5.tgz",
+ "integrity": "sha512-daXC1FyKWHF1i11obK086QRlsMsY4+tIOKgBqI1lxAnkp9xe9YMcgOxm9kLe+ttjs5aWV2KKE1TWJCN57/Btsg==",
+ "dev": true,
+ "requires": {
+ "@webassemblyjs/ast": "1.8.5",
+ "@webassemblyjs/floating-point-hex-parser": "1.8.5",
+ "@webassemblyjs/helper-api-error": "1.8.5",
+ "@webassemblyjs/helper-code-frame": "1.8.5",
+ "@webassemblyjs/helper-fsm": "1.8.5",
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "@webassemblyjs/wast-printer": {
+ "version": "1.8.5",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.8.5.tgz",
+ "integrity": "sha512-w0U0pD4EhlnvRyeJzBqaVSJAo9w/ce7/WPogeXLzGkO6hzhr4GnQIZ4W4uUt5b9ooAaXPtnXlj0gzsXEOUNYMg==",
+ "dev": true,
+ "requires": {
+ "@webassemblyjs/ast": "1.8.5",
+ "@webassemblyjs/wast-parser": "1.8.5",
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "@webpack-contrib/config-loader": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/@webpack-contrib/config-loader/-/config-loader-1.2.1.tgz",
+ "integrity": "sha512-C7XsS6bXft0aRlyt7YCLg+fm97Mb3tWd+i5fVVlEl0NW5HKy8LoXVKj3mB7ECcEHNEEdHhgzg8gxP+Or8cMj8Q==",
+ "dev": true,
+ "requires": {
+ "@webpack-contrib/schema-utils": "^1.0.0-beta.0",
+ "chalk": "^2.1.0",
+ "cosmiconfig": "^5.0.2",
+ "is-plain-obj": "^1.1.0",
+ "loud-rejection": "^1.6.0",
+ "merge-options": "^1.0.1",
+ "minimist": "^1.2.0",
+ "resolve": "^1.6.0",
+ "webpack-log": "^1.1.2"
+ }
+ },
+ "@webpack-contrib/schema-utils": {
+ "version": "1.0.0-beta.0",
+ "resolved": "https://registry.npmjs.org/@webpack-contrib/schema-utils/-/schema-utils-1.0.0-beta.0.tgz",
+ "integrity": "sha512-LonryJP+FxQQHsjGBi6W786TQB1Oym+agTpY0c+Kj8alnIw+DLUJb6SI8Y1GHGhLCH1yPRrucjObUmxNICQ1pg==",
+ "dev": true,
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-keywords": "^3.1.0",
+ "chalk": "^2.3.2",
+ "strip-ansi": "^4.0.0",
+ "text-table": "^0.2.0",
+ "webpack-log": "^1.1.2"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz",
+ "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=",
+ "dev": true
+ },
+ "strip-ansi": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz",
+ "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^3.0.0"
+ }
+ }
+ }
+ },
+ "@xtuc/ieee754": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz",
+ "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==",
+ "dev": true
+ },
+ "@xtuc/long": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz",
+ "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==",
+ "dev": true
+ },
+ "abab": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/abab/-/abab-2.0.3.tgz",
+ "integrity": "sha512-tsFzPpcttalNjFBCFMqsKYQcWxxen1pgJR56by//QwvJc4/OUS3kPOOttx2tSIfjsylB0pYu7f5D3K1RCxUnUg==",
+ "dev": true
+ },
+ "abbrev": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
+ "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==",
+ "dev": true
+ },
+ "accepts": {
+ "version": "1.3.7",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz",
+ "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==",
+ "requires": {
+ "mime-types": "~2.1.24",
+ "negotiator": "0.6.2"
+ }
+ },
+ "acorn": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-3.3.0.tgz",
+ "integrity": "sha1-ReN/s56No/JbruP/U2niu18iAXo="
+ },
+ "acorn-globals": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/acorn-globals/-/acorn-globals-3.1.0.tgz",
+ "integrity": "sha1-/YJw9x+7SZawBPqIDuXUZXOnMb8=",
+ "requires": {
+ "acorn": "^4.0.4"
+ },
+ "dependencies": {
+ "acorn": {
+ "version": "4.0.13",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-4.0.13.tgz",
+ "integrity": "sha1-EFSVrlNh1pe9GVyCUZLhrX8lN4c="
+ }
+ }
+ },
+ "acorn-jsx": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.1.0.tgz",
+ "integrity": "sha512-tMUqwBWfLFbJbizRmEcWSLw6HnFzfdJs2sOJEOwwtVPMoH/0Ay+E703oZz78VSXZiiDcZrQ5XKjPIUQixhmgVw==",
+ "dev": true
+ },
+ "acorn-walk": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-6.2.0.tgz",
+ "integrity": "sha512-7evsyfH1cLOCdAzZAd43Cic04yKydNx0cF+7tiA19p1XnLLPU4dpCQOqpjqwokFe//vS0QqfqqjCS2JkiIs0cA==",
+ "dev": true
+ },
+ "airbnb-prop-types": {
+ "version": "2.15.0",
+ "resolved": "https://registry.npmjs.org/airbnb-prop-types/-/airbnb-prop-types-2.15.0.tgz",
+ "integrity": "sha512-jUh2/hfKsRjNFC4XONQrxo/n/3GG4Tn6Hl0WlFQN5PY9OMC9loSCoAYKnZsWaP8wEfd5xcrPloK0Zg6iS1xwVA==",
+ "dev": true,
+ "requires": {
+ "array.prototype.find": "^2.1.0",
+ "function.prototype.name": "^1.1.1",
+ "has": "^1.0.3",
+ "is-regex": "^1.0.4",
+ "object-is": "^1.0.1",
+ "object.assign": "^4.1.0",
+ "object.entries": "^1.1.0",
+ "prop-types": "^15.7.2",
+ "prop-types-exact": "^1.2.0",
+ "react-is": "^16.9.0"
+ }
+ },
+ "ajv": {
+ "version": "6.11.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.11.0.tgz",
+ "integrity": "sha512-nCprB/0syFYy9fVYU1ox1l2KN8S9I+tziH8D4zdZuLT3N6RMlGSGt5FSTpAiHB/Whv8Qs1cWHma1aMKZyaHRKA==",
+ "dev": true,
+ "requires": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ }
+ },
+ "ajv-errors": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/ajv-errors/-/ajv-errors-1.0.1.tgz",
+ "integrity": "sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ==",
+ "dev": true
+ },
+ "ajv-keywords": {
+ "version": "3.4.1",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.4.1.tgz",
+ "integrity": "sha512-RO1ibKvd27e6FEShVFfPALuHI3WjSVNeK5FIsmme/LYRNxjKuNj+Dt7bucLa6NdSv3JcVTyMlm9kGR84z1XpaQ==",
+ "dev": true
+ },
+ "align-text": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/align-text/-/align-text-0.1.4.tgz",
+ "integrity": "sha1-DNkKVhCT810KmSVsIrcGlDP60Rc=",
+ "requires": {
+ "kind-of": "^3.0.2",
+ "longest": "^1.0.1",
+ "repeat-string": "^1.5.2"
+ }
+ },
+ "ansi-align": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-2.0.0.tgz",
+ "integrity": "sha1-w2rsy6VjuJzrVW82kPCx2eNUf38=",
+ "dev": true,
+ "requires": {
+ "string-width": "^2.0.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz",
+ "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=",
+ "dev": true
+ },
+ "is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=",
+ "dev": true
+ },
+ "string-width": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz",
+ "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==",
+ "dev": true,
+ "requires": {
+ "is-fullwidth-code-point": "^2.0.0",
+ "strip-ansi": "^4.0.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz",
+ "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^3.0.0"
+ }
+ }
+ }
+ },
+ "ansi-escapes": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-3.2.0.tgz",
+ "integrity": "sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ==",
+ "dev": true
+ },
+ "ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8="
+ },
+ "ansi-styles": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-1.0.0.tgz",
+ "integrity": "sha1-yxAt8cVvUSPquLZ817mAJ6AnkXg="
+ },
+ "anymatch": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz",
+ "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==",
+ "dev": true,
+ "requires": {
+ "micromatch": "^3.1.4",
+ "normalize-path": "^2.1.1"
+ },
+ "dependencies": {
+ "normalize-path": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz",
+ "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=",
+ "dev": true,
+ "requires": {
+ "remove-trailing-separator": "^1.0.1"
+ }
+ }
+ }
+ },
+ "append-transform": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/append-transform/-/append-transform-1.0.0.tgz",
+ "integrity": "sha512-P009oYkeHyU742iSZJzZZywj4QRJdnTWffaKuJQLablCZ1uz6/cW4yaRgcDaoQ+uwOxxnt0gRUcwfsNP2ri0gw==",
+ "dev": true,
+ "requires": {
+ "default-require-extensions": "^2.0.0"
+ }
+ },
+ "aproba": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz",
+ "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==",
+ "dev": true
+ },
+ "archy": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/archy/-/archy-1.0.0.tgz",
+ "integrity": "sha1-+cjBN1fMHde8N5rHeyxipcKGjEA=",
+ "dev": true
+ },
+ "are-we-there-yet": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz",
+ "integrity": "sha512-5hYdAkZlcG8tOLujVDTgCT+uPX0VnpAH28gWsLfzpXYm7wP6mp5Q/gYyR7YQ0cKVJcXJnl3j2kpBan13PtQf6w==",
+ "dev": true,
+ "requires": {
+ "delegates": "^1.0.0",
+ "readable-stream": "^2.0.6"
+ }
+ },
+ "argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "dev": true,
+ "requires": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "aria-query": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-3.0.0.tgz",
+ "integrity": "sha1-ZbP8wcoRVajJrmTW7uKX8V1RM8w=",
+ "dev": true,
+ "requires": {
+ "ast-types-flow": "0.0.7",
+ "commander": "^2.11.0"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "2.20.3",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
+ "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==",
+ "dev": true
+ }
+ }
+ },
+ "arr-diff": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz",
+ "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=",
+ "dev": true
+ },
+ "arr-flatten": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz",
+ "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==",
+ "dev": true
+ },
+ "arr-union": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz",
+ "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=",
+ "dev": true
+ },
+ "array-differ": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/array-differ/-/array-differ-2.1.0.tgz",
+ "integrity": "sha512-KbUpJgx909ZscOc/7CLATBFam7P1Z1QRQInvgT0UztM9Q72aGKCunKASAl7WNW0tnPmPyEMeMhdsfWhfmW037w==",
+ "dev": true
+ },
+ "array-equal": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/array-equal/-/array-equal-1.0.0.tgz",
+ "integrity": "sha1-jCpe8kcv2ep0KwTHenUJO6J1fJM=",
+ "dev": true
+ },
+ "array-filter": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/array-filter/-/array-filter-1.0.0.tgz",
+ "integrity": "sha1-uveeYubvTCpMC4MSMtr/7CUfnYM=",
+ "dev": true
+ },
+ "array-find-index": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/array-find-index/-/array-find-index-1.0.2.tgz",
+ "integrity": "sha1-3wEKoSh+Fku9pvlyOwqWoexBh6E=",
+ "dev": true
+ },
+ "array-flatten": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
+ "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
+ },
+ "array-includes": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.1.tgz",
+ "integrity": "sha512-c2VXaCHl7zPsvpkFsw4nxvFie4fh1ur9bpcgsVkIjqn0H/Xwdg+7fv3n2r/isyS8EBj5b06M9kHyZuIr4El6WQ==",
+ "dev": true,
+ "requires": {
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.17.0",
+ "is-string": "^1.0.5"
+ }
+ },
+ "array-union": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz",
+ "integrity": "sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk=",
+ "dev": true,
+ "requires": {
+ "array-uniq": "^1.0.1"
+ }
+ },
+ "array-uniq": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/array-uniq/-/array-uniq-1.0.3.tgz",
+ "integrity": "sha1-r2rId6Jcx/dOBYiUdThY39sk/bY=",
+ "dev": true
+ },
+ "array-unique": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz",
+ "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=",
+ "dev": true
+ },
+ "array.prototype.find": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/array.prototype.find/-/array.prototype.find-2.1.0.tgz",
+ "integrity": "sha512-Wn41+K1yuO5p7wRZDl7890c3xvv5UBrfVXTVIe28rSQb6LS0fZMDrQB6PAcxQFRFy6vJTLDc3A2+3CjQdzVKRg==",
+ "dev": true,
+ "requires": {
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.13.0"
+ }
+ },
+ "array.prototype.flat": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.3.tgz",
+ "integrity": "sha512-gBlRZV0VSmfPIeWfuuy56XZMvbVfbEUnOXUvt3F/eUUUSyzlgLxhEX4YAEpxNAogRGehPSnfXyPtYyKAhkzQhQ==",
+ "dev": true,
+ "requires": {
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.17.0-next.1"
+ }
+ },
+ "arrify": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz",
+ "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=",
+ "dev": true
+ },
+ "asap": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz",
+ "integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY="
+ },
+ "ascli": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/ascli/-/ascli-1.0.1.tgz",
+ "integrity": "sha1-vPpZdKYvGOgcq660lzKrSoj5Brw=",
+ "requires": {
+ "colour": "~0.7.1",
+ "optjs": "~3.2.2"
+ }
+ },
+ "asn1": {
+ "version": "0.2.4",
+ "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz",
+ "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==",
+ "dev": true,
+ "requires": {
+ "safer-buffer": "~2.1.0"
+ }
+ },
+ "asn1.js": {
+ "version": "4.10.1",
+ "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-4.10.1.tgz",
+ "integrity": "sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==",
+ "dev": true,
+ "requires": {
+ "bn.js": "^4.0.0",
+ "inherits": "^2.0.1",
+ "minimalistic-assert": "^1.0.0"
+ }
+ },
+ "assert": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/assert/-/assert-1.5.0.tgz",
+ "integrity": "sha512-EDsgawzwoun2CZkCgtxJbv392v4nbk9XDD06zI+kQYoBM/3RBWLlEyJARDOmhAAosBjWACEkKL6S+lIZtcAubA==",
+ "dev": true,
+ "requires": {
+ "object-assign": "^4.1.1",
+ "util": "0.10.3"
+ },
+ "dependencies": {
+ "inherits": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz",
+ "integrity": "sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE=",
+ "dev": true
+ },
+ "util": {
+ "version": "0.10.3",
+ "resolved": "https://registry.npmjs.org/util/-/util-0.10.3.tgz",
+ "integrity": "sha1-evsa/lCAUkZInj23/g7TeTNqwPk=",
+ "dev": true,
+ "requires": {
+ "inherits": "2.0.1"
+ }
+ }
+ }
+ },
+ "assert-plus": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz",
+ "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=",
+ "dev": true
+ },
+ "assertion-error": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz",
+ "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==",
+ "dev": true
+ },
+ "assets-webpack-plugin": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/assets-webpack-plugin/-/assets-webpack-plugin-4.0.0.tgz",
+ "integrity": "sha512-0Mhe40xK7MkbQGp3D3zrRXNB27Y4MTYlkJyXlPwN8vFgUawtuLS/2Yip7un0V+4yxPh9RsKKbkkAmatoep0qZw==",
+ "dev": true,
+ "requires": {
+ "camelcase": "^5.0.0",
+ "escape-string-regexp": "^1.0.3",
+ "lodash.assign": "^4.2.0",
+ "lodash.merge": "^4.6.1",
+ "mkdirp": "^0.5.1"
+ },
+ "dependencies": {
+ "camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "dev": true
+ }
+ }
+ },
+ "assign-symbols": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz",
+ "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=",
+ "dev": true
+ },
+ "ast-types-flow": {
+ "version": "0.0.7",
+ "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.7.tgz",
+ "integrity": "sha1-9wtzXGvKGlycItmCw+Oef+ujva0=",
+ "dev": true
+ },
+ "astral-regex": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-1.0.0.tgz",
+ "integrity": "sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==",
+ "dev": true
+ },
+ "async": {
+ "version": "2.6.3",
+ "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz",
+ "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==",
+ "requires": {
+ "lodash": "^4.17.14"
+ }
+ },
+ "async-each": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.3.tgz",
+ "integrity": "sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ==",
+ "dev": true
+ },
+ "async-limiter": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz",
+ "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==",
+ "dev": true
+ },
+ "asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=",
+ "dev": true
+ },
+ "atob": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz",
+ "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==",
+ "dev": true
+ },
+ "aws-sign2": {
+ "version": "0.7.0",
+ "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz",
+ "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=",
+ "dev": true
+ },
+ "aws4": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.9.1.tgz",
+ "integrity": "sha512-wMHVg2EOHaMRxbzgFJ9gtjOOCrI80OHLG14rxi28XwOW8ux6IiEbRCGGGqCtdAIg4FQCbW20k9RsT4y3gJlFug==",
+ "dev": true
+ },
+ "axios": {
+ "version": "0.18.1",
+ "resolved": "https://registry.npmjs.org/axios/-/axios-0.18.1.tgz",
+ "integrity": "sha512-0BfJq4NSfQXd+SkFdrvFbG7addhYSBA2mQwISr46pD6E5iqkWg02RAs8vyTT/j0RTnoYmeXauBuSv1qKwR179g==",
+ "requires": {
+ "follow-redirects": "1.5.10",
+ "is-buffer": "^2.0.2"
+ }
+ },
+ "axios-mock-adapter": {
+ "version": "1.17.0",
+ "resolved": "https://registry.npmjs.org/axios-mock-adapter/-/axios-mock-adapter-1.17.0.tgz",
+ "integrity": "sha512-q3efmwJUOO4g+wsLNSk9Ps1UlJoF3fQ3FSEe4uEEhkRtu7SoiAVPj8R3Hc/WP55MBTVFzaDP9QkdJhdVhP8A1Q==",
+ "dev": true,
+ "requires": {
+ "deep-equal": "^1.0.1"
+ }
+ },
+ "axobject-query": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-2.1.1.tgz",
+ "integrity": "sha512-lF98xa/yvy6j3fBHAgQXIYl+J4eZadOSqsPojemUqClzNbBV38wWGpUbQbVEyf4eUF5yF7eHmGgGA2JiHyjeqw==",
+ "dev": true,
+ "requires": {
+ "@babel/runtime": "^7.7.4",
+ "@babel/runtime-corejs3": "^7.7.4"
+ }
+ },
+ "babel-eslint": {
+ "version": "9.0.0",
+ "resolved": "https://registry.npmjs.org/babel-eslint/-/babel-eslint-9.0.0.tgz",
+ "integrity": "sha512-itv1MwE3TMbY0QtNfeL7wzak1mV47Uy+n6HtSOO4Xd7rvmO+tsGQSgyOEEgo6Y2vHZKZphaoelNeSVj4vkLA1g==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.0.0",
+ "@babel/parser": "^7.0.0",
+ "@babel/traverse": "^7.0.0",
+ "@babel/types": "^7.0.0",
+ "eslint-scope": "3.7.1",
+ "eslint-visitor-keys": "^1.0.0"
+ }
+ },
+ "babel-loader": {
+ "version": "8.0.6",
+ "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.0.6.tgz",
+ "integrity": "sha512-4BmWKtBOBm13uoUwd08UwjZlaw3O9GWf456R9j+5YykFZ6LUIjIKLc0zEZf+hauxPOJs96C8k6FvYD09vWzhYw==",
+ "dev": true,
+ "requires": {
+ "find-cache-dir": "^2.0.0",
+ "loader-utils": "^1.0.2",
+ "mkdirp": "^0.5.1",
+ "pify": "^4.0.1"
+ }
+ },
+ "babel-plugin-dynamic-import-node": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-1.2.0.tgz",
+ "integrity": "sha512-yeDwKaLgGdTpXL7RgGt5r6T4LmnTza/hUn5Ul8uZSGGMtEjYo13Nxai7SQaGCTEzUtg9Zq9qJn0EjEr7SeSlTQ==",
+ "dev": true,
+ "requires": {
+ "babel-plugin-syntax-dynamic-import": "^6.18.0"
+ }
+ },
+ "babel-plugin-lodash": {
+ "version": "3.3.4",
+ "resolved": "https://registry.npmjs.org/babel-plugin-lodash/-/babel-plugin-lodash-3.3.4.tgz",
+ "integrity": "sha512-yDZLjK7TCkWl1gpBeBGmuaDIFhZKmkoL+Cu2MUUjv5VxUZx/z7tBGBCBcQs5RI1Bkz5LLmNdjx7paOyQtMovyg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-module-imports": "^7.0.0-beta.49",
+ "@babel/types": "^7.0.0-beta.49",
+ "glob": "^7.1.1",
+ "lodash": "^4.17.10",
+ "require-package-name": "^2.0.1"
+ }
+ },
+ "babel-plugin-syntax-dynamic-import": {
+ "version": "6.18.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-syntax-dynamic-import/-/babel-plugin-syntax-dynamic-import-6.18.0.tgz",
+ "integrity": "sha1-jWomIpyDdFqZgqRBBRVyyqF5sdo=",
+ "dev": true
+ },
+ "babel-runtime": {
+ "version": "6.26.0",
+ "resolved": "https://registry.npmjs.org/babel-runtime/-/babel-runtime-6.26.0.tgz",
+ "integrity": "sha1-llxwWGaOgrVde/4E/yM3vItWR/4=",
+ "requires": {
+ "core-js": "^2.4.0",
+ "regenerator-runtime": "^0.11.0"
+ }
+ },
+ "babel-types": {
+ "version": "6.26.0",
+ "resolved": "https://registry.npmjs.org/babel-types/-/babel-types-6.26.0.tgz",
+ "integrity": "sha1-o7Bz+Uq0nrb6Vc1lInozQ4BjJJc=",
+ "requires": {
+ "babel-runtime": "^6.26.0",
+ "esutils": "^2.0.2",
+ "lodash": "^4.17.4",
+ "to-fast-properties": "^1.0.3"
+ }
+ },
+ "babylon": {
+ "version": "6.18.0",
+ "resolved": "https://registry.npmjs.org/babylon/-/babylon-6.18.0.tgz",
+ "integrity": "sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ=="
+ },
+ "balanced-match": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz",
+ "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c="
+ },
+ "base": {
+ "version": "0.11.2",
+ "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz",
+ "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==",
+ "dev": true,
+ "requires": {
+ "cache-base": "^1.0.1",
+ "class-utils": "^0.3.5",
+ "component-emitter": "^1.2.1",
+ "define-property": "^1.0.0",
+ "isobject": "^3.0.1",
+ "mixin-deep": "^1.2.0",
+ "pascalcase": "^0.1.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^1.0.0"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ },
+ "kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
+ "dev": true
+ }
+ }
+ },
+ "base64-js": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.1.tgz",
+ "integrity": "sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g==",
+ "dev": true
+ },
+ "bcrypt-pbkdf": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz",
+ "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=",
+ "dev": true,
+ "requires": {
+ "tweetnacl": "^0.14.3"
+ }
+ },
+ "bfj": {
+ "version": "6.1.2",
+ "resolved": "https://registry.npmjs.org/bfj/-/bfj-6.1.2.tgz",
+ "integrity": "sha512-BmBJa4Lip6BPRINSZ0BPEIfB1wUY/9rwbwvIHQA1KjX9om29B6id0wnWXq7m3bn5JrUVjeOTnVuhPT1FiHwPGw==",
+ "dev": true,
+ "requires": {
+ "bluebird": "^3.5.5",
+ "check-types": "^8.0.3",
+ "hoopy": "^0.1.4",
+ "tryer": "^1.0.1"
+ }
+ },
+ "big.js": {
+ "version": "5.2.2",
+ "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz",
+ "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==",
+ "dev": true
+ },
+ "binary-extensions": {
+ "version": "1.13.1",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.1.tgz",
+ "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==",
+ "dev": true
+ },
+ "bindings": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz",
+ "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==",
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "file-uri-to-path": "1.0.0"
+ }
+ },
+ "bluebird": {
+ "version": "3.7.2",
+ "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz",
+ "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==",
+ "dev": true
+ },
+ "bn.js": {
+ "version": "4.11.8",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz",
+ "integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA==",
+ "dev": true
+ },
+ "body-parser": {
+ "version": "1.19.0",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz",
+ "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==",
+ "requires": {
+ "bytes": "3.1.0",
+ "content-type": "~1.0.4",
+ "debug": "2.6.9",
+ "depd": "~1.1.2",
+ "http-errors": "1.7.2",
+ "iconv-lite": "0.4.24",
+ "on-finished": "~2.3.0",
+ "qs": "6.7.0",
+ "raw-body": "2.4.0",
+ "type-is": "~1.6.17"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "qs": {
+ "version": "6.7.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz",
+ "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ=="
+ }
+ }
+ },
+ "boolbase": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz",
+ "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24=",
+ "dev": true
+ },
+ "boxen": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/boxen/-/boxen-1.3.0.tgz",
+ "integrity": "sha512-TNPjfTr432qx7yOjQyaXm3dSR0MH9vXp7eT1BFSl/C51g+EFnOR9hTg1IreahGBmDNCehscshe45f+C1TBZbLw==",
+ "dev": true,
+ "requires": {
+ "ansi-align": "^2.0.0",
+ "camelcase": "^4.0.0",
+ "chalk": "^2.0.1",
+ "cli-boxes": "^1.0.0",
+ "string-width": "^2.0.0",
+ "term-size": "^1.2.0",
+ "widest-line": "^2.0.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz",
+ "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=",
+ "dev": true
+ },
+ "camelcase": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-4.1.0.tgz",
+ "integrity": "sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0=",
+ "dev": true
+ },
+ "is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=",
+ "dev": true
+ },
+ "string-width": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz",
+ "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==",
+ "dev": true,
+ "requires": {
+ "is-fullwidth-code-point": "^2.0.0",
+ "strip-ansi": "^4.0.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz",
+ "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^3.0.0"
+ }
+ }
+ }
+ },
+ "brace-expansion": {
+ "version": "1.1.11",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+ "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "requires": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "braces": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz",
+ "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==",
+ "dev": true,
+ "requires": {
+ "arr-flatten": "^1.1.0",
+ "array-unique": "^0.3.2",
+ "extend-shallow": "^2.0.1",
+ "fill-range": "^4.0.0",
+ "isobject": "^3.0.1",
+ "repeat-element": "^1.1.2",
+ "snapdragon": "^0.8.1",
+ "snapdragon-node": "^2.0.1",
+ "split-string": "^3.0.2",
+ "to-regex": "^3.0.1"
+ },
+ "dependencies": {
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dev": true,
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ }
+ }
+ },
+ "brorand": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz",
+ "integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=",
+ "dev": true
+ },
+ "browser-process-hrtime": {
+ "version": "0.1.3",
+ "resolved": "https://registry.npmjs.org/browser-process-hrtime/-/browser-process-hrtime-0.1.3.tgz",
+ "integrity": "sha512-bRFnI4NnjO6cnyLmOV/7PVoDEMJChlcfN0z4s1YMBY989/SvlfMI1lgCnkFUs53e9gQF+w7qu7XdllSTiSl8Aw==",
+ "dev": true
+ },
+ "browser-stdout": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz",
+ "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==",
+ "dev": true
+ },
+ "browserify-aes": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz",
+ "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==",
+ "dev": true,
+ "requires": {
+ "buffer-xor": "^1.0.3",
+ "cipher-base": "^1.0.0",
+ "create-hash": "^1.1.0",
+ "evp_bytestokey": "^1.0.3",
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "browserify-cipher": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz",
+ "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==",
+ "dev": true,
+ "requires": {
+ "browserify-aes": "^1.0.4",
+ "browserify-des": "^1.0.0",
+ "evp_bytestokey": "^1.0.0"
+ }
+ },
+ "browserify-des": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz",
+ "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==",
+ "dev": true,
+ "requires": {
+ "cipher-base": "^1.0.1",
+ "des.js": "^1.0.0",
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.1.2"
+ }
+ },
+ "browserify-rsa": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.0.1.tgz",
+ "integrity": "sha1-IeCr+vbyApzy+vsTNWenAdQTVSQ=",
+ "dev": true,
+ "requires": {
+ "bn.js": "^4.1.0",
+ "randombytes": "^2.0.1"
+ }
+ },
+ "browserify-sign": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.0.4.tgz",
+ "integrity": "sha1-qk62jl17ZYuqa/alfmMMvXqT0pg=",
+ "dev": true,
+ "requires": {
+ "bn.js": "^4.1.1",
+ "browserify-rsa": "^4.0.0",
+ "create-hash": "^1.1.0",
+ "create-hmac": "^1.1.2",
+ "elliptic": "^6.0.0",
+ "inherits": "^2.0.1",
+ "parse-asn1": "^5.0.0"
+ }
+ },
+ "browserify-zlib": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz",
+ "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==",
+ "dev": true,
+ "requires": {
+ "pako": "~1.0.5"
+ }
+ },
+ "browserslist": {
+ "version": "4.8.6",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.8.6.tgz",
+ "integrity": "sha512-ZHao85gf0eZ0ESxLfCp73GG9O/VTytYDIkIiZDlURppLTI9wErSM/5yAKEq6rcUdxBLjMELmrYUJGg5sxGKMHg==",
+ "dev": true,
+ "requires": {
+ "caniuse-lite": "^1.0.30001023",
+ "electron-to-chromium": "^1.3.341",
+ "node-releases": "^1.1.47"
+ }
+ },
+ "buffer": {
+ "version": "4.9.2",
+ "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz",
+ "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==",
+ "dev": true,
+ "requires": {
+ "base64-js": "^1.0.2",
+ "ieee754": "^1.1.4",
+ "isarray": "^1.0.0"
+ }
+ },
+ "buffer-from": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz",
+ "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==",
+ "dev": true
+ },
+ "buffer-xor": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz",
+ "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk=",
+ "dev": true
+ },
+ "builtin-status-codes": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz",
+ "integrity": "sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug=",
+ "dev": true
+ },
+ "bytebuffer": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/bytebuffer/-/bytebuffer-5.0.1.tgz",
+ "integrity": "sha1-WC7qSxqHO20CCkjVjfhfC7ps/d0=",
+ "requires": {
+ "long": "~3"
+ }
+ },
+ "bytes": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz",
+ "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg=="
+ },
+ "cacache": {
+ "version": "12.0.3",
+ "resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.3.tgz",
+ "integrity": "sha512-kqdmfXEGFepesTuROHMs3MpFLWrPkSSpRqOw80RCflZXy/khxaArvFrQ7uJxSUduzAufc6G0g1VUCOZXxWavPw==",
+ "dev": true,
+ "requires": {
+ "bluebird": "^3.5.5",
+ "chownr": "^1.1.1",
+ "figgy-pudding": "^3.5.1",
+ "glob": "^7.1.4",
+ "graceful-fs": "^4.1.15",
+ "infer-owner": "^1.0.3",
+ "lru-cache": "^5.1.1",
+ "mississippi": "^3.0.0",
+ "mkdirp": "^0.5.1",
+ "move-concurrently": "^1.0.1",
+ "promise-inflight": "^1.0.1",
+ "rimraf": "^2.6.3",
+ "ssri": "^6.0.1",
+ "unique-filename": "^1.1.1",
+ "y18n": "^4.0.0"
+ },
+ "dependencies": {
+ "lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "dev": true,
+ "requires": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "y18n": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.0.tgz",
+ "integrity": "sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==",
+ "dev": true
+ }
+ }
+ },
+ "cache-base": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz",
+ "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==",
+ "dev": true,
+ "requires": {
+ "collection-visit": "^1.0.0",
+ "component-emitter": "^1.2.1",
+ "get-value": "^2.0.6",
+ "has-value": "^1.0.0",
+ "isobject": "^3.0.1",
+ "set-value": "^2.0.0",
+ "to-object-path": "^0.3.0",
+ "union-value": "^1.0.0",
+ "unset-value": "^1.0.0"
+ }
+ },
+ "caching-transform": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/caching-transform/-/caching-transform-3.0.2.tgz",
+ "integrity": "sha512-Mtgcv3lh3U0zRii/6qVgQODdPA4G3zhG+jtbCWj39RXuUFTMzH0vcdMtaJS1jPowd+It2Pqr6y3NJMQqOqCE2w==",
+ "dev": true,
+ "requires": {
+ "hasha": "^3.0.0",
+ "make-dir": "^2.0.0",
+ "package-hash": "^3.0.0",
+ "write-file-atomic": "^2.4.2"
+ }
+ },
+ "caller-callsite": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/caller-callsite/-/caller-callsite-2.0.0.tgz",
+ "integrity": "sha1-hH4PzgoiN1CpoCfFSzNzGtMVQTQ=",
+ "dev": true,
+ "requires": {
+ "callsites": "^2.0.0"
+ },
+ "dependencies": {
+ "callsites": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-2.0.0.tgz",
+ "integrity": "sha1-BuuE8A7qQT2oav/vrL/7Ngk7PFA=",
+ "dev": true
+ }
+ }
+ },
+ "caller-path": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/caller-path/-/caller-path-2.0.0.tgz",
+ "integrity": "sha1-Ro+DBE42mrIBD6xfBs7uFbsssfQ=",
+ "dev": true,
+ "requires": {
+ "caller-callsite": "^2.0.0"
+ }
+ },
+ "callsites": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
+ "dev": true
+ },
+ "camelcase": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-2.1.1.tgz",
+ "integrity": "sha1-fB0W1nmhu+WcoCys7PsBHiAfWh8="
+ },
+ "camelcase-keys": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-4.2.0.tgz",
+ "integrity": "sha1-oqpfsa9oh1glnDLBQUJteJI7m3c=",
+ "dev": true,
+ "requires": {
+ "camelcase": "^4.1.0",
+ "map-obj": "^2.0.0",
+ "quick-lru": "^1.0.0"
+ },
+ "dependencies": {
+ "camelcase": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-4.1.0.tgz",
+ "integrity": "sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0=",
+ "dev": true
+ }
+ }
+ },
+ "caniuse-lite": {
+ "version": "1.0.30001023",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001023.tgz",
+ "integrity": "sha512-C5TDMiYG11EOhVOA62W1p3UsJ2z4DsHtMBQtjzp3ZsUglcQn62WOUgW0y795c7A5uZ+GCEIvzkMatLIlAsbNTA==",
+ "dev": true
+ },
+ "canvas": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/canvas/-/canvas-2.6.1.tgz",
+ "integrity": "sha512-S98rKsPcuhfTcYbtF53UIJhcbgIAK533d1kJKMwsMwAIFgfd58MOyxRud3kktlzWiEkFliaJtvyZCBtud/XVEA==",
+ "dev": true,
+ "requires": {
+ "nan": "^2.14.0",
+ "node-pre-gyp": "^0.11.0",
+ "simple-get": "^3.0.3"
+ }
+ },
+ "capture-stack-trace": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/capture-stack-trace/-/capture-stack-trace-1.0.1.tgz",
+ "integrity": "sha512-mYQLZnx5Qt1JgB1WEiMCf2647plpGeQ2NMR/5L0HNZzGQo4fuSPnK+wjfPnKZV0aiJDgzmWqqkV/g7JD+DW0qw==",
+ "dev": true
+ },
+ "caseless": {
+ "version": "0.12.0",
+ "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz",
+ "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=",
+ "dev": true
+ },
+ "center-align": {
+ "version": "0.1.3",
+ "resolved": "https://registry.npmjs.org/center-align/-/center-align-0.1.3.tgz",
+ "integrity": "sha1-qg0yYptu6XIgBBHL1EYckHvCt60=",
+ "requires": {
+ "align-text": "^0.1.3",
+ "lazy-cache": "^1.0.3"
+ }
+ },
+ "chai": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/chai/-/chai-4.2.0.tgz",
+ "integrity": "sha512-XQU3bhBukrOsQCuwZndwGcCVQHyZi53fQ6Ys1Fym7E4olpIqqZZhhoFJoaKVvV17lWQoXYwgWN2nF5crA8J2jw==",
+ "dev": true,
+ "requires": {
+ "assertion-error": "^1.1.0",
+ "check-error": "^1.0.2",
+ "deep-eql": "^3.0.1",
+ "get-func-name": "^2.0.0",
+ "pathval": "^1.1.0",
+ "type-detect": "^4.0.5"
+ }
+ },
+ "chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ },
+ "dependencies": {
+ "ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "dev": true,
+ "requires": {
+ "color-convert": "^1.9.0"
+ }
+ }
+ }
+ },
+ "character-parser": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/character-parser/-/character-parser-2.2.0.tgz",
+ "integrity": "sha1-x84o821LzZdE5f/CxfzeHHMmH8A=",
+ "requires": {
+ "is-regex": "^1.0.3"
+ }
+ },
+ "chardet": {
+ "version": "0.7.0",
+ "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz",
+ "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==",
+ "dev": true
+ },
+ "chart.js": {
+ "version": "2.9.3",
+ "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-2.9.3.tgz",
+ "integrity": "sha512-+2jlOobSk52c1VU6fzkh3UwqHMdSlgH1xFv9FKMqHiNCpXsGPQa/+81AFa+i3jZ253Mq9aAycPwDjnn1XbRNNw==",
+ "dev": true,
+ "requires": {
+ "chartjs-color": "^2.1.0",
+ "moment": "^2.10.2"
+ }
+ },
+ "chartjs-color": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/chartjs-color/-/chartjs-color-2.4.1.tgz",
+ "integrity": "sha512-haqOg1+Yebys/Ts/9bLo/BqUcONQOdr/hoEr2LLTRl6C5LXctUdHxsCYfvQVg5JIxITrfCNUDr4ntqmQk9+/0w==",
+ "dev": true,
+ "requires": {
+ "chartjs-color-string": "^0.6.0",
+ "color-convert": "^1.9.3"
+ }
+ },
+ "chartjs-color-string": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/chartjs-color-string/-/chartjs-color-string-0.6.0.tgz",
+ "integrity": "sha512-TIB5OKn1hPJvO7JcteW4WY/63v6KwEdt6udfnDE9iCAZgy+V4SrbSxoIbTw/xkUIapjEI4ExGtD0+6D3KyFd7A==",
+ "dev": true,
+ "requires": {
+ "color-name": "^1.0.0"
+ }
+ },
+ "chartjs-plugin-zoom": {
+ "version": "0.7.5",
+ "resolved": "https://registry.npmjs.org/chartjs-plugin-zoom/-/chartjs-plugin-zoom-0.7.5.tgz",
+ "integrity": "sha512-OGVQXlw5meOD7ac+CBNO7yKg4Tk06eBb5LUIgpK/qgv7SjVB/89pWMQY3pxWnzCMI8FsoV3iTKQ2ZCOvh4+q6w==",
+ "dev": true,
+ "requires": {
+ "hammerjs": "^2.0.8"
+ }
+ },
+ "check-error": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz",
+ "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=",
+ "dev": true
+ },
+ "check-types": {
+ "version": "8.0.3",
+ "resolved": "https://registry.npmjs.org/check-types/-/check-types-8.0.3.tgz",
+ "integrity": "sha512-YpeKZngUmG65rLudJ4taU7VLkOCTMhNl/u4ctNC56LQS/zJTyNH0Lrtwm1tfTsbLlwvlfsA2d1c8vCf/Kh2KwQ==",
+ "dev": true
+ },
+ "cheerio": {
+ "version": "1.0.0-rc.3",
+ "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.3.tgz",
+ "integrity": "sha512-0td5ijfUPuubwLUu0OBoe98gZj8C/AA+RW3v67GPlGOrvxWjZmBXiBCRU+I8VEiNyJzjth40POfHiz2RB3gImA==",
+ "dev": true,
+ "requires": {
+ "css-select": "~1.2.0",
+ "dom-serializer": "~0.1.1",
+ "entities": "~1.1.1",
+ "htmlparser2": "^3.9.1",
+ "lodash": "^4.15.0",
+ "parse5": "^3.0.1"
+ }
+ },
+ "chokidar": {
+ "version": "2.1.8",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.1.8.tgz",
+ "integrity": "sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==",
+ "dev": true,
+ "requires": {
+ "anymatch": "^2.0.0",
+ "async-each": "^1.0.1",
+ "braces": "^2.3.2",
+ "fsevents": "^1.2.7",
+ "glob-parent": "^3.1.0",
+ "inherits": "^2.0.3",
+ "is-binary-path": "^1.0.0",
+ "is-glob": "^4.0.0",
+ "normalize-path": "^3.0.0",
+ "path-is-absolute": "^1.0.0",
+ "readdirp": "^2.2.1",
+ "upath": "^1.1.1"
+ }
+ },
+ "chownr": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.3.tgz",
+ "integrity": "sha512-i70fVHhmV3DtTl6nqvZOnIjbY0Pe4kAUjwHj8z0zAdgBtYrJyYwLKCCuRBQ5ppkyL0AkN7HKRnETdmdp1zqNXw==",
+ "dev": true
+ },
+ "chroma-js": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/chroma-js/-/chroma-js-1.4.1.tgz",
+ "integrity": "sha512-jTwQiT859RTFN/vIf7s+Vl/Z2LcMrvMv3WUFmd/4u76AdlFC0NTNgqEEFPcRiHmAswPsMiQEDZLM8vX8qXpZNQ==",
+ "dev": true
+ },
+ "chrome-trace-event": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.2.tgz",
+ "integrity": "sha512-9e/zx1jw7B4CO+c/RXoCsfg/x1AfUBioy4owYH0bJprEYAx5hRFLRhWBqHAG57D0ZM4H7vxbP7bPe0VwhQRYDQ==",
+ "dev": true,
+ "requires": {
+ "tslib": "^1.9.0"
+ }
+ },
+ "ci-info": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz",
+ "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==",
+ "dev": true
+ },
+ "cipher-base": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz",
+ "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==",
+ "dev": true,
+ "requires": {
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "class-utils": {
+ "version": "0.3.6",
+ "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz",
+ "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==",
+ "dev": true,
+ "requires": {
+ "arr-union": "^3.1.0",
+ "define-property": "^0.2.5",
+ "isobject": "^3.0.0",
+ "static-extend": "^0.1.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ }
+ }
+ },
+ "classnames": {
+ "version": "2.2.6",
+ "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.2.6.tgz",
+ "integrity": "sha512-JR/iSQOSt+LQIWwrwEzJ9uk0xfN3mTVYMwt1Ir5mUcSN6pU+V4zQFFaJsclJbPuAUQH+yfWef6tm7l1quW3C8Q==",
+ "dev": true
+ },
+ "clean-css": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-4.2.3.tgz",
+ "integrity": "sha512-VcMWDN54ZN/DS+g58HYL5/n4Zrqe8vHJpGA8KdgUXFU4fuP/aHNw8eld9SyEIyabIMJX/0RaY/fplOo5hYLSFA==",
+ "requires": {
+ "source-map": "~0.6.0"
+ }
+ },
+ "cli-boxes": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-1.0.0.tgz",
+ "integrity": "sha1-T6kXw+WclKAEzWH47lCdplFocUM=",
+ "dev": true
+ },
+ "cli-cursor": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-2.1.0.tgz",
+ "integrity": "sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU=",
+ "dev": true,
+ "requires": {
+ "restore-cursor": "^2.0.0"
+ }
+ },
+ "cli-spinners": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-1.3.1.tgz",
+ "integrity": "sha512-1QL4544moEsDVH9T/l6Cemov/37iv1RtoKf7NJ04A60+4MREXNfx/QvavbH6QoGdsD4N4Mwy49cmaINR/o2mdg==",
+ "dev": true
+ },
+ "cli-width": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-2.2.0.tgz",
+ "integrity": "sha1-/xnt6Kml5XkyQUewwR8PvLq+1jk=",
+ "dev": true
+ },
+ "cliui": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-3.2.0.tgz",
+ "integrity": "sha1-EgYBU3qRbSmUD5NNo7SNWFo5IT0=",
+ "requires": {
+ "string-width": "^1.0.1",
+ "strip-ansi": "^3.0.1",
+ "wrap-ansi": "^2.0.0"
+ },
+ "dependencies": {
+ "strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ }
+ }
+ },
+ "clone": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz",
+ "integrity": "sha1-G39Ln1kfHo+DZwQBYANFoCiHQ18=",
+ "dev": true
+ },
+ "code-point-at": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz",
+ "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c="
+ },
+ "collection-visit": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz",
+ "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=",
+ "dev": true,
+ "requires": {
+ "map-visit": "^1.0.0",
+ "object-visit": "^1.0.0"
+ }
+ },
+ "color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "dev": true,
+ "requires": {
+ "color-name": "1.1.3"
+ }
+ },
+ "color-hash": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/color-hash/-/color-hash-1.0.3.tgz",
+ "integrity": "sha1-wOeVLwbQIuVI5l2iOVEr1n04Ce4=",
+ "dev": true
+ },
+ "color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=",
+ "dev": true
+ },
+ "colors": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/colors/-/colors-1.0.3.tgz",
+ "integrity": "sha1-BDP0TYCWgP3rYO0mDxsMJi6CpAs="
+ },
+ "colour": {
+ "version": "0.7.1",
+ "resolved": "https://registry.npmjs.org/colour/-/colour-0.7.1.tgz",
+ "integrity": "sha1-nLFpkX7F0SwHNtPoaFdG3xyt93g="
+ },
+ "combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "dev": true,
+ "requires": {
+ "delayed-stream": "~1.0.0"
+ }
+ },
+ "commander": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz",
+ "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==",
+ "dev": true
+ },
+ "commondir": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz",
+ "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=",
+ "dev": true
+ },
+ "component-emitter": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz",
+ "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==",
+ "dev": true
+ },
+ "compressible": {
+ "version": "2.0.18",
+ "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz",
+ "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==",
+ "requires": {
+ "mime-db": ">= 1.43.0 < 2"
+ }
+ },
+ "compression": {
+ "version": "1.7.4",
+ "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz",
+ "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==",
+ "requires": {
+ "accepts": "~1.3.5",
+ "bytes": "3.0.0",
+ "compressible": "~2.0.16",
+ "debug": "2.6.9",
+ "on-headers": "~1.0.2",
+ "safe-buffer": "5.1.2",
+ "vary": "~1.1.2"
+ },
+ "dependencies": {
+ "bytes": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
+ "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg="
+ },
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ }
+ }
+ },
+ "concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s="
+ },
+ "concat-stream": {
+ "version": "1.6.2",
+ "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz",
+ "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==",
+ "dev": true,
+ "requires": {
+ "buffer-from": "^1.0.0",
+ "inherits": "^2.0.3",
+ "readable-stream": "^2.2.2",
+ "typedarray": "^0.0.6"
+ }
+ },
+ "concurrently": {
+ "version": "3.6.1",
+ "resolved": "https://registry.npmjs.org/concurrently/-/concurrently-3.6.1.tgz",
+ "integrity": "sha512-/+ugz+gwFSEfTGUxn0KHkY+19XPRTXR8+7oUK/HxgiN1n7FjeJmkrbSiXAJfyQ0zORgJYPaenmymwon51YXH9Q==",
+ "dev": true,
+ "requires": {
+ "chalk": "^2.4.1",
+ "commander": "2.6.0",
+ "date-fns": "^1.23.0",
+ "lodash": "^4.5.1",
+ "read-pkg": "^3.0.0",
+ "rx": "2.3.24",
+ "spawn-command": "^0.0.2-1",
+ "supports-color": "^3.2.3",
+ "tree-kill": "^1.1.0"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.6.0.tgz",
+ "integrity": "sha1-nfflL7Kgyw+4kFjugMMQQiXzfh0=",
+ "dev": true
+ },
+ "has-flag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz",
+ "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=",
+ "dev": true
+ },
+ "supports-color": {
+ "version": "3.2.3",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz",
+ "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=",
+ "dev": true,
+ "requires": {
+ "has-flag": "^1.0.0"
+ }
+ }
+ }
+ },
+ "configstore": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/configstore/-/configstore-3.1.2.tgz",
+ "integrity": "sha512-vtv5HtGjcYUgFrXc6Kx747B83MRRVS5R1VTEQoXvuP+kMI+if6uywV0nDGoiydJRy4yk7h9od5Og0kxx4zUXmw==",
+ "dev": true,
+ "requires": {
+ "dot-prop": "^4.1.0",
+ "graceful-fs": "^4.1.2",
+ "make-dir": "^1.0.0",
+ "unique-string": "^1.0.0",
+ "write-file-atomic": "^2.0.0",
+ "xdg-basedir": "^3.0.0"
+ },
+ "dependencies": {
+ "make-dir": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz",
+ "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==",
+ "dev": true,
+ "requires": {
+ "pify": "^3.0.0"
+ }
+ },
+ "pify": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
+ "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=",
+ "dev": true
+ }
+ }
+ },
+ "confusing-browser-globals": {
+ "version": "1.0.9",
+ "resolved": "https://registry.npmjs.org/confusing-browser-globals/-/confusing-browser-globals-1.0.9.tgz",
+ "integrity": "sha512-KbS1Y0jMtyPgIxjO7ZzMAuUpAKMt1SzCL9fsrKsX6b0zJPTaT0SiSPmewwVZg9UAO83HVIlEhZF84LIjZ0lmAw==",
+ "dev": true
+ },
+ "console-browserify": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz",
+ "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==",
+ "dev": true
+ },
+ "console-control-strings": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz",
+ "integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=",
+ "dev": true
+ },
+ "constantinople": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/constantinople/-/constantinople-3.1.2.tgz",
+ "integrity": "sha512-yePcBqEFhLOqSBtwYOGGS1exHo/s1xjekXiinh4itpNQGCu4KA1euPh1fg07N2wMITZXQkBz75Ntdt1ctGZouw==",
+ "requires": {
+ "@types/babel-types": "^7.0.0",
+ "@types/babylon": "^6.16.2",
+ "babel-types": "^6.26.0",
+ "babylon": "^6.18.0"
+ }
+ },
+ "constants-browserify": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz",
+ "integrity": "sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U=",
+ "dev": true
+ },
+ "contains-path": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/contains-path/-/contains-path-0.1.0.tgz",
+ "integrity": "sha1-/ozxhP9mcLa67wGp1IYaXL7EEgo=",
+ "dev": true
+ },
+ "content-disposition": {
+ "version": "0.5.3",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz",
+ "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==",
+ "requires": {
+ "safe-buffer": "5.1.2"
+ }
+ },
+ "content-type": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
+ "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA=="
+ },
+ "convert-source-map": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz",
+ "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==",
+ "dev": true,
+ "requires": {
+ "safe-buffer": "~5.1.1"
+ }
+ },
+ "cookie": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.0.tgz",
+ "integrity": "sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg=="
+ },
+ "cookie-session": {
+ "version": "2.0.0-rc.1",
+ "resolved": "https://registry.npmjs.org/cookie-session/-/cookie-session-2.0.0-rc.1.tgz",
+ "integrity": "sha512-zg80EsLe7S1J4y0XxV7SZ8Fbi90ZZoampuX2bfYDOvJfc//98sSlZC41YDzTTjtVbeU1VlVdBbldXOOyi5xzEw==",
+ "requires": {
+ "cookies": "0.8.0",
+ "debug": "3.2.6",
+ "on-headers": "~1.0.2",
+ "safe-buffer": "5.2.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "3.2.6",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz",
+ "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==",
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "safe-buffer": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.0.tgz",
+ "integrity": "sha512-fZEwUGbVl7kouZs1jCdMLdt95hdIv0ZeHg6L7qPeciMZhZ+/gdesW4wgTARkrFWEpspjEATAzUGPG8N2jJiwbg=="
+ }
+ }
+ },
+ "cookie-signature": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
+ "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw="
+ },
+ "cookiejar": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/cookiejar/-/cookiejar-2.1.2.tgz",
+ "integrity": "sha512-Mw+adcfzPxcPeI+0WlvRrr/3lGVO0bD75SxX6811cxSh1Wbxx7xZBGK1eVtDf6si8rg2lhnUjsVLMFMfbRIuwA==",
+ "dev": true
+ },
+ "cookies": {
+ "version": "0.8.0",
+ "resolved": "https://registry.npmjs.org/cookies/-/cookies-0.8.0.tgz",
+ "integrity": "sha512-8aPsApQfebXnuI+537McwYsDtjVxGm8gTIzQI3FDW6t5t/DAhERxtnbEPN/8RX+uZthoz4eCOgloXaE5cYyNow==",
+ "requires": {
+ "depd": "~2.0.0",
+ "keygrip": "~1.1.0"
+ },
+ "dependencies": {
+ "depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw=="
+ }
+ }
+ },
+ "copy-concurrently": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/copy-concurrently/-/copy-concurrently-1.0.5.tgz",
+ "integrity": "sha512-f2domd9fsVDFtaFcbaRZuYXwtdmnzqbADSwhSWYxYB/Q8zsdUUFMXVRwXGDMWmbEzAn1kdRrtI1T/KTFOL4X2A==",
+ "dev": true,
+ "requires": {
+ "aproba": "^1.1.1",
+ "fs-write-stream-atomic": "^1.0.8",
+ "iferr": "^0.1.5",
+ "mkdirp": "^0.5.1",
+ "rimraf": "^2.5.4",
+ "run-queue": "^1.0.0"
+ }
+ },
+ "copy-descriptor": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz",
+ "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=",
+ "dev": true
+ },
+ "copy-to-clipboard": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/copy-to-clipboard/-/copy-to-clipboard-3.2.0.tgz",
+ "integrity": "sha512-eOZERzvCmxS8HWzugj4Uxl8OJxa7T2k1Gi0X5qavwydHIfuSHq2dTD09LOg/XyGq4Zpb5IsR/2OJ5lbOegz78w==",
+ "dev": true,
+ "requires": {
+ "toggle-selection": "^1.0.6"
+ }
+ },
+ "core-js": {
+ "version": "2.6.11",
+ "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.11.tgz",
+ "integrity": "sha512-5wjnpaT/3dV+XB4borEsnAYQchn00XSgTAWKDkEqv+K8KevjbzmofK6hfJ9TZIlpj2N0xQpazy7PiRQiWHqzWg=="
+ },
+ "core-js-compat": {
+ "version": "3.6.4",
+ "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.6.4.tgz",
+ "integrity": "sha512-zAa3IZPvsJ0slViBQ2z+vgyyTuhd3MFn1rBQjZSKVEgB0UMYhUkCj9jJUVPgGTGqWvsBVmfnruXgTcNyTlEiSA==",
+ "dev": true,
+ "requires": {
+ "browserslist": "^4.8.3",
+ "semver": "7.0.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.0.0.tgz",
+ "integrity": "sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A==",
+ "dev": true
+ }
+ }
+ },
+ "core-js-pure": {
+ "version": "3.6.4",
+ "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.6.4.tgz",
+ "integrity": "sha512-epIhRLkXdgv32xIUFaaAry2wdxZYBi6bgM7cB136dzzXXa+dFyRLTZeLUJxnd8ShrmyVXBub63n2NHo2JAt8Cw==",
+ "dev": true
+ },
+ "core-util-is": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
+ "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=",
+ "dev": true
+ },
+ "cosmiconfig": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.2.1.tgz",
+ "integrity": "sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA==",
+ "dev": true,
+ "requires": {
+ "import-fresh": "^2.0.0",
+ "is-directory": "^0.3.1",
+ "js-yaml": "^3.13.1",
+ "parse-json": "^4.0.0"
+ },
+ "dependencies": {
+ "import-fresh": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-2.0.0.tgz",
+ "integrity": "sha1-2BNVwVYS04bGH53dOSLUMEgipUY=",
+ "dev": true,
+ "requires": {
+ "caller-path": "^2.0.0",
+ "resolve-from": "^3.0.0"
+ }
+ },
+ "resolve-from": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-3.0.0.tgz",
+ "integrity": "sha1-six699nWiBvItuZTM17rywoYh0g=",
+ "dev": true
+ }
+ }
+ },
+ "coveralls": {
+ "version": "3.0.9",
+ "resolved": "https://registry.npmjs.org/coveralls/-/coveralls-3.0.9.tgz",
+ "integrity": "sha512-nNBg3B1+4iDox5A5zqHKzUTiwl2ey4k2o0NEcVZYvl+GOSJdKBj4AJGKLv6h3SvWch7tABHePAQOSZWM9E2hMg==",
+ "dev": true,
+ "requires": {
+ "js-yaml": "^3.13.1",
+ "lcov-parse": "^1.0.0",
+ "log-driver": "^1.2.7",
+ "minimist": "^1.2.0",
+ "request": "^2.88.0"
+ }
+ },
+ "cp-file": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/cp-file/-/cp-file-6.2.0.tgz",
+ "integrity": "sha512-fmvV4caBnofhPe8kOcitBwSn2f39QLjnAnGq3gO9dfd75mUytzKNZB1hde6QHunW2Rt+OwuBOMc3i1tNElbszA==",
+ "dev": true,
+ "requires": {
+ "graceful-fs": "^4.1.2",
+ "make-dir": "^2.0.0",
+ "nested-error-stacks": "^2.0.0",
+ "pify": "^4.0.1",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "create-ecdh": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.3.tgz",
+ "integrity": "sha512-GbEHQPMOswGpKXM9kCWVrremUcBmjteUaQ01T9rkKCPDXfUHX0IoP9LpHYo2NPFampa4e+/pFDc3jQdxrxQLaw==",
+ "dev": true,
+ "requires": {
+ "bn.js": "^4.1.0",
+ "elliptic": "^6.0.0"
+ }
+ },
+ "create-error-class": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/create-error-class/-/create-error-class-3.0.2.tgz",
+ "integrity": "sha1-Br56vvlHo/FKMP1hBnHUAbyot7Y=",
+ "dev": true,
+ "requires": {
+ "capture-stack-trace": "^1.0.0"
+ }
+ },
+ "create-hash": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz",
+ "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==",
+ "dev": true,
+ "requires": {
+ "cipher-base": "^1.0.1",
+ "inherits": "^2.0.1",
+ "md5.js": "^1.3.4",
+ "ripemd160": "^2.0.1",
+ "sha.js": "^2.4.0"
+ }
+ },
+ "create-hmac": {
+ "version": "1.1.7",
+ "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz",
+ "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==",
+ "dev": true,
+ "requires": {
+ "cipher-base": "^1.0.3",
+ "create-hash": "^1.1.0",
+ "inherits": "^2.0.1",
+ "ripemd160": "^2.0.0",
+ "safe-buffer": "^5.0.1",
+ "sha.js": "^2.4.8"
+ }
+ },
+ "create-react-class": {
+ "version": "15.6.3",
+ "resolved": "https://registry.npmjs.org/create-react-class/-/create-react-class-15.6.3.tgz",
+ "integrity": "sha512-M+/3Q6E6DLO6Yx3OwrWjwHBnvfXXYA7W+dFjt/ZDBemHO1DDZhsalX/NUtnTYclN6GfnBDRh4qRHjcDHmlJBJg==",
+ "dev": true,
+ "requires": {
+ "fbjs": "^0.8.9",
+ "loose-envify": "^1.3.1",
+ "object-assign": "^4.1.1"
+ }
+ },
+ "cross-spawn": {
+ "version": "6.0.5",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz",
+ "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==",
+ "dev": true,
+ "requires": {
+ "nice-try": "^1.0.4",
+ "path-key": "^2.0.1",
+ "semver": "^5.5.0",
+ "shebang-command": "^1.2.0",
+ "which": "^1.2.9"
+ }
+ },
+ "crypto-browserify": {
+ "version": "3.12.0",
+ "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz",
+ "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==",
+ "dev": true,
+ "requires": {
+ "browserify-cipher": "^1.0.0",
+ "browserify-sign": "^4.0.0",
+ "create-ecdh": "^4.0.0",
+ "create-hash": "^1.1.0",
+ "create-hmac": "^1.1.0",
+ "diffie-hellman": "^5.0.0",
+ "inherits": "^2.0.1",
+ "pbkdf2": "^3.0.3",
+ "public-encrypt": "^4.0.0",
+ "randombytes": "^2.0.0",
+ "randomfill": "^1.0.3"
+ }
+ },
+ "crypto-random-string": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-1.0.0.tgz",
+ "integrity": "sha1-ojD2T1aDEOFJgAmUB5DsmVRbyn4=",
+ "dev": true
+ },
+ "css-loader": {
+ "version": "3.4.2",
+ "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-3.4.2.tgz",
+ "integrity": "sha512-jYq4zdZT0oS0Iykt+fqnzVLRIeiPWhka+7BqPn+oSIpWJAHak5tmB/WZrJ2a21JhCeFyNnnlroSl8c+MtVndzA==",
+ "dev": true,
+ "requires": {
+ "camelcase": "^5.3.1",
+ "cssesc": "^3.0.0",
+ "icss-utils": "^4.1.1",
+ "loader-utils": "^1.2.3",
+ "normalize-path": "^3.0.0",
+ "postcss": "^7.0.23",
+ "postcss-modules-extract-imports": "^2.0.0",
+ "postcss-modules-local-by-default": "^3.0.2",
+ "postcss-modules-scope": "^2.1.1",
+ "postcss-modules-values": "^3.0.0",
+ "postcss-value-parser": "^4.0.2",
+ "schema-utils": "^2.6.0"
+ },
+ "dependencies": {
+ "camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "dev": true
+ }
+ }
+ },
+ "css-select": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/css-select/-/css-select-1.2.0.tgz",
+ "integrity": "sha1-KzoRBTnFNV8c2NMUYj6HCxIeyFg=",
+ "dev": true,
+ "requires": {
+ "boolbase": "~1.0.0",
+ "css-what": "2.1",
+ "domutils": "1.5.1",
+ "nth-check": "~1.0.1"
+ }
+ },
+ "css-what": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/css-what/-/css-what-2.1.3.tgz",
+ "integrity": "sha512-a+EPoD+uZiNfh+5fxw2nO9QwFa6nJe2Or35fGY6Ipw1R3R4AGz1d1TEZrCegvw2YTmZ0jXirGYlzxxpYSHwpEg==",
+ "dev": true
+ },
+ "cssesc": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
+ "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==",
+ "dev": true
+ },
+ "cssom": {
+ "version": "0.3.8",
+ "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.3.8.tgz",
+ "integrity": "sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==",
+ "dev": true
+ },
+ "cssstyle": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-1.4.0.tgz",
+ "integrity": "sha512-GBrLZYZ4X4x6/QEoBnIrqb8B/f5l4+8me2dkom/j1Gtbxy0kBv6OGzKuAsGM75bkGwGAFkt56Iwg28S3XTZgSA==",
+ "dev": true,
+ "requires": {
+ "cssom": "0.3.x"
+ }
+ },
+ "currently-unhandled": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/currently-unhandled/-/currently-unhandled-0.4.1.tgz",
+ "integrity": "sha1-mI3zP+qxke95mmE2nddsF635V+o=",
+ "dev": true,
+ "requires": {
+ "array-find-index": "^1.0.1"
+ }
+ },
+ "cycle": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz",
+ "integrity": "sha1-IegLK+hYD5i0aPN5QwZisEbDStI="
+ },
+ "cyclist": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/cyclist/-/cyclist-1.0.1.tgz",
+ "integrity": "sha1-WW6WmP0MgOEgOMK4LW6xs1tiJNk=",
+ "dev": true
+ },
+ "d": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz",
+ "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==",
+ "dev": true,
+ "requires": {
+ "es5-ext": "^0.10.50",
+ "type": "^1.0.1"
+ }
+ },
+ "d3-array": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-1.2.4.tgz",
+ "integrity": "sha512-KHW6M86R+FUPYGb3R5XiYjXPq7VzwxZ22buHhAEVG5ztoEcZZMLov530mmccaqA1GghZArjQV46fuc8kUqhhHw==",
+ "dev": true
+ },
+ "d3-collection": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/d3-collection/-/d3-collection-1.0.7.tgz",
+ "integrity": "sha512-ii0/r5f4sjKNTfh84Di+DpztYwqKhEyUlKoPrzUFfeSkWxjW49xU2QzO9qrPrNkpdI0XJkfzvmTu8V2Zylln6A==",
+ "dev": true
+ },
+ "d3-color": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-1.4.0.tgz",
+ "integrity": "sha512-TzNPeJy2+iEepfiL92LAAB7fvnp/dV2YwANPVHdDWmYMm23qIJBYww3qT8I8C1wXrmrg4UWs7BKc2tKIgyjzHg==",
+ "dev": true
+ },
+ "d3-contour": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-1.3.2.tgz",
+ "integrity": "sha512-hoPp4K/rJCu0ladiH6zmJUEz6+u3lgR+GSm/QdM2BBvDraU39Vr7YdDCicJcxP1z8i9B/2dJLgDC1NcvlF8WCg==",
+ "dev": true,
+ "requires": {
+ "d3-array": "^1.1.1"
+ }
+ },
+ "d3-format": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-1.4.3.tgz",
+ "integrity": "sha512-mm/nE2Y9HgGyjP+rKIekeITVgBtX97o1nrvHCWX8F/yBYyevUTvu9vb5pUnKwrcSw7o7GuwMOWjS9gFDs4O+uQ==",
+ "dev": true
+ },
+ "d3-geo": {
+ "version": "1.11.9",
+ "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-1.11.9.tgz",
+ "integrity": "sha512-9edcH6J3s/Aa3KJITWqFJbyB/8q3mMlA9Fi7z6yy+FAYMnRaxmC7jBhUnsINxVWD14GmqX3DK8uk7nV6/Ekt4A==",
+ "dev": true,
+ "requires": {
+ "d3-array": "1"
+ }
+ },
+ "d3-hexbin": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/d3-hexbin/-/d3-hexbin-0.2.2.tgz",
+ "integrity": "sha1-nFg32s/UcasFM3qeke8Qv8T5iDE=",
+ "dev": true
+ },
+ "d3-hierarchy": {
+ "version": "1.1.9",
+ "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-1.1.9.tgz",
+ "integrity": "sha512-j8tPxlqh1srJHAtxfvOUwKNYJkQuBFdM1+JAUfq6xqH5eAqf93L7oG1NVqDa4CpFZNvnNKtCYEUC8KY9yEn9lQ==",
+ "dev": true
+ },
+ "d3-interpolate": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-1.4.0.tgz",
+ "integrity": "sha512-V9znK0zc3jOPV4VD2zZn0sDhZU3WAE2bmlxdIwwQPPzPjvyLkd8B3JUVdS1IDUFDkWZ72c9qnv1GK2ZagTZ8EA==",
+ "dev": true,
+ "requires": {
+ "d3-color": "1"
+ }
+ },
+ "d3-path": {
+ "version": "1.0.9",
+ "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.9.tgz",
+ "integrity": "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==",
+ "dev": true
+ },
+ "d3-sankey": {
+ "version": "0.7.1",
+ "resolved": "https://registry.npmjs.org/d3-sankey/-/d3-sankey-0.7.1.tgz",
+ "integrity": "sha1-0imDImj8aaf+yEgD6WwiVqYUxSE=",
+ "dev": true,
+ "requires": {
+ "d3-array": "1",
+ "d3-collection": "1",
+ "d3-shape": "^1.2.0"
+ }
+ },
+ "d3-scale": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-1.0.7.tgz",
+ "integrity": "sha512-KvU92czp2/qse5tUfGms6Kjig0AhHOwkzXG0+PqIJB3ke0WUv088AHMZI0OssO9NCkXt4RP8yju9rpH8aGB7Lw==",
+ "dev": true,
+ "requires": {
+ "d3-array": "^1.2.0",
+ "d3-collection": "1",
+ "d3-color": "1",
+ "d3-format": "1",
+ "d3-interpolate": "1",
+ "d3-time": "1",
+ "d3-time-format": "2"
+ }
+ },
+ "d3-shape": {
+ "version": "1.3.7",
+ "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.3.7.tgz",
+ "integrity": "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==",
+ "dev": true,
+ "requires": {
+ "d3-path": "1"
+ }
+ },
+ "d3-time": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-1.1.0.tgz",
+ "integrity": "sha512-Xh0isrZ5rPYYdqhAVk8VLnMEidhz5aP7htAADH6MfzgmmicPkTo8LhkLxci61/lCB7n7UmE3bN0leRt+qvkLxA==",
+ "dev": true
+ },
+ "d3-time-format": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-2.2.3.tgz",
+ "integrity": "sha512-RAHNnD8+XvC4Zc4d2A56Uw0yJoM7bsvOlJR33bclxq399Rak/b9bhvu/InjxdWhPtkgU53JJcleJTGkNRnN6IA==",
+ "dev": true,
+ "requires": {
+ "d3-time": "1"
+ }
+ },
+ "d3-voronoi": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/d3-voronoi/-/d3-voronoi-1.1.4.tgz",
+ "integrity": "sha512-dArJ32hchFsrQ8uMiTBLq256MpnZjeuBtdHpaDlYuQyjU0CVzCJl/BVW+SkszaAeH95D/8gxqAhgx0ouAWAfRg==",
+ "dev": true
+ },
+ "dagre": {
+ "version": "0.8.5",
+ "resolved": "https://registry.npmjs.org/dagre/-/dagre-0.8.5.tgz",
+ "integrity": "sha512-/aTqmnRta7x7MCCpExk7HQL2O4owCT2h8NT//9I1OQ9vt29Pa0BzSAkR5lwFUcQ7491yVi/3CXU9jQ5o0Mn2Sw==",
+ "requires": {
+ "graphlib": "^2.1.8",
+ "lodash": "^4.17.15"
+ }
+ },
+ "damerau-levenshtein": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.6.tgz",
+ "integrity": "sha512-JVrozIeElnj3QzfUIt8tB8YMluBJom4Vw9qTPpjGYQ9fYlB3D/rb6OordUxf3xeFB35LKWs0xqcO5U6ySvBtug==",
+ "dev": true
+ },
+ "dashdash": {
+ "version": "1.14.1",
+ "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz",
+ "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=",
+ "dev": true,
+ "requires": {
+ "assert-plus": "^1.0.0"
+ }
+ },
+ "data-urls": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-1.1.0.tgz",
+ "integrity": "sha512-YTWYI9se1P55u58gL5GkQHW4P6VJBJ5iBT+B5a7i2Tjadhv52paJG0qHX4A0OR6/t52odI64KP2YvFpkDOi3eQ==",
+ "dev": true,
+ "requires": {
+ "abab": "^2.0.0",
+ "whatwg-mimetype": "^2.2.0",
+ "whatwg-url": "^7.0.0"
+ }
+ },
+ "date-fns": {
+ "version": "1.30.1",
+ "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-1.30.1.tgz",
+ "integrity": "sha512-hBSVCvSmWC+QypYObzwGOd9wqdDpOt+0wl0KbU+R+uuZBS1jN8VsD1ss3irQDknRj5NvxiTF6oj/nDRnN/UQNw==",
+ "dev": true
+ },
+ "debug": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
+ "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "decamelize": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz",
+ "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA="
+ },
+ "decamelize-keys": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/decamelize-keys/-/decamelize-keys-1.1.0.tgz",
+ "integrity": "sha1-0XGoeTMlKAfrPLYdwcFEXQeN8tk=",
+ "dev": true,
+ "requires": {
+ "decamelize": "^1.1.0",
+ "map-obj": "^1.0.0"
+ },
+ "dependencies": {
+ "map-obj": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz",
+ "integrity": "sha1-2TPOuSBdgr3PSIb2dCvcK03qFG0=",
+ "dev": true
+ }
+ }
+ },
+ "decode-uri-component": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz",
+ "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=",
+ "dev": true
+ },
+ "decompress-response": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-4.2.1.tgz",
+ "integrity": "sha512-jOSne2qbyE+/r8G1VU+G/82LBs2Fs4LAsTiLSHOCOMZQl2OKZ6i8i4IyHemTe+/yIXOtTcRQMzPcgyhoFlqPkw==",
+ "dev": true,
+ "requires": {
+ "mimic-response": "^2.0.0"
+ }
+ },
+ "deep-eql": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz",
+ "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==",
+ "dev": true,
+ "requires": {
+ "type-detect": "^4.0.0"
+ }
+ },
+ "deep-equal": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-1.1.1.tgz",
+ "integrity": "sha512-yd9c5AdiqVcR+JjcwUQb9DkhJc8ngNr0MahEBGvDiJw8puWab2yZlh+nkasOnZP+EGTAP6rRp2JzJhJZzvNF8g==",
+ "dev": true,
+ "requires": {
+ "is-arguments": "^1.0.4",
+ "is-date-object": "^1.0.1",
+ "is-regex": "^1.0.4",
+ "object-is": "^1.0.1",
+ "object-keys": "^1.1.1",
+ "regexp.prototype.flags": "^1.2.0"
+ }
+ },
+ "deep-extend": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
+ "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
+ "dev": true
+ },
+ "deep-is": {
+ "version": "0.1.3",
+ "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz",
+ "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=",
+ "dev": true
+ },
+ "deepmerge": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.2.2.tgz",
+ "integrity": "sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg=="
+ },
+ "default-require-extensions": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/default-require-extensions/-/default-require-extensions-2.0.0.tgz",
+ "integrity": "sha1-9fj7sYp9bVCyH2QfZJ67Uiz+JPc=",
+ "dev": true,
+ "requires": {
+ "strip-bom": "^3.0.0"
+ }
+ },
+ "defaults": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.3.tgz",
+ "integrity": "sha1-xlYFHpgX2f8I7YgUd/P+QBnz730=",
+ "dev": true,
+ "requires": {
+ "clone": "^1.0.2"
+ },
+ "dependencies": {
+ "clone": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz",
+ "integrity": "sha1-2jCcwmPfFZlMaIypAheco8fNfH4=",
+ "dev": true
+ }
+ }
+ },
+ "define-properties": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz",
+ "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==",
+ "dev": true,
+ "requires": {
+ "object-keys": "^1.0.12"
+ }
+ },
+ "define-property": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz",
+ "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^1.0.2",
+ "isobject": "^3.0.1"
+ },
+ "dependencies": {
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ },
+ "kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
+ "dev": true
+ }
+ }
+ },
+ "delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=",
+ "dev": true
+ },
+ "delegates": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz",
+ "integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=",
+ "dev": true
+ },
+ "depd": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
+ "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak="
+ },
+ "des.js": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/des.js/-/des.js-1.0.1.tgz",
+ "integrity": "sha512-Q0I4pfFrv2VPd34/vfLrFOoRmlYj3OV50i7fskps1jZWK1kApMWWT9G6RRUeYedLcBDIhnSDaUvJMb3AhUlaEA==",
+ "dev": true,
+ "requires": {
+ "inherits": "^2.0.1",
+ "minimalistic-assert": "^1.0.0"
+ }
+ },
+ "destroy": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
+ "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA="
+ },
+ "detect-libc": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz",
+ "integrity": "sha1-+hN8S9aY7fVc1c0CrFWfkaTEups=",
+ "dev": true
+ },
+ "diff": {
+ "version": "3.5.0",
+ "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz",
+ "integrity": "sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==",
+ "dev": true
+ },
+ "diffie-hellman": {
+ "version": "5.0.3",
+ "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz",
+ "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==",
+ "dev": true,
+ "requires": {
+ "bn.js": "^4.1.0",
+ "miller-rabin": "^4.0.0",
+ "randombytes": "^2.0.0"
+ }
+ },
+ "discontinuous-range": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/discontinuous-range/-/discontinuous-range-1.0.0.tgz",
+ "integrity": "sha1-44Mx8IRLukm5qctxx3FYWqsbxlo=",
+ "dev": true
+ },
+ "doctrine": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz",
+ "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==",
+ "dev": true,
+ "requires": {
+ "esutils": "^2.0.2"
+ }
+ },
+ "doctypes": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/doctypes/-/doctypes-1.1.0.tgz",
+ "integrity": "sha1-6oCxBqh1OHdOijpKWv4pPeSJ4Kk="
+ },
+ "dom-serializer": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.1.1.tgz",
+ "integrity": "sha512-l0IU0pPzLWSHBcieZbpOKgkIn3ts3vAh7ZuFyXNwJxJXk/c4Gwj9xaTJwIDVQCXawWD0qb3IzMGH5rglQaO0XA==",
+ "dev": true,
+ "requires": {
+ "domelementtype": "^1.3.0",
+ "entities": "^1.1.1"
+ }
+ },
+ "dom-walk": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/dom-walk/-/dom-walk-0.1.1.tgz",
+ "integrity": "sha1-ZyIm3HTI95mtNTB9+TaroRrNYBg=",
+ "dev": true
+ },
+ "domain-browser": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz",
+ "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==",
+ "dev": true
+ },
+ "domelementtype": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz",
+ "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==",
+ "dev": true
+ },
+ "domexception": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/domexception/-/domexception-1.0.1.tgz",
+ "integrity": "sha512-raigMkn7CJNNo6Ihro1fzG7wr3fHuYVytzquZKX5n0yizGsTcYgzdIUwj1X9pK0VvjeihV+XiclP+DjwbsSKug==",
+ "dev": true,
+ "requires": {
+ "webidl-conversions": "^4.0.2"
+ }
+ },
+ "domhandler": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz",
+ "integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==",
+ "dev": true,
+ "requires": {
+ "domelementtype": "1"
+ }
+ },
+ "domutils": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.5.1.tgz",
+ "integrity": "sha1-3NhIiib1Y9YQeeSMn3t+Mjc2gs8=",
+ "dev": true,
+ "requires": {
+ "dom-serializer": "0",
+ "domelementtype": "1"
+ }
+ },
+ "dot-prop": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-4.2.0.tgz",
+ "integrity": "sha512-tUMXrxlExSW6U2EXiiKGSBVdYgtV8qlHL+C10TsW4PURY/ic+eaysnSkwB4kA/mBlCyy/IKDJ+Lc3wbWeaXtuQ==",
+ "dev": true,
+ "requires": {
+ "is-obj": "^1.0.0"
+ }
+ },
+ "duplexer": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.1.tgz",
+ "integrity": "sha1-rOb/gIwc5mtX0ev5eXessCM0z8E=",
+ "dev": true
+ },
+ "duplexer3": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz",
+ "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=",
+ "dev": true
+ },
+ "duplexify": {
+ "version": "3.7.1",
+ "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz",
+ "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==",
+ "dev": true,
+ "requires": {
+ "end-of-stream": "^1.0.0",
+ "inherits": "^2.0.1",
+ "readable-stream": "^2.0.0",
+ "stream-shift": "^1.0.0"
+ }
+ },
+ "ecc-jsbn": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz",
+ "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=",
+ "dev": true,
+ "requires": {
+ "jsbn": "~0.1.0",
+ "safer-buffer": "^2.1.0"
+ }
+ },
+ "ee-first": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+ "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0="
+ },
+ "ejs": {
+ "version": "2.7.4",
+ "resolved": "https://registry.npmjs.org/ejs/-/ejs-2.7.4.tgz",
+ "integrity": "sha512-7vmuyh5+kuUyJKePhQfRQBhXV5Ce+RnaeeQArKu1EAMpL3WbgMt5WG6uQZpEVvYSSsxMXRKOewtDk9RaTKXRlA=="
+ },
+ "electron-to-chromium": {
+ "version": "1.3.344",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.344.tgz",
+ "integrity": "sha512-tvbx2Wl8WBR+ym3u492D0L6/jH+8NoQXqe46+QhbWH3voVPauGuZYeb1QAXYoOAWuiP2dbSvlBx0kQ1F3hu/Mw==",
+ "dev": true
+ },
+ "elliptic": {
+ "version": "6.5.2",
+ "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.2.tgz",
+ "integrity": "sha512-f4x70okzZbIQl/NSRLkI/+tteV/9WqL98zx+SQ69KbXxmVrmjwsNUPn/gYJJ0sHvEak24cZgHIPegRePAtA/xw==",
+ "dev": true,
+ "requires": {
+ "bn.js": "^4.4.0",
+ "brorand": "^1.0.1",
+ "hash.js": "^1.0.0",
+ "hmac-drbg": "^1.0.0",
+ "inherits": "^2.0.1",
+ "minimalistic-assert": "^1.0.0",
+ "minimalistic-crypto-utils": "^1.0.0"
+ }
+ },
+ "emitter-component": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/emitter-component/-/emitter-component-1.1.1.tgz",
+ "integrity": "sha1-Bl4tvtaVm/RwZ57avq95gdEAOrY=",
+ "dev": true
+ },
+ "emoji-regex": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz",
+ "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==",
+ "dev": true
+ },
+ "emojis-list": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz",
+ "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k=",
+ "dev": true
+ },
+ "encodeurl": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
+ "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k="
+ },
+ "encoding": {
+ "version": "0.1.12",
+ "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.12.tgz",
+ "integrity": "sha1-U4tm8+5izRq1HsMjgp0flIDHS+s=",
+ "dev": true,
+ "requires": {
+ "iconv-lite": "~0.4.13"
+ }
+ },
+ "end-of-stream": {
+ "version": "1.4.4",
+ "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
+ "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
+ "dev": true,
+ "requires": {
+ "once": "^1.4.0"
+ }
+ },
+ "enhanced-resolve": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.1.1.tgz",
+ "integrity": "sha512-98p2zE+rL7/g/DzMHMTF4zZlCgeVdJ7yr6xzEpJRYwFYrGi9ANdn5DnJURg6RpBkyk60XYDnWIv51VfIhfNGuA==",
+ "dev": true,
+ "requires": {
+ "graceful-fs": "^4.1.2",
+ "memory-fs": "^0.5.0",
+ "tapable": "^1.0.0"
+ },
+ "dependencies": {
+ "memory-fs": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.5.0.tgz",
+ "integrity": "sha512-jA0rdU5KoQMC0e6ppoNRtpp6vjFq6+NY7r8hywnC7V+1Xj/MtHwGIbB1QaK/dunyjWteJzmkpd7ooeWg10T7GA==",
+ "dev": true,
+ "requires": {
+ "errno": "^0.1.3",
+ "readable-stream": "^2.0.1"
+ }
+ }
+ }
+ },
+ "entities": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz",
+ "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==",
+ "dev": true
+ },
+ "enzyme": {
+ "version": "3.11.0",
+ "resolved": "https://registry.npmjs.org/enzyme/-/enzyme-3.11.0.tgz",
+ "integrity": "sha512-Dw8/Gs4vRjxY6/6i9wU0V+utmQO9kvh9XLnz3LIudviOnVYDEe2ec+0k+NQoMamn1VrjKgCUOWj5jG/5M5M0Qw==",
+ "dev": true,
+ "requires": {
+ "array.prototype.flat": "^1.2.3",
+ "cheerio": "^1.0.0-rc.3",
+ "enzyme-shallow-equal": "^1.0.1",
+ "function.prototype.name": "^1.1.2",
+ "has": "^1.0.3",
+ "html-element-map": "^1.2.0",
+ "is-boolean-object": "^1.0.1",
+ "is-callable": "^1.1.5",
+ "is-number-object": "^1.0.4",
+ "is-regex": "^1.0.5",
+ "is-string": "^1.0.5",
+ "is-subset": "^0.1.1",
+ "lodash.escape": "^4.0.1",
+ "lodash.isequal": "^4.5.0",
+ "object-inspect": "^1.7.0",
+ "object-is": "^1.0.2",
+ "object.assign": "^4.1.0",
+ "object.entries": "^1.1.1",
+ "object.values": "^1.1.1",
+ "raf": "^3.4.1",
+ "rst-selector-parser": "^2.2.3",
+ "string.prototype.trim": "^1.2.1"
+ }
+ },
+ "enzyme-adapter-react-16": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/enzyme-adapter-react-16/-/enzyme-adapter-react-16-1.15.2.tgz",
+ "integrity": "sha512-SkvDrb8xU3lSxID8Qic9rB8pvevDbLybxPK6D/vW7PrT0s2Cl/zJYuXvsd1EBTz0q4o3iqG3FJhpYz3nUNpM2Q==",
+ "dev": true,
+ "requires": {
+ "enzyme-adapter-utils": "^1.13.0",
+ "enzyme-shallow-equal": "^1.0.1",
+ "has": "^1.0.3",
+ "object.assign": "^4.1.0",
+ "object.values": "^1.1.1",
+ "prop-types": "^15.7.2",
+ "react-is": "^16.12.0",
+ "react-test-renderer": "^16.0.0-0",
+ "semver": "^5.7.0"
+ }
+ },
+ "enzyme-adapter-utils": {
+ "version": "1.13.0",
+ "resolved": "https://registry.npmjs.org/enzyme-adapter-utils/-/enzyme-adapter-utils-1.13.0.tgz",
+ "integrity": "sha512-YuEtfQp76Lj5TG1NvtP2eGJnFKogk/zT70fyYHXK2j3v6CtuHqc8YmgH/vaiBfL8K1SgVVbQXtTcgQZFwzTVyQ==",
+ "dev": true,
+ "requires": {
+ "airbnb-prop-types": "^2.15.0",
+ "function.prototype.name": "^1.1.2",
+ "object.assign": "^4.1.0",
+ "object.fromentries": "^2.0.2",
+ "prop-types": "^15.7.2",
+ "semver": "^5.7.1"
+ }
+ },
+ "enzyme-shallow-equal": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/enzyme-shallow-equal/-/enzyme-shallow-equal-1.0.1.tgz",
+ "integrity": "sha512-hGA3i1so8OrYOZSM9whlkNmVHOicJpsjgTzC+wn2JMJXhq1oO4kA4bJ5MsfzSIcC71aLDKzJ6gZpIxrqt3QTAQ==",
+ "dev": true,
+ "requires": {
+ "has": "^1.0.3",
+ "object-is": "^1.0.2"
+ }
+ },
+ "errno": {
+ "version": "0.1.7",
+ "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.7.tgz",
+ "integrity": "sha512-MfrRBDWzIWifgq6tJj60gkAwtLNb6sQPlcFrSOflcP1aFmmruKQ2wRnze/8V6kgyz7H3FF8Npzv78mZ7XLLflg==",
+ "dev": true,
+ "requires": {
+ "prr": "~1.0.1"
+ }
+ },
+ "error-ex": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
+ "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
+ "dev": true,
+ "requires": {
+ "is-arrayish": "^0.2.1"
+ }
+ },
+ "es-abstract": {
+ "version": "1.17.4",
+ "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.4.tgz",
+ "integrity": "sha512-Ae3um/gb8F0mui/jPL+QiqmglkUsaQf7FwBEHYIFkztkneosu9imhqHpBzQ3h1vit8t5iQ74t6PEVvphBZiuiQ==",
+ "dev": true,
+ "requires": {
+ "es-to-primitive": "^1.2.1",
+ "function-bind": "^1.1.1",
+ "has": "^1.0.3",
+ "has-symbols": "^1.0.1",
+ "is-callable": "^1.1.5",
+ "is-regex": "^1.0.5",
+ "object-inspect": "^1.7.0",
+ "object-keys": "^1.1.1",
+ "object.assign": "^4.1.0",
+ "string.prototype.trimleft": "^2.1.1",
+ "string.prototype.trimright": "^2.1.1"
+ }
+ },
+ "es-to-primitive": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz",
+ "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==",
+ "dev": true,
+ "requires": {
+ "is-callable": "^1.1.4",
+ "is-date-object": "^1.0.1",
+ "is-symbol": "^1.0.2"
+ }
+ },
+ "es5-ext": {
+ "version": "0.10.53",
+ "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz",
+ "integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==",
+ "dev": true,
+ "requires": {
+ "es6-iterator": "~2.0.3",
+ "es6-symbol": "~3.1.3",
+ "next-tick": "~1.0.0"
+ }
+ },
+ "es6-error": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz",
+ "integrity": "sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg==",
+ "dev": true
+ },
+ "es6-iterator": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz",
+ "integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=",
+ "dev": true,
+ "requires": {
+ "d": "1",
+ "es5-ext": "^0.10.35",
+ "es6-symbol": "^3.1.1"
+ }
+ },
+ "es6-symbol": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz",
+ "integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==",
+ "dev": true,
+ "requires": {
+ "d": "^1.0.1",
+ "ext": "^1.1.2"
+ }
+ },
+ "escape-html": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+ "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg="
+ },
+ "escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=",
+ "dev": true
+ },
+ "escodegen": {
+ "version": "1.13.0",
+ "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.13.0.tgz",
+ "integrity": "sha512-eYk2dCkxR07DsHA/X2hRBj0CFAZeri/LyDMc0C8JT1Hqi6JnVpMhJ7XFITbb0+yZS3lVkaPL2oCkZ3AVmeVbMw==",
+ "dev": true,
+ "requires": {
+ "esprima": "^4.0.1",
+ "estraverse": "^4.2.0",
+ "esutils": "^2.0.2",
+ "optionator": "^0.8.1",
+ "source-map": "~0.6.1"
+ }
+ },
+ "eslint": {
+ "version": "5.16.0",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-5.16.0.tgz",
+ "integrity": "sha512-S3Rz11i7c8AA5JPv7xAH+dOyq/Cu/VXHiHXBPOU1k/JAM5dXqQPt3qcrhpHSorXmrpu2g0gkIBVXAqCpzfoZIg==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.0.0",
+ "ajv": "^6.9.1",
+ "chalk": "^2.1.0",
+ "cross-spawn": "^6.0.5",
+ "debug": "^4.0.1",
+ "doctrine": "^3.0.0",
+ "eslint-scope": "^4.0.3",
+ "eslint-utils": "^1.3.1",
+ "eslint-visitor-keys": "^1.0.0",
+ "espree": "^5.0.1",
+ "esquery": "^1.0.1",
+ "esutils": "^2.0.2",
+ "file-entry-cache": "^5.0.1",
+ "functional-red-black-tree": "^1.0.1",
+ "glob": "^7.1.2",
+ "globals": "^11.7.0",
+ "ignore": "^4.0.6",
+ "import-fresh": "^3.0.0",
+ "imurmurhash": "^0.1.4",
+ "inquirer": "^6.2.2",
+ "js-yaml": "^3.13.0",
+ "json-stable-stringify-without-jsonify": "^1.0.1",
+ "levn": "^0.3.0",
+ "lodash": "^4.17.11",
+ "minimatch": "^3.0.4",
+ "mkdirp": "^0.5.1",
+ "natural-compare": "^1.4.0",
+ "optionator": "^0.8.2",
+ "path-is-inside": "^1.0.2",
+ "progress": "^2.0.0",
+ "regexpp": "^2.0.1",
+ "semver": "^5.5.1",
+ "strip-ansi": "^4.0.0",
+ "strip-json-comments": "^2.0.1",
+ "table": "^5.2.3",
+ "text-table": "^0.2.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz",
+ "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=",
+ "dev": true
+ },
+ "debug": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
+ "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
+ "dev": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "eslint-scope": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-4.0.3.tgz",
+ "integrity": "sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg==",
+ "dev": true,
+ "requires": {
+ "esrecurse": "^4.1.0",
+ "estraverse": "^4.1.1"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ },
+ "strip-ansi": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz",
+ "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^3.0.0"
+ }
+ }
+ }
+ },
+ "eslint-config-airbnb": {
+ "version": "17.1.1",
+ "resolved": "https://registry.npmjs.org/eslint-config-airbnb/-/eslint-config-airbnb-17.1.1.tgz",
+ "integrity": "sha512-xCu//8a/aWqagKljt+1/qAM62BYZeNq04HmdevG5yUGWpja0I/xhqd6GdLRch5oetEGFiJAnvtGuTEAese53Qg==",
+ "dev": true,
+ "requires": {
+ "eslint-config-airbnb-base": "^13.2.0",
+ "object.assign": "^4.1.0",
+ "object.entries": "^1.1.0"
+ }
+ },
+ "eslint-config-airbnb-base": {
+ "version": "13.2.0",
+ "resolved": "https://registry.npmjs.org/eslint-config-airbnb-base/-/eslint-config-airbnb-base-13.2.0.tgz",
+ "integrity": "sha512-1mg/7eoB4AUeB0X1c/ho4vb2gYkNH8Trr/EgCT/aGmKhhG+F6vF5s8+iRBlWAzFIAphxIdp3YfEKgEl0f9Xg+w==",
+ "dev": true,
+ "requires": {
+ "confusing-browser-globals": "^1.0.5",
+ "object.assign": "^4.1.0",
+ "object.entries": "^1.1.0"
+ }
+ },
+ "eslint-import-resolver-node": {
+ "version": "0.3.3",
+ "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.3.tgz",
+ "integrity": "sha512-b8crLDo0M5RSe5YG8Pu2DYBj71tSB6OvXkfzwbJU2w7y8P4/yo0MyF8jU26IEuEuHF2K5/gcAJE3LhQGqBBbVg==",
+ "dev": true,
+ "requires": {
+ "debug": "^2.6.9",
+ "resolve": "^1.13.1"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ }
+ }
+ },
+ "eslint-module-utils": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.5.2.tgz",
+ "integrity": "sha512-LGScZ/JSlqGKiT8OC+cYRxseMjyqt6QO54nl281CK93unD89ijSeRV6An8Ci/2nvWVKe8K/Tqdm75RQoIOCr+Q==",
+ "dev": true,
+ "requires": {
+ "debug": "^2.6.9",
+ "pkg-dir": "^2.0.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "find-up": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz",
+ "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=",
+ "dev": true,
+ "requires": {
+ "locate-path": "^2.0.0"
+ }
+ },
+ "locate-path": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz",
+ "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=",
+ "dev": true,
+ "requires": {
+ "p-locate": "^2.0.0",
+ "path-exists": "^3.0.0"
+ }
+ },
+ "p-limit": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz",
+ "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==",
+ "dev": true,
+ "requires": {
+ "p-try": "^1.0.0"
+ }
+ },
+ "p-locate": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz",
+ "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=",
+ "dev": true,
+ "requires": {
+ "p-limit": "^1.1.0"
+ }
+ },
+ "p-try": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz",
+ "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=",
+ "dev": true
+ },
+ "pkg-dir": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-2.0.0.tgz",
+ "integrity": "sha1-9tXREJ4Z1j7fQo4L1X4Sd3YVM0s=",
+ "dev": true,
+ "requires": {
+ "find-up": "^2.1.0"
+ }
+ }
+ }
+ },
+ "eslint-plugin-import": {
+ "version": "2.20.1",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.20.1.tgz",
+ "integrity": "sha512-qQHgFOTjguR+LnYRoToeZWT62XM55MBVXObHM6SKFd1VzDcX/vqT1kAz8ssqigh5eMj8qXcRoXXGZpPP6RfdCw==",
+ "dev": true,
+ "requires": {
+ "array-includes": "^3.0.3",
+ "array.prototype.flat": "^1.2.1",
+ "contains-path": "^0.1.0",
+ "debug": "^2.6.9",
+ "doctrine": "1.5.0",
+ "eslint-import-resolver-node": "^0.3.2",
+ "eslint-module-utils": "^2.4.1",
+ "has": "^1.0.3",
+ "minimatch": "^3.0.4",
+ "object.values": "^1.1.0",
+ "read-pkg-up": "^2.0.0",
+ "resolve": "^1.12.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "doctrine": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-1.5.0.tgz",
+ "integrity": "sha1-N53Ocw9hZvds76TmcHoVmwLFpvo=",
+ "dev": true,
+ "requires": {
+ "esutils": "^2.0.2",
+ "isarray": "^1.0.0"
+ }
+ }
+ }
+ },
+ "eslint-plugin-json": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-json/-/eslint-plugin-json-1.4.0.tgz",
+ "integrity": "sha512-CECvgRAWtUzuepdlPWd+VA7fhyF9HT183pZnl8wQw5x699Mk/MbME/q8xtULBfooi3LUbj6fToieNmsvUcDxWA==",
+ "dev": true,
+ "requires": {
+ "vscode-json-languageservice": "^3.2.1"
+ }
+ },
+ "eslint-plugin-jsx-a11y": {
+ "version": "6.2.3",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.2.3.tgz",
+ "integrity": "sha512-CawzfGt9w83tyuVekn0GDPU9ytYtxyxyFZ3aSWROmnRRFQFT2BiPJd7jvRdzNDi6oLWaS2asMeYSNMjWTV4eNg==",
+ "dev": true,
+ "requires": {
+ "@babel/runtime": "^7.4.5",
+ "aria-query": "^3.0.0",
+ "array-includes": "^3.0.3",
+ "ast-types-flow": "^0.0.7",
+ "axobject-query": "^2.0.2",
+ "damerau-levenshtein": "^1.0.4",
+ "emoji-regex": "^7.0.2",
+ "has": "^1.0.3",
+ "jsx-ast-utils": "^2.2.1"
+ }
+ },
+ "eslint-plugin-prettier": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-3.1.2.tgz",
+ "integrity": "sha512-GlolCC9y3XZfv3RQfwGew7NnuFDKsfI4lbvRK+PIIo23SFH+LemGs4cKwzAaRa+Mdb+lQO/STaIayno8T5sJJA==",
+ "dev": true,
+ "requires": {
+ "prettier-linter-helpers": "^1.0.0"
+ }
+ },
+ "eslint-plugin-react": {
+ "version": "7.18.3",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.18.3.tgz",
+ "integrity": "sha512-Bt56LNHAQCoou88s8ViKRjMB2+36XRejCQ1VoLj716KI1MoE99HpTVvIThJ0rvFmG4E4Gsq+UgToEjn+j044Bg==",
+ "dev": true,
+ "requires": {
+ "array-includes": "^3.1.1",
+ "doctrine": "^2.1.0",
+ "has": "^1.0.3",
+ "jsx-ast-utils": "^2.2.3",
+ "object.entries": "^1.1.1",
+ "object.fromentries": "^2.0.2",
+ "object.values": "^1.1.1",
+ "prop-types": "^15.7.2",
+ "resolve": "^1.14.2",
+ "string.prototype.matchall": "^4.0.2"
+ },
+ "dependencies": {
+ "doctrine": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz",
+ "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==",
+ "dev": true,
+ "requires": {
+ "esutils": "^2.0.2"
+ }
+ }
+ }
+ },
+ "eslint-scope": {
+ "version": "3.7.1",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-3.7.1.tgz",
+ "integrity": "sha1-PWPD7f2gLgbgGkUq2IyqzHzctug=",
+ "dev": true,
+ "requires": {
+ "esrecurse": "^4.1.0",
+ "estraverse": "^4.1.1"
+ }
+ },
+ "eslint-utils": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.4.3.tgz",
+ "integrity": "sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q==",
+ "dev": true,
+ "requires": {
+ "eslint-visitor-keys": "^1.1.0"
+ }
+ },
+ "eslint-visitor-keys": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz",
+ "integrity": "sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A==",
+ "dev": true
+ },
+ "espree": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/espree/-/espree-5.0.1.tgz",
+ "integrity": "sha512-qWAZcWh4XE/RwzLJejfcofscgMc9CamR6Tn1+XRXNzrvUSSbiAjGOI/fggztjIi7y9VLPqnICMIPiGyr8JaZ0A==",
+ "dev": true,
+ "requires": {
+ "acorn": "^6.0.7",
+ "acorn-jsx": "^5.0.0",
+ "eslint-visitor-keys": "^1.0.0"
+ },
+ "dependencies": {
+ "acorn": {
+ "version": "6.4.0",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.0.tgz",
+ "integrity": "sha512-gac8OEcQ2Li1dxIEWGZzsp2BitJxwkwcOm0zHAJLcPJaVvm58FRnk6RkuLRpU1EujipU2ZFODv2P9DLMfnV8mw==",
+ "dev": true
+ }
+ }
+ },
+ "esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "dev": true
+ },
+ "esquery": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.0.1.tgz",
+ "integrity": "sha512-SmiyZ5zIWH9VM+SRUReLS5Q8a7GxtRdxEBVZpm98rJM7Sb+A9DVCndXfkeFUd3byderg+EbDkfnevfCwynWaNA==",
+ "dev": true,
+ "requires": {
+ "estraverse": "^4.0.0"
+ }
+ },
+ "esrecurse": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.2.1.tgz",
+ "integrity": "sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==",
+ "dev": true,
+ "requires": {
+ "estraverse": "^4.1.0"
+ }
+ },
+ "estraverse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
+ "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==",
+ "dev": true
+ },
+ "esutils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
+ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g=="
+ },
+ "etag": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+ "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc="
+ },
+ "events": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/events/-/events-3.1.0.tgz",
+ "integrity": "sha512-Rv+u8MLHNOdMjTAFeT3nCjHn2aGlx435FP/sDHNaRhDEMwyI/aB22Kj2qIN8R0cw3z28psEQLYwxVKLsKrMgWg==",
+ "dev": true
+ },
+ "evp_bytestokey": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz",
+ "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==",
+ "dev": true,
+ "requires": {
+ "md5.js": "^1.3.4",
+ "safe-buffer": "^5.1.1"
+ }
+ },
+ "execa": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz",
+ "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==",
+ "dev": true,
+ "requires": {
+ "cross-spawn": "^6.0.0",
+ "get-stream": "^4.0.0",
+ "is-stream": "^1.1.0",
+ "npm-run-path": "^2.0.0",
+ "p-finally": "^1.0.0",
+ "signal-exit": "^3.0.0",
+ "strip-eof": "^1.0.0"
+ }
+ },
+ "exenv": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/exenv/-/exenv-1.2.2.tgz",
+ "integrity": "sha1-KueOhdmJQVhnCwPUe+wfA72Ru50=",
+ "dev": true
+ },
+ "expand-brackets": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz",
+ "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=",
+ "dev": true,
+ "requires": {
+ "debug": "^2.3.3",
+ "define-property": "^0.2.5",
+ "extend-shallow": "^2.0.1",
+ "posix-character-classes": "^0.1.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ },
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dev": true,
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ }
+ }
+ },
+ "express": {
+ "version": "4.17.1",
+ "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz",
+ "integrity": "sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==",
+ "requires": {
+ "accepts": "~1.3.7",
+ "array-flatten": "1.1.1",
+ "body-parser": "1.19.0",
+ "content-disposition": "0.5.3",
+ "content-type": "~1.0.4",
+ "cookie": "0.4.0",
+ "cookie-signature": "1.0.6",
+ "debug": "2.6.9",
+ "depd": "~1.1.2",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "finalhandler": "~1.1.2",
+ "fresh": "0.5.2",
+ "merge-descriptors": "1.0.1",
+ "methods": "~1.1.2",
+ "on-finished": "~2.3.0",
+ "parseurl": "~1.3.3",
+ "path-to-regexp": "0.1.7",
+ "proxy-addr": "~2.0.5",
+ "qs": "6.7.0",
+ "range-parser": "~1.2.1",
+ "safe-buffer": "5.1.2",
+ "send": "0.17.1",
+ "serve-static": "1.14.1",
+ "setprototypeof": "1.1.1",
+ "statuses": "~1.5.0",
+ "type-is": "~1.6.18",
+ "utils-merge": "1.0.1",
+ "vary": "~1.1.2"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "qs": {
+ "version": "6.7.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz",
+ "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ=="
+ }
+ }
+ },
+ "express-session": {
+ "version": "1.17.0",
+ "resolved": "https://registry.npmjs.org/express-session/-/express-session-1.17.0.tgz",
+ "integrity": "sha512-t4oX2z7uoSqATbMfsxWMbNjAL0T5zpvcJCk3Z9wnPPN7ibddhnmDZXHfEcoBMG2ojKXZoCyPMc5FbtK+G7SoDg==",
+ "requires": {
+ "cookie": "0.4.0",
+ "cookie-signature": "1.0.6",
+ "debug": "2.6.9",
+ "depd": "~2.0.0",
+ "on-headers": "~1.0.2",
+ "parseurl": "~1.3.3",
+ "safe-buffer": "5.2.0",
+ "uid-safe": "~2.1.5"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw=="
+ },
+ "safe-buffer": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.0.tgz",
+ "integrity": "sha512-fZEwUGbVl7kouZs1jCdMLdt95hdIv0ZeHg6L7qPeciMZhZ+/gdesW4wgTARkrFWEpspjEATAzUGPG8N2jJiwbg=="
+ }
+ }
+ },
+ "express-winston": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/express-winston/-/express-winston-2.6.0.tgz",
+ "integrity": "sha512-m4qvQrrIErAZFMQman8CKnQB8sgVG0dSp/wRFv1ZyoWPpP/6waDZywteAdjMF57uJ5+9O7tkwZb5k9w80ZyvAA==",
+ "requires": {
+ "chalk": "~0.4.0",
+ "lodash": "~4.17.5"
+ },
+ "dependencies": {
+ "chalk": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-0.4.0.tgz",
+ "integrity": "sha1-UZmj3c0MHv4jvAjBsCewYXbgxk8=",
+ "requires": {
+ "ansi-styles": "~1.0.0",
+ "has-color": "~0.1.0",
+ "strip-ansi": "~0.1.0"
+ }
+ }
+ }
+ },
+ "ext": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz",
+ "integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==",
+ "dev": true,
+ "requires": {
+ "type": "^2.0.0"
+ },
+ "dependencies": {
+ "type": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/type/-/type-2.0.0.tgz",
+ "integrity": "sha512-KBt58xCHry4Cejnc2ISQAF7QY+ORngsWfxezO68+12hKV6lQY8P/psIkcbjeHWn7MqcgciWJyCCevFMJdIXpow==",
+ "dev": true
+ }
+ }
+ },
+ "extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
+ "dev": true
+ },
+ "extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dev": true,
+ "requires": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ },
+ "dependencies": {
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "external-editor": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz",
+ "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==",
+ "dev": true,
+ "requires": {
+ "chardet": "^0.7.0",
+ "iconv-lite": "^0.4.24",
+ "tmp": "^0.0.33"
+ }
+ },
+ "extglob": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz",
+ "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==",
+ "dev": true,
+ "requires": {
+ "array-unique": "^0.3.2",
+ "define-property": "^1.0.0",
+ "expand-brackets": "^2.1.4",
+ "extend-shallow": "^2.0.1",
+ "fragment-cache": "^0.2.1",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^1.0.0"
+ }
+ },
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dev": true,
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ },
+ "kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
+ "dev": true
+ }
+ }
+ },
+ "extsprintf": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz",
+ "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=",
+ "dev": true
+ },
+ "eyes": {
+ "version": "0.1.8",
+ "resolved": "https://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz",
+ "integrity": "sha1-Ys8SAjTGg3hdkCNIqADvPgzCC8A="
+ },
+ "fast-deep-equal": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.1.tgz",
+ "integrity": "sha512-8UEa58QDLauDNfpbrX55Q9jrGHThw2ZMdOky5Gl1CDtVeJDPVrG4Jxx1N8jw2gkWaff5UUuX1KJd+9zGe2B+ZA==",
+ "dev": true
+ },
+ "fast-diff": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/fast-diff/-/fast-diff-1.2.0.tgz",
+ "integrity": "sha512-xJuoT5+L99XlZ8twedaRf6Ax2TgQVxvgZOYoPKqZufmJib0tL2tegPBOZb1pVNgIhlqDlA0eO0c3wBvQcmzx4w==",
+ "dev": true
+ },
+ "fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
+ "dev": true
+ },
+ "fast-levenshtein": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz",
+ "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=",
+ "dev": true
+ },
+ "fbjs": {
+ "version": "0.8.17",
+ "resolved": "https://registry.npmjs.org/fbjs/-/fbjs-0.8.17.tgz",
+ "integrity": "sha1-xNWY6taUkRJlPWWIsBpc3Nn5D90=",
+ "dev": true,
+ "requires": {
+ "core-js": "^1.0.0",
+ "isomorphic-fetch": "^2.1.1",
+ "loose-envify": "^1.0.0",
+ "object-assign": "^4.1.0",
+ "promise": "^7.1.1",
+ "setimmediate": "^1.0.5",
+ "ua-parser-js": "^0.7.18"
+ },
+ "dependencies": {
+ "core-js": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/core-js/-/core-js-1.2.7.tgz",
+ "integrity": "sha1-ZSKUwUZR2yj6k70tX/KYOk8IxjY=",
+ "dev": true
+ }
+ }
+ },
+ "figgy-pudding": {
+ "version": "3.5.1",
+ "resolved": "https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.1.tgz",
+ "integrity": "sha512-vNKxJHTEKNThjfrdJwHc7brvM6eVevuO5nTj6ez8ZQ1qbXTvGthucRF7S4vf2cr71QVnT70V34v0S1DyQsti0w==",
+ "dev": true
+ },
+ "figures": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/figures/-/figures-2.0.0.tgz",
+ "integrity": "sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI=",
+ "dev": true,
+ "requires": {
+ "escape-string-regexp": "^1.0.5"
+ }
+ },
+ "file-entry-cache": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-5.0.1.tgz",
+ "integrity": "sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g==",
+ "dev": true,
+ "requires": {
+ "flat-cache": "^2.0.1"
+ }
+ },
+ "file-loader": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-4.3.0.tgz",
+ "integrity": "sha512-aKrYPYjF1yG3oX0kWRrqrSMfgftm7oJW5M+m4owoldH5C51C0RkIwB++JbRvEW3IU6/ZG5n8UvEcdgwOt2UOWA==",
+ "dev": true,
+ "requires": {
+ "loader-utils": "^1.2.3",
+ "schema-utils": "^2.5.0"
+ }
+ },
+ "file-uri-to-path": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz",
+ "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==",
+ "dev": true,
+ "optional": true
+ },
+ "filesize": {
+ "version": "3.6.1",
+ "resolved": "https://registry.npmjs.org/filesize/-/filesize-3.6.1.tgz",
+ "integrity": "sha512-7KjR1vv6qnicaPMi1iiTcI85CyYwRO/PSFCu6SvqL8jN2Wjt/NIYQTFtFs7fSDCYOstUkEWIQGFUg5YZQfjlcg==",
+ "dev": true
+ },
+ "fill-keys": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/fill-keys/-/fill-keys-1.0.2.tgz",
+ "integrity": "sha1-mo+jb06K1jTjv2tPPIiCVRRS6yA=",
+ "dev": true,
+ "requires": {
+ "is-object": "~1.0.1",
+ "merge-descriptors": "~1.0.0"
+ }
+ },
+ "fill-range": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz",
+ "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=",
+ "dev": true,
+ "requires": {
+ "extend-shallow": "^2.0.1",
+ "is-number": "^3.0.0",
+ "repeat-string": "^1.6.1",
+ "to-regex-range": "^2.1.0"
+ },
+ "dependencies": {
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dev": true,
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ }
+ }
+ },
+ "finalhandler": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz",
+ "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==",
+ "requires": {
+ "debug": "2.6.9",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "on-finished": "~2.3.0",
+ "parseurl": "~1.3.3",
+ "statuses": "~1.5.0",
+ "unpipe": "~1.0.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ }
+ }
+ },
+ "find-cache-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz",
+ "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==",
+ "dev": true,
+ "requires": {
+ "commondir": "^1.0.1",
+ "make-dir": "^2.0.0",
+ "pkg-dir": "^3.0.0"
+ }
+ },
+ "find-up": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
+ "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
+ "dev": true,
+ "requires": {
+ "locate-path": "^3.0.0"
+ }
+ },
+ "finished": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/finished/-/finished-1.2.2.tgz",
+ "integrity": "sha1-QWCOr639ZWg7RqEiC8Sx7D2u3Ng=",
+ "requires": {
+ "ee-first": "1.0.3"
+ },
+ "dependencies": {
+ "ee-first": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.0.3.tgz",
+ "integrity": "sha1-bJjECJq+y1p7hcGsRJqmA9Oz2r4="
+ }
+ }
+ },
+ "flat": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/flat/-/flat-4.1.0.tgz",
+ "integrity": "sha512-Px/TiLIznH7gEDlPXcUD4KnBusa6kR6ayRUVcnEAbreRIuhkqow/mun59BuRXwoYk7ZQOLW1ZM05ilIvK38hFw==",
+ "requires": {
+ "is-buffer": "~2.0.3"
+ }
+ },
+ "flat-cache": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-2.0.1.tgz",
+ "integrity": "sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA==",
+ "dev": true,
+ "requires": {
+ "flatted": "^2.0.0",
+ "rimraf": "2.6.3",
+ "write": "1.0.3"
+ },
+ "dependencies": {
+ "rimraf": {
+ "version": "2.6.3",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz",
+ "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==",
+ "dev": true,
+ "requires": {
+ "glob": "^7.1.3"
+ }
+ }
+ }
+ },
+ "flatted": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.1.tgz",
+ "integrity": "sha512-a1hQMktqW9Nmqr5aktAux3JMNqaucxGcjtjWnZLHX7yyPCmlSV3M54nGYbqT8K+0GhF3NBgmJCc3ma+WOgX8Jg==",
+ "dev": true
+ },
+ "flush-write-stream": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz",
+ "integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==",
+ "dev": true,
+ "requires": {
+ "inherits": "^2.0.3",
+ "readable-stream": "^2.3.6"
+ }
+ },
+ "follow-redirects": {
+ "version": "1.5.10",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.5.10.tgz",
+ "integrity": "sha512-0V5l4Cizzvqt5D44aTXbFZz+FtyXV1vrDN6qrelxtfYQKW0KO0W2T/hkE8xvGa/540LkZlkaUjO4ailYTFtHVQ==",
+ "requires": {
+ "debug": "=3.1.0"
+ }
+ },
+ "for-in": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz",
+ "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=",
+ "dev": true
+ },
+ "foreground-child": {
+ "version": "1.5.6",
+ "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-1.5.6.tgz",
+ "integrity": "sha1-T9ca0t/elnibmApcCilZN8svXOk=",
+ "dev": true,
+ "requires": {
+ "cross-spawn": "^4",
+ "signal-exit": "^3.0.0"
+ },
+ "dependencies": {
+ "cross-spawn": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-4.0.2.tgz",
+ "integrity": "sha1-e5JHYhwjrf3ThWAEqCPL45dCTUE=",
+ "dev": true,
+ "requires": {
+ "lru-cache": "^4.0.1",
+ "which": "^1.2.9"
+ }
+ }
+ }
+ },
+ "forever-agent": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz",
+ "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=",
+ "dev": true
+ },
+ "form-data": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz",
+ "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==",
+ "dev": true,
+ "requires": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.6",
+ "mime-types": "^2.1.12"
+ }
+ },
+ "formatio": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/formatio/-/formatio-1.1.1.tgz",
+ "integrity": "sha1-XtPM1jZVEJc4NGXZlhmRAOhhYek=",
+ "dev": true,
+ "requires": {
+ "samsam": "~1.1"
+ }
+ },
+ "formidable": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/formidable/-/formidable-1.2.1.tgz",
+ "integrity": "sha512-Fs9VRguL0gqGHkXS5GQiMCr1VhZBxz0JnJs4JmMp/2jL18Fmbzvv7vOFRU+U8TBkHEE/CX1qDXzJplVULgsLeg==",
+ "dev": true
+ },
+ "forwarded": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz",
+ "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ="
+ },
+ "fragment-cache": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz",
+ "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=",
+ "dev": true,
+ "requires": {
+ "map-cache": "^0.2.2"
+ }
+ },
+ "fresh": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+ "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac="
+ },
+ "from2": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz",
+ "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=",
+ "dev": true,
+ "requires": {
+ "inherits": "^2.0.1",
+ "readable-stream": "^2.0.0"
+ }
+ },
+ "fs-minipass": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-1.2.7.tgz",
+ "integrity": "sha512-GWSSJGFy4e9GUeCcbIkED+bgAoFyj7XF1mV8rma3QW4NIqX9Kyx79N/PF61H5udOV3aY1IaMLs6pGbH71nlCTA==",
+ "dev": true,
+ "requires": {
+ "minipass": "^2.6.0"
+ }
+ },
+ "fs-readdir-recursive": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/fs-readdir-recursive/-/fs-readdir-recursive-1.1.0.tgz",
+ "integrity": "sha512-GNanXlVr2pf02+sPN40XN8HG+ePaNcvM0q5mZBd668Obwb0yD5GiUbZOFgwn8kGMY6I3mdyDJzieUy3PTYyTRA==",
+ "dev": true
+ },
+ "fs-write-stream-atomic": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz",
+ "integrity": "sha1-tH31NJPvkR33VzHnCp3tAYnbQMk=",
+ "dev": true,
+ "requires": {
+ "graceful-fs": "^4.1.2",
+ "iferr": "^0.1.5",
+ "imurmurhash": "^0.1.4",
+ "readable-stream": "1 || 2"
+ }
+ },
+ "fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8="
+ },
+ "fsevents": {
+ "version": "1.2.11",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.11.tgz",
+ "integrity": "sha512-+ux3lx6peh0BpvY0JebGyZoiR4D+oYzdPZMKJwkZ+sFkNJzpL7tXc/wehS49gUAxg3tmMHPHZkA8JU2rhhgDHw==",
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "bindings": "^1.5.0",
+ "nan": "^2.12.1",
+ "node-pre-gyp": "*"
+ },
+ "dependencies": {
+ "abbrev": {
+ "version": "1.1.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "ansi-regex": {
+ "version": "2.1.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "aproba": {
+ "version": "1.2.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "are-we-there-yet": {
+ "version": "1.1.5",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "delegates": "^1.0.0",
+ "readable-stream": "^2.0.6"
+ }
+ },
+ "balanced-match": {
+ "version": "1.0.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "brace-expansion": {
+ "version": "1.1.11",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "chownr": {
+ "version": "1.1.3",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "code-point-at": {
+ "version": "1.1.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "concat-map": {
+ "version": "0.0.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "console-control-strings": {
+ "version": "1.1.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "core-util-is": {
+ "version": "1.0.2",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "debug": {
+ "version": "3.2.6",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "deep-extend": {
+ "version": "0.6.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "delegates": {
+ "version": "1.0.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "detect-libc": {
+ "version": "1.0.3",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "fs-minipass": {
+ "version": "1.2.7",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "minipass": "^2.6.0"
+ }
+ },
+ "fs.realpath": {
+ "version": "1.0.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "gauge": {
+ "version": "2.7.4",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "aproba": "^1.0.3",
+ "console-control-strings": "^1.0.0",
+ "has-unicode": "^2.0.0",
+ "object-assign": "^4.1.0",
+ "signal-exit": "^3.0.0",
+ "string-width": "^1.0.1",
+ "strip-ansi": "^3.0.1",
+ "wide-align": "^1.1.0"
+ }
+ },
+ "glob": {
+ "version": "7.1.6",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ },
+ "has-unicode": {
+ "version": "2.0.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "iconv-lite": {
+ "version": "0.4.24",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ }
+ },
+ "ignore-walk": {
+ "version": "3.0.3",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "minimatch": "^3.0.4"
+ }
+ },
+ "inflight": {
+ "version": "1.0.6",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "inherits": {
+ "version": "2.0.4",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "ini": {
+ "version": "1.3.5",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "is-fullwidth-code-point": {
+ "version": "1.0.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "number-is-nan": "^1.0.0"
+ }
+ },
+ "isarray": {
+ "version": "1.0.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "minimatch": {
+ "version": "3.0.4",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "brace-expansion": "^1.1.7"
+ }
+ },
+ "minimist": {
+ "version": "0.0.8",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "minipass": {
+ "version": "2.9.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "safe-buffer": "^5.1.2",
+ "yallist": "^3.0.0"
+ }
+ },
+ "minizlib": {
+ "version": "1.3.3",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "minipass": "^2.9.0"
+ }
+ },
+ "mkdirp": {
+ "version": "0.5.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "minimist": "0.0.8"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "needle": {
+ "version": "2.4.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "debug": "^3.2.6",
+ "iconv-lite": "^0.4.4",
+ "sax": "^1.2.4"
+ }
+ },
+ "node-pre-gyp": {
+ "version": "0.14.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "detect-libc": "^1.0.2",
+ "mkdirp": "^0.5.1",
+ "needle": "^2.2.1",
+ "nopt": "^4.0.1",
+ "npm-packlist": "^1.1.6",
+ "npmlog": "^4.0.2",
+ "rc": "^1.2.7",
+ "rimraf": "^2.6.1",
+ "semver": "^5.3.0",
+ "tar": "^4.4.2"
+ }
+ },
+ "nopt": {
+ "version": "4.0.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "abbrev": "1",
+ "osenv": "^0.1.4"
+ }
+ },
+ "npm-bundled": {
+ "version": "1.1.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "npm-normalize-package-bin": "^1.0.1"
+ }
+ },
+ "npm-normalize-package-bin": {
+ "version": "1.0.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "npm-packlist": {
+ "version": "1.4.7",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "ignore-walk": "^3.0.1",
+ "npm-bundled": "^1.0.1"
+ }
+ },
+ "npmlog": {
+ "version": "4.1.2",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "are-we-there-yet": "~1.1.2",
+ "console-control-strings": "~1.1.0",
+ "gauge": "~2.7.3",
+ "set-blocking": "~2.0.0"
+ }
+ },
+ "number-is-nan": {
+ "version": "1.0.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "object-assign": {
+ "version": "4.1.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "once": {
+ "version": "1.4.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "wrappy": "1"
+ }
+ },
+ "os-homedir": {
+ "version": "1.0.2",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "os-tmpdir": {
+ "version": "1.0.2",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "osenv": {
+ "version": "0.1.5",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "os-homedir": "^1.0.0",
+ "os-tmpdir": "^1.0.0"
+ }
+ },
+ "path-is-absolute": {
+ "version": "1.0.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "process-nextick-args": {
+ "version": "2.0.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "rc": {
+ "version": "1.2.8",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "deep-extend": "^0.6.0",
+ "ini": "~1.3.0",
+ "minimist": "^1.2.0",
+ "strip-json-comments": "~2.0.1"
+ },
+ "dependencies": {
+ "minimist": {
+ "version": "1.2.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ }
+ }
+ },
+ "readable-stream": {
+ "version": "2.3.6",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "core-util-is": "~1.0.0",
+ "inherits": "~2.0.3",
+ "isarray": "~1.0.0",
+ "process-nextick-args": "~2.0.0",
+ "safe-buffer": "~5.1.1",
+ "string_decoder": "~1.1.1",
+ "util-deprecate": "~1.0.1"
+ }
+ },
+ "rimraf": {
+ "version": "2.7.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "glob": "^7.1.3"
+ }
+ },
+ "safe-buffer": {
+ "version": "5.1.2",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "safer-buffer": {
+ "version": "2.1.2",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "sax": {
+ "version": "1.2.4",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "semver": {
+ "version": "5.7.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "set-blocking": {
+ "version": "2.0.0",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "signal-exit": {
+ "version": "3.0.2",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "string-width": {
+ "version": "1.0.2",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "code-point-at": "^1.0.0",
+ "is-fullwidth-code-point": "^1.0.0",
+ "strip-ansi": "^3.0.0"
+ }
+ },
+ "string_decoder": {
+ "version": "1.1.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "safe-buffer": "~5.1.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "3.0.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ },
+ "strip-json-comments": {
+ "version": "2.0.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "tar": {
+ "version": "4.4.13",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "chownr": "^1.1.1",
+ "fs-minipass": "^1.2.5",
+ "minipass": "^2.8.6",
+ "minizlib": "^1.2.1",
+ "mkdirp": "^0.5.0",
+ "safe-buffer": "^5.1.2",
+ "yallist": "^3.0.3"
+ }
+ },
+ "util-deprecate": {
+ "version": "1.0.2",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "wide-align": {
+ "version": "1.1.3",
+ "bundled": true,
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "string-width": "^1.0.2 || 2"
+ }
+ },
+ "wrappy": {
+ "version": "1.0.2",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ },
+ "yallist": {
+ "version": "3.1.1",
+ "bundled": true,
+ "dev": true,
+ "optional": true
+ }
+ }
+ },
+ "function-bind": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
+ "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A=="
+ },
+ "function.prototype.name": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.2.tgz",
+ "integrity": "sha512-C8A+LlHBJjB2AdcRPorc5JvJ5VUoWlXdEHLOJdCI7kjHEtGTpHQUiqMvCIKUwIsGwZX2jZJy761AXsn356bJQg==",
+ "dev": true,
+ "requires": {
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.17.0-next.1",
+ "functions-have-names": "^1.2.0"
+ }
+ },
+ "functional-red-black-tree": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz",
+ "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=",
+ "dev": true
+ },
+ "functions-have-names": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.1.tgz",
+ "integrity": "sha512-j48B/ZI7VKs3sgeI2cZp7WXWmZXu7Iq5pl5/vptV5N2mq+DGFuS/ulaDjtaoLpYzuD6u8UgrUKHfgo7fDTSiBA==",
+ "dev": true
+ },
+ "gauge": {
+ "version": "2.7.4",
+ "resolved": "https://registry.npmjs.org/gauge/-/gauge-2.7.4.tgz",
+ "integrity": "sha1-LANAXHU4w51+s3sxcCLjJfsBi/c=",
+ "dev": true,
+ "requires": {
+ "aproba": "^1.0.3",
+ "console-control-strings": "^1.0.0",
+ "has-unicode": "^2.0.0",
+ "object-assign": "^4.1.0",
+ "signal-exit": "^3.0.0",
+ "string-width": "^1.0.1",
+ "strip-ansi": "^3.0.1",
+ "wide-align": "^1.1.0"
+ },
+ "dependencies": {
+ "strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ }
+ }
+ },
+ "gensync": {
+ "version": "1.0.0-beta.1",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.1.tgz",
+ "integrity": "sha512-r8EC6NO1sngH/zdD9fiRDLdcgnbayXah+mLgManTaIZJqEC1MZstmnox8KpnI2/fxQwrp5OpCOYWLp4rBl4Jcg==",
+ "dev": true
+ },
+ "get-caller-file": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
+ "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
+ "dev": true
+ },
+ "get-func-name": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz",
+ "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=",
+ "dev": true
+ },
+ "get-stdin": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-7.0.0.tgz",
+ "integrity": "sha512-zRKcywvrXlXsA0v0i9Io4KDRaAw7+a1ZpjRwl9Wox8PFlVCCHra7E9c4kqXCoCM9nR5tBkaTTZRBoCm60bFqTQ==",
+ "dev": true
+ },
+ "get-stream": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz",
+ "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==",
+ "dev": true,
+ "requires": {
+ "pump": "^3.0.0"
+ }
+ },
+ "get-value": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz",
+ "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=",
+ "dev": true
+ },
+ "getpass": {
+ "version": "0.1.7",
+ "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz",
+ "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=",
+ "dev": true,
+ "requires": {
+ "assert-plus": "^1.0.0"
+ }
+ },
+ "glob": {
+ "version": "7.1.6",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
+ "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
+ "requires": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ },
+ "glob-parent": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz",
+ "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=",
+ "dev": true,
+ "requires": {
+ "is-glob": "^3.1.0",
+ "path-dirname": "^1.0.0"
+ },
+ "dependencies": {
+ "is-glob": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz",
+ "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=",
+ "dev": true,
+ "requires": {
+ "is-extglob": "^2.1.0"
+ }
+ }
+ }
+ },
+ "global": {
+ "version": "4.4.0",
+ "resolved": "https://registry.npmjs.org/global/-/global-4.4.0.tgz",
+ "integrity": "sha512-wv/LAoHdRE3BeTGz53FAamhGlPLhlssK45usmGFThIi4XqnBmjKQ16u+RNbP7WvigRZDxUsM0J3gcQ5yicaL0w==",
+ "dev": true,
+ "requires": {
+ "min-document": "^2.19.0",
+ "process": "^0.11.10"
+ }
+ },
+ "global-dirs": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-0.1.1.tgz",
+ "integrity": "sha1-sxnA3UYH81PzvpzKTHL8FIxJ9EU=",
+ "dev": true,
+ "requires": {
+ "ini": "^1.3.4"
+ }
+ },
+ "globals": {
+ "version": "11.12.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
+ "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
+ "dev": true
+ },
+ "google-protobuf": {
+ "version": "3.11.2",
+ "resolved": "https://registry.npmjs.org/google-protobuf/-/google-protobuf-3.11.2.tgz",
+ "integrity": "sha512-T4fin7lcYLUPj2ChUZ4DvfuuHtg3xi1621qeRZt2J7SvOQusOzq+sDT4vbotWTCjUXJoR36CA016LlhtPy80uQ=="
+ },
+ "got": {
+ "version": "6.7.1",
+ "resolved": "https://registry.npmjs.org/got/-/got-6.7.1.tgz",
+ "integrity": "sha1-JAzQV4WpoY5WHcG0S0HHY+8ejbA=",
+ "dev": true,
+ "requires": {
+ "create-error-class": "^3.0.0",
+ "duplexer3": "^0.1.4",
+ "get-stream": "^3.0.0",
+ "is-redirect": "^1.0.0",
+ "is-retry-allowed": "^1.0.0",
+ "is-stream": "^1.0.0",
+ "lowercase-keys": "^1.0.0",
+ "safe-buffer": "^5.0.1",
+ "timed-out": "^4.0.0",
+ "unzip-response": "^2.0.1",
+ "url-parse-lax": "^1.0.0"
+ },
+ "dependencies": {
+ "get-stream": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz",
+ "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=",
+ "dev": true
+ }
+ }
+ },
+ "graceful-fs": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz",
+ "integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==",
+ "dev": true
+ },
+ "graphlib": {
+ "version": "2.1.8",
+ "resolved": "https://registry.npmjs.org/graphlib/-/graphlib-2.1.8.tgz",
+ "integrity": "sha512-jcLLfkpoVGmH7/InMC/1hIvOPSUh38oJtGhvrOFGzioE1DZ+0YW16RgmOJhHiuWTvGiJQ9Z1Ik43JvkRPRvE+A==",
+ "requires": {
+ "lodash": "^4.17.15"
+ }
+ },
+ "growl": {
+ "version": "1.10.5",
+ "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz",
+ "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==",
+ "dev": true
+ },
+ "grpc": {
+ "version": "1.24.2",
+ "resolved": "https://registry.npmjs.org/grpc/-/grpc-1.24.2.tgz",
+ "integrity": "sha512-EG3WH6AWMVvAiV15d+lr+K77HJ/KV/3FvMpjKjulXHbTwgDZkhkcWbwhxFAoTdxTkQvy0WFcO3Nog50QBbHZWw==",
+ "requires": {
+ "@types/bytebuffer": "^5.0.40",
+ "lodash.camelcase": "^4.3.0",
+ "lodash.clone": "^4.5.0",
+ "nan": "^2.13.2",
+ "node-pre-gyp": "^0.14.0",
+ "protobufjs": "^5.0.3"
+ },
+ "dependencies": {
+ "abbrev": {
+ "version": "1.1.1",
+ "bundled": true
+ },
+ "ansi-regex": {
+ "version": "2.1.1",
+ "bundled": true
+ },
+ "aproba": {
+ "version": "1.2.0",
+ "bundled": true
+ },
+ "are-we-there-yet": {
+ "version": "1.1.5",
+ "bundled": true,
+ "requires": {
+ "delegates": "^1.0.0",
+ "readable-stream": "^2.0.6"
+ }
+ },
+ "balanced-match": {
+ "version": "1.0.0",
+ "bundled": true
+ },
+ "brace-expansion": {
+ "version": "1.1.11",
+ "bundled": true,
+ "requires": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "chownr": {
+ "version": "1.1.3",
+ "bundled": true
+ },
+ "code-point-at": {
+ "version": "1.1.0",
+ "bundled": true
+ },
+ "concat-map": {
+ "version": "0.0.1",
+ "bundled": true
+ },
+ "console-control-strings": {
+ "version": "1.1.0",
+ "bundled": true
+ },
+ "core-util-is": {
+ "version": "1.0.2",
+ "bundled": true
+ },
+ "debug": {
+ "version": "3.2.6",
+ "bundled": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "deep-extend": {
+ "version": "0.6.0",
+ "bundled": true
+ },
+ "delegates": {
+ "version": "1.0.0",
+ "bundled": true
+ },
+ "detect-libc": {
+ "version": "1.0.3",
+ "bundled": true
+ },
+ "fs-minipass": {
+ "version": "1.2.7",
+ "bundled": true,
+ "requires": {
+ "minipass": "^2.6.0"
+ }
+ },
+ "fs.realpath": {
+ "version": "1.0.0",
+ "bundled": true
+ },
+ "gauge": {
+ "version": "2.7.4",
+ "bundled": true,
+ "requires": {
+ "aproba": "^1.0.3",
+ "console-control-strings": "^1.0.0",
+ "has-unicode": "^2.0.0",
+ "object-assign": "^4.1.0",
+ "signal-exit": "^3.0.0",
+ "string-width": "^1.0.1",
+ "strip-ansi": "^3.0.1",
+ "wide-align": "^1.1.0"
+ }
+ },
+ "glob": {
+ "version": "7.1.4",
+ "bundled": true,
+ "requires": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ },
+ "has-unicode": {
+ "version": "2.0.1",
+ "bundled": true
+ },
+ "iconv-lite": {
+ "version": "0.4.24",
+ "bundled": true,
+ "requires": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ }
+ },
+ "ignore-walk": {
+ "version": "3.0.3",
+ "bundled": true,
+ "requires": {
+ "minimatch": "^3.0.4"
+ }
+ },
+ "inflight": {
+ "version": "1.0.6",
+ "bundled": true,
+ "requires": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "inherits": {
+ "version": "2.0.4",
+ "bundled": true
+ },
+ "ini": {
+ "version": "1.3.5",
+ "bundled": true
+ },
+ "is-fullwidth-code-point": {
+ "version": "1.0.0",
+ "bundled": true,
+ "requires": {
+ "number-is-nan": "^1.0.0"
+ }
+ },
+ "isarray": {
+ "version": "1.0.0",
+ "bundled": true
+ },
+ "minimatch": {
+ "version": "3.0.4",
+ "bundled": true,
+ "requires": {
+ "brace-expansion": "^1.1.7"
+ }
+ },
+ "minimist": {
+ "version": "1.2.0",
+ "bundled": true
+ },
+ "minipass": {
+ "version": "2.9.0",
+ "bundled": true,
+ "requires": {
+ "safe-buffer": "^5.1.2",
+ "yallist": "^3.0.0"
+ }
+ },
+ "minizlib": {
+ "version": "1.3.3",
+ "bundled": true,
+ "requires": {
+ "minipass": "^2.9.0"
+ }
+ },
+ "mkdirp": {
+ "version": "0.5.1",
+ "bundled": true,
+ "requires": {
+ "minimist": "0.0.8"
+ },
+ "dependencies": {
+ "minimist": {
+ "version": "0.0.8",
+ "bundled": true
+ }
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "bundled": true
+ },
+ "needle": {
+ "version": "2.4.0",
+ "bundled": true,
+ "requires": {
+ "debug": "^3.2.6",
+ "iconv-lite": "^0.4.4",
+ "sax": "^1.2.4"
+ }
+ },
+ "node-pre-gyp": {
+ "version": "0.14.0",
+ "bundled": true,
+ "requires": {
+ "detect-libc": "^1.0.2",
+ "mkdirp": "^0.5.1",
+ "needle": "^2.2.1",
+ "nopt": "^4.0.1",
+ "npm-packlist": "^1.1.6",
+ "npmlog": "^4.0.2",
+ "rc": "^1.2.7",
+ "rimraf": "^2.6.1",
+ "semver": "^5.3.0",
+ "tar": "^4.4.2"
+ }
+ },
+ "nopt": {
+ "version": "4.0.1",
+ "bundled": true,
+ "requires": {
+ "abbrev": "1",
+ "osenv": "^0.1.4"
+ }
+ },
+ "npm-bundled": {
+ "version": "1.0.6",
+ "bundled": true
+ },
+ "npm-packlist": {
+ "version": "1.4.6",
+ "bundled": true,
+ "requires": {
+ "ignore-walk": "^3.0.1",
+ "npm-bundled": "^1.0.1"
+ }
+ },
+ "npmlog": {
+ "version": "4.1.2",
+ "bundled": true,
+ "requires": {
+ "are-we-there-yet": "~1.1.2",
+ "console-control-strings": "~1.1.0",
+ "gauge": "~2.7.3",
+ "set-blocking": "~2.0.0"
+ }
+ },
+ "number-is-nan": {
+ "version": "1.0.1",
+ "bundled": true
+ },
+ "object-assign": {
+ "version": "4.1.1",
+ "bundled": true
+ },
+ "once": {
+ "version": "1.4.0",
+ "bundled": true,
+ "requires": {
+ "wrappy": "1"
+ }
+ },
+ "os-homedir": {
+ "version": "1.0.2",
+ "bundled": true
+ },
+ "os-tmpdir": {
+ "version": "1.0.2",
+ "bundled": true
+ },
+ "osenv": {
+ "version": "0.1.5",
+ "bundled": true,
+ "requires": {
+ "os-homedir": "^1.0.0",
+ "os-tmpdir": "^1.0.0"
+ }
+ },
+ "path-is-absolute": {
+ "version": "1.0.1",
+ "bundled": true
+ },
+ "process-nextick-args": {
+ "version": "2.0.1",
+ "bundled": true
+ },
+ "rc": {
+ "version": "1.2.8",
+ "bundled": true,
+ "requires": {
+ "deep-extend": "^0.6.0",
+ "ini": "~1.3.0",
+ "minimist": "^1.2.0",
+ "strip-json-comments": "~2.0.1"
+ }
+ },
+ "readable-stream": {
+ "version": "2.3.6",
+ "bundled": true,
+ "requires": {
+ "core-util-is": "~1.0.0",
+ "inherits": "~2.0.3",
+ "isarray": "~1.0.0",
+ "process-nextick-args": "~2.0.0",
+ "safe-buffer": "~5.1.1",
+ "string_decoder": "~1.1.1",
+ "util-deprecate": "~1.0.1"
+ }
+ },
+ "rimraf": {
+ "version": "2.7.1",
+ "bundled": true,
+ "requires": {
+ "glob": "^7.1.3"
+ }
+ },
+ "safe-buffer": {
+ "version": "5.1.2",
+ "bundled": true
+ },
+ "safer-buffer": {
+ "version": "2.1.2",
+ "bundled": true
+ },
+ "sax": {
+ "version": "1.2.4",
+ "bundled": true
+ },
+ "semver": {
+ "version": "5.7.1",
+ "bundled": true
+ },
+ "set-blocking": {
+ "version": "2.0.0",
+ "bundled": true
+ },
+ "signal-exit": {
+ "version": "3.0.2",
+ "bundled": true
+ },
+ "string-width": {
+ "version": "1.0.2",
+ "bundled": true,
+ "requires": {
+ "code-point-at": "^1.0.0",
+ "is-fullwidth-code-point": "^1.0.0",
+ "strip-ansi": "^3.0.0"
+ }
+ },
+ "string_decoder": {
+ "version": "1.1.1",
+ "bundled": true,
+ "requires": {
+ "safe-buffer": "~5.1.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "3.0.1",
+ "bundled": true,
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ },
+ "strip-json-comments": {
+ "version": "2.0.1",
+ "bundled": true
+ },
+ "tar": {
+ "version": "4.4.13",
+ "bundled": true,
+ "requires": {
+ "chownr": "^1.1.1",
+ "fs-minipass": "^1.2.5",
+ "minipass": "^2.8.6",
+ "minizlib": "^1.2.1",
+ "mkdirp": "^0.5.0",
+ "safe-buffer": "^5.1.2",
+ "yallist": "^3.0.3"
+ }
+ },
+ "util-deprecate": {
+ "version": "1.0.2",
+ "bundled": true
+ },
+ "wide-align": {
+ "version": "1.1.3",
+ "bundled": true,
+ "requires": {
+ "string-width": "^1.0.2 || 2"
+ }
+ },
+ "wrappy": {
+ "version": "1.0.2",
+ "bundled": true
+ },
+ "yallist": {
+ "version": "3.1.1",
+ "bundled": true
+ }
+ }
+ },
+ "grpc-tools": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/grpc-tools/-/grpc-tools-1.8.1.tgz",
+ "integrity": "sha512-CvZLshEDbum8ZtB8r3bn6JsrHs3L7S1jf7PTa02nZSLmcLTKbiXH5UYrte06Kh7SdzFmkxPMaOsys2rCs+HRjA==",
+ "dev": true,
+ "requires": {
+ "node-pre-gyp": "^0.12.0"
+ },
+ "dependencies": {
+ "abbrev": {
+ "version": "1.1.1",
+ "bundled": true,
+ "dev": true
+ },
+ "ansi-regex": {
+ "version": "2.1.1",
+ "bundled": true,
+ "dev": true
+ },
+ "aproba": {
+ "version": "1.2.0",
+ "bundled": true,
+ "dev": true
+ },
+ "are-we-there-yet": {
+ "version": "1.1.5",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "delegates": "^1.0.0",
+ "readable-stream": "^2.0.6"
+ }
+ },
+ "balanced-match": {
+ "version": "1.0.0",
+ "bundled": true,
+ "dev": true
+ },
+ "brace-expansion": {
+ "version": "1.1.11",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "chownr": {
+ "version": "1.1.1",
+ "bundled": true,
+ "dev": true
+ },
+ "code-point-at": {
+ "version": "1.1.0",
+ "bundled": true,
+ "dev": true
+ },
+ "concat-map": {
+ "version": "0.0.1",
+ "bundled": true,
+ "dev": true
+ },
+ "console-control-strings": {
+ "version": "1.1.0",
+ "bundled": true,
+ "dev": true
+ },
+ "core-util-is": {
+ "version": "1.0.2",
+ "bundled": true,
+ "dev": true
+ },
+ "debug": {
+ "version": "2.6.9",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "deep-extend": {
+ "version": "0.6.0",
+ "bundled": true,
+ "dev": true
+ },
+ "delegates": {
+ "version": "1.0.0",
+ "bundled": true,
+ "dev": true
+ },
+ "detect-libc": {
+ "version": "1.0.3",
+ "bundled": true,
+ "dev": true
+ },
+ "fs-minipass": {
+ "version": "1.2.5",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "minipass": "^2.2.1"
+ }
+ },
+ "fs.realpath": {
+ "version": "1.0.0",
+ "bundled": true,
+ "dev": true
+ },
+ "gauge": {
+ "version": "2.7.4",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "aproba": "^1.0.3",
+ "console-control-strings": "^1.0.0",
+ "has-unicode": "^2.0.0",
+ "object-assign": "^4.1.0",
+ "signal-exit": "^3.0.0",
+ "string-width": "^1.0.1",
+ "strip-ansi": "^3.0.1",
+ "wide-align": "^1.1.0"
+ }
+ },
+ "glob": {
+ "version": "7.1.3",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ },
+ "has-unicode": {
+ "version": "2.0.1",
+ "bundled": true,
+ "dev": true
+ },
+ "iconv-lite": {
+ "version": "0.4.24",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ }
+ },
+ "ignore-walk": {
+ "version": "3.0.1",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "minimatch": "^3.0.4"
+ }
+ },
+ "inflight": {
+ "version": "1.0.6",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "inherits": {
+ "version": "2.0.3",
+ "bundled": true,
+ "dev": true
+ },
+ "ini": {
+ "version": "1.3.5",
+ "bundled": true,
+ "dev": true
+ },
+ "is-fullwidth-code-point": {
+ "version": "1.0.0",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "number-is-nan": "^1.0.0"
+ }
+ },
+ "isarray": {
+ "version": "1.0.0",
+ "bundled": true,
+ "dev": true
+ },
+ "minimatch": {
+ "version": "3.0.4",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "brace-expansion": "^1.1.7"
+ }
+ },
+ "minimist": {
+ "version": "0.0.8",
+ "bundled": true,
+ "dev": true
+ },
+ "minipass": {
+ "version": "2.3.5",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "safe-buffer": "^5.1.2",
+ "yallist": "^3.0.0"
+ }
+ },
+ "minizlib": {
+ "version": "1.2.1",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "minipass": "^2.2.1"
+ }
+ },
+ "mkdirp": {
+ "version": "0.5.1",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "minimist": "0.0.8"
+ }
+ },
+ "ms": {
+ "version": "2.0.0",
+ "bundled": true,
+ "dev": true
+ },
+ "needle": {
+ "version": "2.2.4",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "debug": "^2.1.2",
+ "iconv-lite": "^0.4.4",
+ "sax": "^1.2.4"
+ }
+ },
+ "node-pre-gyp": {
+ "version": "0.12.0",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "detect-libc": "^1.0.2",
+ "mkdirp": "^0.5.1",
+ "needle": "^2.2.1",
+ "nopt": "^4.0.1",
+ "npm-packlist": "^1.1.6",
+ "npmlog": "^4.0.2",
+ "rc": "^1.2.7",
+ "rimraf": "^2.6.1",
+ "semver": "^5.3.0",
+ "tar": "^4"
+ }
+ },
+ "nopt": {
+ "version": "4.0.1",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "abbrev": "1",
+ "osenv": "^0.1.4"
+ }
+ },
+ "npm-bundled": {
+ "version": "1.0.6",
+ "bundled": true,
+ "dev": true
+ },
+ "npm-packlist": {
+ "version": "1.4.1",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "ignore-walk": "^3.0.1",
+ "npm-bundled": "^1.0.1"
+ }
+ },
+ "npmlog": {
+ "version": "4.1.2",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "are-we-there-yet": "~1.1.2",
+ "console-control-strings": "~1.1.0",
+ "gauge": "~2.7.3",
+ "set-blocking": "~2.0.0"
+ }
+ },
+ "number-is-nan": {
+ "version": "1.0.1",
+ "bundled": true,
+ "dev": true
+ },
+ "object-assign": {
+ "version": "4.1.1",
+ "bundled": true,
+ "dev": true
+ },
+ "once": {
+ "version": "1.4.0",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "wrappy": "1"
+ }
+ },
+ "os-homedir": {
+ "version": "1.0.2",
+ "bundled": true,
+ "dev": true
+ },
+ "os-tmpdir": {
+ "version": "1.0.2",
+ "bundled": true,
+ "dev": true
+ },
+ "osenv": {
+ "version": "0.1.5",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "os-homedir": "^1.0.0",
+ "os-tmpdir": "^1.0.0"
+ }
+ },
+ "path-is-absolute": {
+ "version": "1.0.1",
+ "bundled": true,
+ "dev": true
+ },
+ "process-nextick-args": {
+ "version": "2.0.0",
+ "bundled": true,
+ "dev": true
+ },
+ "rc": {
+ "version": "1.2.8",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "deep-extend": "^0.6.0",
+ "ini": "~1.3.0",
+ "minimist": "^1.2.0",
+ "strip-json-comments": "~2.0.1"
+ },
+ "dependencies": {
+ "minimist": {
+ "version": "1.2.0",
+ "bundled": true,
+ "dev": true
+ }
+ }
+ },
+ "readable-stream": {
+ "version": "2.3.6",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "core-util-is": "~1.0.0",
+ "inherits": "~2.0.3",
+ "isarray": "~1.0.0",
+ "process-nextick-args": "~2.0.0",
+ "safe-buffer": "~5.1.1",
+ "string_decoder": "~1.1.1",
+ "util-deprecate": "~1.0.1"
+ }
+ },
+ "rimraf": {
+ "version": "2.6.3",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "glob": "^7.1.3"
+ }
+ },
+ "safe-buffer": {
+ "version": "5.1.2",
+ "bundled": true,
+ "dev": true
+ },
+ "safer-buffer": {
+ "version": "2.1.2",
+ "bundled": true,
+ "dev": true
+ },
+ "sax": {
+ "version": "1.2.4",
+ "bundled": true,
+ "dev": true
+ },
+ "semver": {
+ "version": "5.6.0",
+ "bundled": true,
+ "dev": true
+ },
+ "set-blocking": {
+ "version": "2.0.0",
+ "bundled": true,
+ "dev": true
+ },
+ "signal-exit": {
+ "version": "3.0.2",
+ "bundled": true,
+ "dev": true
+ },
+ "string-width": {
+ "version": "1.0.2",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "code-point-at": "^1.0.0",
+ "is-fullwidth-code-point": "^1.0.0",
+ "strip-ansi": "^3.0.0"
+ }
+ },
+ "string_decoder": {
+ "version": "1.1.1",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "safe-buffer": "~5.1.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "3.0.1",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ },
+ "strip-json-comments": {
+ "version": "2.0.1",
+ "bundled": true,
+ "dev": true
+ },
+ "tar": {
+ "version": "4.4.8",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "chownr": "^1.1.1",
+ "fs-minipass": "^1.2.5",
+ "minipass": "^2.3.4",
+ "minizlib": "^1.1.1",
+ "mkdirp": "^0.5.0",
+ "safe-buffer": "^5.1.2",
+ "yallist": "^3.0.2"
+ }
+ },
+ "util-deprecate": {
+ "version": "1.0.2",
+ "bundled": true,
+ "dev": true
+ },
+ "wide-align": {
+ "version": "1.1.3",
+ "bundled": true,
+ "dev": true,
+ "requires": {
+ "string-width": "^1.0.2 || 2"
+ }
+ },
+ "wrappy": {
+ "version": "1.0.2",
+ "bundled": true,
+ "dev": true
+ },
+ "yallist": {
+ "version": "3.0.3",
+ "bundled": true,
+ "dev": true
+ }
+ }
+ },
+ "gzip-size": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-5.1.1.tgz",
+ "integrity": "sha512-FNHi6mmoHvs1mxZAds4PpdCS6QG8B4C1krxJsMutgxl5t3+GlRTzzI3NEkifXx2pVsOvJdOGSmIgDhQ55FwdPA==",
+ "dev": true,
+ "requires": {
+ "duplexer": "^0.1.1",
+ "pify": "^4.0.1"
+ }
+ },
+ "hammerjs": {
+ "version": "2.0.8",
+ "resolved": "https://registry.npmjs.org/hammerjs/-/hammerjs-2.0.8.tgz",
+ "integrity": "sha1-BO93hiz/K7edMPdpIJWTAiK/YPE=",
+ "dev": true
+ },
+ "har-schema": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz",
+ "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=",
+ "dev": true
+ },
+ "har-validator": {
+ "version": "5.1.3",
+ "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.3.tgz",
+ "integrity": "sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g==",
+ "dev": true,
+ "requires": {
+ "ajv": "^6.5.5",
+ "har-schema": "^2.0.0"
+ }
+ },
+ "has": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
+ "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
+ "requires": {
+ "function-bind": "^1.1.1"
+ }
+ },
+ "has-ansi": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz",
+ "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ },
+ "has-color": {
+ "version": "0.1.7",
+ "resolved": "https://registry.npmjs.org/has-color/-/has-color-0.1.7.tgz",
+ "integrity": "sha1-ZxRKUmDDT8PMpnfQQdr1L+e3iy8="
+ },
+ "has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=",
+ "dev": true
+ },
+ "has-symbols": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.1.tgz",
+ "integrity": "sha512-PLcsoqu++dmEIZB+6totNFKq/7Do+Z0u4oT0zKOJNl3lYK6vGwwu2hjHs+68OEZbTjiUE9bgOABXbP/GvrS0Kg==",
+ "dev": true
+ },
+ "has-unicode": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz",
+ "integrity": "sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk=",
+ "dev": true
+ },
+ "has-value": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz",
+ "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=",
+ "dev": true,
+ "requires": {
+ "get-value": "^2.0.6",
+ "has-values": "^1.0.0",
+ "isobject": "^3.0.0"
+ }
+ },
+ "has-values": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz",
+ "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=",
+ "dev": true,
+ "requires": {
+ "is-number": "^3.0.0",
+ "kind-of": "^4.0.0"
+ },
+ "dependencies": {
+ "is-buffer": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz",
+ "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==",
+ "dev": true
+ },
+ "kind-of": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz",
+ "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "hash-base": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.0.4.tgz",
+ "integrity": "sha1-X8hoaEfs1zSZQDMZprCj8/auSRg=",
+ "dev": true,
+ "requires": {
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "hash.js": {
+ "version": "1.1.7",
+ "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz",
+ "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==",
+ "dev": true,
+ "requires": {
+ "inherits": "^2.0.3",
+ "minimalistic-assert": "^1.0.1"
+ }
+ },
+ "hasha": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/hasha/-/hasha-3.0.0.tgz",
+ "integrity": "sha1-UqMvq4Vp1BymmmH/GiFPjrfIvTk=",
+ "dev": true,
+ "requires": {
+ "is-stream": "^1.0.1"
+ }
+ },
+ "he": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/he/-/he-1.1.1.tgz",
+ "integrity": "sha1-k0EP0hsAlzUVH4howvJx80J+I/0=",
+ "dev": true
+ },
+ "history": {
+ "version": "4.10.1",
+ "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz",
+ "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==",
+ "dev": true,
+ "requires": {
+ "@babel/runtime": "^7.1.2",
+ "loose-envify": "^1.2.0",
+ "resolve-pathname": "^3.0.0",
+ "tiny-invariant": "^1.0.2",
+ "tiny-warning": "^1.0.0",
+ "value-equal": "^1.0.1"
+ }
+ },
+ "hmac-drbg": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz",
+ "integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=",
+ "dev": true,
+ "requires": {
+ "hash.js": "^1.0.3",
+ "minimalistic-assert": "^1.0.0",
+ "minimalistic-crypto-utils": "^1.0.1"
+ }
+ },
+ "hoek": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/hoek/-/hoek-4.2.1.tgz",
+ "integrity": "sha512-QLg82fGkfnJ/4iy1xZ81/9SIJiq1NGFUMGs6ParyjBZr6jW2Ufj/snDqTHixNlHdPNwN2RLVD0Pi3igeK9+JfA==",
+ "dev": true
+ },
+ "hoist-non-react-statics": {
+ "version": "3.3.2",
+ "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz",
+ "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==",
+ "dev": true,
+ "requires": {
+ "react-is": "^16.7.0"
+ }
+ },
+ "hoopy": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/hoopy/-/hoopy-0.1.4.tgz",
+ "integrity": "sha512-HRcs+2mr52W0K+x8RzcLzuPPmVIKMSv97RGHy0Ea9y/mpcaK+xTrjICA04KAHi4GRzxliNqNJEFYWHghy3rSfQ==",
+ "dev": true
+ },
+ "hosted-git-info": {
+ "version": "2.8.5",
+ "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.5.tgz",
+ "integrity": "sha512-kssjab8CvdXfcXMXVcvsXum4Hwdq9XGtRD3TteMEvEbq0LXyiNQr6AprqKqfeaDXze7SxWvRxdpwE6ku7ikLkg==",
+ "dev": true
+ },
+ "html-element-map": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/html-element-map/-/html-element-map-1.2.0.tgz",
+ "integrity": "sha512-0uXq8HsuG1v2TmQ8QkIhzbrqeskE4kn52Q18QJ9iAA/SnHoEKXWiUxHQtclRsCFWEUD2So34X+0+pZZu862nnw==",
+ "dev": true,
+ "requires": {
+ "array-filter": "^1.0.0"
+ }
+ },
+ "html-encoding-sniffer": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-1.0.2.tgz",
+ "integrity": "sha512-71lZziiDnsuabfdYiUeWdCVyKuqwWi23L8YeIgV9jSSZHCtb6wB1BKWooH7L3tn4/FuZJMVWyNaIDr4RGmaSYw==",
+ "dev": true,
+ "requires": {
+ "whatwg-encoding": "^1.0.1"
+ }
+ },
+ "html-escaper": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.0.tgz",
+ "integrity": "sha512-a4u9BeERWGu/S8JiWEAQcdrg9v4QArtP9keViQjGMdff20fBdd8waotXaNmODqBe6uZ3Nafi7K/ho4gCQHV3Ig==",
+ "dev": true
+ },
+ "htmlparser2": {
+ "version": "3.10.1",
+ "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.10.1.tgz",
+ "integrity": "sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ==",
+ "dev": true,
+ "requires": {
+ "domelementtype": "^1.3.1",
+ "domhandler": "^2.3.0",
+ "domutils": "^1.5.1",
+ "entities": "^1.1.1",
+ "inherits": "^2.0.1",
+ "readable-stream": "^3.1.1"
+ },
+ "dependencies": {
+ "readable-stream": {
+ "version": "3.5.0",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.5.0.tgz",
+ "integrity": "sha512-gSz026xs2LfxBPudDuI41V1lka8cxg64E66SGe78zJlsUofOg/yqwezdIcdfwik6B4h8LFmWPA9ef9X3FiNFLA==",
+ "dev": true,
+ "requires": {
+ "inherits": "^2.0.3",
+ "string_decoder": "^1.1.1",
+ "util-deprecate": "^1.0.1"
+ }
+ }
+ }
+ },
+ "http-errors": {
+ "version": "1.7.2",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz",
+ "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==",
+ "requires": {
+ "depd": "~1.1.2",
+ "inherits": "2.0.3",
+ "setprototypeof": "1.1.1",
+ "statuses": ">= 1.5.0 < 2",
+ "toidentifier": "1.0.0"
+ }
+ },
+ "http-signature": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz",
+ "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=",
+ "dev": true,
+ "requires": {
+ "assert-plus": "^1.0.0",
+ "jsprim": "^1.2.2",
+ "sshpk": "^1.7.0"
+ }
+ },
+ "https": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/https/-/https-1.0.0.tgz",
+ "integrity": "sha1-PDfHrhqO65ZpBKKtHpdaGUt+06Q="
+ },
+ "https-browserify": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz",
+ "integrity": "sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM=",
+ "dev": true
+ },
+ "husky": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/husky/-/husky-2.7.0.tgz",
+ "integrity": "sha512-LIi8zzT6PyFpcYKdvWRCn/8X+6SuG2TgYYMrM6ckEYhlp44UcEduVymZGIZNLiwOUjrEud+78w/AsAiqJA/kRg==",
+ "dev": true,
+ "requires": {
+ "cosmiconfig": "^5.2.0",
+ "execa": "^1.0.0",
+ "find-up": "^3.0.0",
+ "get-stdin": "^7.0.0",
+ "is-ci": "^2.0.0",
+ "pkg-dir": "^4.1.0",
+ "please-upgrade-node": "^3.1.1",
+ "read-pkg": "^5.1.1",
+ "run-node": "^1.0.0",
+ "slash": "^3.0.0"
+ },
+ "dependencies": {
+ "locate-path": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
+ "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
+ "dev": true,
+ "requires": {
+ "p-locate": "^4.1.0"
+ }
+ },
+ "p-locate": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
+ "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
+ "dev": true,
+ "requires": {
+ "p-limit": "^2.2.0"
+ }
+ },
+ "parse-json": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.0.0.tgz",
+ "integrity": "sha512-OOY5b7PAEFV0E2Fir1KOkxchnZNCdowAJgQ5NuxjpBKTRP3pQhwkrkxqQjeoKJ+fO7bCpmIZaogI4eZGDMEGOw==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.0.0",
+ "error-ex": "^1.3.1",
+ "json-parse-better-errors": "^1.0.1",
+ "lines-and-columns": "^1.1.6"
+ }
+ },
+ "path-exists": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
+ "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
+ "dev": true
+ },
+ "pkg-dir": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz",
+ "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==",
+ "dev": true,
+ "requires": {
+ "find-up": "^4.0.0"
+ },
+ "dependencies": {
+ "find-up": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
+ "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
+ "dev": true,
+ "requires": {
+ "locate-path": "^5.0.0",
+ "path-exists": "^4.0.0"
+ }
+ }
+ }
+ },
+ "read-pkg": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz",
+ "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==",
+ "dev": true,
+ "requires": {
+ "@types/normalize-package-data": "^2.4.0",
+ "normalize-package-data": "^2.5.0",
+ "parse-json": "^5.0.0",
+ "type-fest": "^0.6.0"
+ }
+ },
+ "slash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
+ "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
+ "dev": true
+ }
+ }
+ },
+ "iconv-lite": {
+ "version": "0.4.24",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+ "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+ "requires": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ }
+ },
+ "icss-utils": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-4.1.1.tgz",
+ "integrity": "sha512-4aFq7wvWyMHKgxsH8QQtGpvbASCf+eM3wPRLI6R+MgAnTCZ6STYsRvttLvRWK0Nfif5piF394St3HeJDaljGPA==",
+ "dev": true,
+ "requires": {
+ "postcss": "^7.0.14"
+ }
+ },
+ "ieee754": {
+ "version": "1.1.13",
+ "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz",
+ "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==",
+ "dev": true
+ },
+ "iferr": {
+ "version": "0.1.5",
+ "resolved": "https://registry.npmjs.org/iferr/-/iferr-0.1.5.tgz",
+ "integrity": "sha1-xg7taebY/bazEEofy8ocGS3FtQE=",
+ "dev": true
+ },
+ "ignore": {
+ "version": "4.0.6",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz",
+ "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==",
+ "dev": true
+ },
+ "ignore-by-default": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/ignore-by-default/-/ignore-by-default-1.0.1.tgz",
+ "integrity": "sha1-SMptcvbGo68Aqa1K5odr44ieKwk=",
+ "dev": true
+ },
+ "ignore-styles": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ignore-styles/-/ignore-styles-5.0.1.tgz",
+ "integrity": "sha1-tJ7yJ0va/NikiAqWa/440aC/RnE=",
+ "dev": true
+ },
+ "ignore-walk": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/ignore-walk/-/ignore-walk-3.0.3.tgz",
+ "integrity": "sha512-m7o6xuOaT1aqheYHKf8W6J5pYH85ZI9w077erOzLje3JsB1gkafkAhHHY19dqjulgIZHFm32Cp5uNZgcQqdJKw==",
+ "dev": true,
+ "requires": {
+ "minimatch": "^3.0.4"
+ }
+ },
+ "image-size": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/image-size/-/image-size-0.5.5.tgz",
+ "integrity": "sha1-Cd/Uq50g4p6xw+gLiZA3jfnjy5w=",
+ "dev": true,
+ "optional": true
+ },
+ "import-fresh": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.1.tgz",
+ "integrity": "sha512-6e1q1cnWP2RXD9/keSkxHScg508CdXqXWgWBaETNhyuBFz+kUZlKboh+ISK+bU++DmbHimVBrOz/zzPe0sZ3sQ==",
+ "dev": true,
+ "requires": {
+ "parent-module": "^1.0.0",
+ "resolve-from": "^4.0.0"
+ }
+ },
+ "import-lazy": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz",
+ "integrity": "sha1-BWmOPUXIjo1+nZLLBYTnfwlvPkM=",
+ "dev": true
+ },
+ "import-local": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/import-local/-/import-local-1.0.0.tgz",
+ "integrity": "sha512-vAaZHieK9qjGo58agRBg+bhHX3hoTZU/Oa3GESWLz7t1U62fk63aHuDJJEteXoDeTCcPmUT+z38gkHPZkkmpmQ==",
+ "dev": true,
+ "requires": {
+ "pkg-dir": "^2.0.0",
+ "resolve-cwd": "^2.0.0"
+ },
+ "dependencies": {
+ "find-up": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz",
+ "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=",
+ "dev": true,
+ "requires": {
+ "locate-path": "^2.0.0"
+ }
+ },
+ "locate-path": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz",
+ "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=",
+ "dev": true,
+ "requires": {
+ "p-locate": "^2.0.0",
+ "path-exists": "^3.0.0"
+ }
+ },
+ "p-limit": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz",
+ "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==",
+ "dev": true,
+ "requires": {
+ "p-try": "^1.0.0"
+ }
+ },
+ "p-locate": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz",
+ "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=",
+ "dev": true,
+ "requires": {
+ "p-limit": "^1.1.0"
+ }
+ },
+ "p-try": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz",
+ "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=",
+ "dev": true
+ },
+ "pkg-dir": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-2.0.0.tgz",
+ "integrity": "sha1-9tXREJ4Z1j7fQo4L1X4Sd3YVM0s=",
+ "dev": true,
+ "requires": {
+ "find-up": "^2.1.0"
+ }
+ }
+ }
+ },
+ "imurmurhash": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+ "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=",
+ "dev": true
+ },
+ "indent-string": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-3.2.0.tgz",
+ "integrity": "sha1-Sl/W0nzDMvN+VBmlBNu4NxBckok=",
+ "dev": true
+ },
+ "indexes-of": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/indexes-of/-/indexes-of-1.0.1.tgz",
+ "integrity": "sha1-8w9xbI4r00bHtn0985FVZqfAVgc=",
+ "dev": true
+ },
+ "infer-owner": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz",
+ "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==",
+ "dev": true
+ },
+ "inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
+ "requires": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
+ },
+ "ini": {
+ "version": "1.3.5",
+ "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz",
+ "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==",
+ "dev": true
+ },
+ "inquirer": {
+ "version": "6.5.2",
+ "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-6.5.2.tgz",
+ "integrity": "sha512-cntlB5ghuB0iuO65Ovoi8ogLHiWGs/5yNrtUcKjFhSSiVeAIVpD7koaSU9RM8mpXw5YDi9RdYXGQMaOURB7ycQ==",
+ "dev": true,
+ "requires": {
+ "ansi-escapes": "^3.2.0",
+ "chalk": "^2.4.2",
+ "cli-cursor": "^2.1.0",
+ "cli-width": "^2.0.0",
+ "external-editor": "^3.0.3",
+ "figures": "^2.0.0",
+ "lodash": "^4.17.12",
+ "mute-stream": "0.0.7",
+ "run-async": "^2.2.0",
+ "rxjs": "^6.4.0",
+ "string-width": "^2.1.0",
+ "strip-ansi": "^5.1.0",
+ "through": "^2.3.6"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz",
+ "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=",
+ "dev": true
+ },
+ "is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=",
+ "dev": true
+ },
+ "string-width": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz",
+ "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==",
+ "dev": true,
+ "requires": {
+ "is-fullwidth-code-point": "^2.0.0",
+ "strip-ansi": "^4.0.0"
+ },
+ "dependencies": {
+ "strip-ansi": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz",
+ "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^3.0.0"
+ }
+ }
+ }
+ },
+ "strip-ansi": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
+ "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^4.1.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
+ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
+ "dev": true
+ }
+ }
+ }
+ }
+ },
+ "internal-slot": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.2.tgz",
+ "integrity": "sha512-2cQNfwhAfJIkU4KZPkDI+Gj5yNNnbqi40W9Gge6dfnk4TocEVm00B3bdiL+JINrbGJil2TeHvM4rETGzk/f/0g==",
+ "dev": true,
+ "requires": {
+ "es-abstract": "^1.17.0-next.1",
+ "has": "^1.0.3",
+ "side-channel": "^1.0.2"
+ }
+ },
+ "invariant": {
+ "version": "2.2.4",
+ "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz",
+ "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==",
+ "dev": true,
+ "requires": {
+ "loose-envify": "^1.0.0"
+ }
+ },
+ "invert-kv": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-1.0.0.tgz",
+ "integrity": "sha1-EEqOSqym09jNFXqO+L+rLXo//bY="
+ },
+ "ipaddr.js": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.0.tgz",
+ "integrity": "sha512-M4Sjn6N/+O6/IXSJseKqHoFc+5FdGJ22sXqnjTpdZweHK64MzEPAyQZyEU3R/KRv2GLoa7nNtg/C2Ev6m7z+eA=="
+ },
+ "irregular-plurals": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/irregular-plurals/-/irregular-plurals-2.0.0.tgz",
+ "integrity": "sha512-Y75zBYLkh0lJ9qxeHlMjQ7bSbyiSqNW/UOPWDmzC7cXskL1hekSITh1Oc6JV0XCWWZ9DE8VYSB71xocLk3gmGw==",
+ "dev": true
+ },
+ "is-accessor-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+ "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.0.2"
+ }
+ },
+ "is-arguments": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.0.4.tgz",
+ "integrity": "sha512-xPh0Rmt8NE65sNzvyUmWgI1tz3mKq74lGA0mL8LYZcoIzKOzDh6HmrYm3d18k60nHerC8A9Km8kYu87zfSFnLA==",
+ "dev": true
+ },
+ "is-arrayish": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
+ "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=",
+ "dev": true
+ },
+ "is-binary-path": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz",
+ "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=",
+ "dev": true,
+ "requires": {
+ "binary-extensions": "^1.0.0"
+ }
+ },
+ "is-boolean-object": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.0.1.tgz",
+ "integrity": "sha512-TqZuVwa/sppcrhUCAYkGBk7w0yxfQQnxq28fjkO53tnK9FQXmdwz2JS5+GjsWQ6RByES1K40nI+yDic5c9/aAQ==",
+ "dev": true
+ },
+ "is-buffer": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.4.tgz",
+ "integrity": "sha512-Kq1rokWXOPXWuaMAqZiJW4XxsmD9zGx9q4aePabbn3qCRGedtH7Cm+zV8WETitMfu1wdh+Rvd6w5egwSngUX2A=="
+ },
+ "is-callable": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.1.5.tgz",
+ "integrity": "sha512-ESKv5sMCJB2jnHTWZ3O5itG+O128Hsus4K4Qh1h2/cgn2vbgnLSVqfV46AeJA9D5EeeLa9w81KUXMtn34zhX+Q==",
+ "dev": true
+ },
+ "is-ci": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz",
+ "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==",
+ "dev": true,
+ "requires": {
+ "ci-info": "^2.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+ "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.0.2"
+ }
+ },
+ "is-date-object": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz",
+ "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==",
+ "dev": true
+ },
+ "is-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+ "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^0.1.6",
+ "is-data-descriptor": "^0.1.4",
+ "kind-of": "^5.0.0"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+ "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==",
+ "dev": true
+ }
+ }
+ },
+ "is-directory": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/is-directory/-/is-directory-0.3.1.tgz",
+ "integrity": "sha1-YTObbyR1/Hcv2cnYP1yFddwVSuE=",
+ "dev": true
+ },
+ "is-expression": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-expression/-/is-expression-3.0.0.tgz",
+ "integrity": "sha1-Oayqa+f9HzRx3ELHQW5hwkMXrJ8=",
+ "requires": {
+ "acorn": "~4.0.2",
+ "object-assign": "^4.0.1"
+ },
+ "dependencies": {
+ "acorn": {
+ "version": "4.0.13",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-4.0.13.tgz",
+ "integrity": "sha1-EFSVrlNh1pe9GVyCUZLhrX8lN4c="
+ }
+ }
+ },
+ "is-extendable": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
+ "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=",
+ "dev": true
+ },
+ "is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=",
+ "dev": true
+ },
+ "is-fullwidth-code-point": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz",
+ "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=",
+ "requires": {
+ "number-is-nan": "^1.0.0"
+ }
+ },
+ "is-generator-function": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.7.tgz",
+ "integrity": "sha512-YZc5EwyO4f2kWCax7oegfuSr9mFz1ZvieNYBEjmukLxgXfBUbxAWGVF7GZf0zidYtoBl3WvC07YK0wT76a+Rtw==",
+ "dev": true
+ },
+ "is-glob": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz",
+ "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==",
+ "dev": true,
+ "requires": {
+ "is-extglob": "^2.1.1"
+ }
+ },
+ "is-installed-globally": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.1.0.tgz",
+ "integrity": "sha1-Df2Y9akRFxbdU13aZJL2e/PSWoA=",
+ "dev": true,
+ "requires": {
+ "global-dirs": "^0.1.0",
+ "is-path-inside": "^1.0.0"
+ }
+ },
+ "is-npm": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-1.0.0.tgz",
+ "integrity": "sha1-8vtjpl5JBbQGyGBydloaTceTufQ=",
+ "dev": true
+ },
+ "is-number": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz",
+ "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.0.2"
+ }
+ },
+ "is-number-object": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.4.tgz",
+ "integrity": "sha512-zohwelOAur+5uXtk8O3GPQ1eAcu4ZX3UwxQhUlfFFMNpUd83gXgjbhJh6HmB6LUNV/ieOLQuDwJO3dWJosUeMw==",
+ "dev": true
+ },
+ "is-obj": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz",
+ "integrity": "sha1-PkcprB9f3gJc19g6iW2rn09n2w8=",
+ "dev": true
+ },
+ "is-object": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-object/-/is-object-1.0.1.tgz",
+ "integrity": "sha1-iVJojF7C/9awPsyF52ngKQMINHA=",
+ "dev": true
+ },
+ "is-path-inside": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-1.0.1.tgz",
+ "integrity": "sha1-jvW33lBDej/cprToZe96pVy0gDY=",
+ "dev": true,
+ "requires": {
+ "path-is-inside": "^1.0.1"
+ }
+ },
+ "is-plain-obj": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz",
+ "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=",
+ "dev": true
+ },
+ "is-plain-object": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz",
+ "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==",
+ "dev": true,
+ "requires": {
+ "isobject": "^3.0.1"
+ }
+ },
+ "is-promise": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.1.0.tgz",
+ "integrity": "sha1-eaKp7OfwlugPNtKy87wWwf9L8/o="
+ },
+ "is-redirect": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-redirect/-/is-redirect-1.0.0.tgz",
+ "integrity": "sha1-HQPd7VO9jbDzDCbk+V02/HyH3CQ=",
+ "dev": true
+ },
+ "is-regex": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.0.5.tgz",
+ "integrity": "sha512-vlKW17SNq44owv5AQR3Cq0bQPEb8+kF3UKZ2fiZNOWtztYE5i0CzCZxFDwO58qAOWtxdBRVO/V5Qin1wjCqFYQ==",
+ "requires": {
+ "has": "^1.0.3"
+ }
+ },
+ "is-retry-allowed": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz",
+ "integrity": "sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg==",
+ "dev": true
+ },
+ "is-stream": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz",
+ "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=",
+ "dev": true
+ },
+ "is-string": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.5.tgz",
+ "integrity": "sha512-buY6VNRjhQMiF1qWDouloZlQbRhDPCebwxSjxMjxgemYT46YMd2NR0/H+fBhEfWX4A/w9TBJ+ol+okqJKFE6vQ==",
+ "dev": true
+ },
+ "is-subset": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/is-subset/-/is-subset-0.1.1.tgz",
+ "integrity": "sha1-ilkRfZMt4d4A8kX83TnOQ/HpOaY=",
+ "dev": true
+ },
+ "is-symbol": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz",
+ "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==",
+ "dev": true,
+ "requires": {
+ "has-symbols": "^1.0.1"
+ }
+ },
+ "is-typedarray": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz",
+ "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=",
+ "dev": true
+ },
+ "is-windows": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz",
+ "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==",
+ "dev": true
+ },
+ "is-wsl": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz",
+ "integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=",
+ "dev": true
+ },
+ "isarray": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+ "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=",
+ "dev": true
+ },
+ "isemail": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/isemail/-/isemail-2.2.1.tgz",
+ "integrity": "sha1-A1PT2aYpUQgMJiwqoKQrjqjp4qY=",
+ "dev": true
+ },
+ "isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=",
+ "dev": true
+ },
+ "isobject": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
+ "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=",
+ "dev": true
+ },
+ "isomorphic-fetch": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz",
+ "integrity": "sha1-YRrhrPFPXoH3KVB0coGf6XM1WKk=",
+ "dev": true,
+ "requires": {
+ "node-fetch": "^1.0.1",
+ "whatwg-fetch": ">=0.10.0"
+ }
+ },
+ "isstream": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz",
+ "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo="
+ },
+ "istanbul-lib-coverage": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.5.tgz",
+ "integrity": "sha512-8aXznuEPCJvGnMSRft4udDRDtb1V3pkQkMMI5LI+6HuQz5oQ4J2UFn1H82raA3qJtyOLkkwVqICBQkjnGtn5mA==",
+ "dev": true
+ },
+ "istanbul-lib-hook": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-hook/-/istanbul-lib-hook-2.0.7.tgz",
+ "integrity": "sha512-vrRztU9VRRFDyC+aklfLoeXyNdTfga2EI3udDGn4cZ6fpSXpHLV9X6CHvfoMCPtggg8zvDDmC4b9xfu0z6/llA==",
+ "dev": true,
+ "requires": {
+ "append-transform": "^1.0.0"
+ }
+ },
+ "istanbul-lib-instrument": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-3.3.0.tgz",
+ "integrity": "sha512-5nnIN4vo5xQZHdXno/YDXJ0G+I3dAm4XgzfSVTPLQpj/zAV2dV6Juy0yaf10/zrJOJeHoN3fraFe+XRq2bFVZA==",
+ "dev": true,
+ "requires": {
+ "@babel/generator": "^7.4.0",
+ "@babel/parser": "^7.4.3",
+ "@babel/template": "^7.4.0",
+ "@babel/traverse": "^7.4.3",
+ "@babel/types": "^7.4.0",
+ "istanbul-lib-coverage": "^2.0.5",
+ "semver": "^6.0.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
+ "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
+ "dev": true
+ }
+ }
+ },
+ "istanbul-lib-report": {
+ "version": "2.0.8",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-2.0.8.tgz",
+ "integrity": "sha512-fHBeG573EIihhAblwgxrSenp0Dby6tJMFR/HvlerBsrCTD5bkUuoNtn3gVh29ZCS824cGGBPn7Sg7cNk+2xUsQ==",
+ "dev": true,
+ "requires": {
+ "istanbul-lib-coverage": "^2.0.5",
+ "make-dir": "^2.1.0",
+ "supports-color": "^6.1.0"
+ },
+ "dependencies": {
+ "supports-color": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz",
+ "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ }
+ }
+ },
+ "istanbul-lib-source-maps": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-3.0.6.tgz",
+ "integrity": "sha512-R47KzMtDJH6X4/YW9XTx+jrLnZnscW4VpNN+1PViSYTejLVPWv7oov+Duf8YQSPyVRUvueQqz1TcsC6mooZTXw==",
+ "dev": true,
+ "requires": {
+ "debug": "^4.1.1",
+ "istanbul-lib-coverage": "^2.0.5",
+ "make-dir": "^2.1.0",
+ "rimraf": "^2.6.3",
+ "source-map": "^0.6.1"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
+ "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
+ "dev": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ }
+ }
+ },
+ "istanbul-reports": {
+ "version": "2.2.7",
+ "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-2.2.7.tgz",
+ "integrity": "sha512-uu1F/L1o5Y6LzPVSVZXNOoD/KXpJue9aeLRd0sM9uMXfZvzomB0WxVamWb5ue8kA2vVWEmW7EG+A5n3f1kqHKg==",
+ "dev": true,
+ "requires": {
+ "html-escaper": "^2.0.0"
+ }
+ },
+ "items": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/items/-/items-2.1.2.tgz",
+ "integrity": "sha512-kezcEqgB97BGeZZYtX/MA8AG410ptURstvnz5RAgyFZ8wQFPMxHY8GpTq+/ZHKT3frSlIthUq7EvLt9xn3TvXg==",
+ "dev": true
+ },
+ "joi": {
+ "version": "9.2.0",
+ "resolved": "https://registry.npmjs.org/joi/-/joi-9.2.0.tgz",
+ "integrity": "sha1-M4WseQGSEwy+Iw6ALsAskhW7/to=",
+ "dev": true,
+ "requires": {
+ "hoek": "4.x.x",
+ "isemail": "2.x.x",
+ "items": "2.x.x",
+ "moment": "2.x.x",
+ "topo": "2.x.x"
+ }
+ },
+ "js-cookie": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/js-cookie/-/js-cookie-2.2.1.tgz",
+ "integrity": "sha512-HvdH2LzI/EAZcUwA8+0nKNtWHqS+ZmijLA30RwZA0bo7ToCckjK5MkGhjED9KoRcXO6BaGI3I9UIzSA1FKFPOQ==",
+ "dev": true
+ },
+ "js-stringify": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/js-stringify/-/js-stringify-1.0.2.tgz",
+ "integrity": "sha1-Fzb939lyTyijaCrcYjCufk6Weds="
+ },
+ "js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+ "dev": true
+ },
+ "js-yaml": {
+ "version": "3.13.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz",
+ "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==",
+ "dev": true,
+ "requires": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ }
+ },
+ "jsbn": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz",
+ "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=",
+ "dev": true
+ },
+ "jsdom": {
+ "version": "11.12.0",
+ "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-11.12.0.tgz",
+ "integrity": "sha512-y8Px43oyiBM13Zc1z780FrfNLJCXTL40EWlty/LXUtcjykRBNgLlCjWXpfSPBl2iv+N7koQN+dvqszHZgT/Fjw==",
+ "dev": true,
+ "requires": {
+ "abab": "^2.0.0",
+ "acorn": "^5.5.3",
+ "acorn-globals": "^4.1.0",
+ "array-equal": "^1.0.0",
+ "cssom": ">= 0.3.2 < 0.4.0",
+ "cssstyle": "^1.0.0",
+ "data-urls": "^1.0.0",
+ "domexception": "^1.0.1",
+ "escodegen": "^1.9.1",
+ "html-encoding-sniffer": "^1.0.2",
+ "left-pad": "^1.3.0",
+ "nwsapi": "^2.0.7",
+ "parse5": "4.0.0",
+ "pn": "^1.1.0",
+ "request": "^2.87.0",
+ "request-promise-native": "^1.0.5",
+ "sax": "^1.2.4",
+ "symbol-tree": "^3.2.2",
+ "tough-cookie": "^2.3.4",
+ "w3c-hr-time": "^1.0.1",
+ "webidl-conversions": "^4.0.2",
+ "whatwg-encoding": "^1.0.3",
+ "whatwg-mimetype": "^2.1.0",
+ "whatwg-url": "^6.4.1",
+ "ws": "^5.2.0",
+ "xml-name-validator": "^3.0.0"
+ },
+ "dependencies": {
+ "acorn": {
+ "version": "5.7.3",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-5.7.3.tgz",
+ "integrity": "sha512-T/zvzYRfbVojPWahDsE5evJdHb3oJoQfFbsrKM7w5Zcs++Tr257tia3BmMP8XYVjp1S9RZXQMh7gao96BlqZOw==",
+ "dev": true
+ },
+ "acorn-globals": {
+ "version": "4.3.4",
+ "resolved": "https://registry.npmjs.org/acorn-globals/-/acorn-globals-4.3.4.tgz",
+ "integrity": "sha512-clfQEh21R+D0leSbUdWf3OcfqyaCSAQ8Ryq00bofSekfr9W8u1jyYZo6ir0xu9Gtcf7BjcHJpnbZH7JOCpP60A==",
+ "dev": true,
+ "requires": {
+ "acorn": "^6.0.1",
+ "acorn-walk": "^6.0.1"
+ },
+ "dependencies": {
+ "acorn": {
+ "version": "6.4.0",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.0.tgz",
+ "integrity": "sha512-gac8OEcQ2Li1dxIEWGZzsp2BitJxwkwcOm0zHAJLcPJaVvm58FRnk6RkuLRpU1EujipU2ZFODv2P9DLMfnV8mw==",
+ "dev": true
+ }
+ }
+ },
+ "parse5": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/parse5/-/parse5-4.0.0.tgz",
+ "integrity": "sha512-VrZ7eOd3T1Fk4XWNXMgiGBK/z0MG48BWG2uQNU4I72fkQuKUTZpl+u9k+CxEG0twMVzSmXEEz12z5Fnw1jIQFA==",
+ "dev": true
+ },
+ "whatwg-url": {
+ "version": "6.5.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-6.5.0.tgz",
+ "integrity": "sha512-rhRZRqx/TLJQWUpQ6bmrt2UV4f0HCQ463yQuONJqC6fO2VoEb1pTYddbe59SkYq87aoM5A3bdhMZiUiVws+fzQ==",
+ "dev": true,
+ "requires": {
+ "lodash.sortby": "^4.7.0",
+ "tr46": "^1.0.1",
+ "webidl-conversions": "^4.0.2"
+ }
+ }
+ }
+ },
+ "jsesc": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
+ "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==",
+ "dev": true
+ },
+ "json-parse-better-errors": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz",
+ "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==",
+ "dev": true
+ },
+ "json-schema": {
+ "version": "0.2.3",
+ "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz",
+ "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=",
+ "dev": true
+ },
+ "json-schema-traverse": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
+ "dev": true
+ },
+ "json-stable-stringify-without-jsonify": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz",
+ "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=",
+ "dev": true
+ },
+ "json-stringify-safe": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
+ "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=",
+ "dev": true
+ },
+ "json5": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.1.1.tgz",
+ "integrity": "sha512-l+3HXD0GEI3huGq1njuqtzYK8OYJyXMkOLtQ53pjWh89tvWS2h6l+1zMkYWqlb57+SiQodKZyvMEFb2X+KrFhQ==",
+ "dev": true,
+ "requires": {
+ "minimist": "^1.2.0"
+ }
+ },
+ "jsonc-parser": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-2.2.0.tgz",
+ "integrity": "sha512-4fLQxW1j/5fWj6p78vAlAafoCKtuBm6ghv+Ij5W2DrDx0qE+ZdEl2c6Ko1mgJNF5ftX1iEWQQ4Ap7+3GlhjkOA==",
+ "dev": true
+ },
+ "jsprim": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz",
+ "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=",
+ "dev": true,
+ "requires": {
+ "assert-plus": "1.0.0",
+ "extsprintf": "1.3.0",
+ "json-schema": "0.2.3",
+ "verror": "1.10.0"
+ }
+ },
+ "jstransformer": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/jstransformer/-/jstransformer-1.0.0.tgz",
+ "integrity": "sha1-7Yvwkh4vPx7U1cGkT2hwntJHIsM=",
+ "requires": {
+ "is-promise": "^2.0.0",
+ "promise": "^7.0.1"
+ }
+ },
+ "jsx-ast-utils": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-2.2.3.tgz",
+ "integrity": "sha512-EdIHFMm+1BPynpKOpdPqiOsvnIrInRGJD7bzPZdPkjitQEqpdpUuFpq4T0npZFKTiB3RhWFdGN+oqOJIdhDhQA==",
+ "dev": true,
+ "requires": {
+ "array-includes": "^3.0.3",
+ "object.assign": "^4.1.0"
+ }
+ },
+ "keycharm": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/keycharm/-/keycharm-0.2.0.tgz",
+ "integrity": "sha1-+m6i5DuQpoAohD0n8gddNajD5vk=",
+ "dev": true
+ },
+ "keygrip": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/keygrip/-/keygrip-1.1.0.tgz",
+ "integrity": "sha512-iYSchDJ+liQ8iwbSI2QqsQOvqv58eJCEanyJPJi+Khyu8smkcKSFUCbPwzFcL7YVtZ6eONjqRX/38caJ7QjRAQ==",
+ "requires": {
+ "tsscmp": "1.0.6"
+ }
+ },
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "requires": {
+ "is-buffer": "^1.1.5"
+ },
+ "dependencies": {
+ "is-buffer": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz",
+ "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w=="
+ }
+ }
+ },
+ "latest-version": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-3.1.0.tgz",
+ "integrity": "sha1-ogU4P+oyKzO1rjsYq+4NwvNW7hU=",
+ "dev": true,
+ "requires": {
+ "package-json": "^4.0.0"
+ }
+ },
+ "lazy-cache": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/lazy-cache/-/lazy-cache-1.0.4.tgz",
+ "integrity": "sha1-odePw6UEdMuAhF07O24dpJpEbo4="
+ },
+ "lcid": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/lcid/-/lcid-1.0.0.tgz",
+ "integrity": "sha1-MIrMr6C8SDo4Z7S28rlQYlHRuDU=",
+ "requires": {
+ "invert-kv": "^1.0.0"
+ }
+ },
+ "lcov-parse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/lcov-parse/-/lcov-parse-1.0.0.tgz",
+ "integrity": "sha1-6w1GtUER68VhrLTECO+TY73I9+A=",
+ "dev": true
+ },
+ "left-pad": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/left-pad/-/left-pad-1.3.0.tgz",
+ "integrity": "sha512-XI5MPzVNApjAyhQzphX8BkmKsKUxD4LdyK24iZeQGinBN9yTQT3bFlCBy/aVx2HrNcqQGsdot8ghrjyrvMCoEA==",
+ "dev": true
+ },
+ "less": {
+ "version": "3.10.3",
+ "resolved": "https://registry.npmjs.org/less/-/less-3.10.3.tgz",
+ "integrity": "sha512-vz32vqfgmoxF1h3K4J+yKCtajH0PWmjkIFgbs5d78E/c/e+UQTnI+lWK+1eQRE95PXM2mC3rJlLSSP9VQHnaow==",
+ "dev": true,
+ "requires": {
+ "clone": "^2.1.2",
+ "errno": "^0.1.1",
+ "graceful-fs": "^4.1.2",
+ "image-size": "~0.5.0",
+ "mime": "^1.4.1",
+ "mkdirp": "^0.5.0",
+ "promise": "^7.1.1",
+ "request": "^2.83.0",
+ "source-map": "~0.6.0"
+ }
+ },
+ "less-loader": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/less-loader/-/less-loader-4.1.0.tgz",
+ "integrity": "sha512-KNTsgCE9tMOM70+ddxp9yyt9iHqgmSs0yTZc5XH5Wo+g80RWRIYNqE58QJKm/yMud5wZEvz50ugRDuzVIkyahg==",
+ "dev": true,
+ "requires": {
+ "clone": "^2.1.1",
+ "loader-utils": "^1.1.0",
+ "pify": "^3.0.0"
+ },
+ "dependencies": {
+ "pify": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
+ "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=",
+ "dev": true
+ }
+ }
+ },
+ "leven": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz",
+ "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==",
+ "dev": true
+ },
+ "levenary": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/levenary/-/levenary-1.1.1.tgz",
+ "integrity": "sha512-mkAdOIt79FD6irqjYSs4rdbnlT5vRonMEvBVPVb3XmevfS8kgRXwfes0dhPdEtzTWD/1eNE/Bm/G1iRt6DcnQQ==",
+ "dev": true,
+ "requires": {
+ "leven": "^3.1.0"
+ }
+ },
+ "levn": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz",
+ "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=",
+ "dev": true,
+ "requires": {
+ "prelude-ls": "~1.1.2",
+ "type-check": "~0.3.2"
+ }
+ },
+ "lines-and-columns": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz",
+ "integrity": "sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA=",
+ "dev": true
+ },
+ "load-json-file": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-4.0.0.tgz",
+ "integrity": "sha1-L19Fq5HjMhYjT9U62rZo607AmTs=",
+ "dev": true,
+ "requires": {
+ "graceful-fs": "^4.1.2",
+ "parse-json": "^4.0.0",
+ "pify": "^3.0.0",
+ "strip-bom": "^3.0.0"
+ },
+ "dependencies": {
+ "pify": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
+ "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=",
+ "dev": true
+ }
+ }
+ },
+ "loader-runner": {
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-2.4.0.tgz",
+ "integrity": "sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw==",
+ "dev": true
+ },
+ "loader-utils": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.2.3.tgz",
+ "integrity": "sha512-fkpz8ejdnEMG3s37wGL07iSBDg99O9D5yflE9RGNH3hRdx9SOwYfnGYdZOUIZitN8E+E2vkq3MUMYMvPYl5ZZA==",
+ "dev": true,
+ "requires": {
+ "big.js": "^5.2.2",
+ "emojis-list": "^2.0.0",
+ "json5": "^1.0.1"
+ },
+ "dependencies": {
+ "json5": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
+ "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
+ "dev": true,
+ "requires": {
+ "minimist": "^1.2.0"
+ }
+ }
+ }
+ },
+ "locate-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz",
+ "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
+ "dev": true,
+ "requires": {
+ "p-locate": "^3.0.0",
+ "path-exists": "^3.0.0"
+ }
+ },
+ "lodash": {
+ "version": "4.17.15",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
+ "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A=="
+ },
+ "lodash-webpack-plugin": {
+ "version": "0.11.5",
+ "resolved": "https://registry.npmjs.org/lodash-webpack-plugin/-/lodash-webpack-plugin-0.11.5.tgz",
+ "integrity": "sha512-QWfEIYxpixOdbd6KBe5g6MDWcyTgP3trDXwKHFqTlXrWiLcs/67fGQ0IWeRyhWlTITQIgMpJAYd2oeIztuV5VA==",
+ "dev": true,
+ "requires": {
+ "lodash": "^4.17.4"
+ }
+ },
+ "lodash.assign": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/lodash.assign/-/lodash.assign-4.2.0.tgz",
+ "integrity": "sha1-DZnzzNem0mHRm9rrkkUAXShYCOc=",
+ "dev": true
+ },
+ "lodash.camelcase": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz",
+ "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY="
+ },
+ "lodash.clone": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/lodash.clone/-/lodash.clone-4.5.0.tgz",
+ "integrity": "sha1-GVhwRQ9aExkkeN9Lw9I9LeoZB7Y="
+ },
+ "lodash.escape": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/lodash.escape/-/lodash.escape-4.0.1.tgz",
+ "integrity": "sha1-yQRGkMIeBClL6qUXcS/e0fqI3pg=",
+ "dev": true
+ },
+ "lodash.flattendeep": {
+ "version": "4.4.0",
+ "resolved": "https://registry.npmjs.org/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz",
+ "integrity": "sha1-+wMJF/hqMTTlvJvsDWngAT3f7bI=",
+ "dev": true
+ },
+ "lodash.isequal": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz",
+ "integrity": "sha1-QVxEePK8wwEgwizhDtMib30+GOA=",
+ "dev": true
+ },
+ "lodash.merge": {
+ "version": "4.6.2",
+ "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz",
+ "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==",
+ "dev": true
+ },
+ "lodash.sortby": {
+ "version": "4.7.0",
+ "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz",
+ "integrity": "sha1-7dFMgk4sycHgsKG0K7UhBRakJDg=",
+ "dev": true
+ },
+ "log-driver": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/log-driver/-/log-driver-1.2.7.tgz",
+ "integrity": "sha512-U7KCmLdqsGHBLeWqYlFA0V0Sl6P08EE1ZrmA9cxjUE0WVqT9qnyVDPz1kzpFEP0jdJuFnasWIfSd7fsaNXkpbg==",
+ "dev": true
+ },
+ "log-symbols": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-2.2.0.tgz",
+ "integrity": "sha512-VeIAFslyIerEJLXHziedo2basKbMKtTw3vfn5IzG0XTjhAVEJyNHnL2p7vc+wBDSdQuUpNw3M2u6xb9QsAY5Eg==",
+ "dev": true,
+ "requires": {
+ "chalk": "^2.0.1"
+ }
+ },
+ "loglevelnext": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/loglevelnext/-/loglevelnext-1.0.5.tgz",
+ "integrity": "sha512-V/73qkPuJmx4BcBF19xPBr+0ZRVBhc4POxvZTZdMeXpJ4NItXSJ/MSwuFT0kQJlCbXvdlZoQQ/418bS1y9Jh6A==",
+ "dev": true,
+ "requires": {
+ "es6-symbol": "^3.1.1",
+ "object.assign": "^4.1.0"
+ }
+ },
+ "lolex": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/lolex/-/lolex-1.3.2.tgz",
+ "integrity": "sha1-fD2mL/yzDw9agKJWbKJORdigHzE=",
+ "dev": true
+ },
+ "long": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/long/-/long-3.2.0.tgz",
+ "integrity": "sha1-2CG3E4yhy1gcFymQ7xTbIAtcR0s="
+ },
+ "longest": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/longest/-/longest-1.0.1.tgz",
+ "integrity": "sha1-MKCy2jj3N3DoKUoNIuZiXtd9AJc="
+ },
+ "loose-envify": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+ "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
+ "dev": true,
+ "requires": {
+ "js-tokens": "^3.0.0 || ^4.0.0"
+ }
+ },
+ "loud-rejection": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/loud-rejection/-/loud-rejection-1.6.0.tgz",
+ "integrity": "sha1-W0b4AUft7leIcPCG0Eghz5mOVR8=",
+ "dev": true,
+ "requires": {
+ "currently-unhandled": "^0.4.1",
+ "signal-exit": "^3.0.0"
+ }
+ },
+ "lowercase-keys": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz",
+ "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==",
+ "dev": true
+ },
+ "lru-cache": {
+ "version": "4.1.5",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz",
+ "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==",
+ "dev": true,
+ "requires": {
+ "pseudomap": "^1.0.2",
+ "yallist": "^2.1.2"
+ },
+ "dependencies": {
+ "yallist": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz",
+ "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=",
+ "dev": true
+ }
+ }
+ },
+ "make-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz",
+ "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==",
+ "dev": true,
+ "requires": {
+ "pify": "^4.0.1",
+ "semver": "^5.6.0"
+ }
+ },
+ "mamacro": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/mamacro/-/mamacro-0.0.3.tgz",
+ "integrity": "sha512-qMEwh+UujcQ+kbz3T6V+wAmO2U8veoq2w+3wY8MquqwVA3jChfwY+Tk52GZKDfACEPjuZ7r2oJLejwpt8jtwTA==",
+ "dev": true
+ },
+ "map-cache": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz",
+ "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=",
+ "dev": true
+ },
+ "map-obj": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-2.0.0.tgz",
+ "integrity": "sha1-plzSkIepJZi4eRJXpSPgISIqwfk=",
+ "dev": true
+ },
+ "map-visit": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz",
+ "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=",
+ "dev": true,
+ "requires": {
+ "object-visit": "^1.0.0"
+ }
+ },
+ "md5.js": {
+ "version": "1.3.5",
+ "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz",
+ "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==",
+ "dev": true,
+ "requires": {
+ "hash-base": "^3.0.0",
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.1.2"
+ }
+ },
+ "meant": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/meant/-/meant-1.0.1.tgz",
+ "integrity": "sha512-UakVLFjKkbbUwNWJ2frVLnnAtbb7D7DsloxRd3s/gDpI8rdv8W5Hp3NaDb+POBI1fQdeussER6NB8vpcRURvlg==",
+ "dev": true
+ },
+ "measured": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/measured/-/measured-1.1.0.tgz",
+ "integrity": "sha1-f2ozre53vGehZIloxXNgjCkbmsQ=",
+ "requires": {
+ "inherits": "^2.0"
+ }
+ },
+ "media-typer": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+ "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g="
+ },
+ "memory-fs": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.4.1.tgz",
+ "integrity": "sha1-OpoguEYlI+RHz7x+i7gO1me/xVI=",
+ "dev": true,
+ "requires": {
+ "errno": "^0.1.3",
+ "readable-stream": "^2.0.1"
+ }
+ },
+ "meow": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/meow/-/meow-5.0.0.tgz",
+ "integrity": "sha512-CbTqYU17ABaLefO8vCU153ZZlprKYWDljcndKKDCFcYQITzWCXZAVk4QMFZPgvzrnUQ3uItnIE/LoUOwrT15Ig==",
+ "dev": true,
+ "requires": {
+ "camelcase-keys": "^4.0.0",
+ "decamelize-keys": "^1.0.0",
+ "loud-rejection": "^1.0.0",
+ "minimist-options": "^3.0.1",
+ "normalize-package-data": "^2.3.4",
+ "read-pkg-up": "^3.0.0",
+ "redent": "^2.0.0",
+ "trim-newlines": "^2.0.0",
+ "yargs-parser": "^10.0.0"
+ },
+ "dependencies": {
+ "camelcase": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-4.1.0.tgz",
+ "integrity": "sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0=",
+ "dev": true
+ },
+ "find-up": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz",
+ "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=",
+ "dev": true,
+ "requires": {
+ "locate-path": "^2.0.0"
+ }
+ },
+ "locate-path": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz",
+ "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=",
+ "dev": true,
+ "requires": {
+ "p-locate": "^2.0.0",
+ "path-exists": "^3.0.0"
+ }
+ },
+ "p-limit": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz",
+ "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==",
+ "dev": true,
+ "requires": {
+ "p-try": "^1.0.0"
+ }
+ },
+ "p-locate": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz",
+ "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=",
+ "dev": true,
+ "requires": {
+ "p-limit": "^1.1.0"
+ }
+ },
+ "p-try": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz",
+ "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=",
+ "dev": true
+ },
+ "read-pkg-up": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-3.0.0.tgz",
+ "integrity": "sha1-PtSWaF26D4/hGNBpHcUfSh/5bwc=",
+ "dev": true,
+ "requires": {
+ "find-up": "^2.0.0",
+ "read-pkg": "^3.0.0"
+ }
+ },
+ "yargs-parser": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-10.1.0.tgz",
+ "integrity": "sha512-VCIyR1wJoEBZUqk5PA+oOBF6ypbwh5aNB3I50guxAL/quggdfs4TtNHQrSazFA3fYZ+tEqfs0zIGlv0c/rgjbQ==",
+ "dev": true,
+ "requires": {
+ "camelcase": "^4.1.0"
+ }
+ }
+ }
+ },
+ "merge-descriptors": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
+ "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E="
+ },
+ "merge-options": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/merge-options/-/merge-options-1.0.1.tgz",
+ "integrity": "sha512-iuPV41VWKWBIOpBsjoxjDZw8/GbSfZ2mk7N1453bwMrfzdrIk7EzBd+8UVR6rkw67th7xnk9Dytl3J+lHPdxvg==",
+ "dev": true,
+ "requires": {
+ "is-plain-obj": "^1.1"
+ }
+ },
+ "merge-source-map": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/merge-source-map/-/merge-source-map-1.1.0.tgz",
+ "integrity": "sha512-Qkcp7P2ygktpMPh2mCQZaf3jhN6D3Z/qVZHSdWvQ+2Ef5HgRAPBO57A77+ENm0CPx2+1Ce/MYKi3ymqdfuqibw==",
+ "dev": true,
+ "requires": {
+ "source-map": "^0.6.1"
+ }
+ },
+ "methods": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+ "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4="
+ },
+ "micromatch": {
+ "version": "3.1.10",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz",
+ "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==",
+ "dev": true,
+ "requires": {
+ "arr-diff": "^4.0.0",
+ "array-unique": "^0.3.2",
+ "braces": "^2.3.1",
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "extglob": "^2.0.4",
+ "fragment-cache": "^0.2.1",
+ "kind-of": "^6.0.2",
+ "nanomatch": "^1.2.9",
+ "object.pick": "^1.3.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
+ "dev": true
+ }
+ }
+ },
+ "miller-rabin": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz",
+ "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==",
+ "dev": true,
+ "requires": {
+ "bn.js": "^4.0.0",
+ "brorand": "^1.0.1"
+ }
+ },
+ "mime": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
+ "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg=="
+ },
+ "mime-db": {
+ "version": "1.43.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.43.0.tgz",
+ "integrity": "sha512-+5dsGEEovYbT8UY9yD7eE4XTc4UwJ1jBYlgaQQF38ENsKR3wj/8q8RFZrF9WIZpB2V1ArTVFUva8sAul1NzRzQ=="
+ },
+ "mime-types": {
+ "version": "2.1.26",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.26.tgz",
+ "integrity": "sha512-01paPWYgLrkqAyrlDorC1uDwl2p3qZT7yl806vW7DvDoxwXi46jsjFbg+WdwotBIk6/MbEhO/dh5aZ5sNj/dWQ==",
+ "requires": {
+ "mime-db": "1.43.0"
+ }
+ },
+ "mimic-fn": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz",
+ "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==",
+ "dev": true
+ },
+ "mimic-response": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-2.0.0.tgz",
+ "integrity": "sha512-8ilDoEapqA4uQ3TwS0jakGONKXVJqpy+RpM+3b7pLdOjghCrEiGp9SRkFbUHAmZW9vdnrENWHjaweIoTIJExSQ==",
+ "dev": true
+ },
+ "min-document": {
+ "version": "2.19.0",
+ "resolved": "https://registry.npmjs.org/min-document/-/min-document-2.19.0.tgz",
+ "integrity": "sha1-e9KC4/WELtKVu3SM3Z8f+iyCRoU=",
+ "dev": true,
+ "requires": {
+ "dom-walk": "^0.1.0"
+ }
+ },
+ "mini-css-extract-plugin": {
+ "version": "0.4.5",
+ "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-0.4.5.tgz",
+ "integrity": "sha512-dqBanNfktnp2hwL2YguV9Jh91PFX7gu7nRLs4TGsbAfAG6WOtlynFRYzwDwmmeSb5uIwHo9nx1ta0f7vAZVp2w==",
+ "dev": true,
+ "requires": {
+ "loader-utils": "^1.1.0",
+ "schema-utils": "^1.0.0",
+ "webpack-sources": "^1.1.0"
+ },
+ "dependencies": {
+ "schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "dev": true,
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ }
+ }
+ }
+ },
+ "minimalistic-assert": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz",
+ "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==",
+ "dev": true
+ },
+ "minimalistic-crypto-utils": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz",
+ "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=",
+ "dev": true
+ },
+ "minimatch": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
+ "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
+ "requires": {
+ "brace-expansion": "^1.1.7"
+ }
+ },
+ "minimist": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz",
+ "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=",
+ "dev": true
+ },
+ "minimist-options": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/minimist-options/-/minimist-options-3.0.2.tgz",
+ "integrity": "sha512-FyBrT/d0d4+uiZRbqznPXqw3IpZZG3gl3wKWiX784FycUKVwBt0uLBFkQrtE4tZOrgo78nZp2jnKz3L65T5LdQ==",
+ "dev": true,
+ "requires": {
+ "arrify": "^1.0.1",
+ "is-plain-obj": "^1.1.0"
+ }
+ },
+ "minipass": {
+ "version": "2.9.0",
+ "resolved": "https://registry.npmjs.org/minipass/-/minipass-2.9.0.tgz",
+ "integrity": "sha512-wxfUjg9WebH+CUDX/CdbRlh5SmfZiy/hpkxaRI16Y9W56Pa75sWgd/rvFilSgrauD9NyFymP/+JFV3KwzIsJeg==",
+ "dev": true,
+ "requires": {
+ "safe-buffer": "^5.1.2",
+ "yallist": "^3.0.0"
+ }
+ },
+ "minizlib": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-1.3.3.tgz",
+ "integrity": "sha512-6ZYMOEnmVsdCeTJVE0W9ZD+pVnE8h9Hma/iOwwRDsdQoePpoX56/8B6z3P9VNwppJuBKNRuFDRNRqRWexT9G9Q==",
+ "dev": true,
+ "requires": {
+ "minipass": "^2.9.0"
+ }
+ },
+ "mississippi": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/mississippi/-/mississippi-3.0.0.tgz",
+ "integrity": "sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==",
+ "dev": true,
+ "requires": {
+ "concat-stream": "^1.5.0",
+ "duplexify": "^3.4.2",
+ "end-of-stream": "^1.1.0",
+ "flush-write-stream": "^1.0.0",
+ "from2": "^2.1.0",
+ "parallel-transform": "^1.1.0",
+ "pump": "^3.0.0",
+ "pumpify": "^1.3.3",
+ "stream-each": "^1.1.0",
+ "through2": "^2.0.0"
+ }
+ },
+ "mixin-deep": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz",
+ "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==",
+ "dev": true,
+ "requires": {
+ "for-in": "^1.0.2",
+ "is-extendable": "^1.0.1"
+ },
+ "dependencies": {
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "mkdirp": {
+ "version": "0.5.1",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz",
+ "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=",
+ "dev": true,
+ "requires": {
+ "minimist": "0.0.8"
+ },
+ "dependencies": {
+ "minimist": {
+ "version": "0.0.8",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz",
+ "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=",
+ "dev": true
+ }
+ }
+ },
+ "mobx": {
+ "version": "5.15.4",
+ "resolved": "https://registry.npmjs.org/mobx/-/mobx-5.15.4.tgz",
+ "integrity": "sha512-xRFJxSU2Im3nrGCdjSuOTFmxVDGeqOHL+TyADCGbT0k4HHqGmx5u2yaHNryvoORpI4DfbzjJ5jPmuv+d7sioFw==",
+ "dev": true
+ },
+ "mobx-react": {
+ "version": "6.1.4",
+ "resolved": "https://registry.npmjs.org/mobx-react/-/mobx-react-6.1.4.tgz",
+ "integrity": "sha512-wzrJF1RflhyLh8ne4FJfMbG8ZgRFmZ62b4nbyhJzwQpAmrkSnSsAWG9mIff4ffV/Q7OU+uOYf7rXvSmiuUe4cw==",
+ "dev": true,
+ "requires": {
+ "mobx-react-lite": "^1.4.2"
+ }
+ },
+ "mobx-react-lite": {
+ "version": "1.5.2",
+ "resolved": "https://registry.npmjs.org/mobx-react-lite/-/mobx-react-lite-1.5.2.tgz",
+ "integrity": "sha512-PyZmARqqWtpuQaAoHF5pKX7h6TKNLwq6vtovm4zZvG6sEbMRHHSqioGXSeQbpRmG8Kw8uln3q/W1yMO5IfL5Sg==",
+ "dev": true
+ },
+ "mobx-utils": {
+ "version": "5.5.3",
+ "resolved": "https://registry.npmjs.org/mobx-utils/-/mobx-utils-5.5.3.tgz",
+ "integrity": "sha512-tCj3WLHp3y2/OZADAg9KHGtJNNwwEa8ZY92E6dnVuDoV2OaTV+e2N4S23ogsoxJ72ZhFJhNPcy7ppPJRb1Emhg==",
+ "dev": true
+ },
+ "mocha": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/mocha/-/mocha-5.2.0.tgz",
+ "integrity": "sha512-2IUgKDhc3J7Uug+FxMXuqIyYzH7gJjXECKe/w43IGgQHTSj3InJi+yAA7T24L9bQMRKiUEHxEX37G5JpVUGLcQ==",
+ "dev": true,
+ "requires": {
+ "browser-stdout": "1.3.1",
+ "commander": "2.15.1",
+ "debug": "3.1.0",
+ "diff": "3.5.0",
+ "escape-string-regexp": "1.0.5",
+ "glob": "7.1.2",
+ "growl": "1.10.5",
+ "he": "1.1.1",
+ "minimatch": "3.0.4",
+ "mkdirp": "0.5.1",
+ "supports-color": "5.4.0"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "2.15.1",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz",
+ "integrity": "sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag==",
+ "dev": true
+ },
+ "glob": {
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz",
+ "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==",
+ "dev": true,
+ "requires": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ },
+ "supports-color": {
+ "version": "5.4.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz",
+ "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ }
+ }
+ },
+ "mocha-lcov-reporter": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/mocha-lcov-reporter/-/mocha-lcov-reporter-1.3.0.tgz",
+ "integrity": "sha1-Rpve9PivyaEWBW8HnfYYLQr7A4Q=",
+ "dev": true
+ },
+ "module-not-found-error": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/module-not-found-error/-/module-not-found-error-1.0.1.tgz",
+ "integrity": "sha1-z4tP9PKWQGdNbN0CsOO8UjwrvcA=",
+ "dev": true
+ },
+ "moment": {
+ "version": "2.24.0",
+ "resolved": "https://registry.npmjs.org/moment/-/moment-2.24.0.tgz",
+ "integrity": "sha512-bV7f+6l2QigeBBZSM/6yTNq4P2fNpSWj/0e7jQcy87A8e7o2nAfP/34/2ky5Vw4B9S446EtIhodAzkFCcR4dQg=="
+ },
+ "moo": {
+ "version": "0.5.1",
+ "resolved": "https://registry.npmjs.org/moo/-/moo-0.5.1.tgz",
+ "integrity": "sha512-I1mnb5xn4fO80BH9BLcF0yLypy2UKl+Cb01Fu0hJRkJjlCRtxZMWkTdAtDd5ZqCOxtCkhmRwyI57vWT+1iZ67w==",
+ "dev": true
+ },
+ "move-concurrently": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz",
+ "integrity": "sha1-viwAX9oy4LKa8fBdfEszIUxwH5I=",
+ "dev": true,
+ "requires": {
+ "aproba": "^1.1.1",
+ "copy-concurrently": "^1.0.0",
+ "fs-write-stream-atomic": "^1.0.8",
+ "mkdirp": "^0.5.1",
+ "rimraf": "^2.5.4",
+ "run-queue": "^1.0.3"
+ }
+ },
+ "mri": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/mri/-/mri-1.1.4.tgz",
+ "integrity": "sha512-6y7IjGPm8AzlvoUrwAaw1tLnUBudaS3752vcd8JtrpGGQn+rXIe63LFVHm/YMwtqAuh+LJPCFdlLYPWM1nYn6w==",
+ "dev": true
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
+ },
+ "multimatch": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/multimatch/-/multimatch-3.0.0.tgz",
+ "integrity": "sha512-22foS/gqQfANZ3o+W7ST2x25ueHDVNWl/b9OlGcLpy/iKxjCpvcNCM51YCenUi7Mt/jAjjqv8JwZRs8YP5sRjA==",
+ "dev": true,
+ "requires": {
+ "array-differ": "^2.0.3",
+ "array-union": "^1.0.2",
+ "arrify": "^1.0.1",
+ "minimatch": "^3.0.4"
+ }
+ },
+ "mute-stream": {
+ "version": "0.0.7",
+ "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.7.tgz",
+ "integrity": "sha1-MHXOk7whuPq0PhvE2n6BFe0ee6s=",
+ "dev": true
+ },
+ "nan": {
+ "version": "2.14.0",
+ "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.0.tgz",
+ "integrity": "sha512-INOFj37C7k3AfaNTtX8RhsTw7qRy7eLET14cROi9+5HAVbbHuIWUHEauBv5qT4Av2tWasiTY1Jw6puUNqRJXQg=="
+ },
+ "nanomatch": {
+ "version": "1.2.13",
+ "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz",
+ "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==",
+ "dev": true,
+ "requires": {
+ "arr-diff": "^4.0.0",
+ "array-unique": "^0.3.2",
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "fragment-cache": "^0.2.1",
+ "is-windows": "^1.0.2",
+ "kind-of": "^6.0.2",
+ "object.pick": "^1.3.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
+ "dev": true
+ }
+ }
+ },
+ "natural-compare": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
+ "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=",
+ "dev": true
+ },
+ "nearley": {
+ "version": "2.19.1",
+ "resolved": "https://registry.npmjs.org/nearley/-/nearley-2.19.1.tgz",
+ "integrity": "sha512-xq47GIUGXxU9vQg7g/y1o1xuKnkO7ev4nRWqftmQrLkfnE/FjRqDaGOUakM8XHPn/6pW3bGjU2wgoJyId90rqg==",
+ "dev": true,
+ "requires": {
+ "commander": "^2.19.0",
+ "moo": "^0.5.0",
+ "railroad-diagrams": "^1.0.0",
+ "randexp": "0.4.6",
+ "semver": "^5.4.1"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "2.20.3",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
+ "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==",
+ "dev": true
+ }
+ }
+ },
+ "needle": {
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/needle/-/needle-2.4.0.tgz",
+ "integrity": "sha512-4Hnwzr3mi5L97hMYeNl8wRW/Onhy4nUKR/lVemJ8gJedxxUyBLm9kkrDColJvoSfwi0jCNhD+xCdOtiGDQiRZg==",
+ "dev": true,
+ "requires": {
+ "debug": "^3.2.6",
+ "iconv-lite": "^0.4.4",
+ "sax": "^1.2.4"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "3.2.6",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz",
+ "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==",
+ "dev": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ }
+ }
+ },
+ "negotiator": {
+ "version": "0.6.2",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz",
+ "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw=="
+ },
+ "neo-async": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.1.tgz",
+ "integrity": "sha512-iyam8fBuCUpWeKPGpaNMetEocMt364qkCsfL9JuhjXX6dRnguRVOfk2GZaDpPjcOKiiXCPINZC1GczQ7iTq3Zw==",
+ "dev": true
+ },
+ "nested-error-stacks": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/nested-error-stacks/-/nested-error-stacks-2.1.0.tgz",
+ "integrity": "sha512-AO81vsIO1k1sM4Zrd6Hu7regmJN1NSiAja10gc4bX3F0wd+9rQmcuHQaHVQCYIEC8iFXnE+mavh23GOt7wBgug==",
+ "dev": true
+ },
+ "next-tick": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz",
+ "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=",
+ "dev": true
+ },
+ "nice-try": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz",
+ "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==",
+ "dev": true
+ },
+ "node-fetch": {
+ "version": "1.7.3",
+ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-1.7.3.tgz",
+ "integrity": "sha512-NhZ4CsKx7cYm2vSrBAr2PvFOe6sWDf0UYLRqA6svUYg7+/TSfVAu49jYC4BvQ4Sms9SZgdqGBgroqfDhJdTyKQ==",
+ "dev": true,
+ "requires": {
+ "encoding": "^0.1.11",
+ "is-stream": "^1.0.1"
+ }
+ },
+ "node-forge": {
+ "version": "0.7.6",
+ "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.7.6.tgz",
+ "integrity": "sha512-sol30LUpz1jQFBjOKwbjxijiE3b6pjd74YwfD0fJOKPjF+fONKb2Yg8rYgS6+bK6VDl+/wfr4IYpC7jDzLUIfw=="
+ },
+ "node-libs-browser": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/node-libs-browser/-/node-libs-browser-2.2.1.tgz",
+ "integrity": "sha512-h/zcD8H9kaDZ9ALUWwlBUDo6TKF8a7qBSCSEGfjTVIYeqsioSKaAX+BN7NgiMGp6iSIXZ3PxgCu8KS3b71YK5Q==",
+ "dev": true,
+ "requires": {
+ "assert": "^1.1.1",
+ "browserify-zlib": "^0.2.0",
+ "buffer": "^4.3.0",
+ "console-browserify": "^1.1.0",
+ "constants-browserify": "^1.0.0",
+ "crypto-browserify": "^3.11.0",
+ "domain-browser": "^1.1.1",
+ "events": "^3.0.0",
+ "https-browserify": "^1.0.0",
+ "os-browserify": "^0.3.0",
+ "path-browserify": "0.0.1",
+ "process": "^0.11.10",
+ "punycode": "^1.2.4",
+ "querystring-es3": "^0.2.0",
+ "readable-stream": "^2.3.3",
+ "stream-browserify": "^2.0.1",
+ "stream-http": "^2.7.2",
+ "string_decoder": "^1.0.0",
+ "timers-browserify": "^2.0.4",
+ "tty-browserify": "0.0.0",
+ "url": "^0.11.0",
+ "util": "^0.11.0",
+ "vm-browserify": "^1.0.1"
+ },
+ "dependencies": {
+ "punycode": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz",
+ "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=",
+ "dev": true
+ },
+ "util": {
+ "version": "0.11.1",
+ "resolved": "https://registry.npmjs.org/util/-/util-0.11.1.tgz",
+ "integrity": "sha512-HShAsny+zS2TZfaXxD9tYj4HQGlBezXZMZuM/S5PKLLoZkShZiGk9o5CzukI1LVHZvjdvZ2Sj1aW/Ndn2NB/HQ==",
+ "dev": true,
+ "requires": {
+ "inherits": "2.0.3"
+ }
+ }
+ }
+ },
+ "node-modules-regexp": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/node-modules-regexp/-/node-modules-regexp-1.0.0.tgz",
+ "integrity": "sha1-jZ2+KJZKSsVxLpExZCEHxx6Q7EA=",
+ "dev": true
+ },
+ "node-pre-gyp": {
+ "version": "0.11.0",
+ "resolved": "https://registry.npmjs.org/node-pre-gyp/-/node-pre-gyp-0.11.0.tgz",
+ "integrity": "sha512-TwWAOZb0j7e9eGaf9esRx3ZcLaE5tQ2lvYy1pb5IAaG1a2e2Kv5Lms1Y4hpj+ciXJRofIxxlt5haeQ/2ANeE0Q==",
+ "dev": true,
+ "requires": {
+ "detect-libc": "^1.0.2",
+ "mkdirp": "^0.5.1",
+ "needle": "^2.2.1",
+ "nopt": "^4.0.1",
+ "npm-packlist": "^1.1.6",
+ "npmlog": "^4.0.2",
+ "rc": "^1.2.7",
+ "rimraf": "^2.6.1",
+ "semver": "^5.3.0",
+ "tar": "^4"
+ }
+ },
+ "node-releases": {
+ "version": "1.1.47",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.47.tgz",
+ "integrity": "sha512-k4xjVPx5FpwBUj0Gw7uvFOTF4Ep8Hok1I6qjwL3pLfwe7Y0REQSAqOwwv9TWBCUtMHxcXfY4PgRLRozcChvTcA==",
+ "dev": true,
+ "requires": {
+ "semver": "^6.3.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
+ "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
+ "dev": true
+ }
+ }
+ },
+ "nodemon": {
+ "version": "1.19.4",
+ "resolved": "https://registry.npmjs.org/nodemon/-/nodemon-1.19.4.tgz",
+ "integrity": "sha512-VGPaqQBNk193lrJFotBU8nvWZPqEZY2eIzymy2jjY0fJ9qIsxA0sxQ8ATPl0gZC645gijYEc1jtZvpS8QWzJGQ==",
+ "dev": true,
+ "requires": {
+ "chokidar": "^2.1.8",
+ "debug": "^3.2.6",
+ "ignore-by-default": "^1.0.1",
+ "minimatch": "^3.0.4",
+ "pstree.remy": "^1.1.7",
+ "semver": "^5.7.1",
+ "supports-color": "^5.5.0",
+ "touch": "^3.1.0",
+ "undefsafe": "^2.0.2",
+ "update-notifier": "^2.5.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "3.2.6",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz",
+ "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==",
+ "dev": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ }
+ }
+ },
+ "nopt": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/nopt/-/nopt-4.0.1.tgz",
+ "integrity": "sha1-0NRoWv1UFRk8jHUFYC0NF81kR00=",
+ "dev": true,
+ "requires": {
+ "abbrev": "1",
+ "osenv": "^0.1.4"
+ }
+ },
+ "normalize-package-data": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz",
+ "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==",
+ "dev": true,
+ "requires": {
+ "hosted-git-info": "^2.1.4",
+ "resolve": "^1.10.0",
+ "semver": "2 || 3 || 4 || 5",
+ "validate-npm-package-license": "^3.0.1"
+ }
+ },
+ "normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
+ "dev": true
+ },
+ "npm-bundled": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/npm-bundled/-/npm-bundled-1.1.1.tgz",
+ "integrity": "sha512-gqkfgGePhTpAEgUsGEgcq1rqPXA+tv/aVBlgEzfXwA1yiUJF7xtEt3CtVwOjNYQOVknDk0F20w58Fnm3EtG0fA==",
+ "dev": true,
+ "requires": {
+ "npm-normalize-package-bin": "^1.0.1"
+ }
+ },
+ "npm-normalize-package-bin": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/npm-normalize-package-bin/-/npm-normalize-package-bin-1.0.1.tgz",
+ "integrity": "sha512-EPfafl6JL5/rU+ot6P3gRSCpPDW5VmIzX959Ob1+ySFUuuYHWHekXpwdUZcKP5C+DS4GEtdJluwBjnsNDl+fSA==",
+ "dev": true
+ },
+ "npm-packlist": {
+ "version": "1.4.8",
+ "resolved": "https://registry.npmjs.org/npm-packlist/-/npm-packlist-1.4.8.tgz",
+ "integrity": "sha512-5+AZgwru5IevF5ZdnFglB5wNlHG1AOOuw28WhUq8/8emhBmLv6jX5by4WJCh7lW0uSYZYS6DXqIsyZVIXRZU9A==",
+ "dev": true,
+ "requires": {
+ "ignore-walk": "^3.0.1",
+ "npm-bundled": "^1.0.1",
+ "npm-normalize-package-bin": "^1.0.1"
+ }
+ },
+ "npm-run-path": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz",
+ "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=",
+ "dev": true,
+ "requires": {
+ "path-key": "^2.0.0"
+ }
+ },
+ "npmlog": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-4.1.2.tgz",
+ "integrity": "sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg==",
+ "dev": true,
+ "requires": {
+ "are-we-there-yet": "~1.1.2",
+ "console-control-strings": "~1.1.0",
+ "gauge": "~2.7.3",
+ "set-blocking": "~2.0.0"
+ }
+ },
+ "nth-check": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz",
+ "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==",
+ "dev": true,
+ "requires": {
+ "boolbase": "~1.0.0"
+ }
+ },
+ "number-is-nan": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz",
+ "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0="
+ },
+ "numeral": {
+ "version": "1.5.6",
+ "resolved": "https://registry.npmjs.org/numeral/-/numeral-1.5.6.tgz",
+ "integrity": "sha1-ODHbloRRuc9q/5v5WSXx7443sz8=",
+ "dev": true
+ },
+ "nwsapi": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.0.tgz",
+ "integrity": "sha512-h2AatdwYH+JHiZpv7pt/gSX1XoRGb7L/qSIeuqA6GwYoF9w1vP1cw42TO0aI2pNyshRK5893hNSl+1//vHK7hQ==",
+ "dev": true
+ },
+ "nyc": {
+ "version": "14.1.1",
+ "resolved": "https://registry.npmjs.org/nyc/-/nyc-14.1.1.tgz",
+ "integrity": "sha512-OI0vm6ZGUnoGZv/tLdZ2esSVzDwUC88SNs+6JoSOMVxA+gKMB8Tk7jBwgemLx4O40lhhvZCVw1C+OYLOBOPXWw==",
+ "dev": true,
+ "requires": {
+ "archy": "^1.0.0",
+ "caching-transform": "^3.0.2",
+ "convert-source-map": "^1.6.0",
+ "cp-file": "^6.2.0",
+ "find-cache-dir": "^2.1.0",
+ "find-up": "^3.0.0",
+ "foreground-child": "^1.5.6",
+ "glob": "^7.1.3",
+ "istanbul-lib-coverage": "^2.0.5",
+ "istanbul-lib-hook": "^2.0.7",
+ "istanbul-lib-instrument": "^3.3.0",
+ "istanbul-lib-report": "^2.0.8",
+ "istanbul-lib-source-maps": "^3.0.6",
+ "istanbul-reports": "^2.2.4",
+ "js-yaml": "^3.13.1",
+ "make-dir": "^2.1.0",
+ "merge-source-map": "^1.1.0",
+ "resolve-from": "^4.0.0",
+ "rimraf": "^2.6.3",
+ "signal-exit": "^3.0.2",
+ "spawn-wrap": "^1.4.2",
+ "test-exclude": "^5.2.3",
+ "uuid": "^3.3.2",
+ "yargs": "^13.2.2",
+ "yargs-parser": "^13.0.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
+ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
+ "dev": true
+ },
+ "ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "dev": true,
+ "requires": {
+ "color-convert": "^1.9.0"
+ }
+ },
+ "cliui": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz",
+ "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==",
+ "dev": true,
+ "requires": {
+ "string-width": "^3.1.0",
+ "strip-ansi": "^5.2.0",
+ "wrap-ansi": "^5.1.0"
+ }
+ },
+ "is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=",
+ "dev": true
+ },
+ "string-width": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz",
+ "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==",
+ "dev": true,
+ "requires": {
+ "emoji-regex": "^7.0.1",
+ "is-fullwidth-code-point": "^2.0.0",
+ "strip-ansi": "^5.1.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
+ "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^4.1.0"
+ }
+ },
+ "wrap-ansi": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz",
+ "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^3.2.0",
+ "string-width": "^3.0.0",
+ "strip-ansi": "^5.0.0"
+ }
+ },
+ "y18n": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.0.tgz",
+ "integrity": "sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==",
+ "dev": true
+ },
+ "yargs": {
+ "version": "13.3.0",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.0.tgz",
+ "integrity": "sha512-2eehun/8ALW8TLoIl7MVaRUrg+yCnenu8B4kBlRxj3GJGDKU1Og7sMXPNm1BYyM1DOJmTZ4YeN/Nwxv+8XJsUA==",
+ "dev": true,
+ "requires": {
+ "cliui": "^5.0.0",
+ "find-up": "^3.0.0",
+ "get-caller-file": "^2.0.1",
+ "require-directory": "^2.1.1",
+ "require-main-filename": "^2.0.0",
+ "set-blocking": "^2.0.0",
+ "string-width": "^3.0.0",
+ "which-module": "^2.0.0",
+ "y18n": "^4.0.0",
+ "yargs-parser": "^13.1.1"
+ }
+ }
+ }
+ },
+ "oauth-sign": {
+ "version": "0.9.0",
+ "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz",
+ "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==",
+ "dev": true
+ },
+ "object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM="
+ },
+ "object-copy": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz",
+ "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=",
+ "dev": true,
+ "requires": {
+ "copy-descriptor": "^0.1.0",
+ "define-property": "^0.2.5",
+ "kind-of": "^3.0.3"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ }
+ }
+ },
+ "object-inspect": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.7.0.tgz",
+ "integrity": "sha512-a7pEHdh1xKIAgTySUGgLMx/xwDZskN1Ud6egYYN3EdRW4ZMPNEDUTF+hwy2LUC+Bl+SyLXANnwz/jyh/qutKUw==",
+ "dev": true
+ },
+ "object-is": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.0.2.tgz",
+ "integrity": "sha512-Epah+btZd5wrrfjkJZq1AOB9O6OxUQto45hzFd7lXGrpHPGE0W1k+426yrZV+k6NJOzLNNW/nVsmZdIWsAqoOQ==",
+ "dev": true
+ },
+ "object-keys": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
+ "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
+ "dev": true
+ },
+ "object-visit": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz",
+ "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=",
+ "dev": true,
+ "requires": {
+ "isobject": "^3.0.0"
+ }
+ },
+ "object.assign": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.0.tgz",
+ "integrity": "sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==",
+ "dev": true,
+ "requires": {
+ "define-properties": "^1.1.2",
+ "function-bind": "^1.1.1",
+ "has-symbols": "^1.0.0",
+ "object-keys": "^1.0.11"
+ }
+ },
+ "object.entries": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.1.tgz",
+ "integrity": "sha512-ilqR7BgdyZetJutmDPfXCDffGa0/Yzl2ivVNpbx/g4UeWrCdRnFDUBrKJGLhGieRHDATnyZXWBeCb29k9CJysQ==",
+ "dev": true,
+ "requires": {
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.17.0-next.1",
+ "function-bind": "^1.1.1",
+ "has": "^1.0.3"
+ }
+ },
+ "object.fromentries": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.2.tgz",
+ "integrity": "sha512-r3ZiBH7MQppDJVLx6fhD618GKNG40CZYH9wgwdhKxBDDbQgjeWGGd4AtkZad84d291YxvWe7bJGuE65Anh0dxQ==",
+ "dev": true,
+ "requires": {
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.17.0-next.1",
+ "function-bind": "^1.1.1",
+ "has": "^1.0.3"
+ }
+ },
+ "object.pick": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz",
+ "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=",
+ "dev": true,
+ "requires": {
+ "isobject": "^3.0.1"
+ }
+ },
+ "object.values": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.1.tgz",
+ "integrity": "sha512-WTa54g2K8iu0kmS/us18jEmdv1a4Wi//BZ/DTVYEcH0XhLM5NYdpDHja3gt57VrZLcNAO2WGA+KpWsDBaHt6eA==",
+ "dev": true,
+ "requires": {
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.17.0-next.1",
+ "function-bind": "^1.1.1",
+ "has": "^1.0.3"
+ }
+ },
+ "on-finished": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
+ "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
+ "requires": {
+ "ee-first": "1.1.1"
+ }
+ },
+ "on-headers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz",
+ "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA=="
+ },
+ "once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
+ "requires": {
+ "wrappy": "1"
+ }
+ },
+ "onetime": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-2.0.1.tgz",
+ "integrity": "sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ=",
+ "dev": true,
+ "requires": {
+ "mimic-fn": "^1.0.0"
+ }
+ },
+ "opener": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.1.tgz",
+ "integrity": "sha512-goYSy5c2UXE4Ra1xixabeVh1guIX/ZV/YokJksb6q2lubWu6UbvPQ20p542/sFIll1nl8JnCyK9oBaOcCWXwvA==",
+ "dev": true
+ },
+ "opn": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/opn/-/opn-5.5.0.tgz",
+ "integrity": "sha512-PqHpggC9bLV0VeWcdKhkpxY+3JTzetLSqTCWL/z/tFIbI6G8JCjondXklT1JinczLz2Xib62sSp0T/gKT4KksA==",
+ "dev": true,
+ "requires": {
+ "is-wsl": "^1.1.0"
+ }
+ },
+ "optionator": {
+ "version": "0.8.3",
+ "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz",
+ "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==",
+ "dev": true,
+ "requires": {
+ "deep-is": "~0.1.3",
+ "fast-levenshtein": "~2.0.6",
+ "levn": "~0.3.0",
+ "prelude-ls": "~1.1.2",
+ "type-check": "~0.3.2",
+ "word-wrap": "~1.2.3"
+ }
+ },
+ "optjs": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/optjs/-/optjs-3.2.2.tgz",
+ "integrity": "sha1-aabOicRCpEQDFBrS+bNwvVu29O4="
+ },
+ "ora": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/ora/-/ora-2.1.0.tgz",
+ "integrity": "sha512-hNNlAd3gfv/iPmsNxYoAPLvxg7HuPozww7fFonMZvL84tP6Ox5igfk5j/+a9rtJJwqMgKK+JgWsAQik5o0HTLA==",
+ "dev": true,
+ "requires": {
+ "chalk": "^2.3.1",
+ "cli-cursor": "^2.1.0",
+ "cli-spinners": "^1.1.0",
+ "log-symbols": "^2.2.0",
+ "strip-ansi": "^4.0.0",
+ "wcwidth": "^1.0.1"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz",
+ "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=",
+ "dev": true
+ },
+ "strip-ansi": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz",
+ "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^3.0.0"
+ }
+ }
+ }
+ },
+ "os-browserify": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz",
+ "integrity": "sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc=",
+ "dev": true
+ },
+ "os-homedir": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz",
+ "integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=",
+ "dev": true
+ },
+ "os-locale": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-1.4.0.tgz",
+ "integrity": "sha1-IPnxeuKe00XoveWDsT0gCYA8FNk=",
+ "requires": {
+ "lcid": "^1.0.0"
+ }
+ },
+ "os-tmpdir": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz",
+ "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=",
+ "dev": true
+ },
+ "osenv": {
+ "version": "0.1.5",
+ "resolved": "https://registry.npmjs.org/osenv/-/osenv-0.1.5.tgz",
+ "integrity": "sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g==",
+ "dev": true,
+ "requires": {
+ "os-homedir": "^1.0.0",
+ "os-tmpdir": "^1.0.0"
+ }
+ },
+ "p-finally": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz",
+ "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=",
+ "dev": true
+ },
+ "p-limit": {
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz",
+ "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==",
+ "dev": true,
+ "requires": {
+ "p-try": "^2.0.0"
+ }
+ },
+ "p-locate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz",
+ "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
+ "dev": true,
+ "requires": {
+ "p-limit": "^2.0.0"
+ }
+ },
+ "p-try": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
+ "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
+ "dev": true
+ },
+ "package-hash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/package-hash/-/package-hash-3.0.0.tgz",
+ "integrity": "sha512-lOtmukMDVvtkL84rJHI7dpTYq+0rli8N2wlnqUcBuDWCfVhRUfOmnR9SsoHFMLpACvEV60dX7rd0rFaYDZI+FA==",
+ "dev": true,
+ "requires": {
+ "graceful-fs": "^4.1.15",
+ "hasha": "^3.0.0",
+ "lodash.flattendeep": "^4.4.0",
+ "release-zalgo": "^1.0.0"
+ }
+ },
+ "package-json": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/package-json/-/package-json-4.0.1.tgz",
+ "integrity": "sha1-iGmgQBJTZhxMTKPabCEh7VVfXu0=",
+ "dev": true,
+ "requires": {
+ "got": "^6.7.1",
+ "registry-auth-token": "^3.0.1",
+ "registry-url": "^3.0.3",
+ "semver": "^5.1.0"
+ }
+ },
+ "pako": {
+ "version": "1.0.11",
+ "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz",
+ "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==",
+ "dev": true
+ },
+ "parallel-transform": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/parallel-transform/-/parallel-transform-1.2.0.tgz",
+ "integrity": "sha512-P2vSmIu38uIlvdcU7fDkyrxj33gTUy/ABO5ZUbGowxNCopBq/OoD42bP4UmMrJoPyk4Uqf0mu3mtWBhHCZD8yg==",
+ "dev": true,
+ "requires": {
+ "cyclist": "^1.0.1",
+ "inherits": "^2.0.3",
+ "readable-stream": "^2.1.5"
+ }
+ },
+ "parent-module": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
+ "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
+ "dev": true,
+ "requires": {
+ "callsites": "^3.0.0"
+ }
+ },
+ "parse-asn1": {
+ "version": "5.1.5",
+ "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.5.tgz",
+ "integrity": "sha512-jkMYn1dcJqF6d5CpU689bq7w/b5ALS9ROVSpQDPrZsqqesUJii9qutvoT5ltGedNXMO2e16YUWIghG9KxaViTQ==",
+ "dev": true,
+ "requires": {
+ "asn1.js": "^4.0.0",
+ "browserify-aes": "^1.0.0",
+ "create-hash": "^1.1.0",
+ "evp_bytestokey": "^1.0.0",
+ "pbkdf2": "^3.0.3",
+ "safe-buffer": "^5.1.1"
+ }
+ },
+ "parse-json": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz",
+ "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=",
+ "dev": true,
+ "requires": {
+ "error-ex": "^1.3.1",
+ "json-parse-better-errors": "^1.0.1"
+ }
+ },
+ "parse5": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/parse5/-/parse5-3.0.3.tgz",
+ "integrity": "sha512-rgO9Zg5LLLkfJF9E6CCmXlSE4UVceloys8JrFqCcHloC3usd/kJCyPDwH2SOlzix2j3xaP9sUX3e8+kvkuleAA==",
+ "dev": true,
+ "requires": {
+ "@types/node": "*"
+ }
+ },
+ "parseurl": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
+ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="
+ },
+ "particles.js": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/particles.js/-/particles.js-2.0.0.tgz",
+ "integrity": "sha1-IThsQyjWx/lngKIB6W7t/AnHNvY="
+ },
+ "pascalcase": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz",
+ "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=",
+ "dev": true
+ },
+ "passport": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/passport/-/passport-0.4.1.tgz",
+ "integrity": "sha512-IxXgZZs8d7uFSt3eqNjM9NQ3g3uQCW5avD8mRNoXV99Yig50vjuaez6dQK2qC0kVWPRTujxY0dWgGfT09adjYg==",
+ "requires": {
+ "passport-strategy": "1.x.x",
+ "pause": "0.0.1"
+ }
+ },
+ "passport-saml": {
+ "version": "0.33.0",
+ "resolved": "https://registry.npmjs.org/passport-saml/-/passport-saml-0.33.0.tgz",
+ "integrity": "sha1-UbmfGdztVtJG7k4oh+MvBjIfvs8=",
+ "requires": {
+ "passport-strategy": "*",
+ "q": "^1.5.0",
+ "xml-crypto": "^0.10.1",
+ "xml-encryption": "^0.11.0",
+ "xml2js": "0.4.x",
+ "xmlbuilder": "^9.0.4",
+ "xmldom": "0.1.x"
+ }
+ },
+ "passport-strategy": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/passport-strategy/-/passport-strategy-1.0.0.tgz",
+ "integrity": "sha1-tVOaqPwiWj0a0XlHbd8ja0QPUuQ="
+ },
+ "path-browserify": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-0.0.1.tgz",
+ "integrity": "sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ==",
+ "dev": true
+ },
+ "path-dirname": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz",
+ "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=",
+ "dev": true
+ },
+ "path-exists": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
+ "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=",
+ "dev": true
+ },
+ "path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18="
+ },
+ "path-is-inside": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz",
+ "integrity": "sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM=",
+ "dev": true
+ },
+ "path-key": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz",
+ "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=",
+ "dev": true
+ },
+ "path-parse": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz",
+ "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw=="
+ },
+ "path-to-regexp": {
+ "version": "0.1.7",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
+ "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
+ },
+ "path-type": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz",
+ "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==",
+ "dev": true,
+ "requires": {
+ "pify": "^3.0.0"
+ },
+ "dependencies": {
+ "pify": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
+ "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=",
+ "dev": true
+ }
+ }
+ },
+ "pathval": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.0.tgz",
+ "integrity": "sha1-uULm1L3mUwBe9rcTYd74cn0GReA=",
+ "dev": true
+ },
+ "pause": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/pause/-/pause-0.0.1.tgz",
+ "integrity": "sha1-HUCLP9t2kjuVQ9lvtMnf1TXZy10="
+ },
+ "pbkdf2": {
+ "version": "3.0.17",
+ "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.0.17.tgz",
+ "integrity": "sha512-U/il5MsrZp7mGg3mSQfn742na2T+1/vHDCG5/iTI3X9MKUuYUZVLQhyRsg06mCgDBTd57TxzgZt7P+fYfjRLtA==",
+ "dev": true,
+ "requires": {
+ "create-hash": "^1.1.2",
+ "create-hmac": "^1.1.4",
+ "ripemd160": "^2.0.1",
+ "safe-buffer": "^5.0.1",
+ "sha.js": "^2.4.8"
+ }
+ },
+ "performance-now": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz",
+ "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=",
+ "dev": true
+ },
+ "pify": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz",
+ "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==",
+ "dev": true
+ },
+ "pirates": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.1.tgz",
+ "integrity": "sha512-WuNqLTbMI3tmfef2TKxlQmAiLHKtFhlsCZnPIpuv2Ow0RDVO8lfy1Opf4NUzlMXLjPl+Men7AuVdX6TA+s+uGA==",
+ "dev": true,
+ "requires": {
+ "node-modules-regexp": "^1.0.0"
+ }
+ },
+ "pkg-dir": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz",
+ "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==",
+ "dev": true,
+ "requires": {
+ "find-up": "^3.0.0"
+ }
+ },
+ "please-upgrade-node": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/please-upgrade-node/-/please-upgrade-node-3.2.0.tgz",
+ "integrity": "sha512-gQR3WpIgNIKwBMVLkpMUeR3e1/E1y42bqDQZfql+kDeXd8COYfM8PQA4X6y7a8u9Ua9FHmsrrmirW2vHs45hWg==",
+ "dev": true,
+ "requires": {
+ "semver-compare": "^1.0.0"
+ }
+ },
+ "plur": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/plur/-/plur-3.1.1.tgz",
+ "integrity": "sha512-t1Ax8KUvV3FFII8ltczPn2tJdjqbd1sIzu6t4JL7nQ3EyeL/lTrj5PWKb06ic5/6XYDr65rQ4uzQEGN70/6X5w==",
+ "dev": true,
+ "requires": {
+ "irregular-plurals": "^2.0.0"
+ }
+ },
+ "pn": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/pn/-/pn-1.1.0.tgz",
+ "integrity": "sha512-2qHaIQr2VLRFoxe2nASzsV6ef4yOOH+Fi9FBOVH6cqeSgUnoyySPZkxzLuzd+RYOQTRpROA0ztTMqxROKSb/nA==",
+ "dev": true
+ },
+ "posix-character-classes": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz",
+ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=",
+ "dev": true
+ },
+ "postcss": {
+ "version": "7.0.26",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.26.tgz",
+ "integrity": "sha512-IY4oRjpXWYshuTDFxMVkJDtWIk2LhsTlu8bZnbEJA4+bYT16Lvpo8Qv6EvDumhYRgzjZl489pmsY3qVgJQ08nA==",
+ "dev": true,
+ "requires": {
+ "chalk": "^2.4.2",
+ "source-map": "^0.6.1",
+ "supports-color": "^6.1.0"
+ },
+ "dependencies": {
+ "supports-color": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz",
+ "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ }
+ }
+ },
+ "postcss-modules-extract-imports": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-2.0.0.tgz",
+ "integrity": "sha512-LaYLDNS4SG8Q5WAWqIJgdHPJrDDr/Lv775rMBFUbgjTz6j34lUznACHcdRWroPvXANP2Vj7yNK57vp9eFqzLWQ==",
+ "dev": true,
+ "requires": {
+ "postcss": "^7.0.5"
+ }
+ },
+ "postcss-modules-local-by-default": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-3.0.2.tgz",
+ "integrity": "sha512-jM/V8eqM4oJ/22j0gx4jrp63GSvDH6v86OqyTHHUvk4/k1vceipZsaymiZ5PvocqZOl5SFHiFJqjs3la0wnfIQ==",
+ "dev": true,
+ "requires": {
+ "icss-utils": "^4.1.1",
+ "postcss": "^7.0.16",
+ "postcss-selector-parser": "^6.0.2",
+ "postcss-value-parser": "^4.0.0"
+ }
+ },
+ "postcss-modules-scope": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-2.1.1.tgz",
+ "integrity": "sha512-OXRUPecnHCg8b9xWvldG/jUpRIGPNRka0r4D4j0ESUU2/5IOnpsjfPPmDprM3Ih8CgZ8FXjWqaniK5v4rWt3oQ==",
+ "dev": true,
+ "requires": {
+ "postcss": "^7.0.6",
+ "postcss-selector-parser": "^6.0.0"
+ }
+ },
+ "postcss-modules-values": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-3.0.0.tgz",
+ "integrity": "sha512-1//E5jCBrZ9DmRX+zCtmQtRSV6PV42Ix7Bzj9GbwJceduuf7IqP8MgeTXuRDHOWj2m0VzZD5+roFWDuU8RQjcg==",
+ "dev": true,
+ "requires": {
+ "icss-utils": "^4.0.0",
+ "postcss": "^7.0.6"
+ }
+ },
+ "postcss-selector-parser": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.2.tgz",
+ "integrity": "sha512-36P2QR59jDTOAiIkqEprfJDsoNrvwFei3eCqKd1Y0tUsBimsq39BLp7RD+JWny3WgB1zGhJX8XVePwm9k4wdBg==",
+ "dev": true,
+ "requires": {
+ "cssesc": "^3.0.0",
+ "indexes-of": "^1.0.1",
+ "uniq": "^1.0.1"
+ }
+ },
+ "postcss-value-parser": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.0.2.tgz",
+ "integrity": "sha512-LmeoohTpp/K4UiyQCwuGWlONxXamGzCMtFxLq4W1nZVGIQLYvMCJx3yAF9qyyuFpflABI9yVdtJAqbihOsCsJQ==",
+ "dev": true
+ },
+ "prelude-ls": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz",
+ "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=",
+ "dev": true
+ },
+ "prepend-http": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-1.0.4.tgz",
+ "integrity": "sha1-1PRWKwzjaW5BrFLQ4ALlemNdxtw=",
+ "dev": true
+ },
+ "prettier": {
+ "version": "1.19.1",
+ "resolved": "https://registry.npmjs.org/prettier/-/prettier-1.19.1.tgz",
+ "integrity": "sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew==",
+ "dev": true
+ },
+ "prettier-linter-helpers": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/prettier-linter-helpers/-/prettier-linter-helpers-1.0.0.tgz",
+ "integrity": "sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==",
+ "dev": true,
+ "requires": {
+ "fast-diff": "^1.1.2"
+ }
+ },
+ "pretty-bytes": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-5.3.0.tgz",
+ "integrity": "sha512-hjGrh+P926p4R4WbaB6OckyRtO0F0/lQBiT+0gnxjV+5kjPBrfVBFCsCLbMqVQeydvIoouYTCmmEURiH3R1Bdg==",
+ "dev": true
+ },
+ "pretty-quick": {
+ "version": "1.11.1",
+ "resolved": "https://registry.npmjs.org/pretty-quick/-/pretty-quick-1.11.1.tgz",
+ "integrity": "sha512-kSXCkcETfak7EQXz6WOkCeCqpbC4GIzrN/vaneTGMP/fAtD8NerA9bPhCUqHAks1geo7biZNl5uEMPceeneLuA==",
+ "dev": true,
+ "requires": {
+ "chalk": "^2.3.0",
+ "execa": "^0.8.0",
+ "find-up": "^2.1.0",
+ "ignore": "^3.3.7",
+ "mri": "^1.1.0",
+ "multimatch": "^3.0.0"
+ },
+ "dependencies": {
+ "cross-spawn": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz",
+ "integrity": "sha1-6L0O/uWPz/b4+UUQoKVUu/ojVEk=",
+ "dev": true,
+ "requires": {
+ "lru-cache": "^4.0.1",
+ "shebang-command": "^1.2.0",
+ "which": "^1.2.9"
+ }
+ },
+ "execa": {
+ "version": "0.8.0",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-0.8.0.tgz",
+ "integrity": "sha1-2NdrvBtVIX7RkP1t1J08d07PyNo=",
+ "dev": true,
+ "requires": {
+ "cross-spawn": "^5.0.1",
+ "get-stream": "^3.0.0",
+ "is-stream": "^1.1.0",
+ "npm-run-path": "^2.0.0",
+ "p-finally": "^1.0.0",
+ "signal-exit": "^3.0.0",
+ "strip-eof": "^1.0.0"
+ }
+ },
+ "find-up": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz",
+ "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=",
+ "dev": true,
+ "requires": {
+ "locate-path": "^2.0.0"
+ }
+ },
+ "get-stream": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz",
+ "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=",
+ "dev": true
+ },
+ "ignore": {
+ "version": "3.3.10",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz",
+ "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==",
+ "dev": true
+ },
+ "locate-path": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz",
+ "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=",
+ "dev": true,
+ "requires": {
+ "p-locate": "^2.0.0",
+ "path-exists": "^3.0.0"
+ }
+ },
+ "p-limit": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz",
+ "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==",
+ "dev": true,
+ "requires": {
+ "p-try": "^1.0.0"
+ }
+ },
+ "p-locate": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz",
+ "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=",
+ "dev": true,
+ "requires": {
+ "p-limit": "^1.1.0"
+ }
+ },
+ "p-try": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz",
+ "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=",
+ "dev": true
+ }
+ }
+ },
+ "private": {
+ "version": "0.1.8",
+ "resolved": "https://registry.npmjs.org/private/-/private-0.1.8.tgz",
+ "integrity": "sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==",
+ "dev": true
+ },
+ "process": {
+ "version": "0.11.10",
+ "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz",
+ "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI=",
+ "dev": true
+ },
+ "process-nextick-args": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
+ "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==",
+ "dev": true
+ },
+ "progress": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz",
+ "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==",
+ "dev": true
+ },
+ "progress-bar-webpack-plugin": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/progress-bar-webpack-plugin/-/progress-bar-webpack-plugin-1.12.1.tgz",
+ "integrity": "sha512-tVbPB5xBbqNwdH3mwcxzjL1r1Vrm/xGu93OsqVSAbCaXGoKFvfWIh0gpMDpn2kYsPVRSAIK0pBkP9Vfs+JJibQ==",
+ "dev": true,
+ "requires": {
+ "chalk": "^1.1.1",
+ "object.assign": "^4.0.1",
+ "progress": "^1.1.8"
+ },
+ "dependencies": {
+ "ansi-styles": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz",
+ "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=",
+ "dev": true
+ },
+ "chalk": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz",
+ "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^2.2.1",
+ "escape-string-regexp": "^1.0.2",
+ "has-ansi": "^2.0.0",
+ "strip-ansi": "^3.0.0",
+ "supports-color": "^2.0.0"
+ }
+ },
+ "progress": {
+ "version": "1.1.8",
+ "resolved": "https://registry.npmjs.org/progress/-/progress-1.1.8.tgz",
+ "integrity": "sha1-4mDHj2Fhzdmw5WzD4Khd4Xx6V74=",
+ "dev": true
+ },
+ "strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ },
+ "supports-color": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz",
+ "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=",
+ "dev": true
+ }
+ }
+ },
+ "promise": {
+ "version": "7.3.1",
+ "resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz",
+ "integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==",
+ "requires": {
+ "asap": "~2.0.3"
+ }
+ },
+ "promise-inflight": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz",
+ "integrity": "sha1-mEcocL8igTL8vdhoEputEsPAKeM=",
+ "dev": true
+ },
+ "prop-types": {
+ "version": "15.7.2",
+ "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.7.2.tgz",
+ "integrity": "sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ==",
+ "dev": true,
+ "requires": {
+ "loose-envify": "^1.4.0",
+ "object-assign": "^4.1.1",
+ "react-is": "^16.8.1"
+ }
+ },
+ "prop-types-exact": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/prop-types-exact/-/prop-types-exact-1.2.0.tgz",
+ "integrity": "sha512-K+Tk3Kd9V0odiXFP9fwDHUYRyvK3Nun3GVyPapSIs5OBkITAm15W0CPFD/YKTkMUAbc0b9CUwRQp2ybiBIq+eA==",
+ "dev": true,
+ "requires": {
+ "has": "^1.0.3",
+ "object.assign": "^4.1.0",
+ "reflect.ownkeys": "^0.2.0"
+ }
+ },
+ "propagating-hammerjs": {
+ "version": "1.4.7",
+ "resolved": "https://registry.npmjs.org/propagating-hammerjs/-/propagating-hammerjs-1.4.7.tgz",
+ "integrity": "sha512-oW9Wd+W2Tp5uOz6Fh4mEU7p+FoyU85smLH/mPga83Loh0pHa6AH4ZHGywvwMk3TWP31l7iUsvJyW265p4Ipwrg==",
+ "dev": true,
+ "requires": {
+ "hammerjs": "^2.0.8"
+ }
+ },
+ "protobufjs": {
+ "version": "5.0.3",
+ "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-5.0.3.tgz",
+ "integrity": "sha512-55Kcx1MhPZX0zTbVosMQEO5R6/rikNXd9b6RQK4KSPcrSIIwoXTtebIczUrXlwaSrbz4x8XUVThGPob1n8I4QA==",
+ "requires": {
+ "ascli": "~1",
+ "bytebuffer": "~5",
+ "glob": "^7.0.5",
+ "yargs": "^3.10.0"
+ }
+ },
+ "proxy-addr": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.5.tgz",
+ "integrity": "sha512-t/7RxHXPH6cJtP0pRG6smSr9QJidhB+3kXu0KgXnbGYMgzEnUxRQ4/LDdfOwZEMyIh3/xHb8PX3t+lfL9z+YVQ==",
+ "requires": {
+ "forwarded": "~0.1.2",
+ "ipaddr.js": "1.9.0"
+ }
+ },
+ "proxyquire": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/proxyquire/-/proxyquire-2.1.3.tgz",
+ "integrity": "sha512-BQWfCqYM+QINd+yawJz23tbBM40VIGXOdDw3X344KcclI/gtBbdWF6SlQ4nK/bYhF9d27KYug9WzljHC6B9Ysg==",
+ "dev": true,
+ "requires": {
+ "fill-keys": "^1.0.2",
+ "module-not-found-error": "^1.0.1",
+ "resolve": "^1.11.1"
+ }
+ },
+ "prr": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/prr/-/prr-1.0.1.tgz",
+ "integrity": "sha1-0/wRS6BplaRexok/SEzrHXj19HY=",
+ "dev": true
+ },
+ "pseudomap": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz",
+ "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM=",
+ "dev": true
+ },
+ "psl": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/psl/-/psl-1.7.0.tgz",
+ "integrity": "sha512-5NsSEDv8zY70ScRnOTn7bK7eanl2MvFrOrS/R6x+dBt5g1ghnj9Zv90kO8GwT8gxcu2ANyFprnFYB85IogIJOQ==",
+ "dev": true
+ },
+ "pstree.remy": {
+ "version": "1.1.7",
+ "resolved": "https://registry.npmjs.org/pstree.remy/-/pstree.remy-1.1.7.tgz",
+ "integrity": "sha512-xsMgrUwRpuGskEzBFkH8NmTimbZ5PcPup0LA8JJkHIm2IMUbQcpo3yeLNWVrufEYjh8YwtSVh0xz6UeWc5Oh5A==",
+ "dev": true
+ },
+ "public-encrypt": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz",
+ "integrity": "sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==",
+ "dev": true,
+ "requires": {
+ "bn.js": "^4.1.0",
+ "browserify-rsa": "^4.0.0",
+ "create-hash": "^1.1.0",
+ "parse-asn1": "^5.0.0",
+ "randombytes": "^2.0.1",
+ "safe-buffer": "^5.1.2"
+ }
+ },
+ "pug": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/pug/-/pug-2.0.4.tgz",
+ "integrity": "sha512-XhoaDlvi6NIzL49nu094R2NA6P37ijtgMDuWE+ofekDChvfKnzFal60bhSdiy8y2PBO6fmz3oMEIcfpBVRUdvw==",
+ "requires": {
+ "pug-code-gen": "^2.0.2",
+ "pug-filters": "^3.1.1",
+ "pug-lexer": "^4.1.0",
+ "pug-linker": "^3.0.6",
+ "pug-load": "^2.0.12",
+ "pug-parser": "^5.0.1",
+ "pug-runtime": "^2.0.5",
+ "pug-strip-comments": "^1.0.4"
+ }
+ },
+ "pug-attrs": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/pug-attrs/-/pug-attrs-2.0.4.tgz",
+ "integrity": "sha512-TaZ4Z2TWUPDJcV3wjU3RtUXMrd3kM4Wzjbe3EWnSsZPsJ3LDI0F3yCnf2/W7PPFF+edUFQ0HgDL1IoxSz5K8EQ==",
+ "requires": {
+ "constantinople": "^3.0.1",
+ "js-stringify": "^1.0.1",
+ "pug-runtime": "^2.0.5"
+ }
+ },
+ "pug-code-gen": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/pug-code-gen/-/pug-code-gen-2.0.2.tgz",
+ "integrity": "sha512-kROFWv/AHx/9CRgoGJeRSm+4mLWchbgpRzTEn8XCiwwOy6Vh0gAClS8Vh5TEJ9DBjaP8wCjS3J6HKsEsYdvaCw==",
+ "requires": {
+ "constantinople": "^3.1.2",
+ "doctypes": "^1.1.0",
+ "js-stringify": "^1.0.1",
+ "pug-attrs": "^2.0.4",
+ "pug-error": "^1.3.3",
+ "pug-runtime": "^2.0.5",
+ "void-elements": "^2.0.1",
+ "with": "^5.0.0"
+ }
+ },
+ "pug-error": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/pug-error/-/pug-error-1.3.3.tgz",
+ "integrity": "sha512-qE3YhESP2mRAWMFJgKdtT5D7ckThRScXRwkfo+Erqga7dyJdY3ZquspprMCj/9sJ2ijm5hXFWQE/A3l4poMWiQ=="
+ },
+ "pug-filters": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/pug-filters/-/pug-filters-3.1.1.tgz",
+ "integrity": "sha512-lFfjNyGEyVWC4BwX0WyvkoWLapI5xHSM3xZJFUhx4JM4XyyRdO8Aucc6pCygnqV2uSgJFaJWW3Ft1wCWSoQkQg==",
+ "requires": {
+ "clean-css": "^4.1.11",
+ "constantinople": "^3.0.1",
+ "jstransformer": "1.0.0",
+ "pug-error": "^1.3.3",
+ "pug-walk": "^1.1.8",
+ "resolve": "^1.1.6",
+ "uglify-js": "^2.6.1"
+ }
+ },
+ "pug-lexer": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/pug-lexer/-/pug-lexer-4.1.0.tgz",
+ "integrity": "sha512-i55yzEBtjm0mlplW4LoANq7k3S8gDdfC6+LThGEvsK4FuobcKfDAwt6V4jKPH9RtiE3a2Akfg5UpafZ1OksaPA==",
+ "requires": {
+ "character-parser": "^2.1.1",
+ "is-expression": "^3.0.0",
+ "pug-error": "^1.3.3"
+ }
+ },
+ "pug-linker": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/pug-linker/-/pug-linker-3.0.6.tgz",
+ "integrity": "sha512-bagfuHttfQOpANGy1Y6NJ+0mNb7dD2MswFG2ZKj22s8g0wVsojpRlqveEQHmgXXcfROB2RT6oqbPYr9EN2ZWzg==",
+ "requires": {
+ "pug-error": "^1.3.3",
+ "pug-walk": "^1.1.8"
+ }
+ },
+ "pug-load": {
+ "version": "2.0.12",
+ "resolved": "https://registry.npmjs.org/pug-load/-/pug-load-2.0.12.tgz",
+ "integrity": "sha512-UqpgGpyyXRYgJs/X60sE6SIf8UBsmcHYKNaOccyVLEuT6OPBIMo6xMPhoJnqtB3Q3BbO4Z3Bjz5qDsUWh4rXsg==",
+ "requires": {
+ "object-assign": "^4.1.0",
+ "pug-walk": "^1.1.8"
+ }
+ },
+ "pug-parser": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/pug-parser/-/pug-parser-5.0.1.tgz",
+ "integrity": "sha512-nGHqK+w07p5/PsPIyzkTQfzlYfuqoiGjaoqHv1LjOv2ZLXmGX1O+4Vcvps+P4LhxZ3drYSljjq4b+Naid126wA==",
+ "requires": {
+ "pug-error": "^1.3.3",
+ "token-stream": "0.0.1"
+ }
+ },
+ "pug-runtime": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/pug-runtime/-/pug-runtime-2.0.5.tgz",
+ "integrity": "sha512-P+rXKn9un4fQY77wtpcuFyvFaBww7/91f3jHa154qU26qFAnOe6SW1CbIDcxiG5lLK9HazYrMCCuDvNgDQNptw=="
+ },
+ "pug-strip-comments": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/pug-strip-comments/-/pug-strip-comments-1.0.4.tgz",
+ "integrity": "sha512-i5j/9CS4yFhSxHp5iKPHwigaig/VV9g+FgReLJWWHEHbvKsbqL0oP/K5ubuLco6Wu3Kan5p7u7qk8A4oLLh6vw==",
+ "requires": {
+ "pug-error": "^1.3.3"
+ }
+ },
+ "pug-walk": {
+ "version": "1.1.8",
+ "resolved": "https://registry.npmjs.org/pug-walk/-/pug-walk-1.1.8.tgz",
+ "integrity": "sha512-GMu3M5nUL3fju4/egXwZO0XLi6fW/K3T3VTgFQ14GxNi8btlxgT5qZL//JwZFm/2Fa64J/PNS8AZeys3wiMkVA=="
+ },
+ "pump": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
+ "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
+ "dev": true,
+ "requires": {
+ "end-of-stream": "^1.1.0",
+ "once": "^1.3.1"
+ }
+ },
+ "pumpify": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/pumpify/-/pumpify-1.5.1.tgz",
+ "integrity": "sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==",
+ "dev": true,
+ "requires": {
+ "duplexify": "^3.6.0",
+ "inherits": "^2.0.3",
+ "pump": "^2.0.0"
+ },
+ "dependencies": {
+ "pump": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz",
+ "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==",
+ "dev": true,
+ "requires": {
+ "end-of-stream": "^1.1.0",
+ "once": "^1.3.1"
+ }
+ }
+ }
+ },
+ "punycode": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz",
+ "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==",
+ "dev": true
+ },
+ "q": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz",
+ "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc="
+ },
+ "qs": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.9.1.tgz",
+ "integrity": "sha512-Cxm7/SS/y/Z3MHWSxXb8lIFqgqBowP5JMlTUFyJN88y0SGQhVmZnqFK/PeuMX9LzUyWsqqhNxIyg0jlzq946yA=="
+ },
+ "querystring": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz",
+ "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=",
+ "dev": true
+ },
+ "querystring-es3": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz",
+ "integrity": "sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM=",
+ "dev": true
+ },
+ "quick-lru": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-1.1.0.tgz",
+ "integrity": "sha1-Q2CxfGETatOAeDl/8RQW4Ybc+7g=",
+ "dev": true
+ },
+ "raf": {
+ "version": "3.4.1",
+ "resolved": "https://registry.npmjs.org/raf/-/raf-3.4.1.tgz",
+ "integrity": "sha512-Sq4CW4QhwOHE8ucn6J34MqtZCeWFP2aQSmrlroYgqAV1PjStIhJXxYuTgUIfkEk7zTLjmIjLmU5q+fbD1NnOJA==",
+ "dev": true,
+ "requires": {
+ "performance-now": "^2.1.0"
+ }
+ },
+ "railroad-diagrams": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/railroad-diagrams/-/railroad-diagrams-1.0.0.tgz",
+ "integrity": "sha1-635iZ1SN3t+4mcG5Dlc3RVnN234=",
+ "dev": true
+ },
+ "randexp": {
+ "version": "0.4.6",
+ "resolved": "https://registry.npmjs.org/randexp/-/randexp-0.4.6.tgz",
+ "integrity": "sha512-80WNmd9DA0tmZrw9qQa62GPPWfuXJknrmVmLcxvq4uZBdYqb1wYoKTmnlGUchvVWe0XiLupYkBoXVOxz3C8DYQ==",
+ "dev": true,
+ "requires": {
+ "discontinuous-range": "1.0.0",
+ "ret": "~0.1.10"
+ }
+ },
+ "random-bytes": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/random-bytes/-/random-bytes-1.0.0.tgz",
+ "integrity": "sha1-T2ih3Arli9P7lYSMMDJNt11kNgs="
+ },
+ "randombytes": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz",
+ "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==",
+ "dev": true,
+ "requires": {
+ "safe-buffer": "^5.1.0"
+ }
+ },
+ "randomfill": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz",
+ "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==",
+ "dev": true,
+ "requires": {
+ "randombytes": "^2.0.5",
+ "safe-buffer": "^5.1.0"
+ }
+ },
+ "range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="
+ },
+ "raw-body": {
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz",
+ "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==",
+ "requires": {
+ "bytes": "3.1.0",
+ "http-errors": "1.7.2",
+ "iconv-lite": "0.4.24",
+ "unpipe": "1.0.0"
+ }
+ },
+ "rc": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
+ "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
+ "dev": true,
+ "requires": {
+ "deep-extend": "^0.6.0",
+ "ini": "~1.3.0",
+ "minimist": "^1.2.0",
+ "strip-json-comments": "~2.0.1"
+ }
+ },
+ "react": {
+ "version": "16.12.0",
+ "resolved": "https://registry.npmjs.org/react/-/react-16.12.0.tgz",
+ "integrity": "sha512-fglqy3k5E+81pA8s+7K0/T3DBCF0ZDOher1elBFzF7O6arXJgzyu/FW+COxFvAWXJoJN9KIZbT2LXlukwphYTA==",
+ "dev": true,
+ "requires": {
+ "loose-envify": "^1.1.0",
+ "object-assign": "^4.1.1",
+ "prop-types": "^15.6.2"
+ }
+ },
+ "react-bootstrap-table": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/react-bootstrap-table/-/react-bootstrap-table-4.3.1.tgz",
+ "integrity": "sha1-9wS+Vbf2vwVX0vxb7G0l/TB9DN4=",
+ "dev": true,
+ "requires": {
+ "classnames": "^2.1.2",
+ "prop-types": "^15.5.10",
+ "react-modal": "^3.1.7",
+ "react-s-alert": "^1.3.2"
+ }
+ },
+ "react-chartjs-2": {
+ "version": "2.9.0",
+ "resolved": "https://registry.npmjs.org/react-chartjs-2/-/react-chartjs-2-2.9.0.tgz",
+ "integrity": "sha512-IYwqUUnQRAJ9SNA978vxulHJTcUFTJk2LDVfbAyk0TnJFZZG7+6U/2flsE4MCw6WCbBjTTypy8T82Ch7XrPtRw==",
+ "dev": true,
+ "requires": {
+ "lodash": "^4.17.4",
+ "prop-types": "^15.5.8"
+ }
+ },
+ "react-circular-progressbar": {
+ "version": "0.8.1",
+ "resolved": "https://registry.npmjs.org/react-circular-progressbar/-/react-circular-progressbar-0.8.1.tgz",
+ "integrity": "sha512-ys2+LcenXWfY5TejPtekl5CvOGb1dPoalyVW/08n8Wo6OPFax5kLrdBFLFi5F/bBYTpNiTp4xUAWKuyrWE2n/g==",
+ "dev": true,
+ "requires": {
+ "prop-types": "^15.5.10"
+ }
+ },
+ "react-copy-to-clipboard": {
+ "version": "5.0.2",
+ "resolved": "https://registry.npmjs.org/react-copy-to-clipboard/-/react-copy-to-clipboard-5.0.2.tgz",
+ "integrity": "sha512-/2t5mLMMPuN5GmdXo6TebFa8IoFxZ+KTDDqYhcDm0PhkgEzSxVvIX26G20s1EB02A4h2UZgwtfymZ3lGJm0OLg==",
+ "dev": true,
+ "requires": {
+ "copy-to-clipboard": "^3",
+ "prop-types": "^15.5.8"
+ }
+ },
+ "react-datetime": {
+ "version": "2.16.3",
+ "resolved": "https://registry.npmjs.org/react-datetime/-/react-datetime-2.16.3.tgz",
+ "integrity": "sha512-amWfb5iGEiyqjLmqCLlPpu2oN415jK8wX1qoTq7qn6EYiU7qQgbNHglww014PT4O/3G5eo/3kbJu/M/IxxTyGw==",
+ "dev": true,
+ "requires": {
+ "create-react-class": "^15.5.2",
+ "object-assign": "^3.0.0",
+ "prop-types": "^15.5.7",
+ "react-onclickoutside": "^6.5.0"
+ },
+ "dependencies": {
+ "object-assign": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-3.0.0.tgz",
+ "integrity": "sha1-m+3VygiXlJvKR+f/QIBi1Un1h/I=",
+ "dev": true
+ }
+ }
+ },
+ "react-dom": {
+ "version": "16.12.0",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-16.12.0.tgz",
+ "integrity": "sha512-LMxFfAGrcS3kETtQaCkTKjMiifahaMySFDn71fZUNpPHZQEzmk/GiAeIT8JSOrHB23fnuCOMruL2a8NYlw+8Gw==",
+ "dev": true,
+ "requires": {
+ "loose-envify": "^1.1.0",
+ "object-assign": "^4.1.1",
+ "prop-types": "^15.6.2",
+ "scheduler": "^0.18.0"
+ }
+ },
+ "react-ga": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/react-ga/-/react-ga-2.7.0.tgz",
+ "integrity": "sha512-AjC7UOZMvygrWTc2hKxTDvlMXEtbmA0IgJjmkhgmQQ3RkXrWR11xEagLGFGaNyaPnmg24oaIiaNPnEoftUhfXA==",
+ "dev": true
+ },
+ "react-hot-loader": {
+ "version": "4.12.19",
+ "resolved": "https://registry.npmjs.org/react-hot-loader/-/react-hot-loader-4.12.19.tgz",
+ "integrity": "sha512-p8AnA4QE2GtrvkdmqnKrEiijtVlqdTIDCHZOwItkI9kW51bt5XnQ/4Anz8giiWf9kqBpEQwsmnChDCAFBRyR/Q==",
+ "dev": true,
+ "requires": {
+ "fast-levenshtein": "^2.0.6",
+ "global": "^4.3.0",
+ "hoist-non-react-statics": "^3.3.0",
+ "loader-utils": "^1.1.0",
+ "prop-types": "^15.6.1",
+ "react-lifecycles-compat": "^3.0.4",
+ "shallowequal": "^1.1.0",
+ "source-map": "^0.7.3"
+ },
+ "dependencies": {
+ "source-map": {
+ "version": "0.7.3",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.3.tgz",
+ "integrity": "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==",
+ "dev": true
+ }
+ }
+ },
+ "react-input-autosize": {
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/react-input-autosize/-/react-input-autosize-2.2.2.tgz",
+ "integrity": "sha512-jQJgYCA3S0j+cuOwzuCd1OjmBmnZLdqQdiLKRYrsMMzbjUrVDS5RvJUDwJqA7sKuksDuzFtm6hZGKFu7Mjk5aw==",
+ "dev": true,
+ "requires": {
+ "prop-types": "^15.5.8"
+ }
+ },
+ "react-is": {
+ "version": "16.12.0",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.12.0.tgz",
+ "integrity": "sha512-rPCkf/mWBtKc97aLL9/txD8DZdemK0vkA3JMLShjlJB3Pj3s+lpf1KaBzMfQrAmhMQB0n1cU/SUGgKKBCe837Q==",
+ "dev": true
+ },
+ "react-lifecycles-compat": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz",
+ "integrity": "sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==",
+ "dev": true
+ },
+ "react-modal": {
+ "version": "3.11.1",
+ "resolved": "https://registry.npmjs.org/react-modal/-/react-modal-3.11.1.tgz",
+ "integrity": "sha512-8uN744Yq0X2lbfSLxsEEc2UV3RjSRb4yDVxRQ1aGzPo86QjNOwhQSukDb8U8kR+636TRTvfMren10fgOjAy9eA==",
+ "dev": true,
+ "requires": {
+ "exenv": "^1.2.0",
+ "prop-types": "^15.5.10",
+ "react-lifecycles-compat": "^3.0.0",
+ "warning": "^4.0.3"
+ }
+ },
+ "react-motion": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/react-motion/-/react-motion-0.5.2.tgz",
+ "integrity": "sha512-9q3YAvHoUiWlP3cK0v+w1N5Z23HXMj4IF4YuvjvWegWqNPfLXsOBE/V7UvQGpXxHFKRQQcNcVQE31g9SB/6qgQ==",
+ "dev": true,
+ "requires": {
+ "performance-now": "^0.2.0",
+ "prop-types": "^15.5.8",
+ "raf": "^3.1.0"
+ },
+ "dependencies": {
+ "performance-now": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-0.2.0.tgz",
+ "integrity": "sha1-M+8wxcd9TqIcWlOGnZG1bY8lVeU=",
+ "dev": true
+ }
+ }
+ },
+ "react-onclickoutside": {
+ "version": "6.9.0",
+ "resolved": "https://registry.npmjs.org/react-onclickoutside/-/react-onclickoutside-6.9.0.tgz",
+ "integrity": "sha512-8ltIY3bC7oGhj2nPAvWOGi+xGFybPNhJM0V1H8hY/whNcXgmDeaeoCMPPd8VatrpTsUWjb/vGzrmu6SrXVty3A==",
+ "dev": true
+ },
+ "react-router": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/react-router/-/react-router-4.3.1.tgz",
+ "integrity": "sha512-yrvL8AogDh2X42Dt9iknk4wF4V8bWREPirFfS9gLU1huk6qK41sg7Z/1S81jjTrGHxa3B8R3J6xIkDAA6CVarg==",
+ "dev": true,
+ "requires": {
+ "history": "^4.7.2",
+ "hoist-non-react-statics": "^2.5.0",
+ "invariant": "^2.2.4",
+ "loose-envify": "^1.3.1",
+ "path-to-regexp": "^1.7.0",
+ "prop-types": "^15.6.1",
+ "warning": "^4.0.1"
+ },
+ "dependencies": {
+ "hoist-non-react-statics": {
+ "version": "2.5.5",
+ "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-2.5.5.tgz",
+ "integrity": "sha512-rqcy4pJo55FTTLWt+bU8ukscqHeE/e9KWvsOW2b/a3afxQZhwkQdT1rPPCJ0rYXdj4vNcasY8zHTH+jF/qStxw==",
+ "dev": true
+ },
+ "isarray": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+ "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
+ "dev": true
+ },
+ "path-to-regexp": {
+ "version": "1.8.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz",
+ "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==",
+ "dev": true,
+ "requires": {
+ "isarray": "0.0.1"
+ }
+ }
+ }
+ },
+ "react-router-dom": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-4.3.1.tgz",
+ "integrity": "sha512-c/MlywfxDdCp7EnB7YfPMOfMD3tOtIjrQlj/CKfNMBxdmpJP8xcz5P/UAFn3JbnQCNUxsHyVVqllF9LhgVyFCA==",
+ "dev": true,
+ "requires": {
+ "history": "^4.7.2",
+ "invariant": "^2.2.4",
+ "loose-envify": "^1.3.1",
+ "prop-types": "^15.6.1",
+ "react-router": "^4.3.1",
+ "warning": "^4.0.1"
+ }
+ },
+ "react-s-alert": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/react-s-alert/-/react-s-alert-1.4.1.tgz",
+ "integrity": "sha512-+cSpVPe6YeGklhlo7zbVlB0Z6jdiU9HPmEVzp5nIhNm9lvdL7rVO2Jx09pCwT99GmODyoN0iNhbQku6r7six8A==",
+ "dev": true,
+ "requires": {
+ "babel-runtime": "^6.23.0"
+ }
+ },
+ "react-select": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/react-select/-/react-select-1.3.0.tgz",
+ "integrity": "sha512-g/QAU1HZrzSfxkwMAo/wzi6/ezdWye302RGZevsATec07hI/iSxcpB1hejFIp7V63DJ8mwuign6KmB3VjdlinQ==",
+ "dev": true,
+ "requires": {
+ "classnames": "^2.2.4",
+ "prop-types": "^15.5.8",
+ "react-input-autosize": "^2.1.2"
+ }
+ },
+ "react-sparklines": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/react-sparklines/-/react-sparklines-1.7.0.tgz",
+ "integrity": "sha512-bJFt9K4c5Z0k44G8KtxIhbG+iyxrKjBZhdW6afP+R7EnIq+iKjbWbEFISrf3WKNFsda+C46XAfnX0StS5fbDcg==",
+ "dev": true,
+ "requires": {
+ "prop-types": "^15.5.10"
+ }
+ },
+ "react-test-renderer": {
+ "version": "16.12.0",
+ "resolved": "https://registry.npmjs.org/react-test-renderer/-/react-test-renderer-16.12.0.tgz",
+ "integrity": "sha512-Vj/teSqt2oayaWxkbhQ6gKis+t5JrknXfPVo+aIJ8QwYAqMPH77uptOdrlphyxl8eQI/rtkOYg86i/UWkpFu0w==",
+ "dev": true,
+ "requires": {
+ "object-assign": "^4.1.1",
+ "prop-types": "^15.6.2",
+ "react-is": "^16.8.6",
+ "scheduler": "^0.18.0"
+ }
+ },
+ "react-typist": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/react-typist/-/react-typist-2.0.5.tgz",
+ "integrity": "sha512-iZCkeqeegO0TlkTMiH2JD1tvMtY9RrXkRylnAI6m8aCVAUUwNzoWTVF7CKLij6THeOMcUDCznLDDvNp55s+YZA==",
+ "dev": true,
+ "requires": {
+ "prop-types": "^15.5.10"
+ }
+ },
+ "react-vis": {
+ "version": "1.11.7",
+ "resolved": "https://registry.npmjs.org/react-vis/-/react-vis-1.11.7.tgz",
+ "integrity": "sha512-vJqS12l/6RHeSq8DVl4PzX0j8iPgbT8H8PtgTRsimKsBNcPjPseO4RICw1FUPrwj8MPrrna34LBtzyC4ATd5Ow==",
+ "dev": true,
+ "requires": {
+ "d3-array": "^1.2.0",
+ "d3-collection": "^1.0.3",
+ "d3-color": "^1.0.3",
+ "d3-contour": "^1.1.0",
+ "d3-format": "^1.2.0",
+ "d3-geo": "^1.6.4",
+ "d3-hexbin": "^0.2.2",
+ "d3-hierarchy": "^1.1.4",
+ "d3-interpolate": "^1.1.4",
+ "d3-sankey": "^0.7.1",
+ "d3-scale": "^1.0.5",
+ "d3-shape": "^1.1.0",
+ "d3-voronoi": "^1.1.2",
+ "deep-equal": "^1.0.1",
+ "global": "^4.3.1",
+ "hoek": "4.2.1",
+ "prop-types": "^15.5.8",
+ "react-motion": "^0.5.2"
+ }
+ },
+ "read-pkg": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz",
+ "integrity": "sha1-nLxoaXj+5l0WwA4rGcI3/Pbjg4k=",
+ "dev": true,
+ "requires": {
+ "load-json-file": "^4.0.0",
+ "normalize-package-data": "^2.3.2",
+ "path-type": "^3.0.0"
+ }
+ },
+ "read-pkg-up": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-2.0.0.tgz",
+ "integrity": "sha1-a3KoBImE4MQeeVEP1en6mbO1Sb4=",
+ "dev": true,
+ "requires": {
+ "find-up": "^2.0.0",
+ "read-pkg": "^2.0.0"
+ },
+ "dependencies": {
+ "find-up": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz",
+ "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=",
+ "dev": true,
+ "requires": {
+ "locate-path": "^2.0.0"
+ }
+ },
+ "load-json-file": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-2.0.0.tgz",
+ "integrity": "sha1-eUfkIUmvgNaWy/eXvKq8/h/inKg=",
+ "dev": true,
+ "requires": {
+ "graceful-fs": "^4.1.2",
+ "parse-json": "^2.2.0",
+ "pify": "^2.0.0",
+ "strip-bom": "^3.0.0"
+ }
+ },
+ "locate-path": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz",
+ "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=",
+ "dev": true,
+ "requires": {
+ "p-locate": "^2.0.0",
+ "path-exists": "^3.0.0"
+ }
+ },
+ "p-limit": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz",
+ "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==",
+ "dev": true,
+ "requires": {
+ "p-try": "^1.0.0"
+ }
+ },
+ "p-locate": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz",
+ "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=",
+ "dev": true,
+ "requires": {
+ "p-limit": "^1.1.0"
+ }
+ },
+ "p-try": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz",
+ "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=",
+ "dev": true
+ },
+ "parse-json": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz",
+ "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=",
+ "dev": true,
+ "requires": {
+ "error-ex": "^1.2.0"
+ }
+ },
+ "path-type": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/path-type/-/path-type-2.0.0.tgz",
+ "integrity": "sha1-8BLMuEFbcJb8LaoQVMPXI4lZTHM=",
+ "dev": true,
+ "requires": {
+ "pify": "^2.0.0"
+ }
+ },
+ "pify": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz",
+ "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=",
+ "dev": true
+ },
+ "read-pkg": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-2.0.0.tgz",
+ "integrity": "sha1-jvHAYjxqbbDcZxPEv6xGMysjaPg=",
+ "dev": true,
+ "requires": {
+ "load-json-file": "^2.0.0",
+ "normalize-package-data": "^2.3.2",
+ "path-type": "^2.0.0"
+ }
+ }
+ }
+ },
+ "readable-stream": {
+ "version": "2.3.7",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
+ "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
+ "dev": true,
+ "requires": {
+ "core-util-is": "~1.0.0",
+ "inherits": "~2.0.3",
+ "isarray": "~1.0.0",
+ "process-nextick-args": "~2.0.0",
+ "safe-buffer": "~5.1.1",
+ "string_decoder": "~1.1.1",
+ "util-deprecate": "~1.0.1"
+ }
+ },
+ "readdirp": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz",
+ "integrity": "sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==",
+ "dev": true,
+ "requires": {
+ "graceful-fs": "^4.1.11",
+ "micromatch": "^3.1.10",
+ "readable-stream": "^2.0.2"
+ }
+ },
+ "redent": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/redent/-/redent-2.0.0.tgz",
+ "integrity": "sha1-wbIAe0LVfrE4kHmzyDM2OdXhzKo=",
+ "dev": true,
+ "requires": {
+ "indent-string": "^3.0.0",
+ "strip-indent": "^2.0.0"
+ }
+ },
+ "reflect.ownkeys": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/reflect.ownkeys/-/reflect.ownkeys-0.2.0.tgz",
+ "integrity": "sha1-dJrO7H8/34tj+SegSAnpDFwLNGA=",
+ "dev": true
+ },
+ "regenerate": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.0.tgz",
+ "integrity": "sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg==",
+ "dev": true
+ },
+ "regenerate-unicode-properties": {
+ "version": "8.1.0",
+ "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-8.1.0.tgz",
+ "integrity": "sha512-LGZzkgtLY79GeXLm8Dp0BVLdQlWICzBnJz/ipWUgo59qBaZ+BHtq51P2q1uVZlppMuUAT37SDk39qUbjTWB7bA==",
+ "dev": true,
+ "requires": {
+ "regenerate": "^1.4.0"
+ }
+ },
+ "regenerator-runtime": {
+ "version": "0.11.1",
+ "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz",
+ "integrity": "sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg=="
+ },
+ "regenerator-transform": {
+ "version": "0.14.1",
+ "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.14.1.tgz",
+ "integrity": "sha512-flVuee02C3FKRISbxhXl9mGzdbWUVHubl1SMaknjxkFB1/iqpJhArQUvRxOOPEc/9tAiX0BaQ28FJH10E4isSQ==",
+ "dev": true,
+ "requires": {
+ "private": "^0.1.6"
+ }
+ },
+ "regex-not": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz",
+ "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==",
+ "dev": true,
+ "requires": {
+ "extend-shallow": "^3.0.2",
+ "safe-regex": "^1.1.0"
+ }
+ },
+ "regexp.prototype.flags": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.3.0.tgz",
+ "integrity": "sha512-2+Q0C5g951OlYlJz6yu5/M33IcsESLlLfsyIaLJaG4FA2r4yP8MvVMJUUP/fVBkSpbbbZlS5gynbEWLipiiXiQ==",
+ "dev": true,
+ "requires": {
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.17.0-next.1"
+ }
+ },
+ "regexpp": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz",
+ "integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==",
+ "dev": true
+ },
+ "regexpu-core": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.6.0.tgz",
+ "integrity": "sha512-YlVaefl8P5BnFYOITTNzDvan1ulLOiXJzCNZxduTIosN17b87h3bvG9yHMoHaRuo88H4mQ06Aodj5VtYGGGiTg==",
+ "dev": true,
+ "requires": {
+ "regenerate": "^1.4.0",
+ "regenerate-unicode-properties": "^8.1.0",
+ "regjsgen": "^0.5.0",
+ "regjsparser": "^0.6.0",
+ "unicode-match-property-ecmascript": "^1.0.4",
+ "unicode-match-property-value-ecmascript": "^1.1.0"
+ }
+ },
+ "registry-auth-token": {
+ "version": "3.4.0",
+ "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-3.4.0.tgz",
+ "integrity": "sha512-4LM6Fw8eBQdwMYcES4yTnn2TqIasbXuwDx3um+QRs7S55aMKCBKBxvPXl2RiUjHwuJLTyYfxSpmfSAjQpcuP+A==",
+ "dev": true,
+ "requires": {
+ "rc": "^1.1.6",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "registry-url": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-3.1.0.tgz",
+ "integrity": "sha1-PU74cPc93h138M+aOBQyRE4XSUI=",
+ "dev": true,
+ "requires": {
+ "rc": "^1.0.1"
+ }
+ },
+ "regjsgen": {
+ "version": "0.5.1",
+ "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.5.1.tgz",
+ "integrity": "sha512-5qxzGZjDs9w4tzT3TPhCJqWdCc3RLYwy9J2NB0nm5Lz+S273lvWcpjaTGHsT1dc6Hhfq41uSEOw8wBmxrKOuyg==",
+ "dev": true
+ },
+ "regjsparser": {
+ "version": "0.6.2",
+ "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.6.2.tgz",
+ "integrity": "sha512-E9ghzUtoLwDekPT0DYCp+c4h+bvuUpe6rRHCTYn6eGoqj1LgKXxT6I0Il4WbjhQkOghzi/V+y03bPKvbllL93Q==",
+ "dev": true,
+ "requires": {
+ "jsesc": "~0.5.0"
+ },
+ "dependencies": {
+ "jsesc": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz",
+ "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=",
+ "dev": true
+ }
+ }
+ },
+ "release-zalgo": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/release-zalgo/-/release-zalgo-1.0.0.tgz",
+ "integrity": "sha1-CXALflB0Mpc5Mw5TXFqQ+2eFFzA=",
+ "dev": true,
+ "requires": {
+ "es6-error": "^4.0.1"
+ }
+ },
+ "remove-trailing-separator": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz",
+ "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=",
+ "dev": true
+ },
+ "repeat-element": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.3.tgz",
+ "integrity": "sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g==",
+ "dev": true
+ },
+ "repeat-string": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz",
+ "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc="
+ },
+ "request": {
+ "version": "2.88.0",
+ "resolved": "https://registry.npmjs.org/request/-/request-2.88.0.tgz",
+ "integrity": "sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg==",
+ "dev": true,
+ "requires": {
+ "aws-sign2": "~0.7.0",
+ "aws4": "^1.8.0",
+ "caseless": "~0.12.0",
+ "combined-stream": "~1.0.6",
+ "extend": "~3.0.2",
+ "forever-agent": "~0.6.1",
+ "form-data": "~2.3.2",
+ "har-validator": "~5.1.0",
+ "http-signature": "~1.2.0",
+ "is-typedarray": "~1.0.0",
+ "isstream": "~0.1.2",
+ "json-stringify-safe": "~5.0.1",
+ "mime-types": "~2.1.19",
+ "oauth-sign": "~0.9.0",
+ "performance-now": "^2.1.0",
+ "qs": "~6.5.2",
+ "safe-buffer": "^5.1.2",
+ "tough-cookie": "~2.4.3",
+ "tunnel-agent": "^0.6.0",
+ "uuid": "^3.3.2"
+ },
+ "dependencies": {
+ "qs": {
+ "version": "6.5.2",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
+ "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==",
+ "dev": true
+ }
+ }
+ },
+ "request-promise-core": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/request-promise-core/-/request-promise-core-1.1.3.tgz",
+ "integrity": "sha512-QIs2+ArIGQVp5ZYbWD5ZLCY29D5CfWizP8eWnm8FoGD1TX61veauETVQbrV60662V0oFBkrDOuaBI8XgtuyYAQ==",
+ "dev": true,
+ "requires": {
+ "lodash": "^4.17.15"
+ }
+ },
+ "request-promise-native": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/request-promise-native/-/request-promise-native-1.0.8.tgz",
+ "integrity": "sha512-dapwLGqkHtwL5AEbfenuzjTYg35Jd6KPytsC2/TLkVMz8rm+tNt72MGUWT1RP/aYawMpN6HqbNGBQaRcBtjQMQ==",
+ "dev": true,
+ "requires": {
+ "request-promise-core": "1.1.3",
+ "stealthy-require": "^1.1.1",
+ "tough-cookie": "^2.3.3"
+ }
+ },
+ "require-directory": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
+ "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=",
+ "dev": true
+ },
+ "require-main-filename": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz",
+ "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==",
+ "dev": true
+ },
+ "require-package-name": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/require-package-name/-/require-package-name-2.0.1.tgz",
+ "integrity": "sha1-wR6XJ2tluOKSP3Xav1+y7ww4Qbk=",
+ "dev": true
+ },
+ "resolve": {
+ "version": "1.15.0",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.15.0.tgz",
+ "integrity": "sha512-+hTmAldEGE80U2wJJDC1lebb5jWqvTYAfm3YZ1ckk1gBr0MnCqUKlwK1e+anaFljIl+F5tR5IoZcm4ZDA1zMQw==",
+ "requires": {
+ "path-parse": "^1.0.6"
+ }
+ },
+ "resolve-cwd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-2.0.0.tgz",
+ "integrity": "sha1-AKn3OHVW4nA46uIyyqNypqWbZlo=",
+ "dev": true,
+ "requires": {
+ "resolve-from": "^3.0.0"
+ },
+ "dependencies": {
+ "resolve-from": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-3.0.0.tgz",
+ "integrity": "sha1-six699nWiBvItuZTM17rywoYh0g=",
+ "dev": true
+ }
+ }
+ },
+ "resolve-from": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
+ "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
+ "dev": true
+ },
+ "resolve-pathname": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz",
+ "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==",
+ "dev": true
+ },
+ "resolve-url": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz",
+ "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=",
+ "dev": true
+ },
+ "restore-cursor": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-2.0.0.tgz",
+ "integrity": "sha1-n37ih/gv0ybU/RYpI9YhKe7g368=",
+ "dev": true,
+ "requires": {
+ "onetime": "^2.0.0",
+ "signal-exit": "^3.0.2"
+ }
+ },
+ "ret": {
+ "version": "0.1.15",
+ "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz",
+ "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==",
+ "dev": true
+ },
+ "right-align": {
+ "version": "0.1.3",
+ "resolved": "https://registry.npmjs.org/right-align/-/right-align-0.1.3.tgz",
+ "integrity": "sha1-YTObci/mo1FWiSENJOFMlhSGE+8=",
+ "requires": {
+ "align-text": "^0.1.1"
+ }
+ },
+ "rimraf": {
+ "version": "2.7.1",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz",
+ "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==",
+ "dev": true,
+ "requires": {
+ "glob": "^7.1.3"
+ }
+ },
+ "ripemd160": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz",
+ "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==",
+ "dev": true,
+ "requires": {
+ "hash-base": "^3.0.0",
+ "inherits": "^2.0.1"
+ }
+ },
+ "rst-selector-parser": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/rst-selector-parser/-/rst-selector-parser-2.2.3.tgz",
+ "integrity": "sha1-gbIw6i/MYGbInjRy3nlChdmwPZE=",
+ "dev": true,
+ "requires": {
+ "lodash.flattendeep": "^4.4.0",
+ "nearley": "^2.7.10"
+ }
+ },
+ "run-async": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.3.0.tgz",
+ "integrity": "sha1-A3GrSuC91yDUFm19/aZP96RFpsA=",
+ "dev": true,
+ "requires": {
+ "is-promise": "^2.1.0"
+ }
+ },
+ "run-node": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/run-node/-/run-node-1.0.0.tgz",
+ "integrity": "sha512-kc120TBlQ3mih1LSzdAJXo4xn/GWS2ec0l3S+syHDXP9uRr0JAT8Qd3mdMuyjqCzeZktgP3try92cEgf9Nks8A==",
+ "dev": true
+ },
+ "run-queue": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/run-queue/-/run-queue-1.0.3.tgz",
+ "integrity": "sha1-6Eg5bwV9Ij8kOGkkYY4laUFh7Ec=",
+ "dev": true,
+ "requires": {
+ "aproba": "^1.1.1"
+ }
+ },
+ "rx": {
+ "version": "2.3.24",
+ "resolved": "https://registry.npmjs.org/rx/-/rx-2.3.24.tgz",
+ "integrity": "sha1-FPlQpCF9fjXapxu8vljv9o6ksrc=",
+ "dev": true
+ },
+ "rxjs": {
+ "version": "6.5.4",
+ "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.5.4.tgz",
+ "integrity": "sha512-naMQXcgEo3csAEGvw/NydRA0fuS2nDZJiw1YUWFKU7aPPAPGZEsD4Iimit96qwCieH6y614MCLYwdkrWx7z/7Q==",
+ "dev": true,
+ "requires": {
+ "tslib": "^1.9.0"
+ }
+ },
+ "safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
+ },
+ "safe-regex": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz",
+ "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=",
+ "dev": true,
+ "requires": {
+ "ret": "~0.1.10"
+ }
+ },
+ "safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
+ },
+ "samsam": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/samsam/-/samsam-1.1.2.tgz",
+ "integrity": "sha1-vsEf3IOp/aBjQBIQ5AF2wwJNFWc=",
+ "dev": true
+ },
+ "sax": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz",
+ "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw=="
+ },
+ "scheduler": {
+ "version": "0.18.0",
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.18.0.tgz",
+ "integrity": "sha512-agTSHR1Nbfi6ulI0kYNK0203joW2Y5W4po4l+v03tOoiJKpTBbxpNhWDvqc/4IcOw+KLmSiQLTasZ4cab2/UWQ==",
+ "dev": true,
+ "requires": {
+ "loose-envify": "^1.1.0",
+ "object-assign": "^4.1.1"
+ }
+ },
+ "schema-utils": {
+ "version": "2.6.4",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.6.4.tgz",
+ "integrity": "sha512-VNjcaUxVnEeun6B2fiiUDjXXBtD4ZSH7pdbfIu1pOFwgptDPLMo/z9jr4sUfsjFVPqDCEin/F7IYlq7/E6yDbQ==",
+ "dev": true,
+ "requires": {
+ "ajv": "^6.10.2",
+ "ajv-keywords": "^3.4.1"
+ }
+ },
+ "seedrandom": {
+ "version": "3.0.5",
+ "resolved": "https://registry.npmjs.org/seedrandom/-/seedrandom-3.0.5.tgz",
+ "integrity": "sha512-8OwmbklUNzwezjGInmZ+2clQmExQPvomqjL7LFqOYqtmuxRgQYqOD3mHaU+MvZn5FLUeVxVfQjwLZW/n/JFuqg=="
+ },
+ "semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "dev": true
+ },
+ "semver-compare": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/semver-compare/-/semver-compare-1.0.0.tgz",
+ "integrity": "sha1-De4hahyUGrN+nvsXiPavxf9VN/w=",
+ "dev": true
+ },
+ "semver-diff": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-2.1.0.tgz",
+ "integrity": "sha1-S7uEN8jTfksM8aaP1ybsbWRdbTY=",
+ "dev": true,
+ "requires": {
+ "semver": "^5.0.3"
+ }
+ },
+ "send": {
+ "version": "0.17.1",
+ "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz",
+ "integrity": "sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==",
+ "requires": {
+ "debug": "2.6.9",
+ "depd": "~1.1.2",
+ "destroy": "~1.0.4",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "fresh": "0.5.2",
+ "http-errors": "~1.7.2",
+ "mime": "1.6.0",
+ "ms": "2.1.1",
+ "on-finished": "~2.3.0",
+ "range-parser": "~1.2.1",
+ "statuses": "~1.5.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ },
+ "dependencies": {
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
+ }
+ }
+ },
+ "ms": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz",
+ "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg=="
+ }
+ }
+ },
+ "serialize-javascript": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-2.1.2.tgz",
+ "integrity": "sha512-rs9OggEUF0V4jUSecXazOYsLfu7OGK2qIn3c7IPBiffz32XniEp/TX9Xmc9LQfK2nQ2QKHvZ2oygKUGU0lG4jQ==",
+ "dev": true
+ },
+ "serve-favicon": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/serve-favicon/-/serve-favicon-2.5.0.tgz",
+ "integrity": "sha1-k10kDN/g9YBTB/3+ln2IlCosvPA=",
+ "requires": {
+ "etag": "~1.8.1",
+ "fresh": "0.5.2",
+ "ms": "2.1.1",
+ "parseurl": "~1.3.2",
+ "safe-buffer": "5.1.1"
+ },
+ "dependencies": {
+ "ms": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz",
+ "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg=="
+ },
+ "safe-buffer": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz",
+ "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg=="
+ }
+ }
+ },
+ "serve-static": {
+ "version": "1.14.1",
+ "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz",
+ "integrity": "sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==",
+ "requires": {
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "parseurl": "~1.3.3",
+ "send": "0.17.1"
+ }
+ },
+ "set-blocking": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz",
+ "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=",
+ "dev": true
+ },
+ "set-value": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz",
+ "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==",
+ "dev": true,
+ "requires": {
+ "extend-shallow": "^2.0.1",
+ "is-extendable": "^0.1.1",
+ "is-plain-object": "^2.0.3",
+ "split-string": "^3.0.1"
+ },
+ "dependencies": {
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dev": true,
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ }
+ }
+ },
+ "setimmediate": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz",
+ "integrity": "sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU=",
+ "dev": true
+ },
+ "setprototypeof": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz",
+ "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw=="
+ },
+ "sha.js": {
+ "version": "2.4.11",
+ "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz",
+ "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==",
+ "dev": true,
+ "requires": {
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "shallowequal": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz",
+ "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==",
+ "dev": true
+ },
+ "shebang-command": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz",
+ "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=",
+ "dev": true,
+ "requires": {
+ "shebang-regex": "^1.0.0"
+ }
+ },
+ "shebang-regex": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz",
+ "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=",
+ "dev": true
+ },
+ "side-channel": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.2.tgz",
+ "integrity": "sha512-7rL9YlPHg7Ancea1S96Pa8/QWb4BtXL/TZvS6B8XFetGBeuhAsfmUspK6DokBeZ64+Kj9TCNRD/30pVz1BvQNA==",
+ "dev": true,
+ "requires": {
+ "es-abstract": "^1.17.0-next.1",
+ "object-inspect": "^1.7.0"
+ }
+ },
+ "signal-exit": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz",
+ "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=",
+ "dev": true
+ },
+ "simple-concat": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.0.tgz",
+ "integrity": "sha1-c0TLuLbib7J9ZrL8hvn21Zl1IcY=",
+ "dev": true
+ },
+ "simple-get": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/simple-get/-/simple-get-3.1.0.tgz",
+ "integrity": "sha512-bCR6cP+aTdScaQCnQKbPKtJOKDp/hj9EDLJo3Nw4y1QksqaovlW/bnptB6/c1e+qmNIDHRK+oXFDdEqBT8WzUA==",
+ "dev": true,
+ "requires": {
+ "decompress-response": "^4.2.0",
+ "once": "^1.3.1",
+ "simple-concat": "^1.0.0"
+ }
+ },
+ "sinon": {
+ "version": "1.17.7",
+ "resolved": "https://registry.npmjs.org/sinon/-/sinon-1.17.7.tgz",
+ "integrity": "sha1-RUKk9JugxFwF6y6d2dID4rjv4L8=",
+ "dev": true,
+ "requires": {
+ "formatio": "1.1.1",
+ "lolex": "1.3.2",
+ "samsam": "1.1.2",
+ "util": ">=0.10.3 <1"
+ }
+ },
+ "slash": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz",
+ "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==",
+ "dev": true
+ },
+ "slice-ansi": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-2.1.0.tgz",
+ "integrity": "sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^3.2.0",
+ "astral-regex": "^1.0.0",
+ "is-fullwidth-code-point": "^2.0.0"
+ },
+ "dependencies": {
+ "ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "dev": true,
+ "requires": {
+ "color-convert": "^1.9.0"
+ }
+ },
+ "is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=",
+ "dev": true
+ }
+ }
+ },
+ "snapdragon": {
+ "version": "0.8.2",
+ "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz",
+ "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==",
+ "dev": true,
+ "requires": {
+ "base": "^0.11.1",
+ "debug": "^2.2.0",
+ "define-property": "^0.2.5",
+ "extend-shallow": "^2.0.1",
+ "map-cache": "^0.2.2",
+ "source-map": "^0.5.6",
+ "source-map-resolve": "^0.5.0",
+ "use": "^3.1.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ },
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dev": true,
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ },
+ "source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
+ "dev": true
+ }
+ }
+ },
+ "snapdragon-node": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz",
+ "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==",
+ "dev": true,
+ "requires": {
+ "define-property": "^1.0.0",
+ "isobject": "^3.0.0",
+ "snapdragon-util": "^3.0.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^1.0.0"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ },
+ "kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
+ "dev": true
+ }
+ }
+ },
+ "snapdragon-util": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz",
+ "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.2.0"
+ }
+ },
+ "source-list-map": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz",
+ "integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw==",
+ "dev": true
+ },
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
+ },
+ "source-map-resolve": {
+ "version": "0.5.3",
+ "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz",
+ "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==",
+ "dev": true,
+ "requires": {
+ "atob": "^2.1.2",
+ "decode-uri-component": "^0.2.0",
+ "resolve-url": "^0.2.1",
+ "source-map-url": "^0.4.0",
+ "urix": "^0.1.0"
+ }
+ },
+ "source-map-support": {
+ "version": "0.5.16",
+ "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.16.tgz",
+ "integrity": "sha512-efyLRJDr68D9hBBNIPWFjhpFzURh+KJykQwvMyW5UiZzYwoF6l4YMMDIJJEyFWxWCqfyxLzz6tSfUFR+kXXsVQ==",
+ "dev": true,
+ "requires": {
+ "buffer-from": "^1.0.0",
+ "source-map": "^0.6.0"
+ }
+ },
+ "source-map-url": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.0.tgz",
+ "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=",
+ "dev": true
+ },
+ "spawn-command": {
+ "version": "0.0.2-1",
+ "resolved": "https://registry.npmjs.org/spawn-command/-/spawn-command-0.0.2-1.tgz",
+ "integrity": "sha1-YvXpRmmBwbeW3Fkpk34RycaSG9A=",
+ "dev": true
+ },
+ "spawn-wrap": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/spawn-wrap/-/spawn-wrap-1.4.3.tgz",
+ "integrity": "sha512-IgB8md0QW/+tWqcavuFgKYR/qIRvJkRLPJDFaoXtLLUaVcCDK0+HeFTkmQHj3eprcYhc+gOl0aEA1w7qZlYezw==",
+ "dev": true,
+ "requires": {
+ "foreground-child": "^1.5.6",
+ "mkdirp": "^0.5.0",
+ "os-homedir": "^1.0.1",
+ "rimraf": "^2.6.2",
+ "signal-exit": "^3.0.2",
+ "which": "^1.3.0"
+ }
+ },
+ "spdx-correct": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.0.tgz",
+ "integrity": "sha512-lr2EZCctC2BNR7j7WzJ2FpDznxky1sjfxvvYEyzxNyb6lZXHODmEoJeFu4JupYlkfha1KZpJyoqiJ7pgA1qq8Q==",
+ "dev": true,
+ "requires": {
+ "spdx-expression-parse": "^3.0.0",
+ "spdx-license-ids": "^3.0.0"
+ }
+ },
+ "spdx-exceptions": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.2.0.tgz",
+ "integrity": "sha512-2XQACfElKi9SlVb1CYadKDXvoajPgBVPn/gOQLrTvHdElaVhr7ZEbqJaRnJLVNeaI4cMEAgVCeBMKF6MWRDCRA==",
+ "dev": true
+ },
+ "spdx-expression-parse": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz",
+ "integrity": "sha512-Yg6D3XpRD4kkOmTpdgbUiEJFKghJH03fiC1OPll5h/0sO6neh2jqRDVHOQ4o/LMea0tgCkbMgea5ip/e+MkWyg==",
+ "dev": true,
+ "requires": {
+ "spdx-exceptions": "^2.1.0",
+ "spdx-license-ids": "^3.0.0"
+ }
+ },
+ "spdx-license-ids": {
+ "version": "3.0.5",
+ "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz",
+ "integrity": "sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q==",
+ "dev": true
+ },
+ "split-string": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz",
+ "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==",
+ "dev": true,
+ "requires": {
+ "extend-shallow": "^3.0.0"
+ }
+ },
+ "sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=",
+ "dev": true
+ },
+ "sshpk": {
+ "version": "1.16.1",
+ "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz",
+ "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==",
+ "dev": true,
+ "requires": {
+ "asn1": "~0.2.3",
+ "assert-plus": "^1.0.0",
+ "bcrypt-pbkdf": "^1.0.0",
+ "dashdash": "^1.12.0",
+ "ecc-jsbn": "~0.1.1",
+ "getpass": "^0.1.1",
+ "jsbn": "~0.1.0",
+ "safer-buffer": "^2.0.2",
+ "tweetnacl": "~0.14.0"
+ }
+ },
+ "ssri": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/ssri/-/ssri-6.0.1.tgz",
+ "integrity": "sha512-3Wge10hNcT1Kur4PDFwEieXSCMCJs/7WvSACcrMYrNp+b8kDL1/0wJch5Ni2WrtwEa2IO8OsVfeKIciKCDx/QA==",
+ "dev": true,
+ "requires": {
+ "figgy-pudding": "^3.5.1"
+ }
+ },
+ "stack-trace": {
+ "version": "0.0.10",
+ "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz",
+ "integrity": "sha1-VHxws0fo0ytOEI6hoqFZ5f3eGcA="
+ },
+ "static-extend": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz",
+ "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=",
+ "dev": true,
+ "requires": {
+ "define-property": "^0.2.5",
+ "object-copy": "^0.1.0"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ }
+ }
+ },
+ "statsd-client": {
+ "version": "0.4.4",
+ "resolved": "https://registry.npmjs.org/statsd-client/-/statsd-client-0.4.4.tgz",
+ "integrity": "sha512-GjAReJDNZomTTTaIaDuDddWknHO2GXmXS/9JKy6iQFOHNSQ4yeaRGP18oNgahl+c3XTUfUWBYIUnipznNh5Vww=="
+ },
+ "statuses": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
+ "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow="
+ },
+ "stealthy-require": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/stealthy-require/-/stealthy-require-1.1.1.tgz",
+ "integrity": "sha1-NbCYdbT/SfJqd35QmzCQoyJr8ks=",
+ "dev": true
+ },
+ "stream-browserify": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-2.0.2.tgz",
+ "integrity": "sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg==",
+ "dev": true,
+ "requires": {
+ "inherits": "~2.0.1",
+ "readable-stream": "^2.0.2"
+ }
+ },
+ "stream-each": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/stream-each/-/stream-each-1.2.3.tgz",
+ "integrity": "sha512-vlMC2f8I2u/bZGqkdfLQW/13Zihpej/7PmSiMQsbYddxuTsJp8vRe2x2FvVExZg7FaOds43ROAuFJwPR4MTZLw==",
+ "dev": true,
+ "requires": {
+ "end-of-stream": "^1.1.0",
+ "stream-shift": "^1.0.0"
+ }
+ },
+ "stream-http": {
+ "version": "2.8.3",
+ "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-2.8.3.tgz",
+ "integrity": "sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==",
+ "dev": true,
+ "requires": {
+ "builtin-status-codes": "^3.0.0",
+ "inherits": "^2.0.1",
+ "readable-stream": "^2.3.6",
+ "to-arraybuffer": "^1.0.0",
+ "xtend": "^4.0.0"
+ }
+ },
+ "stream-shift": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz",
+ "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==",
+ "dev": true
+ },
+ "string-width": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz",
+ "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=",
+ "requires": {
+ "code-point-at": "^1.0.0",
+ "is-fullwidth-code-point": "^1.0.0",
+ "strip-ansi": "^3.0.0"
+ },
+ "dependencies": {
+ "strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ }
+ }
+ },
+ "string.prototype.matchall": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.2.tgz",
+ "integrity": "sha512-N/jp6O5fMf9os0JU3E72Qhf590RSRZU/ungsL/qJUYVTNv7hTG0P/dbPjxINVN9jpscu3nzYwKESU3P3RY5tOg==",
+ "dev": true,
+ "requires": {
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.17.0",
+ "has-symbols": "^1.0.1",
+ "internal-slot": "^1.0.2",
+ "regexp.prototype.flags": "^1.3.0",
+ "side-channel": "^1.0.2"
+ }
+ },
+ "string.prototype.trim": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.1.tgz",
+ "integrity": "sha512-MjGFEeqixw47dAMFMtgUro/I0+wNqZB5GKXGt1fFr24u3TzDXCPu7J9Buppzoe3r/LqkSDLDDJzE15RGWDGAVw==",
+ "dev": true,
+ "requires": {
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.17.0-next.1",
+ "function-bind": "^1.1.1"
+ }
+ },
+ "string.prototype.trimleft": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimleft/-/string.prototype.trimleft-2.1.1.tgz",
+ "integrity": "sha512-iu2AGd3PuP5Rp7x2kEZCrB2Nf41ehzh+goo8TV7z8/XDBbsvc6HQIlUl9RjkZ4oyrW1XM5UwlGl1oVEaDjg6Ag==",
+ "dev": true,
+ "requires": {
+ "define-properties": "^1.1.3",
+ "function-bind": "^1.1.1"
+ }
+ },
+ "string.prototype.trimright": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimright/-/string.prototype.trimright-2.1.1.tgz",
+ "integrity": "sha512-qFvWL3/+QIgZXVmJBfpHmxLB7xsUXz6HsUmP8+5dRaC3Q7oKUv9Vo6aMCRZC1smrtyECFsIT30PqBJ1gTjAs+g==",
+ "dev": true,
+ "requires": {
+ "define-properties": "^1.1.3",
+ "function-bind": "^1.1.1"
+ }
+ },
+ "string_decoder": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+ "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+ "dev": true,
+ "requires": {
+ "safe-buffer": "~5.1.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-0.1.1.tgz",
+ "integrity": "sha1-OeipjQRNFQZgq+SmgIrPcLt7yZE="
+ },
+ "strip-bom": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
+ "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=",
+ "dev": true
+ },
+ "strip-eof": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz",
+ "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=",
+ "dev": true
+ },
+ "strip-indent": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-2.0.0.tgz",
+ "integrity": "sha1-XvjbKV0B5u1sv3qrlpmNeCJSe2g=",
+ "dev": true
+ },
+ "strip-json-comments": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
+ "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=",
+ "dev": true
+ },
+ "style-loader": {
+ "version": "0.21.0",
+ "resolved": "https://registry.npmjs.org/style-loader/-/style-loader-0.21.0.tgz",
+ "integrity": "sha512-T+UNsAcl3Yg+BsPKs1vd22Fr8sVT+CJMtzqc6LEw9bbJZb43lm9GoeIfUcDEefBSWC0BhYbcdupV1GtI4DGzxg==",
+ "dev": true,
+ "requires": {
+ "loader-utils": "^1.1.0",
+ "schema-utils": "^0.4.5"
+ },
+ "dependencies": {
+ "schema-utils": {
+ "version": "0.4.7",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-0.4.7.tgz",
+ "integrity": "sha512-v/iwU6wvwGK8HbU9yi3/nhGzP0yGSuhQMzL6ySiec1FSrZZDkhm4noOSWzrNFo/jEc+SJY6jRTwuwbSXJPDUnQ==",
+ "dev": true,
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-keywords": "^3.1.0"
+ }
+ }
+ }
+ },
+ "superagent": {
+ "version": "3.8.3",
+ "resolved": "https://registry.npmjs.org/superagent/-/superagent-3.8.3.tgz",
+ "integrity": "sha512-GLQtLMCoEIK4eDv6OGtkOoSMt3D+oq0y3dsxMuYuDvaNUvuT8eFBuLmfR0iYYzHC1e8hpzC6ZsxbuP6DIalMFA==",
+ "dev": true,
+ "requires": {
+ "component-emitter": "^1.2.0",
+ "cookiejar": "^2.1.0",
+ "debug": "^3.1.0",
+ "extend": "^3.0.0",
+ "form-data": "^2.3.1",
+ "formidable": "^1.2.0",
+ "methods": "^1.1.1",
+ "mime": "^1.4.1",
+ "qs": "^6.5.1",
+ "readable-stream": "^2.3.5"
+ }
+ },
+ "supertest": {
+ "version": "3.4.2",
+ "resolved": "https://registry.npmjs.org/supertest/-/supertest-3.4.2.tgz",
+ "integrity": "sha512-WZWbwceHUo2P36RoEIdXvmqfs47idNNZjCuJOqDz6rvtkk8ym56aU5oglORCpPeXGxT7l9rkJ41+O1lffQXYSA==",
+ "dev": true,
+ "requires": {
+ "methods": "^1.1.2",
+ "superagent": "^3.8.3"
+ }
+ },
+ "supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ },
+ "svg-url-loader": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/svg-url-loader/-/svg-url-loader-3.0.3.tgz",
+ "integrity": "sha512-MKGiRNDs8fnHcZcPkhGcw9+130IXyFM9H8m6T7u3ScUuZYEeVzX0vNMru30D4MCF6vMYas5iw/Ru9lwFKBjaGw==",
+ "dev": true,
+ "requires": {
+ "file-loader": "~4.3.0",
+ "loader-utils": "~1.2.3"
+ }
+ },
+ "symbol-tree": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz",
+ "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==",
+ "dev": true
+ },
+ "table": {
+ "version": "5.4.6",
+ "resolved": "https://registry.npmjs.org/table/-/table-5.4.6.tgz",
+ "integrity": "sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug==",
+ "dev": true,
+ "requires": {
+ "ajv": "^6.10.2",
+ "lodash": "^4.17.14",
+ "slice-ansi": "^2.1.0",
+ "string-width": "^3.0.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
+ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
+ "dev": true
+ },
+ "is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=",
+ "dev": true
+ },
+ "string-width": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz",
+ "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==",
+ "dev": true,
+ "requires": {
+ "emoji-regex": "^7.0.1",
+ "is-fullwidth-code-point": "^2.0.0",
+ "strip-ansi": "^5.1.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
+ "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^4.1.0"
+ }
+ }
+ }
+ },
+ "tapable": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz",
+ "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==",
+ "dev": true
+ },
+ "tar": {
+ "version": "4.4.13",
+ "resolved": "https://registry.npmjs.org/tar/-/tar-4.4.13.tgz",
+ "integrity": "sha512-w2VwSrBoHa5BsSyH+KxEqeQBAllHhccyMFVHtGtdMpF4W7IRWfZjFiQceJPChOeTsSDVUpER2T8FA93pr0L+QA==",
+ "dev": true,
+ "requires": {
+ "chownr": "^1.1.1",
+ "fs-minipass": "^1.2.5",
+ "minipass": "^2.8.6",
+ "minizlib": "^1.2.1",
+ "mkdirp": "^0.5.0",
+ "safe-buffer": "^5.1.2",
+ "yallist": "^3.0.3"
+ }
+ },
+ "term-size": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/term-size/-/term-size-1.2.0.tgz",
+ "integrity": "sha1-RYuDiH8oj8Vtb/+/rSYuJmOO+mk=",
+ "dev": true,
+ "requires": {
+ "execa": "^0.7.0"
+ },
+ "dependencies": {
+ "cross-spawn": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz",
+ "integrity": "sha1-6L0O/uWPz/b4+UUQoKVUu/ojVEk=",
+ "dev": true,
+ "requires": {
+ "lru-cache": "^4.0.1",
+ "shebang-command": "^1.2.0",
+ "which": "^1.2.9"
+ }
+ },
+ "execa": {
+ "version": "0.7.0",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-0.7.0.tgz",
+ "integrity": "sha1-lEvs00zEHuMqY6n68nrVpl/Fl3c=",
+ "dev": true,
+ "requires": {
+ "cross-spawn": "^5.0.1",
+ "get-stream": "^3.0.0",
+ "is-stream": "^1.1.0",
+ "npm-run-path": "^2.0.0",
+ "p-finally": "^1.0.0",
+ "signal-exit": "^3.0.0",
+ "strip-eof": "^1.0.0"
+ }
+ },
+ "get-stream": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz",
+ "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=",
+ "dev": true
+ }
+ }
+ },
+ "terser": {
+ "version": "4.6.3",
+ "resolved": "https://registry.npmjs.org/terser/-/terser-4.6.3.tgz",
+ "integrity": "sha512-Lw+ieAXmY69d09IIc/yqeBqXpEQIpDGZqT34ui1QWXIUpR2RjbqEkT8X7Lgex19hslSqcWM5iMN2kM11eMsESQ==",
+ "dev": true,
+ "requires": {
+ "commander": "^2.20.0",
+ "source-map": "~0.6.1",
+ "source-map-support": "~0.5.12"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "2.20.3",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
+ "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==",
+ "dev": true
+ }
+ }
+ },
+ "terser-webpack-plugin": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.3.tgz",
+ "integrity": "sha512-QMxecFz/gHQwteWwSo5nTc6UaICqN1bMedC5sMtUc7y3Ha3Q8y6ZO0iCR8pq4RJC8Hjf0FEPEHZqcMB/+DFCrA==",
+ "dev": true,
+ "requires": {
+ "cacache": "^12.0.2",
+ "find-cache-dir": "^2.1.0",
+ "is-wsl": "^1.1.0",
+ "schema-utils": "^1.0.0",
+ "serialize-javascript": "^2.1.2",
+ "source-map": "^0.6.1",
+ "terser": "^4.1.2",
+ "webpack-sources": "^1.4.0",
+ "worker-farm": "^1.7.0"
+ },
+ "dependencies": {
+ "schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "dev": true,
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ }
+ }
+ }
+ },
+ "test-exclude": {
+ "version": "5.2.3",
+ "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-5.2.3.tgz",
+ "integrity": "sha512-M+oxtseCFO3EDtAaGH7iiej3CBkzXqFMbzqYAACdzKui4eZA+pq3tZEwChvOdNfa7xxy8BfbmgJSIr43cC/+2g==",
+ "dev": true,
+ "requires": {
+ "glob": "^7.1.3",
+ "minimatch": "^3.0.4",
+ "read-pkg-up": "^4.0.0",
+ "require-main-filename": "^2.0.0"
+ },
+ "dependencies": {
+ "read-pkg-up": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-4.0.0.tgz",
+ "integrity": "sha512-6etQSH7nJGsK0RbG/2TeDzZFa8shjQ1um+SwQQ5cwKy0dhSXdOncEhb1CPpvQG4h7FyOV6EB6YlV0yJvZQNAkA==",
+ "dev": true,
+ "requires": {
+ "find-up": "^3.0.0",
+ "read-pkg": "^3.0.0"
+ }
+ }
+ }
+ },
+ "text-table": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
+ "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=",
+ "dev": true
+ },
+ "three": {
+ "version": "0.106.2",
+ "resolved": "https://registry.npmjs.org/three/-/three-0.106.2.tgz",
+ "integrity": "sha512-4Tlx43uoxnIaZFW2Bzkd1rXsatvVHEWAZJy8LuE+s6Q8c66ogNnhfq1bHiBKPAnXP230LD11H/ScIZc2LZMviA==",
+ "dev": true
+ },
+ "through": {
+ "version": "2.3.8",
+ "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
+ "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=",
+ "dev": true
+ },
+ "through2": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz",
+ "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==",
+ "dev": true,
+ "requires": {
+ "readable-stream": "~2.3.6",
+ "xtend": "~4.0.1"
+ }
+ },
+ "timeago.js": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/timeago.js/-/timeago.js-3.0.2.tgz",
+ "integrity": "sha1-MqZ+fA2IfqQspYjTquJvd95edsw=",
+ "dev": true,
+ "requires": {
+ "@types/jquery": "^2.0.40"
+ }
+ },
+ "timed-out": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/timed-out/-/timed-out-4.0.1.tgz",
+ "integrity": "sha1-8y6srFoXW+ol1/q1Zas+2HQe9W8=",
+ "dev": true
+ },
+ "timers-browserify": {
+ "version": "2.0.11",
+ "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.11.tgz",
+ "integrity": "sha512-60aV6sgJ5YEbzUdn9c8kYGIqOubPoUdqQCul3SBAsRCZ40s6Y5cMcrW4dt3/k/EsbLVJNl9n6Vz3fTc+k2GeKQ==",
+ "dev": true,
+ "requires": {
+ "setimmediate": "^1.0.4"
+ }
+ },
+ "tiny-invariant": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.1.0.tgz",
+ "integrity": "sha512-ytxQvrb1cPc9WBEI/HSeYYoGD0kWnGEOR8RY6KomWLBVhqz0RgTwVO9dLrGz7dC+nN9llyI7OKAgRq8Vq4ZBSw==",
+ "dev": true
+ },
+ "tiny-warning": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz",
+ "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==",
+ "dev": true
+ },
+ "titleize": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/titleize/-/titleize-1.0.1.tgz",
+ "integrity": "sha512-rUwGDruKq1gX+FFHbTl5qjI7teVO7eOe+C8IcQ7QT+1BK3eEUXJqbZcBOeaRP4FwSC/C1A5jDoIVta0nIQ9yew==",
+ "dev": true
+ },
+ "tmp": {
+ "version": "0.0.33",
+ "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz",
+ "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==",
+ "dev": true,
+ "requires": {
+ "os-tmpdir": "~1.0.2"
+ }
+ },
+ "to-arraybuffer": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz",
+ "integrity": "sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M=",
+ "dev": true
+ },
+ "to-fast-properties": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-1.0.3.tgz",
+ "integrity": "sha1-uDVx+k2MJbguIxsG46MFXeTKGkc="
+ },
+ "to-object-path": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz",
+ "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.0.2"
+ }
+ },
+ "to-regex": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz",
+ "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==",
+ "dev": true,
+ "requires": {
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "regex-not": "^1.0.2",
+ "safe-regex": "^1.1.0"
+ }
+ },
+ "to-regex-range": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz",
+ "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=",
+ "dev": true,
+ "requires": {
+ "is-number": "^3.0.0",
+ "repeat-string": "^1.6.1"
+ }
+ },
+ "toggle-selection": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/toggle-selection/-/toggle-selection-1.0.6.tgz",
+ "integrity": "sha1-bkWxJj8gF/oKzH2J14sVuL932jI=",
+ "dev": true
+ },
+ "toidentifier": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz",
+ "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw=="
+ },
+ "token-stream": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/token-stream/-/token-stream-0.0.1.tgz",
+ "integrity": "sha1-zu78cXp2xDFvEm0LnbqlXX598Bo="
+ },
+ "topo": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/topo/-/topo-2.0.2.tgz",
+ "integrity": "sha1-zVYVdSU5BXwNwEkaYhw7xvvh0YI=",
+ "dev": true,
+ "requires": {
+ "hoek": "4.x.x"
+ }
+ },
+ "touch": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/touch/-/touch-3.1.0.tgz",
+ "integrity": "sha512-WBx8Uy5TLtOSRtIq+M03/sKDrXCLHxwDcquSP2c43Le03/9serjQBIztjRz6FkJez9D/hleyAXTBGLwwZUw9lA==",
+ "dev": true,
+ "requires": {
+ "nopt": "~1.0.10"
+ },
+ "dependencies": {
+ "nopt": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz",
+ "integrity": "sha1-bd0hvSoxQXuScn3Vhfim83YI6+4=",
+ "dev": true,
+ "requires": {
+ "abbrev": "1"
+ }
+ }
+ }
+ },
+ "tough-cookie": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.4.3.tgz",
+ "integrity": "sha512-Q5srk/4vDM54WJsJio3XNn6K2sCG+CQ8G5Wz6bZhRZoAe/+TxjWB/GlFAnYEbkYVlON9FMk/fE3h2RLpPXo4lQ==",
+ "dev": true,
+ "requires": {
+ "psl": "^1.1.24",
+ "punycode": "^1.4.1"
+ },
+ "dependencies": {
+ "punycode": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz",
+ "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=",
+ "dev": true
+ }
+ }
+ },
+ "tr46": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-1.0.1.tgz",
+ "integrity": "sha1-qLE/1r/SSJUZZ0zN5VujaTtwbQk=",
+ "dev": true,
+ "requires": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "tree-kill": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz",
+ "integrity": "sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==",
+ "dev": true
+ },
+ "trim-newlines": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-2.0.0.tgz",
+ "integrity": "sha1-tAPQuRvlDDMd/EuC7s6yLD3hbSA=",
+ "dev": true
+ },
+ "tryer": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/tryer/-/tryer-1.0.1.tgz",
+ "integrity": "sha512-c3zayb8/kWWpycWYg87P71E1S1ZL6b6IJxfb5fvsUgsf0S2MVGaDhDXXjDMpdCpfWXqptc+4mXwmiy1ypXqRAA==",
+ "dev": true
+ },
+ "tslib": {
+ "version": "1.10.0",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.10.0.tgz",
+ "integrity": "sha512-qOebF53frne81cf0S9B41ByenJ3/IuH8yJKngAX35CmiZySA0khhkovshKK+jGCaMnVomla7gVlIcc3EvKPbTQ==",
+ "dev": true
+ },
+ "tsscmp": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/tsscmp/-/tsscmp-1.0.6.tgz",
+ "integrity": "sha512-LxhtAkPDTkVCMQjt2h6eBVY28KCjikZqZfMcC15YBeNjkgUpdCfBu5HoiOTDu86v6smE8yOjyEktJ8hlbANHQA=="
+ },
+ "tty-browserify": {
+ "version": "0.0.0",
+ "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.0.tgz",
+ "integrity": "sha1-oVe6QC2iTpv5V/mqadUk7tQpAaY=",
+ "dev": true
+ },
+ "tunnel-agent": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz",
+ "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=",
+ "dev": true,
+ "requires": {
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "tweetnacl": {
+ "version": "0.14.5",
+ "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz",
+ "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=",
+ "dev": true
+ },
+ "type": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz",
+ "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==",
+ "dev": true
+ },
+ "type-check": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz",
+ "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=",
+ "dev": true,
+ "requires": {
+ "prelude-ls": "~1.1.2"
+ }
+ },
+ "type-detect": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz",
+ "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==",
+ "dev": true
+ },
+ "type-fest": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz",
+ "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==",
+ "dev": true
+ },
+ "type-is": {
+ "version": "1.6.18",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
+ "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
+ "requires": {
+ "media-typer": "0.3.0",
+ "mime-types": "~2.1.24"
+ }
+ },
+ "typedarray": {
+ "version": "0.0.6",
+ "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz",
+ "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=",
+ "dev": true
+ },
+ "ua-parser-js": {
+ "version": "0.7.21",
+ "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.21.tgz",
+ "integrity": "sha512-+O8/qh/Qj8CgC6eYBVBykMrNtp5Gebn4dlGD/kKXVkJNDwyrAwSIqwz8CDf+tsAIWVycKcku6gIXJ0qwx/ZXaQ==",
+ "dev": true
+ },
+ "uglify-js": {
+ "version": "2.8.29",
+ "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-2.8.29.tgz",
+ "integrity": "sha1-KcVzMUgFe7Th913zW3qcty5qWd0=",
+ "requires": {
+ "source-map": "~0.5.1",
+ "uglify-to-browserify": "~1.0.0",
+ "yargs": "~3.10.0"
+ },
+ "dependencies": {
+ "camelcase": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-1.2.1.tgz",
+ "integrity": "sha1-m7UwTS4LVmmLLHWLCKPqqdqlijk="
+ },
+ "cliui": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-2.1.0.tgz",
+ "integrity": "sha1-S0dXYP+AJkx2LDoXGQMukcf+oNE=",
+ "requires": {
+ "center-align": "^0.1.1",
+ "right-align": "^0.1.1",
+ "wordwrap": "0.0.2"
+ }
+ },
+ "source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w="
+ },
+ "window-size": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/window-size/-/window-size-0.1.0.tgz",
+ "integrity": "sha1-VDjNLqk7IC76Ohn+iIeu58lPnJ0="
+ },
+ "yargs": {
+ "version": "3.10.0",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-3.10.0.tgz",
+ "integrity": "sha1-9+572FfdfB0tOMDnTvvWgdFDH9E=",
+ "requires": {
+ "camelcase": "^1.0.2",
+ "cliui": "^2.1.0",
+ "decamelize": "^1.0.0",
+ "window-size": "0.1.0"
+ }
+ }
+ }
+ },
+ "uglify-to-browserify": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz",
+ "integrity": "sha1-bgkk1r2mta/jSeOabWMoUKD4grc=",
+ "optional": true
+ },
+ "uid-safe": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/uid-safe/-/uid-safe-2.1.5.tgz",
+ "integrity": "sha512-KPHm4VL5dDXKz01UuEd88Df+KzynaohSL9fBh096KWAxSKZQDI2uBrVqtvRM4rwrIrRRKsdLNML/lnaaVSRioA==",
+ "requires": {
+ "random-bytes": "~1.0.0"
+ }
+ },
+ "undefsafe": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/undefsafe/-/undefsafe-2.0.2.tgz",
+ "integrity": "sha1-Il9rngM3Zj4Njnz9aG/Cg2zKznY=",
+ "dev": true,
+ "requires": {
+ "debug": "^2.2.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ }
+ }
+ },
+ "unicode-canonical-property-names-ecmascript": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz",
+ "integrity": "sha512-jDrNnXWHd4oHiTZnx/ZG7gtUTVp+gCcTTKr8L0HjlwphROEW3+Him+IpvC+xcJEFegapiMZyZe02CyuOnRmbnQ==",
+ "dev": true
+ },
+ "unicode-match-property-ecmascript": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz",
+ "integrity": "sha512-L4Qoh15vTfntsn4P1zqnHulG0LdXgjSO035fEpdtp6YxXhMT51Q6vgM5lYdG/5X3MjS+k/Y9Xw4SFCY9IkR0rg==",
+ "dev": true,
+ "requires": {
+ "unicode-canonical-property-names-ecmascript": "^1.0.4",
+ "unicode-property-aliases-ecmascript": "^1.0.4"
+ }
+ },
+ "unicode-match-property-value-ecmascript": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.1.0.tgz",
+ "integrity": "sha512-hDTHvaBk3RmFzvSl0UVrUmC3PuW9wKVnpoUDYH0JDkSIovzw+J5viQmeYHxVSBptubnr7PbH2e0fnpDRQnQl5g==",
+ "dev": true
+ },
+ "unicode-property-aliases-ecmascript": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.0.5.tgz",
+ "integrity": "sha512-L5RAqCfXqAwR3RriF8pM0lU0w4Ryf/GgzONwi6KnL1taJQa7x1TCxdJnILX59WIGOwR57IVxn7Nej0fz1Ny6fw==",
+ "dev": true
+ },
+ "union-value": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz",
+ "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==",
+ "dev": true,
+ "requires": {
+ "arr-union": "^3.1.0",
+ "get-value": "^2.0.6",
+ "is-extendable": "^0.1.1",
+ "set-value": "^2.0.1"
+ }
+ },
+ "uniq": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz",
+ "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8=",
+ "dev": true
+ },
+ "unique-filename": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz",
+ "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==",
+ "dev": true,
+ "requires": {
+ "unique-slug": "^2.0.0"
+ }
+ },
+ "unique-slug": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz",
+ "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==",
+ "dev": true,
+ "requires": {
+ "imurmurhash": "^0.1.4"
+ }
+ },
+ "unique-string": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-1.0.0.tgz",
+ "integrity": "sha1-nhBXzKhRq7kzmPizOuGHuZyuwRo=",
+ "dev": true,
+ "requires": {
+ "crypto-random-string": "^1.0.0"
+ }
+ },
+ "unpipe": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+ "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw="
+ },
+ "unset-value": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz",
+ "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=",
+ "dev": true,
+ "requires": {
+ "has-value": "^0.3.1",
+ "isobject": "^3.0.0"
+ },
+ "dependencies": {
+ "has-value": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz",
+ "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=",
+ "dev": true,
+ "requires": {
+ "get-value": "^2.0.3",
+ "has-values": "^0.1.4",
+ "isobject": "^2.0.0"
+ },
+ "dependencies": {
+ "isobject": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz",
+ "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=",
+ "dev": true,
+ "requires": {
+ "isarray": "1.0.0"
+ }
+ }
+ }
+ },
+ "has-values": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz",
+ "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=",
+ "dev": true
+ }
+ }
+ },
+ "unzip-response": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/unzip-response/-/unzip-response-2.0.1.tgz",
+ "integrity": "sha1-0vD3N9FrBhXnKmk17QQhRXLVb5c=",
+ "dev": true
+ },
+ "upath": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/upath/-/upath-1.2.0.tgz",
+ "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==",
+ "dev": true
+ },
+ "update-notifier": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-2.5.0.tgz",
+ "integrity": "sha512-gwMdhgJHGuj/+wHJJs9e6PcCszpxR1b236igrOkUofGhqJuG+amlIKwApH1IW1WWl7ovZxsX49lMBWLxSdm5Dw==",
+ "dev": true,
+ "requires": {
+ "boxen": "^1.2.1",
+ "chalk": "^2.0.1",
+ "configstore": "^3.0.0",
+ "import-lazy": "^2.1.0",
+ "is-ci": "^1.0.10",
+ "is-installed-globally": "^0.1.0",
+ "is-npm": "^1.0.0",
+ "latest-version": "^3.0.0",
+ "semver-diff": "^2.0.0",
+ "xdg-basedir": "^3.0.0"
+ },
+ "dependencies": {
+ "ci-info": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-1.6.0.tgz",
+ "integrity": "sha512-vsGdkwSCDpWmP80ncATX7iea5DWQemg1UgCW5J8tqjU3lYw4FBYuj89J0CTVomA7BEfvSZd84GmHko+MxFQU2A==",
+ "dev": true
+ },
+ "is-ci": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-1.2.1.tgz",
+ "integrity": "sha512-s6tfsaQaQi3JNciBH6shVqEDvhGut0SUXr31ag8Pd8BBbVVlcGfWhpPmEOoM6RJ5TFhbypvf5yyRw/VXW1IiWg==",
+ "dev": true,
+ "requires": {
+ "ci-info": "^1.5.0"
+ }
+ }
+ }
+ },
+ "uri-js": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz",
+ "integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==",
+ "dev": true,
+ "requires": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "urix": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz",
+ "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=",
+ "dev": true
+ },
+ "url": {
+ "version": "0.11.0",
+ "resolved": "https://registry.npmjs.org/url/-/url-0.11.0.tgz",
+ "integrity": "sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=",
+ "dev": true,
+ "requires": {
+ "punycode": "1.3.2",
+ "querystring": "0.2.0"
+ },
+ "dependencies": {
+ "punycode": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz",
+ "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=",
+ "dev": true
+ }
+ }
+ },
+ "url-parse-lax": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-1.0.0.tgz",
+ "integrity": "sha1-evjzA2Rem9eaJy56FKxovAYJ2nM=",
+ "dev": true,
+ "requires": {
+ "prepend-http": "^1.0.1"
+ }
+ },
+ "use": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz",
+ "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==",
+ "dev": true
+ },
+ "util": {
+ "version": "0.12.1",
+ "resolved": "https://registry.npmjs.org/util/-/util-0.12.1.tgz",
+ "integrity": "sha512-MREAtYOp+GTt9/+kwf00IYoHZyjM8VU4aVrkzUlejyqaIjd2GztVl5V9hGXKlvBKE3gENn/FMfHE5v6hElXGcQ==",
+ "dev": true,
+ "requires": {
+ "inherits": "^2.0.3",
+ "is-arguments": "^1.0.4",
+ "is-generator-function": "^1.0.7",
+ "object.entries": "^1.1.0",
+ "safe-buffer": "^5.1.2"
+ }
+ },
+ "util-deprecate": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+ "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=",
+ "dev": true
+ },
+ "utils-merge": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
+ "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
+ },
+ "uuid": {
+ "version": "3.4.0",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz",
+ "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==",
+ "dev": true
+ },
+ "v8-compile-cache": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.1.0.tgz",
+ "integrity": "sha512-usZBT3PW+LOjM25wbqIlZwPeJV+3OSz3M1k1Ws8snlW39dZyYL9lOGC5FgPVHfk0jKmjiDV8Z0mIbVQPiwFs7g==",
+ "dev": true
+ },
+ "validate-npm-package-license": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz",
+ "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==",
+ "dev": true,
+ "requires": {
+ "spdx-correct": "^3.0.0",
+ "spdx-expression-parse": "^3.0.0"
+ }
+ },
+ "value-equal": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz",
+ "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==",
+ "dev": true
+ },
+ "vary": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+ "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
+ },
+ "verror": {
+ "version": "1.10.0",
+ "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz",
+ "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=",
+ "dev": true,
+ "requires": {
+ "assert-plus": "^1.0.0",
+ "core-util-is": "1.0.2",
+ "extsprintf": "^1.2.0"
+ }
+ },
+ "vis": {
+ "version": "4.21.0-EOL",
+ "resolved": "https://registry.npmjs.org/vis/-/vis-4.21.0-EOL.tgz",
+ "integrity": "sha512-JVS1mywKg5S88XbkDJPfCb3n+vlg5fMA8Ae2hzs3KHAwD4ryM5qwlbFZ6ReDfY8te7I4NLCpuCoywJQEehvJlQ==",
+ "dev": true,
+ "requires": {
+ "emitter-component": "^1.1.1",
+ "hammerjs": "^2.0.8",
+ "keycharm": "^0.2.0",
+ "moment": "^2.18.1",
+ "propagating-hammerjs": "^1.4.6"
+ }
+ },
+ "vizceral": {
+ "version": "4.9.0",
+ "resolved": "https://registry.npmjs.org/vizceral/-/vizceral-4.9.0.tgz",
+ "integrity": "sha512-K90lI+mLsEgqccgs77dtV8L6C7/ljxGnmGZivy7VMsi2/hbQKy9wih/1FEIAzzXUZ/830a8XPxsepMGNYcxQFg==",
+ "dev": true,
+ "requires": {
+ "@tweenjs/tween.js": "^16.8.0",
+ "chroma-js": "^1.1.1",
+ "hammerjs": "^2.0.8",
+ "lodash": "^4.17.14",
+ "numeral": "^1.5.3",
+ "three": "^0.106.2"
+ }
+ },
+ "vizceral-react": {
+ "version": "4.8.0",
+ "resolved": "https://registry.npmjs.org/vizceral-react/-/vizceral-react-4.8.0.tgz",
+ "integrity": "sha512-k9T/+wOfkoVy6Bw+7U1Jp/8KkebN2mtgPb9SJWQDK050u7u3SS944Q4AsSrRWK8oCqQRblzPYcy50DLlq1xfGw==",
+ "dev": true,
+ "requires": {
+ "lodash": "^4.17.14",
+ "prop-types": "^15.6.1",
+ "vizceral": "^4.9.0"
+ }
+ },
+ "vm-browserify": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz",
+ "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==",
+ "dev": true
+ },
+ "void-elements": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-2.0.1.tgz",
+ "integrity": "sha1-wGavtYK7HLQSjWDqkjkulNXp2+w="
+ },
+ "vscode-json-languageservice": {
+ "version": "3.4.12",
+ "resolved": "https://registry.npmjs.org/vscode-json-languageservice/-/vscode-json-languageservice-3.4.12.tgz",
+ "integrity": "sha512-+tA0KPVM1pDfORZqsQen7bY5buBpQGDTVYEobm5MoGtXNeZY2Kn0iy5wIQqXveb28LRv/I5xKE87dmNJTEaijQ==",
+ "dev": true,
+ "requires": {
+ "jsonc-parser": "^2.2.0",
+ "vscode-languageserver-textdocument": "^1.0.1-next.1",
+ "vscode-languageserver-types": "^3.15.0",
+ "vscode-nls": "^4.1.1",
+ "vscode-uri": "^2.1.1"
+ }
+ },
+ "vscode-languageserver-textdocument": {
+ "version": "1.0.1-next.1",
+ "resolved": "https://registry.npmjs.org/vscode-languageserver-textdocument/-/vscode-languageserver-textdocument-1.0.1-next.1.tgz",
+ "integrity": "sha512-Cmt0KsNxouns+d7/Kw/jWtWU9Z3h56z1qAA8utjDOEqrDcrTs2rDXv3EJRa99nuKM3wVf6DbWym1VqL9q71XPA==",
+ "dev": true
+ },
+ "vscode-languageserver-types": {
+ "version": "3.15.1",
+ "resolved": "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.15.1.tgz",
+ "integrity": "sha512-+a9MPUQrNGRrGU630OGbYVQ+11iOIovjCkqxajPa9w57Sd5ruK8WQNsslzpa0x/QJqC8kRc2DUxWjIFwoNm4ZQ==",
+ "dev": true
+ },
+ "vscode-nls": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/vscode-nls/-/vscode-nls-4.1.1.tgz",
+ "integrity": "sha512-4R+2UoUUU/LdnMnFjePxfLqNhBS8lrAFyX7pjb2ud/lqDkrUavFUTcG7wR0HBZFakae0Q6KLBFjMS6W93F403A==",
+ "dev": true
+ },
+ "vscode-uri": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-2.1.1.tgz",
+ "integrity": "sha512-eY9jmGoEnVf8VE8xr5znSah7Qt1P/xsCdErz+g8HYZtJ7bZqKH5E3d+6oVNm1AC/c6IHUDokbmVXKOi4qPAC9A==",
+ "dev": true
+ },
+ "w3c-hr-time": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/w3c-hr-time/-/w3c-hr-time-1.0.1.tgz",
+ "integrity": "sha1-gqwr/2PZUOqeMYmlimViX+3xkEU=",
+ "dev": true,
+ "requires": {
+ "browser-process-hrtime": "^0.1.2"
+ }
+ },
+ "wait-on": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/wait-on/-/wait-on-2.1.2.tgz",
+ "integrity": "sha512-Jm6pzZkbswtcRUXohxY1Ek5MrL16AwHj83drgW2FTQuglHuhZhVMyBLPIYG0rL1wvr5rdC1uzRuU/7Bc+B9Pwg==",
+ "dev": true,
+ "requires": {
+ "core-js": "^2.4.1",
+ "joi": "^9.2.0",
+ "minimist": "^1.2.0",
+ "request": "^2.78.0",
+ "rx": "^4.1.0"
+ },
+ "dependencies": {
+ "rx": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/rx/-/rx-4.1.0.tgz",
+ "integrity": "sha1-pfE/957zt0D+MKqAP7CfmIBdR4I=",
+ "dev": true
+ }
+ }
+ },
+ "warning": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz",
+ "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==",
+ "dev": true,
+ "requires": {
+ "loose-envify": "^1.0.0"
+ }
+ },
+ "watchpack": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.6.0.tgz",
+ "integrity": "sha512-i6dHe3EyLjMmDlU1/bGQpEw25XSjkJULPuAVKCbNRefQVq48yXKUpwg538F7AZTf9kyr57zj++pQFltUa5H7yA==",
+ "dev": true,
+ "requires": {
+ "chokidar": "^2.0.2",
+ "graceful-fs": "^4.1.2",
+ "neo-async": "^2.5.0"
+ }
+ },
+ "wcwidth": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz",
+ "integrity": "sha1-8LDc+RW8X/FSivrbLA4XtTLaL+g=",
+ "dev": true,
+ "requires": {
+ "defaults": "^1.0.3"
+ }
+ },
+ "webidl-conversions": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-4.0.2.tgz",
+ "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==",
+ "dev": true
+ },
+ "webpack": {
+ "version": "4.41.5",
+ "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.41.5.tgz",
+ "integrity": "sha512-wp0Co4vpyumnp3KlkmpM5LWuzvZYayDwM2n17EHFr4qxBBbRokC7DJawPJC7TfSFZ9HZ6GsdH40EBj4UV0nmpw==",
+ "dev": true,
+ "requires": {
+ "@webassemblyjs/ast": "1.8.5",
+ "@webassemblyjs/helper-module-context": "1.8.5",
+ "@webassemblyjs/wasm-edit": "1.8.5",
+ "@webassemblyjs/wasm-parser": "1.8.5",
+ "acorn": "^6.2.1",
+ "ajv": "^6.10.2",
+ "ajv-keywords": "^3.4.1",
+ "chrome-trace-event": "^1.0.2",
+ "enhanced-resolve": "^4.1.0",
+ "eslint-scope": "^4.0.3",
+ "json-parse-better-errors": "^1.0.2",
+ "loader-runner": "^2.4.0",
+ "loader-utils": "^1.2.3",
+ "memory-fs": "^0.4.1",
+ "micromatch": "^3.1.10",
+ "mkdirp": "^0.5.1",
+ "neo-async": "^2.6.1",
+ "node-libs-browser": "^2.2.1",
+ "schema-utils": "^1.0.0",
+ "tapable": "^1.1.3",
+ "terser-webpack-plugin": "^1.4.3",
+ "watchpack": "^1.6.0",
+ "webpack-sources": "^1.4.1"
+ },
+ "dependencies": {
+ "acorn": {
+ "version": "6.4.0",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.0.tgz",
+ "integrity": "sha512-gac8OEcQ2Li1dxIEWGZzsp2BitJxwkwcOm0zHAJLcPJaVvm58FRnk6RkuLRpU1EujipU2ZFODv2P9DLMfnV8mw==",
+ "dev": true
+ },
+ "eslint-scope": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-4.0.3.tgz",
+ "integrity": "sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg==",
+ "dev": true,
+ "requires": {
+ "esrecurse": "^4.1.0",
+ "estraverse": "^4.1.1"
+ }
+ },
+ "schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "dev": true,
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ }
+ }
+ }
+ },
+ "webpack-bundle-analyzer": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-3.6.0.tgz",
+ "integrity": "sha512-orUfvVYEfBMDXgEKAKVvab5iQ2wXneIEorGNsyuOyVYpjYrI7CUOhhXNDd3huMwQ3vNNWWlGP+hzflMFYNzi2g==",
+ "dev": true,
+ "requires": {
+ "acorn": "^6.0.7",
+ "acorn-walk": "^6.1.1",
+ "bfj": "^6.1.1",
+ "chalk": "^2.4.1",
+ "commander": "^2.18.0",
+ "ejs": "^2.6.1",
+ "express": "^4.16.3",
+ "filesize": "^3.6.1",
+ "gzip-size": "^5.0.0",
+ "lodash": "^4.17.15",
+ "mkdirp": "^0.5.1",
+ "opener": "^1.5.1",
+ "ws": "^6.0.0"
+ },
+ "dependencies": {
+ "acorn": {
+ "version": "6.4.0",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.0.tgz",
+ "integrity": "sha512-gac8OEcQ2Li1dxIEWGZzsp2BitJxwkwcOm0zHAJLcPJaVvm58FRnk6RkuLRpU1EujipU2ZFODv2P9DLMfnV8mw==",
+ "dev": true
+ },
+ "commander": {
+ "version": "2.20.3",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
+ "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==",
+ "dev": true
+ },
+ "ws": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz",
+ "integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==",
+ "dev": true,
+ "requires": {
+ "async-limiter": "~1.0.0"
+ }
+ }
+ }
+ },
+ "webpack-command": {
+ "version": "0.4.2",
+ "resolved": "https://registry.npmjs.org/webpack-command/-/webpack-command-0.4.2.tgz",
+ "integrity": "sha512-2JZRlV+eT2nsw0DGDS/F4ndv0e/QVkyYj4/1fagp9DbjRagQ02zuVzELp/QF5mrCESKKvnXiBQoaBJUOjAMp8w==",
+ "dev": true,
+ "requires": {
+ "@webpack-contrib/config-loader": "^1.2.0",
+ "@webpack-contrib/schema-utils": "^1.0.0-beta.0",
+ "camelcase": "^5.0.0",
+ "chalk": "^2.3.2",
+ "debug": "^3.1.0",
+ "decamelize": "^2.0.0",
+ "enhanced-resolve": "^4.0.0",
+ "import-local": "^1.0.0",
+ "isobject": "^3.0.1",
+ "loader-utils": "^1.1.0",
+ "log-symbols": "^2.2.0",
+ "loud-rejection": "^1.6.0",
+ "meant": "^1.0.1",
+ "meow": "^5.0.0",
+ "merge-options": "^1.0.0",
+ "object.values": "^1.0.4",
+ "opn": "^5.3.0",
+ "ora": "^2.1.0",
+ "plur": "^3.0.0",
+ "pretty-bytes": "^5.0.0",
+ "strip-ansi": "^4.0.0",
+ "text-table": "^0.2.0",
+ "titleize": "^1.0.1",
+ "update-notifier": "^2.3.0",
+ "v8-compile-cache": "^2.0.0",
+ "webpack-log": "^1.1.2",
+ "wordwrap": "^1.0.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz",
+ "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=",
+ "dev": true
+ },
+ "camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "dev": true
+ },
+ "decamelize": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-2.0.0.tgz",
+ "integrity": "sha512-Ikpp5scV3MSYxY39ymh45ZLEecsTdv/Xj2CaQfI8RLMuwi7XvjX9H/fhraiSuU+C5w5NTDu4ZU72xNiZnurBPg==",
+ "dev": true,
+ "requires": {
+ "xregexp": "4.0.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz",
+ "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^3.0.0"
+ }
+ },
+ "wordwrap": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz",
+ "integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=",
+ "dev": true
+ }
+ }
+ },
+ "webpack-log": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/webpack-log/-/webpack-log-1.2.0.tgz",
+ "integrity": "sha512-U9AnICnu50HXtiqiDxuli5gLB5PGBo7VvcHx36jRZHwK4vzOYLbImqT4lwWwoMHdQWwEKw736fCHEekokTEKHA==",
+ "dev": true,
+ "requires": {
+ "chalk": "^2.1.0",
+ "log-symbols": "^2.1.0",
+ "loglevelnext": "^1.0.1",
+ "uuid": "^3.1.0"
+ }
+ },
+ "webpack-sources": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz",
+ "integrity": "sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==",
+ "dev": true,
+ "requires": {
+ "source-list-map": "^2.0.0",
+ "source-map": "~0.6.1"
+ }
+ },
+ "whatwg-encoding": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-1.0.5.tgz",
+ "integrity": "sha512-b5lim54JOPN9HtzvK9HFXvBma/rnfFeqsic0hSpjtDbVxR3dJKLc+KB4V6GgiGOvl7CY/KNh8rxSo9DKQrnUEw==",
+ "dev": true,
+ "requires": {
+ "iconv-lite": "0.4.24"
+ }
+ },
+ "whatwg-fetch": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.0.0.tgz",
+ "integrity": "sha512-9GSJUgz1D4MfyKU7KRqwOjXCXTqWdFNvEr7eUBYchQiVc744mqK/MzXPNR2WsPkmkOa4ywfg8C2n8h+13Bey1Q==",
+ "dev": true
+ },
+ "whatwg-mimetype": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz",
+ "integrity": "sha512-M4yMwr6mAnQz76TbJm914+gPpB/nCwvZbJU28cUD6dR004SAxDLOOSUaB1JDRqLtaOV/vi0IC5lEAGFgrjGv/g==",
+ "dev": true
+ },
+ "whatwg-url": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-7.1.0.tgz",
+ "integrity": "sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==",
+ "dev": true,
+ "requires": {
+ "lodash.sortby": "^4.7.0",
+ "tr46": "^1.0.1",
+ "webidl-conversions": "^4.0.2"
+ }
+ },
+ "which": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
+ "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==",
+ "dev": true,
+ "requires": {
+ "isexe": "^2.0.0"
+ }
+ },
+ "which-module": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz",
+ "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=",
+ "dev": true
+ },
+ "wide-align": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz",
+ "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==",
+ "dev": true,
+ "requires": {
+ "string-width": "^1.0.2 || 2"
+ }
+ },
+ "widest-line": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-2.0.1.tgz",
+ "integrity": "sha512-Ba5m9/Fa4Xt9eb2ELXt77JxVDV8w7qQrH0zS/TWSJdLyAwQjWoOzpzj5lwVftDz6n/EOu3tNACS84v509qwnJA==",
+ "dev": true,
+ "requires": {
+ "string-width": "^2.1.1"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz",
+ "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=",
+ "dev": true
+ },
+ "is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=",
+ "dev": true
+ },
+ "string-width": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz",
+ "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==",
+ "dev": true,
+ "requires": {
+ "is-fullwidth-code-point": "^2.0.0",
+ "strip-ansi": "^4.0.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz",
+ "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^3.0.0"
+ }
+ }
+ }
+ },
+ "window-size": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/window-size/-/window-size-0.1.4.tgz",
+ "integrity": "sha1-+OGqHuWlPsW/FR/6CXQqatdpeHY="
+ },
+ "winston": {
+ "version": "2.4.4",
+ "resolved": "https://registry.npmjs.org/winston/-/winston-2.4.4.tgz",
+ "integrity": "sha512-NBo2Pepn4hK4V01UfcWcDlmiVTs7VTB1h7bgnB0rgP146bYhMxX0ypCz3lBOfNxCO4Zuek7yeT+y/zM1OfMw4Q==",
+ "requires": {
+ "async": "~1.0.0",
+ "colors": "1.0.x",
+ "cycle": "1.0.x",
+ "eyes": "0.1.x",
+ "isstream": "0.1.x",
+ "stack-trace": "0.0.x"
+ },
+ "dependencies": {
+ "async": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/async/-/async-1.0.0.tgz",
+ "integrity": "sha1-+PwEyjoTeErenhZBr5hXjPvWR6k="
+ }
+ }
+ },
+ "with": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/with/-/with-5.1.1.tgz",
+ "integrity": "sha1-+k2qktrzLE6pTtRTyB8EaGtXXf4=",
+ "requires": {
+ "acorn": "^3.1.0",
+ "acorn-globals": "^3.0.0"
+ }
+ },
+ "word-wrap": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz",
+ "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==",
+ "dev": true
+ },
+ "wordwrap": {
+ "version": "0.0.2",
+ "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz",
+ "integrity": "sha1-t5Zpu0LstAn4PVg8rVLKF+qhZD8="
+ },
+ "worker-farm": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/worker-farm/-/worker-farm-1.7.0.tgz",
+ "integrity": "sha512-rvw3QTZc8lAxyVrqcSGVm5yP/IJ2UcB3U0graE3LCFoZ0Yn2x4EoVSqJKdB/T5M+FLcRPjz4TDacRf3OCfNUzw==",
+ "dev": true,
+ "requires": {
+ "errno": "~0.1.7"
+ }
+ },
+ "wrap-ansi": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz",
+ "integrity": "sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=",
+ "requires": {
+ "string-width": "^1.0.1",
+ "strip-ansi": "^3.0.1"
+ },
+ "dependencies": {
+ "strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ }
+ }
+ },
+ "wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8="
+ },
+ "write": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/write/-/write-1.0.3.tgz",
+ "integrity": "sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig==",
+ "dev": true,
+ "requires": {
+ "mkdirp": "^0.5.1"
+ }
+ },
+ "write-file-atomic": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-2.4.3.tgz",
+ "integrity": "sha512-GaETH5wwsX+GcnzhPgKcKjJ6M2Cq3/iZp1WyY/X1CSqrW+jVNM9Y7D8EC2sM4ZG/V8wZlSniJnCKWPmBYAucRQ==",
+ "dev": true,
+ "requires": {
+ "graceful-fs": "^4.1.11",
+ "imurmurhash": "^0.1.4",
+ "signal-exit": "^3.0.2"
+ }
+ },
+ "ws": {
+ "version": "5.2.2",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.2.tgz",
+ "integrity": "sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA==",
+ "dev": true,
+ "requires": {
+ "async-limiter": "~1.0.0"
+ }
+ },
+ "xdg-basedir": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-3.0.0.tgz",
+ "integrity": "sha1-SWsswQnsqNus/i3HK2A8F8WHCtQ=",
+ "dev": true
+ },
+ "xml-crypto": {
+ "version": "0.10.1",
+ "resolved": "https://registry.npmjs.org/xml-crypto/-/xml-crypto-0.10.1.tgz",
+ "integrity": "sha1-+DL3TM9W8kr8rhFjofyrRNlndKg=",
+ "requires": {
+ "xmldom": "=0.1.19",
+ "xpath.js": ">=0.0.3"
+ },
+ "dependencies": {
+ "xmldom": {
+ "version": "0.1.19",
+ "resolved": "https://registry.npmjs.org/xmldom/-/xmldom-0.1.19.tgz",
+ "integrity": "sha1-Yx/Ad3bv2EEYvyUXGzftTQdaCrw="
+ }
+ }
+ },
+ "xml-encryption": {
+ "version": "0.11.2",
+ "resolved": "https://registry.npmjs.org/xml-encryption/-/xml-encryption-0.11.2.tgz",
+ "integrity": "sha512-jVvES7i5ovdO7N+NjgncA326xYKjhqeAnnvIgRnY7ROLCfFqEDLwP0Sxp/30SHG0AXQV1048T5yinOFyvwGFzg==",
+ "requires": {
+ "async": "^2.1.5",
+ "ejs": "^2.5.6",
+ "node-forge": "^0.7.0",
+ "xmldom": "~0.1.15",
+ "xpath": "0.0.27"
+ }
+ },
+ "xml-name-validator": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-3.0.0.tgz",
+ "integrity": "sha512-A5CUptxDsvxKJEU3yO6DuWBSJz/qizqzJKOMIfUJHETbBw/sFaDxgd6fxm1ewUaM0jZ444Fc5vC5ROYurg/4Pw==",
+ "dev": true
+ },
+ "xml2js": {
+ "version": "0.4.23",
+ "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.23.tgz",
+ "integrity": "sha512-ySPiMjM0+pLDftHgXY4By0uswI3SPKLDw/i3UXbnO8M/p28zqexCUoPmQFrYD+/1BzhGJSs2i1ERWKJAtiLrug==",
+ "requires": {
+ "sax": ">=0.6.0",
+ "xmlbuilder": "~11.0.0"
+ },
+ "dependencies": {
+ "xmlbuilder": {
+ "version": "11.0.1",
+ "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-11.0.1.tgz",
+ "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA=="
+ }
+ }
+ },
+ "xmlbuilder": {
+ "version": "9.0.7",
+ "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz",
+ "integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0="
+ },
+ "xmldom": {
+ "version": "0.1.31",
+ "resolved": "https://registry.npmjs.org/xmldom/-/xmldom-0.1.31.tgz",
+ "integrity": "sha512-yS2uJflVQs6n+CyjHoaBmVSqIDevTAWrzMmjG1Gc7h1qQ7uVozNhEPJAwZXWyGQ/Gafo3fCwrcaokezLPupVyQ=="
+ },
+ "xpath": {
+ "version": "0.0.27",
+ "resolved": "https://registry.npmjs.org/xpath/-/xpath-0.0.27.tgz",
+ "integrity": "sha512-fg03WRxtkCV6ohClePNAECYsmpKKTv5L8y/X3Dn1hQrec3POx2jHZ/0P2qQ6HvsrU1BmeqXcof3NGGueG6LxwQ=="
+ },
+ "xpath.js": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/xpath.js/-/xpath.js-1.1.0.tgz",
+ "integrity": "sha512-jg+qkfS4K8E7965sqaUl8mRngXiKb3WZGfONgE18pr03FUQiuSV6G+Ej4tS55B+rIQSFEIw3phdVAQ4pPqNWfQ=="
+ },
+ "xregexp": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/xregexp/-/xregexp-4.0.0.tgz",
+ "integrity": "sha512-PHyM+sQouu7xspQQwELlGwwd05mXUFqwFYfqPO0cC7x4fxyHnnuetmQr6CjJiafIDoH4MogHb9dOoJzR/Y4rFg==",
+ "dev": true
+ },
+ "xtend": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
+ "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==",
+ "dev": true
+ },
+ "y18n": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.1.tgz",
+ "integrity": "sha1-bRX7qITAhnnA136I53WegR4H+kE="
+ },
+ "yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
+ "dev": true
+ },
+ "yargs": {
+ "version": "3.32.0",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-3.32.0.tgz",
+ "integrity": "sha1-AwiOnr+edWtpdRYR0qXvWRSCyZU=",
+ "requires": {
+ "camelcase": "^2.0.1",
+ "cliui": "^3.0.3",
+ "decamelize": "^1.1.1",
+ "os-locale": "^1.4.0",
+ "string-width": "^1.0.1",
+ "window-size": "^0.1.4",
+ "y18n": "^3.2.0"
+ }
+ },
+ "yargs-parser": {
+ "version": "13.1.1",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.1.tgz",
+ "integrity": "sha512-oVAVsHz6uFrg3XQheFII8ESO2ssAf9luWuAd6Wexsu4F3OtIW0o8IribPXYrD4WC24LWtPrJlGy87y5udK+dxQ==",
+ "dev": true,
+ "requires": {
+ "camelcase": "^5.0.0",
+ "decamelize": "^1.2.0"
+ },
+ "dependencies": {
+ "camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "dev": true
+ }
+ }
+ }
+ }
+}
diff --git a/ui/package.json b/ui/package.json
new file mode 100644
index 000000000..bbd1b6eb6
--- /dev/null
+++ b/ui/package.json
@@ -0,0 +1,159 @@
+{
+ "name": "haystack-ui",
+ "version": "1.0.0",
+ "private": true,
+ "scripts": {
+ "clean": "rm -rf public/bundles/ && rm -rf static_codegen",
+ "codegen": "mkdir -p static_codegen && grpc_tools_node_protoc --js_out=import_style=commonjs,binary:./static_codegen/ --grpc_out=./static_codegen --plugin=protoc-gen-grpc=`which grpc_tools_node_protoc_plugin` -I ./haystack-idl/proto/ ./haystack-idl/proto/span.proto && grpc_tools_node_protoc --js_out=import_style=commonjs,binary:./static_codegen/ --grpc_out=./static_codegen --plugin=protoc-gen-grpc=`which grpc_tools_node_protoc_plugin` -I ./haystack-idl/proto/api/ -I ./haystack-idl/proto/ ./haystack-idl/proto/api/traceReader.proto && grpc_tools_node_protoc --js_out=import_style=commonjs,binary:./static_codegen/ --grpc_out=./static_codegen --plugin=protoc-gen-grpc=`which grpc_tools_node_protoc_plugin` -I ./haystack-idl/proto/api/ -I ./haystack-idl/proto/ ./haystack-idl/proto/api/anomaly/anomalyReader.proto && grpc_tools_node_protoc --js_out=import_style=commonjs,binary:./static_codegen/ --grpc_out=./static_codegen --plugin=protoc-gen-grpc=`which grpc_tools_node_protoc_plugin` -I ./haystack-idl/proto/api/ -I ./haystack-idl/proto/ ./haystack-idl/proto/api/subscription/subscriptionManagement.proto",
+ "coverage": "nyc --extension=.jsx --reporter=lcov --reporter=text npm test && nyc report --reporter=text-lcov | coveralls",
+ "coverage:fast": "nyc --extension=.jsx --reporter=lcov --reporter=text npm run mocha",
+ "eslint": "echo 'Running eslint...' && eslint --ext js,jsx src server universal && echo 'Done' ",
+ "mocha": "mocha --require ignore-styles --compilers js:@babel/register --require ./test/src/test_helper.js \"test/**/*@(.js|.jsx)\" --exit --timeout 10000",
+ "test": "npm run eslint && npm run mocha",
+ "build": "npm run clean && npm run codegen && npm run coverage && webpack --mode=production",
+ "start": "node server/start.js",
+ "start:dev": "npm run clean && npm run codegen && npm run test && concurrently -r \"npm run watch\" \"wait-on public/bundles/report.html && npm run start:devserver\"",
+ "start:dev:fast": "npm run clean && npm run codegen && concurrently -r \"npm run watch\" \"wait-on public/bundles/report.html && npm run start:devserver\"",
+ "start:dev:insights": "HAYSTACK_PROP_CONNECTORS_SERVICE__INSIGHTS_ENABLE__SERVICE__INSIGHTS=true npm run start:dev:fast",
+ "start:dev:insights:mock": "HAYSTACK_PROP_CONNECTORS_SERVICE__INSIGHTS_ENABLE__SERVICE__INSIGHTS=true HAYSTACK_PROP_CONNECTORS_TRACES_CONNECTOR__NAME=mock npm run start:dev:fast",
+ "start:devserver": "NODE_ENV=development nodemon --inspect --ignore 'public/*' server/start.js",
+ "watch": "NODE_ENV=development webpack -w --mode=development"
+ },
+ "engines": {
+ "node": ">=10.0.0",
+ "npm": ">=6.0.0"
+ },
+ "husky": {
+ "hooks": {
+ "pre-commit": "pretty-quick --staged"
+ }
+ },
+ "dependencies": {
+ "axios": "^0.18.1",
+ "body-parser": "^1.18.2",
+ "compression": "^1.7.2",
+ "cookie-session": "^2.0.0-beta.3",
+ "dagre": "^0.8.4",
+ "deepmerge": "^4.0.0",
+ "express": "^4.16.3",
+ "express-session": "^1.15.6",
+ "express-winston": "^2.5.1",
+ "finished": "^1.2.2",
+ "flat": "^4.0.0",
+ "google-protobuf": "^3.5.0",
+ "grpc": "^1.20.1",
+ "https": "^1.0.0",
+ "lodash": "^4.17.15",
+ "measured": "^1.1.0",
+ "moment": "^2.22.1",
+ "particles.js": "^2.0.0",
+ "passport": "^0.4.0",
+ "passport-saml": "^0.33.0",
+ "pug": "^2.0.3",
+ "q": "^1.5.1",
+ "qs": "^6.5.2",
+ "seedrandom": "^3.0.1",
+ "serve-favicon": "^2.5.0",
+ "statsd-client": "^0.4.2",
+ "winston": "^2.4.2"
+ },
+ "devDependencies": {
+ "@babel/cli": "^7.0.0",
+ "@babel/core": "^7.0.0",
+ "@babel/plugin-proposal-class-properties": "^7.0.0",
+ "@babel/plugin-proposal-decorators": "^7.0.0",
+ "@babel/preset-env": "^7.0.0",
+ "@babel/preset-react": "^7.0.0",
+ "@babel/register": "^7.0.0",
+ "assets-webpack-plugin": "^4.0.0",
+ "axios-mock-adapter": "^1.15.0",
+ "babel-eslint": "^9.0.0",
+ "babel-loader": "^8.0.0",
+ "babel-plugin-dynamic-import-node": "^1.2.0",
+ "babel-plugin-lodash": "^3.3.2",
+ "canvas": "^2.0.0",
+ "chai": "^4.1.2",
+ "chalk": "^2.4.1",
+ "chart.js": "^2.9.0",
+ "chartjs-plugin-zoom": "^0.7.5",
+ "color-hash": "^1.0.3",
+ "concurrently": "^3.6.0",
+ "coveralls": "^3.0.1",
+ "css-loader": "^3.2.0",
+ "enzyme": "^3.3.0",
+ "enzyme-adapter-react-16": "^1.1.1",
+ "eslint": "^5.16.0",
+ "eslint-config-airbnb": "^17.1.0",
+ "eslint-plugin-import": "^2.17.3",
+ "eslint-plugin-json": "^1.4.0",
+ "eslint-plugin-jsx-a11y": "^6.2.1",
+ "eslint-plugin-prettier": "^3.1.0",
+ "eslint-plugin-react": "^7.13.0",
+ "grpc-tools": "^1.8.0",
+ "history": "^4.7.2",
+ "husky": "^2.4.1",
+ "ignore-styles": "^5.0.1",
+ "js-cookie": "^2.2.0",
+ "jsdom": "^11.10.0",
+ "less": "^3.0.4",
+ "less-loader": "^4.1.0",
+ "lodash-webpack-plugin": "^0.11.5",
+ "mini-css-extract-plugin": "^0.4.0",
+ "mobx": "^5.14.2",
+ "mobx-react": "^6.1.4",
+ "mobx-utils": "^5.5.1",
+ "mocha": "5.2.0",
+ "mocha-lcov-reporter": "^1.3.0",
+ "nodemon": "^1.18.3",
+ "nyc": "^14.1.1",
+ "prettier": "^1.18.2",
+ "pretty-quick": "^1.11.0",
+ "progress-bar-webpack-plugin": "^1.11.0",
+ "prop-types": "^15.6.1",
+ "proxyquire": "^2.0.1",
+ "react": "^16.11.0",
+ "react-bootstrap-table": "^4.0.0-beta.9",
+ "react-chartjs-2": "^2.9.0",
+ "react-circular-progressbar": "^0.8.0",
+ "react-copy-to-clipboard": "^5.0.1",
+ "react-datetime": "^2.14.0",
+ "react-dom": "^16.11.0",
+ "react-ga": "^2.5.0",
+ "react-hot-loader": "^4.1.3",
+ "react-modal": "^3.4.4",
+ "react-router": "^4.2.0",
+ "react-router-dom": "^4.2.2",
+ "react-select": "^1.2.1",
+ "react-sparklines": "^1.7.0",
+ "react-test-renderer": "^16.3.2",
+ "react-typist": "^2.0.4",
+ "react-vis": "^1.9.3",
+ "sinon": "^1.17.7",
+ "style-loader": "^0.21.0",
+ "supertest": "^3.0.0",
+ "svg-url-loader": "^3.0.0",
+ "timeago.js": "^3.0.2",
+ "vis": "^4.21.0-EOL",
+ "vizceral": "^4.7.3",
+ "vizceral-react": "^4.6.4",
+ "wait-on": "^2.1.0",
+ "webpack": "^4.17.1",
+ "webpack-bundle-analyzer": "^3.4.1",
+ "webpack-command": "^0.4.1",
+ "whatwg-url": "^7.0.0"
+ },
+ "nyc": {
+ "exclude": [
+ "**/*.spec.*",
+ "build",
+ "coverage",
+ "haystack-idl",
+ "public",
+ "static_codegen",
+ "server/connectors/trends/stub/*",
+ "server/connectors/traces/stub/*",
+ "server/connectors/serviceGraph/stub/*",
+ "server/connectors/alerts/stub/*"
+ ]
+ }
+}
diff --git a/ui/public/assets.json b/ui/public/assets.json
new file mode 100644
index 000000000..1d8d322e4
--- /dev/null
+++ b/ui/public/assets.json
@@ -0,0 +1 @@
+{"ServiceInsights":{"css":"/bundles/style/ServiceInsights.css","js":"/bundles/js/ServiceInsights.js"},"app":{"css":"/bundles/style/app.css","js":"/bundles/js/app.js"},"commons":{"js":"/bundles/js/commons.js"},"serviceGraphContainer":{"js":"/bundles/js/serviceGraphContainer.js"},"servicePerformance":{"css":"/bundles/style/servicePerformance.css","js":"/bundles/js/servicePerformance.js"},"vendors~ServiceInsights":{"js":"/bundles/js/vendors~ServiceInsights.js"},"vendors~ServiceInsights~serviceGraphContainer":{"js":"/bundles/js/vendors~ServiceInsights~serviceGraphContainer.js"},"vendors~serviceGraphContainer":{"js":"/bundles/js/vendors~serviceGraphContainer.js"},"vendors~servicePerformance":{"js":"/bundles/js/vendors~servicePerformance.js"},"vendors~vis":{"js":"/bundles/js/vendors~vis.js"}}
\ No newline at end of file
diff --git a/ui/public/favicon.ico b/ui/public/favicon.ico
new file mode 100755
index 000000000..b2222bda2
Binary files /dev/null and b/ui/public/favicon.ico differ
diff --git a/ui/public/fonts/LICENSE_THEMIFY b/ui/public/fonts/LICENSE_THEMIFY
new file mode 100644
index 000000000..7af402ad8
--- /dev/null
+++ b/ui/public/fonts/LICENSE_THEMIFY
@@ -0,0 +1,5 @@
+LICENSE
+
+- Themify Icons font licensed under: http://scripts.sil.org/OFL
+- Code licensed under: http://opensource.org/licenses/mit-license.html
+- All brand icons are copyright/trademarks of their respective owners.
diff --git a/ui/public/fonts/LICENSE_TITILLIUM_WEB b/ui/public/fonts/LICENSE_TITILLIUM_WEB
new file mode 100755
index 000000000..bbaa23a63
--- /dev/null
+++ b/ui/public/fonts/LICENSE_TITILLIUM_WEB
@@ -0,0 +1,93 @@
+Copyright (c) 2009-2011 by Accademia di Belle Arti di Urbino and students of MA course of Visual design. Some rights reserved.
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+http://scripts.sil.org/OFL
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/ui/public/fonts/themify.ttf b/ui/public/fonts/themify.ttf
new file mode 100755
index 000000000..5d627e701
Binary files /dev/null and b/ui/public/fonts/themify.ttf differ
diff --git a/ui/public/fonts/themify.woff b/ui/public/fonts/themify.woff
new file mode 100755
index 000000000..847ebd183
Binary files /dev/null and b/ui/public/fonts/themify.woff differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-200.ttf b/ui/public/fonts/titillium-web-v5-latin-200.ttf
new file mode 100644
index 000000000..28144d584
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-200.ttf differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-200.woff b/ui/public/fonts/titillium-web-v5-latin-200.woff
new file mode 100644
index 000000000..4322d25f4
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-200.woff differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-200.woff2 b/ui/public/fonts/titillium-web-v5-latin-200.woff2
new file mode 100644
index 000000000..3fd7a6e94
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-200.woff2 differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-300.ttf b/ui/public/fonts/titillium-web-v5-latin-300.ttf
new file mode 100644
index 000000000..782473a32
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-300.ttf differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-300.woff b/ui/public/fonts/titillium-web-v5-latin-300.woff
new file mode 100644
index 000000000..536005cd0
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-300.woff differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-300.woff2 b/ui/public/fonts/titillium-web-v5-latin-300.woff2
new file mode 100644
index 000000000..33b815c80
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-300.woff2 differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-600.ttf b/ui/public/fonts/titillium-web-v5-latin-600.ttf
new file mode 100644
index 000000000..674123f82
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-600.ttf differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-600.woff b/ui/public/fonts/titillium-web-v5-latin-600.woff
new file mode 100644
index 000000000..4273a4dd0
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-600.woff differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-600.woff2 b/ui/public/fonts/titillium-web-v5-latin-600.woff2
new file mode 100644
index 000000000..cc0f715f2
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-600.woff2 differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-700.ttf b/ui/public/fonts/titillium-web-v5-latin-700.ttf
new file mode 100644
index 000000000..a6b6f0b34
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-700.ttf differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-700.woff b/ui/public/fonts/titillium-web-v5-latin-700.woff
new file mode 100644
index 000000000..dba86dd3c
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-700.woff differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-700.woff2 b/ui/public/fonts/titillium-web-v5-latin-700.woff2
new file mode 100644
index 000000000..3c7211dd1
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-700.woff2 differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-regular.ttf b/ui/public/fonts/titillium-web-v5-latin-regular.ttf
new file mode 100644
index 000000000..e053d2e50
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-regular.ttf differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-regular.woff b/ui/public/fonts/titillium-web-v5-latin-regular.woff
new file mode 100644
index 000000000..766c54016
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-regular.woff differ
diff --git a/ui/public/fonts/titillium-web-v5-latin-regular.woff2 b/ui/public/fonts/titillium-web-v5-latin-regular.woff2
new file mode 100644
index 000000000..a421b4f82
Binary files /dev/null and b/ui/public/fonts/titillium-web-v5-latin-regular.woff2 differ
diff --git a/ui/public/images/assets/alerts.png b/ui/public/images/assets/alerts.png
new file mode 100644
index 000000000..4ed4bfad4
Binary files /dev/null and b/ui/public/images/assets/alerts.png differ
diff --git a/ui/public/images/assets/demo.gif b/ui/public/images/assets/demo.gif
new file mode 100644
index 000000000..be870b7ae
Binary files /dev/null and b/ui/public/images/assets/demo.gif differ
diff --git a/ui/public/images/assets/logo_lighter.png b/ui/public/images/assets/logo_lighter.png
new file mode 100644
index 000000000..7eb69e4bc
Binary files /dev/null and b/ui/public/images/assets/logo_lighter.png differ
diff --git a/ui/public/images/assets/logo_with_title.png b/ui/public/images/assets/logo_with_title.png
new file mode 100644
index 000000000..0cb029258
Binary files /dev/null and b/ui/public/images/assets/logo_with_title.png differ
diff --git a/ui/public/images/assets/logo_with_title_dark.png b/ui/public/images/assets/logo_with_title_dark.png
new file mode 100644
index 000000000..0ae94817b
Binary files /dev/null and b/ui/public/images/assets/logo_with_title_dark.png differ
diff --git a/ui/public/images/assets/logo_with_title_transparent.png b/ui/public/images/assets/logo_with_title_transparent.png
new file mode 100644
index 000000000..38d8fffeb
Binary files /dev/null and b/ui/public/images/assets/logo_with_title_transparent.png differ
diff --git a/ui/public/images/assets/service_graph.png b/ui/public/images/assets/service_graph.png
new file mode 100644
index 000000000..02e40d16f
Binary files /dev/null and b/ui/public/images/assets/service_graph.png differ
diff --git a/ui/public/images/assets/trace_timeline.png b/ui/public/images/assets/trace_timeline.png
new file mode 100644
index 000000000..03950870c
Binary files /dev/null and b/ui/public/images/assets/trace_timeline.png differ
diff --git a/ui/public/images/assets/trends.png b/ui/public/images/assets/trends.png
new file mode 100644
index 000000000..b2d4426f7
Binary files /dev/null and b/ui/public/images/assets/trends.png differ
diff --git a/ui/public/images/assets/universal_search.png b/ui/public/images/assets/universal_search.png
new file mode 100644
index 000000000..8a38022c1
Binary files /dev/null and b/ui/public/images/assets/universal_search.png differ
diff --git a/ui/public/images/error.svg b/ui/public/images/error.svg
new file mode 100644
index 000000000..bf052f03b
--- /dev/null
+++ b/ui/public/images/error.svg
@@ -0,0 +1,6 @@
+
diff --git a/ui/public/images/loading.gif b/ui/public/images/loading.gif
new file mode 100644
index 000000000..c8cc9c3b1
Binary files /dev/null and b/ui/public/images/loading.gif differ
diff --git a/ui/public/images/logo-white.png b/ui/public/images/logo-white.png
new file mode 100644
index 000000000..3f7129f2f
Binary files /dev/null and b/ui/public/images/logo-white.png differ
diff --git a/ui/public/images/logo.png b/ui/public/images/logo.png
new file mode 100644
index 000000000..8251d609d
Binary files /dev/null and b/ui/public/images/logo.png differ
diff --git a/ui/public/images/slack.png b/ui/public/images/slack.png
new file mode 100644
index 000000000..ce65025cb
Binary files /dev/null and b/ui/public/images/slack.png differ
diff --git a/ui/public/images/success.svg b/ui/public/images/success.svg
new file mode 100644
index 000000000..653c41a46
--- /dev/null
+++ b/ui/public/images/success.svg
@@ -0,0 +1,6 @@
+
diff --git a/ui/public/images/zipkin-logo.jpg b/ui/public/images/zipkin-logo.jpg
new file mode 100644
index 000000000..9bbd23f8c
Binary files /dev/null and b/ui/public/images/zipkin-logo.jpg differ
diff --git a/ui/public/scripts/particles.json b/ui/public/scripts/particles.json
new file mode 100644
index 000000000..3d8008d9d
--- /dev/null
+++ b/ui/public/scripts/particles.json
@@ -0,0 +1,110 @@
+{
+ "particles": {
+ "number": {
+ "value": 80,
+ "density": {
+ "enable": true,
+ "value_area": 800
+ }
+ },
+ "color": {
+ "value": "#ffffff"
+ },
+ "shape": {
+ "type": "circle",
+ "stroke": {
+ "width": 0,
+ "color": "#000000"
+ },
+ "polygon": {
+ "nb_sides": 5
+ },
+ "image": {
+ "src": "img/github.svg",
+ "width": 100,
+ "height": 100
+ }
+ },
+ "opacity": {
+ "value": 0.5,
+ "random": false,
+ "anim": {
+ "enable": false,
+ "speed": 1,
+ "opacity_min": 0.1,
+ "sync": false
+ }
+ },
+ "size": {
+ "value": 3,
+ "random": true,
+ "anim": {
+ "enable": false,
+ "speed": 40,
+ "size_min": 0.1,
+ "sync": false
+ }
+ },
+ "line_linked": {
+ "enable": true,
+ "distance": 150,
+ "color": "#ffffff",
+ "opacity": 0.4,
+ "width": 1
+ },
+ "move": {
+ "enable": true,
+ "speed": 6,
+ "direction": "none",
+ "random": false,
+ "straight": false,
+ "out_mode": "out",
+ "bounce": false,
+ "attract": {
+ "enable": false,
+ "rotateX": 600,
+ "rotateY": 1200
+ }
+ }
+ },
+ "interactivity": {
+ "detect_on": "canvas",
+ "events": {
+ "onhover": {
+ "enable": false,
+ "mode": "repulse"
+ },
+ "onclick": {
+ "enable": false,
+ "mode": "push"
+ },
+ "resize": true
+ },
+ "modes": {
+ "grab": {
+ "distance": 400,
+ "line_linked": {
+ "opacity": 1
+ }
+ },
+ "bubble": {
+ "distance": 400,
+ "size": 40,
+ "duration": 2,
+ "opacity": 8,
+ "speed": 3
+ },
+ "repulse": {
+ "distance": 200,
+ "duration": 0.4
+ },
+ "push": {
+ "particles_nb": 4
+ },
+ "remove": {
+ "particles_nb": 2
+ }
+ }
+ },
+ "retina_detect": true
+}
\ No newline at end of file
diff --git a/ui/public/scripts/particles.min.js b/ui/public/scripts/particles.min.js
new file mode 100755
index 000000000..b3d46d127
--- /dev/null
+++ b/ui/public/scripts/particles.min.js
@@ -0,0 +1,9 @@
+/* -----------------------------------------------
+/* Author : Vincent Garreau - vincentgarreau.com
+/* MIT license: http://opensource.org/licenses/MIT
+/* Demo / Generator : vincentgarreau.com/particles.js
+/* GitHub : github.com/VincentGarreau/particles.js
+/* How to use? : Check the GitHub README
+/* v2.0.0
+/* ----------------------------------------------- */
+function hexToRgb(e){var a=/^#?([a-f\d])([a-f\d])([a-f\d])$/i;e=e.replace(a,function(e,a,t,i){return a+a+t+t+i+i});var t=/^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(e);return t?{r:parseInt(t[1],16),g:parseInt(t[2],16),b:parseInt(t[3],16)}:null}function clamp(e,a,t){return Math.min(Math.max(e,a),t)}function isInArray(e,a){return a.indexOf(e)>-1}var pJS=function(e,a){var t=document.querySelector("#"+e+" > .particles-js-canvas-el");this.pJS={canvas:{el:t,w:t.offsetWidth,h:t.offsetHeight},particles:{number:{value:400,density:{enable:!0,value_area:800}},color:{value:"#fff"},shape:{type:"circle",stroke:{width:0,color:"#ff0000"},polygon:{nb_sides:5},image:{src:"",width:100,height:100}},opacity:{value:1,random:!1,anim:{enable:!1,speed:2,opacity_min:0,sync:!1}},size:{value:20,random:!1,anim:{enable:!1,speed:20,size_min:0,sync:!1}},line_linked:{enable:!0,distance:100,color:"#fff",opacity:1,width:1},move:{enable:!0,speed:2,direction:"none",random:!1,straight:!1,out_mode:"out",bounce:!1,attract:{enable:!1,rotateX:3e3,rotateY:3e3}},array:[]},interactivity:{detect_on:"canvas",events:{onhover:{enable:!0,mode:"grab"},onclick:{enable:!0,mode:"push"},resize:!0},modes:{grab:{distance:100,line_linked:{opacity:1}},bubble:{distance:200,size:80,duration:.4},repulse:{distance:200,duration:.4},push:{particles_nb:4},remove:{particles_nb:2}},mouse:{}},retina_detect:!1,fn:{interact:{},modes:{},vendors:{}},tmp:{}};var i=this.pJS;a&&Object.deepExtend(i,a),i.tmp.obj={size_value:i.particles.size.value,size_anim_speed:i.particles.size.anim.speed,move_speed:i.particles.move.speed,line_linked_distance:i.particles.line_linked.distance,line_linked_width:i.particles.line_linked.width,mode_grab_distance:i.interactivity.modes.grab.distance,mode_bubble_distance:i.interactivity.modes.bubble.distance,mode_bubble_size:i.interactivity.modes.bubble.size,mode_repulse_distance:i.interactivity.modes.repulse.distance},i.fn.retinaInit=function(){i.retina_detect&&window.devicePixelRatio>1?(i.canvas.pxratio=window.devicePixelRatio,i.tmp.retina=!0):(i.canvas.pxratio=1,i.tmp.retina=!1),i.canvas.w=i.canvas.el.offsetWidth*i.canvas.pxratio,i.canvas.h=i.canvas.el.offsetHeight*i.canvas.pxratio,i.particles.size.value=i.tmp.obj.size_value*i.canvas.pxratio,i.particles.size.anim.speed=i.tmp.obj.size_anim_speed*i.canvas.pxratio,i.particles.move.speed=i.tmp.obj.move_speed*i.canvas.pxratio,i.particles.line_linked.distance=i.tmp.obj.line_linked_distance*i.canvas.pxratio,i.interactivity.modes.grab.distance=i.tmp.obj.mode_grab_distance*i.canvas.pxratio,i.interactivity.modes.bubble.distance=i.tmp.obj.mode_bubble_distance*i.canvas.pxratio,i.particles.line_linked.width=i.tmp.obj.line_linked_width*i.canvas.pxratio,i.interactivity.modes.bubble.size=i.tmp.obj.mode_bubble_size*i.canvas.pxratio,i.interactivity.modes.repulse.distance=i.tmp.obj.mode_repulse_distance*i.canvas.pxratio},i.fn.canvasInit=function(){i.canvas.ctx=i.canvas.el.getContext("2d")},i.fn.canvasSize=function(){i.canvas.el.width=i.canvas.w,i.canvas.el.height=i.canvas.h,i&&i.interactivity.events.resize&&window.addEventListener("resize",function(){i.canvas.w=i.canvas.el.offsetWidth,i.canvas.h=i.canvas.el.offsetHeight,i.tmp.retina&&(i.canvas.w*=i.canvas.pxratio,i.canvas.h*=i.canvas.pxratio),i.canvas.el.width=i.canvas.w,i.canvas.el.height=i.canvas.h,i.particles.move.enable||(i.fn.particlesEmpty(),i.fn.particlesCreate(),i.fn.particlesDraw(),i.fn.vendors.densityAutoParticles()),i.fn.vendors.densityAutoParticles()})},i.fn.canvasPaint=function(){i.canvas.ctx.fillRect(0,0,i.canvas.w,i.canvas.h)},i.fn.canvasClear=function(){i.canvas.ctx.clearRect(0,0,i.canvas.w,i.canvas.h)},i.fn.particle=function(e,a,t){if(this.radius=(i.particles.size.random?Math.random():1)*i.particles.size.value,i.particles.size.anim.enable&&(this.size_status=!1,this.vs=i.particles.size.anim.speed/100,i.particles.size.anim.sync||(this.vs=this.vs*Math.random())),this.x=t?t.x:Math.random()*i.canvas.w,this.y=t?t.y:Math.random()*i.canvas.h,this.x>i.canvas.w-2*this.radius?this.x=this.x-this.radius:this.x<2*this.radius&&(this.x=this.x+this.radius),this.y>i.canvas.h-2*this.radius?this.y=this.y-this.radius:this.y<2*this.radius&&(this.y=this.y+this.radius),i.particles.move.bounce&&i.fn.vendors.checkOverlap(this,t),this.color={},"object"==typeof e.value)if(e.value instanceof Array){var s=e.value[Math.floor(Math.random()*i.particles.color.value.length)];this.color.rgb=hexToRgb(s)}else void 0!=e.value.r&&void 0!=e.value.g&&void 0!=e.value.b&&(this.color.rgb={r:e.value.r,g:e.value.g,b:e.value.b}),void 0!=e.value.h&&void 0!=e.value.s&&void 0!=e.value.l&&(this.color.hsl={h:e.value.h,s:e.value.s,l:e.value.l});else"random"==e.value?this.color.rgb={r:Math.floor(256*Math.random())+0,g:Math.floor(256*Math.random())+0,b:Math.floor(256*Math.random())+0}:"string"==typeof e.value&&(this.color=e,this.color.rgb=hexToRgb(this.color.value));this.opacity=(i.particles.opacity.random?Math.random():1)*i.particles.opacity.value,i.particles.opacity.anim.enable&&(this.opacity_status=!1,this.vo=i.particles.opacity.anim.speed/100,i.particles.opacity.anim.sync||(this.vo=this.vo*Math.random()));var n={};switch(i.particles.move.direction){case"top":n={x:0,y:-1};break;case"top-right":n={x:.5,y:-.5};break;case"right":n={x:1,y:-0};break;case"bottom-right":n={x:.5,y:.5};break;case"bottom":n={x:0,y:1};break;case"bottom-left":n={x:-.5,y:1};break;case"left":n={x:-1,y:0};break;case"top-left":n={x:-.5,y:-.5};break;default:n={x:0,y:0}}i.particles.move.straight?(this.vx=n.x,this.vy=n.y,i.particles.move.random&&(this.vx=this.vx*Math.random(),this.vy=this.vy*Math.random())):(this.vx=n.x+Math.random()-.5,this.vy=n.y+Math.random()-.5),this.vx_i=this.vx,this.vy_i=this.vy;var r=i.particles.shape.type;if("object"==typeof r){if(r instanceof Array){var c=r[Math.floor(Math.random()*r.length)];this.shape=c}}else this.shape=r;if("image"==this.shape){var o=i.particles.shape;this.img={src:o.image.src,ratio:o.image.width/o.image.height},this.img.ratio||(this.img.ratio=1),"svg"==i.tmp.img_type&&void 0!=i.tmp.source_svg&&(i.fn.vendors.createSvgImg(this),i.tmp.pushing&&(this.img.loaded=!1))}},i.fn.particle.prototype.draw=function(){function e(){i.canvas.ctx.drawImage(r,a.x-t,a.y-t,2*t,2*t/a.img.ratio)}var a=this;if(void 0!=a.radius_bubble)var t=a.radius_bubble;else var t=a.radius;if(void 0!=a.opacity_bubble)var s=a.opacity_bubble;else var s=a.opacity;if(a.color.rgb)var n="rgba("+a.color.rgb.r+","+a.color.rgb.g+","+a.color.rgb.b+","+s+")";else var n="hsla("+a.color.hsl.h+","+a.color.hsl.s+"%,"+a.color.hsl.l+"%,"+s+")";switch(i.canvas.ctx.fillStyle=n,i.canvas.ctx.beginPath(),a.shape){case"circle":i.canvas.ctx.arc(a.x,a.y,t,0,2*Math.PI,!1);break;case"edge":i.canvas.ctx.rect(a.x-t,a.y-t,2*t,2*t);break;case"triangle":i.fn.vendors.drawShape(i.canvas.ctx,a.x-t,a.y+t/1.66,2*t,3,2);break;case"polygon":i.fn.vendors.drawShape(i.canvas.ctx,a.x-t/(i.particles.shape.polygon.nb_sides/3.5),a.y-t/.76,2.66*t/(i.particles.shape.polygon.nb_sides/3),i.particles.shape.polygon.nb_sides,1);break;case"star":i.fn.vendors.drawShape(i.canvas.ctx,a.x-2*t/(i.particles.shape.polygon.nb_sides/4),a.y-t/1.52,2*t*2.66/(i.particles.shape.polygon.nb_sides/3),i.particles.shape.polygon.nb_sides,2);break;case"image":if("svg"==i.tmp.img_type)var r=a.img.obj;else var r=i.tmp.img_obj;r&&e()}i.canvas.ctx.closePath(),i.particles.shape.stroke.width>0&&(i.canvas.ctx.strokeStyle=i.particles.shape.stroke.color,i.canvas.ctx.lineWidth=i.particles.shape.stroke.width,i.canvas.ctx.stroke()),i.canvas.ctx.fill()},i.fn.particlesCreate=function(){for(var e=0;e=i.particles.opacity.value&&(a.opacity_status=!1),a.opacity+=a.vo):(a.opacity<=i.particles.opacity.anim.opacity_min&&(a.opacity_status=!0),a.opacity-=a.vo),a.opacity<0&&(a.opacity=0)),i.particles.size.anim.enable&&(1==a.size_status?(a.radius>=i.particles.size.value&&(a.size_status=!1),a.radius+=a.vs):(a.radius<=i.particles.size.anim.size_min&&(a.size_status=!0),a.radius-=a.vs),a.radius<0&&(a.radius=0)),"bounce"==i.particles.move.out_mode)var s={x_left:a.radius,x_right:i.canvas.w,y_top:a.radius,y_bottom:i.canvas.h};else var s={x_left:-a.radius,x_right:i.canvas.w+a.radius,y_top:-a.radius,y_bottom:i.canvas.h+a.radius};switch(a.x-a.radius>i.canvas.w?(a.x=s.x_left,a.y=Math.random()*i.canvas.h):a.x+a.radius<0&&(a.x=s.x_right,a.y=Math.random()*i.canvas.h),a.y-a.radius>i.canvas.h?(a.y=s.y_top,a.x=Math.random()*i.canvas.w):a.y+a.radius<0&&(a.y=s.y_bottom,a.x=Math.random()*i.canvas.w),i.particles.move.out_mode){case"bounce":a.x+a.radius>i.canvas.w?a.vx=-a.vx:a.x-a.radius<0&&(a.vx=-a.vx),a.y+a.radius>i.canvas.h?a.vy=-a.vy:a.y-a.radius<0&&(a.vy=-a.vy)}if(isInArray("grab",i.interactivity.events.onhover.mode)&&i.fn.modes.grabParticle(a),(isInArray("bubble",i.interactivity.events.onhover.mode)||isInArray("bubble",i.interactivity.events.onclick.mode))&&i.fn.modes.bubbleParticle(a),(isInArray("repulse",i.interactivity.events.onhover.mode)||isInArray("repulse",i.interactivity.events.onclick.mode))&&i.fn.modes.repulseParticle(a),i.particles.line_linked.enable||i.particles.move.attract.enable)for(var n=e+1;n0){var c=i.particles.line_linked.color_rgb_line;i.canvas.ctx.strokeStyle="rgba("+c.r+","+c.g+","+c.b+","+r+")",i.canvas.ctx.lineWidth=i.particles.line_linked.width,i.canvas.ctx.beginPath(),i.canvas.ctx.moveTo(e.x,e.y),i.canvas.ctx.lineTo(a.x,a.y),i.canvas.ctx.stroke(),i.canvas.ctx.closePath()}}},i.fn.interact.attractParticles=function(e,a){var t=e.x-a.x,s=e.y-a.y,n=Math.sqrt(t*t+s*s);if(n<=i.particles.line_linked.distance){var r=t/(1e3*i.particles.move.attract.rotateX),c=s/(1e3*i.particles.move.attract.rotateY);e.vx-=r,e.vy-=c,a.vx+=r,a.vy+=c}},i.fn.interact.bounceParticles=function(e,a){var t=e.x-a.x,i=e.y-a.y,s=Math.sqrt(t*t+i*i),n=e.radius+a.radius;n>=s&&(e.vx=-e.vx,e.vy=-e.vy,a.vx=-a.vx,a.vy=-a.vy)},i.fn.modes.pushParticles=function(e,a){i.tmp.pushing=!0;for(var t=0;e>t;t++)i.particles.array.push(new
i.fn.particle(i.particles.color,i.particles.opacity.value,{x:a?a.pos_x:Math.random()*i.canvas.w,y:a?a.pos_y:Math.random()*i.canvas.h})),t==e-1&&(i.particles.move.enable||i.fn.particlesDraw(),i.tmp.pushing=!1)},i.fn.modes.removeParticles=function(e){i.particles.array.splice(0,e),i.particles.move.enable||i.fn.particlesDraw()},i.fn.modes.bubbleParticle=function(e){function a(){e.opacity_bubble=e.opacity,e.radius_bubble=e.radius}function t(a,t,s,n,c){if(a!=t)if(i.tmp.bubble_duration_end){if(void 0!=s){var o=n-p*(n-a)/i.interactivity.modes.bubble.duration,l=a-o;d=a+l,"size"==c&&(e.radius_bubble=d),"opacity"==c&&(e.opacity_bubble=d)}}else if(r<=i.interactivity.modes.bubble.distance){if(void 0!=s)var v=s;else var v=n;if(v!=a){var d=n-p*(n-a)/i.interactivity.modes.bubble.duration;"size"==c&&(e.radius_bubble=d),"opacity"==c&&(e.opacity_bubble=d)}}else"size"==c&&(e.radius_bubble=void 0),"opacity"==c&&(e.opacity_bubble=void 0)}if(i.interactivity.events.onhover.enable&&isInArray("bubble",i.interactivity.events.onhover.mode)){var s=e.x-i.interactivity.mouse.pos_x,n=e.y-i.interactivity.mouse.pos_y,r=Math.sqrt(s*s+n*n),c=1-r/i.interactivity.modes.bubble.distance;if(r<=i.interactivity.modes.bubble.distance){if(c>=0&&"mousemove"==i.interactivity.status){if(i.interactivity.modes.bubble.size!=i.particles.size.value)if(i.interactivity.modes.bubble.size>i.particles.size.value){var o=e.radius+i.interactivity.modes.bubble.size*c;o>=0&&(e.radius_bubble=o)}else{var l=e.radius-i.interactivity.modes.bubble.size,o=e.radius-l*c;o>0?e.radius_bubble=o:e.radius_bubble=0}if(i.interactivity.modes.bubble.opacity!=i.particles.opacity.value)if(i.interactivity.modes.bubble.opacity>i.particles.opacity.value){var v=i.interactivity.modes.bubble.opacity*c;v>e.opacity&&v<=i.interactivity.modes.bubble.opacity&&(e.opacity_bubble=v)}else{var v=e.opacity-(i.particles.opacity.value-i.interactivity.modes.bubble.opacity)*c;v=i.interactivity.modes.bubble.opacity&&(e.opacity_bubble=v)}}}else a();"mouseleave"==i.interactivity.status&&a()}else if(i.interactivity.events.onclick.enable&&isInArray("bubble",i.interactivity.events.onclick.mode)){if(i.tmp.bubble_clicking){var s=e.x-i.interactivity.mouse.click_pos_x,n=e.y-i.interactivity.mouse.click_pos_y,r=Math.sqrt(s*s+n*n),p=((new Date).getTime()-i.interactivity.mouse.click_time)/1e3;p>i.interactivity.modes.bubble.duration&&(i.tmp.bubble_duration_end=!0),p>2*i.interactivity.modes.bubble.duration&&(i.tmp.bubble_clicking=!1,i.tmp.bubble_duration_end=!1)}i.tmp.bubble_clicking&&(t(i.interactivity.modes.bubble.size,i.particles.size.value,e.radius_bubble,e.radius,"size"),t(i.interactivity.modes.bubble.opacity,i.particles.opacity.value,e.opacity_bubble,e.opacity,"opacity"))}},i.fn.modes.repulseParticle=function(e){function a(){var a=Math.atan2(d,p);if(e.vx=u*Math.cos(a),e.vy=u*Math.sin(a),"bounce"==i.particles.move.out_mode){var t={x:e.x+e.vx,y:e.y+e.vy};t.x+e.radius>i.canvas.w?e.vx=-e.vx:t.x-e.radius<0&&(e.vx=-e.vx),t.y+e.radius>i.canvas.h?e.vy=-e.vy:t.y-e.radius<0&&(e.vy=-e.vy)}}if(i.interactivity.events.onhover.enable&&isInArray("repulse",i.interactivity.events.onhover.mode)&&"mousemove"==i.interactivity.status){var t=e.x-i.interactivity.mouse.pos_x,s=e.y-i.interactivity.mouse.pos_y,n=Math.sqrt(t*t+s*s),r={x:t/n,y:s/n},c=i.interactivity.modes.repulse.distance,o=100,l=clamp(1/c*(-1*Math.pow(n/c,2)+1)*c*o,0,50),v={x:e.x+r.x*l,y:e.y+r.y*l};"bounce"==i.particles.move.out_mode?(v.x-e.radius>0&&v.x+e.radius0&&v.y+e.radius=m&&a()}else 
0==i.tmp.repulse_clicking&&(e.vx=e.vx_i,e.vy=e.vy_i)},i.fn.modes.grabParticle=function(e){if(i.interactivity.events.onhover.enable&&"mousemove"==i.interactivity.status){var a=e.x-i.interactivity.mouse.pos_x,t=e.y-i.interactivity.mouse.pos_y,s=Math.sqrt(a*a+t*t);if(s<=i.interactivity.modes.grab.distance){var n=i.interactivity.modes.grab.line_linked.opacity-s/(1/i.interactivity.modes.grab.line_linked.opacity)/i.interactivity.modes.grab.distance;if(n>0){var r=i.particles.line_linked.color_rgb_line;i.canvas.ctx.strokeStyle="rgba("+r.r+","+r.g+","+r.b+","+n+")",i.canvas.ctx.lineWidth=i.particles.line_linked.width,i.canvas.ctx.beginPath(),i.canvas.ctx.moveTo(e.x,e.y),i.canvas.ctx.lineTo(i.interactivity.mouse.pos_x,i.interactivity.mouse.pos_y),i.canvas.ctx.stroke(),i.canvas.ctx.closePath()}}}},i.fn.vendors.eventsListeners=function(){"window"==i.interactivity.detect_on?i.interactivity.el=window:i.interactivity.el=i.canvas.el,(i.interactivity.events.onhover.enable||i.interactivity.events.onclick.enable)&&(i.interactivity.el.addEventListener("mousemove",function(e){if(i.interactivity.el==window)var a=e.clientX,t=e.clientY;else var a=e.offsetX||e.clientX,t=e.offsetY||e.clientY;i.interactivity.mouse.pos_x=a,i.interactivity.mouse.pos_y=t,i.tmp.retina&&(i.interactivity.mouse.pos_x*=i.canvas.pxratio,i.interactivity.mouse.pos_y*=i.canvas.pxratio),i.interactivity.status="mousemove"}),i.interactivity.el.addEventListener("mouseleave",function(e){i.interactivity.mouse.pos_x=null,i.interactivity.mouse.pos_y=null,i.interactivity.status="mouseleave"})),i.interactivity.events.onclick.enable&&i.interactivity.el.addEventListener("click",function(){if(i.interactivity.mouse.click_pos_x=i.interactivity.mouse.pos_x,i.interactivity.mouse.click_pos_y=i.interactivity.mouse.pos_y,i.interactivity.mouse.click_time=(new Date).getTime(),i.interactivity.events.onclick.enable)switch(i.interactivity.events.onclick.mode){case"push":i.particles.move.enable?i.fn.modes.pushParticles(i.interactivity.modes.push.particles_nb,i.interactivity.mouse):1==i.interactivity.modes.push.particles_nb?i.fn.modes.pushParticles(i.interactivity.modes.push.particles_nb,i.interactivity.mouse):i.interactivity.modes.push.particles_nb>1&&i.fn.modes.pushParticles(i.interactivity.modes.push.particles_nb);break;case"remove":i.fn.modes.removeParticles(i.interactivity.modes.remove.particles_nb);break;case"bubble":i.tmp.bubble_clicking=!0;break;case"repulse":i.tmp.repulse_clicking=!0,i.tmp.repulse_count=0,i.tmp.repulse_finish=!1,setTimeout(function(){i.tmp.repulse_clicking=!1},1e3*i.interactivity.modes.repulse.duration)}})},i.fn.vendors.densityAutoParticles=function(){if(i.particles.number.density.enable){var e=i.canvas.el.width*i.canvas.el.height/1e3;i.tmp.retina&&(e/=2*i.canvas.pxratio);var a=e*i.particles.number.value/i.particles.number.density.value_area,t=i.particles.array.length-a;0>t?i.fn.modes.pushParticles(Math.abs(t)):i.fn.modes.removeParticles(t)}},i.fn.vendors.checkOverlap=function(e,a){for(var t=0;tv;v++)e.lineTo(i,0),e.translate(i,0),e.rotate(l);e.fill(),e.restore()},i.fn.vendors.exportImg=function(){window.open(i.canvas.el.toDataURL("image/png"),"_blank")},i.fn.vendors.loadImg=function(e){if(i.tmp.img_error=void 0,""!=i.particles.shape.image.src)if("svg"==e){var a=new XMLHttpRequest;a.open("GET",i.particles.shape.image.src),a.onreadystatechange=function(e){4==a.readyState&&(200==a.status?(i.tmp.source_svg=e.currentTarget.response,i.fn.vendors.checkBeforeDraw()):(console.log("Error pJS - Image not found"),i.tmp.img_error=!0))},a.send()}else{var 
t=new Image;t.addEventListener("load",function(){i.tmp.img_obj=t,i.fn.vendors.checkBeforeDraw()}),t.src=i.particles.shape.image.src}else console.log("Error pJS - No image.src"),i.tmp.img_error=!0},i.fn.vendors.draw=function(){"image"==i.particles.shape.type?"svg"==i.tmp.img_type?i.tmp.count_svg>=i.particles.number.value?(i.fn.particlesDraw(),i.particles.move.enable?i.fn.drawAnimFrame=requestAnimFrame(i.fn.vendors.draw):cancelRequestAnimFrame(i.fn.drawAnimFrame)):i.tmp.img_error||(i.fn.drawAnimFrame=requestAnimFrame(i.fn.vendors.draw)):void 0!=i.tmp.img_obj?(i.fn.particlesDraw(),i.particles.move.enable?i.fn.drawAnimFrame=requestAnimFrame(i.fn.vendors.draw):cancelRequestAnimFrame(i.fn.drawAnimFrame)):i.tmp.img_error||(i.fn.drawAnimFrame=requestAnimFrame(i.fn.vendors.draw)):(i.fn.particlesDraw(),i.particles.move.enable?i.fn.drawAnimFrame=requestAnimFrame(i.fn.vendors.draw):cancelRequestAnimFrame(i.fn.drawAnimFrame))},i.fn.vendors.checkBeforeDraw=function(){"image"==i.particles.shape.type?"svg"==i.tmp.img_type&&void 0==i.tmp.source_svg?i.tmp.checkAnimFrame=requestAnimFrame(check):(cancelRequestAnimFrame(i.tmp.checkAnimFrame),i.tmp.img_error||(i.fn.vendors.init(),i.fn.vendors.draw())):(i.fn.vendors.init(),i.fn.vendors.draw())},i.fn.vendors.init=function(){i.fn.retinaInit(),i.fn.canvasInit(),i.fn.canvasSize(),i.fn.canvasPaint(),i.fn.particlesCreate(),i.fn.vendors.densityAutoParticles(),i.particles.line_linked.color_rgb_line=hexToRgb(i.particles.line_linked.color)},i.fn.vendors.start=function(){isInArray("image",i.particles.shape.type)?(i.tmp.img_type=i.particles.shape.image.src.substr(i.particles.shape.image.src.length-3),i.fn.vendors.loadImg(i.tmp.img_type)):i.fn.vendors.checkBeforeDraw()},i.fn.vendors.eventsListeners(),i.fn.vendors.start()};Object.deepExtend=function(e,a){for(var t in a)a[t]&&a[t].constructor&&a[t].constructor===Object?(e[t]=e[t]||{},arguments.callee(e[t],a[t])):e[t]=a[t];return e},window.requestAnimFrame=function(){return window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||function(e){window.setTimeout(e,1e3/60)}}(),window.cancelRequestAnimFrame=function(){return window.cancelAnimationFrame||window.webkitCancelRequestAnimationFrame||window.mozCancelRequestAnimationFrame||window.oCancelRequestAnimationFrame||window.msCancelRequestAnimationFrame||clearTimeout}(),window.pJSDom=[],window.particlesJS=function(e,a){"string"!=typeof e&&(a=e,e="particles-js"),e||(e="particles-js");var t=document.getElementById(e),i="particles-js-canvas-el",s=t.getElementsByClassName(i);if(s.length)for(;s.length>0;)t.removeChild(s[0]);var n=document.createElement("canvas");n.className=i,n.style.width="100%",n.style.height="100%";var r=document.getElementById(e).appendChild(n);null!=r&&pJSDom.push(new pJS(e,a))},window.particlesJS.load=function(e,a,t){var i=new XMLHttpRequest;i.open("GET",a),i.onreadystatechange=function(a){if(4==i.readyState)if(200==i.status){var s=JSON.parse(a.currentTarget.response);window.particlesJS(e,s),t&&t()}else console.log("Error pJS - XMLHttpRequest status: "+i.status),console.log("Error pJS - File config not found")},i.send()};
\ No newline at end of file
diff --git a/ui/server/app.js b/ui/server/app.js
new file mode 100644
index 000000000..6696f766f
--- /dev/null
+++ b/ui/server/app.js
@@ -0,0 +1,119 @@
+/* eslint-disable global-require */
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const path = require('path');
+const express = require('express');
+const favicon = require('serve-favicon');
+const compression = require('compression');
+const axios = require('axios');
+const Q = require('q');
+
+const config = require('./config/config');
+const logger = require('./utils/logger');
+const metricsMiddleware = require('./utils/metricsMiddleware');
+const authChecker = require('./sso/authChecker');
+
+const errorLogger = logger.withIdentifier('invocation:failure');
+
+const app = express();
+
+const bodyParser = require('body-parser');
+
+// CONFIGURATIONS
+axios.defaults.timeout = config.upstreamTimeout;
+axios.defaults.headers.post['Content-Type'] = 'application/json';
+Q.longStackSupport = true;
+app.set('port', config.port);
+app.set('views', path.join(__dirname, 'views'));
+app.set('view engine', 'pug');
+app.set('etag', false);
+app.set('x-powered-by', false);
+app.set('trust proxy', 1);
+
+// MIDDLEWARE SETUP
+app.use(compression());
+app.use(favicon(`${__dirname}/../public/favicon.ico`));
+if (process.env.NODE_ENV === 'development') {
+ // don't browser cache if in dev mode
+ app.use('/bundles', express.static(path.join(__dirname, '../public/bundles'), {maxAge: 0}));
+} else {
+ // browser cache aggressively for non-dev environments
+ app.use('/bundles', express.static(path.join(__dirname, '../public/bundles'), {maxAge: '60d'}));
+}
+app.use(express.static(path.join(__dirname, '../public'), {maxAge: '7d'}));
+app.use(logger.REQUEST_LOGGER);
+app.use(logger.ERROR_LOGGER);
+app.use(metricsMiddleware.httpMetrics);
+app.use(bodyParser.json());
+
+// MIDDLEWARE AND ROUTES FOR SSO
+if (config.enableSSO) {
+ const passport = require('passport');
+ const cookieSession = require('cookie-session');
+
+ app.use(bodyParser.urlencoded({extended: false}));
+ app.use(
+ cookieSession({
+ secret: config.sessionSecret,
+ maxAge: config.sessionTimeout
+ })
+ );
+
+ app.use(passport.initialize());
+ app.use(passport.session());
+
+ app.use('/auth', require('./routes/auth'));
+ app.use('/sso', require('./routes/sso'));
+ app.use('/user', require('./routes/user'));
+ app.use('/api', authChecker.forApi);
+}
+
+// API ROUTING
+
+const apis = [];
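+// mount only the API routes whose backing connector is configured and enabled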
+if (config.connectors.traces && config.connectors.traces.connectorName !== 'disabled') apis.push(require('./routes/servicesApi'));
+if (config.connectors.traces && config.connectors.traces.connectorName !== 'disabled') apis.push(require('./routes/tracesApi'));
+if (config.connectors.trends && config.connectors.trends.connectorName !== 'disabled') apis.push(require('./routes/trendsApi'));
+if (config.connectors.trends && config.connectors.trends.connectorName !== 'disabled') apis.push(require('./routes/servicesPerfApi'));
+if (config.connectors.alerts && config.connectors.alerts.connectorName !== 'disabled') apis.push(require('./routes/alertsApi'));
+if (config.connectors.serviceGraph && config.connectors.serviceGraph.connectorName !== 'disabled') apis.push(require('./routes/serviceGraphApi'));
+// prettier-ignore
+if (config.connectors.serviceInsights && config.connectors.serviceInsights.enableServiceInsights) apis.push(require('./routes/serviceInsightsApi'));
+
+app.use('/api', ...apis);
+
+// PAGE ROUTING
+const indexRoute = require('./routes/index');
+
+if (config.enableSSO) {
+ app.use('/login', require('./routes/login'));
+ app.use('/', authChecker.forPage);
+}
+app.use('/', indexRoute);
+
+// ERROR-HANDLING
+app.use((err, req, res, next) => { // eslint-disable-line no-unused-vars
+ errorLogger.error(err);
+ next(err);
+});
+
+module.exports = app;
diff --git a/ui/server/config/base.js b/ui/server/config/base.js
new file mode 100644
index 000000000..187fe469e
--- /dev/null
+++ b/ui/server/config/base.js
@@ -0,0 +1,251 @@
+/* istanbul ignore file */
+module.exports = {
+ // app port
+ port: 8080,
+
+ // use when https endpoint is needed
+ // https: {
+ // keyFile: '', // path for private key file
+ // certFile: '' // path for ssl cert file
+ // },
+
+ // whether to start in cluster mode or not
+ cluster: false,
+
+ // default timeout in ms for all downstream calls made by the connectors
+ upstreamTimeout: 55000,
+
+ graphite: {
+ host: 'host',
+ port: 2003
+ },
+
+ // Refresh interval for auto refreshing trends and alerts
+ refreshInterval: 60000,
+
+ // Google Analytics Tracking ID
+ gaTrackingID: 'UA-XXXXXXXX-X',
+
+ // Encoding for trends and alerts
+ // base64 and periodreplacement are supported; defaults to noop if none is provided
+ encoder: 'periodreplacement',
+
+ grpcOptions: {
+ 'grpc.max_receive_message_length': 10485760
+ },
+
+ // this list defines the subsystems for which the UI should be enabled
+ // traces connector must be present in connectors config
+ connectors: {
+ traces: {
+ // name of config connector module to use for fetching traces data from downstream
+ // Options (connector) :
+ // - haystack - gets data from haystack query service
+ // - zipkin - bridge for using an existing zipkin api,
+ // zipkin connector expects a zipkin config field specifying zipkin api url,
+ // e.g. zipkinUrl: 'http://<zipkin-host>/api/v2'
+ // - stub - a stub used during development, will be removed in future
+ // - mock - similar to stub, but specifically for testing Service Insights
+ connectorName: 'stub',
+ // Override haystack connector host and port.
+ // haystackHost: '127.0.0.1',
+ // haystackPort: '8088',
+ // interval in seconds to refresh the service and operation data from backend
+ serviceRefreshIntervalInSecs: 60
+ },
+ trends: {
+ // name of config connector module to use for fetching trends data from downstream
+ // Options :
+ // - haystack - gets data from Haystack Metric Tank Setup
+ // haystack connector also expects config field specifying metricTankUrl
+ // - stub - a stub used during development, will be removed in future
+ connectorName: 'stub',
+ // Feature switches
+ enableServicePerformance: true,
+ enableServiceLevelTrends: true
+ },
+ alerts: {
+ // name of config connector module to use for fetching anomaly detection data from downstream
+ // Options :
+ // - haystack - Gets data from Haystack adaptive alerting
+ // you must specify haystack host and port
+ // - stub - a stub used during development, will be removed in future
+ connectorName: 'stub',
+ // haystackHost: 'https://<haystack-host>/alert-api',
+ // haystackPort: 8080,
+
+ // frequency of alerts coming in the system
+ alertFreqInSec: 300,
+
+ // Buffer time used when merging successive alerts; a point is accepted if the
+ // successive alert falls within this buffer
+ alertMergeBufferTimeInSec: 60,
+
+ subscriptions: {
+ // name of config connector module to use for managing subscriptions
+ // Options :
+ // - stub - a stub used during development, will be removed in future
+ connectorName: 'stub',
+ enabled: true
+ }
+ },
+ serviceGraph: {
+ // name of config connector module to use for fetching dependency graph data from downstream
+ // options :
+ // - stub - a stub used during development, will be removed in future
+ // - haystack - gets data from haystack-service-graph
+ // you must specify serviceGraphUrl
+ // e.g. serviceGraphUrl: 'https://<haystack-host>/serviceGraph'
+ connectorName: 'stub',
+ windowSizeInSecs: 3600
+ },
+ serviceInsights: {
+ // serviceInsights uses traces.connectorName
+ // Service Insights is beta; set enableServiceInsights to false to disable it
+ enableServiceInsights: true,
+ // max number of traces to retrieve
+ traceLimit: 200,
+ // functions to generate nodes from different types of spans
+ // customize these to match tech stack, available span tags, and how you want nodes displayed
+ spanTypes: {
+ edge: {
+ isType: (span) => span.serviceName === 'edge',
+ nodeId: (span) => {
+ const route = span.tags.find((tag) => tag.key === 'edge.route');
+ return route ? route.value : span.serviceName;
+ },
+ nodeName: (span) => {
+ const route = span.tags.find((tag) => tag.key === 'edge.route');
+ return route ? route.value : span.serviceName;
+ }
+ },
+ gateway: {
+ isType: (span) => span.serviceName === 'gateway',
+ nodeId: (span) => {
+ const destination = span.tags.find((tag) => tag.key === 'gateway.destination');
+ return destination ? destination.value : span.serviceName;
+ },
+ nodeName: (span) => {
+ const datacenter = span.tags.find((tag) => tag.key === 'app.datacenter');
+ return datacenter ? datacenter.value : span.serviceName;
+ }
+ },
+ mesh: {
+ isType: (span) => span.serviceName === 'service-mesh',
+ nodeId: (span) => span.operationName,
+ nodeName: (span) => span.operationName
+ },
+ database: {
+ isType: (span) => span.tags.some((tag) => tag.key === 'db.type'),
+ nodeId: (span) => span.operationName,
+ nodeName: (span) => span.operationName,
+ databaseType: (span) => span.tags.find((tag) => tag.key === 'db.type').value
+ },
+ outbound: {
+ isType: (span) => {
+ const hasMergedTag = span.tags.some((tag) => tag.key === 'X-HAYSTACK-IS-MERGED-SPAN' && tag.value === true);
+ const hasClientTag = span.tags.some((tag) => tag.key === 'span.kind' && tag.value === 'client');
+ return hasMergedTag ? false : hasClientTag;
+ },
+ nodeId: (span) => span.operationName,
+ nodeName: (span) => span.operationName
+ },
+ service: {
+ // isType implicitly true when none of the above
+ nodeId: (span) => span.serviceName,
+ nodeName: (span) => span.serviceName
+ }
+ }
+ },
+ blobs: {
+ // to enable/disable blobs decorator
+ // Blobs Service endpoint (optional) can be passed with blobsUrl key to redirect blobs request
+ // e.g. blobsUrl : 'https://haystack-blob-example-server:9090'
+ enableBlobs: false
+ }
+ },
+ timeWindowPresetOptions: [
+ {
+ shortName: '5m',
+ longName: '5 minutes',
+ value: 5 * 60 * 1000
+ },
+ {
+ shortName: '15m',
+ longName: '15 minutes',
+ value: 15 * 60 * 1000
+ },
+ {
+ shortName: '1h',
+ longName: '1 hour',
+ value: 60 * 60 * 1000
+ },
+ {
+ shortName: '6h',
+ longName: '6 hours',
+ value: 6 * 60 * 60 * 1000
+ },
+ {
+ shortName: '12h',
+ longName: '12 hours',
+ value: 12 * 60 * 60 * 1000
+ },
+ {
+ shortName: '24h',
+ longName: '24 hours',
+ value: 24 * 60 * 60 * 1000
+ },
+ {
+ shortName: '3d',
+ longName: '3 days',
+ value: 3 * 24 * 60 * 60 * 1000
+ },
+ {
+ shortName: '7d',
+ longName: '7 days',
+ value: 7 * 24 * 60 * 60 * 1000
+ },
+ {
+ shortName: '30d',
+ longName: '30 days',
+ value: 30 * 24 * 60 * 60 * 1000
+ }
+ ],
+
+ relatedTracesOptions: [
+ {
+ fieldTag: 'url2',
+ propertyToMatch: 'url2',
+ fieldDescription: 'test trait'
+ }
+ ]
+
+ // externalLinking: [
+ // {
+ // key: 'serviceName', // Searchable key to add to external link list
+ // url: 'https://my-splunk-url.com/app/search/search?q=#{key}=#{value}',
+ // label: 'Splunk-Instance-1'
+ // },
+ // {
+ // key: 'tag',
+ // tagKey: 'external-link-key', // Tag to create a link from in the span tag list
+ // url: 'https://my-other-splunk-url.com/app/search/search?q=#{key}=#{value}',
+ // label: 'Splunk-Instance-2'
+ // },
+ // {
+ // key: 'traceId', // Include traceId to add external link in trace context view
+ // url: 'https://my-splunk-url.com/app/search/search?q=traceId=#{value}',
+ // label: 'Splunk-Instance-1'
+ // }
+ // ]
+
+ // use if you need SAML-backed SSO auth
+ //
+ // enableSSO: true, // flag for enabling sso
+ // saml: {
+ // entry_point: '', // SAML entrypoint
+ // issuer: '' // SAML issuer
+ // },
+ // sessionTimeout: 60 * 60 * 1000, // timeout for session
+ // sessionSecret: 'XXXXXXXXXXXXX' // secret key for session
+};
diff --git a/ui/server/config/config.js b/ui/server/config/config.js
new file mode 100644
index 000000000..1d532f575
--- /dev/null
+++ b/ui/server/config/config.js
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const _ = require('lodash');
+const baseConfiguration = require('../config/base');
+const override = require('./override');
+
+let finalConfiguration = _.merge({}, baseConfiguration);
+
+// if an override configuration file is provided, extend the base config with
+// the provided one. This is not a recursive merge, just a top level extend with
+// the overridden config
+if (process.env.HAYSTACK_OVERRIDES_CONFIG_PATH) {
+ let overridesConfigPath = process.env.HAYSTACK_OVERRIDES_CONFIG_PATH;
+ if (!overridesConfigPath.startsWith('/')) {
+ overridesConfigPath = `${process.cwd()}/${overridesConfigPath}`;
+ }
+ // eslint-disable-next-line global-require, import/no-dynamic-require
+ const environmentSpecificConfiguration = require(overridesConfigPath);
+ finalConfiguration = _.extend({}, finalConfiguration, environmentSpecificConfiguration);
+}
+
+// if there are environment variables, read them as objects and merge them
+// into the current configuration
+const overrideObject = override.readOverrides(process.env);
+module.exports = _.merge({}, finalConfiguration, overrideObject);
diff --git a/ui/server/config/override.js b/ui/server/config/override.js
new file mode 100644
index 000000000..454a607c2
--- /dev/null
+++ b/ui/server/config/override.js
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+const _ = require('lodash');
+
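+// uppercases the character after each non-word run, e.g. camelize('zipkin url') === 'zipkinUrl'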
+const camelize = str => str.replace(/\W+(.)/g, (match, chr) => chr.toUpperCase());
+
+// this code tokenizes incoming keys to produce json objects
+// For example, a key like 'HAYSTACK_PROP_CONNECTORS_TRACES_ZIPKIN__URL' with value 'foo'
+// will turn into '{ connectors: { traces: { zipkinUrl: 'foo' } } }'
+// Note: a single _ starts a new nesting level, while a __ joins words within one
+// level into a camelCase key
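+// e.g. with HAYSTACK_PROP_GRAPHITE_HOST='graphite.example.com' in the environment,
+// readOverrides(process.env) yields { graphite: { host: 'graphite.example.com' } }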
+module.exports = {
+
+ readOverrides: (collection) => {
+ const overrideData = {};
+ _.each(collection, (value, key) => {
+ if (key.startsWith('HAYSTACK_PROP_')) {
+ const parts = key.toLowerCase().replace(/__/g, ' ').split('_');
+ parts.splice(0, 2);
+
+ let configObject = overrideData;
+ let part = parts.shift();
+
+ while (part) {
+ part = camelize(part);
+ if (parts.length) {
+ if (configObject[part] == null) {
+ configObject[part] = {};
+ }
+ configObject = configObject[part];
+ } else {
+ configObject[part] = value;
+ }
+ part = parts.shift();
+ }
+ }
+ });
+ return overrideData;
+ }
+};
diff --git a/ui/server/connectors/alerts/haystack/alertsConnector.js b/ui/server/connectors/alerts/haystack/alertsConnector.js
new file mode 100644
index 000000000..b79c10acb
--- /dev/null
+++ b/ui/server/connectors/alerts/haystack/alertsConnector.js
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+const _ = require('lodash');
+const grpc = require('grpc');
+
+const config = require('../../../config/config');
+const servicesConnector = config.connectors.traces && require('../../services/servicesConnector'); // eslint-disable-line
+const trendsConnector = require('../../trends/haystack/trendsConnector');
+
+const fetcher = require('../../operations/grpcFetcher');
+const services = require('../../../../static_codegen/anomaly/anomalyReader_grpc_pb');
+const messages = require('../../../../static_codegen/anomaly/anomalyReader_pb');
+const MetricpointNameEncoder = require('../../utils/encoders/MetricpointNameEncoder');
+
+const metricpointNameEncoder = new MetricpointNameEncoder(config.encoder);
+
+const grpcOptions = config.grpcOptions || {};
+
+const connector = {};
+const client = new services.AnomalyReaderClient(
+ `${config.connectors.alerts.haystackHost}:${config.connectors.alerts.haystackPort}`,
+ grpc.credentials.createInsecure(),
+ grpcOptions); // TODO make client secure
+const alertTypes = ['duration', 'failure-span'];
+const getAnomaliesFetcher = fetcher('getAnomalies', client);
+const alertFreqInSec = config.connectors.alerts.alertFreqInSec || 300; // TODO make this based on alert type
+
+function fetchOperations(serviceName) {
+ if (servicesConnector) {
+ return servicesConnector.getOperations(serviceName);
+ }
+ return trendsConnector.getOperationNames(serviceName);
+}
+
+function sameOperationAndType(alertToCheck, operationName, type) {
+ if (!alertToCheck) {
+ return false;
+ }
+ const operationToCheck = alertToCheck.labelsMap.find(label => label[0] === 'operationName');
+ const typeToCheck = alertToCheck.labelsMap.find(label => label[0] === 'metric_key');
+ return ((operationToCheck && operationToCheck[1] === operationName) && typeToCheck && typeToCheck[1] === type);
+}
+
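+// Collapses the raw anomaly responses into one record per (operationName, metric_key):
+// duplicate entries are merged, and a record is marked unhealthy when its latest
+// anomaly falls within the last alertFreqInSec seconds.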
+function parseOperationAlertsResponse(data) {
+ const fullAnomalyList = data.searchanomalyresponseList;
+ const mappedAndMergedResponse = fullAnomalyList.map((anomalyResponse, baseIterationIndex) => {
+ if (anomalyResponse === null) return null;
+ const operationLabel = anomalyResponse.labelsMap.find(label => label[0] === 'operationName');
+ if (operationLabel) {
+ const operationName = operationLabel[1];
+ const type = anomalyResponse.labelsMap.find(label => label[0] === 'metric_key')[1];
+ let anomaliesList = anomalyResponse.anomaliesList;
+
+ fullAnomalyList.slice(baseIterationIndex + 1, fullAnomalyList.length).forEach((alertToCheck, checkIndex) => {
+ if (sameOperationAndType(alertToCheck, operationName, type)) {
+ anomaliesList = _.merge(anomaliesList, alertToCheck.anomaliesList);
+ fullAnomalyList[baseIterationIndex + checkIndex + 1] = null;
+ }
+ });
+
+ const latestUnhealthy = _.maxBy(anomaliesList, anomaly => anomaly.timestamp);
+ const timestamp = latestUnhealthy && latestUnhealthy.timestamp * 1000;
+ const isUnhealthy = (timestamp && timestamp >= (Date.now() - (alertFreqInSec * 1000)));
+
+ return {
+ operationName,
+ type,
+ isUnhealthy,
+ timestamp
+ };
+ }
+
+ return null;
+ });
+
+ return _.filter(mappedAndMergedResponse, a => a !== null);
+}
+
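+// Fetches anomalies for one (serviceName, stat, metric_key) combination between `from` and now.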
+function fetchAlerts(serviceName, interval, from, stat, key) {
+ const request = new messages.SearchAnamoliesRequest();
+ request.getLabelsMap()
+ .set('serviceName', metricpointNameEncoder.encodeMetricpointName(decodeURIComponent(serviceName)))
+ .set('interval', interval)
+ .set('mtype', 'gauge')
+ .set('product', 'haystack')
+ .set('stat', stat)
+ .set('metric_key', key);
+ request.setStarttime(Math.trunc(from / 1000));
+ request.setEndtime(Math.trunc(Date.now() / 1000));
+ request.setSize(-1);
+
+ return getAnomaliesFetcher
+ .fetch(request)
+ .then(pbResult => parseOperationAlertsResponse(messages.SearchAnomaliesResponse.toObject(false, pbResult)));
+}
+
+function fetchOperationAlerts(serviceName, interval, from) {
+ return Q.all([fetchAlerts(serviceName, interval, from, '*_99', 'duration'), fetchAlerts(serviceName, interval, from, 'count', 'failure-span')])
+ .then(stats => (_.merge(stats[0], stats[1])));
+}
+
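+// Produces one entry per (operation, alertType) pair, using a healthy placeholder
+// when an operation has no matching alert; without an operations list, the raw
+// alerts are simply grouped by alert type.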
+function mergeOperationsWithAlerts({operationAlerts, operations}) {
+ if (operations && operations.length) {
+ return _.flatten(operations.map(operation => alertTypes.map((alertType) => {
+ const operationAlert = operationAlerts.find(alert => (alert.operationName.toLowerCase() === operation.toLowerCase() && alert.type === alertType));
+
+ if (operationAlert !== undefined) {
+ return {
+ ...operationAlert
+ };
+ }
+ return {
+ operationName: operation,
+ type: alertType,
+ isUnhealthy: false,
+ timestamp: null
+ };
+ })));
+ }
+
+ return _.flatten(alertTypes.map(alertType => (_.filter(operationAlerts, alert => (alert.type === alertType)))));
+}
+
+function returnAnomalies(data) {
+ if (!data || !data.length || !data[0].anomaliesList.length) {
+ return [];
+ }
+
+ return _.flatten(data.map((anomaly) => {
+ const strength = anomaly.labelsMap.find(label => label[0] === 'anomalyLevel')[1];
+ return anomaly.anomaliesList.map(a => ({strength, ...a}));
+ }));
+}
+
+function getActiveAlertCount(operationAlerts) {
+ return operationAlerts.filter(opAlert => opAlert.isUnhealthy).length;
+}
+
+connector.getServiceAlerts = (serviceName, interval) => {
+ // todo: calculate "from" value based on selected interval
+ const oneDayAgo = Math.trunc((Date.now() - (24 * 60 * 60 * 1000)));
+ return Q.all([fetchOperations(decodeURIComponent(serviceName)), fetchOperationAlerts(serviceName, interval, oneDayAgo)])
+ .then(stats => mergeOperationsWithAlerts({
+ operations: stats[0],
+ operationAlerts: stats[1]
+ })
+ );
+};
+
+connector.getAnomalies = (serviceName, operationName, alertType, from, interval) => {
+ const stat = alertType === 'failure-span' ? 'count' : '*_99';
+
+ const request = new messages.SearchAnamoliesRequest();
+ request.getLabelsMap()
+ .set('serviceName', metricpointNameEncoder.encodeMetricpointName(decodeURIComponent(serviceName)))
+ .set('operationName', metricpointNameEncoder.encodeMetricpointName(decodeURIComponent(operationName)))
+ .set('product', 'haystack')
+ .set('metric_key', alertType)
+ .set('stat', stat)
+ .set('interval', interval)
+ .set('mtype', 'gauge');
+ request.setStarttime(Math.trunc(from / 1000));
+ request.setEndtime(Math.trunc(Date.now() / 1000));
+ request.setSize(-1);
+
+ return getAnomaliesFetcher
+ .fetch(request)
+ .then(pbResult => returnAnomalies(messages.SearchAnomaliesResponse.toObject(false, pbResult).searchanomalyresponseList));
+};
+
+connector.getServiceUnhealthyAlertCount = (serviceName, interval) =>
+ fetchOperationAlerts(serviceName, interval, Math.trunc((Date.now() - (5 * 60 * 1000))))
+ .then(result => getActiveAlertCount(result));
+
+module.exports = connector;
diff --git a/ui/server/connectors/alerts/haystack/expressionTreeBuilder.js b/ui/server/connectors/alerts/haystack/expressionTreeBuilder.js
new file mode 100644
index 000000000..c7126efe4
--- /dev/null
+++ b/ui/server/connectors/alerts/haystack/expressionTreeBuilder.js
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const requestBuilder = {};
+const messages = require('../../../../static_codegen/subscription/subscriptionManagement_pb');
+
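+// Builds a flat AND tree from the UI's key/value expression map, e.g.
+// {serviceName: 'svc', operationName: 'op'} becomes
+// AND(Field(serviceName, 'svc'), Field(operationName, 'op'))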
+requestBuilder.createSubscriptionExpressionTree = (subscription) => {
+ const expressionTree = new messages.ExpressionTree();
+ expressionTree.setOperator(messages.ExpressionTree.Operator.AND);
+ const uiExpressionTree = subscription.expressionTree;
+
+ const operands = Object.keys(uiExpressionTree).map((key) => {
+ const op = new messages.Operand();
+
+ const field = new messages.Field();
+ field.setName(key);
+ field.setValue(uiExpressionTree[key]);
+
+ op.setField(field);
+
+ return op;
+ });
+
+ expressionTree.setOperandsList(operands);
+ return expressionTree;
+};
+
+module.exports = requestBuilder;
diff --git a/ui/server/connectors/alerts/haystack/subscriptionsConnector.js b/ui/server/connectors/alerts/haystack/subscriptionsConnector.js
new file mode 100644
index 000000000..a16d83754
--- /dev/null
+++ b/ui/server/connectors/alerts/haystack/subscriptionsConnector.js
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const grpc = require('grpc');
+const messages = require('../../../../static_codegen/subscription/subscriptionManagement_pb');
+const expressionTreeBuilder = require('./expressionTreeBuilder');
+const config = require('../../../config/config');
+const services = require('../../../../static_codegen/subscription/subscriptionManagement_grpc_pb');
+const fetcher = require('../../operations/grpcFetcher');
+const putter = require('../../operations/grpcPutter');
+const deleter = require('../../operations/grpcDeleter');
+const poster = require('../../operations/grpcPoster');
+
+const grpcOptions = config.grpcOptions || {};
+
+const MetricpointNameEncoder = require('../../utils/encoders/MetricpointNameEncoder');
+
+const metricpointNameEncoder = new MetricpointNameEncoder(config.encoder);
+
+const client = new services.SubscriptionManagementClient(
+ `${config.connectors.alerts.haystackHost}:${config.connectors.alerts.haystackPort}`,
+ grpc.credentials.createInsecure(),
+ grpcOptions); // TODO make client secure
+
+const subscriptionPoster = poster('createSubscription', client);
+const subscriptionPutter = putter('updateSubscription', client);
+const subscriptionDeleter = deleter('deleteSubscription', client);
+const getSubscriptionFetcher = fetcher('getSubscription', client); // get individual subscription
+const searchSubscriptionFetcher = fetcher('searchSubscription', client); // get group of subscriptions
+
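+// Converts protobuf objects (all-lowercase field names) into the camelCase JSON shape the UI consumes.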
+const converter = {};
+
+converter.pbExpressionTreeToJson = (pbExpressionTree) => {
+ const expressionTree = {};
+ pbExpressionTree.operandsList.forEach((kvPair) => {
+ expressionTree[kvPair.field.name] = kvPair.field.value;
+ });
+ return expressionTree;
+};
+
+converter.toSubscriptionJson = pbSub => ({
+ subscriptionId: pbSub.subscriptionid,
+ user: pbSub.user,
+ dispatchersList: pbSub.dispatchersList,
+ expressionTree: converter.pbExpressionTreeToJson(pbSub.expressiontree),
+ lastModifiedTime: pbSub.lastmodifiedtime,
+ createdTime: pbSub.createdtime
+});
+
+const connector = {};
+
+// Get subscription from subscriptionId. Returns a promise resolving to the JSON Subscription.
+connector.getPBSubscription = (subscriptionId) => {
+ const request = new messages.GetSubscriptionRequest();
+ request.setSubscriptionid(subscriptionId);
+
+ return getSubscriptionFetcher
+ .fetch(request)
+ .then(result => converter.toSubscriptionJson(messages.SubscriptionResponse.toObject(false, result)));
+};
+
+// Alias of getPBSubscription, which already converts the response to JSON.
+connector.getSubscription = subscriptionId => connector.getPBSubscription(subscriptionId);
+
+// Search subscriptions given a set of labels. Returns a SearchSubscriptionResponse (array of SubscriptionResponses).
+connector.searchSubscriptions = (serviceName, operationName, alertType, interval) => {
+ const stat = alertType === 'failure-span' ? 'count' : '*_99';
+
+ const request = new messages.SearchSubscriptionRequest();
+ request.getLabelsMap()
+ .set('serviceName', metricpointNameEncoder.encodeMetricpointName(decodeURIComponent(serviceName)))
+ .set('operationName', metricpointNameEncoder.encodeMetricpointName(decodeURIComponent(operationName)))
+ .set('metric_key', alertType)
+ .set('stat', stat)
+ .set('interval', interval)
+ .set('product', 'haystack')
+ .set('mtype', 'gauge');
+
+ return searchSubscriptionFetcher
+ .fetch(request)
+ .then((result) => {
+ const pbResult = messages.SearchSubscriptionResponse.toObject(false, result);
+ return pbResult.subscriptionresponseList.map(pbSubResponse => converter.toSubscriptionJson(pbSubResponse));
+ });
+};
+
+function constructSubscription(subscriptionObj) {
+ const subscription = new messages.SubscriptionRequest();
+
+ // construct dispatcher list containing type (email or slack) and handle
+ const uiDispatchers = subscriptionObj.dispatchers.map((inputtedDispatcher) => {
+ const dispatcher = new messages.Dispatcher();
+
+ const type = inputtedDispatcher.type.toString() === '1' ? messages.DispatchType.SLACK : messages.DispatchType.EMAIL;
+ dispatcher.setType(type);
+ dispatcher.setEndpoint(inputtedDispatcher.endpoint);
+
+ return dispatcher;
+ });
+
+ subscription.setDispatchersList(uiDispatchers);
+ // construct expression tree from KV pairs in subscription object (e.g. serviceName, operationName, etc)
+ const expressionTree = expressionTreeBuilder.createSubscriptionExpressionTree(subscriptionObj);
+ subscription.setExpressiontree(expressionTree);
+
+ return subscription;
+}
+
+// Create a new subscription. Returns a subscription id.
+connector.addSubscription = (userName, subscriptionObj) => {
+ const user = new messages.User();
+ user.setUsername(userName);
+
+ const subscription = constructSubscription(subscriptionObj);
+
+ const request = new messages.CreateSubscriptionRequest();
+ request.setUser(user);
+ request.setSubscriptionrequest(subscription);
+
+ return subscriptionPoster.post(request);
+};
+
+// Update a subscription. Checks server for changes. If none, replace existing subscription with new SubscriptionRequest
+connector.updateSubscription = (id, clientSubscription) => (
+ connector.getPBSubscription(id)
+ .then((serverSubscription) => {
+ if (serverSubscription.lastModifiedTime === clientSubscription.old.lastModifiedTime) {
+ const subscription = constructSubscription(clientSubscription.modified);
+ const request = new messages.UpdateSubscriptionRequest();
+
+ request.setSubscriptionid(id);
+ request.setSubscriptionrequest(subscription);
+ return subscriptionPutter.put(request);
+ }
+
+ // todo: let UI know that subscription has already been modified
+ return null;
+ })
+);
+
+// Delete a subscription. Returns empty.
+connector.deleteSubscription = (id) => {
+ const request = new messages.DeleteSubscriptionRequest();
+ request.setSubscriptionid(id);
+
+ return subscriptionDeleter.delete(request)
+ .then(() => {});
+};
+
+module.exports = connector;
diff --git a/ui/server/connectors/alerts/stub/alertsConnector.js b/ui/server/connectors/alerts/stub/alertsConnector.js
new file mode 100644
index 000000000..82b16d7db
--- /dev/null
+++ b/ui/server/connectors/alerts/stub/alertsConnector.js
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+
+function getRandomTimeStamp() {
+ const currentTime = ((new Date()).getTime());
+ return (currentTime - Math.floor((Math.random() * 5000 * 60)));
+}
+
+function generateAnomaly() {
+ const currentTime = ((new Date()).getTime() / 1000);
+ const timestamp = (currentTime - Math.floor((Math.random() * 2000 * 60)));
+ const expectedvalue = Math.floor(Math.random() * 100000);
+ const observedvalue = Math.floor(expectedvalue * (Math.random() * 100));
+ return {
+ observedvalue,
+ expectedvalue,
+ timestamp,
+ strength: observedvalue % 2 ? 'STRONG' : 'WEAK'
+ };
+}
+
+function getAlerts() {
+ return [
+ {
+ operationName: 'tarley-1',
+ type: 'duration',
+ isUnhealthy: true,
+ timestamp: getRandomTimeStamp()
+ },
+ {
+ operationName: 'tarley-1',
+ type: 'failure-span',
+ isUnhealthy: true,
+ timestamp: getRandomTimeStamp()
+ },
+ {
+ operationName: 'tully-1',
+ type: 'duration',
+ isUnhealthy: false,
+ timestamp: getRandomTimeStamp()
+ },
+ {
+ operationName: 'tully-1',
+ type: 'failure-span',
+ isUnhealthy: false,
+ timestamp: getRandomTimeStamp()
+ },
+ {
+ operationName: 'tully-1',
+ type: 'duration',
+ isUnhealthy: false,
+ timestamp: getRandomTimeStamp()
+ },
+ {
+ operationName: 'tully-1',
+ type: 'failure-span',
+ isUnhealthy: false,
+ timestamp: getRandomTimeStamp()
+ },
+ {
+ operationName: 'dondarrion-1',
+ type: 'duration',
+ isUnhealthy: false,
+ timestamp: getRandomTimeStamp()
+ },
+ {
+ operationName: 'dondarrion-1',
+ type: 'failure-span',
+ isUnhealthy: false,
+ timestamp: getRandomTimeStamp()
+ }
+ ];
+}
+
+const anomalies = [
+ generateAnomaly(),
+ generateAnomaly(),
+ generateAnomaly(),
+ generateAnomaly(),
+ generateAnomaly(),
+ generateAnomaly(),
+ generateAnomaly(),
+ generateAnomaly(),
+ generateAnomaly(),
+ generateAnomaly()
+];
+
+const connector = {};
+
+connector.getServiceAlerts = () => Q.fcall(() => getAlerts());
+
+connector.getAnomalies = () => Q.fcall(() => anomalies);
+
+connector.getServiceUnhealthyAlertCount = () => Q.fcall(() => Math.floor(Math.random() * 3));
+
+module.exports = connector;
diff --git a/ui/server/connectors/alerts/stub/subscriptionsConnector.js b/ui/server/connectors/alerts/stub/subscriptionsConnector.js
new file mode 100644
index 000000000..301434be6
--- /dev/null
+++ b/ui/server/connectors/alerts/stub/subscriptionsConnector.js
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+
+const subscriptions = (serviceName, operationName, alertType, interval) => (
+ [
+ {
+ subscriptionId: 101,
+ user: {userName: 'haystack-team'},
+ dispatchersList: [
+ {
+ type: 1,
+ endpoint: '#haystack'
+ }
+ ],
+ expressionTree: {
+ serviceName,
+ operationName,
+ metric_key: alertType,
+ interval,
+ stat: alertType === 'failure-span' ? 'count' : '*_99',
+ mtype: 'gauge',
+ product: 'haystack'
+ }
+ },
+ {
+ subscriptionId: 102,
+ user: {userName: 'haystack-team'},
+ dispatchersList: [
+ {
+ type: 0,
+ endpoint: 'haystack@opentracing.io'
+ }
+ ],
+ expressionTree: {
+ serviceName,
+ operationName,
+ metric_key: alertType,
+ interval,
+ stat: alertType === 'failure-span' ? 'count' : '*_99',
+ mtype: 'gauge',
+ product: 'haystack'
+ }
+ }
+ ]
+);
+
+function searchSubscriptions(serviceName, operationName, alertType, interval) {
+ if (serviceName && operationName && alertType) {
+ return subscriptions(serviceName, operationName, alertType, interval);
+ }
+ throw new Error('Unable to get subscriptions');
+}
+
+function addSubscription(userName, subscriptionObj) {
+ if (userName && subscriptionObj) {
+ return Math.floor(Math.random() * 300);
+ }
+ throw new Error('Unable to add subscription');
+}
+
+function updateSubscription(id, subscription) {
+ if (id && subscription && subscription.old && subscription.modified) {
+ return;
+ }
+ throw new Error('Unable to update subscription');
+}
+
+function deleteSubscription(subscriptionId) {
+ if (subscriptionId) {
+ return;
+ }
+ throw new Error('Unable to delete subscription');
+}
+
+const connector = {};
+
+connector.searchSubscriptions = (serviceName, operationName, alertType, interval) => Q.fcall(() => searchSubscriptions(serviceName, operationName, alertType, interval));
+
+connector.addSubscription = (userName, subscriptionObj) => Q.fcall(() => addSubscription(
+ userName || 'haystack',
+ subscriptionObj)
+);
+
+connector.updateSubscription = (id, subscription) => Q.fcall(() => {
+ updateSubscription(id, subscription);
+});
+
+connector.deleteSubscription = subscriptionId => Q.fcall(() => deleteSubscription(subscriptionId));
+
+module.exports = connector;
diff --git a/ui/server/connectors/operations/grpcDeleter.js b/ui/server/connectors/operations/grpcDeleter.js
new file mode 100644
index 000000000..c3cf00926
--- /dev/null
+++ b/ui/server/connectors/operations/grpcDeleter.js
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+const errorConverter = require('../utils/errorConverter');
+const logger = require('../../utils/logger').withIdentifier('deleter.grpc');
+const metrics = require('../../utils/metrics');
+
+const config = require('../../config/config');
+
+function generateCallDeadline() {
+ return new Date().setMilliseconds(new Date().getMilliseconds() + config.upstreamTimeout);
+}
+
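+// Wraps a unary gRPC delete method in a promise, with a per-call deadline and latency/failure metrics.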
+const deleter = (deleterName, client) => ({
+ delete: (request) => {
+ const deferred = Q.defer();
+ const timer = metrics.timer(`deleter_grpc_${deleterName}`).start();
+
+ client[deleterName](request, {deadline: generateCallDeadline()}, (error, result) => {
+ timer.end();
+ if (error || !result) {
+ logger.info(`delete failed: ${deleterName}`);
+ metrics.meter(`deleter_grpc_failure_${deleterName}`).mark();
+
+ deferred.reject(errorConverter.fromGrpcError(error));
+ } else {
+ logger.info(`delete successful: ${deleterName}`);
+
+ deferred.resolve(result);
+ }
+ });
+
+ return deferred.promise;
+ }
+});
+
+module.exports = deleter;
diff --git a/ui/server/connectors/operations/grpcFetcher.js b/ui/server/connectors/operations/grpcFetcher.js
new file mode 100644
index 000000000..63fd67b4b
--- /dev/null
+++ b/ui/server/connectors/operations/grpcFetcher.js
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+const errorConverter = require('../utils/errorConverter');
+const logger = require('../../utils/logger').withIdentifier('fetcher.grpc');
+const metrics = require('../../utils/metrics');
+
+const config = require('../../config/config');
+
+function generateCallDeadline() {
+ return new Date().setMilliseconds(new Date().getMilliseconds() + config.upstreamTimeout);
+}
+
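+// Same promise/metrics wrapper pattern as grpcDeleter, applied to unary fetch calls.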
+const fetcher = (fetcherName, client) => ({
+ fetch: (request) => {
+ const deferred = Q.defer();
+ const timer = metrics.timer(`fetcher_grpc_${fetcherName}`).start();
+
+ client[fetcherName](request, {deadline: generateCallDeadline()}, (error, result) => {
+ timer.end();
+ if (error || !result) {
+ logger.error(`fetch failed: ${fetcherName}`);
+ metrics.meter(`fetcher_grpc_failure_${fetcherName}`).mark();
+
+ deferred.reject(errorConverter.fromGrpcError(error));
+ } else {
+ logger.info(`fetch successful: ${fetcherName}`);
+
+ deferred.resolve(result);
+ }
+ });
+
+ return deferred.promise;
+ }
+});
+
+module.exports = fetcher;
diff --git a/ui/server/connectors/operations/grpcPoster.js b/ui/server/connectors/operations/grpcPoster.js
new file mode 100644
index 000000000..4a1d19724
--- /dev/null
+++ b/ui/server/connectors/operations/grpcPoster.js
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+const errorConverter = require('../utils/errorConverter');
+const logger = require('../../utils/logger').withIdentifier('poster.grpc');
+const metrics = require('../../utils/metrics');
+
+const config = require('../../config/config');
+
+function generateCallDeadline() {
+ return new Date().setMilliseconds(new Date().getMilliseconds() + config.upstreamTimeout);
+}
+
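+// Same promise/metrics wrapper pattern as grpcDeleter, applied to unary post calls.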
+const poster = (posterName, client) => ({
+ post: (request) => {
+ const deferred = Q.defer();
+ const timer = metrics.timer(`poster_grpc_${posterName}`).start();
+
+ client[posterName](request, {deadline: generateCallDeadline()}, (error, result) => {
+ timer.end();
+ if (error || !result) {
+ logger.error(`post failed: ${posterName}`);
+ metrics.meter(`poster_grpc_failure_${posterName}`).mark();
+
+ deferred.reject(errorConverter.fromGrpcError(error));
+ } else {
+ logger.info(`post successful: ${posterName}`);
+
+ deferred.resolve(result);
+ }
+ });
+
+ return deferred.promise;
+ }
+});
+
+module.exports = poster;
diff --git a/ui/server/connectors/operations/grpcPutter.js b/ui/server/connectors/operations/grpcPutter.js
new file mode 100644
index 000000000..e8ac624d4
--- /dev/null
+++ b/ui/server/connectors/operations/grpcPutter.js
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+const errorConverter = require('../utils/errorConverter');
+const logger = require('../../utils/logger').withIdentifier('putter.grpc');
+const metrics = require('../../utils/metrics');
+
+const config = require('../../config/config');
+
+function generateCallDeadline() {
+ return new Date().setMilliseconds(new Date().getMilliseconds() + config.upstreamTimeout);
+}
+
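+// Same promise/metrics wrapper pattern as grpcDeleter, applied to unary put calls.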
+const putter = (putterName, client) => ({
+ put: (request) => {
+ const deferred = Q.defer();
+ const timer = metrics.timer(`putter_grpc_${putterName}`).start();
+
+ client[putterName](request, {deadline: generateCallDeadline()}, (error, result) => {
+ timer.end();
+ if (error || !result) {
+ logger.error(`put failed: ${putterName}`);
+ metrics.meter(`putter_grpc_failure_${putterName}`).mark();
+
+ deferred.reject(errorConverter.fromGrpcError(error));
+ } else {
+ logger.info(`put successful: ${putterName}`);
+
+ deferred.resolve(result);
+ }
+ });
+
+ return deferred.promise;
+ }
+});
+
+module.exports = putter;
diff --git a/ui/server/connectors/operations/restFetcher.js b/ui/server/connectors/operations/restFetcher.js
new file mode 100644
index 000000000..2e79e2290
--- /dev/null
+++ b/ui/server/connectors/operations/restFetcher.js
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const axios = require('axios');
+const Q = require('q');
+const errorConverter = require('../utils/errorConverter');
+const logger = require('../../utils/logger').withIdentifier('fetcher.rest');
+const metrics = require('../../utils/metrics');
+
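+// Wraps axios GET calls with a metrics timer and adapts the result to a Q promise,
+// so REST-backed fetchers expose the same interface as the gRPC operations.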
+const fetcher = (fetcherName) => ({
+ fetch: (url, headers = {}) => {
+ const deferred = Q.defer();
+ const timer = metrics.timer(`fetcher_rest_${fetcherName}`).start();
+
+ axios.get(url, {headers}).then(
+ (response) => {
+ timer.end();
+ logger.info(`fetch successful: ${url}`);
+
+ deferred.resolve(response.data);
+ },
+ (error) => {
+ timer.end();
+ metrics.meter(`fetcher_rest_failure_${fetcherName}`).mark();
+ logger.error(`fetch failed: ${url}`);
+
+ deferred.reject(errorConverter.fromAxiosError(error));
+ }
+ );
+
+ return deferred.promise;
+ }
+});
+
+module.exports = fetcher;
diff --git a/ui/server/connectors/serviceGraph/haystack/graphDataExtractor.js b/ui/server/connectors/serviceGraph/haystack/graphDataExtractor.js
new file mode 100644
index 000000000..78b9dd3f1
--- /dev/null
+++ b/ui/server/connectors/serviceGraph/haystack/graphDataExtractor.js
@@ -0,0 +1,159 @@
+/* eslint-disable no-param-reassign */
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const _ = require('lodash');
+
+const extractor = {};
+
+const config = require('../../../config/config');
+
+const WINDOW_SIZE_IN_SECS = config.connectors.serviceGraph && config.connectors.serviceGraph.windowSizeInSecs;
+
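+// A vertex may be a plain service-name string or an object carrying a name; normalize to the name.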
+function getEdgeName(vertex) {
+ if (vertex.name) {
+ return vertex.name;
+ }
+ return vertex;
+}
+
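+// Normalize raw edge counts to per-second rates using the aggregation window, then drop duplicate edges.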
+function flattenStats(edges) {
+ const serviceEdges = edges.map(edge => ({
+ source: {
+ name: getEdgeName(edge.source),
+ tags: edge.source.tags
+ },
+ destination: {
+ name: getEdgeName(edge.destination),
+ tags: edge.destination.tags
+ },
+ stats: {
+ count: (edge.stats.count / WINDOW_SIZE_IN_SECS),
+ errorCount: (edge.stats.errorCount / WINDOW_SIZE_IN_SECS)
+ }
+ }));
+ return _.uniqWith(serviceEdges, _.isEqual);
+}
+
+function filterEdgesInComponent(component, edges) {
+ const componentEdges = [];
+
+ edges.forEach((edge) => {
+ if (component.includes(edge.source.name) || component.includes(edge.destination.name)) {
+ componentEdges.push(edge);
+ }
+ });
+
+ return componentEdges;
+}
+
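+// Append the source to the destination's adjacency list, creating the entry on first sight.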
+function updatedDestination(graph, destination, source) {
+ if (graph[destination]) {
+ graph[destination].to = [...graph[destination].to, source];
+ } else {
+ graph[destination] = { to: [source] };
+ }
+}
+
+function toUndirectedGraph(edges) {
+ const graph = {};
+ edges.forEach((edge) => {
+ if (graph[edge.source.name]) {
+ // add or update source
+ graph[edge.source.name].to = [...graph[edge.source.name].to, edge.destination.name];
+
+ // add or update destination
+ updatedDestination(graph, edge.destination.name, edge.source.name);
+ } else {
+ // create edge at the source
+ graph[edge.source.name] = { to: [edge.destination.name] };
+
+ // add or update destination
+ updatedDestination(graph, edge.destination.name, edge.source.name);
+ }
+ });
+
+ return graph;
+}
+
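+// Iterative depth-first traversal with an explicit stack; returns every node reachable from the starting node.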
+function doDepthFirstTraversal(graph, node) {
+ const traversedNodes = [];
+ const traversing = [];
+
+ traversing.push(node);
+ graph[node].isTraversing = true;
+
+ while (traversing.length) {
+ const nextNode = traversing.pop();
+ traversedNodes.push(nextNode);
+ graph[nextNode].isTraversed = true;
+
+ graph[nextNode].to.forEach((to) => {
+ if (!graph[to].isTraversing) {
+ graph[to].isTraversing = true;
+ traversing.push(to);
+ }
+ });
+ }
+
+ return traversedNodes;
+}
+
+function filterUntraversed(graph) {
+ return Object.keys(graph).filter(node => !graph[node].isTraversed);
+}
+
+function extractConnectedComponents(edges) {
+ // converting to adjacency list undirected graph
+ const graph = toUndirectedGraph(edges);
+ // perform depth first graph traversals to get connected components list
+ // until all the disjoint graphs are traversed
+ const connectedComponents = [];
+ let untraversedNodes = filterUntraversed(graph);
+ while (untraversedNodes.length) {
+ connectedComponents.push(doDepthFirstTraversal(graph, untraversedNodes[0]));
+ untraversedNodes = filterUntraversed(graph);
+ }
+
+ // return list of connected components
+ return connectedComponents;
+}
+
+extractor.extractGraphFromEdges = (serviceToServiceEdges) => {
+ // get list of connected components in the full graph
+ const connectedComponents = extractConnectedComponents(serviceToServiceEdges);
+
+ // order components by service count
+ const sortedConnectedComponents = connectedComponents.sort((a, b) => b.length - a.length);
+
+ // split edges list by connected components
+ // thus form multiple sub-graphs
+ const graphs = [];
+ sortedConnectedComponents.forEach(component => graphs.push(filterEdgesInComponent(component, serviceToServiceEdges)));
+
+ // return graphs, one for each connected component
+ return graphs;
+};
+
+extractor.extractGraphs = (data) => {
+ // convert servicegraph to expected ui data format
+ const serviceToServiceEdges = flattenStats(data.edges);
+ return extractor.extractGraphFromEdges(serviceToServiceEdges);
+};
+
+module.exports = extractor;
diff --git a/ui/server/connectors/serviceGraph/haystack/serviceGraphConnector.js b/ui/server/connectors/serviceGraph/haystack/serviceGraphConnector.js
new file mode 100644
index 000000000..3a94faba4
--- /dev/null
+++ b/ui/server/connectors/serviceGraph/haystack/serviceGraphConnector.js
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the 'License');
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an 'AS IS' BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+
+const fetcher = require('../../operations/restFetcher');
+const config = require('../../../config/config');
+const extractor = require('./graphDataExtractor');
+
+const trendsFetcher = fetcher('serviceGraph');
+
+const connector = {};
+const serviceGraphUrl = config.connectors.serviceGraph && config.connectors.serviceGraph.serviceGraphUrl;
+
+function fetchServiceGraph(from, to) {
+ return trendsFetcher
+ .fetch(`${serviceGraphUrl}?from=${from}&to=${to}`)
+ .then(data => extractor.extractGraphs(data));
+}
+
+connector.getServiceGraphForTimeLine = (from, to) => Q.fcall(() => fetchServiceGraph(from, to));
+
+module.exports = connector;
diff --git a/ui/server/connectors/serviceGraph/stub/serviceGraphConnector.js b/ui/server/connectors/serviceGraph/stub/serviceGraphConnector.js
new file mode 100644
index 000000000..701c4757f
--- /dev/null
+++ b/ui/server/connectors/serviceGraph/stub/serviceGraphConnector.js
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the 'License');
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an 'AS IS' BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+
+const connector = {};
+
+const extractor = require('../haystack/graphDataExtractor');
+
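+// Static fixture graph used to exercise the service-graph UI without a live backend.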
+const serviceGraph = {
+ edges: [
+ {
+ source: {
+ name: 'stark-service',
+ tags: {
+ DEPLOYMENT: 'aws'
+ }
+ },
+ destination: {
+ name: 'baratheon-service'
+ },
+ operation: 'baratheon-1',
+ stats: {
+ count: 55500,
+ errorCount: 9000
+ }
+ },
+ {
+ source: {
+ name: 'stark-service'
+ },
+ destination: {
+ name: 'grayjoy-service'
+ },
+ operation: 'grayjoy-1',
+ stats: {
+ count: 21005,
+ errorCount: 1009
+ }
+ },
+ {
+ source: {
+ name: 'baratheon-service'
+ },
+ destination: {
+ name: 'lannister-service'
+ },
+ operation: 'lannister-1',
+ stats: {
+ count: 23456,
+ errorCount: 678
+ }
+ },
+ {
+ source: {
+ name: 'baratheon-service'
+ },
+ destination: {
+ name: 'clegane-service'
+ },
+ operation: 'clegane-1',
+ stats: {
+ count: 401,
+ errorCount: 13
+ }
+ },
+ {
+ source: {
+ name: 'lannister-service'
+ },
+ destination: {
+ name: 'tyrell-service'
+ },
+ operation: 'tyrell-1',
+ stats: {
+ count: 30000,
+ errorCount: 2
+ }
+ },
+ {
+ source: {
+ name: 'tyrell-service'
+ },
+ destination: {
+ name: 'targaryen-service'
+ },
+ operation: 'targaryen-1',
+ stats: {
+ count: 50004,
+ errorCount: 20000
+ }
+ },
+ {
+ source: {
+ name: 'tyrell-service'
+ },
+ destination: {
+ name: 'tully-service'
+ },
+ operation: 'tully-1',
+ stats: {
+ count: 121,
+ errorCount: 1
+ }
+ },
+ {
+ source: {
+ name: 'targaryen-service'
+ },
+ destination: {
+ name: 'dragon-service'
+ },
+ operation: 'dragon-1',
+ stats: {
+ count: 19000,
+ errorCount: 800
+ }
+ },
+ {
+ source: {
+ name: 'targaryen-service'
+ },
+ destination: {
+ name: 'drogo-service'
+ },
+ operation: 'drogo-1',
+ stats: {
+ count: 98,
+ errorCount: 0
+ }
+ },
+ {
+ source: {
+ name: 'targaryen-service'
+ },
+ destination: {
+ name: 'mormont-service'
+ },
+ operation: 'mormont-1',
+ stats: {
+ count: 5000,
+ errorCount: 100
+ }
+ }
+ ]
+};
+
+/* eslint-disable-next-line no-unused-vars */
+connector.getServiceGraphForTimeLine = (from, to) => Q.fcall(() => extractor.extractGraphs(serviceGraph));
+
+module.exports = connector;
diff --git a/ui/server/connectors/serviceGraph/zipkin/converter.js b/ui/server/connectors/serviceGraph/zipkin/converter.js
new file mode 100644
index 000000000..becfd4b8c
--- /dev/null
+++ b/ui/server/connectors/serviceGraph/zipkin/converter.js
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const config = require('../../../config/config');
+
+// Zipkin dependency data is bucketed daily. Also the data is usually sampled.
+// This means that people interpreting this will likely be misled, mistaking
+// traced requests per bucket for requests per window. For example, a bucket
+// holds up to one day of data, but if just past midnight it could be only
+// one second of data!
+const WINDOW_SIZE_IN_SECS = config.connectors.serviceGraph && config.connectors.serviceGraph.windowSizeInSecs;
+
+// A Zipkin dependency link carries no tags or operation name, so we
+// return a constant operation name of "unknown" for now.
+function toHaystackEdge(dependencyLink) {
+ const res = {
+ source: {
+ name: dependencyLink.parent
+ },
+ destination: {
+ name: dependencyLink.child
+ },
+ // Zipkin doesn't aggregate operation -> operation, rather service -> service
+ operation: 'unknown',
+ stats: {
+ count: (dependencyLink.callCount / WINDOW_SIZE_IN_SECS),
+ errorCount: 0
+ }
+ };
+ if (dependencyLink.errorCount) {
+ res.stats.errorCount = (dependencyLink.errorCount / WINDOW_SIZE_IN_SECS);
+ }
+ return res;
+}
+
+function toHaystackServiceEdges(dependencyLinks) {
+ return dependencyLinks.map(dependencyLink => toHaystackEdge(dependencyLink));
+}
+
+const converter = {};
+
+// exported for testing
+converter.toHaystackEdge = toHaystackEdge;
+converter.toHaystackServiceEdges = toHaystackServiceEdges;
+
+module.exports = converter;
diff --git a/ui/server/connectors/serviceGraph/zipkin/serviceGraphConnector.js b/ui/server/connectors/serviceGraph/zipkin/serviceGraphConnector.js
new file mode 100644
index 000000000..67d9c5779
--- /dev/null
+++ b/ui/server/connectors/serviceGraph/zipkin/serviceGraphConnector.js
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the 'License');
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an 'AS IS' BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+
+const fetcher = require('../../operations/restFetcher');
+const config = require('../../../config/config');
+const converter = require('./converter');
+const extractor = require('../haystack/graphDataExtractor');
+
+const dependenciesFetcher = fetcher('getDependencies');
+
+const connector = {};
+const baseZipkinUrl = config.connectors.serviceGraph.zipkinUrl;
+
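+// The Zipkin dependencies API takes an end timestamp plus a lookback window rather than a from/to pair.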
+function fetchServiceGraph(from, to) {
+ const endTs = parseInt(to, 10);
+ const lookback = endTs - parseInt(from, 10);
+
+ return dependenciesFetcher
+ .fetch(`${baseZipkinUrl}/dependencies?endTs=${endTs}&lookback=${lookback}`)
+ .then(data => extractor.extractGraphFromEdges(converter.toHaystackServiceEdges(data)));
+}
+
+connector.getServiceGraphForTimeLine = (from, to) => Q.fcall(() => fetchServiceGraph(from, to));
+
+module.exports = connector;
diff --git a/ui/server/connectors/serviceInsights/detectCycles.js b/ui/server/connectors/serviceInsights/detectCycles.js
new file mode 100644
index 000000000..51567ddf4
--- /dev/null
+++ b/ui/server/connectors/serviceInsights/detectCycles.js
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2019 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the 'License');
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an 'AS IS' BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * detectCycles()
+ * Takes nodes/links and an idAccessor function and marks the nodes/links that participate in cycles
+ * @param {object} dag - contains "nodes" and "links" properties that describe a graph
+ * @param {function} idAccessor - function that returns the ID of a node
+ * @returns {number} number of cycles found
+ */
+function detectCycles({nodes, links}, idAccessor = (node) => node.id) {
+ // Build "graph" map to enable cycle analysis
+ const graph = new Map();
+ nodes.forEach((node) => {
+ const nodeId = idAccessor(node);
+ graph.set(nodeId, {
+ data: node,
+ out: [],
+ depth: -1
+ });
+ });
+
+ // Extract link information into graph of nodes
+ links.forEach(({source, target}) => {
+ // Grab source and target nodes
+ const sourceNode = graph.get(source);
+ const targetNode = graph.get(target);
+
+ // Sanity check the graph to make sure the ID accessor resolved both endpoints
+ if (!sourceNode) throw new Error(`Missing source node with id: ${source}`);
+ if (!targetNode) throw new Error(`Missing target node with id: ${target}`);
+
+ // Create "out" dependency for sourceNode
+ sourceNode.out.push(targetNode);
+ });
+
+ // Convert Map to Array of objects
+ const graphNodes = [];
+ graph.forEach((graphNode) => {
+ graphNodes.push(graphNode);
+ });
+
+ // Simple map for found offenses
+ const foundOffenses = {};
+
+ // traverse() - Recursive helper function that detects cycles
+ // Steps recursively left-to-right, top-down through the left-to-right DAG
+ // eslint-disable-next-line consistent-return, no-shadow
+ function traverse(nodes, nodeStack = []) {
+ // What depth are we at?
+ const currentDepth = nodeStack.length;
+
+ // Look at nodes at current depth, top to bottom
+ for (let i = 0, l = nodes.length; i < l; i++) {
+ const node = nodes[i];
+ if (nodeStack.indexOf(node) !== -1) {
+ // Cycle detected: mark every node on the cycle and record the offending path
+ const cycleNodes = [...nodeStack.slice(nodeStack.indexOf(node)), node];
+ const loop = cycleNodes.map((d) => {
+ d.data.invalidCycleDetected = true;
+ return idAccessor(d.data);
+ });
+ const invalidLoopPathString = loop.join(' -> ');
+ cycleNodes.forEach((d) => {
+ d.data.invalidCyclePath = invalidLoopPathString;
+ });
+ foundOffenses[invalidLoopPathString] = foundOffenses[invalidLoopPathString] ? foundOffenses[invalidLoopPathString] + 1 : 1;
+ return true; // Return from recursive depth
+ }
+ if (currentDepth > node.depth) {
+ // Don't unnecessarily revisit chunks of the graph
+ node.depth = currentDepth;
+ traverse(node.out, [...nodeStack, node]);
+ }
+ }
+ }
+
+ // Begin cycle analysis
+ traverse(graphNodes);
+
+ return Object.keys(foundOffenses).length;
+}
+
+module.exports = {
+ detectCycles
+};
diff --git a/ui/server/connectors/serviceInsights/fetcher.js b/ui/server/connectors/serviceInsights/fetcher.js
new file mode 100644
index 000000000..4f8bf8449
--- /dev/null
+++ b/ui/server/connectors/serviceInsights/fetcher.js
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2019 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+const _ = require('lodash');
+
+const config = require('../../config/config');
+
+const tracesConnector = require(`../../connectors/traces/${config.connectors.traces.connectorName}/tracesConnector`); // eslint-disable-line import/no-dynamic-require
+const logger = require('../../utils/logger').withIdentifier('fetcher.serviceInsights');
+const metrics = require('../../utils/metrics');
+
+const TRACE_LIMIT = config.connectors.serviceInsights.traceLimit;
+
+const fetcher = (fetcherName) => ({
+ fetch(options) {
+ const {serviceName, operationName, traceId, startTime, endTime} = options;
+
+ // local vars
+ const deferred = Q.defer();
+ const timer = metrics.timer(`fetcher_${fetcherName}`).start();
+
+ // use given limit or default
+ const limit = options.limit || TRACE_LIMIT;
+
+ // the traces API expects span-level filters as a JSON array of JSON-encoded filter objects
+ const spanLevelFilters = JSON.stringify([JSON.stringify({
+ serviceName,
+ operationName,
+ traceId
+ })]);
+
+ // use traces connector
+ tracesConnector
+ .findTracesFlat({
+ startTime,
+ endTime,
+ limit,
+ spanLevelFilters
+ })
+ .then((traces) => {
+ // check for 1 or more traces
+ const hasTraces = traces && traces.length > 0;
+ if (hasTraces) {
+ // flatten the array of traces into a single array of spans
+ const spans = _.flatten(traces);
+
+ // complete timer
+ timer.end();
+
+ // log success message
+ logger.info(`fetch successful: ${fetcherName}`);
+
+ // resolve promise
+ deferred.resolve({
+ serviceName,
+ spans,
+ traceLimitReached: traces.length === limit
+ });
+ } else {
+ // log no traces found message
+ logger.info(`fetch successful with no traces: ${fetcherName}`);
+
+ // complete timer
+ timer.end();
+
+ // resolve promise
+ deferred.resolve({serviceName, spans: [], traceLimitReached: false});
+ }
+ })
+ .catch((error) => {
+ // without a rejection handler the promise would never settle when the traces connector fails;
+ // this mirrors restFetcher's failure handling (the failure meter name is this edit's choice)
+ timer.end();
+ metrics.meter(`fetcher_failure_${fetcherName}`).mark();
+ logger.error(`fetch failed: ${fetcherName}`);
+
+ deferred.reject(error);
+ });
+
+ // return promise
+ return deferred.promise;
+ }
+});
+
+module.exports = fetcher;
diff --git a/ui/server/connectors/serviceInsights/graphDataExtractor.js b/ui/server/connectors/serviceInsights/graphDataExtractor.js
new file mode 100644
index 000000000..59226fdf3
--- /dev/null
+++ b/ui/server/connectors/serviceInsights/graphDataExtractor.js
@@ -0,0 +1,478 @@
+/* eslint-disable no-param-reassign */
+/*
+ * Copyright 2019 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const {type, relationship} = require('../../../universal/enums');
+const {detectCycles} = require('./detectCycles');
+const {edge, gateway, mesh, database, outbound, service} = require('../../config/config').connectors.serviceInsights.spanTypes;
+
+/**
+ * caseInsensitiveEquals()
+ * Function that returns true if a === b, case insensitive
+ * @param {*} a
+ * @param {*} b
+ */
+function caseInsensitiveEquals(a, b) {
+ return a && b && a.toLowerCase() === b.toLowerCase();
+}
+
+/**
+ * createNode()
+ * Function to create a graph node, enforcing the required data schema
+ * @param {object} data
+ */
+function createNode(data) {
+ // Sanity check required properties
+ ['id', 'name'].forEach((requiredProperty) => {
+ /* istanbul ignore if -- this is to identify misconfiguration during development */
+ if (typeof data[requiredProperty] === 'undefined') {
+ throw new Error(`Missing required property ${requiredProperty} when calling createNode()`);
+ }
+ });
+ return {
+ count: 1,
+ ...data
+ };
+}
+
+/**
+ * getIdForLink()
+ * Generate an id for a link from source to target, idempotent
+ * @param {string} source
+ * @param {string} target
+ * @returns {string}
+ */
+function getIdForLink(source, target) {
+ return `${source}→${target}`;
+}
+
+/**
+ * createLink()
+ * Function to create a graph edge, enforcing the required data schema
+ * @param {object} data
+ */
+function createLink(data) {
+ // Sanity check required properties
+ ['source', 'target'].forEach((requiredProperty) => {
+ /* istanbul ignore if -- this is to identify misconfiguration during development */
+ if (typeof data[requiredProperty] === 'undefined') {
+ throw new Error(`Missing required property ${requiredProperty} when calling createLink()`);
+ }
+ });
+ const id = getIdForLink(data.source, data.target);
+ return {
+ id,
+ isUninstrumented: false,
+ count: 1,
+ tps: 1,
+ ...data
+ };
+}
+
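+// Span-type checks below run from most specific (edge, gateway, mesh, database, outbound) to the service default.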
+/**
+ * getNodeNameFromSpan()
+ * Gets the display name given a span object
+ * @param {object} span - Haystack span object
+ */
+function getNodeNameFromSpan(span) {
+ if (edge && edge.isType(span)) {
+ return edge.nodeName(span);
+ }
+ if (gateway && gateway.isType(span)) {
+ return gateway.nodeName(span);
+ }
+ if (mesh && mesh.isType(span)) {
+ return mesh.nodeName(span);
+ }
+ if (database && database.isType(span)) {
+ return database.nodeName(span);
+ }
+ if (outbound && outbound.isType(span)) {
+ return outbound.nodeName(span);
+ }
+ /* istanbul ignore else -- required configuration */
+ if (service) {
+ return service.nodeName(span);
+ }
+ /* istanbul ignore next */
+ throw new Error('Missing required configuration: connectors.serviceInsights.spanTypes.service');
+}
+
+/**
+ * getNodeIdFromSpan()
+ * Gets the unique id given a span object, considering when to treat spans as the same node or separate
+ * @param {object} span - Haystack span object
+ */
+function getNodeIdFromSpan(span) {
+ if (edge && edge.isType(span)) {
+ return edge.nodeId(span);
+ }
+ if (gateway && gateway.isType(span)) {
+ return gateway.nodeId(span);
+ }
+ if (mesh && mesh.isType(span)) {
+ return mesh.nodeId(span);
+ }
+ if (database && database.isType(span)) {
+ return database.nodeId(span);
+ }
+ if (outbound && outbound.isType(span)) {
+ return outbound.nodeId(span);
+ }
+ /* istanbul ignore else -- required configuration */
+ if (service) {
+ return service.nodeId(span);
+ }
+ /* istanbul ignore next */
+ throw new Error('Missing required configuration: connectors.serviceInsights.spanTypes.service');
+}
+
+/**
+ * traverseDownstream()
+ * Traverse downstream nodes and set their relationship if not already set.
+ * @param {object} startingNode - traverse nodes downstream from this one; this node itself is unmodified
+ * @param {boolean} distributary - set the relationship to distributary, otherwise downstream
+ */
+function traverseDownstream(startingNode, distributary = false) {
+ startingNode.downstream.forEach((downstreamNode) => {
+ if (!downstreamNode.relationship) {
+ downstreamNode.relationship = distributary ? relationship.distributary : relationship.downstream;
+ traverseDownstream(downstreamNode, distributary);
+ }
+ });
+}
+
+/**
+ * traverseUpstream()
+ * Traverse upstream nodes and set their relationship to upstream if not already set.
+ * @param {object} startingNode - traverse nodes upstream from this one; this node itself is unmodified
+ */
+function traverseUpstream(startingNode) {
+ startingNode.upstream.forEach((upstreamNode) => {
+ if (!upstreamNode.relationship) {
+ upstreamNode.relationship = relationship.upstream;
+ traverseUpstream(upstreamNode);
+ traverseDownstream(upstreamNode, true);
+ }
+ });
+}
+
+/**
+ * findViolations()
+ * Find violations in the given nodes and links
+ * @param {Map} nodes - Map of nodes
+ * @param {Map} links - Map of links
+ * @returns {object}
+ */
+function findViolations(nodes, links) {
+ // Define map of violations
+ const violations = {};
+
+ // Mark nodes and links that form invalid DAG cycles
+ const cyclesFound = detectCycles({nodes, links});
+
+ // Process invalid DAG cycle
+ links.forEach((link) => {
+ const source = nodes.get(link.source);
+ const target = nodes.get(link.target);
+
+ if (source.invalidCycleDetected === true && target.invalidCycleDetected === true) {
+ link.invalidCycleDetected = true;
+ link.invalidCyclePath = source.invalidCyclePath;
+ }
+ });
+
+ // Summarize cycle violations
+ if (cyclesFound > 0) {
+ violations.cycles = cyclesFound;
+ }
+
+ // Store count of uninstrumented
+ const uninstrumentedCount = [...nodes.values()]
+ .map((node) => (node.type === type.uninstrumented ? 1 : 0))
+ .reduce((count, current) => count + current, 0);
+
+ // Summarize unique count of uninstrumented dependencies
+ if (uninstrumentedCount > 0) {
+ violations.uninstrumented = uninstrumentedCount;
+ }
+
+ return violations;
+}
+
+/**
+ * processNodesAndLinks()
+ * Process nodes and links
+ * @param {Map} nodes - Map of nodes
+ * @param {Map} links - Map of links
+ * @returns {object}
+ */
+function processNodesAndLinks(nodes, links, relationshipFilter) {
+ // Store unique traces to calculate how many traces were considered
+ const uniqueTraces = new Set();
+
+ // Temporary references to simplify processing
+ nodes.forEach((node) => {
+ node.upstream = [];
+ node.downstream = [];
+ node.links = [];
+ });
+
+ // Process Links
+ links.forEach((link) => {
+ const source = nodes.get(link.source);
+ const target = nodes.get(link.target);
+
+ // Simplify traversal by setting upstream and downstream nodes
+ source.downstream.push(target);
+ source.links.push(link);
+ target.upstream.push(source);
+ target.links.push(link);
+ });
+
+ // Traverse nodes upstream and downstream of the central node and set their relationship
+ const centralNode = [...nodes.values()].find((node) => node.relationship === relationship.central);
+ if (centralNode) {
+ traverseDownstream(centralNode);
+ traverseUpstream(centralNode);
+ }
+
+ // Process nodes
+ nodes.forEach((node) => {
+ // Detect unique traces
+ node.traceIds.forEach((traceId) => {
+ uniqueTraces.add(traceId);
+ });
+
+ // Nodes not previously traversed have an unknown relationship
+ if (!node.relationship) {
+ node.relationship = relationship.unknown;
+ }
+
+ // Check if un-instrumented mesh or client span
+ if (node.downstream.length === 0) {
+ if (node.type === type.mesh) {
+ // Create uninstrumented node and add it to the map
+ const uninstrumentedNode = createNode({
+ ...node,
+ id: `${node.id}-missing-trace`,
+ name: 'Uninstrumented Service',
+ serviceName: 'unknown',
+ type: type.uninstrumented,
+ relationship: node.relationship
+ });
+ nodes.set(uninstrumentedNode.id, uninstrumentedNode);
+
+ // Create link to uninstrumented node
+ const uninstrumentedLink = createLink({
+ source: node.id,
+ target: uninstrumentedNode.id,
+ isUninstrumented: true
+ });
+ node.links.push(uninstrumentedLink);
+ uninstrumentedNode.links.push(uninstrumentedLink);
+ links.set(uninstrumentedLink.id, uninstrumentedLink);
+ } else if (node.type === type.outbound) {
+ node.type = type.uninstrumented;
+ }
+ }
+ });
+
+ // Construct a filter
+ const filter = [relationship.central]; // always include the central node
+ if (relationshipFilter && relationshipFilter.length) {
+ filter.push(...relationshipFilter); // use the relationship filter param if provided
+ } else {
+ filter.push(relationship.upstream, relationship.downstream); // otherwise default to upstream and downstream
+ }
+
+ // Process nodes again, now with destructive operations
+ nodes.forEach((node) => {
+ // Filter out nodes not directly upstream or downstream, and their links
+ if (!filter.some((r) => r === relationship.all || r === node.relationship)) {
+ nodes.delete(node.id);
+ node.links.forEach((link) => {
+ links.delete(link.id);
+ });
+ }
+
+ // Remove temporary properties before serializing
+ delete node.upstream;
+ delete node.downstream;
+ delete node.links;
+ });
+
+ // Find violations
+ const violations = findViolations(nodes, links);
+
+ // Summarize if any types of violations found
+ const hasViolations = Object.keys(violations).length > 0;
+
+ return {
+ violations,
+ hasViolations,
+ tracesConsidered: uniqueTraces.size
+ };
+}
+
+/**
+ * createNodeFromSpan()
+ * @param {string} nodeId
+ * @param {object} span
+ * @param {string} serviceName indicates which service is central to this graph
+ */
+function createNodeFromSpan(nodeId, span, serviceName) {
+ const nodeName = getNodeNameFromSpan(span);
+
+ const node = createNode({
+ id: nodeId,
+ name: nodeName,
+ serviceName: span.serviceName,
+ duration: span.duration,
+ operations: {[`${span.operationName}`]: 1},
+ traceIds: [span.traceId]
+ });
+
+ if (edge && edge.isType(span)) {
+ node.type = type.edge;
+ } else if (gateway && gateway.isType(span)) {
+ node.type = type.gateway;
+ } else if (mesh && mesh.isType(span)) {
+ node.type = type.mesh;
+ } else if (database && database.isType(span)) {
+ node.type = type.database;
+ node.databaseType = database.databaseType(span);
+ } else if (outbound && outbound.isType(span)) {
+ node.type = type.outbound;
+ } else {
+ node.type = type.service;
+ }
+
+ if (caseInsensitiveEquals(node.serviceName, serviceName) && node.type !== type.outbound) {
+ node.relationship = relationship.central;
+ }
+
+ return node;
+}
+/**
+ * updateNodeFromSpan()
+ * @param {object} node
+ * @param {object} span
+ */
+function updateNodeFromSpan(node, span) {
+ node.operations[span.operationName] = node.operations[span.operationName] ? node.operations[span.operationName] + 1 : 1;
+ node.count++;
+ node.duration += span.duration;
+ node.avgDuration = `${Math.floor(node.duration / node.count / 1000)} ms`;
+ node.traceIds.push(span.traceId);
+}
+
+/**
+ * buildNodes()
+ * Builds a map of nodes.
+ * @param {Array} spans - Array of fully hydrated Haystack spans
+ * @param {string} serviceName - Name of central dependency
+ */
+function buildNodes(spans, serviceName) {
+ const nodes = new Map();
+
+ spans.forEach((span) => {
+ const nodeId = getNodeIdFromSpan(span);
+ const existingNode = nodes.get(nodeId);
+
+ if (!existingNode) {
+ const newNode = createNodeFromSpan(nodeId, span, serviceName);
+ nodes.set(nodeId, newNode);
+ } else {
+ updateNodeFromSpan(existingNode, span);
+ }
+ });
+
+ return nodes;
+}
+
+/**
+ * buildLinks()
+ * Builds a map of links.
+ * @param {*} spans
+ */
+function buildLinks(spans) {
+ const linkMap = new Map(); // linkId: link
+ const spansById = new Map(); // spanId: span
+
+ spans.forEach((span) => {
+ spansById.set(span.spanId, span);
+ });
+
+ spans.forEach((span) => {
+ const parentSpanId = span.parentSpanId;
+ if (parentSpanId) {
+ const parentSpan = spansById.get(parentSpanId);
+ if (parentSpan) {
+ const parentNodeId = getNodeIdFromSpan(parentSpan);
+ const childNodeId = getNodeIdFromSpan(span);
+ if (parentNodeId !== childNodeId) {
+ const linkId = getIdForLink(parentNodeId, childNodeId);
+ const currentLink = linkMap.get(linkId);
+ // If link does not exist in map, create it
+ if (!currentLink) {
+ linkMap.set(
+ linkId,
+ createLink({
+ source: parentNodeId,
+ target: childNodeId
+ })
+ );
+ } else {
+ // else, calculate magnitude
+ currentLink.count++;
+ currentLink.tps++;
+ }
+ }
+ }
+ }
+ });
+
+ return linkMap;
+}
+
+/**
+ * extractNodesAndLinks()
+ * Given an array of spans and a service name, perform transform to build a nodes + links structure from multiple traces
+ * @param {*} spans - Array of fully hydrated span objects related to multiple traces
+ * @param {*} serviceName - Service name to search for
+ * @param {Array.<string>} relationshipFilter - Nodes and links to filter for, by relationship, or empty for the default filter
+ */
+const extractNodesAndLinks = ({spans, serviceName, traceLimitReached}, relationshipFilter = []) => {
+ // build map of nodes
+ const nodes = buildNodes(spans, serviceName);
+
+ // build map of links
+ const links = buildLinks(spans);
+
+ const summary = processNodesAndLinks(nodes, links, relationshipFilter);
+ summary.traceLimitReached = traceLimitReached;
+
+ return {
+ summary,
+ nodes: [...nodes.values()],
+ links: [...links.values()]
+ };
+};
+
+module.exports = {
+ extractNodesAndLinks
+};
diff --git a/ui/server/connectors/serviceInsights/serviceInsightsConnector.js b/ui/server/connectors/serviceInsights/serviceInsightsConnector.js
new file mode 100644
index 000000000..12cf5780f
--- /dev/null
+++ b/ui/server/connectors/serviceInsights/serviceInsightsConnector.js
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the 'License');
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an 'AS IS' BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+
+const fetcher = require('./fetcher');
+const extractor = require('./graphDataExtractor');
+
+const connector = {};
+
+function fetchServiceInsights(options) {
+ const {serviceName, operationName, traceId, startTime, endTime, limit, relationship} = options;
+ const relationshipFilter = relationship ? relationship.split(',') : [];
+ return fetcher(serviceName)
+ .fetch({serviceName, operationName, traceId, startTime, endTime, limit})
+ .then((data) => extractor.extractNodesAndLinks(data, relationshipFilter));
+}
+
+/**
+ * getServiceInsightsForService
+ *
+ * @param {object} options - Object with the following options:
+ * - serviceName - service to get Service Insights for (required, unless traceId is provided)
+ * - operationName - operation to filter for (optional)
+ * - traceId - single trace to get Service Insights for (optional)
+ * - startTime - filter for traces after this time in microseconds (required)
+ * - endTime - filter for traces before this time in microseconds (required)
+ * - limit - override the default config limit on number of traces to fetch (optional)
+ * - relationship - comma-separated list of relationships to include (optional)
+ */
+connector.getServiceInsightsForService = (options) => Q.fcall(() => fetchServiceInsights(options));
+
+module.exports = connector;
diff --git a/ui/server/connectors/services/servicesConnector.js b/ui/server/connectors/services/servicesConnector.js
new file mode 100644
index 000000000..3448a1afb
--- /dev/null
+++ b/ui/server/connectors/services/servicesConnector.js
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const config = require('../../config/config');
+const LoaderBackedCache = require('../utils/LoaderBackedCache');
+
+const tracesConnector = config.connectors.traces && require(`../traces/${config.connectors.traces.connectorName}/tracesConnector`); // eslint-disable-line import/no-dynamic-require, global-require
+const refreshIntervalInSecs = config.connectors.traces.serviceRefreshIntervalInSecs;
+const connector = {};
+
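+// Cache the service and operation lists, reloading from the traces connector on the configured refresh interval.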
+const serviceCache = new LoaderBackedCache(() => tracesConnector.getServices(), refreshIntervalInSecs * 1000);
+connector.getServices = () => serviceCache.get();
+
+const operationsCache = new LoaderBackedCache((serviceName) => tracesConnector.getOperations(serviceName), refreshIntervalInSecs * 1000);
+connector.getOperations = (serviceName) => operationsCache.get(serviceName);
+
+module.exports = connector;
diff --git a/ui/server/connectors/traces/haystack/expressionTreeBuilder.js b/ui/server/connectors/traces/haystack/expressionTreeBuilder.js
new file mode 100644
index 000000000..991d86833
--- /dev/null
+++ b/ui/server/connectors/traces/haystack/expressionTreeBuilder.js
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const expressionTreeBuilder = {};
+const messages = require('../../../../static_codegen/traceReader_pb');
+
+const reservedField = ['startTime', 'endTime', 'limit', 'spanLevelFilters', 'granularity'];
+
+expressionTreeBuilder.createFieldFromKeyValue = (key, value) => {
+ const field = new messages.Field();
+ field.setName(key);
+ let fieldValue = value;
+ let operator = messages.Field.Operator.EQUAL;
+
+ // check for custom operator at beginning of value string
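+ // e.g. a value of '>500' yields operator GREATER_THAN with field value '500'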
+ if (value[0] === '>' || value[0] === '<') {
+ operator = value[0] === '>' ? messages.Field.Operator.GREATER_THAN : messages.Field.Operator.LESS_THAN;
+ fieldValue = value.substring(1);
+ }
+ field.setValue(fieldValue);
+ field.setOperator(operator);
+
+ return field;
+};
+
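+// Each span-level filter is parsed from JSON and becomes an AND subtree of field operands.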
+function createSpanLevelExpression(spanLevelFilters) {
+ return spanLevelFilters.map((filterJson) => {
+ const filter = JSON.parse(filterJson);
+ const operand = new messages.Operand();
+ const expressionTree = new messages.ExpressionTree();
+ expressionTree.setOperator(messages.ExpressionTree.Operator.AND);
+ expressionTree.setIsspanlevelexpression(false);
+
+ const operands = Object.keys(filter)
+ .map((key) => {
+ const op = new messages.Operand();
+
+ const field = expressionTreeBuilder.createFieldFromKeyValue(key, filter[key]);
+
+ op.setField(field);
+
+ return op;
+ });
+
+ expressionTree.setOperandsList(operands);
+ operand.setExpression(expressionTree);
+
+ return operand;
+ });
+}
+
+function createTraceLevelOperands(query) {
+ return Object.keys(query)
+ .filter(key => query[key] && !reservedField.includes(key))
+ .map((key) => {
+ const operand = new messages.Operand();
+
+ const field = expressionTreeBuilder.createFieldFromKeyValue(key, query[key]);
+
+ operand.setField(field);
+
+ return operand;
+ });
+}
+
+expressionTreeBuilder.createFilterExpression = (query) => {
+ const expressionTree = new messages.ExpressionTree();
+
+ expressionTree.setOperator(messages.ExpressionTree.Operator.AND);
+ expressionTree.setIsspanlevelexpression(false);
+
+ const traceLevelOperands = createTraceLevelOperands(query);
+ let spanLevelExpressions = [];
+ if (query.spanLevelFilters) {
+ spanLevelExpressions = createSpanLevelExpression(JSON.parse(query.spanLevelFilters));
+ }
+
+ expressionTree.setOperandsList([...traceLevelOperands, ...spanLevelExpressions]);
+
+ return expressionTree;
+};
+
+module.exports = expressionTreeBuilder;
diff --git a/ui/server/connectors/traces/haystack/protobufConverters/callGraphConverter.js b/ui/server/connectors/traces/haystack/protobufConverters/callGraphConverter.js
new file mode 100644
index 000000000..4cf40bbd5
--- /dev/null
+++ b/ui/server/connectors/traces/haystack/protobufConverters/callGraphConverter.js
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const transformer = {};
+
+function toCallNode(pbNode) {
+ return {
+ serviceName: pbNode.servicename,
+ operationName: pbNode.operationname,
+ infrastructureProvider: pbNode.infrastructureprovider || '',
+ infrastructureLocation: pbNode.infrastructurelocation || ''
+ };
+}
+
+transformer.transform = pbCallGraph =>
+ pbCallGraph.callsList.map(call => ({
+ networkDelta: call.networkdelta / 1000,
+ from: toCallNode(call.from),
+ to: toCallNode(call.to)
+ }));
+
+const TP99_FIELD = '*_99.latency';
+const MEAN_FIELD = 'mean.latency';
+
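+// Join latency-cost edges with trend data, matched on the source service and operation name,
+// attaching TP99 and mean network deltas only when both series are present.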
+transformer.mergeTrendsWithLatencyCost = (latencyCost, trends) => latencyCost.map((edge) => {
+ const tp99 =
+ trends.filter(t =>
+ t.serviceName.toLowerCase() === edge.from.serviceName.toLowerCase()
+ && t.operationName.toLowerCase() === edge.from.operationName.toLowerCase()
+ && t[TP99_FIELD]
+ && t[TP99_FIELD].length);
+
+ const mean =
+ trends.filter(t =>
+ t.serviceName.toLowerCase() === edge.from.serviceName.toLowerCase()
+ && t.operationName.toLowerCase() === edge.from.operationName.toLowerCase()
+ && t[MEAN_FIELD]
+ && t[MEAN_FIELD].length);
+
+ const tp99NetworkDelta = tp99 && tp99.length && tp99[0][TP99_FIELD][0].value;
+ const meanNetworkDelta = mean && mean.length && mean[0][MEAN_FIELD][0].value;
+ if (tp99NetworkDelta && meanNetworkDelta) {
+ return {
+ ...edge,
+ tp99NetworkDelta,
+ meanNetworkDelta
+ };
+ }
+ return {...edge};
+});
+
+module.exports = transformer;
diff --git a/ui/server/connectors/traces/haystack/protobufConverters/traceConverter.js b/ui/server/connectors/traces/haystack/protobufConverters/traceConverter.js
new file mode 100644
index 000000000..9810ccfe7
--- /dev/null
+++ b/ui/server/connectors/traces/haystack/protobufConverters/traceConverter.js
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const _ = require('lodash');
+const spanProto = require('../../../../../static_codegen/span_pb');
+
+const converter = {};
+
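+// Protobuf tags carry their value in a type-specific field; select it by tag type.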
+function toTagJson(pbTag) {
+ let tagValue = '';
+
+ switch (pbTag.type) {
+ case spanProto.Tag.TagType.STRING:
+ tagValue = pbTag.vstr;
+ break;
+ case spanProto.Tag.TagType.DOUBLE:
+ tagValue = pbTag.vdouble;
+ break;
+ case spanProto.Tag.TagType.BOOL:
+ tagValue = pbTag.vbool;
+ break;
+ case spanProto.Tag.TagType.LONG:
+ tagValue = pbTag.vlong;
+ break;
+ case spanProto.Tag.TagType.BYTES:
+ tagValue = pbTag.vbytes;
+ break;
+ default:
+ tagValue = '';
+ }
+
+ return {
+ key: pbTag.key,
+ value: tagValue
+ };
+}
+
+function toLogJson(pbLog) {
+ return {
+ timestamp: pbLog.timestamp,
+ fields: pbLog.fieldsList.map(pbTag => toTagJson(pbTag))
+ };
+}
+
+converter.toSpanJson = pbSpan => ({
+ traceId: pbSpan.traceid,
+ spanId: pbSpan.spanid,
+ parentSpanId: pbSpan.parentspanid,
+ serviceName: pbSpan.servicename,
+ operationName: pbSpan.operationname,
+ startTime: pbSpan.starttime,
+ duration: pbSpan.duration,
+ logs: pbSpan.logsList && pbSpan.logsList.map(pbLog => toLogJson(pbLog)),
+ tags: pbSpan.tagsList && pbSpan.tagsList.map(pbTag => toTagJson(pbTag))
+});
+
+converter.toTraceJson = pbTrace => pbTrace.childspansList.map(pbSpan => converter.toSpanJson(pbSpan));
+
+converter.toTracesJson = pbTraces => _.flatMap(pbTraces.tracesList, t => converter.toTraceJson(t).sort((s1, s2) => s1.startTime - s2.startTime));
+
+module.exports = converter;
diff --git a/ui/server/connectors/traces/haystack/protobufConverters/traceCountsConverter.js b/ui/server/connectors/traces/haystack/protobufConverters/traceCountsConverter.js
new file mode 100644
index 000000000..e15c4a823
--- /dev/null
+++ b/ui/server/connectors/traces/haystack/protobufConverters/traceCountsConverter.js
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const converter = {};
+
+converter.toTraceCountsJson = pbTraceCounts => pbTraceCounts.tracecountList.map(
+ pbTraceCount => ({
+ x: pbTraceCount.timestamp,
+ y: pbTraceCount.count
+ })
+ );
+
+module.exports = converter;
diff --git a/ui/server/connectors/traces/haystack/search/searchRequestBuilder.js b/ui/server/connectors/traces/haystack/search/searchRequestBuilder.js
new file mode 100644
index 000000000..a7a2c4cbd
--- /dev/null
+++ b/ui/server/connectors/traces/haystack/search/searchRequestBuilder.js
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const expressionTreeBuilder = require('../expressionTreeBuilder');
+
+const requestBuilder = {};
+const messages = require('../../../../../static_codegen/traceReader_pb');
+
+const DEFAULT_RESULTS_LIMIT = 25;
+
+requestBuilder.buildRequest = (query) => {
+ const request = new messages.TracesSearchRequest();
+
+ request.setFilterexpression(expressionTreeBuilder.createFilterExpression(query));
+ request.setStarttime(parseInt(query.startTime, 10));
+ request.setEndtime(parseInt(query.endTime, 10));
+ request.setLimit(parseInt(query.limit, 10) || DEFAULT_RESULTS_LIMIT);
+
+ return request;
+};
+
+module.exports = requestBuilder;
diff --git a/ui/server/connectors/traces/haystack/search/searchResultsTransformer.js b/ui/server/connectors/traces/haystack/search/searchResultsTransformer.js
new file mode 100644
index 000000000..abba265db
--- /dev/null
+++ b/ui/server/connectors/traces/haystack/search/searchResultsTransformer.js
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const _ = require('lodash');
+
+const transformer = {};
+
+function calculateEndToEndDuration(spans) {
+ const startTime = spans
+ .map(span => span.startTime)
+ .reduce((earliest, cur) => Math.min(earliest, cur));
+ const endTime = spans
+ .map(span => (span.startTime + span.duration))
+ .reduce((latest, cur) => Math.max(latest, cur));
+
+ const difference = endTime - startTime;
+ return difference || 1;
+}
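+
+// e.g. hypothetical spans {startTime: 100, duration: 50} and {startTime: 150, duration: 100}
+// span the window [100, 250], so the end-to-end duration is 150 (a zero difference yields 1).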
+
+function findTag(tags, tagName) {
+ const foundTag = tags.find(tag => tag.key && tag.key.toLowerCase() === tagName.toLowerCase());
+ return foundTag && foundTag.value;
+}
+
+function calculateShadowDuration(spans) {
+ if (!spans.length) return 0;
+
+ const filteredSpans = spans.filter(span => !findTag(span.tags, 'X-HAYSTACK-AUTOGEN'));
+
+ const shadows = _.flatMap(filteredSpans, span => [{time: span.startTime, value: 1}, {time: span.startTime + span.duration, value: -1}]);
+
+ const sortedShadows = shadows.sort((a, b) => a.time - b.time);
+
+ let runningCount = 0;
+ let lastStartTimestamp = sortedShadows[0].time;
+ let runningShadowDuration = 0;
+
+ for (let i = 0; i < sortedShadows.length; i += 1) {
+ if (runningCount === 1 && sortedShadows[i].value === -1) {
+ runningShadowDuration += sortedShadows[i].time - lastStartTimestamp;
+ }
+
+ if (runningCount === 0 && sortedShadows[i].value === 1) {
+ lastStartTimestamp = sortedShadows[i].time;
+ }
+
+ runningCount += sortedShadows[i].value;
+ }
+
+ return runningShadowDuration;
+}
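+
+// The loop above is a sweep over interval start (+1) and end (-1) events, summing only the
+// time where at least one span is open. e.g. hypothetical spans covering [0, 10] and [5, 20]
+// overlap in [5, 10], so the shadow duration is 20 rather than the summed 25.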
+
+function isSpanError(span) {
+    const errorTag = findTag(span.tags, 'error');
+    return errorTag === true || (typeof errorTag === 'string' && errorTag !== 'false');
+}
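+
+// e.g. tags [{key: 'error', value: true}] and [{key: 'error', value: '500'}] both count as
+// errors, while [{key: 'error', value: 'false'}] and a missing tag do not.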
+
+function createServicesSummary(trace) {
+ const services = _.countBy(trace, span => span.serviceName);
+
+ return _.keys(services).map(service => ({
+ name: service,
+ spanCount: services[service]
+ }));
+}
+
+function createQueriedServiceSummary(trace, serviceName, endToEndDuration) {
+ const serviceSpans = trace.filter(span => span.serviceName === serviceName);
+
+ const serviceShadowDuration = calculateShadowDuration(serviceSpans);
+ const percent = Math.ceil((serviceShadowDuration / endToEndDuration) * 100);
+
+    return serviceName && serviceSpans.length > 0 && {
+ duration: serviceShadowDuration,
+ durationPercent: percent,
+ error: serviceSpans.some(span => isSpanError(span))
+ };
+}
+
+function createQueriedOperationSummary(trace, operationName, endToEndDuration) {
+ const operationSpans = trace.filter(span => operationName && span.operationName && (span.operationName.toLowerCase() === operationName.toLowerCase()));
+ const operationShadowDuration = calculateShadowDuration(operationSpans);
+ const percent = Math.floor((operationShadowDuration / endToEndDuration) * 100);
+
+    return operationName && operationSpans.length > 0 && {
+ duration: operationShadowDuration,
+ durationPercent: percent,
+ error: operationSpans.some(span => isSpanError(span))
+ };
+}
+
+function toSearchResult(trace, query) {
+ const rootSpan = trace.find(span => !span.parentSpanId);
+ const root = {
+ url: findTag(rootSpan.tags, 'url') || '',
+ serviceName: rootSpan.serviceName,
+ operationName: rootSpan.operationName,
+ duration: rootSpan.duration,
+ error: isSpanError(rootSpan)
+ };
+
+ const services = createServicesSummary(trace);
+
+ const endToEndDuration = calculateEndToEndDuration(trace);
+ const queriedService = createQueriedServiceSummary(trace, query.serviceName, endToEndDuration);
+ const queriedOperation = createQueriedOperationSummary(trace, query.operationName, endToEndDuration);
+
+ return {
+ traceId: rootSpan.traceId,
+ spanCount: trace.length,
+ errorSpanCount: trace.filter(span => isSpanError(span)).length,
+ services,
+ root,
+ queriedService,
+ queriedOperation,
+ startTime: rootSpan.startTime, // start time of the root span
+ duration: endToEndDuration // end-to-end duration
+ };
+}
+
+transformer.transform = (traces, query) => traces.map(trace => toSearchResult(trace, query));
+
+module.exports = transformer;
diff --git a/ui/server/connectors/traces/haystack/timeline/traceCountsRequestBuilder.js b/ui/server/connectors/traces/haystack/timeline/traceCountsRequestBuilder.js
new file mode 100644
index 000000000..57f1c69dc
--- /dev/null
+++ b/ui/server/connectors/traces/haystack/timeline/traceCountsRequestBuilder.js
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const createFilterExpression = require('../expressionTreeBuilder').createFilterExpression;
+
+const requestBuilder = {};
+const messages = require('../../../../../static_codegen/traceReader_pb');
+
+const DEFAULT_INTERVAL_LIMIT = 60 * 1000 * 1000; // 1m in microseconds
+
+function roundUpToGranularity(timeString, granularityString) {
+ const granularity = parseInt(granularityString, 10);
+ const time = parseInt(timeString, 10);
+
+ return ((parseInt((time / granularity), 10) + 1) * granularity) - 1;
+}
+
+function roundDownToGranularity(timeString, granularityString) {
+ const granularity = parseInt(granularityString, 10);
+ const time = parseInt(timeString, 10);
+ return parseInt((time / granularity), 10) * granularity;
+}
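+
+// Example with hypothetical values (microseconds, granularity = 60000000, i.e. 1m buckets):
+//   roundDownToGranularity('125000000', '60000000') -> 120000000 (bucket start)
+//   roundUpToGranularity('125000000', '60000000')   -> 179999999 (inclusive bucket end)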
+
+requestBuilder.buildRequest = (query) => {
+ const request = new messages.TraceCountsRequest();
+
+ request.setFilterexpression(createFilterExpression(query));
+ request.setStarttime(roundDownToGranularity(query.startTime, query.granularity));
+ request.setEndtime(roundUpToGranularity(query.endTime, query.granularity));
+ request.setInterval(parseInt(query.granularity, 10) || DEFAULT_INTERVAL_LIMIT);
+
+ return request;
+};
+
+module.exports = requestBuilder;
diff --git a/ui/server/connectors/traces/haystack/tracesConnector.js b/ui/server/connectors/traces/haystack/tracesConnector.js
new file mode 100644
index 000000000..cd7f4eaae
--- /dev/null
+++ b/ui/server/connectors/traces/haystack/tracesConnector.js
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const grpc = require('grpc');
+
+const messages = require('../../../../static_codegen/traceReader_pb');
+const searchResultsTransformer = require('./search/searchResultsTransformer');
+const callGraphResultTransformer = require('./protobufConverters/callGraphConverter');
+const pbTraceConverter = require('./protobufConverters/traceConverter');
+const pbTraceCountsConverter = require('./protobufConverters/traceCountsConverter');
+const searchRequestBuilder = require('./search/searchRequestBuilder');
+const traceCountsRequestBuilder = require('./timeline/traceCountsRequestBuilder');
+const objectUtils = require('../../utils/objectUtils');
+const fetcher = require('../../operations/grpcFetcher');
+const config = require('../../../config/config');
+
+const trendsConnector =
+ config.connectors.trends &&
+ config.connectors.trends.connectorName !== 'disabled' &&
+ require(`../../trends/${config.connectors.trends.connectorName}/trendsConnector`); // eslint-disable-line import/no-dynamic-require, global-require
+
+const services = require('../../../../static_codegen/traceReader_grpc_pb');
+
+const grpcOptions = config.grpcOptions || {};
+
+const client = new services.TraceReaderClient(
+ `${config.connectors.traces.haystackHost}:${config.connectors.traces.haystackPort}`,
+ grpc.credentials.createInsecure(),
+ grpcOptions
+); // TODO make client secure
+
+const fieldValueFetcher = fetcher('getFieldValues', client);
+const fieldNameFetcher = fetcher('getFieldNames', client);
+const traceFetcher = fetcher('getTrace', client);
+const rawTraceFetcher = fetcher('getRawTrace', client);
+const rawTracesFetcher = fetcher('getRawTraces', client);
+const rawSpanFetcher = fetcher('getRawSpan', client);
+const tracesSearchFetcher = fetcher('searchTraces', client);
+const traceCallGraphFetcher = fetcher('getTraceCallGraph', client);
+const traceCountsFetcher = fetcher('getTraceCounts', client);
+const connector = {};
+
+connector.getServices = () => {
+ const request = new messages.FieldValuesRequest();
+ request.setFieldname('serviceName');
+
+ return fieldValueFetcher
+ .fetch(request)
+ .then((result) => result.getValuesList())
+ .then((result) =>
+ result.filter((value) => {
+ const servicesFilter = config.connectors.traces.servicesFilter;
+ if (servicesFilter) {
+ for (let i = 0; i < servicesFilter.length; i += 1) {
+ if (servicesFilter[i].test(value)) {
+ return false;
+ }
+ }
+ }
+ return true;
+ })
+ );
+};
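+
+// servicesFilter is assumed to be an array of RegExps, e.g. [/^internal-/]; any service
+// name matching one of the patterns is dropped from the returned list.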
+
+connector.getSearchableKeys = () => {
+ const request = new messages.Empty();
+
+ return fieldNameFetcher.fetch(request).then((result) => {
+ const fieldNamesWithMetadata = {};
+ const names = result.getNamesList();
+ const metadata = result.getFieldmetadataList();
+
+        // create a map keyed by whitelisted field name
+ names.forEach((name, index) => {
+ fieldNamesWithMetadata[name] = {isRangeQuery: metadata[index] ? metadata[index].getIsrangequery() : false};
+ });
+
+        // additional keys that are not part of the index
+ fieldNamesWithMetadata.traceId = {isRangeQuery: false};
+ fieldNamesWithMetadata.serviceName = {isRangeQuery: false};
+ fieldNamesWithMetadata.operationName = {isRangeQuery: false};
+ fieldNamesWithMetadata.duration = {isRangeQuery: true, description: '(unit: microseconds)'};
+
+ return fieldNamesWithMetadata;
+ });
+};
+
+connector.getOperations = (serviceName) => {
+ const service = new messages.Field();
+ service.setName('serviceName');
+ service.setValue(serviceName);
+
+ const request = new messages.FieldValuesRequest();
+ request.setFieldname('operationName');
+    request.setFiltersList([service]);
+
+ return fieldValueFetcher.fetch(request).then((result) => result.getValuesList());
+};
+
+connector.getTrace = (traceId) => {
+ const request = new messages.TraceRequest();
+ request.setTraceid(traceId);
+
+ return traceFetcher.fetch(request).then((result) => pbTraceConverter.toTraceJson(messages.Trace.toObject(false, result)));
+};
+
+connector.findTraces = (query) => {
+ const traceId = objectUtils.getPropIgnoringCase(JSON.parse(query.spanLevelFilters), 'traceId');
+
+ if (traceId) {
+        // if the search is for a single trace, perform getTrace instead of a search
+ const request = new messages.TraceRequest();
+ request.setTraceid(traceId);
+
+ return traceFetcher.fetch(request).then((result) => {
+ const pbTrace = messages.Trace.toObject(false, result);
+ const jsonTrace = pbTraceConverter.toTraceJson(pbTrace);
+
+ return searchResultsTransformer.transform([jsonTrace], query);
+ });
+ }
+
+ return tracesSearchFetcher.fetch(searchRequestBuilder.buildRequest(query)).then((result) => {
+ const pbTraceResult = messages.TracesSearchResult.toObject(false, result);
+ const jsonTraceResults = pbTraceResult.tracesList.map((pbTrace) => pbTraceConverter.toTraceJson(pbTrace));
+
+ return searchResultsTransformer.transform(jsonTraceResults, query);
+ });
+};
+
+connector.findTracesFlat = (query) => {
+ const traceId = objectUtils.getPropIgnoringCase(JSON.parse(query.spanLevelFilters), 'traceId');
+
+ if (traceId) {
+        // if the search is for a single trace, perform getTrace instead of a search
+ const request = new messages.TraceRequest();
+ request.setTraceid(traceId);
+
+ return traceFetcher.fetch(request).then((result) => {
+ const pbTrace = messages.Trace.toObject(false, result);
+ return pbTraceConverter.toTraceJson(pbTrace);
+ });
+ }
+
+ return tracesSearchFetcher.fetch(searchRequestBuilder.buildRequest(query)).then((result) => {
+ const pbTraceResult = messages.TracesSearchResult.toObject(false, result);
+ return pbTraceResult.tracesList.map((pbTrace) => pbTraceConverter.toTraceJson(pbTrace));
+ });
+};
+
+connector.getRawTrace = (traceId) => {
+ const request = new messages.TraceRequest();
+ request.setTraceid(traceId);
+
+ return rawTraceFetcher.fetch(request).then((result) => pbTraceConverter.toTraceJson(messages.Trace.toObject(false, result)));
+};
+
+connector.getRawTraces = (traceIds) => {
+ const request = new messages.RawTracesRequest();
+ request.setTraceidList(JSON.parse(traceIds));
+
+ return rawTracesFetcher.fetch(request).then((result) => pbTraceConverter.toTracesJson(messages.RawTracesResult.toObject(false, result)));
+};
+
+connector.getRawSpan = (traceId, spanId, serviceName) => {
+ const request = new messages.SpanRequest();
+ request.setTraceid(traceId);
+ request.setSpanid(spanId);
+ return rawSpanFetcher.fetch(request).then((result) => {
+ const spanResponse = messages.SpanResponse.toObject(false, result);
+ const pbSpan = spanResponse.spansList.find((span) => span.servicename === serviceName);
+ return pbTraceConverter.toSpanJson(pbSpan);
+ });
+};
+
+connector.getLatencyCost = (traceId) => {
+ const request = new messages.TraceRequest();
+ request.setTraceid(traceId);
+
+ return traceCallGraphFetcher.fetch(request).then((result) => {
+ const latencyCost = callGraphResultTransformer.transform(messages.TraceCallGraph.toObject(false, result));
+ const edges = latencyCost.map((e) => ({
+ serviceName: e.from.serviceName,
+ operationName: e.from.operationName
+ }));
+
+ return (
+ trendsConnector &&
+ trendsConnector.getEdgeLatency(edges).then((trends) => {
+ if (trends && trends.length) {
+ const latencyCostTrends = callGraphResultTransformer.mergeTrendsWithLatencyCost(latencyCost, trends);
+ return {latencyCost, latencyCostTrends};
+ }
+ return {latencyCost};
+ })
+ );
+ });
+};
+
+connector.getTimeline = (query) =>
+    traceCountsFetcher.fetch(traceCountsRequestBuilder.buildRequest(query)).then((result) => {
+        const pbTraceCounts = messages.TraceCounts.toObject(false, result);
+        return pbTraceCountsConverter.toTraceCountsJson(pbTraceCounts).sort((a, b) => a.x - b.x);
+    });
+
+module.exports = connector;
diff --git a/ui/server/connectors/traces/mock/mock-web-ui.js b/ui/server/connectors/traces/mock/mock-web-ui.js
new file mode 100644
index 000000000..157f20454
--- /dev/null
+++ b/ui/server/connectors/traces/mock/mock-web-ui.js
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2019 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the 'License');
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an 'AS IS' BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+module.exports = [
+ {
+ extends: 'edge',
+ data: {
+ operationName: 'www.website-a.com',
+ tags: [
+ {
+ key: 'edge.route',
+ value: '/foo/{id}'
+ }
+ ]
+ },
+ children: [
+ {
+ extends: 'gateway',
+ data: {
+ operationName: 'proxy request'
+ },
+ children: [
+ {
+ extends: 'service-mesh',
+ data: {
+ operationName: 'proxy mock-web-ui'
+ },
+ children: [
+ {
+ extends: 'mock-web-ui',
+ data: {
+ operationName: 'GET /foo/123'
+ },
+ children: [
+ {
+ extends: 'service-mesh',
+ data: {
+ operationName: 'proxy auth-service'
+ },
+ children: [
+ {
+ extends: 'auth-service',
+ data: {
+ operationName: 'generate token'
+ }
+ }
+ ]
+ },
+ {
+ extends: 'service-mesh',
+ data: {
+ operationName: 'proxy mock-api-service'
+ },
+ children: [
+ {
+ extends: 'mock-api-service',
+ data: {
+ serviceName: 'Foo service',
+ operationName: 'READ'
+ },
+ children: [
+ {
+ extends: 'distributed-database',
+ data: {
+ operationName: 'SELECT foo'
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ extends: 'edge',
+ data: {
+ operationName: 'www.website-b.com',
+ tags: [
+ {
+ key: 'edge.route',
+ value: '/bar/{id}'
+ }
+ ]
+ },
+ children: [
+ {
+ extends: 'gateway',
+ data: {
+ operationName: 'proxy request'
+ },
+ children: [
+ {
+ extends: 'service-mesh',
+ data: {
+ operationName: 'proxy mock-web-ui'
+ },
+ children: [
+ {
+ extends: 'mock-web-ui',
+ data: {
+ operationName: 'GET /bar/123'
+ },
+ children: [
+ {
+ extends: 'key-value-store',
+ data: {
+ operationName: 'GET bar'
+ }
+ },
+ {
+ extends: 'service-mesh',
+ data: {
+ operationName: 'proxy missing-service'
+ },
+ children: []
+ },
+ {
+ extends: 'service-mesh',
+ data: {
+ operationName: 'proxy mock-api-service'
+ },
+ children: [
+ {
+ extends: 'mock-api-service',
+ data: {
+ serviceName: 'Foo service',
+ operationName: 'GET /foo/related'
+ },
+ children: [
+ {
+ extends: 'service-mesh',
+ data: {
+ operationName: 'proxy other-api-service'
+ },
+ children: []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+];
diff --git a/ui/server/connectors/traces/mock/spanTypes.js b/ui/server/connectors/traces/mock/spanTypes.js
new file mode 100644
index 000000000..6e18b9a0f
--- /dev/null
+++ b/ui/server/connectors/traces/mock/spanTypes.js
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2019 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the 'License');
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an 'AS IS' BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const serverSpanTags = [
+ {
+ key: 'span.kind',
+ value: 'server'
+ },
+ {
+ key: 'X-HAYSTACK-IS-MERGED-SPAN',
+ value: true
+ }
+];
+
+const clientSpanTags = [
+ {
+ key: 'span.kind',
+ value: 'client'
+ }
+];
+
+module.exports = {
+ edge: {
+ serviceName: 'edge',
+ tags: serverSpanTags.concat([
+ {
+ key: 'edge.route',
+ value: '/path/{id}'
+ }
+ ])
+ },
+ gateway: {
+ serviceName: 'gateway',
+ tags: serverSpanTags.concat([
+ {
+ key: 'app.datacenter',
+ value: 'us-east-1'
+ }
+ ])
+ },
+ 'mock-web-ui': {
+ serviceName: 'mock-web-ui',
+ tags: serverSpanTags
+ },
+ 'auth-service': {
+ serviceName: 'auth-service',
+ tags: serverSpanTags
+ },
+ 'mock-api-service': {
+ serviceName: 'mock-api-service',
+ tags: serverSpanTags
+ },
+ 'key-value-store': {
+ serviceName: 'key-value-store',
+ tags: clientSpanTags.concat([
+ {
+ key: 'db.type',
+ value: 'key-value-store'
+ }
+ ])
+ },
+ 'document-store': {
+ serviceName: 'document-store',
+ tags: clientSpanTags.concat([
+ {
+ key: 'db.type',
+ value: 'document-store'
+ }
+ ])
+ },
+ 'distributed-database': {
+ serviceName: 'distributed-database',
+ tags: clientSpanTags.concat([
+ {
+ key: 'db.type',
+ value: 'distributed-database'
+ }
+ ])
+ },
+ 'service-mesh': {
+ serviceName: 'service-mesh',
+ tags: serverSpanTags
+ },
+ client: {
+ tags: clientSpanTags
+ }
+};
diff --git a/ui/server/connectors/traces/mock/tracesConnector.js b/ui/server/connectors/traces/mock/tracesConnector.js
new file mode 100644
index 000000000..dec346d92
--- /dev/null
+++ b/ui/server/connectors/traces/mock/tracesConnector.js
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2019 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+const _ = require('lodash');
+
+const {generateMockTraceSpans, getMockServiceNames, getMockOperationNames} = require('./tracesGenerator');
+
+const connector = {};
+
+connector.getServices = () => Q.fcall(() => getMockServiceNames());
+
+connector.getOperations = () => Q.fcall(() => getMockOperationNames());
+
+function getValue(min, max) {
+ return _.round(Math.random() * (max - min) + min, 0);
+}
+
+function getRandomValues(granularity, dataPoints, from) {
+ const valuesArr = [];
+ _.range(dataPoints).forEach((i) => valuesArr.push({x: from + i * granularity, y: getValue(0, 3000)}));
+ return valuesArr;
+}
+
+connector.getTimeline = (query) =>
+ Q.fcall(() => {
+        const range = query.endTime - query.startTime;
+        const granularity = range / 15; // fixed 15 buckets of mock data
+        const points = range / granularity;
+
+ return getRandomValues(granularity, points, parseInt(query.startTime, 10));
+ });
+
+connector.getSearchableKeys = () =>
+ Q.fcall(() => ({
+ serviceName: {isRangeQuery: false},
+ operationName: {isRangeQuery: false},
+ traceId: {isRangeQuery: false}
+ }));
+
+// TODO: support these so that this connector can be used by the Traces tab (currently only the Service Insights tab is supported)
+connector.getLatencyCost = () =>
+ Q.fcall(() => {
+ throw new Error('Unsupported by mock connector.');
+ });
+connector.getTrace = () =>
+ Q.fcall(() => {
+ throw new Error('Unsupported by mock connector.');
+ });
+connector.getRawTrace = () =>
+ Q.fcall(() => {
+ throw new Error('Unsupported by mock connector.');
+ });
+connector.getRawSpan = () =>
+ Q.fcall(() => {
+ throw new Error('Unsupported by mock connector.');
+ });
+
+connector.findTraces = () =>
+ Q.fcall(() => {
+ throw new Error('Unsupported by mock connector.');
+ });
+
+connector.findTracesFlat = () => Q.fcall(() => generateMockTraceSpans());
+
+connector.getRawTraces = () =>
+ Q.fcall(() => {
+ throw new Error('Unsupported by mock connector.');
+ });
+
+module.exports = connector;
diff --git a/ui/server/connectors/traces/mock/tracesGenerator.js b/ui/server/connectors/traces/mock/tracesGenerator.js
new file mode 100644
index 000000000..ec462fe6b
--- /dev/null
+++ b/ui/server/connectors/traces/mock/tracesGenerator.js
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2019 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the 'License');
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an 'AS IS' BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Seedrandom = require('seedrandom');
+const merge = require('deepmerge');
+
+const defaultTypes = require('./spanTypes');
+
+const mockTraces = {
+ 'mock-web-ui': require('./mock-web-ui') // eslint-disable-line
+};
+
+/**
+ * generateRandomId()
+ * Generates an ID of the given length from the given seed
+ * @param {int} length - length of the desired ID, returned in [0-9a-f] format
+ * @param {string} seed - seed value for idempotent generation.
+ */
+function generateRandomId(length = 16, seed) {
+ const randomNumberGenerator = new Seedrandom(`${seed}`);
+ const characters = 'abcdef0123456789';
+ const charactersLength = characters.length;
+ let result = '';
+ for (let i = 0; i < length; i++) {
+ result += characters.charAt(Math.floor(randomNumberGenerator.quick() * charactersLength));
+ }
+ return result;
+}
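+
+// e.g. generateRandomId(8, 'abc') returns the same 8-character hex string on every call,
+// since Seedrandom is deterministic for a given seed.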
+
+/**
+ * getMockServiceNames()
+ * Returns an array of strings which represent available mock traces
+ */
+function getMockServiceNames() {
+ return Object.keys(mockTraces);
+}
+
+/**
+ * getMockOperationNamesFromTrace()
+ * Recursive function that walks the tree of mock data and returns a map/dictionary of found operation names
+ * @param {object} trace - Object which represents the current root of the tree-like data structure of mock data.
+ * @param {object} operationNameMap - Map of operation names found
+ */
+function getMockOperationNamesFromTrace(trace, operationNameMap = {}) {
+ // If operationName found on trace, add to map
+ const operationName = trace.data && trace.data.operationName;
+ if (operationName) {
+ operationNameMap[operationName] = true;
+ }
+
+ // Recurse data
+ if (trace.children) {
+ for (let i = 0; i < trace.children.length; i++) {
+ getMockOperationNamesFromTrace(trace.children[i], operationNameMap);
+ }
+ }
+ return operationNameMap;
+}
+
+/**
+ * getMockOperationNames()
+ * Gets all available operation names from all traces
+ */
+function getMockOperationNames() {
+ let operationNames = {};
+ const serviceNames = getMockServiceNames();
+ for (let i = 0; i < serviceNames.length; i++) {
+ const tracePath = mockTraces[serviceNames[i]];
+ for (let j = 0; j < tracePath.length; j++) {
+ const trace = tracePath[j];
+ operationNames = {
+ ...operationNames,
+ ...getMockOperationNamesFromTrace(trace)
+ };
+ }
+ }
+ return Object.keys(operationNames);
+}
+
+/**
+ * generateMockTrace()
+ * Recursive function that generates a single mock trace represented by an array of linked spans
+ * @param {object} trace - Object which represents the current root of the tree-like data structure of mock data. Recursion through "children" property
+ * @param {array} spans - Array of mock spans which is the final return result
+ * @param {string} seed - Seed value used for idempotent generation of random Ids
+ * @param {object} parentSpan - Parent span generated in previous recursive step
+ */
+function generateMockTrace(trace, spans = [], seed, parentSpan) {
+ // Define random number generator
+ const randomNumberGenerator = new Seedrandom(seed);
+
+    // Sanity check: fail fast if the trace extends an unknown span type
+ if (trace.extends && !defaultTypes[trace.extends]) {
+ throw new Error(`Invalid "extends" property "${trace.extends}" defined on mock trace data.`);
+ }
+
+ // Merge custom trace span data with default span data
+ const newSpan = merge(trace.data || {}, defaultTypes[trace.extends] || {});
+ let spanId;
+ let traceId;
+ let parentSpanId;
+
+ // If no parent span, assume this is the root. (traceId should also be spanId)
+ if (!parentSpan) {
+ traceId = generateRandomId(16, randomNumberGenerator.quick() * 100);
+ spanId = traceId;
+ } else {
+ traceId = parentSpan.traceId;
+ parentSpanId = parentSpan.spanId;
+ spanId = generateRandomId(16, randomNumberGenerator.quick() * 100);
+ }
+
+ // Sanity check merged mock span data that is required
+ if (!newSpan.operationName) {
+ throw new Error(`Mock span is missing property operationName: ${JSON.stringify(newSpan)}`);
+ }
+
+ // Set IDs required for proper trace linkage
+ newSpan.spanId = spanId;
+ newSpan.traceId = traceId;
+ newSpan.parentSpanId = parentSpanId;
+
+ // Set random duration
+ newSpan.duration = Math.floor(randomNumberGenerator.quick() * 1000);
+
+ // Push span onto array
+ spans.push(newSpan);
+
+ // Now, process children
+ if (trace.children) {
+ for (let i = 0; i < trace.children.length; i++) {
+ generateMockTrace(trace.children[i], spans, randomNumberGenerator.quick() * 100, newSpan);
+ }
+ }
+
+ // Return spans
+ return spans;
+}
+
+/**
+ * generateMockTraceSpans()
+ * Generates an array of mock traces
+ * @param {string} mockTraceName - Name of mock trace of spans to generate
+ */
+
+function generateMockTraceSpans(mockTraceName = 'mock-web-ui') {
+    // Sanity check the requested mock trace name
+ if (!mockTraces[mockTraceName]) {
+ throw new Error(`No mock trace data available for '${mockTraceName}'`);
+ }
+ const tracePath = mockTraces[mockTraceName];
+ const seed = mockTraceName;
+
+ // Define traces variable
+ let traceSpans = [];
+
+ // Start generating trace spans
+ for (let i = 0; i < tracePath.length; i++) {
+ traceSpans = traceSpans.concat(generateMockTrace(tracePath[i], [], `${seed}-${i}`));
+ }
+
+ // Return array of spans representing multiple traces
+ return traceSpans;
+}
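+
+// Usage sketch: generateMockTraceSpans('mock-web-ui') deterministically expands the
+// mock-web-ui trace definitions into a flat array of linked spans (same seed, same IDs).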
+
+module.exports = {
+ generateMockTraceSpans,
+ getMockServiceNames,
+ getMockOperationNames
+};
diff --git a/ui/server/connectors/traces/stub/tracesConnector.js b/ui/server/connectors/traces/stub/tracesConnector.js
new file mode 100644
index 000000000..f2cbfef62
--- /dev/null
+++ b/ui/server/connectors/traces/stub/tracesConnector.js
@@ -0,0 +1,939 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+const _ = require('lodash');
+const objectUtils = require('../../utils/objectUtils');
+
+function guid() {
+ function s4() {
+ return Math.floor((1 + Math.random()) * 0x10000)
+ .toString(16)
+ .substring(1);
+ }
+ return `${s4()}${s4()}-${s4()}-${s4()}-${s4()}-${s4()}${s4()}${s4()}`;
+}
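+
+// Produces a UUID-shaped string (8-4-4-4-12 hex groups); it is random but not RFC 4122
+// compliant, which is fine for stub data.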
+
+const trace = () => {
+ const traceId = guid();
+ const span1 = guid();
+ const span2 = guid();
+ const span3 = guid();
+ const span4 = guid();
+ const span5 = guid();
+ const span6 = guid();
+ const span7 = guid();
+ const span8 = guid();
+ const span9 = guid();
+ const span10 = guid();
+ const span11 = guid();
+ const span12 = guid();
+
+ return [
+ {
+ traceId,
+ spanId: span1,
+ serviceName: 'stark-service',
+ operationName: 'snow-1',
+ startTime: 1504784384000,
+ duration: 3525000,
+ logs: [],
+ tags: [
+ {
+ key: 'url',
+ value: 'http://trace.io/blah'
+ },
+ {
+ key: 'url2',
+ value: 'some:data'
+ },
+ {
+ key: 'error',
+ value: false
+ },
+ {
+ key: 'request-blob',
+ value: 'test-blob-client_message_4d35d44f-719e-4196-a784-7b4d506db6de_response_77b56acc-91d6-48fd-96ce-5f66de8fc7e7'
+ },
+ {
+ key: 'response-blob',
+ value: 'test-blob-client_message_4d35d44f-719e-4196-a784-7b4d506db6de_response_77b56acc-91d6-48fd-96ce-5f66de8fc7e7'
+ }
+ ]
+ },
+ {
+ traceId,
+ parentSpanId: span1,
+ spanId: span2,
+ serviceName: 'westeros-service',
+ operationName: 'mormont-1',
+ startTime: 1504784384000 + 250000,
+ duration: 1505000,
+ logs: [],
+ tags: [
+ {
+ key: 'url',
+ value: 'http://trace.io/blah'
+ },
+ {
+ key: 'error',
+ value: true
+ },
+ {
+ key: 'url2',
+ value: 'some:data'
+ },
+ {
+ key: 'request-blob',
+ value: 'test-blob-client_message_4d35d44f-719e-4196-a784-7b4d506db6de_response_77b56acc-91d6-48fd-96ce-5f66de8fc7e7'
+ },
+ {
+ key: 'response-blob',
+ value: 'test-blob-client_message_4d35d44f-719e-4196-a784-7b4d506db6de_response_77b56acc-91d6-48fd-96ce-5f66de8fc7e7'
+ },
+ {
+ key: 'url3',
+ value: 'http://trace.io/blah'
+ },
+ {
+ key: 'url4',
+ value: 'some:data'
+ },
+ {
+ key: 'url5',
+ value: 'http://trace.io/blah'
+ },
+ {
+ key: 'url6',
+ value: 'some:data'
+ },
+ {
+ key: 'url7',
+ value: 'http://trace.io/blah'
+ },
+ {
+ key: 'url8',
+ value: 'some:data'
+ }
+ ]
+ },
+ {
+ traceId,
+ parentSpanId: span2,
+ spanId: span3,
+ serviceName: 'tyrell-service',
+ operationName: 'tully-1',
+ startTime: 1504784384000 + 250000 + 120000,
+ duration: 605000,
+ logs: [],
+ tags: [
+ {
+ key: 'blob-request',
+ value: '/getBlob/tyrell-service/tully-1_9ff6c0cf-03ba-4675-991f-5dfbbf45af03'
+ },
+ {
+ key: 'blob-response',
+ value: '/getBlob/tyrell-service/tully-1_9ff6c0cf-03ba-4675-991f-5dfbbf45af03'
+ }
+ ]
+ },
+ {
+ traceId,
+ parentSpanId: span2,
+ spanId: span4,
+ serviceName: 'dragon-service',
+ operationName: 'drogo-1',
+ startTime: 1504784384000 + 250000 + 680000,
+ duration: 645000,
+ logs: [],
+ tags: [
+ {
+ key: 'error',
+ value: 'true'
+ }
+ ]
+ },
+ {
+ traceId,
+ parentSpanId: span2,
+ spanId: span5,
+ serviceName: 'dragon-service',
+ operationName: 'grayjoy-1',
+ startTime: 1504784384000 + 250000 + 680000,
+ duration: 805000,
+ logs: [],
+ tags: [
+ {
+ key: 'error',
+ value: '500'
+ }
+ ]
+ },
+ {
+ traceId,
+ parentSpanId: span5,
+ spanId: span6,
+ serviceName: 'blackwater-service',
+ operationName: 'clegane-1',
+ startTime: 1504784384000 + 250000 + 920000,
+ duration: 675000,
+ logs: [],
+ tags: []
+ },
+ {
+ traceId,
+ parentSpanId: span1,
+ spanId: span7,
+ serviceName: 'baratheon-service',
+ operationName: 'dondarrion-1',
+ startTime: 1504784384000 + 1760000,
+ duration: 834000,
+ logs: [],
+ tags: []
+ },
+ {
+ traceId,
+ parentSpanId: span7,
+ spanId: span8,
+ serviceName: 'blackwater-service',
+ operationName: 'grayjoy-1',
+ startTime: 1504784384000 + 1960000,
+ duration: 234000,
+ logs: [],
+ tags: []
+ },
+ {
+ traceId,
+ parentSpanId: span1,
+ spanId: span9,
+ serviceName: 'westeros-service',
+ operationName: 'tarley-1',
+ startTime: 1504784384000 + 2560000 + 105000,
+ duration: 105000,
+ logs: [],
+ tags: [
+ {
+ key: 'external-link-key',
+ value: 'external-link-value'
+ }
+ ]
+ },
+ {
+ traceId,
+ parentSpanId: span1,
+ spanId: span10,
+ serviceName: 'westeros-service',
+ operationName: 'snow-1',
+ startTime: 1504784384000 + 2560000 + 105000,
+ duration: 505000,
+ logs: [],
+ tags: []
+ },
+ {
+ traceId,
+ parentSpanId: span1,
+ spanId: span11,
+ serviceName: 'westeros-service',
+ operationName: 'tarley-1',
+ startTime: 1504784384000 + 2560000 + 105000,
+ duration: 505000 + 225000,
+ logs: [],
+ tags: []
+ },
+ {
+ traceId,
+ parentSpanId: span1,
+ spanId: span12,
+ serviceName: 'westeros-service',
+ operationName: 'dondarrion-1',
+ startTime: 1504784384000 + 2560000 + 105000 + 505000 + 225000,
+ duration: 150000,
+ logs: [],
+ tags: [
+ {
+ key: 'error',
+ value: true
+ }
+ ]
+ }
+ ];
+};
+
+const connector = {};
+
+connector.getServices = () =>
+ Q.fcall(() => [
+ 'root-service',
+ 'lannister-service',
+ 'stark-service',
+ 'tyrell-service',
+ 'targaryen-service',
+ 'baratheon-service',
+ 'dragon-service',
+ 'westeros-service'
+ ]);
+
+connector.getOperations = () =>
+ Q.fcall(() => [
+ 'mormont-1',
+ 'seaworth-1',
+ 'bolton-1',
+ 'baelish-1',
+ 'snow-1',
+ 'tully-1',
+ 'dondarrion-1',
+ 'grayjoy-1',
+ 'clegane-1',
+ 'drogo-1',
+ 'tarley-1'
+ ]);
+
+function getValue(min, max) {
+ return _.round(Math.random() * (max - min) + min, 0);
+}
+
+function getRandomValues(granularity, dataPoints, from) {
+ const valuesArr = [];
+ _.range(dataPoints).forEach((i) => valuesArr.push({x: from + i * granularity, y: getValue(0, 3000)}));
+ return valuesArr;
+}
+
+connector.getTimeline = (query) =>
+ Q.fcall(() => {
+ const granularity = query.granularity || (query.endTime - query.startTime) / 15;
+ const range = query.endTime - query.startTime;
+ const points = range / granularity;
+
+ return getRandomValues(granularity, points, parseInt(query.startTime, 10));
+ });
+
+connector.getSearchableKeys = () =>
+ Q.fcall(() => ({
+ serviceName: {isRangeQuery: false},
+ operationName: {isRangeQuery: false},
+ traceId: {isRangeQuery: false},
+ error: {isRangeQuery: false, values: ['true', 'false']},
+ duration: {isRangeQuery: true, description: '(unit: microseconds)'},
+ guid: {isRangeQuery: false},
+ testid: {isRangeQuery: false}
+ }));
+
+const latencyCost = {
+ latencyCost: [
+ {
+ from: {
+ serviceName: 'stark-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ to: {
+ serviceName: 'westeros-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ networkDelta: 65
+ },
+ {
+ from: {
+ serviceName: 'westeros-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ to: {
+ serviceName: 'tyrell-service',
+ infrastructureProvider: '',
+ infrastructureLocation: ''
+ },
+ networkDelta: null
+ },
+ {
+ from: {
+ serviceName: 'westeros-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ to: {
+ serviceName: 'dragon-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-1'
+ },
+ networkDelta: 55
+ },
+ {
+ from: {
+ serviceName: 'westeros-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ to: {
+ serviceName: 'dragon-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-1'
+ },
+ networkDelta: 64
+ },
+ {
+ from: {
+ serviceName: 'dragon-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-1'
+ },
+ to: {
+ serviceName: 'blackwater-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-east-2'
+ },
+ networkDelta: 121
+ },
+ {
+ from: {
+ serviceName: 'stark-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ to: {
+ serviceName: 'baratheon-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-east-1'
+ },
+ networkDelta: 180
+ },
+ {
+ from: {
+ serviceName: 'baratheon-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-east-1'
+ },
+ to: {
+ serviceName: 'blackwater-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-east-1'
+ },
+ networkDelta: 109
+ },
+ {
+ from: {
+ serviceName: 'stark-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ to: {
+ serviceName: 'westeros-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ networkDelta: 99
+ },
+ {
+ from: {
+ serviceName: 'stark-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ to: {
+ serviceName: 'westeros-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ networkDelta: 128
+ },
+ {
+ from: {
+ serviceName: 'stark-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ to: {
+ serviceName: 'westeros-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ networkDelta: 77
+ },
+ {
+ from: {
+ serviceName: 'stark-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ to: {
+ serviceName: 'westeros-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-3'
+ },
+ networkDelta: 98
+ }
+ ],
+ latencyCostTrends: [
+ {
+ from: {
+ serviceName: 'stark-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ to: {
+ serviceName: 'westeros-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ tp99NetworkDelta: 333,
+ meanNetworkDelta: 21
+ },
+ {
+ from: {
+ serviceName: 'westeros-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ to: {
+ serviceName: 'tyrell-service',
+ infrastructureProvider: '',
+ infrastructureLocation: ''
+ },
+ tp99NetworkDelta: 1031,
+ meanNetworkDelta: 310
+ },
+ {
+ from: {
+ serviceName: 'westeros-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ to: {
+ serviceName: 'dragon-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-1'
+ },
+ tp99NetworkDelta: 198,
+ meanNetworkDelta: 88
+ },
+ {
+ from: {
+ serviceName: 'dragon-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-1'
+ },
+ to: {
+ serviceName: 'blackwater-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-east-2'
+ },
+ tp99NetworkDelta: 355,
+ meanNetworkDelta: 301
+ },
+ {
+ from: {
+ serviceName: 'stark-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ to: {
+ serviceName: 'baratheon-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-east-1'
+ },
+ tp99NetworkDelta: 34,
+ meanNetworkDelta: 21
+ },
+ {
+ from: {
+ serviceName: 'baratheon-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-east-1'
+ },
+ to: {
+ serviceName: 'blackwater-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-east-1'
+ },
+ tp99NetworkDelta: 50,
+ meanNetworkDelta: 31
+ },
+ {
+ from: {
+ serviceName: 'stark-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-2'
+ },
+ to: {
+ serviceName: 'westeros-service',
+ infrastructureProvider: 'aws',
+ infrastructureLocation: 'us-west-3'
+ },
+ tp99NetworkDelta: 46,
+ meanNetworkDelta: 45
+ }
+ ]
+};
+
+connector.getLatencyCost = () => Q.fcall(() => latencyCost);
+
+connector.getTrace = () => Q.fcall(() => trace());
+
+connector.getRawTrace = () => Q.fcall(() => trace());
+
+connector.getRawSpan = () => Q.fcall(() => trace()[0]);
+
+connector.getRawTraces = () => Q.fcall(() => [...trace(), ...trace()]);
+
+connector.findTraces = (query) =>
+ Q.fcall(() => {
+ const traceId = query.spanLevelFilters && objectUtils.getPropIgnoringCase(JSON.parse(query.spanLevelFilters), 'traceId');
+
+ if (traceId) {
+ return [
+ {
+ traceId: '380965e5-e0c4-4c37-91a7-da79def7597b',
+ spanCount: 12,
+ errorSpanCount: 2,
+ services: [
+ {
+ name: 'stark-service',
+ spanCount: 1
+ },
+ {
+ name: 'tyrell-service',
+ spanCount: 29
+ }
+ ],
+ root: {
+ url: '/stark/endpoint',
+ serviceName: 'stark-service',
+ operationName: 'snow-1',
+ duration: 3404000,
+ error: false
+ },
+ queriedService: {
+ duration: 31000,
+ durationPercent: 64,
+ error: true
+ },
+ queriedOperation: {
+ duration: 1,
+ durationPercent: 0,
+ error: false
+ },
+ startTime: new Date().getTime() * 1000,
+ duration: 390000
+ }
+ ];
+ }
+ return [
+ {
+ traceId: 'x00245a5-g0c4-4c37-55a7-da83def7127a',
+ spanCount: 34,
+ errorSpanCount: 2,
+ services: [
+ {
+ name: 'stark-service',
+ spanCount: 16
+ },
+ {
+ name: 'targaryen-service',
+ spanCount: 18
+ }
+ ],
+ root: {
+ url: '/stark/endpoint',
+ serviceName: 'stark-service',
+ operationName: 'snow-1',
+ duration: 3404000,
+ error: false
+ },
+ queriedService: {
+ duration: 23000,
+ durationPercent: 99,
+ error: false
+ },
+ queriedOperation: {
+ duration: 1,
+ durationPercent: 0,
+ error: false
+ },
+ startTime: new Date().getTime() * 1000 - 10 * 1000 * 1000,
+ duration: 240000
+ },
+ {
+ traceId: 'a40165e5-e0c4-4c51-11x7-bb79def7597a',
+ spanCount: 34,
+ errorSpanCount: 2,
+ services: [
+ {
+ name: 'stark-service',
+ spanCount: 1
+ },
+ {
+ name: 'rob-service',
+ spanCount: 8
+ }
+ ],
+ root: {
+ url: '/rob/endpoint',
+ serviceName: 'rob-service',
+ operationName: 'mormont-1',
+ duration: 3404000,
+ error: false
+ },
+ queriedService: {
+ duration: 590000,
+ durationPercent: 64,
+ error: false
+ },
+ queriedOperation: {
+ duration: 1,
+ durationPercent: 0,
+ error: false
+ },
+ startTime: new Date().getTime() * 1000 - 15 * 1000 * 1000,
+ duration: 850000
+ },
+ {
+ traceId: 'a80921e5-e0c4-4c37-91a7-da79def7597a',
+ spanCount: 44,
+ services: [
+ {
+ name: 'tyrell-service',
+ spanCount: 22
+ },
+ {
+ name: 'renly-service',
+ spanCount: 22
+ }
+ ],
+ root: {
+ url: '/baratheon/endpoint',
+ serviceName: 'gendry-service',
+ operationName: 'dondarrion-1',
+ duration: 3404000,
+ error: false
+ },
+ queriedService: {
+ duration: 5990000,
+ durationPercent: 64,
+ error: true
+ },
+ queriedOperation: {
+ duration: 1,
+ durationPercent: 0,
+ error: false
+ },
+ startTime: new Date().getTime() * 1000 - 18 * 1000 * 1000,
+ duration: 3500000
+ },
+ {
+ traceId: 'a55965e5-e0c4-4a37-91a7-da79def7597a',
+ spanCount: 30,
+ services: [
+ {
+ name: 'stark-service',
+ spanCount: 1
+ },
+ {
+ name: 'tyrell-service',
+ spanCount: 29
+ }
+ ],
+ root: {
+ url: '/stark/endpoint',
+ serviceName: 'stark-service',
+ operationName: 'clegane-1',
+ duration: 3404000,
+ error: false
+ },
+ queriedService: {
+ duration: 120000,
+ durationPercent: 64,
+ error: true
+ },
+ queriedOperation: {
+ duration: 1,
+ durationPercent: 0,
+ error: false
+ },
+ startTime: new Date().getTime() * 1000 - 18 * 1000 * 1000,
+ duration: 126000
+ },
+ {
+ traceId: 'wb651a1b-146x-4c37-91a7-6r61v513r1v11',
+ spanCount: 30,
+ services: [
+ {
+ name: 'stark-service',
+ spanCount: 1
+ },
+ {
+ name: 'jon-service',
+ spanCount: 29
+ }
+ ],
+ root: {
+ url: '/east/endpoint',
+ serviceName: 'stark-service',
+ operationName: 'grayjoy-1',
+ duration: 3404000,
+ error: false
+ },
+ queriedService: {
+ duration: 5990000,
+ durationPercent: 88,
+ error: true
+ },
+ queriedOperation: {
+ duration: 1,
+ durationPercent: 0,
+ error: false
+ },
+ startTime: new Date().getTime() * 1000 - 30 * 1000 * 1000,
+ duration: 3500000
+ },
+ {
+ traceId: 'b44165e5-xx14-4c37-91a7-da79def7597b',
+ spanCount: 25,
+ services: [
+ {
+ name: 'randall-service',
+ spanCount: 1
+ },
+ {
+ name: 'stark-service',
+ spanCount: 29
+ }
+ ],
+ root: {
+ url: '/tarley/endpoint',
+ serviceName: 'randall-service',
+ operationName: 'tarley-1',
+ duration: 3404000,
+ error: false
+ },
+ queriedService: {
+ duration: 2450000,
+ durationPercent: 94,
+ error: true
+ },
+ queriedOperation: {
+ duration: 1,
+ durationPercent: 0,
+ error: false
+ },
+ startTime: new Date().getTime() * 1000 - 44 * 1000 * 1000,
+ duration: 2450000
+ },
+ {
+ traceId: 'c80965e5-e0c4-4c37-91a7-da79def7597b',
+ spanCount: 19,
+ services: [
+ {
+ name: 'stark-service',
+ spanCount: 1
+ },
+ {
+ name: 'tyrell-service',
+ spanCount: 29
+ }
+ ],
+ root: {
+ url: '/targaryen/endpoint',
+ serviceName: 'targaryen-service',
+ operationName: 'drogo-1',
+ duration: 3404000,
+ error: false
+ },
+ queriedService: {
+ duration: 5990000,
+ durationPercent: 76,
+ error: false
+ },
+ queriedOperation: {
+ duration: 1,
+ durationPercent: 0,
+ error: false
+ },
+ startTime: new Date().getTime() * 1000 - 56 * 1000 * 1000,
+ duration: 3500000
+ },
+ {
+ traceId: 'd80965e5-e0c4-4c37-91a7-da79def7597b',
+ spanCount: 88,
+ services: [
+ {
+ name: 'stark-service',
+ spanCount: 1
+ },
+ {
+ name: 'tyrell-service',
+ spanCount: 29
+ }
+ ],
+ root: {
+ url: '/stark/endpoint',
+ serviceName: 'stark-service',
+ operationName: 'tully-1',
+ duration: 3404000,
+ error: false
+ },
+ queriedService: {
+ duration: 5990000,
+ durationPercent: 64,
+ error: true
+ },
+ queriedOperation: {
+ duration: 1,
+ durationPercent: 0,
+ error: false
+ },
+ startTime: new Date().getTime() * 1000 - 90 * 1000 * 1000,
+ duration: 3500000
+ },
+ {
+ traceId: 'e80965e5-e0c4-4c37-91a7-da79def7597b',
+ spanCount: 12,
+ services: [
+ {
+ name: 'stark-service',
+ spanCount: 1
+ },
+ {
+ name: 'westeros-service',
+ spanCount: 5
+ }
+ ],
+ root: {
+ url: '/stark/endpoint',
+ serviceName: 'stark-service',
+ operationName: 'snow-1',
+ duration: 3404000,
+ error: false
+ },
+ queriedService: {
+ duration: 1260000,
+ durationPercent: 64,
+ error: true
+ },
+ queriedOperation: {
+ duration: 1,
+ durationPercent: 0,
+ error: false
+ },
+ startTime: new Date().getTime() * 1000 - 2 * 1000 * 1000,
+ duration: 3545000
+ }
+ ];
+ });
+
+connector.findTracesFlat = () => Q.fcall(() => [...trace(), ...trace()]);
+
+module.exports = connector;
diff --git a/ui/server/connectors/traces/zipkin/converter.js b/ui/server/connectors/traces/zipkin/converter.js
new file mode 100644
index 000000000..ad74ffa10
--- /dev/null
+++ b/ui/server/connectors/traces/zipkin/converter.js
@@ -0,0 +1,447 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const searchResultsTransformer = require('../haystack/search/searchResultsTransformer');
+
+// NOTICE: This converter was originally ported from the following ASL 2.0 code:
+// https://github.com/openzipkin/zipkin/blob/6fbef6bcfc84e721215c1037771300643eb1b0ed/zipkin-ui/js/spanConverter.js
+function toHaystackLog(annotation) {
+ return {
+ timestamp: annotation.timestamp,
+ fields: [
+ {
+ key: 'event',
+ value: annotation.value
+ }
+ ]
+ };
+}
+
+function normalizeTraceId(traceId) {
+ if (traceId.length > 16) {
+ return traceId.padStart(32, '0');
+ }
+ return traceId.padStart(16, '0');
+}
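+
+// e.g. normalizeTraceId('abc') -> '0000000000000abc' (64-bit form), while ids longer than
+// 16 characters are padded to the 32-character 128-bit form.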
+
+// NOTE: 'not_found' is different from Zipkin's 'unknown' default
+function sanitizeName(name) {
+ return (name && name !== '' && name !== 'unknown') ? name : 'not_found';
+}
+
+// Note: the tag 'success' is not something defined in Zipkin, nor commonly used
+function convertSuccessTag(tags) {
+ const successTag = tags.find(tag => tag.key.toLowerCase() === 'success');
+ if (successTag) {
+ successTag.key = 'error';
+ successTag.value = successTag.value === 'false' ? 'true' : 'false';
+ }
+}
+
+// Note: the tag 'methoduri' is not something defined in Zipkin, nor commonly used
+function convertMethodUriTag(tags) {
+ const methodUriTag = tags.find(tag => tag.key.toLowerCase() === 'methoduri');
+ if (methodUriTag) {
+ methodUriTag.key = 'url';
+ }
+}
+
+function toHaystackSpan(span) {
+ const res = {
+ traceId: normalizeTraceId(span.traceId)
+ };
+
+ // take care not to create self-referencing spans even if the input data is incorrect
+ const id = span.id.padStart(16, '0');
+ if (span.parentId) {
+ const parentId = span.parentId.padStart(16, '0');
+ if (parentId !== id) {
+ res.parentSpanId = parentId;
+ }
+ }
+
+ res.spanId = id;
+
+ if (span.localEndpoint) {
+ res.serviceName = sanitizeName(span.localEndpoint.serviceName);
+ } else {
+ res.serviceName = 'not_found';
+ }
+
+ res.operationName = sanitizeName(span.name);
+
+ // Don't report timestamp and duration on shared spans (should be server, but not necessarily)
+ if (!span.shared) {
+ if (span.timestamp) res.startTime = span.timestamp;
+ if (span.duration) res.duration = span.duration;
+ }
+
+ let startTs = span.timestamp || 0;
+ let endTs = startTs && span.duration ? startTs + span.duration : 0;
+ let msTs = 0;
+ let wsTs = 0;
+ let wrTs = 0;
+ let mrTs = 0;
+
+ let begin;
+ let end;
+
+ let kind = span.kind;
+
+ // scan annotations in case there are better timestamps, or inferred kind
+ (span.annotations || []).forEach((a) => {
+ switch (a.value) {
+ case 'cs':
+ kind = 'CLIENT';
+ if (a.timestamp < startTs) startTs = a.timestamp;
+ break;
+ case 'sr':
+ kind = 'SERVER';
+ if (a.timestamp < startTs) startTs = a.timestamp;
+ break;
+ case 'ss':
+ kind = 'SERVER';
+ if (a.timestamp > endTs) endTs = a.timestamp;
+ break;
+ case 'cr':
+ kind = 'CLIENT';
+ if (a.timestamp > endTs) endTs = a.timestamp;
+ break;
+ case 'ms':
+ kind = 'PRODUCER';
+ msTs = a.timestamp;
+ break;
+ case 'mr':
+ kind = 'CONSUMER';
+ mrTs = a.timestamp;
+ break;
+ case 'ws':
+ wsTs = a.timestamp;
+ break;
+ case 'wr':
+ wrTs = a.timestamp;
+ break;
+ default:
+ }
+ });
+
+ let remoteAddressTag;
+ switch (kind) {
+ case 'CLIENT':
+ remoteAddressTag = 'server.service_name';
+ begin = 'cs';
+ end = 'cr';
+ break;
+ case 'SERVER':
+ remoteAddressTag = 'client.service_name';
+ begin = 'sr';
+ end = 'ss';
+ break;
+ case 'PRODUCER':
+ remoteAddressTag = 'broker.service_name';
+ begin = 'ms';
+ end = 'ws';
+ if (startTs === 0 || (msTs !== 0 && msTs < startTs)) {
+ startTs = msTs;
+ }
+ if (endTs === 0 || (wsTs !== 0 && wsTs > endTs)) {
+ endTs = wsTs;
+ }
+ break;
+ case 'CONSUMER':
+ remoteAddressTag = 'broker.service_name';
+ if (startTs === 0 || (wrTs !== 0 && wrTs < startTs)) {
+ startTs = wrTs;
+ }
+ if (endTs === 0 || (mrTs !== 0 && mrTs > endTs)) {
+ endTs = mrTs;
+ }
+ if (endTs !== 0 || wrTs !== 0) {
+ begin = 'wr';
+ end = 'mr';
+ } else {
+ begin = 'mr';
+ }
+ break;
+ default:
+ }
+
+ const beginAnnotation = startTs && begin;
+ const endAnnotation = endTs && end;
+
+ res.logs = []; // prefer empty to undefined for arrays
+
+ if (beginAnnotation) {
+ res.logs.push(toHaystackLog({
+ value: begin,
+ timestamp: startTs
+ }));
+ }
+
+ (span.annotations || []).forEach((a) => {
+ if (beginAnnotation && a.value === begin) return;
+ if (endAnnotation && a.value === end) return;
+ res.logs.push(toHaystackLog(a));
+ });
+
+ if (endAnnotation) {
+ res.logs.push(toHaystackLog({
+ value: end,
+ timestamp: endTs
+ }));
+ }
+
+ res.tags = []; // prefer empty to undefined for arrays
+ const keys = Object.keys(span.tags || {});
+ if (keys.length > 0) {
+ res.tags = keys.map(key => ({
+ key,
+ value: span.tags[key]
+ }));
+
+ // handle special tags defined by Haystack
+ convertSuccessTag(res.tags);
+ convertMethodUriTag(res.tags);
+ }
+
+ if (span.remoteEndpoint) {
+ const remoteService = sanitizeName(span.remoteEndpoint.serviceName);
+ if (remoteService !== 'not_found') {
+ res.tags.push({
+ key: remoteAddressTag || 'remote.service_name',
+ value: remoteService
+ });
+ }
+ }
+
+ return res;
+}
+
+/*
+ * Instrumentation should set span.startTime when recording a span so that guess-work
+ * isn't needed. Since a lot of instrumentation doesn't, we have to make some guesses.
+ *
+ * * If there is a 'cs', use that
+ * * Fall back to 'sr'
+ * * Otherwise, return undefined
+ */
+// originally zipkin.internal.ApplyTimestampAndDuration.guessTimestamp
+function guessTimestamp(span) {
+ if (span.startTime || span.logs.length === 0) {
+ return span.startTime;
+ }
+ let rootServerRecv;
+ for (let i = 0; i < span.logs.length; i += 1) {
+ const a = span.logs[i];
+ if (a.fields[0].value === 'cs') {
+ return a.timestamp;
+ } else if (a.fields[0].value === 'sr') {
+ rootServerRecv = a.timestamp;
+ }
+ }
+ return rootServerRecv;
+}
+
+/*
+ * For RPC two-way spans, the duration between 'cs' and 'cr' is authoritative. RPC one-way spans
+ * lack a response, so the duration is between 'cs' and 'sr'. We special-case this to avoid
+ * setting incorrect duration when there's skew between the client and the server.
+ */
+// originally zipkin.internal.ApplyTimestampAndDuration.apply
+function applyTimestampAndDuration(span) {
+ const logsLength = span.logs.length;
+ // Don't overwrite authoritatively set startTime and duration!
+ if ((span.startTime && span.duration) || logsLength === 0) {
+ return span;
+ }
+
+ // We cannot backfill duration on a span with less than two logs. However, we
+ // can backfill timestamp.
+ if (logsLength < 2) {
+ if (span.startTime) return span;
+ const guess = guessTimestamp(span);
+ if (!guess) return span;
+ span.startTime = guess; // eslint-disable-line no-param-reassign
+ return span;
+ }
+
+ // Prefer RPC one-way (cs -> sr) vs arbitrary annotations.
+ let first = span.logs[0].timestamp;
+ let last = span.logs[logsLength - 1].timestamp;
+ span.logs.forEach((a) => {
+ if (a.fields[0].value === 'cs') {
+ first = a.timestamp;
+ } else if (a.fields[0].value === 'cr') {
+ last = a.timestamp;
+ }
+ });
+
+ if (!span.startTime) {
+ span.startTime = first; // eslint-disable-line no-param-reassign
+ }
+ if (!span.duration && last !== first) {
+ span.duration = last - first; // eslint-disable-line no-param-reassign
+ }
+ return span;
+}
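+
+// e.g. a merged span with logs cs@100, sr@105, ss@195, cr@200 backfills startTime = 100 and
+// duration = 100 (cs -> cr), sidestepping client/server clock skew.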
+
+// This guards to ensure we don't add duplicate logs on merge
+function maybePushHaystackLog(annotations, a) {
+ if (annotations.findIndex(b => a.fields[0].value === b.fields[0].value) === -1) {
+ annotations.push(a);
+ }
+}
+
+// This guards to ensure we don't add duplicate tags on merge
+function maybePushHaystackTag(tags, a) {
+ if (tags.findIndex(b => a.key === b.key) === -1) {
+ tags.push(a);
+ }
+}
+
+function merge(left, right) {
+ const res = {
+ traceId: right.traceId.length > 16 ? right.traceId : left.traceId
+ };
+
+ if (left.parentSpanId) {
+ res.parentSpanId = left.parentSpanId;
+ } else if (right.parentSpanId) {
+ res.parentSpanId = right.parentSpanId;
+ }
+
+ res.spanId = left.spanId;
+
+ // When we move to span model 2, remove this code in favor of using Span.kind == CLIENT
+ let leftClientSpan;
+ let rightClientSpan;
+ let rightServerSpan;
+
+ const logs = [];
+
+ (left.logs || []).forEach((a) => {
+ if (a.fields[0].value === 'cs') leftClientSpan = true;
+ maybePushHaystackLog(logs, a);
+ });
+
+ (right.logs || []).forEach((a) => {
+ if (a.fields[0].value === 'cs') rightClientSpan = true;
+ if (a.fields[0].value === 'sr') rightServerSpan = true;
+ maybePushHaystackLog(logs, a);
+ });
+
+ res.operationName = left.operationName;
+ if (right.operationName !== 'not_found') {
+ if (res.operationName === 'not_found') {
+ res.operationName = right.operationName;
+ } else if (leftClientSpan && rightServerSpan) {
+ res.operationName = right.operationName; // prefer the server's span name
+ }
+ }
+
+ res.serviceName = left.serviceName;
+ if (right.serviceName !== 'not_found') {
+ if (res.serviceName === 'not_found') {
+ res.serviceName = right.serviceName;
+ } else if (leftClientSpan && rightServerSpan) {
+ res.serviceName = right.serviceName; // prefer the server's service name
+ }
+ }
+
+ res.logs = logs.sort((a, b) => a.timestamp - b.timestamp);
+
+ res.tags = [];
+
+ (left.tags || []).forEach((b) => {
+ maybePushHaystackTag(res.tags, b);
+ });
+
+ (right.tags || []).forEach((b) => {
+ maybePushHaystackTag(res.tags, b);
+ });
+
+ // When both sides share a timestamp (or only one side has one), duration is easy: just choose the max
+ if (!left.startTime || !right.startTime || left.startTime === right.startTime) {
+ res.startTime = left.startTime || right.startTime;
+ if (!left.duration) {
+ res.duration = right.duration;
+ } else if (right.duration) {
+ res.duration = Math.max(left.duration, right.duration);
+ } else {
+ res.duration = left.duration;
+ }
+
+ // We have two different timestamps. If either side has client data, use that side's
+ // timestamp and duration; otherwise leave them unset.
+ } else if (rightClientSpan) {
+ res.startTime = right.startTime;
+ res.duration = right.duration;
+ } else if (leftClientSpan) {
+ res.startTime = left.startTime;
+ res.duration = left.duration;
+ }
+
+ return res;
+}
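+// Hypothetical example: merging a client half {serviceName: 'frontend', logs: [cs, cr]}
+// with a server half {serviceName: 'backend', logs: [sr, ss]} of the same spanId keeps
+// the union of logs and tags and prefers the server's serviceName and operationName.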
+
+/*
+ * Zipkin spans can be sent in multiple parts. Also client and server spans can
+ * share the same ID. This merges both scenarios.
+ */
+// originally zipkin.internal.MergeById.apply
+function mergeById(spans) {
+ const result = [];
+
+ if (!spans || spans.length === 0) return result;
+
+ const spanIdToSpans = {};
+ spans.forEach((s) => {
+ const id = s.spanId;
+ spanIdToSpans[id] = spanIdToSpans[id] || [];
+ spanIdToSpans[id].push(s);
+ });
+
+ Object.keys(spanIdToSpans).forEach((id) => {
+ const spansToMerge = spanIdToSpans[id];
+ let left = spansToMerge[0];
+ for (let i = 1; i < spansToMerge.length; i += 1) {
+ left = merge(left, spansToMerge[i]);
+ }
+
+ // attempt to get a timestamp so that the UI can sort results
+ result.push(applyTimestampAndDuration(left));
+ });
+
+ return result;
+}
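+// For example, two documents reported for the same spanId (one from the client, one
+// from the server) come back from mergeById as a single span with a backfilled
+// startTime, so the UI can sort the trace.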
+
+const converter = {};
+
+// exported for testing
+converter.toHaystackSpan = toHaystackSpan;
+converter.applyTimestampAndDuration = applyTimestampAndDuration;
+converter.merge = merge;
+converter.mergeById = mergeById;
+
+// NOTE: unlike the Zipkin UI, this neither sorts nor corrects clock skew in the
+// results. It is unclear whether that is in scope for the haystack UI logic.
+converter.toHaystackTrace = zipkinTrace =>
+ mergeById(zipkinTrace.map(zipkinSpan => toHaystackSpan(zipkinSpan)));
+
+converter.toHaystackSearchResult = (zipkinTraces, query) => {
+ const haystackTraces = zipkinTraces.map(zipkinTrace => converter.toHaystackTrace(zipkinTrace));
+ return searchResultsTransformer.transform(haystackTraces, query);
+};
+
+module.exports = converter;
diff --git a/ui/server/connectors/traces/zipkin/tracesConnector.js b/ui/server/connectors/traces/zipkin/tracesConnector.js
new file mode 100644
index 000000000..fb4084536
--- /dev/null
+++ b/ui/server/connectors/traces/zipkin/tracesConnector.js
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const config = require('../../../config/config');
+const Q = require('q');
+const converter = require('./converter');
+const objectUtils = require('../../utils/objectUtils');
+const fetcher = require('../../operations/restFetcher');
+
+const connector = {};
+const baseZipkinUrl = config.connectors.traces.zipkinUrl;
+const servicesFilter = config.connectors.traces.servicesFilter;
+
+const servicesFetcher = fetcher('getServices');
+const operationsFetcher = fetcher('getOperations');
+const traceFetcher = fetcher('getTrace');
+const searchTracesFetcher = fetcher('searchTraces');
+const rawTraceFetcher = fetcher('getRawTrace');
+const rawSpanFetcher = fetcher('getRawSpan');
+
+const reservedField = ['serviceName', 'operationName', 'startTime', 'endTime', 'limit', 'spanLevelFilters'];
+const DEFAULT_RESULTS_LIMIT = 40;
+
+function toAnnotationQuery(query) {
+ return Object.keys(query)
+ .filter((key) => query[key] && !reservedField.includes(key))
+ .map((key) => `${encodeURIComponent(key).toLowerCase()}=${encodeURIComponent(query[key])}`)
+ .join(' and '); // Zipkin's annotationQuery separates terms with ' and '
+}
+
+function mapQueryParams(query) {
+ const mappedQuery = {
+ serviceName: query.serviceName,
+ spanName: query.operationName ? query.operationName : 'all',
+ annotationQuery: toAnnotationQuery(query),
+ endTs: (parseInt(query.endTime, 10) - 30 * 1000 * 1000) / 1000, // shift back 30s, then micros -> millis
+ lookback: (parseInt(query.endTime, 10) - parseInt(query.startTime, 10)) / 1000, // micros -> millis
+ limit: parseInt(query.limit, 10) || DEFAULT_RESULTS_LIMIT
+ };
+
+ return Object.keys(mappedQuery)
+ .filter((key) => mappedQuery[key])
+ .map((key) => `${encodeURIComponent(key)}=${encodeURIComponent(mappedQuery[key])}`)
+ .join('&');
+}
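+// For illustration (hypothetical values): {serviceName: 'svc', endTime: '1540000000000000',
+// startTime: '1539999940000000', limit: '10'} maps to
+// 'serviceName=svc&spanName=all&endTs=1539999970000&lookback=60000&limit=10'.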
+
+connector.getServices = () => {
+ const fetched = servicesFetcher.fetch(`${baseZipkinUrl}/services`);
+ if (!servicesFilter) {
+ return fetched;
+ }
+ return fetched.then((result) =>
+ result.filter((value) => {
+ for (let i = 0; i < servicesFilter.length; i += 1) {
+ if (servicesFilter[i].test(value)) {
+ return false;
+ }
+ }
+ return true;
+ })
+ );
+};
+
+connector.getOperations = (serviceName) => operationsFetcher.fetch(`${baseZipkinUrl}/spans?serviceName=${serviceName}`);
+
+connector.getTrace = (traceId) => traceFetcher.fetch(`${baseZipkinUrl}/trace/${traceId}`).then((result) => converter.toHaystackTrace(result));
+
+connector.findTraces = (query) => {
+ const traceId = objectUtils.getPropIgnoringCase(JSON.parse(query.spanLevelFilters), 'traceId');
+
+ if (traceId) {
+ // if the search specifies a trace ID, fetch that trace directly instead of searching
+ return traceFetcher.fetch(`${baseZipkinUrl}/trace/${traceId}`).then((result) => converter.toHaystackSearchResult([result], query));
+ }
+
+ const queryUrl = mapQueryParams(query);
+
+ return searchTracesFetcher.fetch(`${baseZipkinUrl}/traces?${queryUrl}`).then((result) => converter.toHaystackSearchResult(result, query));
+};
+
+// Not supported for zipkin. Required for service insights feature.
+connector.findTracesFlat = () => Q.fcall(() => []);
+
+connector.getRawTrace = (traceId) => rawTraceFetcher.fetch(`${baseZipkinUrl}/trace/${traceId}`);
+
+// TODO: fetching a single span by trace and span ID is not supported by Zipkin.
+// We could instead issue getRawTrace and filter the result by span ID.
+connector.getRawSpan = (traceId) => rawSpanFetcher.fetch(`${baseZipkinUrl}/trace/${traceId}`);
+
+// TODO: fetching multiple traces by ID is not supported by Zipkin.
+// We could instead issue multiple calls to getRawTrace.
+connector.getRawTraces = () => Q.fcall(() => []);
+
+// Not supported for zipkin
+connector.getTimeline = () => Q.fcall(() => []);
+
+// TODO get whitelisted keys from configuration
+connector.getSearchableKeys = () => Q.fcall(() => ['serviceName', 'operationName', 'traceId', 'error']);
+
+module.exports = connector;
diff --git a/ui/server/connectors/trends/haystack/trendsConnector.js b/ui/server/connectors/trends/haystack/trendsConnector.js
new file mode 100644
index 000000000..12167b2a2
--- /dev/null
+++ b/ui/server/connectors/trends/haystack/trendsConnector.js
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+const config = require('../../../config/config');
+const _ = require('lodash');
+const fetcher = require('../../operations/restFetcher');
+const MetricpointNameEncoder = require('../../utils/encoders/MetricpointNameEncoder');
+
+const trendsFetcher = fetcher('trends');
+
+// computed per call so the defaults don't freeze at server start time
+const defaultFrom = () => Math.ceil((Date.now() / 1000) - (60 * 60));
+const defaultUntil = () => Math.ceil(Date.now() / 1000);
+const connector = {};
+const metricTankUrl = config.connectors.trends && config.connectors.trends.metricTankUrl;
+const metricpointNameEncoder = new MetricpointNameEncoder(config.encoder);
+
+function createServicesOperationsTarget(services, operations, timeWindow, metricStats, metricNames) {
+ return encodeURIComponent(`seriesByTag('name=${metricNames}','serviceName=${services}','operationName=${operations}','interval=${timeWindow}','stat=${metricStats}')`);
+}
+
+function createOperationTarget(service, operationName, timeWindow, metricStats, metricNames) {
+ return encodeURIComponent(`seriesByTag('name=${metricNames}','serviceName=${service}','operationName=${operationName}','interval=${timeWindow}','stat=${metricStats}')`);
+}
+
+function getServiceTargetStat(service, timeWindow, metricStats, metricNames) {
+ return encodeURIComponent(`seriesByTag('name=${metricNames}','serviceName=${service}','interval=${timeWindow}','stat=${metricStats}')`);
+}
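+// For example, getServiceTargetStat('svc', 'OneMinute', 'count', 'success-span')
+// produces the URL-encoded form of:
+// seriesByTag('name=success-span','serviceName=svc','interval=OneMinute','stat=count')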
+
+function convertGranularityToTimeWindow(timespan) {
+ switch (timespan) {
+ case '60000':
+ return 'OneMinute';
+ case '300000':
+ return 'FiveMinute';
+ case '900000':
+ return 'FifteenMinute';
+ case '3600000':
+ return 'OneHour';
+ default:
+ return 'OneMinute';
+ }
+}
+
+function convertEpochTimeInSecondsToMillis(timestamp) {
+ return timestamp * 1000;
+}
+
+function toMilliseconds(micro) {
+ return Math.ceil(micro / 1000);
+}
+
+function groupResponseByServiceOperation(data) {
+ return data.map((op) => {
+ const tags = op.tags;
+
+ const serviceName = tags.serviceName ? metricpointNameEncoder.decodeMetricpointName(tags.serviceName) : null;
+ const operationName = tags.operationName ? metricpointNameEncoder.decodeMetricpointName(tags.operationName) : null;
+ const trendStat = `${tags.stat}.${tags.name}`;
+
+ return {
+ serviceName,
+ operationName,
+ [trendStat]: op.datapoints.map(datapoint => ({
+ value: datapoint[0] ? datapoint[0] : 0,
+ timestamp: convertEpochTimeInSecondsToMillis(datapoint[1])
+ }))
+ };
+ });
+}
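+// Assuming a Metrictank render response entry such as (with the noop encoder)
+// {tags: {serviceName: 'svc', operationName: 'op', stat: 'count', name: 'success-span'},
+//  datapoints: [[5, 1540000000]]}, this yields
+// {serviceName: 'svc', operationName: 'op', 'count.success-span': [{value: 5, timestamp: 1540000000000}]}.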
+
+function fetchTrendValues(target, from, until) {
+ return trendsFetcher
+ .fetch(`${metricTankUrl}/render?target=${target}&from=${from}&to=${until}`, {'x-org-id': 1})
+ .then(data => groupResponseByServiceOperation(data));
+}
+
+function extractTrendPointsForSingleServiceOperation(operationTrends, trendStat) {
+ const dataPoints = operationTrends.find(trend => trendStat in trend);
+ const trendStatDataPoints = dataPoints ? dataPoints[trendStat] : [];
+
+ // Drop a trailing empty datapoint (an incomplete interval). Note that
+ // groupResponseByServiceOperation maps null values to 0, so a strict null
+ // check would never fire; we check for falsy instead.
+ if (trendStatDataPoints.length && !trendStatDataPoints[trendStatDataPoints.length - 1].value) {
+ trendStatDataPoints.pop();
+ }
+
+ return trendStatDataPoints;
+}
+
+function dataPointsSum(dataPoints) {
+ return dataPoints.reduce(((accumulator, dataPoint) => (accumulator + dataPoint.value)), 0);
+}
+
+function toSuccessPercent(successPoints, failurePoints) {
+ const successCount = dataPointsSum(successPoints);
+ const failureCount = dataPointsSum(failurePoints);
+
+ return 100 - ((failureCount / (successCount + failureCount)) * 100);
+}
+
+function toSuccessPercentPoints(successCount, failureCount) {
+ const successTimestamps = successCount.map(point => point.timestamp);
+ const failureTimestamps = failureCount.map(point => point.timestamp);
+ const timestamps = _.uniq([...successTimestamps, ...failureTimestamps]);
+
+ return _.compact(timestamps.map((timestamp) => {
+ const successItem = _.find(successCount, x => (x.timestamp === timestamp));
+ const successVal = (successItem && successItem.value) ? successItem.value : 0;
+
+ const failureItem = _.find(failureCount, x => (x.timestamp === timestamp));
+ const failureVal = (failureItem && failureItem.value) ? failureItem.value : 0;
+
+ if (successVal + failureVal) {
+ return {
+ value: (100 - ((failureVal / (successVal + failureVal)) * 100)),
+ timestamp
+ };
+ }
+ return null;
+ }));
+}
+
+function toCountPoints(successCount, failureCount) {
+ const successTimestamps = successCount.map(point => point.timestamp);
+ const failureTimestamps = failureCount.map(point => point.timestamp);
+ const timestamps = _.uniq([...successTimestamps, ...failureTimestamps]);
+
+ return _.compact(timestamps.map((timestamp) => {
+ const successItem = _.find(successCount, x => (x.timestamp === timestamp));
+ const successVal = (successItem && successItem.value) ? successItem.value : 0;
+
+ const failureItem = _.find(failureCount, x => (x.timestamp === timestamp));
+ const failureVal = (failureItem && failureItem.value) ? failureItem.value : 0;
+
+ return {
+ value: successVal + failureVal,
+ timestamp
+ };
+ }));
+}
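+// Hypothetical example: successCount [{value: 3, timestamp: 1}] and failureCount
+// [{value: 1, timestamp: 1}] produce a success percent point of 75 and a count
+// point of 4 at timestamp 1.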
+
+function extractServicePerfStats({successValues, failureValues, tp99Values}) {
+ const trendResults = [];
+
+ const groupedByServiceName = _.groupBy(successValues.concat(failureValues, tp99Values), val => val.serviceName);
+ Object.keys(groupedByServiceName).forEach((service) => {
+ const serviceTrends = groupedByServiceName[service];
+ const successCount = dataPointsSum(extractTrendPointsForSingleServiceOperation(serviceTrends, 'count.success-span'));
+ const failureCount = dataPointsSum(extractTrendPointsForSingleServiceOperation(serviceTrends, 'count.failure-span'));
+ const successPercent = ((successCount / (successCount + failureCount)) * 100);
+
+ const opKV = {
+ serviceName: service,
+ successPercent,
+ failureCount,
+ successCount,
+ totalCount: successCount + failureCount
+ };
+
+ trendResults.push(opKV);
+ });
+ return trendResults;
+}
+
+function extractServiceSummary(serviceTrends) {
+ const successCount = extractTrendPointsForSingleServiceOperation(serviceTrends, 'count.success-span');
+ const failureCount = extractTrendPointsForSingleServiceOperation(serviceTrends, 'count.failure-span');
+ const countPoints = toCountPoints(successCount, failureCount);
+ const tp99DurationPoints = extractTrendPointsForSingleServiceOperation(serviceTrends, '*_99.duration');
+ const latestTp99DurationDatapoint = _.findLast(tp99DurationPoints, point => point.value);
+
+ return [{
+ type: 'Incoming Requests',
+ totalCount: dataPointsSum(countPoints),
+ countPoints,
+ avgSuccessPercent: toSuccessPercent(successCount, failureCount),
+ successPercentPoints: toSuccessPercentPoints(successCount, failureCount),
+ latestTp99Duration: latestTp99DurationDatapoint && latestTp99DurationDatapoint.value,
+ tp99DurationPoints
+ }];
+}
+
+function extractOperationSummary(values) {
+ const groupedByOperationName = _.groupBy(values, val => val.operationName);
+ return Object.keys(groupedByOperationName).map((operationName) => {
+ const operationTrends = groupedByOperationName[operationName];
+
+ const successPoints = extractTrendPointsForSingleServiceOperation(operationTrends, 'count.success-span');
+ const failurePoints = extractTrendPointsForSingleServiceOperation(operationTrends, 'count.failure-span');
+ const countPoints = toCountPoints(successPoints, failurePoints);
+ const tp99DurationPoints = extractTrendPointsForSingleServiceOperation(operationTrends, '*_99.duration');
+ const latestTp99DurationDatapoint = _.findLast(tp99DurationPoints, point => point.value);
+
+ return {
+ operationName,
+ totalCount: dataPointsSum(countPoints),
+ countPoints,
+ avgSuccessPercent: toSuccessPercent(successPoints, failurePoints),
+ successPercentPoints: toSuccessPercentPoints(successPoints, failurePoints),
+ latestTp99Duration: latestTp99DurationDatapoint && latestTp99DurationDatapoint.value,
+ tp99DurationPoints,
+ failurePoints
+ };
+ });
+}
+
+function getServicePerfStatsResults(timeWindow, from, until) {
+ const successTarget = getServiceTargetStat('~.*', timeWindow, 'count', 'success-span');
+ const failureTarget = getServiceTargetStat('~.*', timeWindow, 'count', 'failure-span');
+ const tp99Target = getServiceTargetStat('~.*', timeWindow, '*_99', 'duration');
+
+ return Q.all([
+ fetchTrendValues(successTarget, from, until),
+ fetchTrendValues(failureTarget, from, until),
+ fetchTrendValues(tp99Target, from, until)
+ ]).then(values => extractServicePerfStats({
+ successValues: values[0],
+ failureValues: values[1],
+ tp99Values: values[2]
+ })
+ );
+}
+
+function getServiceSummaryResults(serviceName, timeWindow, from, until) {
+ const target = getServiceTargetStat(serviceName, timeWindow, '~(count)|(\\*_99)', '~(success-span)|(failure-span)|(duration)');
+
+ return fetchTrendValues(target, from, until)
+ .then(values => extractServiceSummary(values));
+}
+
+function getServiceTrendResults(serviceName, timeWindow, from, until) {
+ const target = getServiceTargetStat(serviceName, timeWindow, '~(count)|(mean)|(\\*_95)|(\\*_99)', '~(success-span)|(failure-span)|(duration)');
+
+ return fetchTrendValues(target, from, until)
+ .then((trends) => {
+ const successCount = extractTrendPointsForSingleServiceOperation(trends, 'count.success-span');
+ const failureCount = extractTrendPointsForSingleServiceOperation(trends, 'count.failure-span');
+ return {
+ count: toCountPoints(successCount, failureCount),
+ successCount,
+ failureCount,
+ meanDuration: extractTrendPointsForSingleServiceOperation(trends, 'mean.duration'),
+ tp95Duration: extractTrendPointsForSingleServiceOperation(trends, '*_95.duration'),
+ tp99Duration: extractTrendPointsForSingleServiceOperation(trends, '*_99.duration')
+ };
+ });
+}
+
+function getOperationSummaryResults(service, timeWindow, from, until) {
+ const target = createOperationTarget(service, '~.*', timeWindow, '~(count)|(\\*_99)', '~(received-span)|(success-span)|(failure-span)|(duration)');
+
+ return fetchTrendValues(target, from, until)
+ .then(values => extractOperationSummary(values));
+}
+
+function getOperationTrendResults(serviceName, operationName, timeWindow, from, until) {
+ const target = createOperationTarget(serviceName, operationName, timeWindow, '~(count)|(mean)|(\\*_95)|(\\*_99)', '~(received-span)|(success-span)|(failure-span)|(duration)');
+
+ return fetchTrendValues(target, from, until)
+ .then((trends) => {
+ const successCount = extractTrendPointsForSingleServiceOperation(trends, 'count.success-span');
+ const failureCount = extractTrendPointsForSingleServiceOperation(trends, 'count.failure-span');
+ return {
+ count: toCountPoints(successCount, failureCount),
+ successCount,
+ failureCount,
+ meanDuration: extractTrendPointsForSingleServiceOperation(trends, 'mean.duration'),
+ tp95Duration: extractTrendPointsForSingleServiceOperation(trends, '*_95.duration'),
+ tp99Duration: extractTrendPointsForSingleServiceOperation(trends, '*_99.duration')
+ };
+ });
+}
+
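+// NOTE: the joined names below become a Metrictank regex alternation; this assumes
+// encoded service and operation names contain no regex metacharacters.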
+function getEdgeLatencyTrendResults(edges, from, until) {
+ const serviceNameRegex = edges.map(e => metricpointNameEncoder.encodeMetricpointName(e.serviceName)).join('|');
+ const operationNameRegex = edges.map(e => metricpointNameEncoder.encodeMetricpointName(e.operationName)).join('|');
+
+ const target = createServicesOperationsTarget(`~${serviceNameRegex}`, `~${operationNameRegex}`, 'OneHour', '~(mean)|(\\*_99)', 'latency');
+
+ return fetchTrendValues(target, from, until);
+}
+
+function getOperationNames(serviceName, from, until) {
+ const target = createOperationTarget(serviceName, '~.*', 'OneMinute', '~(count)|(\\*_99)', '~(received-span)|(success-span)|(failure-span)|(duration)');
+
+ return fetchTrendValues(target, from, until)
+ .then(values => (_.uniq(values.map(val => (val.operationName))))); // return only unique operation names from Metrictank response
+}
+
+// api
+connector.getServicePerfStats = (granularity, from, until) =>
+ getServicePerfStatsResults(convertGranularityToTimeWindow(granularity), toMilliseconds(from), toMilliseconds(until));
+
+connector.getServiceStats = (serviceName, granularity, from, until) =>
+ getServiceSummaryResults(metricpointNameEncoder.encodeMetricpointName(serviceName), convertGranularityToTimeWindow(granularity), toMilliseconds(from), toMilliseconds(until));
+
+connector.getServiceTrends = (serviceName, granularity, from, until) =>
+ getServiceTrendResults(metricpointNameEncoder.encodeMetricpointName(serviceName), convertGranularityToTimeWindow(granularity), toMilliseconds(from), toMilliseconds(until));
+
+connector.getOperationStats = (serviceName, granularity, from, until) =>
+ getOperationSummaryResults(metricpointNameEncoder.encodeMetricpointName(serviceName), convertGranularityToTimeWindow(granularity), toMilliseconds(from), toMilliseconds(until));
+
+connector.getOperationTrends = (serviceName, operationName, granularity, from, until) =>
+ getOperationTrendResults(metricpointNameEncoder.encodeMetricpointName(serviceName), metricpointNameEncoder.encodeMetricpointName(operationName), convertGranularityToTimeWindow(granularity), toMilliseconds(from), toMilliseconds(until));
+
+connector.getEdgeLatency = (edges, from = defaultFrom(), until = defaultUntil()) => getEdgeLatencyTrendResults(edges, from, until);
+
+connector.getOperationNames = (serviceName, from = defaultFrom(), until = defaultUntil()) => getOperationNames(metricpointNameEncoder.encodeMetricpointName(serviceName), from, until);
+
+module.exports = connector;
diff --git a/ui/server/connectors/trends/stub/trendsConnector.js b/ui/server/connectors/trends/stub/trendsConnector.js
new file mode 100644
index 000000000..5fdd07b23
--- /dev/null
+++ b/ui/server/connectors/trends/stub/trendsConnector.js
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const Q = require('q');
+const _ = require('lodash');
+
+const connector = {};
+
+function getValue(min, max) {
+ return _.round(Math.random() * (max - min) + min, 0);
+}
+
+function getTimeStamp(minutesAgo) {
+ const currentTime = new Date().getTime();
+ return currentTime - minutesAgo * 60 * 1000;
+}
+
+function getRandomValues(timeWindow, dataPoints) {
+ const valuesArr = [];
+ _.range(dataPoints).forEach((i) => valuesArr.push({value: getValue(1000, 10000000), timestamp: getTimeStamp(i * timeWindow)}));
+ return valuesArr;
+}
+
+function getRandomPercentageValues(timeWindow, dataPoints) {
+ const valuesArr = [];
+ _.range(dataPoints).forEach((i) => valuesArr.push({value: getValue(80, 100), timestamp: getTimeStamp(i * timeWindow)}));
+ return valuesArr;
+}
+
+connector.getServiceStats = (serviceName, granularity, from, until) => {
+ const deferred = Q.defer();
+
+ const range = until - from;
+ const points = range / granularity;
+ const mins = granularity / (60 * 1000);
+
+ deferred.resolve([
+ {
+ type: 'Incoming Requests',
+ totalCount: 18800,
+ countPoints: getRandomValues(mins, points),
+ avgSuccessPercent: 69.997,
+ successPercentPoints: getRandomPercentageValues(mins, points),
+ latestTp99Duration: 14530,
+ tp99DurationPoints: getRandomValues(mins, points)
+ }
+ ]);
+
+ return deferred.promise;
+};
+
+connector.getServicePerfStats = () => {
+ const deferred = Q.defer();
+
+ deferred.resolve([
+ {
+ serviceName: 'Service 1',
+ successPercent: getValue(90, 100),
+ failureCount: getValue(10, 100000),
+ totalCount: getValue(1000000000, 10000000000)
+ },
+ {serviceName: 'Service 2', successPercent: getValue(90, 100), failureCount: getValue(10, 100000), totalCount: getValue(1000000, 10000000)},
+ {serviceName: 'Service 3', successPercent: getValue(95, 100), failureCount: getValue(10, 100000), totalCount: getValue(1000000, 1000000)},
+ {serviceName: 'Service 4', successPercent: getValue(95, 100), failureCount: getValue(10, 100000), totalCount: getValue(10, 100000)},
+ {serviceName: 'Service 5', successPercent: null, failureCount: getValue(10, 100000), totalCount: getValue(10, 100000)},
+ {serviceName: 'Service 6', successPercent: getValue(95, 100), failureCount: getValue(10, 100000), totalCount: getValue(10, 100000)},
+ {serviceName: 'Service 7', successPercent: null, failureCount: getValue(10, 100000), totalCount: getValue(10, 100000)},
+ {serviceName: 'Service 8', successPercent: getValue(95, 100), failureCount: getValue(10, 100000), totalCount: getValue(10, 1000)},
+ {serviceName: 'Service 9', successPercent: getValue(95, 100), failureCount: getValue(10, 100000), totalCount: getValue(10, 1000)},
+ {serviceName: 'Service 10', successPercent: getValue(95, 100), failureCount: getValue(10, 100000), totalCount: getValue(10, 1000)},
+ {serviceName: 'Service 11', successPercent: getValue(95, 100), failureCount: getValue(10, 100000), totalCount: getValue(10, 100)},
+ {serviceName: 'Service 12', successPercent: getValue(95, 100), failureCount: getValue(10, 100000), totalCount: getValue(1, 10)},
+ {serviceName: 'Service 13', successPercent: getValue(90, 100), failureCount: getValue(10, 100000), totalCount: getValue(10, 100)},
+ {serviceName: 'Service 14', successPercent: getValue(90, 100), failureCount: getValue(10, 100000), totalCount: getValue(10, 1000)},
+ {serviceName: 'Service 15', successPercent: getValue(10, 40), failureCount: getValue(10, 100000), totalCount: getValue(10, 100)},
+ {serviceName: 'Service 16', successPercent: getValue(90, 100), failureCount: getValue(10, 100000), totalCount: getValue(1000000, 10000000)},
+ {serviceName: 'Service 17', successPercent: getValue(99, 100), failureCount: getValue(10, 100000), totalCount: getValue(10, 100000)},
+ {serviceName: 'Service 18', successPercent: getValue(99, 100), failureCount: getValue(10, 100000), totalCount: getValue(10, 100)},
+ {serviceName: 'Service 19', successPercent: getValue(10, 40), failureCount: getValue(10, 100000), totalCount: getValue(10, 100)},
+ {serviceName: 'Service 20', successPercent: getValue(0, 1), failureCount: getValue(10, 100000), totalCount: getValue(10, 100)}
+ ]);
+
+ return deferred.promise;
+};
+
+connector.getServiceTrends = (serviceName, granularity, from, until) => {
+ const deferred = Q.defer();
+
+ const range = until - from;
+ const points = range / granularity;
+ const mins = granularity / (60 * 1000);
+
+ deferred.resolve({
+ count: getRandomValues(mins, points),
+ successCount: getRandomValues(mins, points),
+ failureCount: getRandomValues(mins, points),
+ meanDuration: getRandomValues(mins, points),
+ tp95Duration: getRandomValues(mins, points),
+ tp99Duration: getRandomValues(mins, points)
+ });
+
+ return deferred.promise;
+};
+
+connector.getOperationStats = (serviceName, granularity, from, until) => {
+ const deferred = Q.defer();
+
+ const range = until - from;
+ const points = range / granularity;
+ const mins = granularity / (60 * 1000);
+
+ deferred.resolve([
+ {
+ operationName: 'tarley-1',
+ totalCount: 18800,
+ countPoints: getRandomValues(mins, points),
+ avgSuccessPercent: 69.997,
+ successPercentPoints: getRandomPercentageValues(mins, points),
+ latestTp99Duration: 14530,
+ tp99DurationPoints: getRandomValues(mins, points),
+ successPoints: getRandomValues(mins, points),
+ failurePoints: getRandomValues(mins, points)
+ },
+ {
+ operationName: 'snow-1',
+ totalCount: 15075,
+ countPoints: getRandomValues(mins, points),
+ avgSuccessPercent: 79.997,
+ successPercentPoints: getRandomPercentageValues(mins, points),
+ latestTp99Duration: 14153,
+ tp99DurationPoints: getRandomValues(mins, points),
+ successPoints: getRandomValues(mins, points),
+ failurePoints: getRandomValues(mins, points)
+ },
+ {
+ operationName: 'grayjoy-1',
+ totalCount: 299,
+ countPoints: getRandomValues(mins, points),
+ avgSuccessPercent: 89.997,
+ successPercentPoints: getRandomPercentageValues(mins, points),
+ latestTp99Duration: 14353,
+ tp99DurationPoints: getRandomValues(mins, points),
+ successPoints: getRandomValues(mins, points),
+ failurePoints: getRandomValues(mins, points)
+ },
+ {
+ operationName: 'tully-1',
+ totalCount: 58859,
+ countPoints: getRandomValues(mins, points),
+ avgSuccessPercent: 99.99,
+ successPercentPoints: getRandomPercentageValues(mins, points),
+ latestTp99Duration: 31453,
+ tp99DurationPoints: getRandomValues(mins, points),
+ successPoints: getRandomValues(mins, points),
+ failurePoints: getRandomValues(mins, points)
+ },
+ {
+ operationName: 'clegane-1',
+ totalCount: 18800,
+ countPoints: getRandomValues(mins, points),
+ avgSuccessPercent: 59.997,
+ successPercentPoints: getRandomPercentageValues(mins, points),
+ latestTp99Duration: 31453,
+ tp99DurationPoints: getRandomValues(mins, points),
+ successPoints: getRandomValues(mins, points),
+ failurePoints: getRandomValues(mins, points)
+ },
+ {
+ operationName: 'drogo-1',
+ totalCount: 15075,
+ countPoints: getRandomValues(mins, points),
+ avgSuccessPercent: 89.997,
+ successPercentPoints: getRandomPercentageValues(mins, points),
+ latestTp99Duration: 81453,
+ tp99DurationPoints: getRandomValues(mins, points),
+ successPoints: getRandomValues(mins, points),
+ failurePoints: getRandomValues(mins, points)
+ },
+ {
+ operationName: 'dondarrion-1',
+ totalCount: 5750,
+ countPoints: getRandomValues(mins, points),
+ avgSuccessPercent: 9.997,
+ successPercentPoints: getRandomPercentageValues(mins, points),
+ latestTp99Duration: 91453,
+ tp99DurationPoints: getRandomValues(mins, points),
+ successPoints: getRandomValues(mins, points),
+ failurePoints: getRandomValues(mins, points)
+ },
+ {
+ operationName: 'mormont-1',
+ totalCount: 5899,
+ countPoints: getRandomValues(mins, points),
+ avgSuccessPercent: 99.997,
+ successPercentPoints: getRandomPercentageValues(mins, points),
+ latestTp99Duration: 1453,
+ tp99DurationPoints: getRandomValues(mins, points),
+ successPoints: getRandomValues(mins, points),
+ failurePoints: getRandomValues(mins, points)
+ }
+ ]);
+
+ return deferred.promise;
+};
+
+connector.getOperationTrends = (serviceName, operationName, granularity, from, until) => {
+ const deferred = Q.defer();
+
+ const range = until - from;
+ const points = range / granularity;
+ const mins = granularity / (60 * 1000);
+
+ deferred.resolve({
+ count: getRandomValues(mins, points),
+ successCount: getRandomValues(mins, points),
+ failureCount: getRandomValues(mins, points),
+ meanDuration: getRandomValues(mins, points),
+ tp95Duration: getRandomValues(mins, points),
+ tp99Duration: getRandomValues(mins, points)
+ });
+
+ return deferred.promise;
+};
+
+module.exports = connector;
diff --git a/ui/server/connectors/utils/LoaderBackedCache.js b/ui/server/connectors/utils/LoaderBackedCache.js
new file mode 100644
index 000000000..d05a0c2a9
--- /dev/null
+++ b/ui/server/connectors/utils/LoaderBackedCache.js
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const _ = require('lodash');
+const Q = require('q');
+
+class LoaderBackedCache {
+ constructor(loader, timeout) {
+ this.cache = {};
+ this.timeout = timeout;
+ this.loader = loader;
+ }
+
+ populateIfPresent(result, key) {
+ if (!_.isEmpty(result)) {
+ this.cache[key] = {
+ item: result,
+ expiryTimestamp: Date.now() + this.timeout
+ };
+ }
+ }
+
+ get(key = 'key') {
+ const cachedItem = this.cache[key];
+
+ if (cachedItem) {
+ const isExpired = cachedItem.expiryTimestamp < Date.now();
+
+ if (isExpired) {
+ // refresh in the background; keep serving the stale item and ignore loader failures
+ this.loader(key).then((result) => this.populateIfPresent(result, key)).catch(() => {});
+ }
+
+ return Q.fcall(() => cachedItem.item);
+ }
+
+ return this.loader(key).then((result) => {
+ this.populateIfPresent(result, key);
+ return result;
+ });
+ }
+
+ getCached(key = 'key') {
+ const cachedItem = this.cache[key];
+
+ if (cachedItem) {
+ const isExpired = cachedItem.expiryTimestamp < Date.now();
+
+ if (isExpired) {
+ // refresh in the background; keep serving the stale item and ignore loader failures
+ this.loader(key).then((result) => this.populateIfPresent(result, key)).catch(() => {});
+ }
+
+ return cachedItem.item;
+ }
+
+ this.loader(key).then((result) => this.populateIfPresent(result, key)).catch(() => {});
+ return null;
+ }
+
+ get size() {
+ return Object.keys(this.cache).length;
+ }
+}
+
+module.exports = LoaderBackedCache;
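+
+// A minimal usage sketch (hypothetical loader): cache the service list for a minute.
+//
+//   const cache = new LoaderBackedCache(key => fetchServices(key), 60 * 1000);
+//   cache.get('services').then(services => ...); // loads on miss, caches non-empty results
+//   cache.getCached('services'); // returns the cached item (or null) without waiting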
diff --git a/ui/server/connectors/utils/encoders/Base64Encoder.js b/ui/server/connectors/utils/encoders/Base64Encoder.js
new file mode 100644
index 000000000..da53528bf
--- /dev/null
+++ b/ui/server/connectors/utils/encoders/Base64Encoder.js
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Base64Encoder {
+ static encode(name) {
+ return Buffer.from(name).toString('base64').replace(/=/g, '_');
+ }
+
+ static decode(name) {
+ return Buffer.from(name.replace(/_/g, '='), 'base64').toString('utf8');
+ }
+}
+
+module.exports = Base64Encoder;
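+
+// For example, Base64Encoder.encode('a') yields 'YQ__' ('YQ==' with '=' mapped to '_',
+// which is safe in metric names), and decode('YQ__') restores 'a'.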
diff --git a/ui/server/connectors/utils/encoders/MetricpointNameEncoder.js b/ui/server/connectors/utils/encoders/MetricpointNameEncoder.js
new file mode 100644
index 000000000..092ce23c9
--- /dev/null
+++ b/ui/server/connectors/utils/encoders/MetricpointNameEncoder.js
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const PeriodReplacementEncoder = require('./PeriodReplacementEncoder');
+const Base64Encoder = require('./Base64Encoder');
+const NoopEncoder = require('./NoopEncoder');
+
+class MetricpointNameEncoder {
+ constructor(encoderType) {
+ if (encoderType === 'periodreplacement') {
+ this.encoder = PeriodReplacementEncoder;
+ } else if (encoderType === 'base64') {
+ this.encoder = Base64Encoder;
+ } else {
+ this.encoder = NoopEncoder;
+ }
+ }
+
+ encodeMetricpointName(operationName) {
+ return this.encoder.encode(decodeURIComponent(operationName));
+ }
+
+ decodeMetricpointName(operationName) {
+ return this.encoder.decode(operationName);
+ }
+}
+
+module.exports = MetricpointNameEncoder;
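+
+// For example, new MetricpointNameEncoder('base64') round-trips names through
+// Base64Encoder, while any unrecognized encoderType falls back to NoopEncoder.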
diff --git a/ui/server/connectors/utils/encoders/NoopEncoder.js b/ui/server/connectors/utils/encoders/NoopEncoder.js
new file mode 100644
index 000000000..93eb7b695
--- /dev/null
+++ b/ui/server/connectors/utils/encoders/NoopEncoder.js
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class NoopEncoder {
+ static encode(name) {
+ return name;
+ }
+
+ static decode(name) {
+ return name;
+ }
+}
+
+module.exports = NoopEncoder;
diff --git a/ui/server/connectors/utils/encoders/PeriodReplacementEncoder.js b/ui/server/connectors/utils/encoders/PeriodReplacementEncoder.js
new file mode 100644
index 000000000..7ca2301f3
--- /dev/null
+++ b/ui/server/connectors/utils/encoders/PeriodReplacementEncoder.js
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class PeriodReplacementEncoder {
+ static encode(name) {
+ return name.replace(/\./gi, '___');
+ }
+
+ static decode(name) {
+ return name.replace(/___/gi, '.');
+ }
+}
+
+module.exports = PeriodReplacementEncoder;
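+
+// For example, encode('svc.prod') yields 'svc___prod' and decode restores 'svc.prod';
+// this keeps periods out of metric names, where they act as path separators.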
diff --git a/ui/server/connectors/utils/errorConverter.js b/ui/server/connectors/utils/errorConverter.js
new file mode 100644
index 000000000..9874e0cb7
--- /dev/null
+++ b/ui/server/connectors/utils/errorConverter.js
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const converter = {};
+
+converter.fromAxiosError = (error) => new Error(`${error} ${JSON.stringify(error.config)}`);
+
+converter.fromGrpcError = (error) => new Error(`${JSON.stringify(error)}`);
+
+module.exports = converter;
diff --git a/ui/server/connectors/utils/objectUtils.js b/ui/server/connectors/utils/objectUtils.js
new file mode 100644
index 000000000..b5688b894
--- /dev/null
+++ b/ui/server/connectors/utils/objectUtils.js
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const _ = require('lodash');
+
+const utils = {};
+
+// Looks up `name` in an array of JSON strings, ignoring case.
+utils.getPropIgnoringCase = (array, name) => {
+ const prop = _.compact(array.map(query => JSON.parse(query.toLowerCase())[name.toLowerCase()]));
+ return prop && prop.length > 0 && prop[0];
+};
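+// For example, getPropIgnoringCase(['{"TraceId": "ABC"}'], 'traceId') returns 'abc':
+// lowercasing the whole JSON string makes matching case-insensitive, but note it
+// lowercases values as well as keys.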
+
+module.exports = utils;
+
diff --git a/ui/server/routes/alertsApi.js b/ui/server/routes/alertsApi.js
new file mode 100644
index 000000000..afb12de84
--- /dev/null
+++ b/ui/server/routes/alertsApi.js
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const express = require('express');
+const config = require('../config/config');
+const handleResponsePromise = require('./utils/apiResponseHandler').handleResponsePromise;
+
+const alertsConnector = require(`../connectors/alerts/${config.connectors.alerts.connectorName}/alertsConnector`); // eslint-disable-line import/no-dynamic-require
+const subscriptionsConnector = require(`../connectors/alerts/${config.connectors.alerts.subscriptions.connectorName}/subscriptionsConnector`); // eslint-disable-line import/no-dynamic-require
+
+const router = express.Router();
+
+router.get('/alerts/:serviceName', (req, res, next) => {
+ handleResponsePromise(res, next, 'alerts_SVC')(
+ () => alertsConnector.getServiceAlerts(req.params.serviceName, req.query.interval, req.query.from)
+ );
+});
+
+router.get('/alerts/:serviceName/unhealthyCount', (req, res, next) => {
+ handleResponsePromise(res, next, 'alerts_SVC_unhealthyCount')(
+ () => alertsConnector.getServiceUnhealthyAlertCount(req.params.serviceName, req.query.interval)
+ );
+});
+
+router.get('/alert/:serviceName/:operationName/:alertType/history', (req, res, next) => {
+ handleResponsePromise(res, next, 'alerts_SVC_OP_TYPE')(
+ () => alertsConnector.getAnomalies(
+ req.params.serviceName,
+ req.params.operationName,
+ req.params.alertType,
+ req.query.from,
+ req.query.interval
+ )
+ );
+});
+
+router.get('/alert/:serviceName/:operationName/:alertType/:interval/subscriptions', (req, res, next) => {
+ handleResponsePromise(res, next, 'getsubscriptions_SVC_OP_TYPE')(
+ () => subscriptionsConnector.searchSubscriptions(
+ req.params.serviceName,
+ req.params.operationName,
+ req.params.alertType,
+ req.params.interval
+ )
+ );
+});
+
+router.post('/alert/:serviceName/:operationName/:alertType/subscriptions', (req, res, next) => {
+ handleResponsePromise(res, next, 'addsubscriptions_SVC_OP_TYPE')(
+ () => subscriptionsConnector.addSubscription(
+ req.body.user || 'Haystack', req.body.subscription)
+ );
+});
+
+router.put('/alert/subscriptions/:subscriptionId', (req, res, next) => {
+ handleResponsePromise(res, next, 'updatesubscriptions_SVC_OP_TYPE')(
+ () => subscriptionsConnector.updateSubscription(
+ req.params.subscriptionId,
+ req.body.subscriptions)
+ );
+});
+
+router.delete('/alert/subscriptions/:subscriptionId', (req, res, next) => {
+ handleResponsePromise(res, next, 'deletesubscriptions_SVC_OP_TYPE')(
+ () => subscriptionsConnector.deleteSubscription(req.params.subscriptionId)
+ );
+});
+
+module.exports = router;
diff --git a/ui/server/routes/auth.js b/ui/server/routes/auth.js
new file mode 100644
index 000000000..c547d10ba
--- /dev/null
+++ b/ui/server/routes/auth.js
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const express = require('express');
+const authenticatorWithRedirect = require('../sso/samlSsoAuthenticator').authenticatorWithRedirect;
+const authChecker = require('../sso/authChecker');
+
+const router = express.Router();
+
+const loggedOutHome = '/login';
+
+const authenticate = redirectUrl => authenticatorWithRedirect(redirectUrl);
+
+const extractFullRedirectUrl = req => req.originalUrl.split('/auth/login?redirectUrl=').pop();
+
+router.get('/login', (req, res, next) => {
+ const redirectUrl = extractFullRedirectUrl(req);
+ return authenticate(redirectUrl)(redirectUrl, res, next);
+});
+
+// check for active login session and then renew user
+router.get('/renewlogin', authChecker.forApi, (req, res, next) => {
+ req.login({...req.user, timestamp: Date.now()}, (err) => {
+ if (err) {
+ next(err);
+ } else {
+ res.sendStatus(200);
+ }
+ });
+});
+
+router.get('/logout', (req, res) => {
+ req.logout();
+ req.session = null;
+ res.redirect(loggedOutHome);
+});
+
+module.exports = router;
diff --git a/ui/server/routes/index.js b/ui/server/routes/index.js
new file mode 100644
index 000000000..e8e956d72
--- /dev/null
+++ b/ui/server/routes/index.js
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const express = require('express');
+const onFinished = require('finished');
+const config = require('../config/config');
+const metrics = require('../utils/metrics');
+const assets = require('../../public/assets.json');
+
+const router = express.Router();
+
+const subsystems = Object.keys(config.connectors).filter((connector) => {
+ if (connector === 'serviceInsights') {
+ return config.connectors[connector].enableServiceInsights !== 'disabled';
+ }
+ return config.connectors[connector].connectorName !== 'disabled';
+});
+
+router.get('*', (req, res) => {
+ const timer = metrics.timer('index').start();
+
+ res.render('index', {
+ bundleAppJsPath: assets.app.js,
+ bundleAppCssPath: assets.app.css,
+ bundleCommonsJsPath: assets.commons.js,
+ subsystems,
+ gaTrackingID: config.gaTrackingID,
+ usbPrimary: config.usbPrimary,
+ enableServicePerformance: config.connectors.trends && config.connectors.trends.enableServicePerformance,
+ enableServiceLevelTrends: config.connectors.trends && config.connectors.trends.enableServiceLevelTrends,
+ enableServiceInsights: config.connectors.serviceInsights && config.connectors.serviceInsights.enableServiceInsights,
+ enableSSO: config.enableSSO,
+ refreshInterval: config.refreshInterval,
+ enableAlertSubscriptions: config.connectors.alerts && config.connectors.alerts.subscriptions,
+ tracesTimePresetOptions: config.connectors.traces && config.connectors.traces.timePresetOptions,
+ timeWindowPresetOptions: config.timeWindowPresetOptions,
+ tracesTTL: config.connectors.traces && config.connectors.traces.ttl,
+ trendsTTL: config.connectors.trends && config.connectors.trends.ttl,
+ relatedTracesOptions: config.relatedTracesOptions,
+ externalLinking: config.externalLinking,
+ usingZipkinConnector: config.connectors.traces && config.connectors.traces.connectorName === 'zipkin',
+ enableBlobs: config.connectors.blobs && config.connectors.blobs.enableBlobs,
+ blobsUrl: config.connectors.blobs && config.connectors.blobs.blobsUrl
+ });
+
+ onFinished(res, () => {
+ timer.end();
+ });
+});
+
+module.exports = router;
diff --git a/ui/server/routes/login.js b/ui/server/routes/login.js
new file mode 100644
index 000000000..21f34cfa9
--- /dev/null
+++ b/ui/server/routes/login.js
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const express = require('express');
+const config = require('../config/config');
+const assets = require('../../public/assets.json');
+
+const router = express.Router();
+
+router.get('/', (req, res) => {
+ res.render('index', {
+ bundleAppJsPath: assets.app.js,
+ bundleAppCssPath: assets.app.css,
+ bundleCommonsJsPath: assets.commons.js,
+ ssoAdfsDomain: config.saml && config.saml.adfsDomain
+ });
+});
+
+module.exports = router;
diff --git a/ui/server/routes/serviceGraphApi.js b/ui/server/routes/serviceGraphApi.js
new file mode 100644
index 000000000..9702149fd
--- /dev/null
+++ b/ui/server/routes/serviceGraphApi.js
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const express = require('express');
+const config = require('../config/config');
+const handleResponsePromise = require('./utils/apiResponseHandler').handleResponsePromise;
+
+const serviceGraphConnector = require(`../connectors/serviceGraph/${config.connectors.serviceGraph.connectorName}/serviceGraphConnector`); // eslint-disable-line import/no-dynamic-require
+
+const router = express.Router();
+
+router.get('/serviceGraph', (req, res, next) => {
+ handleResponsePromise(res, next, 'svc_graph_SVC')(
+ () => serviceGraphConnector.getServiceGraphForTimeLine(req.query.from, req.query.to)
+ );
+});
+
+module.exports = router;
diff --git a/ui/server/routes/serviceInsightsApi.js b/ui/server/routes/serviceInsightsApi.js
new file mode 100644
index 000000000..c34827bb5
--- /dev/null
+++ b/ui/server/routes/serviceInsightsApi.js
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const express = require('express');
+const {handleResponsePromise} = require('./utils/apiResponseHandler');
+
+const serviceInsightsConnector = require('../connectors/serviceInsights/serviceInsightsConnector');
+
+const router = express.Router();
+
+router.get('/serviceInsights', (req, res, next) => {
+ handleResponsePromise(res, next, 'svc_insights_SVC')(() => {
+ const {serviceName, operationName, traceId, startTime, endTime, limit, relationship} = req.query;
+ return serviceInsightsConnector.getServiceInsightsForService({
+ serviceName,
+ operationName,
+ traceId,
+ startTime,
+ endTime,
+ limit,
+ relationship
+ });
+ });
+});
+
+module.exports = router;
diff --git a/ui/server/routes/servicesApi.js b/ui/server/routes/servicesApi.js
new file mode 100644
index 000000000..fc4540e14
--- /dev/null
+++ b/ui/server/routes/servicesApi.js
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const express = require('express');
+
+const handleResponsePromise = require('./utils/apiResponseHandler').handleResponsePromise;
+const servicesConnector = require('../connectors/services/servicesConnector');
+
+const router = express.Router();
+
+router.get('/services', (req, res, next) => {
+ handleResponsePromise(res, next, 'services')(
+ () => servicesConnector.getServices()
+ );
+});
+
+router.get('/operations', (req, res, next) => {
+ handleResponsePromise(res, next, 'operations')(
+ () => servicesConnector.getOperations(req.query.serviceName)
+ );
+});
+
+module.exports = router;
diff --git a/ui/server/routes/servicesPerfApi.js b/ui/server/routes/servicesPerfApi.js
new file mode 100644
index 000000000..9a7583ea6
--- /dev/null
+++ b/ui/server/routes/servicesPerfApi.js
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const express = require('express');
+
+const config = require('../config/config');
+const handleResponsePromise = require('./utils/apiResponseHandler').handleResponsePromise;
+
+const trendsConnector = require(`../connectors/trends/${config.connectors.trends.connectorName}/trendsConnector`); // eslint-disable-line import/no-dynamic-require
+
+const router = express.Router();
+
+router.get('/servicePerf', (req, res, next) => {
+ const {
+ granularity,
+ from,
+ until
+ } = req.query;
+ handleResponsePromise(res, next, 'servicePerf')(
+ () => trendsConnector.getServicePerfStats(granularity, from, until)
+ );
+});
+
+module.exports = router;
diff --git a/ui/server/routes/sso.js b/ui/server/routes/sso.js
new file mode 100644
index 000000000..515c80ea8
--- /dev/null
+++ b/ui/server/routes/sso.js
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const express = require('express');
+const authenticator = require('../sso/samlSsoAuthenticator').authenticator;
+const logger = require('../utils/logger').withIdentifier('sso');
+
+const router = express.Router();
+
+const extractFullRedirectUrl = req => req.originalUrl.split('/sso/saml/consume?redirectUrl=').pop();
+
+router.use('/saml/consume', authenticator.authenticate('saml'),
+ (req, res) => {
+ const redirectUrl = extractFullRedirectUrl(req);
+ logger.info(`action=authentication, status=success, redirectUrl=${redirectUrl}`);
+ res.redirect(redirectUrl);
+ });
+
+module.exports = router;
diff --git a/ui/server/routes/tracesApi.js b/ui/server/routes/tracesApi.js
new file mode 100644
index 000000000..2c6797941
--- /dev/null
+++ b/ui/server/routes/tracesApi.js
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+const express = require('express');
+
+const config = require('../config/config');
+const handleResponsePromise = require('./utils/apiResponseHandler').handleResponsePromise;
+
+const tracesConnector = require(`../connectors/traces/${config.connectors.traces.connectorName}/tracesConnector`); // eslint-disable-line import/no-dynamic-require
+
+const router = express.Router();
+
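+// Plural /traces routes operate on search queries; singular /trace/:traceId
+// routes fetch a single known trace.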
+router.get('/traces', (req, res, next) => {
+ handleResponsePromise(res, next, 'traces')(
+ () => tracesConnector.findTraces(req.query)
+ );
+});
+
+router.get('/traces/raw', (req, res, next) => {
+ handleResponsePromise(res, next, 'traces_raw')(
+ () => tracesConnector.getRawTraces(req.query.traceIds)
+ );
+});
+
+router.get('/traces/timeline', (req, res, next) => {
+ handleResponsePromise(res, next, 'trace_timeline')(
+ () => tracesConnector.getTimeline(req.query)
+ );
+});
+
+router.get('/traces/searchableKeys', (req, res, next) => {
+ handleResponsePromise(res, next, 'trace_searchableKeys')(
+ () => tracesConnector.getSearchableKeys()
+ );
+});
+
+router.get('/trace/:traceId', (req, res, next) => {
+ handleResponsePromise(res, next, 'trace_TRACEID')(
+ () => tracesConnector.getTrace(req.params.traceId)
+ );
+});
+
+router.get('/trace/raw/:traceId', (req, res, next) => {
+ handleResponsePromise(res, next, 'trace_raw_TRACEID')(
+ () => tracesConnector.getRawTrace(req.params.traceId)
+ );
+});
+
+router.get('/trace/raw/:traceId/:spanId', (req, res, next) => {
+ handleResponsePromise(res, next, 'trace_raw_TRACEID_SPANID')(
+ () => tracesConnector.getRawSpan(req.params.traceId, req.params.spanId, req.query.serviceName)
+ );
+});
+
+router.get('/trace/:traceId/latencyCost', (req, res, next) => {
+ handleResponsePromise(res, next, 'trace_TRACEID_latencyCost')(
+ () => tracesConnector.getLatencyCost(req.params.traceId)
+ );
+});
+
+module.exports = router;
diff --git a/ui/server/routes/trendsApi.js b/ui/server/routes/trendsApi.js
new file mode 100644
index 000000000..b4c623550
--- /dev/null
+++ b/ui/server/routes/trendsApi.js
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const express = require('express');
+
+const config = require('../config/config');
+const handleResponsePromise = require('./utils/apiResponseHandler').handleResponsePromise;
+
+const trendsConnector = require(`../connectors/trends/${config.connectors.trends.connectorName}/trendsConnector`); // eslint-disable-line import/no-dynamic-require
+
+const router = express.Router();
+
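+// Every trend route reads the same time-window parameters from the query
+// string: granularity (bucket size), from and until (window boundaries).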
+router.get('/trends/service/:serviceName/:type', (req, res, next) => {
+ const {
+ granularity,
+ from,
+ until
+ } = req.query;
+
+ handleResponsePromise(res, next, 'trends_service_SVC_TYPE')(
+ () => trendsConnector.getServiceTrends(req.params.serviceName, granularity, from, until)
+ );
+});
+
+router.get('/trends/service/:serviceName', (req, res, next) => {
+ const {
+ granularity,
+ from,
+ until
+ } = req.query;
+
+ handleResponsePromise(res, next, 'trends_service_SVC')(
+ () => trendsConnector.getServiceStats(req.params.serviceName, granularity, from, until)
+ );
+});
+
+router.get('/trends/operation/:serviceName/:operationName', (req, res, next) => {
+ const {
+ granularity,
+ from,
+ until
+ } = req.query;
+
+ handleResponsePromise(res, next, 'trends_operation_SVC_OP')(
+ () => trendsConnector.getOperationTrends(req.params.serviceName, req.params.operationName, granularity, from, until)
+ );
+});
+
+router.get('/trends/operation/:serviceName', (req, res, next) => {
+ const {
+ granularity,
+ from,
+ until
+ } = req.query;
+
+ handleResponsePromise(res, next, 'trends_operation_SVC')(
+ () => trendsConnector.getOperationStats(req.params.serviceName, granularity, from, until)
+ );
+});
+
+module.exports = router;
diff --git a/ui/server/routes/user.js b/ui/server/routes/user.js
new file mode 100644
index 000000000..48a9da483
--- /dev/null
+++ b/ui/server/routes/user.js
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const express = require('express');
+
+const router = express.Router();
+
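+// Echoes back the authenticated user that passport attached to the request.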
+router.get('/details', (req, res) => {
+ res.type('application/json').send(req.user);
+ });
+
+module.exports = router;
diff --git a/ui/server/routes/utils/apiResponseHandler.js b/ui/server/routes/utils/apiResponseHandler.js
new file mode 100644
index 000000000..fc311d685
--- /dev/null
+++ b/ui/server/routes/utils/apiResponseHandler.js
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const metrics = require('../../utils/metrics');
+
+const responseHandler = {};
+
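+// Wraps a connector call: starts a timer named after the route, sends the
+// resolved value as JSON, and on rejection marks a failure meter before
+// deferring to the error middleware. fin() and done() are Q-style promise
+// methods, so connectors are expected to return Q promises here.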
+responseHandler.handleResponsePromise = (response, next, pathName) => (operation) => {
+ const timer = metrics.timer(`http_rq_${pathName}`).start();
+
+ operation()
+ .then(
+ (result) => response.json(result),
+ (err) => {
+ metrics.meter(`http_rq_${pathName}_failed`).mark();
+ next(err);
+ }
+ )
+ .fin(() => timer.end())
+ .done();
+};
+
+module.exports = responseHandler;
diff --git a/ui/server/sso/authChecker.js b/ui/server/sso/authChecker.js
new file mode 100644
index 000000000..0f9bcd16c
--- /dev/null
+++ b/ui/server/sso/authChecker.js
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
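+// Builds an auth-guard middleware: requests with a session user pass through,
+// everything else is handed to the supplied error handler.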
+function authChecker(errorHandler) {
+ return (req, res, next) => {
+ if (req.user) {
+ next();
+ } else {
+ errorHandler(res, req.originalUrl);
+ }
+ };
+}
+
+module.exports = {
+ forPage: authChecker((res, redirectUrl) => { res.redirect(`/login?redirectUrl=${encodeURIComponent(redirectUrl)}`); }),
+ forApi: authChecker((res) => { res.status(401).send('UNAUTHORIZED'); })
+};
diff --git a/ui/server/sso/samlSsoAuthenticator.js b/ui/server/sso/samlSsoAuthenticator.js
new file mode 100644
index 000000000..ef162dcf9
--- /dev/null
+++ b/ui/server/sso/samlSsoAuthenticator.js
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const config = require('../config/config');
+
+const passport = require('passport');
+const SamlStrategy = require('passport-saml').Strategy;
+
+const loggedInHome = '/';
+const loginErrRedirect = '/login?error=true';
+const IDENTIFIER_FORMAT = 'urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified';
+const EMAIL_ADDRESS_SCHEMA = 'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress';
+const SECURITY_GROUPS_SCHEMA = 'http://schemas.xmlsoap.org/claims/Group';
+
+const logger = require('../utils/logger').withIdentifier('sso');
+
+function createSamlStrategyWithRedirect(redirectUrl) {
+ return new SamlStrategy({
+ callbackUrl: `${config.saml.callbackUrl}?redirectUrl=${redirectUrl}`,
+ entryPoint: config.saml.entry_point,
+ issuer: config.saml.issuer,
+ acceptedClockSkewMs: -1,
+ identifierFormat: IDENTIFIER_FORMAT
+ },
+ (profile, done) => {
+ const groups = profile[SECURITY_GROUPS_SCHEMA] || [];
+ const requiredSecurityGroup = config.saml.securityGroupName;
+
+ if (requiredSecurityGroup && !groups.includes(requiredSecurityGroup)) {
+ logger.info(`User '${profile[EMAIL_ADDRESS_SCHEMA]}' attempted to log in but was not part of '${requiredSecurityGroup}' security group`);
+ return done(null, false);
+ }
+
+ return done(null, {
+ id: profile.nameID,
+ email: profile[EMAIL_ADDRESS_SCHEMA],
+ timestamp: Date.now()
+ });
+ }
+ );
+}
+
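+// The whole user object is stored in the session; deserialization re-validates
+// it against the configured session timeout on every request.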
+passport.serializeUser((user, done) => {
+ done(null, user);
+});
+
+passport.deserializeUser((user, done) => {
+ if (user.id && user.timestamp && user.timestamp > (Date.now() - config.sessionTimeout)) {
+ done(null, user);
+ } else {
+ done(new Error('invalid user: timeout exceeded'), null);
+ }
+});
+
+passport.use(createSamlStrategyWithRedirect('/'));
+
+module.exports = {
+ authenticator: passport,
+ authenticatorWithRedirect: (redirectUrl) => {
+ // passport-saml has no built-in way to carry a redirect URL through the
+ // SSO flow, so a fresh SAML strategy is registered for every login request
+ passport.use(createSamlStrategyWithRedirect(redirectUrl || '/'));
+
+ return passport.authenticate('saml',
+ {
+ successRedirect: loggedInHome,
+ failureRedirect: loginErrRedirect
+ });
+ }
+};
diff --git a/ui/server/start.js b/ui/server/start.js
new file mode 100644
index 000000000..5b7747675
--- /dev/null
+++ b/ui/server/start.js
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+const app = require('./app');
+const config = require('./config/config');
+const Server = require('./utils/server');
+
+const server = new Server(app);
+
+if (config.cluster) {
+ server.startInClusterMode();
+} else {
+ server.startInStandaloneMode();
+}
diff --git a/ui/server/utils/logger.js b/ui/server/utils/logger.js
new file mode 100644
index 000000000..22aa6741f
--- /dev/null
+++ b/ui/server/utils/logger.js
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const winston = require('winston');
+const expressWinston = require('express-winston');
+const _ = require('lodash');
+const moment = require('moment');
+
+const BASE_TRANSPORT_OPTIONS = {
+ json: false
+};
+const CONSOLE_TRANSPORT_OPTIONS = _.merge({}, BASE_TRANSPORT_OPTIONS);
+
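+// Produces single-line key="value" log entries stamped with a UTC timestamp
+// and the caller-supplied identifier.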
+function getLogFormatterOptionsWithIdentifier(identifier) {
+ return {
+ timestamp: () => Date.now(),
+ formatter: (options) => {
+ const level = options.level || 'unknown';
+ const meta = _.isEmpty(options.meta) ? '' : JSON.stringify(options.meta);
+ return `${moment.utc(options.timestamp()).format()}: identifier="${identifier}" level="${level.toUpperCase()}" message="${options.message}" ${meta}`;
+ }
+ };
+}
+
+exports.withIdentifier = (identifier) => {
+ if (!identifier) {
+ throw new Error('Identifier is required while setting up a logger. For example, pass the module name that will use this logger.');
+ }
+ return new winston.Logger({
+ transports: [new winston.transports.Console(_.merge({}, CONSOLE_TRANSPORT_OPTIONS, getLogFormatterOptionsWithIdentifier(identifier)))]
+ });
+};
+
+exports.REQUEST_LOGGER = expressWinston.logger({
+ transports: [new winston.transports.Console(CONSOLE_TRANSPORT_OPTIONS)]
+});
+
+exports.ERROR_LOGGER = expressWinston.errorLogger({
+ transports: [new winston.transports.Console(CONSOLE_TRANSPORT_OPTIONS)]
+});
diff --git a/ui/server/utils/metrics.js b/ui/server/utils/metrics.js
new file mode 100644
index 000000000..b03ef156e
--- /dev/null
+++ b/ui/server/utils/metrics.js
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const measured = require('measured');
+
+module.exports = measured.createCollection();
diff --git a/ui/server/utils/metricsMiddleware.js b/ui/server/utils/metricsMiddleware.js
new file mode 100644
index 000000000..706409318
--- /dev/null
+++ b/ui/server/utils/metricsMiddleware.js
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const onFinished = require('finished');
+const metrics = require('./metrics');
+
+const middleware = {};
+
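+// Marks a meter per response status code (e.g. http_200, http_500) once the
+// response has finished writing.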
+middleware.httpMetrics = (req, res, next) => {
+ onFinished(res, () => {
+ metrics.meter(`http_${res.statusCode}`).mark();
+ });
+
+ next();
+};
+
+module.exports = middleware;
diff --git a/ui/server/utils/metricsReporter.js b/ui/server/utils/metricsReporter.js
new file mode 100644
index 000000000..b99bd48ac
--- /dev/null
+++ b/ui/server/utils/metricsReporter.js
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const flatten = require('flat');
+const metrics = require('./metrics');
+const logger = require('./logger').withIdentifier('metrics');
+
+const reporter = {};
+
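+// host and port are accepted for interface compatibility but unused for now;
+// the flattened metrics snapshot is only written to the logs on each interval.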
+reporter.start = (host, port, prefix, interval) => {
+ setInterval(() => {
+ const flattenedMetrics = flatten(metrics.toJSON());
+
+ // report to logs
+ logger.info(prefix);
+ logger.info(JSON.stringify(flattenedMetrics));
+ }, interval);
+};
+
+module.exports = reporter;
diff --git a/ui/server/utils/server.js b/ui/server/utils/server.js
new file mode 100644
index 000000000..a787d81f8
--- /dev/null
+++ b/ui/server/utils/server.js
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const OS = require('os');
+const cluster = require('cluster');
+const logger = require('./logger').withIdentifier('server');
+const https = require('https');
+const fs = require('fs');
+const config = require('../config/config');
+
+class Server {
+ constructor(expressApp) {
+ this.expressApp = expressApp;
+ }
+
+ startInStandaloneMode() {
+ const activeApp = this.expressApp;
+ const port = this.expressApp.get('port');
+
+ if (config.https && config.https.keyFile && config.https.certFile) {
+ https.createServer({
+ key: fs.readFileSync(config.https.keyFile),
+ cert: fs.readFileSync(config.https.certFile)
+ }, activeApp)
+ .listen(port, () => logger.info(`Express server listening on: ${port}`));
+ } else {
+ activeApp
+ .listen(port, () => logger.info(`Express server listening on: ${port}`));
+ }
+ }
+
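+ // Forks one worker per CPU and respawns any worker that exits; on a
+ // single-CPU host this degrades gracefully to standalone mode.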
+ startInClusterMode() {
+ const cpuCount = OS.cpus().length;
+ if (cpuCount < 2) {
+ this.startInStandaloneMode();
+ } else if (cluster.isMaster) {
+ logger.info(`Launching in cluster mode across ${cpuCount} CPUs`);
+ for (let i = 0; i < cpuCount; i += 1) {
+ cluster.fork();
+ }
+ cluster.on('exit', (worker) => {
+ logger.info(`Worker ${worker.id} exited. Launching again...`);
+ cluster.fork();
+ });
+ cluster.on('listening', (worker, address) => {
+ logger.info(`Worker ${worker.id} is now connected to ${address.address || 'localhost'}:${address.port}`);
+ });
+ } else {
+ this.startInStandaloneMode();
+ }
+ }
+}
+
+module.exports = Server;
diff --git a/ui/server/views/index.pug b/ui/server/views/index.pug
new file mode 100644
index 000000000..fb2dd282a
--- /dev/null
+++ b/ui/server/views/index.pug
@@ -0,0 +1,38 @@
+doctype html
+html
+ head
+ title= 'Haystack'
+ link(rel='stylesheet', href=bundleAppCssPath)
+ //- preload fonts (crossorigin is required for font preloads, even same-origin)
+ link(rel='preload', as='font', crossorigin, href='/fonts/titillium-web-v5-latin-300.woff2')
+ link(rel='preload', as='font', crossorigin, href='/fonts/titillium-web-v5-latin-regular.woff2')
+ link(rel='preload', as='font', crossorigin, href='/fonts/titillium-web-v5-latin-600.woff2')
+ link(rel='preload', as='font', crossorigin, href='/fonts/titillium-web-v5-latin-700.woff2')
+ link(rel='preload', as='font', crossorigin, href='/fonts/themify.woff')
+
+ body
+ #root
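+ //- Server-rendered config with safe defaults; the client reads it from
+ //- window.haystackUiConfig at boot.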
+ script.
+ var haystackUiConfig = {
+ subsystems: !{JSON.stringify(subsystems || [])},
+ gaTrackingID: !{JSON.stringify(gaTrackingID || '')},
+ enableServicePerformance: !{JSON.stringify(enableServicePerformance || false)},
+ enableServiceLevelTrends: !{JSON.stringify(enableServiceLevelTrends || false)},
+ enableServiceInsights: !{JSON.stringify(enableServiceInsights || false)},
+ services: !{JSON.stringify(services || null)},
+ enableSSO: !{JSON.stringify(enableSSO || false)},
+ ssoAdfsDomain: !{JSON.stringify(ssoAdfsDomain || '')},
+ refreshInterval: !{JSON.stringify(refreshInterval || 60000)},
+ enableAlertSubscriptions: !{JSON.stringify(enableAlertSubscriptions || false)},
+ tracesTimePresetOptions: !{JSON.stringify(tracesTimePresetOptions || [{shortName: '5m', value: 5 * 60 * 1000}, {shortName: '15m', value: 15 * 60 * 1000}, {shortName: '1h', value: 60 * 60 * 1000}, {shortName: '4h', value: 4 * 60 * 60 * 1000}, {shortName: '12h', value: 12 * 60 * 60 * 1000}, {shortName: '24h', value: 24 * 60 * 60 * 1000}, {shortName: '3d', value: 3 * 24 * 60 * 60 * 1000}])},
+ timeWindowPresetOptions: !{JSON.stringify(timeWindowPresetOptions || [{shortName: '5m', longName: '5 minutes', value: 5 * 60 * 1000}, {shortName: '15m', longName: '15 minutes', value: 15 * 60 * 1000}, {shortName: '1h', longName: '1 hour', value: 60 * 60 * 1000}, {shortName: '6h', longName: '6 hours', value: 6 * 60 * 60 * 1000}, {shortName: '12h', longName: '12 hours', value: 12 * 60 * 60 * 1000}, {shortName: '24h', longName: '24 hours', value: 24 * 60 * 60 * 1000}, {shortName: '3d', longName: '3 days', value: 3 * 24 * 60 * 60 * 1000}, {shortName: '7d', longName: '7 days', value: 7 * 24 * 60 * 60 * 1000}, {shortName: '30d', longName: '30 days', value: 30 * 24 * 60 * 60 * 1000}])},
+ tracesTTL: !{JSON.stringify(tracesTTL || -1)},
+ trendsTTL: !{JSON.stringify(trendsTTL || -1)},
+ relatedTracesOptions: !{JSON.stringify(relatedTracesOptions || [])},
+ externalLinking: !{JSON.stringify(externalLinking || [])},
+ usingZipkinConnector: !{JSON.stringify(usingZipkinConnector || false)},
+ enableBlobs: !{JSON.stringify(enableBlobs || false)},
+ blobsUrl: !{JSON.stringify(blobsUrl || '')},
+ }
+ script(async, defer, src = bundleCommonsJsPath)
+ script(async, defer, src = bundleAppJsPath)
diff --git a/ui/src/app.jsx b/ui/src/app.jsx
new file mode 100644
index 000000000..1de7392a0
--- /dev/null
+++ b/ui/src/app.jsx
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+import React from 'react';
+import ReactDOM from 'react-dom';
+import {Route, BrowserRouter as Router} from 'react-router-dom';
+import Main from './main';
+import storesInitializer from './stores/storesInitializer';
+import withTracker from './components/common/withTracker';
+
+// app initializers
+storesInitializer.init();
+
+// mount react components
+ReactDOM.render(
+ <Router>
+ <Route component={withTracker(Main)} />
+ </Router>,
+ document.getElementById('root')
+);
diff --git a/ui/src/app.less b/ui/src/app.less
new file mode 100644
index 000000000..07566bdf8
--- /dev/null
+++ b/ui/src/app.less
@@ -0,0 +1,477 @@
+/*
+ * Copyright 2018 Expedia Group
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// master less files for all other components/less files to use
+// only define common variables and utilities here
+
+@import 'bootstrap/bootstrap';
+
+@spacing-xxs : 3px;
+@spacing-xs : 6px;
+@spacing-s : 12px;
+@spacing-m : 18px;
+@spacing-l : 30px;
+@spacing-xl : 60px;
+
+html, body, #root {
+ height: 100%;
+}
+
+// common resets
+td {
+ border: none !important;
+}
+
+tr {
+ border: none;
+}
+
+table {
+ border: none;
+}
+
+// util classes
+.ellipsis {
+ overflow: hidden;
+ white-space: nowrap;
+ text-overflow: ellipsis;
+}
+
+.label-success {
+ color: @white;
+ background-color: @brand-success;
+}
+
+.label-failure {
+ color: @white;
+ background-color: @brand-danger;
+}
+
+// layout
+.primary-content {
+ background-color: @gray-lighter;
+}
+
+.primary-content__spacer {
+ padding-top: @spacing-l;
+ padding-bottom: @spacing-l;
+}
+
+.tabs-nav-container {
+ margin-bottom: @spacing-m;
+ font-size: @font-size-large;
+ ul > li > a {
+ padding: 0 @spacing-m @spacing-xs @spacing-m;
+ &:focus {
+ outline: 0;
+ }
+ }
+}
+
+.time-range-selector {
+ padding: 3px 12px 3px 6px;
+ border: 1px solid @gray-mid-light;
+ font-size: @font-size-base;
+ border-radius: 3px;
+
+ &:hover {
+ cursor: pointer;
+ }
+}
+
+.full-width {
+ width: 100%;
+}
+
+.underlined {
+ text-decoration: underline;
+}
+
+/**
+ Styling for selector
+*/
+
+.Select {
+ position: relative;
+ font-size: @font-size-large;
+}
+.Select,
+.Select div,
+.Select input,
+.Select span {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+}
+.Select.is-disabled > .Select-control {
+ background-color: #f9f9f9;
+}
+.Select.is-disabled > .Select-control:hover {
+ box-shadow: none;
+}
+.Select.is-disabled .Select-arrow-zone {
+ cursor: default;
+ pointer-events: none;
+ opacity: 0.35;
+}
+.Select-control {
+ background-color: transparent;
+ border-color: #d9d9d9 #ccc #b3b3b3;
+ border-radius: 0;
+ border: 0;
+ color: white;
+ cursor: default;
+ display: table;
+ border-spacing: 0;
+ border-collapse: separate;
+ height: 36px;
+ outline: none;
+ overflow: hidden;
+ position: relative;
+ width: 100%;
+ box-shadow: inset 0 -2px 0 #ddd;
+}
+.Select-control:hover {
+ box-shadow: inset 0 -2px 0 @brand-primary;
+}
+.Select-control .Select-input:focus {
+ outline: none;
+}
+.is-searchable.is-open > .Select-control {
+ cursor: text;
+}
+.is-open > .Select-control {
+ border-bottom-right-radius: 0;
+ border-bottom-left-radius: 0;
+ background: transparent;
+ border-color: #b3b3b3 #ccc #d9d9d9;
+}
+.is-open > .Select-control .Select-arrow {
+ top: -2px;
+ border-color: transparent transparent #999;
+ border-width: 0 5px 5px;
+}
+.is-searchable.is-focused:not(.is-open) > .Select-control {
+ cursor: text;
+}
+.is-focused:not(.is-open) > .Select-control {
+ border-color: #007eff;
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 0 3px rgba(0, 126, 255, 0.1);
+}
+.Select-placeholder,
+.Select--single > .Select-control .Select-value {
+ bottom: 0;
+ color: white;
+ left: 0;
+ line-height: 34px;
+ padding-left: 10px;
+ padding-right: 10px;
+ position: absolute;
+ right: 0;
+ top: 0;
+ max-width: 100%;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+.has-value.Select--single > .Select-control .Select-value .Select-value-label,
+.has-value.is-pseudo-focused.Select--single > .Select-control .Select-value .Select-value-label {
+ color: #333;
+}
+.has-value.Select--single > .Select-control .Select-value a.Select-value-label,
+.has-value.is-pseudo-focused.Select--single > .Select-control .Select-value a.Select-value-label {
+ cursor: pointer;
+ text-decoration: none;
+}
+.has-value.Select--single > .Select-control .Select-value a.Select-value-label:hover,
+.has-value.is-pseudo-focused.Select--single > .Select-control .Select-value a.Select-value-label:hover,
+.has-value.Select--single > .Select-control .Select-value a.Select-value-label:focus,
+.has-value.is-pseudo-focused.Select--single > .Select-control .Select-value a.Select-value-label:focus {
+ color: #007eff;
+ outline: none;
+ text-decoration: underline;
+}
+.Select-input {
+ height: 34px;
+ padding-left: 10px;
+ padding-right: 10px;
+ vertical-align: middle;
+}
+.Select-input > input {
+ width: 100%;
+ background: none transparent;
+ border: 0 none;
+ box-shadow: none;
+ cursor: default;
+ display: inline-block;
+ font-family: inherit;
+ font-size: inherit;
+ margin: 0;
+ outline: none;
+ line-height: 14px;
+ /* For IE 8 compatibility */
+ padding: 8px 0 12px;
+ /* For IE 8 compatibility */
+ -webkit-appearance: none;
+}
+.is-focused .Select-input > input {
+ cursor: text;
+}
+.has-value.is-pseudo-focused .Select-input {
+ opacity: 0;
+}
+.Select-control:not(.is-searchable) > .Select-input {
+ outline: none;
+}
+.Select-loading-zone {
+ cursor: pointer;
+ display: table-cell;
+ position: relative;
+ text-align: center;
+ vertical-align: middle;
+ width: 16px;
+}
+.Select-loading {
+ -webkit-animation: Select-animation-spin 400ms infinite linear;
+ -o-animation: Select-animation-spin 400ms infinite linear;
+ animation: Select-animation-spin 400ms infinite linear;
+ width: 16px;
+ height: 16px;
+ box-sizing: border-box;
+ border-radius: 50%;
+ border: 2px solid #ccc;
+ border-right-color: #333;
+ display: inline-block;
+ position: relative;
+ vertical-align: middle;
+}
+.Select-clear-zone {
+ -webkit-animation: Select-animation-fadeIn 200ms;
+ -o-animation: Select-animation-fadeIn 200ms;
+ animation: Select-animation-fadeIn 200ms;
+ color: #999;
+ cursor: pointer;
+ display: table-cell;
+ position: relative;
+ text-align: center;
+ vertical-align: middle;
+ width: 17px;
+}
+.Select-clear-zone:hover {
+ color: #D0021B;
+}
+.Select-clear {
+ display: inline-block;
+ font-size: 18px;
+ line-height: 1;
+}
+.Select--multi .Select-clear-zone {
+ width: 17px;
+}
+.Select-arrow-zone {
+ cursor: pointer;
+ display: table-cell;
+ position: relative;
+ text-align: center;
+ vertical-align: middle;
+ width: 25px;
+ padding-right: 5px;
+}
+.Select-arrow {
+ border-color: #999 transparent transparent;
+ border-style: solid;
+ border-width: 5px 5px 2.5px;
+ display: inline-block;
+ height: 0;
+ width: 0;
+ position: relative;
+}
+.is-open .Select-arrow,
+.Select-arrow-zone:hover > .Select-arrow {
+ border-top-color: #666;
+}
+.Select--multi .Select-multi-value-wrapper {
+ display: inline-block;
+}
+.Select .Select-aria-only {
+ display: inline-block;
+ height: 1px;
+ width: 1px;
+ margin: -1px;
+ clip: rect(0, 0, 0, 0);
+ overflow: hidden;
+ float: left;
+}
+@-webkit-keyframes Select-animation-fadeIn {
+ from {
+ opacity: 0;
+ }
+ to {
+ opacity: 1;
+ }
+}
+@keyframes Select-animation-fadeIn {
+ from {
+ opacity: 0;
+ }
+ to {
+ opacity: 1;
+ }
+}
+.Select-menu-outer {
+ border-bottom-right-radius: 4px;
+ border-bottom-left-radius: 4px;
+ background-color: #fff;
+ border: 1px solid #ccc;
+ border-top-color: #e6e6e6;
+ box-shadow: 0 1px 0 rgba(0, 0, 0, 0.06);
+ box-sizing: border-box;
+ margin-top: -1px;
+ max-height: 400px;
+ position: absolute;
+ top: 100%;
+ width: 100%;
+ z-index: 1;
+ -webkit-overflow-scrolling: touch;
+}
+.Select-menu {
+ max-height: 398px;
+ overflow-y: auto;
+}
+.Select-option {
+ box-sizing: border-box;
+ background-color: #fff;
+ color: #666666;
+ cursor: pointer;
+ display: block;
+ padding: 8px 10px;
+}
+.Select-option:last-child {
+ border-bottom-right-radius: 4px;
+ border-bottom-left-radius: 4px;
+}
+.Select-option.is-selected {
+ background-color: #f5faff;
+ /* Fallback color for IE 8 */
+ background-color: rgba(0, 126, 255, 0.04);
+ color: #333;
+}
+.Select-option.is-focused {
+ background-color: #ebf5ff;
+ /* Fallback color for IE 8 */
+ background-color: rgba(0, 126, 255, 0.08);
+ color: #333;
+}
+.Select-option.is-disabled {
+ color: #cccccc;
+ cursor: default;
+}
+.Select-noresults {
+ box-sizing: border-box;
+ color: #999999;
+ cursor: default;
+ display: block;
+ padding: 8px 10px;
+}
+.Select--multi .Select-input {
+ vertical-align: middle;
+ margin-left: 10px;
+ padding: 0;
+}
+.Select--multi.has-value .Select-input {
+ margin-left: 5px;
+}
+.Select--multi .Select-value {
+ background-color: #ebf5ff;
+ /* Fallback color for IE 8 */
+ background-color: rgba(0, 126, 255, 0.08);
+ border-radius: 2px;
+ border: 1px solid #c2e0ff;
+ /* Fallback color for IE 8 */
+ border: 1px solid rgba(0, 126, 255, 0.24);
+ color: #007eff;
+ display: inline-block;
+ font-size: 0.9em;
+ line-height: 1.4;
+ margin-left: 5px;
+ margin-top: 5px;
+ vertical-align: top;
+}
+.Select--multi .Select-value-icon,
+.Select--multi .Select-value-label {
+ display: inline-block;
+ vertical-align: middle;
+}
+.Select--multi .Select-value-label {
+ border-bottom-right-radius: 2px;
+ border-top-right-radius: 2px;
+ cursor: default;
+ padding: 2px 5px;
+}
+.Select--multi a.Select-value-label {
+ color: #007eff;
+ cursor: pointer;
+ text-decoration: none;
+}
+.Select--multi a.Select-value-label:hover {
+ text-decoration: underline;
+}
+.Select--multi .Select-value-icon {
+ cursor: pointer;
+ border-bottom-left-radius: 2px;
+ border-top-left-radius: 2px;
+ border-right: 1px solid #c2e0ff;
+ /* Fallback color for IE 8 */
+ border-right: 1px solid rgba(0, 126, 255, 0.24);
+ padding: 1px 5px 3px;
+}
+.Select--multi .Select-value-icon:hover,
+.Select--multi .Select-value-icon:focus {
+ background-color: #d8eafd;
+ /* Fallback color for IE 8 */
+ background-color: rgba(0, 113, 230, 0.08);
+ color: #0071e6;
+}
+.Select--multi .Select-value-icon:active {
+ background-color: #c2e0ff;
+ /* Fallback color for IE 8 */
+ background-color: rgba(0, 126, 255, 0.24);
+}
+.Select--multi.is-disabled .Select-value {
+ background-color: #fcfcfc;
+ border: 1px solid #e3e3e3;
+ color: #333;
+}
+.Select--multi.is-disabled .Select-value-icon {
+ cursor: not-allowed;
+ border-right: 1px solid #e3e3e3;
+}
+.Select--multi.is-disabled .Select-value-icon:hover,
+.Select--multi.is-disabled .Select-value-icon:focus,
+.Select--multi.is-disabled .Select-value-icon:active {
+ background-color: #fcfcfc;
+}
+@keyframes Select-animation-spin {
+ to {
+ transform: rotate(1turn);
+ }
+}
+@-webkit-keyframes Select-animation-spin {
+ to {
+ -webkit-transform: rotate(1turn);
+ }
+}
\ No newline at end of file
diff --git a/ui/src/bootstrap/LICENSE_BOOTSTRAP b/ui/src/bootstrap/LICENSE_BOOTSTRAP
new file mode 100755
index 000000000..7a300022c
--- /dev/null
+++ b/ui/src/bootstrap/LICENSE_BOOTSTRAP
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2011-2016 Twitter, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/ui/src/bootstrap/LICENSE_BOOTSWATCH b/ui/src/bootstrap/LICENSE_BOOTSWATCH
new file mode 100644
index 000000000..c470246ff
--- /dev/null
+++ b/ui/src/bootstrap/LICENSE_BOOTSWATCH
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Thomas Park
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/ui/src/bootstrap/alerts.less b/ui/src/bootstrap/alerts.less
new file mode 100755
index 000000000..c4199db92
--- /dev/null
+++ b/ui/src/bootstrap/alerts.less
@@ -0,0 +1,73 @@
+//
+// Alerts
+// --------------------------------------------------
+
+
+// Base styles
+// -------------------------
+
+.alert {
+ padding: @alert-padding;
+ margin-bottom: @line-height-computed;
+ border: 1px solid transparent;
+ border-radius: @alert-border-radius;
+
+ // Headings for larger alerts
+ h4 {
+ margin-top: 0;
+ // Specified for the h4 to prevent conflicts of changing @headings-color
+ color: inherit;
+ }
+
+ // Provide class for links that match alerts
+ .alert-link {
+ font-weight: @alert-link-font-weight;
+ }
+
+ // Improve alignment and spacing of inner content
+ > p,
+ > ul {
+ margin-bottom: 0;
+ }
+
+ > p + p {
+ margin-top: 5px;
+ }
+}
+
+// Dismissible alerts
+//
+// Expand the right padding and account for the close button's positioning.
+
+.alert-dismissable, // The misspelled .alert-dismissable was deprecated in 3.2.0.
+.alert-dismissible {
+ padding-right: (@alert-padding + 20);
+
+ // Adjust close link position
+ .close {
+ position: relative;
+ top: -2px;
+ right: -21px;
+ color: inherit;
+ }
+}
+
+// Alternate styles
+//
+// Generate contextual modifier classes for colorizing the alert.
+
+.alert-success {
+ .alert-variant(@alert-success-bg; @alert-success-border; @alert-success-text);
+}
+
+.alert-info {
+ .alert-variant(@alert-info-bg; @alert-info-border; @alert-info-text);
+}
+
+.alert-warning {
+ .alert-variant(@alert-warning-bg; @alert-warning-border; @alert-warning-text);
+}
+
+.alert-danger {
+ .alert-variant(@alert-danger-bg; @alert-danger-border; @alert-danger-text);
+}
diff --git a/ui/src/bootstrap/badges.less b/ui/src/bootstrap/badges.less
new file mode 100755
index 000000000..6ee16dca4
--- /dev/null
+++ b/ui/src/bootstrap/badges.less
@@ -0,0 +1,66 @@
+//
+// Badges
+// --------------------------------------------------
+
+
+// Base class
+.badge {
+ display: inline-block;
+ min-width: 10px;
+ padding: 3px 7px;
+ font-size: @font-size-small;
+ font-weight: @badge-font-weight;
+ color: @badge-color;
+ line-height: @badge-line-height;
+ vertical-align: middle;
+ white-space: nowrap;
+ text-align: center;
+ background-color: @badge-bg;
+ border-radius: @badge-border-radius;
+
+ // Empty badges collapse automatically (not available in IE8)
+ &:empty {
+ display: none;
+ }
+
+ // Quick fix for badges in buttons
+ .btn & {
+ position: relative;
+ top: -1px;
+ }
+
+ .btn-xs &,
+ .btn-group-xs > .btn & {
+ top: 0;
+ padding: 1px 5px;
+ }
+
+ // Hover state, but only for links
+ a& {
+ &:hover,
+ &:focus {
+ color: @badge-link-hover-color;
+ text-decoration: none;
+ cursor: pointer;
+ }
+ }
+
+ // Account for badges in navs
+ .list-group-item.active > &,
+ .nav-pills > .active > a > & {
+ color: @badge-active-color;
+ background-color: @badge-active-bg;
+ }
+
+ .list-group-item > & {
+ float: right;
+ }
+
+ .list-group-item > & + & {
+ margin-right: 5px;
+ }
+
+ .nav-pills > li > a > & {
+ margin-left: 3px;
+ }
+}
diff --git a/ui/src/bootstrap/bootstrap.less b/ui/src/bootstrap/bootstrap.less
new file mode 100755
index 000000000..27a362a81
--- /dev/null
+++ b/ui/src/bootstrap/bootstrap.less
@@ -0,0 +1,63 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+
+// Core variables and mixins
+@import 'variables.less';
+@import 'mixins.less';
+
+// Reset and dependencies
+@import 'normalize.less';
+@import 'print.less';
+@import 'themify-icons.less';
+@import 'titillium-web.less';
+
+// Core CSS
+@import 'scaffolding.less';
+@import 'type.less';
+@import 'code.less';
+@import 'grid.less';
+@import 'tables.less';
+@import 'forms.less';
+@import 'buttons.less';
+
+// Components
+@import 'component-animations.less';
+@import 'dropdowns.less';
+@import 'button-groups.less';
+@import 'input-groups.less';
+@import 'navs.less';
+@import 'navbar.less';
+//@import "breadcrumbs.less";
+@import 'pagination.less';
+@import 'pager.less';
+@import 'labels.less';
+@import 'badges.less';
+@import 'jumbotron.less';
+//@import "thumbnails.less";
+//@import "alerts.less";
+//@import "progress-bars.less";
+//@import "media.less";
+@import 'list-group.less';
+//@import "panels.less";
+//@import "responsive-embed.less";
+@import 'wells.less';
+@import 'close.less';
+
+// Components w/ JavaScript
+@import 'modals.less';
+@import 'tooltip.less';
+//@import "popovers.less";
+//@import "carousel.less";
+
+// Utility classes
+@import 'utilities.less';
+@import 'responsive-utilities.less';
+
+// custom colors
+@import 'svc-colors.less';
+
+// Theme override
+@import 'bootswatch.less';
diff --git a/ui/src/bootstrap/bootswatch.less b/ui/src/bootstrap/bootswatch.less
new file mode 100644
index 000000000..e682c1415
--- /dev/null
+++ b/ui/src/bootstrap/bootswatch.less
@@ -0,0 +1,639 @@
+// Paper 3.3.7
+// Bootswatch
+// -----------------------------------------------------
+
+// Effects =================================================================
+
+.ripple(@color) {
+ position: relative;
+
+ &:after {
+ content: "";
+ display: block;
+ position: absolute;
+ width: 100%;
+ height: 100%;
+ top: 0;
+ left: 0;
+ #gradient > .radial(@color 10%, transparent 10.01%);
+ background-size: 1000% 1000%;
+ background-position: 50%;
+ opacity: 0;
+ pointer-events: none;
+ transition: background .5s, opacity 1s;
+ }
+
+ &:active:after {
+ background-size: 0% 0%;
+ opacity: .2;
+ transition: 0s;
+ }
+}
+
+// Navbar =====================================================================
+
+.navbar {
+ border: none;
+ .box-shadow(0 1px 2px rgba(0,0,0,.3));
+
+ &-brand {
+ font-size: 24px;
+ }
+
+ &-inverse {
+ .navbar-form {
+
+ input[type=text],
+ input[type=password] {
+ color: #fff;
+ .box-shadow(inset 0 -1px 0 @navbar-inverse-link-color);
+ .placeholder(@navbar-inverse-link-color);
+
+ &:focus {
+ .box-shadow(inset 0 -2px 0 #fff);
+ }
+ }
+ }
+ }
+}
+
+// Buttons ====================================================================
+
+#btn(@class,@bg,@color) {
+ .btn-@{class} {
+ &:focus {
+ background-color: @bg;
+ }
+
+ &:hover,
+ &:active:hover {
+ background-color: darken(@bg, 6%);
+ }
+
+ &:active {
+ .box-shadow(2px 2px 4px rgba(0,0,0,.4));
+ }
+
+ .ripple(@color);
+ }
+}
+
+#btn(default,@btn-default-bg,@btn-default-color);
+#btn(primary,@btn-primary-bg,@btn-primary-color);
+#btn(success,@btn-success-bg,@btn-success-color);
+#btn(info,@btn-info-bg,@btn-info-color);
+#btn(warning,@btn-warning-bg,@btn-warning-color);
+#btn(danger,@btn-danger-bg,@btn-danger-color);
+#btn(link,#fff,@btn-default-color);
+
+.btn {
+ text-transform: uppercase;
+ border: none;
+ .box-shadow(1px 1px 4px rgba(0,0,0,.4));
+ .transition(all 0.4s);
+
+ &-link {
+ border-radius: @btn-border-radius-base;
+ .box-shadow(none);
+ color: @btn-default-color;
+
+ &:hover,
+ &:focus {
+ .box-shadow(none);
+ color: @btn-default-color;
+ text-decoration: none;
+ }
+
+ .disabled, // Although btn-link is intended for buttons, which want to look like link, I include here a.disable for the sake of consistency
+ &[disabled],
+ fieldset[disabled] & {
+ &:hover,
+ &:active:hover {
+ background-color: #fff;
+ color: @btn-default-color;
+ }
+ }
+ }
+
+ &-default {
+ &.disabled,
+ &[disabled],
+ fieldset[disabled] & {
+ background-color: rgba(0, 0, 0, 0.1);
+ color: rgba(0, 0, 0, 0.4);
+ opacity: 1;
+
+ &:hover,
+ &:focus {
+ background-color: rgba(0, 0, 0, 0.1);
+ }
+ }
+ }
+}
+
+.btn-group {
+ .btn + .btn,
+ .btn + .btn-group,
+ .btn-group + .btn,
+ .btn-group + .btn-group {
+ margin-left: 0;
+ }
+
+ &-vertical {
+ > .btn + .btn,
+ > .btn + .btn-group,
+ > .btn-group + .btn,
+ > .btn-group + .btn-group {
+ margin-top: 0;
+ }
+ }
+}
+
+// Typography =================================================================
+
+body {
+ -webkit-font-smoothing: antialiased;
+ letter-spacing: .1px;
+}
+
+p {
+ margin: 0 0 1em;
+}
+
+input,
+button {
+ -webkit-font-smoothing: antialiased;
+ letter-spacing: .1px;
+}
+
+a {
+ .transition(all 0.2s);
+}
+
+// Tables =====================================================================
+
+.table-hover {
+ > tbody > tr,
+ > tbody > tr > th,
+ > tbody > tr > td {
+ .transition(all 0.2s);
+ }
+}
+
+// Forms ======================================================================
+
+label {
+ font-weight: normal;
+}
+
+textarea,
+textarea.form-control,
+input.form-control,
+input[type=text],
+input[type=password],
+input[type=email],
+input[type=number],
+[type=text].form-control,
+[type=password].form-control,
+[type=email].form-control,
+[type=tel].form-control,
+[contenteditable].form-control {
+ padding: 0;
+ border: none;
+ border-radius: 0;
+ -webkit-appearance: none;
+ .box-shadow(inset 0 -1px 0 #ddd);
+ font-size: 16px;
+
+ &:focus, &:hover {
+ .box-shadow(inset 0 -2px 0 @brand-primary);
+ }
+
+ &[disabled],
+ &[readonly] {
+ .box-shadow(none);
+ border-bottom: 1px dotted #ddd;
+ }
+
+ &.input {
+ &-sm {
+ font-size: @font-size-small;
+ }
+
+ &-lg {
+ font-size: @font-size-large;
+ }
+ }
+}
+
+select,
+select.form-control {
+ border: 0;
+ border-radius: 0;
+ -webkit-appearance: none;
+ -moz-appearance: none;
+ appearance: none;
+ padding-left: 0;
+ padding-right: 0\9; // remove padding for < ie9 since default arrow can't be removed
+ background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABoAAAAaCAMAAACelLz8AAAAJ1BMVEVmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmaP/QSjAAAADHRSTlMAAgMJC0uWpKa6wMxMdjkoAAAANUlEQVR4AeXJyQEAERAAsNl7Hf3X6xt0QL6JpZWq30pdvdadme+0PMdzvHm8YThHcT1H7K0BtOMDniZhWOgAAAAASUVORK5CYII=);
+ background-size: 13px;
+ background-repeat: no-repeat;
+ background-position: right center;
+ .box-shadow(inset 0 -1px 0 #ddd);
+ font-size: 16px;
+ line-height: 1.5;
+
+ &::-ms-expand {
+ display: none;
+ }
+
+ &.input {
+ &-sm {
+ font-size: @font-size-small;
+ }
+
+ &-lg {
+ font-size: @font-size-large;
+ }
+ }
+
+ &:focus {
+ .box-shadow(inset 0 -2px 0 @brand-primary);
+ background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABoAAAAaCAMAAACelLz8AAAAJ1BMVEUhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISF8S9ewAAAADHRSTlMAAgMJC0uWpKa6wMxMdjkoAAAANUlEQVR4AeXJyQEAERAAsNl7Hf3X6xt0QL6JpZWq30pdvdadme+0PMdzvHm8YThHcT1H7K0BtOMDniZhWOgAAAAASUVORK5CYII=);
+ }
+
+ &[disabled],
+ &[readonly] {
+ .box-shadow(none);
+ border-bottom: 1px dotted #ddd;
+
+ &,
+ option {
+ color: @input-color-placeholder;
+ }
+ }
+
+ &[multiple] {
+ background: none;
+ }
+}
+
+.radio,
+.radio-inline,
+.checkbox,
+.checkbox-inline {
+ label {
+ padding-left: 25px;
+ }
+
+ input[type="radio"],
+ input[type="checkbox"] {
+ margin-left: -25px;
+ }
+}
+
+input[type="radio"],
+.radio input[type="radio"],
+.radio-inline input[type="radio"] {
+ position: relative;
+ margin-top: 6px;
+ margin-right: 4px;
+ vertical-align: top;
+ border: none;
+ background-color: transparent;
+ -webkit-appearance: none;
+ appearance: none;
+ cursor: pointer;
+
+ &:focus {
+ outline: none;
+ }
+
+ &:before,
+ &:after {
+ content: "";
+ display: block;
+ width: 18px;
+ height: 18px;
+ border-radius: 50%;
+ .transition(240ms);
+ }
+
+ &:before {
+ position: absolute;
+ left: 0;
+ top: -3px;
+ background-color: @brand-primary;
+ .scale(0);
+ }
+
+ &:after {
+ position: relative;
+ top: -3px;
+ border: 2px solid @gray;
+ }
+
+ &:checked:before {
+ .scale(0.5);
+ }
+
+ &:disabled:checked:before {
+ background-color: @gray-light;
+ }
+
+ &:checked:after {
+ border-color: @brand-primary;
+ }
+
+ &:disabled:after,
+ &:disabled:checked:after {
+ border-color: @gray-light;
+ }
+}
+
+input[type="checkbox"],
+.checkbox input[type="checkbox"],
+.checkbox-inline input[type="checkbox"] {
+ position: relative;
+ border: none;
+ margin-bottom: -4px;
+ -webkit-appearance: none;
+ appearance: none;
+ cursor: pointer;
+
+ &:focus {
+ outline: none;
+ }
+
+ &:focus:after {
+ border-color: @brand-primary;
+ }
+
+ &:after {
+ content: "";
+ display: block;
+ width: 18px;
+ height: 18px;
+ margin-top: -2px;
+ margin-right: 5px;
+ border: 2px solid @gray;
+ border-radius: 2px;
+ .transition(240ms);
+ }
+
+ &:checked:before {
+ content: "";
+ position: absolute;
+ top: 0;
+ left: 6px;
+ display: table;
+ width: 6px;
+ height: 12px;
+ border: 2px solid #fff;
+ border-top-width: 0;
+ border-left-width: 0;
+ .rotate(45deg);
+ }
+
+ &:checked:after {
+ background-color: @brand-primary;
+ border-color: @brand-primary;
+ }
+
+ &:disabled:after {
+ border-color: @gray-light;
+ }
+
+ &:disabled:checked:after {
+ background-color: @gray-light;
+ border-color: transparent;
+ }
+}
+
+.has-warning {
+ input:not([type=checkbox]),
+ .form-control,
+ input.form-control[readonly],
+ input[type=text][readonly],
+ [type=text].form-control[readonly],
+ input:not([type=checkbox]):focus,
+ .form-control:focus {
+ border-bottom: none;
+ .box-shadow(inset 0 -2px 0 @brand-warning);
+ }
+}
+
+.has-error {
+ input:not([type=checkbox]),
+ .form-control,
+ input.form-control[readonly],
+ input[type=text][readonly],
+ [type=text].form-control[readonly],
+ input:not([type=checkbox]):focus,
+ .form-control:focus {
+ border-bottom: none;
+ .box-shadow(inset 0 -2px 0 @brand-danger);
+ }
+}
+
+.has-success {
+ input:not([type=checkbox]),
+ .form-control,
+ input.form-control[readonly],
+ input[type=text][readonly],
+ [type=text].form-control[readonly],
+ input:not([type=checkbox]):focus,
+ .form-control:focus {
+ border-bottom: none;
+ .box-shadow(inset 0 -2px 0 @brand-success);
+ }
+}
+
+// Remove the Bootstrap feedback styles for input addons
+.input-group-addon {
+ .has-warning &, .has-error &, .has-success & {
+ color: @input-color;
+ border-color: @input-group-addon-border-color;
+ background-color: @input-group-addon-bg;
+ }
+}
+
+.form-group-lg {
+ select,
+ select.form-control {
+ line-height: 1.5;
+ }
+}
+
+// Navs =======================================================================
+
+.nav-tabs {
+ > li > a,
+ > li > a:focus {
+ margin-right: 0;
+ background-color: transparent;
+ border: none;
+ color: @navbar-default-link-color;
+ .box-shadow(inset 0 -1px 0 #ddd);
+ .transition(all 0.2s);
+
+ &:hover {
+ background-color: transparent;
+ .box-shadow(inset 0 -2px 0 @brand-primary);
+ color: @brand-primary;
+ }
+ }
+
+ & > li.active > a,
+ & > li.active > a:focus {
+ border: none;
+ .box-shadow(inset 0 -2px 0 @brand-primary);
+ color: @brand-primary;
+
+ &:hover {
+ border: none;
+ color: @brand-primary;
+ }
+ }
+
+ & > li.disabled > a {
+ .box-shadow(inset 0 -1px 0 #ddd);
+ }
+
+ &.nav-justified {
+
+ & > li > a,
+ & > li > a:hover,
+ & > li > a:focus,
+ & > .active > a,
+ & > .active > a:hover,
+ & > .active > a:focus {
+ border: none;
+ }
+ }
+
+ .dropdown-menu {
+ margin-top: 0;
+ }
+}
+
+.dropdown-menu {
+ margin-top: 0;
+ border: none;
+ .box-shadow(0 1px 4px rgba(0,0,0,.3));
+}
+
+// Indicators =================================================================
+
+.alert {
+ border: none;
+}
+
+.badge {
+ padding: 4px 6px 4px;
+}
+
+.progress {
+ position: relative;
+ z-index: 1;
+ height: 6px;
+ border-radius: 0;
+
+ .box-shadow(none);
+
+ &-bar {
+ .box-shadow(none);
+
+ &:last-child {
+ border-radius: 0 3px 3px 0;
+ }
+
+ &:last-child {
+ &:before {
+ display: block;
+ content: "";
+ position: absolute;
+ width: 100%;
+ height: 100%;
+ left: 0;
+ right: 0;
+ z-index: -1;
+ background-color: lighten(@progress-bar-bg, 35%);
+ }
+ }
+
+ &-success:last-child.progress-bar:before {
+ background-color: lighten(@progress-bar-success-bg, 35%);
+ }
+
+ &-info:last-child.progress-bar:before {
+ background-color: lighten(@progress-bar-info-bg, 45%);
+ }
+ &-warning:last-child.progress-bar:before {
+ background-color: lighten(@progress-bar-warning-bg, 35%);
+ }
+
+ &-danger:last-child.progress-bar:before {
+ background-color: lighten(@progress-bar-danger-bg, 25%);
+ }
+ }
+}
+
+// Progress bars ==============================================================
+
+// Containers =================================================================
+
+.close {
+ font-size: 34px;
+ font-weight: 300;
+ line-height: 24px;
+ opacity: 0.6;
+ .transition(all 0.2s);
+
+ &:hover {
+ opacity: 1;
+ }
+}
+
+.list-group {
+
+ &-item {
+ padding: 15px;
+ }
+
+ &-item-text {
+ color: @gray-light;
+ }
+}
+
+.well {
+ border-radius: 0;
+ .box-shadow(none);
+}
+
+.panel {
+ border: none;
+ border-radius: 2px;
+ .box-shadow(0 1px 4px rgba(0,0,0,.3));
+
+ &-heading {
+ border-bottom: none;
+ }
+
+ &-footer {
+ border-top: none;
+ }
+}
+
+.popover {
+ border: none;
+ .box-shadow(0 1px 4px rgba(0,0,0,.3));
+}
+
+.carousel {
+ &-caption {
+ h1, h2, h3, h4, h5, h6 {
+ color: inherit;
+ }
+ }
+}
\ No newline at end of file
diff --git a/ui/src/bootstrap/breadcrumbs.less b/ui/src/bootstrap/breadcrumbs.less
new file mode 100755
index 000000000..cb01d503f
--- /dev/null
+++ b/ui/src/bootstrap/breadcrumbs.less
@@ -0,0 +1,26 @@
+//
+// Breadcrumbs
+// --------------------------------------------------
+
+
+.breadcrumb {
+ padding: @breadcrumb-padding-vertical @breadcrumb-padding-horizontal;
+ margin-bottom: @line-height-computed;
+ list-style: none;
+ background-color: @breadcrumb-bg;
+ border-radius: @border-radius-base;
+
+ > li {
+ display: inline-block;
+
+ + li:before {
+ content: "@{breadcrumb-separator}\00a0"; // Unicode space added since inline-block means non-collapsing white-space
+ padding: 0 5px;
+ color: @breadcrumb-color;
+ }
+ }
+
+ > .active {
+ color: @breadcrumb-active-color;
+ }
+}
diff --git a/ui/src/bootstrap/button-groups.less b/ui/src/bootstrap/button-groups.less
new file mode 100755
index 000000000..16db0c613
--- /dev/null
+++ b/ui/src/bootstrap/button-groups.less
@@ -0,0 +1,244 @@
+//
+// Button groups
+// --------------------------------------------------
+
+// Make the div behave like a button
+.btn-group,
+.btn-group-vertical {
+ position: relative;
+ display: inline-block;
+ vertical-align: middle; // match .btn alignment given font-size hack above
+ > .btn {
+ position: relative;
+ float: left;
+ // Bring the "active" button to the front
+ &:hover,
+ &:focus,
+ &:active,
+ &.active {
+ z-index: 2;
+ }
+ }
+}
+
+// Prevent double borders when buttons are next to each other
+.btn-group {
+ .btn + .btn,
+ .btn + .btn-group,
+ .btn-group + .btn,
+ .btn-group + .btn-group {
+ margin-left: -1px;
+ }
+}
+
+// Optional: Group multiple button groups together for a toolbar
+.btn-toolbar {
+ margin-left: -5px; // Offset the first child's margin
+ &:extend(.clearfix all);
+
+ .btn,
+ .btn-group,
+ .input-group {
+ float: left;
+ }
+ > .btn,
+ > .btn-group,
+ > .input-group {
+ margin-left: 5px;
+ }
+}
+
+.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {
+ border-radius: 0;
+}
+
+// Set corners individually, because sometimes a single button can be in a .btn-group and we need :first-child and :last-child to both match
+.btn-group > .btn:first-child {
+ margin-left: 0;
+ &:not(:last-child):not(.dropdown-toggle) {
+ .border-right-radius(0);
+ }
+}
+// Need .dropdown-toggle since :last-child doesn't apply, given that a .dropdown-menu is used immediately after it
+.btn-group > .btn:last-child:not(:first-child),
+.btn-group > .dropdown-toggle:not(:first-child) {
+ .border-left-radius(0);
+}
+
+// Custom edits for including btn-groups within btn-groups (useful for including dropdown buttons within a btn-group)
+.btn-group > .btn-group {
+ float: left;
+}
+.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {
+ border-radius: 0;
+}
+.btn-group > .btn-group:first-child:not(:last-child) {
+ > .btn:last-child,
+ > .dropdown-toggle {
+ .border-right-radius(0);
+ }
+}
+.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {
+ .border-left-radius(0);
+}
+
+// On active and open, don't show outline
+.btn-group .dropdown-toggle:active,
+.btn-group.open .dropdown-toggle {
+ outline: 0;
+}
+
+
+// Sizing
+//
+// Remix the default button sizing classes into new ones for easier manipulation.
+
+.btn-group-xs > .btn { &:extend(.btn-xs); }
+.btn-group-sm > .btn { &:extend(.btn-sm); }
+.btn-group-lg > .btn { &:extend(.btn-lg); }
+
+
+// Split button dropdowns
+// ----------------------
+
+// Give the line between buttons some depth
+.btn-group > .btn + .dropdown-toggle {
+ padding-left: 8px;
+ padding-right: 8px;
+}
+.btn-group > .btn-lg + .dropdown-toggle {
+ padding-left: 12px;
+ padding-right: 12px;
+}
+
+// The clickable button for toggling the menu
+// Remove the gradient and set the same inset shadow as the :active state
+.btn-group.open .dropdown-toggle {
+ .box-shadow(inset 0 3px 5px rgba(0,0,0,.125));
+
+ // Show no shadow for `.btn-link` since it has no other button styles.
+ &.btn-link {
+ .box-shadow(none);
+ }
+}
+
+
+// Reposition the caret
+.btn .caret {
+ margin-left: 0;
+}
+// Carets in other button sizes
+.btn-lg .caret {
+ border-width: @caret-width-large @caret-width-large 0;
+ border-bottom-width: 0;
+}
+// Upside down carets for .dropup
+.dropup .btn-lg .caret {
+ border-width: 0 @caret-width-large @caret-width-large;
+}
+
+
+// Vertical button groups
+// ----------------------
+
+.btn-group-vertical {
+ > .btn,
+ > .btn-group,
+ > .btn-group > .btn {
+ display: block;
+ float: none;
+ width: 100%;
+ max-width: 100%;
+ }
+
+ // Clear floats so dropdown menus can be properly placed
+ > .btn-group {
+ &:extend(.clearfix all);
+ > .btn {
+ float: none;
+ }
+ }
+
+ > .btn + .btn,
+ > .btn + .btn-group,
+ > .btn-group + .btn,
+ > .btn-group + .btn-group {
+ margin-top: -1px;
+ margin-left: 0;
+ }
+}
+
+.btn-group-vertical > .btn {
+ &:not(:first-child):not(:last-child) {
+ border-radius: 0;
+ }
+ &:first-child:not(:last-child) {
+ .border-top-radius(@btn-border-radius-base);
+ .border-bottom-radius(0);
+ }
+ &:last-child:not(:first-child) {
+ .border-top-radius(0);
+ .border-bottom-radius(@btn-border-radius-base);
+ }
+}
+.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {
+ border-radius: 0;
+}
+.btn-group-vertical > .btn-group:first-child:not(:last-child) {
+ > .btn:last-child,
+ > .dropdown-toggle {
+ .border-bottom-radius(0);
+ }
+}
+.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {
+ .border-top-radius(0);
+}
+
+
+// Justified button groups
+// ----------------------
+
+.btn-group-justified {
+ display: table;
+ width: 100%;
+ table-layout: fixed;
+ border-collapse: separate;
+ > .btn,
+ > .btn-group {
+ float: none;
+ display: table-cell;
+ width: 1%;
+ }
+ > .btn-group .btn {
+ width: 100%;
+ }
+
+ > .btn-group .dropdown-menu {
+ left: auto;
+ }
+}
+
+
+// Checkbox and radio options
+//
+// In order to support the browser's form validation feedback, powered by the
+// `required` attribute, we have to "hide" the inputs via `clip`. We cannot use
+// `display: none;` or `visibility: hidden;` as that also hides the popover.
+// Simply visually hiding the inputs via `opacity` would leave them clickable in
+// certain cases which is prevented by using `clip` and `pointer-events`.
+// This way, we ensure a DOM element is visible to position the popover from.
+//
+// See https://github.com/twbs/bootstrap/pull/12794 and
+// https://github.com/twbs/bootstrap/pull/14559 for more information.
+
+[data-toggle="buttons"] {
+ > .btn,
+ > .btn-group > .btn {
+ input[type="radio"],
+ input[type="checkbox"] {
+ position: absolute;
+ clip: rect(0,0,0,0);
+ pointer-events: none;
+ }
+ }
+}
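A note on the `&:extend(.btn-xs)` sizing remixes in this file: unlike a mixin call, `:extend` copies no declarations. Less instead appends the extending selector to every ruleset whose selector is exactly `.btn-xs` (the `all` keyword would be needed to also match compound selectors), so the sizing rules are emitted once. A rough sketch of the compiled output:

    // buttons.less defines the size once, and the compiled CSS
    // gains a second selector instead of duplicated declarations:
    .btn-xs,
    .btn-group-xs > .btn {
      /* padding, font-size, line-height, border-radius from .button-size(...) */
    }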
diff --git a/ui/src/bootstrap/buttons.less b/ui/src/bootstrap/buttons.less
new file mode 100755
index 000000000..9cbb8f416
--- /dev/null
+++ b/ui/src/bootstrap/buttons.less
@@ -0,0 +1,166 @@
+//
+// Buttons
+// --------------------------------------------------
+
+
+// Base styles
+// --------------------------------------------------
+
+.btn {
+ display: inline-block;
+ margin-bottom: 0; // For input.btn
+ font-weight: @btn-font-weight;
+ text-align: center;
+ vertical-align: middle;
+ touch-action: manipulation;
+ cursor: pointer;
+ background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214
+ border: 1px solid transparent;
+ white-space: nowrap;
+ .button-size(@padding-base-vertical; @padding-base-horizontal; @font-size-base; @line-height-base; @btn-border-radius-base);
+ .user-select(none);
+
+ &,
+ &:active,
+ &.active {
+ &:focus,
+ &.focus {
+ .tab-focus();
+ }
+ }
+
+ &:hover,
+ &:focus,
+ &.focus {
+ color: @btn-default-color;
+ text-decoration: none;
+ }
+
+ &:active,
+ &.active {
+ outline: 0;
+ background-image: none;
+ .box-shadow(inset 0 3px 5px rgba(0,0,0,.125));
+ }
+
+ &.disabled,
+ &[disabled],
+ fieldset[disabled] & {
+ cursor: @cursor-disabled;
+ .opacity(.65);
+ .box-shadow(none);
+ }
+
+ a& {
+ &.disabled,
+ fieldset[disabled] & {
+ pointer-events: none; // Future-proof disabling of clicks on `<a>` elements
+ }
+ }
+}
+
+
+// Alternate buttons
+// --------------------------------------------------
+
+.btn-default {
+ .button-variant(@btn-default-color; @btn-default-bg; @btn-default-border);
+}
+.btn-primary {
+ .button-variant(@btn-primary-color; @btn-primary-bg; @btn-primary-border);
+}
+// Success appears as green
+.btn-success {
+ .button-variant(@btn-success-color; @btn-success-bg; @btn-success-border);
+}
+// Info appears as blue-green
+.btn-info {
+ .button-variant(@btn-info-color; @btn-info-bg; @btn-info-border);
+}
+// Warning appears as orange
+.btn-warning {
+ .button-variant(@btn-warning-color; @btn-warning-bg; @btn-warning-border);
+}
+// Danger and error appear as red
+.btn-danger {
+ .button-variant(@btn-danger-color; @btn-danger-bg; @btn-danger-border);
+}
+
+
+// Link buttons
+// -------------------------
+
+// Make a button look and behave like a link
+.btn-link {
+ color: @link-color;
+ font-weight: normal;
+ border-radius: 0;
+
+ &,
+ &:active,
+ &.active,
+ &[disabled],
+ fieldset[disabled] & {
+ background-color: transparent;
+ .box-shadow(none);
+ }
+ &,
+ &:hover,
+ &:focus,
+ &:active {
+ border-color: transparent;
+ }
+ &:hover,
+ &:focus {
+ color: @link-hover-color;
+ text-decoration: @link-hover-decoration;
+ background-color: transparent;
+ }
+ &[disabled],
+ fieldset[disabled] & {
+ &:hover,
+ &:focus {
+ color: @btn-link-disabled-color;
+ text-decoration: none;
+ }
+ }
+}
+
+
+// Button Sizes
+// --------------------------------------------------
+
+.btn-lg {
+ // line-height: ensure even-numbered height of button next to large input
+ .button-size(@padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @btn-border-radius-large);
+}
+.btn-sm {
+ // line-height: ensure proper height of button next to small input
+ .button-size(@padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @btn-border-radius-small);
+}
+.btn-xs {
+ .button-size(@padding-xs-vertical; @padding-xs-horizontal; @font-size-small; @line-height-small; @btn-border-radius-small);
+}
+
+
+// Block button
+// --------------------------------------------------
+
+.btn-block {
+ display: block;
+ width: 100%;
+}
+
+// Vertically space out multiple block buttons
+.btn-block + .btn-block {
+ margin-top: 5px;
+}
+
+// Specificity overrides
+input[type="submit"],
+input[type="reset"],
+input[type="button"] {
+ &.btn-block {
+ width: 100%;
+ }
+}
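The alternate buttons above all delegate to the `.button-variant()` mixin from `mixins/buttons.less`. A simplified sketch of that mixin's shape, not the verbatim upstream body (the real version also covers `.focus`, the `.open` dropdown state, disabled states, and nested `.badge` colors):

    .button-variant(@color; @background; @border) {
      color: @color;
      background-color: @background;
      border-color: @border;

      // hover/focus/active darken the fill and border
      &:hover,
      &:focus,
      &:active,
      &.active {
        color: @color;
        background-color: darken(@background, 10%);
        border-color: darken(@border, 12%);
      }
    }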
diff --git a/ui/src/bootstrap/carousel.less b/ui/src/bootstrap/carousel.less
new file mode 100755
index 000000000..252011e9e
--- /dev/null
+++ b/ui/src/bootstrap/carousel.less
@@ -0,0 +1,270 @@
+//
+// Carousel
+// --------------------------------------------------
+
+
+// Wrapper for the slide container and indicators
+.carousel {
+ position: relative;
+}
+
+.carousel-inner {
+ position: relative;
+ overflow: hidden;
+ width: 100%;
+
+ > .item {
+ display: none;
+ position: relative;
+ .transition(.6s ease-in-out left);
+
+ // Account for jankitude on images
+ > img,
+ > a > img {
+ &:extend(.img-responsive);
+ line-height: 1;
+ }
+
+ // WebKit CSS3 transforms for supported devices
+ @media all and (transform-3d), (-webkit-transform-3d) {
+ .transition-transform(~'0.6s ease-in-out');
+ .backface-visibility(~'hidden');
+ .perspective(1000px);
+
+ &.next,
+ &.active.right {
+ .translate3d(100%, 0, 0);
+ left: 0;
+ }
+ &.prev,
+ &.active.left {
+ .translate3d(-100%, 0, 0);
+ left: 0;
+ }
+ &.next.left,
+ &.prev.right,
+ &.active {
+ .translate3d(0, 0, 0);
+ left: 0;
+ }
+ }
+ }
+
+ > .active,
+ > .next,
+ > .prev {
+ display: block;
+ }
+
+ > .active {
+ left: 0;
+ }
+
+ > .next,
+ > .prev {
+ position: absolute;
+ top: 0;
+ width: 100%;
+ }
+
+ > .next {
+ left: 100%;
+ }
+ > .prev {
+ left: -100%;
+ }
+ > .next.left,
+ > .prev.right {
+ left: 0;
+ }
+
+ > .active.left {
+ left: -100%;
+ }
+ > .active.right {
+ left: 100%;
+ }
+
+}
+
+// Left/right controls for nav
+// ---------------------------
+
+.carousel-control {
+ position: absolute;
+ top: 0;
+ left: 0;
+ bottom: 0;
+ width: @carousel-control-width;
+ .opacity(@carousel-control-opacity);
+ font-size: @carousel-control-font-size;
+ color: @carousel-control-color;
+ text-align: center;
+ text-shadow: @carousel-text-shadow;
+ background-color: rgba(0, 0, 0, 0); // Fix IE9 click-thru bug
+ // We can't have this transition here because WebKit cancels the carousel
+ // animation if you trip this while in the middle of another animation.
+
+ // Set gradients for backgrounds
+ &.left {
+ #gradient > .horizontal(@start-color: rgba(0,0,0,.5); @end-color: rgba(0,0,0,.0001));
+ }
+ &.right {
+ left: auto;
+ right: 0;
+ #gradient > .horizontal(@start-color: rgba(0,0,0,.0001); @end-color: rgba(0,0,0,.5));
+ }
+
+ // Hover/focus state
+ &:hover,
+ &:focus {
+ outline: 0;
+ color: @carousel-control-color;
+ text-decoration: none;
+ .opacity(.9);
+ }
+
+ // Toggles
+ .icon-prev,
+ .icon-next,
+ .glyphicon-chevron-left,
+ .glyphicon-chevron-right {
+ position: absolute;
+ top: 50%;
+ margin-top: -10px;
+ z-index: 5;
+ display: inline-block;
+ }
+ .icon-prev,
+ .glyphicon-chevron-left {
+ left: 50%;
+ margin-left: -10px;
+ }
+ .icon-next,
+ .glyphicon-chevron-right {
+ right: 50%;
+ margin-right: -10px;
+ }
+ .icon-prev,
+ .icon-next {
+ width: 20px;
+ height: 20px;
+ line-height: 1;
+ font-family: serif;
+ }
+
+
+ .icon-prev {
+ &:before {
+ content: '\2039';// SINGLE LEFT-POINTING ANGLE QUOTATION MARK (U+2039)
+ }
+ }
+ .icon-next {
+ &:before {
+ content: '\203a';// SINGLE RIGHT-POINTING ANGLE QUOTATION MARK (U+203A)
+ }
+ }
+}
+
+// Optional indicator pips
+//
+// Add an unordered list with the following class and add a list item for each
+// slide your carousel holds.
+
+.carousel-indicators {
+ position: absolute;
+ bottom: 10px;
+ left: 50%;
+ z-index: 15;
+ width: 60%;
+ margin-left: -30%;
+ padding-left: 0;
+ list-style: none;
+ text-align: center;
+
+ li {
+ display: inline-block;
+ width: 10px;
+ height: 10px;
+ margin: 1px;
+ text-indent: -999px;
+ border: 1px solid @carousel-indicator-border-color;
+ border-radius: 10px;
+ cursor: pointer;
+
+ // IE8-9 hack for event handling
+ //
+ // Internet Explorer 8-9 does not support clicks on elements without a set
+ // `background-color`. We cannot use `filter` since that's not viewed as a
+ // background color by the browser. Thus, a hack is needed.
+ // See https://developer.mozilla.org/en-US/docs/Web/Events/click#Internet_Explorer
+ //
+ // For IE8, we set solid black as it doesn't support `rgba()`. For IE9, we
+ // set alpha transparency for the best results possible.
+ background-color: #000 \9; // IE8
+ background-color: rgba(0,0,0,0); // IE9
+ }
+ .active {
+ margin: 0;
+ width: 12px;
+ height: 12px;
+ background-color: @carousel-indicator-active-bg;
+ }
+}
+
+// Optional captions
+// -----------------------------
+// Hidden by default for smaller viewports
+.carousel-caption {
+ position: absolute;
+ left: 15%;
+ right: 15%;
+ bottom: 20px;
+ z-index: 10;
+ padding-top: 20px;
+ padding-bottom: 20px;
+ color: @carousel-caption-color;
+ text-align: center;
+ text-shadow: @carousel-text-shadow;
+ & .btn {
+ text-shadow: none; // No shadow for button elements in carousel-caption
+ }
+}
+
+
+// Scale up controls for tablets and up
+@media screen and (min-width: @screen-sm-min) {
+
+ // Scale up the controls a smidge
+ .carousel-control {
+ .glyphicon-chevron-left,
+ .glyphicon-chevron-right,
+ .icon-prev,
+ .icon-next {
+ width: (@carousel-control-font-size * 1.5);
+ height: (@carousel-control-font-size * 1.5);
+ margin-top: (@carousel-control-font-size / -2);
+ font-size: (@carousel-control-font-size * 1.5);
+ }
+ .glyphicon-chevron-left,
+ .icon-prev {
+ margin-left: (@carousel-control-font-size / -2);
+ }
+ .glyphicon-chevron-right,
+ .icon-next {
+ margin-right: (@carousel-control-font-size / -2);
+ }
+ }
+
+ // Show and left align the captions
+ .carousel-caption {
+ left: 20%;
+ right: 20%;
+ padding-bottom: 30px;
+ }
+
+ // Move up the indicators
+ .carousel-indicators {
+ bottom: 20px;
+ }
+}
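The `(transform-3d), (-webkit-transform-3d)` media query in this file swaps the position-based slide (animating `left`) for a GPU-composited transform on browsers that support 3D transforms. The `.translate3d()` helper is a thin vendor-prefix wrapper, so `.translate3d(100%, 0, 0)` compiles roughly to:

    -webkit-transform: translate3d(100%, 0, 0);
            transform: translate3d(100%, 0, 0);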
diff --git a/ui/src/bootstrap/close.less b/ui/src/bootstrap/close.less
new file mode 100755
index 000000000..6d5bfe087
--- /dev/null
+++ b/ui/src/bootstrap/close.less
@@ -0,0 +1,34 @@
+//
+// Close icons
+// --------------------------------------------------
+
+
+.close {
+ float: right;
+ font-size: (@font-size-base * 1.5);
+ font-weight: @close-font-weight;
+ line-height: 1;
+ color: @close-color;
+ text-shadow: @close-text-shadow;
+ .opacity(.2);
+
+ &:hover,
+ &:focus {
+ color: @close-color;
+ text-decoration: none;
+ cursor: pointer;
+ .opacity(.5);
+ }
+
+ // Additional properties for button version
+ // iOS requires the button element instead of an anchor tag.
+ // If you want the anchor version, it requires `href="#"`.
+ // See https://developer.mozilla.org/en-US/docs/Web/Events/click#Safari_Mobile
+ button& {
+ padding: 0;
+ cursor: pointer;
+ background: transparent;
+ border: 0;
+ -webkit-appearance: none;
+ }
+}
diff --git a/ui/src/bootstrap/code.less b/ui/src/bootstrap/code.less
new file mode 100755
index 000000000..a08b4d48c
--- /dev/null
+++ b/ui/src/bootstrap/code.less
@@ -0,0 +1,69 @@
+//
+// Code (inline and block)
+// --------------------------------------------------
+
+
+// Inline and block code styles
+code,
+kbd,
+pre,
+samp {
+ font-family: @font-family-monospace;
+}
+
+// Inline code
+code {
+ padding: 2px 4px;
+ font-size: 90%;
+ color: @code-color;
+ background-color: @code-bg;
+ border-radius: @border-radius-base;
+}
+
+// User input typically entered via keyboard
+kbd {
+ padding: 2px 4px;
+ font-size: 90%;
+ color: @kbd-color;
+ background-color: @kbd-bg;
+ border-radius: @border-radius-small;
+ box-shadow: inset 0 -1px 0 rgba(0,0,0,.25);
+
+ kbd {
+ padding: 0;
+ font-size: 100%;
+ font-weight: bold;
+ box-shadow: none;
+ }
+}
+
+// Blocks of code
+pre {
+ display: block;
+ padding: ((@line-height-computed - 1) / 2);
+ margin: 0 0 (@line-height-computed / 2);
+ font-size: (@font-size-base - 1); // 14px to 13px
+ line-height: @line-height-base;
+ word-break: break-all;
+ word-wrap: break-word;
+ color: @pre-color;
+ background-color: @pre-bg;
+ border: 1px solid @pre-border-color;
+ border-radius: @border-radius-base;
+
+ // Account for some code outputs that place code tags in pre tags
+ code {
+ padding: 0;
+ font-size: inherit;
+ color: inherit;
+ white-space: pre-wrap;
+ background-color: transparent;
+ border-radius: 0;
+ }
+}
+
+// Enable scrollable blocks of code
+.pre-scrollable {
+ max-height: @pre-scrollable-max-height;
+ overflow-y: scroll;
+}
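With the stock defaults (`@font-size-base: 14px`, `@line-height-computed: 20px`), the `pre` arithmetic above resolves to:

    pre {
      padding: 9.5px;    /* (20 - 1) / 2 */
      margin: 0 0 10px;  /* 20 / 2 */
      font-size: 13px;   /* 14 - 1, per the inline comment */
    }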
diff --git a/ui/src/bootstrap/component-animations.less b/ui/src/bootstrap/component-animations.less
new file mode 100755
index 000000000..0bcee910a
--- /dev/null
+++ b/ui/src/bootstrap/component-animations.less
@@ -0,0 +1,33 @@
+//
+// Component animations
+// --------------------------------------------------
+
+// Heads up!
+//
+// We don't use the `.opacity()` mixin here since it causes a bug with text
+// fields in IE7-8. Source: https://github.com/twbs/bootstrap/pull/3552.
+
+.fade {
+ opacity: 0;
+ .transition(opacity .15s linear);
+ &.in {
+ opacity: 1;
+ }
+}
+
+.collapse {
+ display: none;
+
+ &.in { display: block; }
+ tr&.in { display: table-row; }
+ tbody&.in { display: table-row-group; }
+}
+
+.collapsing {
+ position: relative;
+ height: 0;
+ overflow: hidden;
+ .transition-property(~"height, visibility");
+ .transition-duration(.35s);
+ .transition-timing-function(ease);
+}
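The `~"height, visibility"` wrapper in `.collapsing` is Less string escaping: a bare comma inside a mixin call would be parsed as an argument separator, so the list is passed through verbatim. The declaration compiles roughly to:

    -webkit-transition-property: height, visibility;
            transition-property: height, visibility;

`height` is listed because the collapse plugin animates between explicit pixel heights; CSS cannot transition to `height: auto`.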
diff --git a/ui/src/bootstrap/dropdowns.less b/ui/src/bootstrap/dropdowns.less
new file mode 100755
index 000000000..f6876c1a9
--- /dev/null
+++ b/ui/src/bootstrap/dropdowns.less
@@ -0,0 +1,216 @@
+//
+// Dropdown menus
+// --------------------------------------------------
+
+
+// Dropdown arrow/caret
+.caret {
+ display: inline-block;
+ width: 0;
+ height: 0;
+ margin-left: 2px;
+ vertical-align: middle;
+ border-top: @caret-width-base dashed;
+ border-top: @caret-width-base solid ~"\9"; // IE8
+ border-right: @caret-width-base solid transparent;
+ border-left: @caret-width-base solid transparent;
+}
+
+// The dropdown wrapper (div)
+.dropup,
+.dropdown {
+ position: relative;
+}
+
+// Prevent the focus on the dropdown toggle when closing dropdowns
+.dropdown-toggle:focus {
+ outline: 0;
+}
+
+// The dropdown menu (ul)
+.dropdown-menu {
+ position: absolute;
+ top: 100%;
+ left: 0;
+ z-index: @zindex-dropdown;
+ display: none; // none by default, but block on "open" of the menu
+ float: left;
+ min-width: 160px;
+ padding: 5px 0;
+ margin: 2px 0 0; // override default ul
+ list-style: none;
+ font-size: @font-size-base;
+ text-align: left; // Ensures proper alignment if parent has it changed (e.g., modal footer)
+ background-color: @dropdown-bg;
+ border: 1px solid @dropdown-fallback-border; // IE8 fallback
+ border: 1px solid @dropdown-border;
+ border-radius: @border-radius-base;
+ .box-shadow(0 6px 12px rgba(0,0,0,.175));
+ background-clip: padding-box;
+
+ // Aligns the dropdown menu to right
+ //
+ // Deprecated as of 3.1.0 in favor of `.dropdown-menu-[dir]`
+ &.pull-right {
+ right: 0;
+ left: auto;
+ }
+
+ // Dividers (basically an hr) within the dropdown
+ .divider {
+ .nav-divider(@dropdown-divider-bg);
+ }
+
+ // Links within the dropdown menu
+ > li > a {
+ display: block;
+ padding: 3px 20px;
+ clear: both;
+ font-weight: normal;
+ line-height: @line-height-base;
+ color: @dropdown-link-color;
+ white-space: nowrap; // prevent links from randomly breaking onto new lines
+ }
+}
+
+// Hover/Focus state
+.dropdown-menu > li > a {
+ &:hover,
+ &:focus {
+ text-decoration: none;
+ color: @dropdown-link-hover-color;
+ background-color: @dropdown-link-hover-bg;
+ }
+}
+
+// Active state
+.dropdown-menu > .active > a {
+ &,
+ &:hover,
+ &:focus {
+ color: @dropdown-link-active-color;
+ text-decoration: none;
+ outline: 0;
+ background-color: @dropdown-link-active-bg;
+ }
+}
+
+// Disabled state
+//
+// Gray out text and ensure the hover/focus state remains gray
+
+.dropdown-menu > .disabled > a {
+ &,
+ &:hover,
+ &:focus {
+ color: @dropdown-link-disabled-color;
+ }
+
+ // Nuke hover/focus effects
+ &:hover,
+ &:focus {
+ text-decoration: none;
+ background-color: transparent;
+ background-image: none; // Remove CSS gradient
+ .reset-filter();
+ cursor: @cursor-disabled;
+ }
+}
+
+// Open state for the dropdown
+.open {
+ // Show the menu
+ > .dropdown-menu {
+ display: block;
+ }
+
+ // Remove the outline when :focus is triggered
+ > a {
+ outline: 0;
+ }
+}
+
+// Menu positioning
+//
+// Add extra class to `.dropdown-menu` to flip the alignment of the dropdown
+// menu with the parent.
+.dropdown-menu-right {
+ left: auto; // Reset the default from `.dropdown-menu`
+ right: 0;
+}
+// With v3, we enabled auto-flipping if you have a dropdown within a right
+// aligned nav component. To enable the undoing of that, we provide an override
+// to restore the default dropdown menu alignment.
+//
+// This is only for left-aligning a dropdown menu within a `.navbar-right` or
+// `.pull-right` nav component.
+.dropdown-menu-left {
+ left: 0;
+ right: auto;
+}
+
+// Dropdown section headers
+.dropdown-header {
+ display: block;
+ padding: 3px 20px;
+ font-size: @font-size-small;
+ line-height: @line-height-base;
+ color: @dropdown-header-color;
+ white-space: nowrap; // as with > li > a
+}
+
+// Backdrop to catch body clicks on mobile, etc.
+.dropdown-backdrop {
+ position: fixed;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ top: 0;
+ z-index: (@zindex-dropdown - 10);
+}
+
+// Right aligned dropdowns
+.pull-right > .dropdown-menu {
+ right: 0;
+ left: auto;
+}
+
+// Allow for dropdowns to go bottom up (aka, dropup-menu)
+//
+// Just add .dropup after the standard .dropdown class and you're set, bro.
+// TODO: abstract this so that the navbar fixed styles are not placed here?
+
+.dropup,
+.navbar-fixed-bottom .dropdown {
+ // Reverse the caret
+ .caret {
+ border-top: 0;
+ border-bottom: @caret-width-base dashed;
+ border-bottom: @caret-width-base solid ~"\9"; // IE8
+ content: "";
+ }
+ // Different positioning for bottom up menu
+ .dropdown-menu {
+ top: auto;
+ bottom: 100%;
+ margin-bottom: 2px;
+ }
+}
+
+
+// Component alignment
+//
+// Reiterate per navbar.less and the modified component alignment there.
+
+@media (min-width: @grid-float-breakpoint) {
+ .navbar-right {
+ .dropdown-menu {
+ .dropdown-menu-right();
+ }
+ // Necessary for overrides of the default right aligned menu.
+ // Will remove come v4 in all likelihood.
+ .dropdown-menu-left {
+ .dropdown-menu-left();
+ }
+ }
+}
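The doubled `border-top` on `.caret` uses a related escaping trick: Less rejects a raw `\9` suffix in a value, so it is smuggled through as `~"\9"`. Old IE, which the `\9` hack targets, parses the second declaration and gets a solid caret; other browsers discard it as invalid and keep the `dashed` one. Assuming the stock `@caret-width-base: 4px`, the compiled CSS is roughly:

    .caret {
      border-top: 4px dashed;
      border-top: 4px solid \9; /* parsed only by old IE */
    }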
diff --git a/ui/src/bootstrap/forms.less b/ui/src/bootstrap/forms.less
new file mode 100755
index 000000000..9377d3846
--- /dev/null
+++ b/ui/src/bootstrap/forms.less
@@ -0,0 +1,613 @@
+//
+// Forms
+// --------------------------------------------------
+
+
+// Normalize non-controls
+//
+// Restyle and baseline non-control form elements.
+
+fieldset {
+ padding: 0;
+ margin: 0;
+ border: 0;
+ // Chrome and Firefox set a `min-width: min-content;` on fieldsets,
+ // so we reset that to ensure it behaves more like a standard block element.
+ // See https://github.com/twbs/bootstrap/issues/12359.
+ min-width: 0;
+}
+
+legend {
+ display: block;
+ width: 100%;
+ padding: 0;
+ margin-bottom: @line-height-computed;
+ font-size: (@font-size-base * 1.5);
+ line-height: inherit;
+ color: @legend-color;
+ border: 0;
+ border-bottom: 1px solid @legend-border-color;
+}
+
+label {
+ display: inline-block;
+ max-width: 100%; // Force IE8 to wrap long content (see https://github.com/twbs/bootstrap/issues/13141)
+ margin-bottom: 5px;
+ font-weight: bold;
+}
+
+
+// Normalize form controls
+//
+// While most of our form styles require extra classes, some basic normalization
+// is required to ensure optimum display with or without those classes to better
+// address browser inconsistencies.
+
+// Override content-box in Normalize (* isn't specific enough)
+input[type="search"] {
+ .box-sizing(border-box);
+}
+
+// Position radios and checkboxes better
+input[type="radio"],
+input[type="checkbox"] {
+ margin: 4px 0 0;
+ margin-top: 1px \9; // IE8-9
+ line-height: normal;
+}
+
+input[type="file"] {
+ display: block;
+}
+
+// Make range inputs behave like textual form controls
+input[type="range"] {
+ display: block;
+ width: 100%;
+}
+
+// Make multiple select elements height not fixed
+select[multiple],
+select[size] {
+ height: auto;
+}
+
+// Focus for file, radio, and checkbox
+input[type="file"]:focus,
+input[type="radio"]:focus,
+input[type="checkbox"]:focus {
+ .tab-focus();
+}
+
+// Adjust output element
+output {
+ display: block;
+ padding-top: (@padding-base-vertical + 1);
+ font-size: @font-size-base;
+ line-height: @line-height-base;
+ color: @input-color;
+}
+
+
+// Common form controls
+//
+// Shared size and type resets for form controls. Apply `.form-control` to any
+// of the following form controls:
+//
+// select
+// textarea
+// input[type="text"]
+// input[type="password"]
+// input[type="datetime"]
+// input[type="datetime-local"]
+// input[type="date"]
+// input[type="month"]
+// input[type="time"]
+// input[type="week"]
+// input[type="number"]
+// input[type="email"]
+// input[type="url"]
+// input[type="search"]
+// input[type="tel"]
+// input[type="color"]
+
+.form-control {
+ display: block;
+ width: 100%;
+ height: @input-height-base; // Make inputs at least the height of their button counterpart (base line-height + padding + border)
+ padding: @padding-base-vertical @padding-base-horizontal;
+ font-size: @font-size-base;
+ line-height: @line-height-base;
+ color: @input-color;
+ background-color: @input-bg;
+ background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214
+ border: 1px solid @input-border;
+ border-radius: @input-border-radius; // Note: This has no effect on <select>s in some browsers, due to the limited stylability of <select>s in CSS.