@@ -277,7 +277,8 @@ public boolean publish(List<? extends CoreSpan<?>> trace) {
 for (CoreSpan<?> span : trace) {
   boolean isTopLevel = span.isTopLevel();
   if (shouldComputeMetric(span)) {
-    if (ignoredResources.contains(span.getResourceName().toString())) {
+    final CharSequence resourceName = span.getResourceName();
+    if (resourceName != null && ignoredResources.contains(resourceName.toString())) {
       // skip publishing all children
       forceKeep = false;
       break;
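Note: the guard above avoids a NullPointerException when a span carries no resource name, since the previous code called toString() on the result unconditionally. A minimal, self-contained sketch of the same pattern (hypothetical class name and example values, not part of this PR):

import java.util.Set;

// Illustration of the null guard: the ignoredResources lookup only runs when a
// resource name is present, so a null name no longer triggers a NullPointerException.
public class NullResourceNameGuardSketch {
  public static void main(String[] args) {
    Set<String> ignoredResources = Set.of("healthcheck");
    CharSequence resourceName = null; // e.g. a span published without a resource name

    if (resourceName != null && ignoredResources.contains(resourceName.toString())) {
      System.out.println("ignored resource: skip publishing metrics for this trace");
    } else {
      System.out.println("compute and publish metrics for this span");
    }
  }
}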
@@ -1,22 +1,21 @@
package datadog.trace.common.metrics

import static datadog.trace.bootstrap.instrumentation.api.Tags.SPAN_KIND
import static java.util.concurrent.TimeUnit.MILLISECONDS
import static java.util.concurrent.TimeUnit.SECONDS

import datadog.communication.ddagent.DDAgentFeaturesDiscovery
import datadog.trace.api.WellKnownTags
import datadog.trace.bootstrap.instrumentation.api.UTF8BytesString
import datadog.trace.core.CoreSpan
import datadog.trace.core.monitor.HealthMetrics
import datadog.trace.test.util.DDSpecification
import spock.lang.Shared

import java.util.concurrent.CompletableFuture
import java.util.concurrent.CountDownLatch
import java.util.concurrent.TimeUnit
import java.util.concurrent.TimeoutException
import java.util.function.Supplier

import static datadog.trace.bootstrap.instrumentation.api.Tags.SPAN_KIND
import static java.util.concurrent.TimeUnit.MILLISECONDS
import static java.util.concurrent.TimeUnit.SECONDS
import spock.lang.Shared

class ConflatingMetricAggregatorTest extends DDSpecification {

@@ -96,6 +95,48 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
aggregator.close()
}

def "should be resilient to null resource names"() {
setup:
MetricWriter writer = Mock(MetricWriter)
Sink sink = Stub(Sink)
DDAgentFeaturesDiscovery features = Mock(DDAgentFeaturesDiscovery)
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS)
aggregator.start()

when:
CountDownLatch latch = new CountDownLatch(1)
aggregator.publish([
new SimpleSpan("service", "operation", null, "type", false, true, false, 0, 100, HTTP_OK)
.setTag(SPAN_KIND, "baz")
])
aggregator.report()
def latchTriggered = latch.await(2, SECONDS)

then:
latchTriggered
1 * writer.startBucket(1, _, _)
1 * writer.add(new MetricKey(
null,
Contributor suggestion (inline review comment):

Suggested change
- null,
+ null, // resource name

"service",
"operation",
"type",
HTTP_OK,
false,
false,
"baz",
[]
), _) >> { MetricKey key, AggregateMetric value ->
value.getHitCount() == 1 && value.getTopLevelCount() == 1 && value.getDuration() == 100
}
1 * writer.finishBucket() >> { latch.countDown() }

cleanup:
aggregator.close()
}

def "unmeasured top level spans have metrics computed"() {
setup:
MetricWriter writer = Mock(MetricWriter)