diff --git a/cmd/venom/metrics-report/cmd.go b/cmd/venom/metrics-report/cmd.go
new file mode 100644
index 00000000..2d7988e8
--- /dev/null
+++ b/cmd/venom/metrics-report/cmd.go
@@ -0,0 +1,359 @@
+package metricsreport
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ "github.com/ovh/venom/reporting"
+ "github.com/ovh/venom/reporting/aggregator"
+)
+
+var Cmd = &cobra.Command{
+ Use: "metrics-report [flags] metrics_*.json",
+ Short: "Aggregate metrics files and generate reports",
+ Long: `Aggregate multiple metrics files and generate reports in various formats.
+
+This command combines aggregation and reporting functionality, allowing you to:
+- Aggregate multiple metrics files from parallel Venom runs
+- Generate HTML reports with interactive visualizations
+- Output JSON data for further processing
+- Check performance thresholds and generate JUnit XML for CI
+- Control which outputs are generated
+
+Note: By default, threshold breaches are reported but don't cause the command to fail.
+Use --fail-on-breaches to exit with error code on threshold violations.
+
+Examples:
+ # Basic aggregation and HTML report
+ venom metrics-report metrics_*.json
+
+ # Generate only HTML report (skip JSON file)
+ venom metrics-report metrics_*.json --html-only
+
+ # Generate only JSON (skip HTML)
+ venom metrics-report metrics_*.json --json-only
+
+ # Custom output files
+ venom metrics-report metrics_*.json -o aggregated.json --html-output report.html
+
+ # Check thresholds with custom config
+ venom metrics-report metrics_*.json --check-thresholds --thresholds my_thresholds.yml
+
+ # Generate JUnit XML for CI integration
+ venom metrics-report metrics_*.json --check-thresholds --junit results.xml
+
+ # Fail on breaches (exit with error code on violations)
+ venom metrics-report metrics_*.json --check-thresholds --fail-on-breaches
+
+ # With aggregation options
+ venom metrics-report metrics_*.json --max-endpoints=5000 --html-only`,
+ Args: cobra.MinimumNArgs(1),
+ RunE: runMetricsReport,
+}
+
+var (
+ // Output options
+ jsonOutput string
+ htmlOutput string
+ jsonOnly bool
+ htmlOnly bool
+
+ // Aggregation options
+ maxEndpoints int
+ noBucket bool
+ mergePercentiles string
+
+ // Threshold checking options
+ checkThresholds bool
+ thresholdsFile string
+ junitOutput string
+ failOnBreaches bool
+)
+
+func init() {
+ // Output flags
+ Cmd.Flags().StringVarP(&jsonOutput, "output", "o", "aggregated_metrics.json", "JSON output file path")
+ Cmd.Flags().StringVar(&htmlOutput, "html-output", "metrics_report.html", "HTML output file path")
+ Cmd.Flags().BoolVar(&jsonOnly, "json-only", false, "Generate only JSON output")
+ Cmd.Flags().BoolVar(&htmlOnly, "html-only", false, "Generate only HTML output")
+
+ // Aggregation flags
+ Cmd.Flags().IntVar(&maxEndpoints, "max-endpoints", 2000, "Maximum unique endpoints allowed")
+ Cmd.Flags().BoolVar(&noBucket, "no-bucket", false, "Drop overflow endpoints instead of bucketing into 'other'")
+ Cmd.Flags().StringVar(&mergePercentiles, "merge-percentiles", "weighted", "Merge strategy for percentiles (weighted|sketch)")
+
+ // Threshold checking flags
+ Cmd.Flags().BoolVar(&checkThresholds, "check-thresholds", false, "Check metrics against threshold configuration")
+ Cmd.Flags().StringVar(&thresholdsFile, "thresholds", "thresholds.yml", "Threshold configuration file path")
+ Cmd.Flags().StringVar(&junitOutput, "junit", "", "JUnit XML output file for threshold breaches")
+ Cmd.Flags().BoolVar(&failOnBreaches, "fail-on-breaches", false, "Exit with error code on threshold breaches (default: soft fail)")
+}
+
+func runMetricsReport(cmd *cobra.Command, args []string) error {
+ // Validate flags
+ if jsonOnly && htmlOnly {
+ return fmt.Errorf("cannot specify both --json-only and --html-only")
+ }
+
+ if mergePercentiles != "weighted" && mergePercentiles != "sketch" {
+ return fmt.Errorf("invalid merge-percentiles value. Must be 'weighted' or 'sketch'")
+ }
+
+ // Expand glob patterns
+ var inputFiles []string
+ for _, pattern := range args {
+ matches, err := filepath.Glob(pattern)
+ if err != nil {
+ return fmt.Errorf("error expanding pattern %s: %w", pattern, err)
+ }
+ if len(matches) == 0 {
+ fmt.Fprintf(os.Stderr, "Warning: No files match pattern %s\n", pattern)
+ }
+ inputFiles = append(inputFiles, matches...)
+ }
+
+ if len(inputFiles) == 0 {
+ return fmt.Errorf("no input files found")
+ }
+
+ fmt.Printf("Processing %d metrics files...\n", len(inputFiles))
+
+ // Create aggregator configuration
+ config := &aggregator.Config{
+ MaxEndpoints: maxEndpoints,
+ NoBucket: noBucket,
+ MergePercentiles: mergePercentiles,
+ }
+
+ // Run aggregation
+ result, err := aggregator.AggregateFiles(inputFiles, config)
+ if err != nil {
+ return fmt.Errorf("error aggregating metrics: %w", err)
+ }
+
+ fmt.Printf("Successfully aggregated %d files\n", len(inputFiles))
+ fmt.Printf("Total endpoints: %d\n", len(result.Metrics))
+ fmt.Printf("Total checks: %d\n", len(result.RootGroup.Checks))
+
+ // Determine what outputs to generate
+ generateJSON := !htmlOnly
+ generateHTML := !jsonOnly
+
+ // Generate JSON output
+ if generateJSON {
+ err = aggregator.WriteOutput(result, jsonOutput)
+ if err != nil {
+ return fmt.Errorf("error writing JSON output: %w", err)
+ }
+ fmt.Printf("JSON report generated: %s\n", jsonOutput)
+ }
+
+ // Generate HTML output
+ if generateHTML {
+ // Load threshold configuration for HTML report (optional)
+ var thresholdConfig *reporting.ThresholdConfig
+
+ // Try to load thresholds from specified file first, then fallback to thresholds.yml, then defaults
+ if thresholdsFile != "" {
+ // Load from specified file
+ thresholdConfig, err = reporting.LoadThresholdConfig(thresholdsFile)
+ if err != nil {
+ return fmt.Errorf("failed to load threshold config from %s: %w", thresholdsFile, err)
+ }
+ fmt.Printf("Using threshold configuration from %s for HTML report\n", thresholdsFile)
+ } else {
+ // Try to load thresholds.yml from current directory, fallback to defaults
+ if _, err := os.Stat("thresholds.yml"); err == nil {
+ thresholdConfig, err = reporting.LoadThresholdConfig("thresholds.yml")
+ if err != nil {
+ // If loading fails, use defaults instead of failing
+ fmt.Printf("Warning: failed to load thresholds.yml, using default configuration: %v\n", err)
+ thresholdConfig = reporting.DefaultThresholdConfig()
+ } else {
+ fmt.Printf("Using threshold configuration from thresholds.yml for HTML report\n")
+ }
+ } else {
+ // Use default configuration
+ thresholdConfig = reporting.DefaultThresholdConfig()
+ fmt.Printf("Using default threshold configuration for HTML report\n")
+ }
+ }
+
+ err = reporting.GenerateMetricsHTMLReportWithThresholds(result, htmlOutput, thresholdConfig)
+ if err != nil {
+ return fmt.Errorf("error generating HTML report: %w", err)
+ }
+ fmt.Printf("HTML report generated: %s\n", htmlOutput)
+ }
+
+ // Check thresholds if requested
+ if checkThresholds {
+ err = checkThresholdBreaches(result)
+ if err != nil {
+ return fmt.Errorf("error checking thresholds: %w", err)
+ }
+ }
+
+ return nil
+}
+
+func checkThresholdBreaches(metrics *aggregator.Metrics) error {
+ // Load threshold configuration
+ var config *reporting.ThresholdConfig
+ var err error
+
+ if thresholdsFile != "" {
+ // Load from specified file
+ config, err = reporting.LoadThresholdConfig(thresholdsFile)
+ if err != nil {
+ return fmt.Errorf("failed to load threshold config from %s: %w", thresholdsFile, err)
+ }
+ } else {
+ // Try to load thresholds.yml from current directory, fallback to defaults
+ if _, err := os.Stat("thresholds.yml"); err == nil {
+ config, err = reporting.LoadThresholdConfig("thresholds.yml")
+ if err != nil {
+ return fmt.Errorf("failed to load threshold config from thresholds.yml: %w", err)
+ }
+ fmt.Printf("Using threshold configuration from thresholds.yml\n")
+ } else {
+ // Use default configuration
+ config = reporting.DefaultThresholdConfig()
+ fmt.Printf("Using default threshold configuration\n")
+ }
+ }
+
+ // Convert aggregator.Metrics to reporting.Metrics for validation
+ reportingMetrics := &reporting.Metrics{
+ RootGroup: convertTestGroup(metrics.RootGroup),
+ Metrics: convertMetrics(metrics.Metrics),
+ SetupData: metrics.SetupData,
+ StartTime: metrics.StartTime,
+ EndTime: metrics.EndTime,
+ }
+
+ // Validate thresholds
+ breaches := config.ValidateThresholds(reportingMetrics)
+ summary := config.GetBreachSummary(breaches)
+
+ // Print summary
+ fmt.Printf("\n=== Threshold Validation Results ===\n")
+ fmt.Printf("Total breaches: %d\n", summary["total"])
+ fmt.Printf("Errors: %d\n", summary["error"])
+ fmt.Printf("Warnings: %d\n", summary["warning"])
+
+ if len(breaches) > 0 {
+ fmt.Printf("\nBreaches:\n")
+ for _, v := range breaches {
+ fmt.Printf(" %s [%s] %s: %.2f%s (threshold: %.2f%s, samples: %d)\n",
+ v.Severity, v.Endpoint, v.Metric, v.Value, v.Unit, v.Threshold, v.Unit, v.SampleCount)
+ }
+
+ // Generate JUnit XML if requested
+ if junitOutput != "" {
+ err = generateJUnitXML(breaches, junitOutput)
+ if err != nil {
+ return fmt.Errorf("failed to generate JUnit XML: %w", err)
+ }
+ fmt.Printf("JUnit XML report generated: %s\n", junitOutput)
+ }
+
+ // Exit with error code only if fail-on-breaches is explicitly enabled
+ if failOnBreaches {
+ return fmt.Errorf("threshold breaches detected: %d errors, %d warnings", summary["error"], summary["warning"])
+ }
+ } else {
+ fmt.Printf("✅ All thresholds passed!\n")
+ }
+
+ return nil
+}
+
+// convertTestGroup converts aggregator.TestGroup to reporting.TestGroup
+func convertTestGroup(ag *aggregator.TestGroup) *reporting.TestGroup {
+ if ag == nil {
+ return nil
+ }
+
+ vg := &reporting.TestGroup{
+ Name: ag.Name,
+ Path: ag.Path,
+ ID: ag.ID,
+ Groups: make(map[string]*reporting.TestGroup),
+ Checks: make(map[string]*reporting.TestCheck),
+ }
+
+ for k, v := range ag.Groups {
+ vg.Groups[k] = convertTestGroup(v)
+ }
+
+ for k, v := range ag.Checks {
+ vg.Checks[k] = &reporting.TestCheck{
+ Name: v.Name,
+ Path: v.Path,
+ ID: v.ID,
+ Passes: v.Passes,
+ Fails: v.Fails,
+ }
+ }
+
+ return vg
+}
+
+// convertMetrics converts aggregator.Metrics map to reporting.Metrics map
+func convertMetrics(am map[string]*aggregator.Metric) map[string]*reporting.Metric {
+ vm := make(map[string]*reporting.Metric)
+
+ for k, v := range am {
+ vm[k] = &reporting.Metric{
+ Type: v.Type,
+ Values: v.Values,
+ }
+ }
+
+ return vm
+}
+
+func generateJUnitXML(breaches []reporting.ThresholdBreach, outputFile string) error {
+ file, err := os.Create(outputFile)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ // Count tests and failures
+ totalTests := len(breaches)
+ failures := 0
+ for _, v := range breaches {
+ if v.Severity == "error" {
+ failures++
+ }
+ }
+
+ // Write JUnit XML header
+ fmt.Fprintf(file, `<?xml version="1.0" encoding="UTF-8"?>
+<testsuite name="venom-thresholds" tests="%d" failures="%d">
+`, totalTests, failures)
+
+ // Write test cases for each violation
+ for _, v := range breaches {
+ fmt.Fprintf(file, `  <testcase name="%s_%s" classname="venom.thresholds">
+    <failure message="value %.2f%s exceeds threshold %.2f%s (samples: %d)">
+%s: %s - %s violation
+Value: %.2f%s
+Threshold: %.2f%s
+Samples: %d
+    </failure>
+  </testcase>
+`, v.Endpoint, v.Metric, v.Value, v.Unit, v.Threshold, v.Unit, v.SampleCount,
+ v.Severity, v.Endpoint, v.Metric, v.Value, v.Unit, v.Threshold, v.Unit, v.SampleCount)
+ }
+
+ // Write JUnit XML footer
+ fmt.Fprintf(file, " \n")
+
+ return nil
+}
diff --git a/cmd/venom/root/root.go b/cmd/venom/root/root.go
index 2dba11f5..55f415fb 100644
--- a/cmd/venom/root/root.go
+++ b/cmd/venom/root/root.go
@@ -3,6 +3,7 @@ package root
import (
"github.com/spf13/cobra"
+ metricsreport "github.com/ovh/venom/cmd/venom/metrics-report"
"github.com/ovh/venom/cmd/venom/run"
"github.com/ovh/venom/cmd/venom/update"
"github.com/ovh/venom/cmd/venom/version"
@@ -23,4 +24,5 @@ func addCommands(cmd *cobra.Command) {
cmd.AddCommand(run.Cmd)
cmd.AddCommand(version.Cmd)
cmd.AddCommand(update.Cmd)
+ cmd.AddCommand(metricsreport.Cmd)
}
diff --git a/cmd/venom/root/root_test.go b/cmd/venom/root/root_test.go
index 0542a3b8..e591c1b1 100644
--- a/cmd/venom/root/root_test.go
+++ b/cmd/venom/root/root_test.go
@@ -28,7 +28,7 @@ func TestRunCmd(t *testing.T) {
rootCmd := New()
rootCmd.SetArgs(validArgs)
venom.IsTest = "test"
- assert.Equal(t, 3, len(rootCmd.Commands()))
+ assert.Equal(t, 4, len(rootCmd.Commands()))
err := rootCmd.Execute()
assert.NoError(t, err)
rootCmd.Execute()
diff --git a/cmd/venom/run/cmd.go b/cmd/venom/run/cmd.go
index e0075943..ce5e0023 100644
--- a/cmd/venom/run/cmd.go
+++ b/cmd/venom/run/cmd.go
@@ -18,7 +18,9 @@ import (
"github.com/ovh/venom"
"github.com/ovh/venom/executors"
+ "github.com/ovh/venom/executors/http"
"github.com/ovh/venom/interpolate"
+ "github.com/ovh/venom/reporting"
)
var (
@@ -39,6 +41,10 @@ var (
failureLinkHeader string
failureLinkTemplate string
+ // Metrics flags
+ metricsEnabled bool
+ metricsOutput string
+
variablesFlag *[]string
formatFlag *string
varFilesFlag *[]string
@@ -50,6 +56,8 @@ var (
openApiReportFlag *bool
failureLinkHeaderFlag *string
failureLinkTemplateFlag *string
+ metricsEnabledFlag *bool
+ metricsOutputFlag *string
)
func init() {
@@ -64,6 +72,8 @@ func init() {
openApiReportFlag = Cmd.Flags().Bool("open-api-report", false, "Generate OpenAPI Report")
failureLinkHeaderFlag = Cmd.Flags().String("failure-link-header", "X-Failure-Link", "Header to add to the HTTP response for failure links")
failureLinkTemplateFlag = Cmd.Flags().String("failure-link-template", "http://localhost:8080/failure/%s", "Template for failure links")
+ metricsEnabledFlag = Cmd.Flags().Bool("metrics-enabled", false, "Enable metrics collection during test execution")
+ metricsOutputFlag = Cmd.Flags().String("metrics-output", "", "Output file for metrics data (supports {#} placeholder for parallel runs)")
}
func initArgs(cmd *cobra.Command) {
@@ -137,6 +147,14 @@ func initFromCommandArguments(f *pflag.Flag) {
if failureLinkTemplateFlag != nil {
failureLinkTemplate = *failureLinkTemplateFlag
}
+ case "metrics-enabled":
+ if metricsEnabledFlag != nil {
+ metricsEnabled = *metricsEnabledFlag
+ }
+ case "metrics-output":
+ if metricsOutputFlag != nil {
+ metricsOutput = *metricsOutputFlag
+ }
}
}
@@ -402,6 +420,24 @@ var Cmd = &cobra.Command{
v.OpenApiReport = openApiReport
v.FailureLinkHeader = failureLinkHeader
v.FailureLinkTemplate = failureLinkTemplate
+ v.MetricsEnabled = metricsEnabled
+ v.MetricsOutput = metricsOutput
+
+ // Initialize metrics collector if enabled
+ if metricsEnabled && metricsOutput != "" {
+ // Handle {#} placeholder for parallel execution
+ outputFile := metricsOutput
+ if strings.Contains(outputFile, "{#}") {
+ // Fallback for standalone runs: GNU parallel substitutes {#} with the job number before venom is invoked
+ outputFile = strings.Replace(outputFile, "{#}", "1", 1)
+ }
+ v.MetricsOutput = outputFile
+ metricsCollector := reporting.NewMetricsCollector()
+ v.SetMetricsCollector(metricsCollector)
+
+ // Reset global DPN state for new test run
+ http.ResetGlobalDPNState()
+ }
if err := v.InitLogger(); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
diff --git a/executors/http/dpn.go b/executors/http/dpn.go
new file mode 100644
index 00000000..86b6c4af
--- /dev/null
+++ b/executors/http/dpn.go
@@ -0,0 +1,421 @@
+package http
+
+import (
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// Precompiled regex patterns for better performance
+var (
+ reVersion = regexp.MustCompile(`^(api-)?v\d+([a-z0-9]+)?$`) // v1, v2beta, api-v2
+ reDateVersion = regexp.MustCompile(`^\d{4}-\d{2}-\d{2}$`) // 2024-10-01
+ reUUID = regexp.MustCompile(`^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$`)
+ reULID = regexp.MustCompile(`^[0-9A-HJKMNP-TV-Z]{26}$`)
+ reKSUID = regexp.MustCompile(`^[0-9A-Za-z]{27}$`)
+ reMongoID = regexp.MustCompile(`^[0-9a-f]{24}$`)
+ rePureDigits = regexp.MustCompile(`^\d{6,}$`)
+ reTimestamp = regexp.MustCompile(`^\d{10}(\d{3})?$`)
+ reHexyBlob = regexp.MustCompile(`^[0-9a-f]{12,}$`)
+ reResourceKey = regexp.MustCompile(`^[A-Za-z]+[-_]*\d+([A-Za-z0-9-_]+)?$`)
+ reLocale = regexp.MustCompile(`^[a-z]{2}(-[A-Z]{2})?$`) // en, es, en-US
+ reTemplateVar = regexp.MustCompile(`^{{\.([^}]*)}}$`) // {{.variable}}, {{.previousStep.value}}, or {{.}}
+)
+
+// Keep list for semantically significant tokens
+var keepList = []*regexp.Regexp{
+ regexp.MustCompile(`^status\d{3}$`),
+ regexp.MustCompile(`^http2$`),
+ regexp.MustCompile(`^ipv6$`),
+ regexp.MustCompile(`^(\.well-known|openid-configuration|oauth2|healthz|readyz|livez|metrics|search|bulk|export|jwks)$`),
+ regexp.MustCompile(`^(json|ndjson|csv|xml)$`),
+ reLocale,
+}
+
+// API prefixes that can be dropped anywhere (but prefer early segments)
+var apiPrefixes = map[string]bool{
+ "api": true,
+ "rest": true,
+ "graphql": true,
+}
+
+// Global DPN state for sharing across HTTP requests in a test run
+var (
+ globalDPNState *DPNState
+ globalDPNMutex sync.Mutex
+)
+
+// GetGlobalDPNState returns the global DPN state, creating it if needed
+func GetGlobalDPNState() *DPNState {
+ globalDPNMutex.Lock()
+ defer globalDPNMutex.Unlock()
+
+ if globalDPNState == nil {
+ globalDPNState = NewDPNState(nil)
+ }
+ return globalDPNState
+}
+
+// ResetGlobalDPNState resets the global DPN state (useful for testing or new test runs)
+func ResetGlobalDPNState() {
+ globalDPNMutex.Lock()
+ defer globalDPNMutex.Unlock()
+ globalDPNState = nil
+}
+
+// DPNConfig holds configuration for the DPN
+type DPNConfig struct {
+ MaxEndpoints int
+ CacheSize int
+ // Collision detection is disabled by default to aggregate similar endpoints
+}
+
+// DefaultDPNConfig returns the default DPN configuration
+func DefaultDPNConfig() *DPNConfig {
+ return &DPNConfig{
+ MaxEndpoints: getMaxEndpoints(),
+ CacheSize: 8192,
+ // Collision detection is disabled by default to aggregate similar endpoints
+ }
+}
+
+// DPNState holds the state for a single DPN instance
+type DPNState struct {
+ mu sync.RWMutex
+ cache map[string]string
+ endpointCollisions map[string]string
+ endpointCount int
+ endpointsBucketed int
+ config *DPNConfig
+}
+
+// NewDPNState creates a new DPN state instance
+func NewDPNState(config *DPNConfig) *DPNState {
+ if config == nil {
+ config = DefaultDPNConfig()
+ }
+ return &DPNState{
+ cache: make(map[string]string),
+ endpointCollisions: make(map[string]string),
+ config: config,
+ }
+}
+
+// getMaxEndpoints returns the maximum number of unique endpoints allowed
+// Can be configured via VENOM_MAX_ENDPOINTS environment variable
+func getMaxEndpoints() int {
+ if envVal := os.Getenv("VENOM_MAX_ENDPOINTS"); envVal != "" {
+ if val, err := strconv.Atoi(envVal); err == nil && val > 0 {
+ return val
+ }
+ }
+ return 5000 // Default: 5000 endpoints
+}
+
+// ExtractSimpleEndpoint transforms URLs into stable endpoint templates
+func ExtractSimpleEndpoint(path string) string {
+ // Create a temporary state for this call
+ state := NewDPNState(nil)
+ return ExtractSimpleEndpointWithState(path, state)
+}
+
+// ExtractSimpleEndpointWithMethod includes HTTP method as prefix
+func ExtractSimpleEndpointWithMethod(path string, method string) string {
+ // Create a temporary state for this call
+ state := NewDPNState(nil)
+ return ExtractSimpleEndpointWithMethodAndState(path, method, state)
+}
+
+// ExtractSimpleEndpointWithMethodAndState includes HTTP method prefix with state management
+func ExtractSimpleEndpointWithMethodAndState(path string, method string, state *DPNState) string {
+ // Normalize method to uppercase
+ method = strings.ToUpper(method)
+ if method == "" {
+ method = "GET"
+ }
+
+ // Extract the endpoint without method
+ endpoint := ExtractSimpleEndpointWithState(path, state)
+
+ // Add method prefix
+ return method + "_" + endpoint
+}
+
+// ExtractSimpleEndpointWithState normalizes path with state management
+func ExtractSimpleEndpointWithState(path string, state *DPNState) string {
+ // Check cache first
+ state.mu.RLock()
+ if cached, exists := state.cache[path]; exists {
+ state.mu.RUnlock()
+ return cached
+ }
+ state.mu.RUnlock()
+
+ // Cache under the raw path so later lookups with the same argument hit
+ cacheKey := path
+ path = strings.ToLower(path)
+
+ // Strip query parameters, fragments, and matrix parameters
+ if idx := strings.Index(path, "?"); idx != -1 {
+ path = path[:idx]
+ }
+ if idx := strings.Index(path, "#"); idx != -1 {
+ path = path[:idx]
+ }
+ if idx := strings.Index(path, ";"); idx != -1 {
+ path = path[:idx]
+ }
+
+ path = strings.TrimSuffix(path, "/")
+ if path == "" || path == "/" {
+ return "root"
+ }
+
+ // Tokenize path
+ parts := strings.Split(path, "/")
+ tokens := []string{}
+ for _, part := range parts {
+ part = strings.TrimSpace(part)
+ if part != "" {
+ tokens = append(tokens, part)
+ }
+ }
+
+ if len(tokens) == 0 {
+ return "root"
+ }
+
+ // Classify and prune tokens
+ keptTokens := []string{}
+
+ tokenLoop:
+ for i, token := range tokens {
+ token = normalizeTemplateVariable(token)
+ if token == "" {
+ continue
+ }
+
+ // Check keep list first
+ for _, keepPattern := range keepList {
+ if keepPattern.MatchString(token) {
+ keptTokens = append(keptTokens, token)
+ continue tokenLoop
+ }
+ }
+
+ // Keep special tokens
+ if token == "me" || token == "self" || token == "current" {
+ keptTokens = append(keptTokens, token)
+ continue
+ }
+
+ // Drop API prefixes in early positions
+ if apiPrefixes[token] && i <= 2 {
+ continue
+ }
+
+ // Drop version tokens
+ if reVersion.MatchString(token) || reDateVersion.MatchString(token) {
+ continue
+ }
+
+ // Drop HTTP method tokens
+ if isHTTPMethod(token) {
+ continue
+ }
+
+ // Drop ID-like tokens
+ if isIDLike(token) {
+ continue
+ }
+
+ // Drop tokens that look like IDs
+ if len(token) >= 6 && looksLikeID(token) {
+ continue
+ }
+
+ keptTokens = append(keptTokens, token)
+ }
+
+ // Shape template
+ var result string
+ if len(keptTokens) <= 3 {
+ result = strings.Join(keptTokens, "_")
+ } else {
+ // Deep paths: keep the first two segments and the last one
+ result = keptTokens[0] + "_" + keptTokens[1] + "_" + keptTokens[len(keptTokens)-1]
+ }
+
+ if len(keptTokens) > 0 {
+ result = trimExtIfAny(result)
+ }
+
+ result = regexp.MustCompile(`_+`).ReplaceAllString(result, "_")
+ result = strings.Trim(result, "_")
+
+ if len(result) > 80 {
+ result = result[:80]
+ }
+
+ if result == "" {
+ result = strings.ReplaceAll(path, "/", "_")
+ result = strings.Trim(result, "_")
+ }
+
+ result = handleCollisionsAndCardinalityWithState(result, path, state)
+
+ // Cache result
+ state.mu.Lock()
+ if len(state.cache) >= state.config.CacheSize {
+ state.cache = make(map[string]string)
+ }
+ state.cache[cacheKey] = result
+ state.mu.Unlock()
+
+ return result
+}
+
+// isHTTPMethod checks if token is an HTTP method
+func isHTTPMethod(token string) bool {
+ return token == "get" || token == "post" || token == "put" ||
+ token == "patch" || token == "delete" || token == "head" || token == "options"
+}
+
+// isIDLike checks if token matches known ID patterns
+func isIDLike(token string) bool {
+ return reUUID.MatchString(token) || reULID.MatchString(token) || reKSUID.MatchString(token) ||
+ reMongoID.MatchString(token) || rePureDigits.MatchString(token) || reTimestamp.MatchString(token) ||
+ reHexyBlob.MatchString(token) || reResourceKey.MatchString(token)
+}
+
+// looksLikeID determines if a token looks like an ID based on digit ratio and patterns
+func looksLikeID(token string) bool {
+ if len(token) < 6 {
+ return false
+ }
+
+ // Count digits
+ digitCount := 0
+ digitRuns := 0
+ inDigitRun := false
+
+ for _, char := range token {
+ if char >= '0' && char <= '9' {
+ digitCount++
+ if !inDigitRun {
+ digitRuns++
+ inDigitRun = true
+ }
+ } else {
+ inDigitRun = false
+ }
+ }
+
+ digitRatio := float64(digitCount) / float64(len(token))
+ return digitRatio >= 0.4 || digitRuns >= 2
+}
+
+// trimExtIfAny removes file extensions
+func trimExtIfAny(s string) string {
+ if i := strings.LastIndexByte(s, '.'); i > 0 && i >= len(s)-6 {
+ return s[:i]
+ }
+ return s
+}
+
+// normalizeTemplateVariable strips out template variables
+func normalizeTemplateVariable(token string) string {
+ if reTemplateVar.MatchString(token) {
+ return ""
+ }
+ return token
+}
+
+// handleCollisionsAndCardinalityWithState handles endpoint collisions and cardinality limits
+func handleCollisionsAndCardinalityWithState(normalized, original string, state *DPNState) string {
+ state.mu.Lock()
+ defer state.mu.Unlock()
+
+ if state.endpointCount >= state.config.MaxEndpoints {
+ state.endpointsBucketed++
+ return "other"
+ }
+
+ // Collision detection is disabled by default - always return the base normalized name
+ // Still track the original for cardinality counting, but don't create hash suffixes
+ if _, exists := state.endpointCollisions[normalized]; !exists {
+ state.endpointCollisions[normalized] = original
+ state.endpointCount++
+ }
+ return normalized
+}
+
+// ExtractSimpleEndpointWithGraphQL implements DPN with GraphQL operation detection
+func ExtractSimpleEndpointWithGraphQL(path string, contentType string, body []byte) string {
+ if strings.HasSuffix(path, "/graphql") || strings.HasSuffix(path, "/gql") {
+ if contentType == "application/json" && len(body) > 0 {
+ if operationName := extractGraphQLOperation(body); operationName != "" {
+ return "graphql"
+ }
+ }
+ return "graphql"
+ }
+
+ return ExtractSimpleEndpoint(path)
+}
+
+// NormalizeEndpoint implements DPN with GraphQL operation detection and HTTP method prefix
+func NormalizeEndpoint(path string, method string, contentType string, body []byte) string {
+ if strings.HasSuffix(path, "/graphql") || strings.HasSuffix(path, "/gql") {
+ method = strings.ToUpper(method)
+ if method == "" {
+ method = "GET"
+ }
+
+ if contentType == "application/json" && len(body) > 0 {
+ if operationName := extractGraphQLOperation(body); operationName != "" {
+ return method + "_graphql"
+ }
+ }
+ return method + "_graphql"
+ }
+
+ // Use global DPN state for proper collision detection and cardinality limits
+ globalState := GetGlobalDPNState()
+ return ExtractSimpleEndpointWithMethodAndState(path, method, globalState)
+}
+
+// extractGraphQLOperation extracts operationName from GraphQL request body
+func extractGraphQLOperation(body []byte) string {
+ bodyStr := string(body)
+ opNamePattern := regexp.MustCompile(`"operationName"\s*:\s*"([^"]+)"`)
+ if matches := opNamePattern.FindStringSubmatch(bodyStr); len(matches) > 1 {
+ return matches[1]
+ }
+ return ""
+}
+
+// GetCardinalityStats returns statistics about endpoint cardinality (stateless)
+func GetCardinalityStats() map[string]interface{} {
+ state := NewDPNState(nil)
+ return GetCardinalityStatsWithState(state)
+}
+
+// GetCardinalityStatsWithState returns statistics about endpoint cardinality
+func GetCardinalityStatsWithState(state *DPNState) map[string]interface{} {
+ state.mu.RLock()
+ defer state.mu.RUnlock()
+
+ return map[string]interface{}{
+ "unique_endpoints": state.endpointCount,
+ "max_endpoints": state.config.MaxEndpoints,
+ "endpoints_bucketed": state.endpointsBucketed,
+ "cardinality_ratio": float64(state.endpointCount) / float64(state.config.MaxEndpoints),
+ "cache_size": len(state.cache),
+ "collision_map_size": len(state.endpointCollisions),
+ }
+}
diff --git a/executors/http/dpn_test.go b/executors/http/dpn_test.go
new file mode 100644
index 00000000..0e498b53
--- /dev/null
+++ b/executors/http/dpn_test.go
@@ -0,0 +1,434 @@
+package http
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func TestExtractSimpleEndpoint(t *testing.T) {
+ tests := []struct {
+ name string
+ path string
+ expected string
+ notes string
+ }{
+ // Core & Hygiene
+ {"root path", "/", "root", "Root path"},
+ {"simple health", "/health", "health", "Simple health"},
+ {"k8s healthz", "/healthz", "healthz", "K8s style"},
+ {"k8s readyz", "/readyz", "readyz", "K8s style"},
+ {"status code", "/status/200", "status_200", "Keep meaningful 3-digit status"},
+ {"metrics endpoint", "/metrics", "metrics", "Prom metrics endpoint"},
+ {"double slashes", "/double//slashes///here", "double_slashes_here", "Collapse slashes"},
+ {"trailing slash", "/trailing/slash/", "trailing_slash", "Trim trailing slash"},
+ {"query fragment", "/path?query=1#frag", "path", "Drop query/fragment"},
+
+ // API Prefixes & Versions
+ {"api v1 users", "/api/v1/users", "users", "Drop api, v1"},
+ {"rest v3 customers", "/rest/v3/customers/cust123", "customers", "Drop rest, v3, ID"},
+ {"graphql v2 schema", "/graphql/v2/schema", "schema", "Drop graphql, v2"},
+ {"api-v2 payments", "/api-v2/payments/charge", "payments_charge", "api-v2 as version/prefix"},
+ {"date version", "/v2024-08-01/charges/abc123", "charges", "Date-style version dropped"},
+ {"prefix after mount", "/svc/api/v1/orders/123456", "svc_orders", "Prefix after mount point"},
+
+ // Method Token in Path (avoid GET_get_*)
+ {"get method token", "/get/delay/1", "delay_1", "Drop leading method token"},
+ {"post method token", "/post/data", "data", "Drop leading method token"},
+ {"put method token", "/put/users/123", "users_123", "Drop leading method token"},
+
+ // IDs (UUID/ULID/Mongo/numeric/mixed)
+ {"uuid", "/users/550e8400-e29b-41d4-a716-446655440000/profile", "users_profile", "UUID"},
+ {"ulid", "/sessions/01ARZ3NDEKTSV4RRFFQ69G5FAV", "sessions", "ULID"},
+ {"mongoid", "/obj/507f1f77bcf86cd799439011", "obj", "MongoID"},
+ {"timestamp", "/events/1699999999999", "events", "Timestamp (ms)"},
+ {"long numeric", "/tenants/12345678/billing", "tenants_billing", "Long numeric ID"},
+ {"resource digits", "/users/user123/profile", "users_profile", "Resource+digits dropped"},
+ {"multiple resource ids", "/orders/order_456/items/item789", "orders_items", "Multiple resource IDs dropped"},
+ {"hexy id", "/keys/ab12cd34ef56ab78", "keys", "Hexy-ish ID"},
+
+ // Meaningful numerics (keep)
+ {"status200", "/status200/check", "status200_check", "Keeplist token"},
+ {"http2", "/http2/support", "http2_support", "Keeplist token"},
+
+ // "me/self/current" aren't IDs
+ {"users me", "/users/me/profile", "users_me_profile", "Keep me"},
+ {"accounts self", "/accounts/self/limits", "accounts_self_limits", "Keep self"},
+ {"profiles current", "/profiles/current", "profiles_current", "Keep current"},
+
+ // Locales & Formats
+ {"content en", "/content/en/articles", "content_en_articles", "Keep locale"},
+ {"content en-US", "/content/en-US/articles", "content_en-us_articles", "Lowercase normalized"},
+ {"export csv", "/export/csv", "export_csv", "Keep format token"},
+ {"feed json", "/feed/recent.json", "feed_recent", "Trim file extension on last token"},
+
+ // Well-known Paths
+ {"well-known openid", "/.well-known/openid-configuration", ".well-known_openid-configuration", "Must keep both"},
+ {"well-known jwks", "/.well-known/jwks.json", ".well-known_jwks", "Trim extension"},
+
+ // Deep Paths → head(2) + tail(1)
+ {"deep billing path", "/billing/tenant123/invoices/invoice456/items/item789", "billing_invoices_items", "Long path shaping"},
+ {"deep orders path", "/api/v2/orders/order456/items/item789", "orders_items", "Long path shaping"},
+ {"generic deep path", "/a/b/c/d/e", "a_b_e", "Generic deep path"},
+
+ // Matrix params / path params / oddities
+ {"matrix param", "/app;jsessionid=ABC123/home", "app", "Drop matrix param"},
+ {"matrix param with ext", "/reports;region=us/2024/summary.pdf", "reports", "Drop matrix + trim ext"},
+ {"query param", "/users/123?expand=roles", "users_123", "Drop query"},
+
+ // Enhanced special cases
+ {"well-known openid", "/.well-known/openid-configuration", ".well-known_openid-configuration", "Keep well-known paths"},
+ {"jwks endpoint", "/.well-known/jwks.json", ".well-known_jwks", "Keep jwks with extension trimming"},
+ {"oauth2 endpoint", "/oauth2/authorize", "oauth2_authorize", "Keep oauth2"},
+ {"healthz endpoint", "/healthz", "healthz", "Keep healthz"},
+ {"readyz endpoint", "/readyz", "readyz", "Keep readyz"},
+ {"livez endpoint", "/livez", "livez", "Keep livez"},
+ {"metrics endpoint", "/metrics", "metrics", "Keep metrics"},
+ {"bulk endpoint", "/api/v1/bulk/upload", "bulk_upload", "Keep bulk"},
+ {"export endpoint", "/export/csv", "export_csv", "Keep export"},
+ {"search endpoint", "/search/users", "search_users", "Keep search"},
+
+ // Locales and formats
+ {"content en-US", "/content/en-US/articles", "content_en-us_articles", "Keep locale en-US"},
+ {"content es", "/content/es/news", "content_es_news", "Keep locale es"},
+ {"json format", "/api/v1/data.json", "data", "Keep json format"},
+ {"csv format", "/reports/export.csv", "reports_export", "Keep csv format"},
+ {"ndjson format", "/logs/events.ndjson", "logs_events.ndjson", "Keep ndjson format"},
+ {"xml format", "/config/settings.xml", "config_settings", "Keep xml format"},
+
+ // Special cases: me/self/current
+ {"users me", "/users/me/profile", "users_me_profile", "Keep me"},
+ {"accounts self", "/accounts/self/settings", "accounts_self_settings", "Keep self"},
+ {"profiles current", "/profiles/current", "profiles_current", "Keep current"},
+
+ // Template variables (should be stripped out)
+ {"template variable", "/data/{{.previousStep.value}}/view", "data_view", "Strip out template variables"},
+ {"template variable simple", "/data/{{.value}}/validate", "data_validate", "Strip out simple template variables"},
+ {"template variable in path", "/auth/view/{{.tenant_id}}/remove", "auth_view_remove", "Strip out template variables in middle of path"},
+ {"template variable with dots", "/config/{{.previousStep.value.vaule2}}/create", "config_create", "Strip out nested template variables"},
+
+ // Advanced version patterns
+ {"date version", "/api/v2024-10-01/users", "users", "Drop date-style version"},
+ {"api-v2 version", "/api-v2/payments", "payments", "Drop api-v2 version"},
+ {"v2alpha1 version", "/api/v2alpha1/config", "config", "Drop v2alpha1 version"},
+
+ // File extensions with dates
+ {"reports with date", "/reports/2024-10-01/summary.pdf", "reports_summary", "Drop date and trim extension"},
+
+ // Edge cases
+ {"empty path", "", "root", "Empty path"},
+ {"single slash", "/", "root", "Single slash"},
+ {"multiple slashes", "///", "root", "Multiple slashes"},
+ {"mixed case", "/API/V1/Users", "users", "Mixed case normalized"},
+ {"special chars", "/path-with-dashes/and_underscores", "path-with-dashes_and_underscores", "Special chars preserved"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := ExtractSimpleEndpoint(tt.path)
+ if result != tt.expected {
+ t.Errorf("ExtractSimpleEndpoint(%q) = %q, want %q (%s)", tt.path, result, tt.expected, tt.notes)
+ }
+ })
+ }
+}
+
+func TestExtractSimpleEndpointEdgeCases(t *testing.T) {
+ tests := []struct {
+ name string
+ path string
+ expected string
+ }{
+ // Very long paths (should be truncated)
+ {"very long path", "/this/is/a/very/long/path/that/would/produce/a/name/longer/than/eighty/characters/once/normalized", "this_is_normalized"},
+
+ // All numeric paths
+ {"all numeric", "/123/456/789", "123_456_789"},
+
+ // All special chars
+ {"all special", "/!@#$%^&*()", "!@"},
+
+ // Mixed ID patterns
+ {"mixed ids", "/users/123/orders/456/items/789", "users_123_789"},
+
+ // Version variations
+ {"version beta", "/api/v1beta/users", "users"},
+ {"version alpha", "/api/v2alpha1/users", "users"},
+ {"version rc", "/api/v1.0-rc1/users", "users"},
+
+ // Complex resource patterns
+ {"complex resource", "/api/v1/users/user-123-abc/profile", "users_profile"},
+ {"complex resource 2", "/api/v1/orders/order_456_def/items/item-789-ghi", "orders_items"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := ExtractSimpleEndpoint(tt.path)
+ if result != tt.expected {
+ t.Errorf("ExtractSimpleEndpoint(%q) = %q, want %q", tt.path, result, tt.expected)
+ }
+ })
+ }
+}
+
+func TestLooksLikeID(t *testing.T) {
+ tests := []struct {
+ name string
+ token string
+ expected bool
+ }{
+ {"short token", "abc", false},
+ {"exactly 6 chars", "abc123", true}, // digit_ratio = 0.5 >= 0.4
+ {"mixed long", "user123abc", false}, // digit_ratio = 0.33, digitRuns = 1, should not be ID
+ {"mostly letters", "userabc", false},
+ {"mostly digits", "123456", true},
+ {"alternating", "a1b2c3", true}, // 2 digit runs
+ {"status code", "status200", false}, // should be in keep list
+ {"http2", "http2", false}, // should be in keep list
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := looksLikeID(tt.token)
+ if result != tt.expected {
+ t.Errorf("looksLikeID(%q) = %v, want %v", tt.token, result, tt.expected)
+ }
+ })
+ }
+}
+
+func TestTrimExtIfAny(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expected string
+ }{
+ {"no extension", "filename", "filename"},
+ {"json extension", "data.json", "data"},
+ {"csv extension", "export.csv", "export"},
+ {"pdf extension", "report.pdf", "report"},
+ {"xml extension", "config.xml", "config"},
+ {"long extension", "file.verylongext", "file.verylongext"}, // > 6 chars
+ {"dot at start", ".hidden", ".hidden"},
+ {"multiple dots", "file.backup.json", "file.backup"},
+ {"empty string", "", ""},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := trimExtIfAny(tt.input)
+ if result != tt.expected {
+ t.Errorf("trimExtIfAny(%q) = %q, want %q", tt.input, result, tt.expected)
+ }
+ })
+ }
+}
+
+func TestNormalizeTemplateVariable(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expected string
+ }{
+ {"simple template var", "{{.tenant_locator}}", ""},
+ {"nested template var", "{{.setup.tenant_locator}}", ""},
+ {"deep nested template var", "{{.setup.tenant.locator}}", ""},
+ {"not a template var", "regular_token", "regular_token"},
+ {"empty template var", "{{.}}", ""},
+ {"malformed template var", "{{.tenant_locator", "{{.tenant_locator"},
+ {"multiple dots", "{{.a.b.c.d}}", ""},
+ {"single dot", "{{.simple}}", ""},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := normalizeTemplateVariable(tt.input)
+ if result != tt.expected {
+ t.Errorf("normalizeTemplateVariable(%q) = %q, want %q", tt.input, result, tt.expected)
+ }
+ })
+ }
+}
+
+func TestExtractSimpleEndpointWithGraphQL(t *testing.T) {
+ tests := []struct {
+ name string
+ path string
+ contentType string
+ body []byte
+ expected string
+ }{
+ {"graphql endpoint", "/api/graphql", "application/json", nil, "graphql"},
+ {"graphql with gql", "/gql", "application/json", nil, "graphql"},
+ {"graphql with operation", "/graphql", "application/json", []byte(`{"operationName":"ListUsers"}`), "graphql"},
+ {"graphql without operation", "/graphql", "application/json", []byte(`{"query":"{users{id}}"}`), "graphql"},
+ {"non-graphql endpoint", "/api/users", "application/json", nil, "users"},
+ {"graphql with complex body", "/graphql", "application/json", []byte(`{"operationName":"GetUserProfile","variables":{"id":123}}`), "graphql"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := ExtractSimpleEndpointWithGraphQL(tt.path, tt.contentType, tt.body)
+ if result != tt.expected {
+ t.Errorf("ExtractSimpleEndpointWithGraphQL(%q, %q, %q) = %q, want %q", tt.path, tt.contentType, string(tt.body), result, tt.expected)
+ }
+ })
+ }
+}
+
+func TestExtractSimpleEndpointWithMethod(t *testing.T) {
+ tests := []struct {
+ name string
+ path string
+ method string
+ expected string
+ notes string
+ }{
+ // Basic method prefixes
+ {"GET root", "/", "GET", "GET_root", "GET method with root path"},
+ {"POST users", "/api/v1/users", "POST", "POST_users", "POST method with API path"},
+ {"PUT profile", "/users/123/profile", "PUT", "PUT_users_123_profile", "PUT method with user path (ID kept in stateless mode)"},
+ {"DELETE item", "/orders/456/items/789", "DELETE", "DELETE_orders_456_789", "DELETE method with deep path (IDs kept in stateless mode)"},
+ {"PATCH settings", "/users/me/settings", "PATCH", "PATCH_users_me_settings", "PATCH method with me path"},
+ {"HEAD health", "/health", "HEAD", "HEAD_health", "HEAD method with health check"},
+ {"OPTIONS cors", "/api/cors", "OPTIONS", "OPTIONS_cors", "OPTIONS method with API path"},
+
+ // Method normalization
+ {"lowercase get", "/users", "get", "GET_users", "Lowercase method normalized to uppercase"},
+ {"mixed case post", "/posts", "PoSt", "POST_posts", "Mixed case method normalized"},
+ {"empty method", "/data", "", "GET_data", "Empty method defaults to GET"},
+
+ // Complex paths with method prefixes
+ {"GET deep path", "/api/v2/orders/order123/items/item456", "GET", "GET_orders_items", "GET with deep path normalization"},
+ {"POST graphql", "/graphql", "POST", "POST_graphql", "POST to GraphQL endpoint"},
+ {"PUT file upload", "/upload/files/file123.pdf", "PUT", "PUT_upload_files_file123", "PUT with file extension trimming (ID kept in stateless mode)"},
+ {"DELETE with query", "/users/123?force=true", "DELETE", "DELETE_users_123", "DELETE with query params (dropped, but ID kept in stateless mode)"},
+
+ // Special endpoints
+ {"GET well-known", "/.well-known/openid-configuration", "GET", "GET_.well-known_openid-configuration", "GET with well-known path"},
+ {"POST metrics", "/metrics", "POST", "POST_metrics", "POST to metrics endpoint"},
+ {"PUT healthz", "/healthz", "PUT", "PUT_healthz", "PUT to healthz endpoint"},
+
+ // Template variables with method prefixes (stripped out)
+ {"PATCH template var", "/policy/{{.setup.tenant_locator}}/holds", "PATCH", "PATCH_policy_holds", "PATCH with template variable stripped"},
+ {"PATCH simple template", "/policy/{{.tenant_locator}}/validate", "PATCH", "PATCH_policy_validate", "PATCH with simple template variable stripped"},
+ {"PATCH nested template", "/config/{{.setup.tenant.locator}}/create", "PATCH", "PATCH_config_create", "PATCH with nested template variable stripped"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := ExtractSimpleEndpointWithMethod(tt.path, tt.method)
+ if result != tt.expected {
+ t.Errorf("ExtractSimpleEndpointWithMethod(%q, %q) = %q, want %q (%s)", tt.path, tt.method, result, tt.expected, tt.notes)
+ }
+ })
+ }
+}
+
+func TestNormalizeEndpoint(t *testing.T) {
+ tests := []struct {
+ name string
+ path string
+ method string
+ contentType string
+ body []byte
+ expected string
+ notes string
+ }{
+ // GraphQL endpoints with methods
+ {"POST graphql", "/api/graphql", "POST", "application/json", nil, "POST_graphql", "POST to GraphQL endpoint"},
+ {"GET graphql", "/gql", "GET", "application/json", nil, "GET_graphql", "GET to GraphQL endpoint"},
+ {"POST graphql with operation", "/graphql", "POST", "application/json", []byte(`{"operationName":"ListUsers"}`), "POST_graphql", "POST with GraphQL operation"},
+ {"PUT graphql without operation", "/graphql", "PUT", "application/json", []byte(`{"query":"{users{id}}"}`), "PUT_graphql", "PUT without operation name"},
+
+ // Non-GraphQL endpoints with methods
+ {"GET users", "/api/users", "GET", "application/json", nil, "GET_users", "GET to regular API endpoint"},
+ {"POST users", "/api/users", "POST", "application/json", nil, "POST_users", "POST to regular API endpoint"},
+ {"PUT profile", "/users/123/profile", "PUT", "application/json", nil, "PUT_users_123_profile", "PUT to user profile"},
+
+ // Method normalization with GraphQL
+ {"lowercase post graphql", "/graphql", "post", "application/json", nil, "POST_graphql", "Lowercase method normalized"},
+ {"mixed case get graphql", "/gql", "GeT", "application/json", nil, "GET_graphql", "Mixed case method normalized"},
+ {"empty method graphql", "/graphql", "", "application/json", nil, "GET_graphql", "Empty method defaults to GET"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := NormalizeEndpoint(tt.path, tt.method, tt.contentType, tt.body)
+ if result != tt.expected {
+ t.Errorf("NormalizeEndpoint(%q, %q, %q, %q) = %q, want %q (%s)", tt.path, tt.method, tt.contentType, string(tt.body), result, tt.expected, tt.notes)
+ }
+ })
+ }
+}
+
+func TestCollisionHandling(t *testing.T) {
+ // Test collision detection with paths that should normalize to the same endpoint
+ path1 := "/api/v1/users/abc123/profile"
+ path2 := "/api/v1/users/def456/profile"
+
+ result1 := ExtractSimpleEndpoint(path1)
+ result2 := ExtractSimpleEndpoint(path2)
+
+ // Both should normalize to the same base endpoint
+ if !strings.HasPrefix(result1, "users_profile") || !strings.HasPrefix(result2, "users_profile") {
+ t.Errorf("Expected both to start with 'users_profile', got: %q and %q", result1, result2)
+ }
+
+ // With stateless implementation, they should be the same since each call creates a new state
+ if result1 != result2 {
+ t.Errorf("Expected same results with stateless implementation, got different: %q and %q", result1, result2)
+ }
+
+ // Both should NOT have hash suffixes in stateless mode
+ if strings.Contains(result1, "_") && len(strings.Split(result1, "_")) > 2 {
+ t.Errorf("Expected no hash suffixes in stateless mode, got: %q", result1)
+ }
+}
+
+func TestCardinalityLimit(t *testing.T) {
+ // Test cardinality limit with a reasonable number of endpoints
+ for i := 0; i < 10; i++ {
+ path := fmt.Sprintf("/test/endpoint/%d", i)
+ result := ExtractSimpleEndpoint(path)
+
+ // Just verify the function works without hitting limits
+ if result == "" {
+ t.Errorf("Expected non-empty result for path %s, got: %q", path, result)
+ }
+ }
+}
+
+// Benchmark tests for performance
+func BenchmarkExtractSimpleEndpoint(b *testing.B) {
+ paths := []string{
+ "/api/v1/users/user123/profile",
+ "/billing/tenant456/invoices/invoice789/items/item123",
+ "/.well-known/openid-configuration",
+ "/users/me/settings",
+ "/export/csv",
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, path := range paths {
+ ExtractSimpleEndpoint(path)
+ }
+ }
+}
+
+func BenchmarkLooksLikeID(b *testing.B) {
+ tokens := []string{
+ "user123",
+ "abc123def",
+ "status200",
+ "verylongtoken123",
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, token := range tokens {
+ looksLikeID(token)
+ }
+ }
+}
diff --git a/executors/http/http.go b/executors/http/http.go
index 51dd1834..41cd1ebe 100644
--- a/executors/http/http.go
+++ b/executors/http/http.go
@@ -22,6 +22,7 @@ import (
"github.com/mitchellh/mapstructure"
"github.com/ovh/cds/sdk/interpolate"
"github.com/ovh/venom"
+ "github.com/ovh/venom/reporting"
libopenapi "github.com/pb33f/libopenapi"
validator "github.com/pb33f/libopenapi-validator"
"github.com/pb33f/libopenapi-validator/errors"
@@ -341,6 +342,16 @@ func (Executor) Run(ctx context.Context, step venom.TestStep) (interface{}, erro
}
// ---
+ // Collect metrics if enabled
+ if metricsCollector := reporting.GetMetricsCollectorFromCtx(ctx); metricsCollector != nil {
+ // Normalize endpoint using DPN
+ normalizedEndpoint := NormalizeEndpoint(req.URL.Path, req.Method, req.Header.Get("Content-Type"), []byte(e.Body))
+
+ // Record HTTP request metrics with normalized endpoint
+ duration := time.Duration(result.TimeSeconds * float64(time.Second))
+ metricsCollector.RecordHTTPRequestWithEndpoint(duration, result.StatusCode, req.Method, normalizedEndpoint, nil)
+ }
+
if len(openapiValidationErrors) > 0 {
result.Err += "\nOpenAPI validation errors:\n" + strings.Join(openapiValidationErrors, "\n")
}
diff --git a/process.go b/process.go
index b12fa548..b237fcb9 100644
--- a/process.go
+++ b/process.go
@@ -2,6 +2,7 @@ package venom
import (
"context"
+ "encoding/json"
"fmt"
"os"
"path/filepath"
@@ -217,5 +218,18 @@ func (v *Venom) Process(ctx context.Context, path []string) error {
Debug(ctx, "final status: %s", v.Tests.Status)
+ // Write metrics if collector is enabled
+ if v.metricsCollector != nil && v.MetricsOutput != "" {
+ metrics := v.metricsCollector.GetMetrics()
+ data, err := json.MarshalIndent(metrics, "", " ")
+ if err != nil {
+ Error(ctx, "Failed to marshal metrics: %v", err)
+ } else {
+ if err := os.WriteFile(v.MetricsOutput, data, 0644); err != nil {
+ Error(ctx, "Failed to write metrics file: %v", err)
+ }
+ }
+ }
+
return nil
}
diff --git a/process_teststep.go b/process_teststep.go
index 13375139..4b4587ab 100644
--- a/process_teststep.go
+++ b/process_teststep.go
@@ -12,6 +12,7 @@ import (
"github.com/gosimple/slug"
"github.com/ovh/venom/interpolate"
+ "github.com/ovh/venom/reporting"
)
type dumpFile struct {
@@ -24,6 +25,11 @@ type dumpFile struct {
func (v *Venom) RunTestStep(ctx context.Context, e ExecutorRunner, tc *TestCase, tsResult *TestStepResult, stepNumber int, rangedIndex int, step TestStep) {
ctx = context.WithValue(ctx, ContextKey("executor"), e.Name())
+ // Add metrics collector to context if available
+ if v.metricsCollector != nil {
+ ctx = context.WithValue(ctx, reporting.MetricsCollectorContextKey, v.metricsCollector)
+ }
+
var assertRes AssertionsApplied
var result interface{}
@@ -107,6 +113,22 @@ func (v *Venom) RunTestStep(ctx context.Context, e ExecutorRunner, tc *TestCase,
tsResult.AssertionsApplied = assertRes
tsResult.ComputedVars.AddAll(H(mapResult))
+ // Record test check results if metrics collector is enabled
+ if v.metricsCollector != nil {
+ // Record overall test step result
+ stepName := fmt.Sprintf("step_%d", stepNumber)
+ if rangedIndex > 0 {
+ stepName = fmt.Sprintf("step_%d_%d", stepNumber, rangedIndex)
+ }
+ v.metricsCollector.RecordTestCheck(stepName, assertRes.OK)
+
+ // Record individual assertion results
+ for i, assertion := range assertRes.Assertions {
+ checkName := fmt.Sprintf("%s_assertion_%d", stepName, i)
+ v.metricsCollector.RecordTestCheck(checkName, assertion.IsOK)
+ }
+ }
+
if !assertRes.OK && len(assertRes.errors) > 0 {
if e.Type() == "user" {
generateFailureLinkForUserExecutor(ctx, result, tsResult, tc)
diff --git a/reporting/README.md b/reporting/README.md
new file mode 100644
index 00000000..a51c855d
--- /dev/null
+++ b/reporting/README.md
@@ -0,0 +1,81 @@
+# Venom Metrics & Performance Reporting
+
+Venom now includes comprehensive metrics collection and performance reporting capabilities for API testing.
+
+## Quick Start
+
+### 1. Enable Metrics Collection
+```bash
+# Collect metrics during test execution
+venom run --metrics-enabled --metrics-output=metrics.json tests/
+
+# With parallel execution
+venom run --metrics-enabled --metrics-output=parallel_output/metrics_{#}.json tests/ --parallel
+```
+
+### 2. Generate Performance Reports
+```bash
+# HTML report with default thresholds
+venom metrics-report metrics.json --html-output=report.html
+
+# Check thresholds and generate report
+venom metrics-report metrics.json --check-thresholds --thresholds=thresholds.yml --html-output=report.html
+
+# HTML report only (skip writing the aggregated JSON file)
+venom metrics-report metrics.json --html-only --html-output=report.html
+```
+
+## Features
+
+- **Metrics Collection**: HTTP request timing, status codes, and test assertion results
+- **Dynamic Path Normalization (DPN)**: Aggregates similar endpoints by dropping IDs, versions, and API prefixes (e.g., `GET /api/v1/users/{uuid}/profile` → `GET_users_profile`; see the example below)
+- **Performance Thresholds**: Configurable SLA validation with YAML configuration
+- **Interactive HTML Reports**: Charts, tables, and filtering with Chart.js
+- **CI Integration**: JUnit XML output for threshold violations
+- **Soft Fail Mode**: Non-blocking CI runs (default behavior)
+
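+The normalized endpoint names that appear in reports come from `NormalizeEndpoint` in
+`executors/http`. A minimal sketch of its behaviour (expected outputs taken from the
+cases in `executors/http/dpn_test.go`):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	venomhttp "github.com/ovh/venom/executors/http"
+)
+
+func main() {
+	// IDs, versions, and API prefixes are dropped; the HTTP method becomes a prefix.
+	fmt.Println(venomhttp.NormalizeEndpoint("/api/v1/users", "GET", "application/json", nil))
+	// GET_users
+
+	// UUID path segments are recognized as IDs and removed.
+	fmt.Println(venomhttp.NormalizeEndpoint(
+		"/users/550e8400-e29b-41d4-a716-446655440000/profile", "PUT", "application/json", nil))
+	// PUT_users_profile
+
+	// GraphQL endpoints collapse to a single name per method.
+	fmt.Println(venomhttp.NormalizeEndpoint("/api/graphql", "POST", "application/json", nil))
+	// POST_graphql
+}
+```
+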
+## Configuration
+
+### Thresholds File (`thresholds.yml`)
+```yaml
+options:
+ min_samples: 20 # Minimum samples for reliable metrics
+ tolerance_percent: 10 # 10% headroom above thresholds
+ soft_fail: false # Exit with error on violations
+
+defaults:
+ p95: 500ms # 95th percentile response time
+ p99: 1000ms # 99th percentile response time
+ avg: 200ms # Average response time
+
+# Group-based thresholds
+groups:
+ "POST_groups/*":
+ p95: 30000ms
+ avg: 15000ms
+
+# Endpoint-specific overrides
+endpoints:
+ "POST_specific_endpoint":
+ p95: 30000ms
+ avg: 25000ms
+```
+
+## CLI Options
+
+### `venom run`
+- `--metrics-enabled`: Enable metrics collection
+- `--metrics-output=FILE`: Output file for metrics (supports `{#}` placeholder)
+
+### `venom metrics-report`
+- `--check-thresholds`: Validate metrics against thresholds
+- `--thresholds=FILE`: Custom thresholds file (default: `thresholds.yml`)
+- `--html-output=FILE`: Generate HTML report
+- `--html-only`: Generate HTML without threshold validation
+- `--fail-on-breaches`: Exit with error on threshold violations (default: soft fail)
+
+## Output Files
+
+- **Metrics JSON**: HTTP and test check metrics
+- **HTML Report**: Interactive dashboard with charts, tables, and filtering
+- **JUnit XML**: CI-compatible output for threshold breaches (when using `--check-thresholds`)
\ No newline at end of file
diff --git a/reporting/aggregator/aggregator.go b/reporting/aggregator/aggregator.go
new file mode 100644
index 00000000..215f81be
--- /dev/null
+++ b/reporting/aggregator/aggregator.go
@@ -0,0 +1,375 @@
+package aggregator
+
+import (
+ "crypto/md5"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "math"
+ "strings"
+ "sync"
+ "time"
+)
+
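+// Config holds aggregation options: the endpoint cardinality limit, how overflow endpoints are handled, and the percentile merge strategy.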
+type Config struct {
+ MaxEndpoints int `json:"max_endpoints"`
+ NoBucket bool `json:"no_bucket"`
+ MergePercentiles string `json:"merge_percentiles"`
+}
+
+type Metrics struct {
+ RootGroup *TestGroup `json:"root_group"`
+ Metrics map[string]*Metric `json:"metrics"`
+ SetupData map[string]string `json:"setup_data,omitempty"`
+ StartTime time.Time `json:"start_time,omitempty"`
+ EndTime time.Time `json:"end_time,omitempty"`
+}
+
+type TestGroup struct {
+ Name string `json:"name"`
+ Path string `json:"path"`
+ ID string `json:"id"`
+ Groups map[string]*TestGroup `json:"groups"`
+ Checks map[string]*TestCheck `json:"checks"`
+}
+
+type TestCheck struct {
+ Name string `json:"name"`
+ Path string `json:"path"`
+ ID string `json:"id"`
+ Passes int64 `json:"passes"`
+ Fails int64 `json:"fails"`
+}
+
+type Metric struct {
+ Type string `json:"type"`
+ Values map[string]interface{} `json:"values"`
+}
+
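+// AggregateFiles reads the given metrics files concurrently and merges them into a single Metrics document according to config.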
+func AggregateFiles(inputFiles []string, config *Config) (*Metrics, error) {
+ if config == nil {
+ config = &Config{
+ MaxEndpoints: 2000,
+ NoBucket: false,
+ MergePercentiles: "weighted",
+ }
+ }
+
+ type fileResult struct {
+ metrics *Metrics
+ err error
+ file string
+ }
+
+ results := make(chan fileResult, len(inputFiles))
+ var wg sync.WaitGroup
+
+ for _, file := range inputFiles {
+ wg.Add(1)
+ go func(filename string) {
+ defer wg.Done()
+ metrics, err := ReadMetricsFile(filename)
+ results <- fileResult{metrics: metrics, err: err, file: filename}
+ }(file)
+ }
+
+ wg.Wait()
+ close(results)
+
+ var allMetrics []*Metrics
+ for result := range results {
+ if result.err != nil {
+ return nil, fmt.Errorf("error reading %s: %w", result.file, result.err)
+ }
+ allMetrics = append(allMetrics, result.metrics)
+ }
+
+ return AggregateMetrics(allMetrics, config)
+}
+
+func ReadMetricsFile(filename string) (*Metrics, error) {
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ var metrics Metrics
+ err = json.Unmarshal(data, &metrics)
+ if err != nil {
+ return nil, fmt.Errorf("invalid JSON in %s: %w", filename, err)
+ }
+
+ return &metrics, nil
+}
+
+func AggregateMetrics(metricsList []*Metrics, config *Config) (*Metrics, error) {
+ if len(metricsList) == 0 {
+ return nil, fmt.Errorf("no metrics to aggregate")
+ }
+
+ result := &Metrics{
+ RootGroup: &TestGroup{
+ Name: "",
+ Path: "",
+ ID: "d41d8cd98f00b204e9800998ecf8427e",
+ Groups: make(map[string]*TestGroup),
+ Checks: make(map[string]*TestCheck),
+ },
+ Metrics: make(map[string]*Metric),
+ SetupData: make(map[string]string),
+ StartTime: time.Now(),
+ EndTime: time.Now(),
+ }
+
+ endpointMap := make(map[string]string)
+ endpointCount := 0
+ endpointsBucketed := 0
+ for i, metrics := range metricsList {
+ if i == 0 || metrics.StartTime.Before(result.StartTime) {
+ result.StartTime = metrics.StartTime
+ }
+ if metrics.EndTime.After(result.EndTime) {
+ result.EndTime = metrics.EndTime
+ }
+
+ for checkName, check := range metrics.RootGroup.Checks {
+ if existing, exists := result.RootGroup.Checks[checkName]; exists {
+ existing.Passes += check.Passes
+ existing.Fails += check.Fails
+ } else {
+ result.RootGroup.Checks[checkName] = &TestCheck{
+ Name: check.Name,
+ Path: check.Path,
+ ID: check.ID,
+ Passes: check.Passes,
+ Fails: check.Fails,
+ }
+ }
+ }
+
+ for metricName, metric := range metrics.Metrics {
+ if isGlobalMetric(metricName) {
+ continue
+ }
+
+ normalizedName := normalizeEndpoint(metricName)
+
+ if endpointCount >= config.MaxEndpoints {
+ if config.NoBucket {
+ continue
+ } else {
+ normalizedName = "other"
+ endpointsBucketed++
+ }
+ } else {
+ if existingOriginal, exists := endpointMap[normalizedName]; exists && existingOriginal != metricName {
+ hash := fmt.Sprintf("%x", md5.Sum([]byte(metricName)))[:8]
+ normalizedName = normalizedName + "_" + hash
+ }
+ // Count an endpoint only the first time its (possibly hashed) name is seen,
+ // so the same endpoint coming from several input files is not double-counted
+ if _, seen := endpointMap[normalizedName]; !seen {
+ endpointCount++
+ }
+ endpointMap[normalizedName] = metricName
+ }
+
+ if existing, exists := result.Metrics[normalizedName]; exists {
+ mergeMetric(existing, metric, config.MergePercentiles)
+ } else {
+ result.Metrics[normalizedName] = cloneMetric(metric)
+ }
+ }
+ }
+
+ addGlobalMetrics(result, metricsList)
+
+ return result, nil
+}
+
+func isGlobalMetric(name string) bool {
+ globalMetrics := []string{
+ "checks", "data_received", "data_sent", "http_req_duration",
+ "http_req_failed", "http_reqs", "iterations", "vus", "vus_max",
+ "http_req_blocked", "http_req_connecting", "http_req_sending",
+ "http_req_waiting", "http_req_receiving", "http_req_tls_handshaking",
+ }
+
+ for _, global := range globalMetrics {
+ if name == global || strings.HasPrefix(name, global+"_") {
+ return true
+ }
+ }
+ return false
+}
+
+// normalizeEndpoint is currently a passthrough. It is kept as a hook so that
+// endpoint names can later be normalized (for example, collapsing IDs in paths)
+// without changing the aggregation logic.
+func normalizeEndpoint(endpoint string) string {
+	return endpoint
+}
+
+func mergeMetric(target, source *Metric, mergeStrategy string) {
+ if target.Type != source.Type {
+ return
+ }
+
+ switch target.Type {
+ case "trend":
+ mergeTrendMetric(target, source, mergeStrategy)
+ case "counter":
+ mergeCounterMetric(target, source)
+ case "rate":
+ mergeRateMetric(target, source)
+ case "gauge":
+ mergeGaugeMetric(target, source)
+ }
+}
+
+// mergeTrendMetric merges two trend metrics using count-weighted averages.
+// Only this weighted strategy is implemented; the mergeStrategy parameter is
+// currently unused.
+func mergeTrendMetric(target, source *Metric, mergeStrategy string) {
+	targetValues := target.Values
+	sourceValues := source.Values
+
+ targetCount := getFloat64(targetValues, "count", 0)
+ sourceCount := getFloat64(sourceValues, "count", 0)
+ totalCount := targetCount + sourceCount
+
+ if totalCount == 0 {
+ return
+ }
+
+ targetValues["count"] = totalCount
+ targetValues["min"] = math.Min(getFloat64(targetValues, "min", math.MaxFloat64), getFloat64(sourceValues, "min", math.MaxFloat64))
+ targetValues["max"] = math.Max(getFloat64(targetValues, "max", 0), getFloat64(sourceValues, "max", 0))
+
+ targetAvg := getFloat64(targetValues, "avg", 0)
+ sourceAvg := getFloat64(sourceValues, "avg", 0)
+ targetSum := targetAvg * targetCount
+ sourceSum := sourceAvg * sourceCount
+ totalSum := targetSum + sourceSum
+ targetValues["avg"] = totalSum / totalCount
+
+ percentiles := []string{"p(50)", "p(90)", "p(95)", "p(99)"}
+ for _, p := range percentiles {
+ if _, targetExists := targetValues[p]; targetExists {
+ if _, sourceExists := sourceValues[p]; sourceExists {
+ targetP := getFloat64(targetValues, p, 0)
+ sourceP := getFloat64(sourceValues, p, 0)
+ weightedP := (targetP*targetCount + sourceP*sourceCount) / totalCount
+ targetValues[p] = weightedP
+ }
+ }
+ }
+
+	// Recompute the request rate. The "duration" key is not always present; with
+	// the fallback of 1, the rate then simply equals the merged count.
+	duration := getFloat64(targetValues, "duration", 1)
+	targetValues["rate"] = totalCount / duration
+}
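+
+// Worked example of the count-weighted merge above: merging a source trend with
+// count=3, p(90)=280 into a target with count=2, p(90)=140 yields
+// (140*2 + 280*3) / 5 = 224. Weighting pre-computed percentiles like this is an
+// approximation; exact percentiles cannot be recovered from aggregated values.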
+
+func mergeCounterMetric(target, source *Metric) {
+ targetValues := target.Values
+ sourceValues := source.Values
+
+ targetCount := getFloat64(targetValues, "count", 0)
+ sourceCount := getFloat64(sourceValues, "count", 0)
+ targetValues["count"] = targetCount + sourceCount
+
+ totalCount := targetCount + sourceCount
+ if totalCount > 0 {
+ duration := getFloat64(targetValues, "duration", 1)
+ targetValues["rate"] = totalCount / duration
+ }
+}
+
+func mergeRateMetric(target, source *Metric) {
+ targetValues := target.Values
+ sourceValues := source.Values
+
+ targetPasses := getFloat64(targetValues, "passes", 0)
+ sourcePasses := getFloat64(sourceValues, "passes", 0)
+ targetFails := getFloat64(targetValues, "fails", 0)
+ sourceFails := getFloat64(sourceValues, "fails", 0)
+
+ targetValues["passes"] = targetPasses + sourcePasses
+ targetValues["fails"] = targetFails + sourceFails
+
+ totalPasses := targetPasses + sourcePasses
+ totalFails := targetFails + sourceFails
+ total := totalPasses + totalFails
+ if total > 0 {
+ targetValues["value"] = totalPasses / total
+ }
+}
+
+func mergeGaugeMetric(target, source *Metric) {
+	targetValues := target.Values
+	sourceValues := source.Values
+
+	// Gauges are merged per key: "min" keeps the smaller of the two values,
+	// every other numeric key (value, max, ...) keeps the larger one.
+	for key, sourceVal := range sourceValues {
+		sourceFloat, ok := sourceVal.(float64)
+		if !ok {
+			continue
+		}
+		targetVal, exists := targetValues[key]
+		if !exists {
+			targetValues[key] = sourceFloat
+			continue
+		}
+		if targetFloat, ok := targetVal.(float64); ok {
+			if key == "min" {
+				targetValues[key] = math.Min(targetFloat, sourceFloat)
+			} else {
+				targetValues[key] = math.Max(targetFloat, sourceFloat)
+			}
+		}
+	}
+}
+
+func addGlobalMetrics(result *Metrics, metricsList []*Metrics) {
+ globalMetrics := make(map[string]*Metric)
+
+ for _, metrics := range metricsList {
+ for metricName, metric := range metrics.Metrics {
+ if isGlobalMetric(metricName) {
+ if existing, exists := globalMetrics[metricName]; exists {
+ mergeMetric(existing, metric, "weighted")
+ } else {
+ globalMetrics[metricName] = cloneMetric(metric)
+ }
+ }
+ }
+ }
+
+ for name, metric := range globalMetrics {
+ result.Metrics[name] = metric
+ }
+}
+
+func cloneMetric(metric *Metric) *Metric {
+ cloned := &Metric{
+ Type: metric.Type,
+ Values: make(map[string]interface{}),
+ }
+
+ for k, v := range metric.Values {
+ cloned.Values[k] = v
+ }
+
+ return cloned
+}
+
+func getFloat64(values map[string]interface{}, key string, defaultValue float64) float64 {
+ if val, exists := values[key]; exists {
+ switch v := val.(type) {
+ case float64:
+ return v
+ case int:
+ return float64(v)
+ case int64:
+ return float64(v)
+ }
+ }
+ return defaultValue
+}
+
+func WriteOutput(metrics *Metrics, filename string) error {
+ data, err := json.MarshalIndent(metrics, "", " ")
+ if err != nil {
+ return fmt.Errorf("error marshaling metrics: %w", err)
+ }
+
+ err = ioutil.WriteFile(filename, data, 0644)
+ if err != nil {
+ return fmt.Errorf("error writing file %s: %w", filename, err)
+ }
+
+ return nil
+}
diff --git a/reporting/aggregator/aggregator_test.go b/reporting/aggregator/aggregator_test.go
new file mode 100644
index 00000000..68fed47b
--- /dev/null
+++ b/reporting/aggregator/aggregator_test.go
@@ -0,0 +1,526 @@
+package aggregator
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+)
+
+func TestAggregateFiles(t *testing.T) {
+ // Create test metrics files
+ testFiles := createTestMetricsFiles(t)
+ defer cleanupTestFiles(testFiles)
+
+ // Test basic aggregation
+ config := &Config{
+ MaxEndpoints: 10,
+ NoBucket: false,
+ MergePercentiles: "weighted",
+ }
+
+ result, err := AggregateFiles(testFiles, config)
+ if err != nil {
+ t.Fatalf("AggregateFiles failed: %v", err)
+ }
+
+ // Verify basic structure
+ if result.RootGroup == nil {
+ t.Error("RootGroup should not be nil")
+ }
+
+ if len(result.Metrics) == 0 {
+ t.Error("Metrics should not be empty")
+ }
+
+ // Verify checks aggregation
+ if len(result.RootGroup.Checks) != 3 {
+ t.Errorf("Expected 3 checks, got %d", len(result.RootGroup.Checks))
+ }
+
+ // Verify specific check aggregation
+ if check, exists := result.RootGroup.Checks["status_200"]; exists {
+ if check.Passes != 2 {
+ t.Errorf("Expected 2 passes for status_200, got %d", check.Passes)
+ }
+ if check.Fails != 0 {
+ t.Errorf("Expected 0 fails for status_200, got %d", check.Fails)
+ }
+ } else {
+ t.Error("status_200 check should exist")
+ }
+}
+
+func TestAggregateFilesWithCardinalityLimit(t *testing.T) {
+ // Create test files with many endpoints
+ testFiles := createTestMetricsFilesWithManyEndpoints(t)
+ defer cleanupTestFiles(testFiles)
+
+ // Test with low cardinality limit
+ config := &Config{
+ MaxEndpoints: 2,
+ NoBucket: false,
+ MergePercentiles: "weighted",
+ }
+
+ result, err := AggregateFiles(testFiles, config)
+ if err != nil {
+ t.Fatalf("AggregateFiles failed: %v", err)
+ }
+
+ // Should have "other" bucket
+ if _, exists := result.Metrics["other"]; !exists {
+ t.Error("Should have 'other' bucket when cardinality limit is exceeded")
+ }
+}
+
+func TestAggregateFilesNoBucket(t *testing.T) {
+ // Create test files with many endpoints
+ testFiles := createTestMetricsFilesWithManyEndpoints(t)
+ defer cleanupTestFiles(testFiles)
+
+ // Test with no bucketing
+ config := &Config{
+ MaxEndpoints: 2,
+ NoBucket: true,
+ MergePercentiles: "weighted",
+ }
+
+ result, err := AggregateFiles(testFiles, config)
+ if err != nil {
+ t.Fatalf("AggregateFiles failed: %v", err)
+ }
+
+ // Should not have "other" bucket
+ if _, exists := result.Metrics["other"]; exists {
+ t.Error("Should not have 'other' bucket when no-bucket is enabled")
+ }
+
+	// Only the 2 allowed endpoints should remain (these fixtures contain no global metrics)
+	if len(result.Metrics) > 2 {
+ t.Errorf("Expected fewer endpoints when no-bucket is enabled, got %d", len(result.Metrics))
+ }
+}
+
+func TestReadMetricsFile(t *testing.T) {
+ // Create a test metrics file
+ metrics := &Metrics{
+ RootGroup: &TestGroup{
+ Name: "test",
+ Path: "::test",
+ ID: "test123",
+ Groups: make(map[string]*TestGroup),
+ Checks: map[string]*TestCheck{
+ "test_check": {
+ Name: "test_check",
+ Path: "::test::test_check",
+ ID: "check123",
+ Passes: 1,
+ Fails: 0,
+ },
+ },
+ },
+ Metrics: map[string]*Metric{
+ "test_metric": {
+ Type: "trend",
+ Values: map[string]interface{}{
+ "count": 1.0,
+ "avg": 100.0,
+ "min": 100.0,
+ "max": 100.0,
+ },
+ },
+ },
+ StartTime: time.Now(),
+ EndTime: time.Now(),
+ }
+
+ // Write to file
+ filename := "test_metrics.json"
+ data, err := json.Marshal(metrics)
+ if err != nil {
+ t.Fatalf("Failed to marshal metrics: %v", err)
+ }
+
+ err = os.WriteFile(filename, data, 0644)
+ if err != nil {
+ t.Fatalf("Failed to write test file: %v", err)
+ }
+ defer os.Remove(filename)
+
+ // Read and verify
+ readMetrics, err := ReadMetricsFile(filename)
+ if err != nil {
+ t.Fatalf("ReadMetricsFile failed: %v", err)
+ }
+
+ if readMetrics.RootGroup.Name != "test" {
+ t.Errorf("Expected name 'test', got '%s'", readMetrics.RootGroup.Name)
+ }
+
+ if len(readMetrics.Metrics) != 1 {
+ t.Errorf("Expected 1 metric, got %d", len(readMetrics.Metrics))
+ }
+}
+
+func TestMergeTrendMetric(t *testing.T) {
+ target := &Metric{
+ Type: "trend",
+ Values: map[string]interface{}{
+ "count": 2.0,
+ "avg": 100.0,
+ "min": 50.0,
+ "max": 150.0,
+ "p(90)": 140.0,
+ },
+ }
+
+ source := &Metric{
+ Type: "trend",
+ Values: map[string]interface{}{
+ "count": 3.0,
+ "avg": 200.0,
+ "min": 100.0,
+ "max": 300.0,
+ "p(90)": 280.0,
+ },
+ }
+
+ mergeTrendMetric(target, source, "weighted")
+
+ // Verify merged values
+ if count := getFloat64(target.Values, "count", 0); count != 5.0 {
+ t.Errorf("Expected count 5.0, got %f", count)
+ }
+
+ if avg := getFloat64(target.Values, "avg", 0); avg != 160.0 {
+ t.Errorf("Expected avg 160.0, got %f", avg)
+ }
+
+ if min := getFloat64(target.Values, "min", 0); min != 50.0 {
+ t.Errorf("Expected min 50.0, got %f", min)
+ }
+
+ if max := getFloat64(target.Values, "max", 0); max != 300.0 {
+ t.Errorf("Expected max 300.0, got %f", max)
+ }
+
+ if p90 := getFloat64(target.Values, "p(90)", 0); p90 != 224.0 {
+ t.Errorf("Expected p(90) 224.0, got %f", p90)
+ }
+}
+
+func TestMergeCounterMetric(t *testing.T) {
+ target := &Metric{
+ Type: "counter",
+ Values: map[string]interface{}{
+ "count": 10.0,
+ "rate": 5.0,
+ },
+ }
+
+ source := &Metric{
+ Type: "counter",
+ Values: map[string]interface{}{
+ "count": 20.0,
+ "rate": 10.0,
+ },
+ }
+
+ mergeCounterMetric(target, source)
+
+ // Verify merged values
+ if count := getFloat64(target.Values, "count", 0); count != 30.0 {
+ t.Errorf("Expected count 30.0, got %f", count)
+ }
+}
+
+func TestMergeRateMetric(t *testing.T) {
+ target := &Metric{
+ Type: "rate",
+ Values: map[string]interface{}{
+ "passes": 8.0,
+ "fails": 2.0,
+ "value": 0.8,
+ },
+ }
+
+ source := &Metric{
+ Type: "rate",
+ Values: map[string]interface{}{
+ "passes": 9.0,
+ "fails": 1.0,
+ "value": 0.9,
+ },
+ }
+
+ mergeRateMetric(target, source)
+
+ // Verify merged values
+ if passes := getFloat64(target.Values, "passes", 0); passes != 17.0 {
+ t.Errorf("Expected passes 17.0, got %f", passes)
+ }
+
+ if fails := getFloat64(target.Values, "fails", 0); fails != 3.0 {
+ t.Errorf("Expected fails 3.0, got %f", fails)
+ }
+
+ if value := getFloat64(target.Values, "value", 0); value != 0.85 {
+ t.Errorf("Expected value 0.85, got %f", value)
+ }
+}
+
+func TestIsGlobalMetric(t *testing.T) {
+ globalTests := []struct {
+ name string
+ expected bool
+ }{
+ {"checks", true},
+ {"data_received", true},
+ {"http_req_duration", true},
+ {"http_req_failed", true},
+ {"http_reqs", true},
+ {"iterations", true},
+ {"vus", true},
+ {"vus_max", true},
+ {"http_req_status_200", false},
+ {"status_200", false},
+ {"delay_1", false},
+ {"users_profile", false},
+ }
+
+ for _, test := range globalTests {
+ result := isGlobalMetric(test.name)
+ if result != test.expected {
+ t.Errorf("isGlobalMetric(%s) = %v, expected %v", test.name, result, test.expected)
+ }
+ }
+}
+
+func TestWriteOutput(t *testing.T) {
+ metrics := &Metrics{
+ RootGroup: &TestGroup{
+ Name: "test",
+ Path: "::test",
+ ID: "test123",
+ Groups: make(map[string]*TestGroup),
+ Checks: make(map[string]*TestCheck),
+ },
+ Metrics: make(map[string]*Metric),
+ SetupData: make(map[string]string),
+ StartTime: time.Now(),
+ EndTime: time.Now(),
+ }
+
+ filename := "test_output.json"
+ err := WriteOutput(metrics, filename)
+ if err != nil {
+ t.Fatalf("WriteOutput failed: %v", err)
+ }
+ defer os.Remove(filename)
+
+ // Verify file was created and contains valid JSON
+ data, err := os.ReadFile(filename)
+ if err != nil {
+ t.Fatalf("Failed to read output file: %v", err)
+ }
+
+ var readMetrics Metrics
+ err = json.Unmarshal(data, &readMetrics)
+ if err != nil {
+ t.Fatalf("Failed to unmarshal output file: %v", err)
+ }
+
+ if readMetrics.RootGroup.Name != "test" {
+ t.Errorf("Expected name 'test', got '%s'", readMetrics.RootGroup.Name)
+ }
+}
+
+// Helper functions
+
+func createTestMetricsFiles(t *testing.T) []string {
+ files := []string{}
+
+ // File 1
+ metrics1 := &Metrics{
+ RootGroup: &TestGroup{
+ Name: "",
+ Path: "",
+ ID: "test1",
+ Groups: make(map[string]*TestGroup),
+ Checks: map[string]*TestCheck{
+ "status_200": {
+ Name: "status_200",
+ Path: "::status_200",
+ ID: "check1",
+ Passes: 1,
+ Fails: 0,
+ },
+ "delay_1": {
+ Name: "delay_1",
+ Path: "::delay_1",
+ ID: "check2",
+ Passes: 1,
+ Fails: 0,
+ },
+ },
+ },
+ Metrics: map[string]*Metric{
+ "status_200": {
+ Type: "trend",
+ Values: map[string]interface{}{
+ "count": 1.0,
+ "avg": 100.0,
+ "min": 100.0,
+ "max": 100.0,
+ },
+ },
+ "delay_1": {
+ Type: "trend",
+ Values: map[string]interface{}{
+ "count": 1.0,
+ "avg": 200.0,
+ "min": 200.0,
+ "max": 200.0,
+ },
+ },
+ "http_req_duration": {
+ Type: "trend",
+ Values: map[string]interface{}{
+ "count": 2.0,
+ "avg": 150.0,
+ "min": 100.0,
+ "max": 200.0,
+ },
+ },
+ },
+ StartTime: time.Now(),
+ EndTime: time.Now(),
+ }
+
+ filename1 := "test_metrics_1.json"
+ files = append(files, filename1)
+ writeMetricsToFile(t, metrics1, filename1)
+
+ // File 2
+ metrics2 := &Metrics{
+ RootGroup: &TestGroup{
+ Name: "",
+ Path: "",
+ ID: "test2",
+ Groups: make(map[string]*TestGroup),
+ Checks: map[string]*TestCheck{
+ "status_200": {
+ Name: "status_200",
+ Path: "::status_200",
+ ID: "check1",
+ Passes: 1,
+ Fails: 0,
+ },
+ "status_404": {
+ Name: "status_404",
+ Path: "::status_404",
+ ID: "check3",
+ Passes: 1,
+ Fails: 0,
+ },
+ },
+ },
+ Metrics: map[string]*Metric{
+ "status_200": {
+ Type: "trend",
+ Values: map[string]interface{}{
+ "count": 1.0,
+ "avg": 150.0,
+ "min": 150.0,
+ "max": 150.0,
+ },
+ },
+ "status_404": {
+ Type: "trend",
+ Values: map[string]interface{}{
+ "count": 1.0,
+ "avg": 50.0,
+ "min": 50.0,
+ "max": 50.0,
+ },
+ },
+ "http_req_duration": {
+ Type: "trend",
+ Values: map[string]interface{}{
+ "count": 2.0,
+ "avg": 100.0,
+ "min": 50.0,
+ "max": 150.0,
+ },
+ },
+ },
+ StartTime: time.Now(),
+ EndTime: time.Now(),
+ }
+
+ filename2 := "test_metrics_2.json"
+ files = append(files, filename2)
+ writeMetricsToFile(t, metrics2, filename2)
+
+ return files
+}
+
+func createTestMetricsFilesWithManyEndpoints(t *testing.T) []string {
+ files := []string{}
+
+ // Create multiple files with many different endpoints
+ for i := 1; i <= 3; i++ {
+ metrics := &Metrics{
+ RootGroup: &TestGroup{
+ Name: "",
+ Path: "",
+ ID: "test",
+ Groups: make(map[string]*TestGroup),
+ Checks: make(map[string]*TestCheck),
+ },
+ Metrics: make(map[string]*Metric),
+ StartTime: time.Now(),
+ EndTime: time.Now(),
+ }
+
+ // Add many different endpoints
+ for j := 1; j <= 5; j++ {
+ endpointName := fmt.Sprintf("endpoint_%d_%d", i, j)
+ metrics.Metrics[endpointName] = &Metric{
+ Type: "trend",
+ Values: map[string]interface{}{
+ "count": 1.0,
+ "avg": float64(100 + j),
+ "min": float64(100 + j),
+ "max": float64(100 + j),
+ },
+ }
+ }
+
+ filename := fmt.Sprintf("test_many_endpoints_%d.json", i)
+ files = append(files, filename)
+ writeMetricsToFile(t, metrics, filename)
+ }
+
+ return files
+}
+
+func writeMetricsToFile(t *testing.T, metrics *Metrics, filename string) {
+ data, err := json.Marshal(metrics)
+ if err != nil {
+ t.Fatalf("Failed to marshal metrics: %v", err)
+ }
+
+ err = os.WriteFile(filename, data, 0644)
+ if err != nil {
+ t.Fatalf("Failed to write test file %s: %v", filename, err)
+ }
+}
+
+func cleanupTestFiles(files []string) {
+ for _, file := range files {
+ os.Remove(file)
+ }
+}
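+
+// ExampleAggregateFiles is a compile-only sketch of the typical aggregation
+// flow. The input file names are placeholders and the example is never
+// executed because it declares no expected output.
+func ExampleAggregateFiles() {
+	cfg := &Config{MaxEndpoints: 2000, MergePercentiles: "weighted"}
+	merged, err := AggregateFiles([]string{"metrics_1.json", "metrics_2.json"}, cfg)
+	if err != nil {
+		fmt.Println("aggregation failed:", err)
+		return
+	}
+	fmt.Printf("aggregated %d metrics\n", len(merged.Metrics))
+}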
diff --git a/reporting/metrics.go b/reporting/metrics.go
new file mode 100644
index 00000000..b9cf24e7
--- /dev/null
+++ b/reporting/metrics.go
@@ -0,0 +1,549 @@
+package reporting
+
+import (
+ "context"
+ "crypto/md5"
+ "encoding/json"
+ "fmt"
+ "math"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Logger interface for logging functionality
+type Logger interface {
+ Debug(ctx context.Context, format string, args ...interface{})
+}
+
+// Global logger instance - will be set by the main venom package
+var globalLogger Logger
+
+// SetLogger sets the global logger for the reporting package
+func SetLogger(logger Logger) {
+ globalLogger = logger
+}
+
+type MetricsCollector interface {
+ RecordHTTPRequest(duration time.Duration, statusCode int, err error)
+ RecordHTTPRequestWithEndpoint(duration time.Duration, statusCode int, method, endpoint string, err error)
+ RecordTestCheck(checkName string, passed bool)
+ RecordTestStructure(groups map[string]*TestGroup, setupData map[string]string)
+ GetMetrics() *Metrics
+ Reset()
+}
+
+type Metrics struct {
+ RootGroup *TestGroup `json:"root_group"`
+ Metrics map[string]*Metric `json:"metrics"`
+ SetupData map[string]string `json:"setup_data,omitempty"`
+ StartTime time.Time `json:"start_time,omitempty"`
+ EndTime time.Time `json:"end_time,omitempty"`
+}
+
+type TestGroup struct {
+ Name string `json:"name"`
+ Path string `json:"path"`
+ ID string `json:"id"`
+ Groups map[string]*TestGroup `json:"groups"`
+ Checks map[string]*TestCheck `json:"checks"`
+}
+
+type TestCheck struct {
+ Name string `json:"name"`
+ Path string `json:"path"`
+ ID string `json:"id"`
+ Passes int64 `json:"passes"`
+ Fails int64 `json:"fails"`
+}
+
+type Metric struct {
+ Type string `json:"type"`
+ Values map[string]interface{} `json:"values"`
+}
+
+type MetricsConfig struct {
+ Enabled bool `json:"enabled" yaml:"enabled"`
+ Format string `json:"format" yaml:"format"`
+ Output string `json:"output" yaml:"output"`
+}
+
+func DefaultMetricsConfig() *MetricsConfig {
+ return &MetricsConfig{
+ Enabled: false,
+ Format: "json",
+ Output: "",
+ }
+}
+
+type metricsCollector struct {
+ mu sync.RWMutex
+
+ httpRequests []time.Duration
+ httpStatusCodes map[int]int64
+ httpErrors int64
+ httpTotal int64
+
+ httpRequestsByEndpoint map[string][]time.Duration
+ httpStatusCodesByEndpoint map[string]map[int]int64
+ httpErrorsByEndpoint map[string]int64
+ httpTotalByEndpoint map[string]int64
+
+ // Test check tracking
+ testChecks map[string]*TestCheck
+
+ testGroups map[string]*TestGroup
+ setupData map[string]string
+
+ startTime time.Time
+ endTime time.Time
+}
+
+func NewMetricsCollector() MetricsCollector {
+ return &metricsCollector{
+ httpStatusCodes: make(map[int]int64),
+ httpRequestsByEndpoint: make(map[string][]time.Duration),
+ httpStatusCodesByEndpoint: make(map[string]map[int]int64),
+ httpErrorsByEndpoint: make(map[string]int64),
+ httpTotalByEndpoint: make(map[string]int64),
+ testChecks: make(map[string]*TestCheck),
+ testGroups: make(map[string]*TestGroup),
+ setupData: make(map[string]string),
+ startTime: time.Now(),
+ }
+}
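+
+// Usage sketch (illustrative): record one request and one check, then persist a
+// snapshot. The duration, endpoint, and file name are arbitrary example values.
+//
+//	collector := reporting.NewMetricsCollector()
+//	collector.RecordHTTPRequestWithEndpoint(120*time.Millisecond, 200, "GET", "/users", nil)
+//	collector.RecordTestCheck("status_200", true)
+//	if err := reporting.SaveMetricsToFile(collector.GetMetrics(), "metrics_run1.json"); err != nil {
+//		return err
+//	}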
+
+func (mc *metricsCollector) RecordHTTPRequest(duration time.Duration, statusCode int, err error) {
+ mc.mu.Lock()
+ defer mc.mu.Unlock()
+
+ mc.httpRequests = append(mc.httpRequests, duration)
+ mc.httpTotal++
+
+ // Consider both network errors and HTTP error status codes (4xx, 5xx) as failures
+ isError := err != nil || statusCode >= 400
+
+ if isError {
+ mc.httpErrors++
+ } else {
+ mc.httpStatusCodes[statusCode]++
+ }
+}
+
+func (mc *metricsCollector) RecordHTTPRequestWithEndpoint(duration time.Duration, statusCode int, method, endpoint string, err error) {
+ mc.mu.Lock()
+ defer mc.mu.Unlock()
+
+ endpointKey := fmt.Sprintf("%s %s", method, endpoint)
+
+ mc.httpRequests = append(mc.httpRequests, duration)
+ mc.httpTotal++
+
+ // Consider both network errors and HTTP error status codes (4xx, 5xx) as failures
+ isError := err != nil || statusCode >= 400
+
+ if isError {
+ mc.httpErrors++
+ } else {
+ mc.httpStatusCodes[statusCode]++
+ }
+
+ if mc.httpRequestsByEndpoint[endpointKey] == nil {
+ mc.httpRequestsByEndpoint[endpointKey] = make([]time.Duration, 0)
+ mc.httpStatusCodesByEndpoint[endpointKey] = make(map[int]int64)
+ mc.httpTotalByEndpoint[endpointKey] = 0
+ mc.httpErrorsByEndpoint[endpointKey] = 0
+ }
+
+ mc.httpRequestsByEndpoint[endpointKey] = append(mc.httpRequestsByEndpoint[endpointKey], duration)
+ mc.httpTotalByEndpoint[endpointKey]++
+
+	if isError {
+		mc.httpErrorsByEndpoint[endpointKey]++
+	}
+
+	// Always record status codes for tracking, regardless of whether they're errors.
+	// Recording only here avoids counting successful requests twice for the endpoint.
+	mc.httpStatusCodesByEndpoint[endpointKey][statusCode]++
+}
+
+func (mc *metricsCollector) RecordTestCheck(checkName string, passed bool) {
+ mc.mu.Lock()
+ defer mc.mu.Unlock()
+
+ if mc.testChecks[checkName] == nil {
+ mc.testChecks[checkName] = &TestCheck{
+ Name: checkName,
+ Path: fmt.Sprintf("::%s", checkName),
+ ID: generateID(fmt.Sprintf("::%s", checkName)),
+ Passes: 0,
+ Fails: 0,
+ }
+ }
+
+ if passed {
+ mc.testChecks[checkName].Passes++
+ } else {
+ mc.testChecks[checkName].Fails++
+ }
+}
+
+func (mc *metricsCollector) GetMetrics() *Metrics {
+	// A full write lock is required here because endTime is updated below.
+	mc.mu.Lock()
+	defer mc.mu.Unlock()
+
+	mc.endTime = time.Now()
+
+ metrics := &Metrics{
+ Metrics: make(map[string]*Metric),
+ StartTime: mc.startTime,
+ EndTime: mc.endTime,
+ RootGroup: &TestGroup{
+ Name: "",
+ Path: "",
+ ID: "d41d8cd98f00b204e9800998ecf8427e",
+ Groups: mc.testGroups,
+ Checks: mc.testChecks,
+ },
+ SetupData: mc.setupData,
+ }
+
+ // HTTP metrics
+ if mc.httpTotal > 0 {
+ httpReqDuration := mc.calculateDurationMetrics(mc.httpRequests)
+ httpReqDuration.Values["count"] = mc.httpTotal
+ httpReqDuration.Values["rate"] = mc.calculateRate(mc.httpTotal, mc.startTime, mc.endTime)
+
+ metrics.Metrics["http_req_duration"] = httpReqDuration
+ metrics.Metrics["http_reqs"] = &Metric{
+ Type: "counter",
+ Values: map[string]interface{}{
+ "count": mc.httpTotal,
+ "rate": mc.calculateRate(mc.httpTotal, mc.startTime, mc.endTime),
+ },
+ }
+
+ // Status code distribution
+ for statusCode, count := range mc.httpStatusCodes {
+ metricName := fmt.Sprintf("http_req_status_%d", statusCode)
+ metrics.Metrics[metricName] = &Metric{
+ Type: "counter",
+ Values: map[string]interface{}{
+ "count": count,
+ },
+ }
+ }
+
+ // Error rate
+ if mc.httpErrors > 0 {
+ errorRate := float64(mc.httpErrors) / float64(mc.httpTotal) * 100
+ metrics.Metrics["http_req_failed"] = &Metric{
+ Type: "rate",
+ Values: map[string]interface{}{
+ "passes": 0,
+ "fails": mc.httpErrors,
+ "thresholds": map[string]interface{}{
+ "rate<0.01": false,
+ },
+ "value": errorRate,
+ },
+ }
+ } else {
+ // No errors
+ metrics.Metrics["http_req_failed"] = &Metric{
+ Type: "rate",
+ Values: map[string]interface{}{
+ "passes": mc.httpTotal,
+ "fails": 0,
+ "thresholds": map[string]interface{}{
+ "rate<0.01": true,
+ },
+ "value": 0,
+ },
+ }
+ }
+
+ metrics.Metrics["iterations"] = &Metric{
+ Type: "counter",
+ Values: map[string]interface{}{
+ "count": mc.httpTotal,
+ "rate": mc.calculateRate(mc.httpTotal, mc.startTime, mc.endTime),
+ },
+ }
+
+ metrics.Metrics["checks"] = &Metric{
+ Type: "rate",
+ Values: map[string]interface{}{
+ "passes": mc.httpTotal - mc.httpErrors,
+ "fails": mc.httpErrors,
+ "value": float64(mc.httpTotal-mc.httpErrors) / float64(mc.httpTotal),
+ },
+ }
+
+		// data_sent / data_received are rough estimates (1 KiB sent and 2 KiB
+		// received per request); the collector does not track actual byte counts.
+		estimatedDataSent := mc.httpTotal * 1024
+		estimatedDataReceived := mc.httpTotal * 2048
+
+ metrics.Metrics["data_sent"] = &Metric{
+ Type: "counter",
+ Values: map[string]interface{}{
+ "count": estimatedDataSent,
+ "rate": mc.calculateRate(estimatedDataSent, mc.startTime, mc.endTime),
+ },
+ }
+
+ metrics.Metrics["data_received"] = &Metric{
+ Type: "counter",
+ Values: map[string]interface{}{
+ "count": estimatedDataReceived,
+ "rate": mc.calculateRate(estimatedDataReceived, mc.startTime, mc.endTime),
+ },
+ }
+
+ metrics.Metrics["vus"] = &Metric{
+ Type: "gauge",
+ Values: map[string]interface{}{
+ "value": 1,
+ "min": 1,
+ "max": 1,
+ },
+ }
+
+ metrics.Metrics["vus_max"] = &Metric{
+ Type: "gauge",
+ Values: map[string]interface{}{
+ "value": 1,
+ "min": 1,
+ "max": 1,
+ },
+ }
+
+ // Per-endpoint HTTP metrics
+ for endpointKey, requests := range mc.httpRequestsByEndpoint {
+ if len(requests) > 0 {
+ endpointDuration := mc.calculateDurationMetrics(requests)
+ endpointDuration.Values["count"] = mc.httpTotalByEndpoint[endpointKey]
+ endpointDuration.Values["rate"] = mc.calculateRate(mc.httpTotalByEndpoint[endpointKey], mc.startTime, mc.endTime)
+
+ endpointName := endpointKey
+ if idx := strings.Index(endpointKey, " "); idx != -1 {
+ endpointName = endpointKey[idx+1:]
+ }
+
+ metricName := endpointName
+ metrics.Metrics[metricName] = endpointDuration
+
+ checkID := generateID(fmt.Sprintf("::%s", endpointName))
+ metrics.RootGroup.Checks[endpointName] = &TestCheck{
+ Name: endpointName,
+ Path: fmt.Sprintf("::%s", endpointName),
+ ID: checkID,
+ Passes: mc.httpTotalByEndpoint[endpointKey] - mc.httpErrorsByEndpoint[endpointKey],
+ Fails: mc.httpErrorsByEndpoint[endpointKey],
+ }
+
+ // Per-endpoint status codes
+ if statusCodes, exists := mc.httpStatusCodesByEndpoint[endpointKey]; exists {
+ for statusCode, count := range statusCodes {
+ statusMetricName := fmt.Sprintf("http_req_status_%s_%d", endpointName, statusCode)
+ metrics.Metrics[statusMetricName] = &Metric{
+ Type: "counter",
+ Values: map[string]interface{}{
+ "count": count,
+ },
+ }
+ }
+ }
+
+ // Per-endpoint error rate
+ if mc.httpErrorsByEndpoint[endpointKey] > 0 {
+ endpointErrorRate := float64(mc.httpErrorsByEndpoint[endpointKey]) / float64(mc.httpTotalByEndpoint[endpointKey]) * 100
+ errorMetricName := fmt.Sprintf("http_req_failed_%s", endpointName)
+ metrics.Metrics[errorMetricName] = &Metric{
+ Type: "rate",
+ Values: map[string]interface{}{
+ "value": endpointErrorRate,
+ },
+ }
+ }
+ }
+ }
+ }
+
+ return metrics
+}
+
+func (mc *metricsCollector) Reset() {
+ mc.mu.Lock()
+ defer mc.mu.Unlock()
+
+ mc.httpRequests = nil
+ mc.httpStatusCodes = make(map[int]int64)
+ mc.httpErrors = 0
+ mc.httpTotal = 0
+
+ mc.httpRequestsByEndpoint = make(map[string][]time.Duration)
+ mc.httpStatusCodesByEndpoint = make(map[string]map[int]int64)
+ mc.httpErrorsByEndpoint = make(map[string]int64)
+ mc.httpTotalByEndpoint = make(map[string]int64)
+
+ mc.testChecks = make(map[string]*TestCheck)
+
+ mc.startTime = time.Now()
+ mc.endTime = time.Time{}
+}
+
+func (mc *metricsCollector) calculateDurationMetrics(durations []time.Duration) *Metric {
+ if len(durations) == 0 {
+ return &Metric{
+ Type: "trend",
+ Values: make(map[string]interface{}),
+ }
+ }
+
+ values := make([]float64, len(durations))
+ for i, d := range durations {
+ values[i] = float64(d.Milliseconds())
+ }
+
+ sort.Float64s(values)
+
+ metric := &Metric{
+ Type: "trend",
+ Values: map[string]interface{}{
+ "min": values[0],
+ "max": values[len(values)-1],
+ "avg": mc.calculateAverage(values),
+ },
+ }
+
+ if len(values) > 0 {
+ metric.Values["p(50)"] = mc.calculatePercentile(values, 50)
+ metric.Values["p(90)"] = mc.calculatePercentile(values, 90)
+ metric.Values["p(95)"] = mc.calculatePercentile(values, 95)
+ metric.Values["p(99)"] = mc.calculatePercentile(values, 99)
+ }
+
+ return metric
+}
+
+func (mc *metricsCollector) calculateAverage(values []float64) float64 {
+ if len(values) == 0 {
+ return 0
+ }
+
+ sum := 0.0
+ for _, v := range values {
+ sum += v
+ }
+ return sum / float64(len(values))
+}
+
+func (mc *metricsCollector) calculatePercentile(values []float64, percentile int) float64 {
+ if len(values) == 0 {
+ return 0
+ }
+
+ index := float64(percentile) / 100.0 * float64(len(values)-1)
+ if index == float64(int(index)) {
+ return values[int(index)]
+ }
+
+ lower := int(math.Floor(index))
+ upper := int(math.Ceil(index))
+ weight := index - float64(lower)
+
+ return values[lower]*(1-weight) + values[upper]*weight
+}
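+
+// For example, with sorted values [100, 200, 300, 400] and percentile 90 the
+// index is 0.9*3 = 2.7, so the result interpolates between values[2] and
+// values[3]: 300*0.3 + 400*0.7 = 370.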
+
+func (mc *metricsCollector) calculateRate(count int64, start, end time.Time) float64 {
+ duration := end.Sub(start).Seconds()
+ if duration <= 0 {
+ return 0
+ }
+ return float64(count) / duration
+}
+
+func SaveMetricsToFile(metrics *Metrics, filename string) error {
+ data, err := json.MarshalIndent(metrics, "", " ")
+ if err != nil {
+ return fmt.Errorf("failed to marshal metrics: %w", err)
+ }
+
+ if err := os.WriteFile(filename, data, 0644); err != nil {
+ return fmt.Errorf("failed to write metrics file: %w", err)
+ }
+
+ return nil
+}
+
+func PrintMetricsSummary(ctx context.Context, metrics *Metrics) {
+	if metrics == nil || len(metrics.Metrics) == 0 || globalLogger == nil {
+		return
+	}
+
+	globalLogger.Debug(ctx, "=== Performance Metrics Summary ===")
+
+	// HTTP metrics summary
+	if httpReqDuration, exists := metrics.Metrics["http_req_duration"]; exists {
+		values := httpReqDuration.Values
+		globalLogger.Debug(ctx, "HTTP Requests:")
+		if httpReqs, ok := metrics.Metrics["http_reqs"]; ok {
+			globalLogger.Debug(ctx, "  Total: %v", httpReqs.Values["count"])
+		}
+		globalLogger.Debug(ctx, "  Duration - Min: %.2fms, Max: %.2fms, Avg: %.2fms",
+			values["min"], values["max"], values["avg"])
+		if p50, ok := values["p(50)"].(float64); ok {
+			globalLogger.Debug(ctx, "  Percentiles - P50: %.2fms, P90: %.2fms, P95: %.2fms, P99: %.2fms",
+				p50, values["p(90)"], values["p(95)"], values["p(99)"])
+		}
+	}
+
+	// Exec metrics summary
+	if execDuration, exists := metrics.Metrics["exec_duration"]; exists {
+		values := execDuration.Values
+		globalLogger.Debug(ctx, "Exec Commands:")
+		if execCommands, ok := metrics.Metrics["exec_commands"]; ok {
+			globalLogger.Debug(ctx, "  Total: %v", execCommands.Values["count"])
+		}
+		globalLogger.Debug(ctx, "  Duration - Min: %.2fms, Max: %.2fms, Avg: %.2fms",
+			values["min"], values["max"], values["avg"])
+		if p50, ok := values["p(50)"].(float64); ok {
+			globalLogger.Debug(ctx, "  Percentiles - P50: %.2fms, P90: %.2fms, P95: %.2fms, P99: %.2fms",
+				p50, values["p(90)"], values["p(95)"], values["p(99)"])
+		}
+	}
+
+	globalLogger.Debug(ctx, "=== End Performance Metrics ===")
+}
+
+// ContextKey represents a context key type
+type ContextKey string
+
+// MetricsCollectorContextKey is the context key under which a MetricsCollector is stored.
+const MetricsCollectorContextKey ContextKey = "metrics_collector"
+
+func GetMetricsCollectorFromCtx(ctx context.Context) MetricsCollector {
+ if collector, ok := ctx.Value(MetricsCollectorContextKey).(MetricsCollector); ok {
+ return collector
+ }
+ return nil
+}
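+
+// Minimal sketch of storing and retrieving a collector through the context
+// (elapsed, statusCode, and reqErr are placeholder variables):
+//
+//	ctx = context.WithValue(ctx, reporting.MetricsCollectorContextKey, collector)
+//	if c := reporting.GetMetricsCollectorFromCtx(ctx); c != nil {
+//		c.RecordHTTPRequest(elapsed, statusCode, reqErr)
+//	}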
+
+func (mc *metricsCollector) RecordTestStructure(groups map[string]*TestGroup, setupData map[string]string) {
+ mc.mu.Lock()
+ defer mc.mu.Unlock()
+
+ mc.testGroups = groups
+ mc.setupData = setupData
+}
+
+func generateID(input string) string {
+ hash := md5.Sum([]byte(input))
+ return fmt.Sprintf("%x", hash)
+}
diff --git a/reporting/metrics_html.go b/reporting/metrics_html.go
new file mode 100644
index 00000000..1ad8a256
--- /dev/null
+++ b/reporting/metrics_html.go
@@ -0,0 +1,169 @@
+package reporting
+
+import (
+ "embed"
+ "encoding/json"
+ "fmt"
+ "html/template"
+ "os"
+
+ "github.com/ovh/venom/reporting/aggregator"
+)
+
+//go:embed metrics_html_template.html
+var templateContent embed.FS
+
+func GenerateMetricsHTMLReport(metrics *aggregator.Metrics, outputFile string) error {
+ return GenerateMetricsHTMLReportWithThresholds(metrics, outputFile, nil)
+}
+
+func GenerateMetricsHTMLReportWithThresholds(metrics *aggregator.Metrics, outputFile string, thresholdConfig *ThresholdConfig) error {
+ metricsJSON, err := json.Marshal(metrics)
+ if err != nil {
+ return fmt.Errorf("failed to marshal metrics to JSON: %w", err)
+ }
+
+ // Use default threshold config if none provided
+ if thresholdConfig == nil {
+ thresholdConfig = DefaultThresholdConfig()
+ }
+
+ // Convert threshold config to JavaScript-compatible format
+ jsThresholds := convertThresholdsForJS(thresholdConfig)
+ thresholdsJSON, err := json.Marshal(jsThresholds)
+ if err != nil {
+ return fmt.Errorf("failed to marshal thresholds to JSON: %w", err)
+ }
+
+ templateData, err := templateContent.ReadFile("metrics_html_template.html")
+ if err != nil {
+ return fmt.Errorf("failed to read template file: %w", err)
+ }
+
+ tmpl, err := template.New("metrics_html_report").Parse(string(templateData))
+ if err != nil {
+ return fmt.Errorf("failed to parse template: %w", err)
+ }
+
+ file, err := os.Create(outputFile)
+ if err != nil {
+ return fmt.Errorf("failed to create output file: %w", err)
+ }
+ defer file.Close()
+
+ data := struct {
+ MetricsJSON template.JS
+ ThresholdsJSON template.JS
+ }{
+ MetricsJSON: template.JS(metricsJSON),
+ ThresholdsJSON: template.JS(thresholdsJSON),
+ }
+
+ err = tmpl.Execute(file, data)
+ if err != nil {
+ return fmt.Errorf("failed to execute template: %w", err)
+ }
+
+ return nil
+}
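+
+// Usage sketch (illustrative): render the HTML report for an aggregated metrics
+// set with thresholds loaded from a YAML file. "merged" stands for an
+// *aggregator.Metrics value produced by the aggregator package; file names are
+// placeholders.
+//
+//	thresholds, err := reporting.LoadThresholdConfig("thresholds.yml")
+//	if err != nil {
+//		thresholds = reporting.DefaultThresholdConfig()
+//	}
+//	if err := reporting.GenerateMetricsHTMLReportWithThresholds(merged, "metrics_report.html", thresholds); err != nil {
+//		return err
+//	}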
+
+// JSThresholdValues represents threshold values in JavaScript-compatible format
+type JSThresholdValues struct {
+ P50 *float64 `json:"p50,omitempty"`
+ P90 *float64 `json:"p90,omitempty"`
+ P95 *float64 `json:"p95,omitempty"`
+ P99 *float64 `json:"p99,omitempty"`
+ Avg *float64 `json:"avg,omitempty"`
+ Max *float64 `json:"max,omitempty"`
+ ErrorRate *float64 `json:"error_rate,omitempty"`
+ RPS *float64 `json:"rps,omitempty"`
+ MinSamples *int `json:"min_samples,omitempty"`
+}
+
+// JSThresholdConfig represents the complete threshold configuration in JavaScript-compatible format
+type JSThresholdConfig struct {
+ Defaults JSThresholdValues `json:"defaults"`
+ Groups map[string]JSThresholdValues `json:"groups"`
+ Endpoints map[string]JSThresholdValues `json:"endpoints"`
+ Options struct {
+ TolerancePercent float64 `json:"tolerance_percent"`
+ MinSamples int `json:"min_samples"`
+ SoftFail bool `json:"soft_fail"`
+ } `json:"options"`
+}
+
+// convertThresholdsForJS converts Go threshold config to JavaScript-compatible format
+func convertThresholdsForJS(config *ThresholdConfig) *JSThresholdConfig {
+ jsConfig := &JSThresholdConfig{
+ Defaults: convertThresholdValuesForJS(config.Defaults),
+ Groups: make(map[string]JSThresholdValues),
+ Endpoints: make(map[string]JSThresholdValues),
+ Options: struct {
+ TolerancePercent float64 `json:"tolerance_percent"`
+ MinSamples int `json:"min_samples"`
+ SoftFail bool `json:"soft_fail"`
+ }{
+ TolerancePercent: config.Options.TolerancePercent,
+ MinSamples: config.Options.MinSamples,
+ SoftFail: config.Options.SoftFail,
+ },
+ }
+
+ // Convert groups
+ for name, values := range config.Groups {
+ jsConfig.Groups[name] = convertThresholdValuesForJS(values)
+ }
+
+ // Convert endpoints
+ for name, values := range config.Endpoints {
+ jsConfig.Endpoints[name] = convertThresholdValuesForJS(values)
+ }
+
+ return jsConfig
+}
+
+// convertThresholdValuesForJS converts ThresholdValues to JavaScript-compatible format
+func convertThresholdValuesForJS(values ThresholdValues) JSThresholdValues {
+ jsValues := JSThresholdValues{}
+
+ // Convert duration thresholds to milliseconds
+ if values.P50 != nil {
+ ms := float64(values.P50.Value.Nanoseconds()) / 1e6
+ jsValues.P50 = &ms
+ }
+ if values.P90 != nil {
+ ms := float64(values.P90.Value.Nanoseconds()) / 1e6
+ jsValues.P90 = &ms
+ }
+ if values.P95 != nil {
+ ms := float64(values.P95.Value.Nanoseconds()) / 1e6
+ jsValues.P95 = &ms
+ }
+ if values.P99 != nil {
+ ms := float64(values.P99.Value.Nanoseconds()) / 1e6
+ jsValues.P99 = &ms
+ }
+ if values.Avg != nil {
+ ms := float64(values.Avg.Value.Nanoseconds()) / 1e6
+ jsValues.Avg = &ms
+ }
+ if values.Max != nil {
+ ms := float64(values.Max.Value.Nanoseconds()) / 1e6
+ jsValues.Max = &ms
+ }
+
+ // Convert rate thresholds (already in correct format)
+ if values.ErrorRate != nil {
+ jsValues.ErrorRate = &values.ErrorRate.Value
+ }
+ if values.RPS != nil {
+ jsValues.RPS = &values.RPS.Value
+ }
+
+ // Copy min samples
+ if values.MinSamples != nil {
+ jsValues.MinSamples = values.MinSamples
+ }
+
+ return jsValues
+}
diff --git a/reporting/metrics_html_template.html b/reporting/metrics_html_template.html
new file mode 100644
index 00000000..1a94a725
--- /dev/null
+++ b/reporting/metrics_html_template.html
@@ -0,0 +1,3942 @@
+<!-- metrics_html_template.html (self-contained report template; full markup not preserved in this extract). -->
+<!-- Recoverable structure: a "Venom Performance Metrics Report" page with Overview, Endpoints, -->
+<!-- Performance, and Checks tabs; placeholder panels for upcoming "Performance Trends" and -->
+<!-- "Regression Detection" features; endpoint filters (Min Requests, performance buckets: -->
+<!-- Excellent <200ms, Good 200-500ms, Poor 500-1000ms, Critical >1000ms; threshold status: -->
+<!-- Compliant, Warnings, Breaches); and a checks table (Check Name, Passes, Fails, Success Rate, -->
+<!-- Status, Total, Fail Rate) with search and All / Passed Only / Failed Only filters. -->
diff --git a/reporting/thresholds.go b/reporting/thresholds.go
new file mode 100644
index 00000000..114238a1
--- /dev/null
+++ b/reporting/thresholds.go
@@ -0,0 +1,458 @@
+package reporting
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "gopkg.in/yaml.v3"
+)
+
+// ThresholdConfig represents the complete threshold configuration
+type ThresholdConfig struct {
+ Defaults ThresholdValues `json:"defaults" yaml:"defaults"`
+ Groups map[string]ThresholdValues `json:"groups" yaml:"groups"`
+ Endpoints map[string]ThresholdValues `json:"endpoints" yaml:"endpoints"`
+ Options ThresholdOptions `json:"options" yaml:"options"`
+}
+
+// ThresholdValues defines the threshold values for various metrics
+type ThresholdValues struct {
+ P50 *DurationThreshold `json:"p50,omitempty" yaml:"p50,omitempty"`
+ P90 *DurationThreshold `json:"p90,omitempty" yaml:"p90,omitempty"`
+ P95 *DurationThreshold `json:"p95,omitempty" yaml:"p95,omitempty"`
+ P99 *DurationThreshold `json:"p99,omitempty" yaml:"p99,omitempty"`
+ Avg *DurationThreshold `json:"avg,omitempty" yaml:"avg,omitempty"`
+ Max *DurationThreshold `json:"max,omitempty" yaml:"max,omitempty"`
+ ErrorRate *RateThreshold `json:"error_rate,omitempty" yaml:"error_rate,omitempty"`
+ RPS *RateThreshold `json:"rps,omitempty" yaml:"rps,omitempty"`
+ MinSamples *int `json:"min_samples,omitempty" yaml:"min_samples,omitempty"`
+}
+
+// DurationThreshold represents a duration-based threshold
+type DurationThreshold struct {
+ Value time.Duration `json:"value" yaml:"value"`
+ Tolerance *float64 `json:"tolerance_percent,omitempty" yaml:"tolerance_percent,omitempty"`
+}
+
+// UnmarshalYAML implements custom YAML unmarshaling for DurationThreshold
+func (dt *DurationThreshold) UnmarshalYAML(value *yaml.Node) error {
+ if value.Kind == yaml.ScalarNode {
+ // Handle string duration like "500ms"
+ durationStr := value.Value
+ duration, err := time.ParseDuration(durationStr)
+ if err != nil {
+ return fmt.Errorf("invalid duration format '%s': %w", durationStr, err)
+ }
+ dt.Value = duration
+ return nil
+ } else if value.Kind == yaml.MappingNode {
+ // Handle object format with value and tolerance
+ var temp struct {
+ Value string `yaml:"value"`
+ Tolerance *float64 `yaml:"tolerance_percent,omitempty"`
+ }
+ if err := value.Decode(&temp); err != nil {
+ return err
+ }
+
+ duration, err := time.ParseDuration(temp.Value)
+ if err != nil {
+ return fmt.Errorf("invalid duration format '%s': %w", temp.Value, err)
+ }
+ dt.Value = duration
+ dt.Tolerance = temp.Tolerance
+ return nil
+ }
+ return fmt.Errorf("invalid DurationThreshold format")
+}
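+
+// Both YAML forms are accepted for a duration threshold, for example:
+//
+//	p95: 500ms              # scalar form
+//	p95:                    # object form with a per-threshold tolerance
+//	  value: 500ms
+//	  tolerance_percent: 15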
+
+// RateThreshold represents a rate-based threshold (0.0 to 1.0 for rates, any value for RPS)
+type RateThreshold struct {
+ Value float64 `json:"value" yaml:"value"`
+ Tolerance *float64 `json:"tolerance_percent,omitempty" yaml:"tolerance_percent,omitempty"`
+}
+
+// UnmarshalYAML implements custom YAML unmarshaling for RateThreshold
+func (rt *RateThreshold) UnmarshalYAML(value *yaml.Node) error {
+ if value.Kind == yaml.ScalarNode {
+ // Handle numeric value
+ val, err := strconv.ParseFloat(value.Value, 64)
+ if err != nil {
+ return fmt.Errorf("invalid rate value '%s': %w", value.Value, err)
+ }
+ rt.Value = val
+ return nil
+ } else if value.Kind == yaml.MappingNode {
+ // Handle object format with value and tolerance
+ var temp struct {
+ Value float64 `yaml:"value"`
+ Tolerance *float64 `yaml:"tolerance_percent,omitempty"`
+ }
+ if err := value.Decode(&temp); err != nil {
+ return err
+ }
+ rt.Value = temp.Value
+ rt.Tolerance = temp.Tolerance
+ return nil
+ }
+ return fmt.Errorf("invalid RateThreshold format")
+}
+
+// ThresholdOptions defines global options for threshold validation
+type ThresholdOptions struct {
+ TolerancePercent float64 `json:"tolerance_percent" yaml:"tolerance_percent"`
+ MinSamples int `json:"min_samples" yaml:"min_samples"`
+ SoftFail bool `json:"soft_fail" yaml:"soft_fail"`
+}
+
+// ThresholdBreach represents a threshold breach
+type ThresholdBreach struct {
+ Endpoint string `json:"endpoint"`
+ Metric string `json:"metric"`
+ Value float64 `json:"value"`
+ Threshold float64 `json:"threshold"`
+ Unit string `json:"unit"`
+ Severity string `json:"severity"`
+ SampleCount int64 `json:"sample_count"`
+}
+
+// DefaultThresholdConfig returns a sensible default threshold configuration
+func DefaultThresholdConfig() *ThresholdConfig {
+ return &ThresholdConfig{
+ Defaults: ThresholdValues{
+ P95: &DurationThreshold{Value: 500 * time.Millisecond},
+ P99: &DurationThreshold{Value: 1000 * time.Millisecond},
+ Avg: &DurationThreshold{Value: 200 * time.Millisecond},
+ ErrorRate: &RateThreshold{Value: 0.01}, // 1%
+ },
+ Groups: map[string]ThresholdValues{
+ "auth/*": {
+ P95: &DurationThreshold{Value: 350 * time.Millisecond},
+ },
+ "catalog/*": {
+ P95: &DurationThreshold{Value: 450 * time.Millisecond},
+ },
+ },
+ Endpoints: map[string]ThresholdValues{
+ "GET /users": {
+ P95: &DurationThreshold{Value: 300 * time.Millisecond},
+ Avg: &DurationThreshold{Value: 150 * time.Millisecond},
+ },
+ "POST /orders": {
+ P95: &DurationThreshold{Value: 800 * time.Millisecond},
+ Avg: &DurationThreshold{Value: 400 * time.Millisecond},
+ },
+ },
+ Options: ThresholdOptions{
+ TolerancePercent: 10.0, // 10% tolerance by default
+ MinSamples: 100, // Require at least 100 samples for reliable percentiles
+ SoftFail: false,
+ },
+ }
+}
+
+// GetThresholdForEndpoint returns the effective threshold values for a given endpoint
+// Priority: endpoints > groups > defaults
+func (tc *ThresholdConfig) GetThresholdForEndpoint(endpoint string) ThresholdValues {
+ result := tc.Defaults
+
+ // Check group patterns (order matters - first match wins)
+ for pattern, groupThresholds := range tc.Groups {
+ if matchesPattern(endpoint, pattern) {
+ result = mergeThresholdValues(result, groupThresholds)
+ break
+ }
+ }
+
+ // Check exact endpoint matches
+ if endpointThresholds, exists := tc.Endpoints[endpoint]; exists {
+ result = mergeThresholdValues(result, endpointThresholds)
+ }
+
+ return result
+}
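+
+// With DefaultThresholdConfig, for example, "GET /users" resolves to its
+// endpoint-specific P95 of 300ms, "auth/login" picks up the "auth/*" group P95
+// of 350ms, and any other endpoint falls back to the default P95 of 500ms.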
+
+// matchesPattern checks if an endpoint matches a pattern (supports wildcards and basic regex)
+func matchesPattern(endpoint, pattern string) bool {
+ // Convert wildcard pattern to regex
+ regexPattern := strings.ReplaceAll(pattern, "*", ".*")
+ regexPattern = "^" + regexPattern + "$"
+
+ matched, err := regexp.MatchString(regexPattern, endpoint)
+ if err != nil {
+ // If regex compilation fails, fall back to exact match
+ return endpoint == pattern
+ }
+
+ return matched
+}
+
+// mergeThresholdValues merges two threshold value sets, with the second taking precedence
+func mergeThresholdValues(base, override ThresholdValues) ThresholdValues {
+ result := base
+
+ if override.P50 != nil {
+ result.P50 = override.P50
+ }
+ if override.P90 != nil {
+ result.P90 = override.P90
+ }
+ if override.P95 != nil {
+ result.P95 = override.P95
+ }
+ if override.P99 != nil {
+ result.P99 = override.P99
+ }
+ if override.Avg != nil {
+ result.Avg = override.Avg
+ }
+ if override.Max != nil {
+ result.Max = override.Max
+ }
+ if override.ErrorRate != nil {
+ result.ErrorRate = override.ErrorRate
+ }
+ if override.RPS != nil {
+ result.RPS = override.RPS
+ }
+ if override.MinSamples != nil {
+ result.MinSamples = override.MinSamples
+ }
+
+ return result
+}
+
+// ValidateThresholds checks if metrics violate any thresholds
+func (tc *ThresholdConfig) ValidateThresholds(metrics *Metrics) []ThresholdBreach {
+ var breaches []ThresholdBreach
+
+ // Get minimum samples requirement
+ minSamples := tc.Options.MinSamples
+ if minSamples == 0 {
+ minSamples = 100 // Default minimum
+ }
+
+ // Check each endpoint metric
+ for metricName, metric := range metrics.Metrics {
+ // Skip non-endpoint metrics
+ if !isEndpointMetric(metricName) {
+ continue
+ }
+
+ endpoint := metricName
+ thresholds := tc.GetThresholdForEndpoint(endpoint)
+
+ // Check sample count first
+ sampleCount := int64(0)
+ if count, ok := metric.Values["count"].(int64); ok {
+ sampleCount = count
+ } else if count, ok := metric.Values["count"].(float64); ok {
+ sampleCount = int64(count)
+ }
+
+ // Skip if not enough samples
+ if sampleCount < int64(minSamples) {
+ continue
+ }
+
+ // Check duration-based thresholds
+ breaches = append(breaches, tc.checkDurationThresholds(endpoint, metric, thresholds, sampleCount)...)
+
+ // Check rate-based thresholds
+ breaches = append(breaches, tc.checkRateThresholds(endpoint, metric, thresholds, sampleCount)...)
+ }
+
+ return breaches
+}
+
+// isEndpointMetric checks if a metric name represents an endpoint metric
+func isEndpointMetric(metricName string) bool {
+ // Skip system metrics
+ systemMetrics := []string{
+ "http_req_duration", "http_reqs", "http_req_failed", "iterations",
+ "checks", "data_sent", "data_received", "vus", "vus_max",
+ }
+
+ for _, sys := range systemMetrics {
+ if strings.HasPrefix(metricName, sys) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// checkDurationThresholds checks duration-based thresholds (p50, p90, p95, p99, avg, max)
+func (tc *ThresholdConfig) checkDurationThresholds(endpoint string, metric *Metric, thresholds ThresholdValues, sampleCount int64) []ThresholdBreach {
+ var breaches []ThresholdBreach
+
+ // Define duration threshold checks
+ checks := []struct {
+ key string
+ threshold *DurationThreshold
+ unit string
+ }{
+ {"p(50)", thresholds.P50, "ms"},
+ {"p(90)", thresholds.P90, "ms"},
+ {"p(95)", thresholds.P95, "ms"},
+ {"p(99)", thresholds.P99, "ms"},
+ {"avg", thresholds.Avg, "ms"},
+ {"max", thresholds.Max, "ms"},
+ }
+
+ for _, check := range checks {
+ if check.threshold == nil {
+ continue
+ }
+
+ value, ok := metric.Values[check.key].(float64)
+ if !ok {
+ continue
+ }
+
+ thresholdMs := float64(check.threshold.Value.Milliseconds())
+ tolerance := tc.Options.TolerancePercent
+ if check.threshold.Tolerance != nil {
+ tolerance = *check.threshold.Tolerance
+ }
+
+ // Apply tolerance
+ effectiveThreshold := thresholdMs * (1 + tolerance/100)
+
+		if value > effectiveThreshold {
+			// Breaches within 1.5x of the tolerance margin above the threshold are
+			// warnings; anything beyond that is an error.
+			severity := "error"
+			if value <= thresholdMs*(1+tolerance/100*1.5) {
+				severity = "warning"
+			}
+
+ breaches = append(breaches, ThresholdBreach{
+ Endpoint: endpoint,
+ Metric: check.key,
+ Value: value,
+ Threshold: thresholdMs,
+ Unit: check.unit,
+ Severity: severity,
+ SampleCount: sampleCount,
+ })
+ }
+ }
+
+ return breaches
+}
+
+// checkRateThresholds checks rate-based thresholds (error_rate, rps)
+func (tc *ThresholdConfig) checkRateThresholds(endpoint string, metric *Metric, thresholds ThresholdValues, sampleCount int64) []ThresholdBreach {
+ var breaches []ThresholdBreach
+
+	// Check error rate
+	if thresholds.ErrorRate != nil {
+		// Calculate the error rate from the metric; "fails" may be stored as an
+		// int64 (in-process metrics) or a float64 (after a JSON round-trip).
+		fails := 0.0
+		switch v := metric.Values["fails"].(type) {
+		case int64:
+			fails = float64(v)
+		case float64:
+			fails = v
+		}
+		errorRate := 0.0
+		if sampleCount > 0 {
+			errorRate = fails / float64(sampleCount)
+		}
+
+ threshold := thresholds.ErrorRate.Value
+ tolerance := tc.Options.TolerancePercent
+ if thresholds.ErrorRate.Tolerance != nil {
+ tolerance = *thresholds.ErrorRate.Tolerance
+ }
+
+ effectiveThreshold := threshold * (1 + tolerance/100)
+
+ if errorRate > effectiveThreshold {
+ severity := "error"
+ if errorRate <= threshold*(1+tolerance/100*1.5) {
+ severity = "warning"
+ }
+
+ breaches = append(breaches, ThresholdBreach{
+ Endpoint: endpoint,
+ Metric: "error_rate",
+ Value: errorRate * 100, // Convert to percentage for display
+ Threshold: threshold * 100,
+ Unit: "%",
+ Severity: severity,
+ SampleCount: sampleCount,
+ })
+ }
+ }
+
+ // Check RPS
+ if thresholds.RPS != nil {
+ if rate, ok := metric.Values["rate"].(float64); ok {
+ threshold := thresholds.RPS.Value
+ tolerance := tc.Options.TolerancePercent
+ if thresholds.RPS.Tolerance != nil {
+ tolerance = *thresholds.RPS.Tolerance
+ }
+
+ effectiveThreshold := threshold * (1 + tolerance/100)
+
+ if rate > effectiveThreshold {
+ severity := "error"
+ if rate <= threshold*(1+tolerance/100*1.5) {
+ severity = "warning"
+ }
+
+ breaches = append(breaches, ThresholdBreach{
+ Endpoint: endpoint,
+ Metric: "rps",
+ Value: rate,
+ Threshold: threshold,
+ Unit: "req/s",
+ Severity: severity,
+ SampleCount: sampleCount,
+ })
+ }
+ }
+ }
+
+ return breaches
+}
+
+// GetBreachSummary returns a summary of threshold breaches
+func (tc *ThresholdConfig) GetBreachSummary(breaches []ThresholdBreach) map[string]int {
+ summary := map[string]int{
+ "total": len(breaches),
+ "error": 0,
+ "warning": 0,
+ }
+
+ for _, v := range breaches {
+ summary[v.Severity]++
+ }
+
+ return summary
+}
+
+// LoadThresholdConfig loads threshold configuration from a YAML file
+func LoadThresholdConfig(filename string) (*ThresholdConfig, error) {
+ data, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read threshold config file: %w", err)
+ }
+
+ var config ThresholdConfig
+ err = yaml.Unmarshal(data, &config)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse threshold config YAML: %w", err)
+ }
+
+ // Set defaults if not provided
+ if config.Options.TolerancePercent == 0 {
+ config.Options.TolerancePercent = 10.0
+ }
+ if config.Options.MinSamples == 0 {
+ config.Options.MinSamples = 100
+ }
+
+ return &config, nil
+}
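+
+// Usage sketch (illustrative): load a configuration, validate aggregated
+// metrics against it, and summarise the breaches. "metrics" stands for a
+// *Metrics value produced elsewhere; the file name is a placeholder.
+//
+//	config, err := reporting.LoadThresholdConfig("thresholds.yml")
+//	if err != nil {
+//		return err
+//	}
+//	breaches := config.ValidateThresholds(metrics)
+//	summary := config.GetBreachSummary(breaches)
+//	fmt.Printf("%d breaches (%d errors, %d warnings)\n", summary["total"], summary["error"], summary["warning"])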
diff --git a/reporting/thresholds.yml b/reporting/thresholds.yml
new file mode 100644
index 00000000..b996edd0
--- /dev/null
+++ b/reporting/thresholds.yml
@@ -0,0 +1,59 @@
+# Venom Threshold Configuration
+# This file defines performance thresholds for API endpoints
+
+# Global options
+options:
+ tolerance_percent: 10 # Allow 10% headroom above thresholds
+ min_samples: 100 # Require at least 100 samples for reliable percentiles
+  soft_fail: false          # When false, breaches are treated as failures (non-zero exit); when true, they are only reported
+
+# Default thresholds applied to all endpoints
+defaults:
+ p95: 500ms # 95th percentile response time
+ p99: 1000ms # 99th percentile response time
+ avg: 200ms # Average response time
+ error_rate: 0.01 # 1% error rate maximum
+
+# Group-based thresholds (supports wildcards)
+groups:
+ "auth/*": # All authentication endpoints
+ p95: 350ms
+ avg: 150ms
+ error_rate: 0.005 # 0.5% error rate for auth endpoints
+
+ "catalog/*": # All catalog endpoints
+ p95: 450ms
+ avg: 180ms
+
+ "api/v1/*": # All v1 API endpoints
+ p95: 400ms
+ avg: 160ms
+
+# Endpoint-specific thresholds (highest priority)
+endpoints:
+ "GET /users": # User listing endpoint
+ p95: 300ms
+ avg: 150ms
+ error_rate: 0.001 # Very low error rate for user data
+
+ "POST /orders": # Order creation endpoint
+ p95: 800ms # Allow longer for order processing
+ avg: 400ms
+ error_rate: 0.002
+
+ "GET /health": # Health check endpoint
+ p95: 50ms # Health checks should be very fast
+ avg: 20ms
+ error_rate: 0.0 # No errors allowed for health checks
+
+ "POST /auth/login": # Login endpoint
+ p95: 200ms
+ avg: 100ms
+ error_rate: 0.01 # Higher error rate allowed for auth failures
+
+# Example with a custom per-threshold tolerance (object form with value + tolerance_percent)
+  "GET /reports":           # Report generation endpoint
+    p95:
+      value: 2000ms         # Reports can take longer
+      tolerance_percent: 20 # Allow 20% headroom for this threshold
+    avg: 800ms
+    error_rate: 0.02        # 2% error rate for complex operations
diff --git a/reporting/thresholds_test.go b/reporting/thresholds_test.go
new file mode 100644
index 00000000..07b33635
--- /dev/null
+++ b/reporting/thresholds_test.go
@@ -0,0 +1,191 @@
+package reporting
+
+import (
+ "testing"
+ "time"
+)
+
+func TestDefaultThresholdConfig(t *testing.T) {
+ config := DefaultThresholdConfig()
+
+ if config.Defaults.P95 == nil {
+ t.Error("Default P95 threshold should be set")
+ }
+
+ if config.Defaults.P95.Value != 500*time.Millisecond {
+ t.Errorf("Expected default P95 to be 500ms, got %v", config.Defaults.P95.Value)
+ }
+
+ if config.Options.TolerancePercent != 10.0 {
+ t.Errorf("Expected default tolerance to be 10%%, got %v", config.Options.TolerancePercent)
+ }
+}
+
+func TestGetThresholdForEndpoint(t *testing.T) {
+ config := DefaultThresholdConfig()
+
+ // Test exact endpoint match
+ thresholds := config.GetThresholdForEndpoint("GET /users")
+ if thresholds.P95 == nil || thresholds.P95.Value != 300*time.Millisecond {
+ t.Error("GET /users should have P95 threshold of 300ms")
+ }
+
+ // Test group pattern match
+ thresholds = config.GetThresholdForEndpoint("auth/login")
+ if thresholds.P95 == nil || thresholds.P95.Value != 350*time.Millisecond {
+ t.Error("auth/* endpoints should have P95 threshold of 350ms")
+ }
+
+ // Test default fallback
+ thresholds = config.GetThresholdForEndpoint("GET /unknown")
+ if thresholds.P95 == nil || thresholds.P95.Value != 500*time.Millisecond {
+ t.Error("Unknown endpoints should use default P95 threshold of 500ms")
+ }
+}
+
+func TestMatchesPattern(t *testing.T) {
+ tests := []struct {
+ endpoint string
+ pattern string
+ expected bool
+ }{
+ {"auth/login", "auth/*", true},
+ {"auth/logout", "auth/*", true},
+ {"catalog/products", "catalog/*", true},
+ {"GET /users/profile", "auth/*", false},
+ {"GET /users", "GET /users", true},
+ {"POST /users", "GET /users", false},
+ }
+
+ for _, test := range tests {
+ result := matchesPattern(test.endpoint, test.pattern)
+ if result != test.expected {
+ t.Errorf("matchesPattern(%q, %q) = %v, expected %v",
+ test.endpoint, test.pattern, result, test.expected)
+ }
+ }
+}
+
+func TestValidateThresholds(t *testing.T) {
+ config := DefaultThresholdConfig()
+
+ // Create test metrics with a breach
+ metrics := &Metrics{
+ Metrics: map[string]*Metric{
+ "GET /users": {
+ Type: "trend",
+ Values: map[string]interface{}{
+ "p(95)": 400.0, // Exceeds 300ms threshold
+ "avg": 120.0, // Within 150ms threshold
+ "count": int64(150), // Above min samples
+ },
+ },
+ "GET /slow": {
+ Type: "trend",
+ Values: map[string]interface{}{
+ "p(95)": 600.0, // Exceeds default 500ms threshold
+ "count": int64(50), // Below min samples - should be skipped
+ },
+ },
+ },
+ }
+
+ breaches := config.ValidateThresholds(metrics)
+
+ if len(breaches) != 1 {
+ t.Errorf("Expected 1 breach, got %d", len(breaches))
+ }
+
+ if len(breaches) > 0 {
+ v := breaches[0]
+ if v.Endpoint != "GET /users" {
+ t.Errorf("Expected breach for GET /users, got %s", v.Endpoint)
+ }
+ if v.Metric != "p(95)" {
+ t.Errorf("Expected p(95) breach, got %s", v.Metric)
+ }
+ if v.Value != 400.0 {
+ t.Errorf("Expected breach value 400.0, got %v", v.Value)
+ }
+ }
+}
+
+func TestMergeThresholdValues(t *testing.T) {
+ base := ThresholdValues{
+ P95: &DurationThreshold{Value: 500 * time.Millisecond},
+ Avg: &DurationThreshold{Value: 200 * time.Millisecond},
+ }
+
+ override := ThresholdValues{
+ P95: &DurationThreshold{Value: 300 * time.Millisecond},
+ P99: &DurationThreshold{Value: 1000 * time.Millisecond},
+ }
+
+ result := mergeThresholdValues(base, override)
+
+ // P95 should be overridden
+ if result.P95.Value != 300*time.Millisecond {
+ t.Errorf("Expected P95 to be overridden to 300ms, got %v", result.P95.Value)
+ }
+
+ // Avg should remain from base
+ if result.Avg.Value != 200*time.Millisecond {
+ t.Errorf("Expected Avg to remain 200ms, got %v", result.Avg.Value)
+ }
+
+ // P99 should be added from override
+ if result.P99.Value != 1000*time.Millisecond {
+ t.Errorf("Expected P99 to be added as 1000ms, got %v", result.P99.Value)
+ }
+}
+
+func TestIsEndpointMetric(t *testing.T) {
+ tests := []struct {
+ metricName string
+ expected bool
+ }{
+ {"GET /users", true},
+ {"POST /orders", true},
+ {"http_req_duration", false},
+ {"http_reqs", false},
+ {"http_req_failed", false},
+ {"iterations", false},
+ {"checks", false},
+ {"data_sent", false},
+ {"data_received", false},
+ {"vus", false},
+ {"vus_max", false},
+ }
+
+ for _, test := range tests {
+ result := isEndpointMetric(test.metricName)
+ if result != test.expected {
+ t.Errorf("isEndpointMetric(%q) = %v, expected %v",
+ test.metricName, result, test.expected)
+ }
+ }
+}
+
+func TestGetBreachSummary(t *testing.T) {
+ config := DefaultThresholdConfig()
+
+ breaches := []ThresholdBreach{
+ {Severity: "error"},
+ {Severity: "error"},
+ {Severity: "warning"},
+ {Severity: "warning"},
+ {Severity: "warning"},
+ }
+
+ summary := config.GetBreachSummary(breaches)
+
+ if summary["total"] != 5 {
+ t.Errorf("Expected total breaches 5, got %d", summary["total"])
+ }
+ if summary["error"] != 2 {
+ t.Errorf("Expected error breaches 2, got %d", summary["error"])
+ }
+ if summary["warning"] != 3 {
+ t.Errorf("Expected warning breaches 3, got %d", summary["warning"])
+ }
+}
diff --git a/venom.go b/venom.go
index 4d48dbdf..af97191c 100644
--- a/venom.go
+++ b/venom.go
@@ -75,6 +75,14 @@ type Venom struct {
OpenApiReport bool
FailureLinkHeader string
FailureLinkTemplate string
+ MetricsEnabled bool
+ MetricsOutput string
+ metricsCollector reporting.MetricsCollector
+}
+
+// SetMetricsCollector sets the metrics collector for the Venom instance
+func (v *Venom) SetMetricsCollector(collector reporting.MetricsCollector) {
+ v.metricsCollector = collector
}
var trace = color.New(color.Attribute(90)).SprintFunc()