diff --git a/.changes/v1.15/ENHANCEMENTS-20260303-083543.yaml b/.changes/v1.15/ENHANCEMENTS-20260303-083543.yaml
new file mode 100644
index 000000000000..2b3c59503e72
--- /dev/null
+++ b/.changes/v1.15/ENHANCEMENTS-20260303-083543.yaml
@@ -0,0 +1,5 @@
+kind: ENHANCEMENTS
+body: 'Terraform Test: Add strict mode'
+time: 2026-03-03T08:35:43.709504+01:00
+custom:
+  Issue: "38224"
diff --git a/internal/backend/local/test.go b/internal/backend/local/test.go
index dbab3fb9bcf2..44b4bf3c3d9e 100644
--- a/internal/backend/local/test.go
+++ b/internal/backend/local/test.go
@@ -74,6 +74,9 @@ type TestSuiteRunner struct {
 
 	CommandMode moduletest.CommandMode
 
+	// Strict tells the runner to fail tests that produce warnings.
+	Strict bool
+
 	// Repair is used to indicate whether the test cleanup command should run in
 	// "repair" mode. In this mode, the cleanup command will only remove state
 	// files that are a result of failed destroy operations, leaving any
@@ -291,6 +294,7 @@ func (runner *TestFileRunner) Test(file *moduletest.File) {
 		CancelCtx:         runner.Suite.CancelledCtx,
 		StopCtx:           runner.Suite.StoppedCtx,
 		Verbose:           runner.Suite.Verbose,
+		Strict:            runner.Suite.Strict,
 		Render:            runner.Suite.View,
 		UnparsedVariables: currentGlobalVariables,
 		FileStates:        states,
diff --git a/internal/command/arguments/test.go b/internal/command/arguments/test.go
index 7275e0e07363..937cebdf195f 100644
--- a/internal/command/arguments/test.go
+++ b/internal/command/arguments/test.go
@@ -52,6 +52,10 @@ type Test struct {
 	// the same-named flag in the Operation struct.
 	DeferralAllowed bool
 
+	// Strict causes test runs to fail if any warnings are produced during
+	// plan or apply operations.
+	Strict bool
+
 	// These flags are only relevant to the "test cleanup" command.
 	Repair bool
 }
@@ -73,6 +77,7 @@ func ParseTest(args []string) (*Test, tfdiags.Diagnostics) {
 	cmdFlags.IntVar(&test.OperationParallelism, "parallelism", DefaultParallelism, "parallelism")
 	cmdFlags.IntVar(&test.RunParallelism, "run-parallelism", DefaultParallelism, "run-parallelism")
 	cmdFlags.BoolVar(&test.DeferralAllowed, "allow-deferral", false, "allow-deferral")
+	cmdFlags.BoolVar(&test.Strict, "strict", false, "strict")
 	cmdFlags.BoolVar(&test.Repair, "repair", false, "repair") // TODO: Finalise the name of this flag.
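(Not part of the patch: a minimal sketch of how -strict travels through the pieces above, using only identifiers these hunks introduce. The surrounding setup is elided, so treat it as illustrative rather than a complete program.)

	// Hypothetical wiring: ParseTest records -strict on args.Strict, and the
	// command copies it onto the suite runner alongside Verbose and friends.
	args, diags := arguments.ParseTest([]string{"-strict"})
	if !diags.HasErrors() {
		runner := &local.TestSuiteRunner{
			Strict: args.Strict, // other required fields omitted here
		}
		_ = runner
	}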
diff --git a/internal/command/arguments/test_test.go b/internal/command/arguments/test_test.go
index 8ffd69ccdd7a..ed53f90af6ff 100644
--- a/internal/command/arguments/test_test.go
+++ b/internal/command/arguments/test_test.go
@@ -191,6 +191,19 @@ func TestParseTest(t *testing.T) {
 			},
 			wantDiags: nil,
 		},
+		"strict": {
+			args: []string{"-strict"},
+			want: &Test{
+				Filter:               nil,
+				TestDirectory:        "tests",
+				ViewType:             ViewHuman,
+				Strict:               true,
+				Vars:                 &Vars{},
+				OperationParallelism: 10,
+				RunParallelism:       10,
+			},
+			wantDiags: nil,
+		},
 		"unknown flag": {
 			args: []string{"-boop"},
 			want: &Test{
diff --git a/internal/command/test.go b/internal/command/test.go
index 404f162724c6..5803e5837f62 100644
--- a/internal/command/test.go
+++ b/internal/command/test.go
@@ -174,6 +174,7 @@ func (c *TestCommand) Run(rawArgs []string) int {
 		CancelledCtx:    cancelCtx,
 		Filter:          args.Filter,
 		Verbose:         args.Verbose,
+		Strict:          args.Strict,
 		Concurrency:     args.RunParallelism,
 		DeferralAllowed: args.DeferralAllowed,
 	}
diff --git a/internal/command/test_test.go b/internal/command/test_test.go
index d10fe31a3a0b..465f9357fbd1 100644
--- a/internal/command/test_test.go
+++ b/internal/command/test_test.go
@@ -433,6 +433,36 @@ func TestTest_Runs(t *testing.T) {
 			expectedOut: []string{"2 passed, 0 failed."},
 			code:        0,
 		},
+		"strict_mode_no_warnings_passes": {
+			override:    "simple_pass",
+			args:        []string{"-strict"},
+			expectedOut: []string{"1 passed, 0 failed."},
+			code:        0,
+			description: "strict mode does not affect tests with no warnings",
+		},
+		"strict_mode_warnings_fail": {
+			override:    "invalid-cleanup-warnings",
+			args:        []string{"-strict"},
+			expectedOut: []string{"0 passed, 1 failed."},
+			expectedErr: []string{"Error: Value for undeclared variable"},
+			code:        1,
+			description: "strict mode causes warnings to fail the test",
+		},
+		"strict_mode_plan_warnings_fail": {
+			override:    "strict-warnings-plan",
+			args:        []string{"-strict"},
+			expectedOut: []string{"0 passed, 1 failed."},
+			expectedErr: []string{"Error: Value for undeclared variable"},
+			code:        1,
+			description: "strict mode causes warnings to fail a plan-only test",
+		},
+		"strict_mode_expect_failures_still_pass": {
+			override:    "expect_failures_checks",
+			args:        []string{"-strict"},
+			expectedOut: []string{"1 passed, 0 failed."},
+			code:        0,
+			description: "strict mode does not interfere with expected failures",
+		},
 	}
 	for name, tc := range tcs {
 		t.Run(name, func(t *testing.T) {
diff --git a/internal/command/testdata/test/strict-warnings-plan/main.tf b/internal/command/testdata/test/strict-warnings-plan/main.tf
new file mode 100644
index 000000000000..97c6896727a3
--- /dev/null
+++ b/internal/command/testdata/test/strict-warnings-plan/main.tf
@@ -0,0 +1,5 @@
+variable "input" {}
+
+resource "test_resource" "resource" {
+  value = var.input
+}
diff --git a/internal/command/testdata/test/strict-warnings-plan/main.tftest.hcl b/internal/command/testdata/test/strict-warnings-plan/main.tftest.hcl
new file mode 100644
index 000000000000..118a43c6313e
--- /dev/null
+++ b/internal/command/testdata/test/strict-warnings-plan/main.tftest.hcl
@@ -0,0 +1,13 @@
+run "test" {
+  command = plan
+
+  variables {
+    input      = "Hello, world!"
+    undeclared = "this triggers a warning"
+  }
+
+  assert {
+    condition     = test_resource.resource.value == "Hello, world!"
+    error_message = "bad value"
+  }
+}
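(Reviewer note, not part of the patch: the fixture drives the warning path because `undeclared` has no matching `variable` block in main.tf, so planning emits Terraform's "Value for undeclared variable" warning; the strict_mode_plan_warnings_fail case above then expects that same diagnostic to surface as an error and the run to exit 1.)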
diff --git a/internal/moduletest/graph/apply.go b/internal/moduletest/graph/apply.go
index 07d352db1764..f124e4b92d4f 100644
--- a/internal/moduletest/graph/apply.go
+++ b/internal/moduletest/graph/apply.go
@@ -30,8 +30,5 @@ func (n *NodeTestRun) testApply(ctx *EvalContext, variables terraform.InputValue
 
-	// FilterVariablesToModule only returns warnings, so we don't check the
-	// returned diags for errors.
-	setVariables, testOnlyVariables, setVariableDiags := FilterVariablesToModule(run.ModuleConfig, variables)
-	run.Diagnostics = run.Diagnostics.Append(setVariableDiags)
+	setVariables, testOnlyVariables := FilterVariablesToModule(run.ModuleConfig, variables)
 
 	// ignore diags because validate has covered it
 	tfCtx, _ := terraform.NewContext(n.opts.ContextOpts)
@@ -42,6 +39,9 @@ func (n *NodeTestRun) testApply(ctx *EvalContext, variables terraform.InputValue
 	// Any error during the planning prevents our apply from
 	// continuing which is an error.
 	planDiags = moduletest.ExplainExpectedFailures(run.Config, planDiags)
+	// Note: we intentionally do NOT promote warnings here in strict mode.
+	// The plan phase of an apply is a stepping stone: check block warnings
+	// from planning are filtered out below and re-evaluated during apply.
 	run.Diagnostics = run.Diagnostics.Append(planDiags)
 	if planDiags.HasErrors() {
 		run.Status = moduletest.Error
@@ -65,6 +65,11 @@ func (n *NodeTestRun) testApply(ctx *EvalContext, variables terraform.InputValue
 	// execute the apply operation
 	applyScope, updated, applyDiags := apply(tfCtx, run.Config, run.ModuleConfig, plan, moduletest.Running, variables, providers, waiter)
 
+	// Apply strictness to diags if needed
+	if ctx.Strict() {
+		applyDiags = moduletest.PromoteWarningsToErrors(applyDiags)
+	}
+
 	// Remove expected diagnostics, and add diagnostics in case anything that should have failed didn't.
 	// We'll also update the run status based on the presence of errors or missing expected failures.
 	status, applyDiags := checkForMissingExpectedFailures(ctx, run.Config, applyDiags)
diff --git a/internal/moduletest/graph/eval_context.go b/internal/moduletest/graph/eval_context.go
index dd33a7a2b330..e67d98fcd352 100644
--- a/internal/moduletest/graph/eval_context.go
+++ b/internal/moduletest/graph/eval_context.go
@@ -73,6 +73,7 @@ type EvalContext struct {
 	config   *configs.Config
 	renderer views.Test
 	verbose  bool
+	strict   bool
 
 	// mode and repair affect the behaviour of the cleanup process of the graph.
 	//
@@ -98,6 +99,7 @@ type EvalContextOpts struct {
 	Verbose   bool
+	Strict    bool
 	Repair    bool
 	Render    views.Test
 	CancelCtx context.Context
@@ -135,6 +137,7 @@ func NewEvalContext(opts EvalContextOpts) *EvalContext {
 		stopFunc: stop,
 		config:   opts.Config,
 		verbose:  opts.Verbose,
+		strict:   opts.Strict,
 		repair:   opts.Repair,
 		renderer: opts.Render,
 		mode:     opts.Mode,
@@ -176,6 +179,12 @@ func (ec *EvalContext) Verbose() bool {
 	return ec.verbose
 }
 
+// Strict returns true if the context is in strict mode,
+// meaning warnings should be treated as errors.
+func (ec *EvalContext) Strict() bool {
+	return ec.strict
+}
+
 func (ec *EvalContext) HclContext(references []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics
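(Not part of the patch: the shape of the strict gate that eval_context.go enables and the call sites below repeat; illustrative only, with diags standing in for whichever phase's diagnostics are in hand.)

	// Promote before appending, so run.Diagnostics.HasErrors() treats
	// former warnings as test failures.
	if ctx.Strict() {
		diags = moduletest.PromoteWarningsToErrors(diags)
	}
	run.Diagnostics = run.Diagnostics.Append(diags)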
diff --git a/internal/moduletest/graph/node_state_cleanup.go b/internal/moduletest/graph/node_state_cleanup.go
index 92808d991258..879ed62ef1cf 100644
--- a/internal/moduletest/graph/node_state_cleanup.go
+++ b/internal/moduletest/graph/node_state_cleanup.go
@@ -122,7 +122,7 @@ func (n *NodeStateCleanup) restore(ctx *EvalContext, file *configs.TestFile, run
 	// Anything that would have been reported here was already reported during
 	// the original plan, and a successful destroy operation is the only thing
 	// we care about.
-	setVariables, _, _ := FilterVariablesToModule(module, variables)
+	setVariables, _ := FilterVariablesToModule(module, variables)
 
 	planOpts := &terraform.PlanOpts{
 		Mode: plans.NormalMode,
@@ -174,7 +174,7 @@ func (n *NodeStateCleanup) destroy(ctx *EvalContext, file *configs.TestFile, run
 	// Anything that would have been reported here was already reported during
 	// the original plan, and a successful destroy operation is the only thing
 	// we care about.
-	setVariables, _, _ := FilterVariablesToModule(module, variables)
+	setVariables, _ := FilterVariablesToModule(module, variables)
 
 	planOpts := &terraform.PlanOpts{
 		Mode: plans.DestroyMode,
diff --git a/internal/moduletest/graph/node_test_run.go b/internal/moduletest/graph/node_test_run.go
index 442558178cd5..c5b818b04df3 100644
--- a/internal/moduletest/graph/node_test_run.go
+++ b/internal/moduletest/graph/node_test_run.go
@@ -186,6 +186,9 @@ func (n *NodeTestRun) execute(ctx *EvalContext, waiter *operationWaiter) {
 	}
 
 	variables, variableDiags := GetVariables(ctx, run.Config, run.ModuleConfig, true)
+	if ctx.Strict() {
+		variableDiags = moduletest.PromoteWarningsToErrors(variableDiags)
+	}
 	run.Diagnostics = run.Diagnostics.Append(variableDiags)
 	if variableDiags.HasErrors() {
 		run.Status = moduletest.Error
diff --git a/internal/moduletest/graph/plan.go b/internal/moduletest/graph/plan.go
index c0092d2bc3a8..0d0be6111d60 100644
--- a/internal/moduletest/graph/plan.go
+++ b/internal/moduletest/graph/plan.go
@@ -29,8 +29,5 @@ func (n *NodeTestRun) testPlan(ctx *EvalContext, variables terraform.InputValues
 
-	// FilterVariablesToModule only returns warnings, so we don't check the
-	// returned diags for errors.
-	setVariables, testOnlyVariables, setVariableDiags := FilterVariablesToModule(run.ModuleConfig, variables)
-	run.Diagnostics = run.Diagnostics.Append(setVariableDiags)
+	setVariables, testOnlyVariables := FilterVariablesToModule(run.ModuleConfig, variables)
 
 	// ignore diags because validate has covered it
 	tfCtx, _ := terraform.NewContext(n.opts.ContextOpts)
@@ -41,6 +38,10 @@ func (n *NodeTestRun) testPlan(ctx *EvalContext, variables terraform.InputValues
 	// diagnostics, and if an expected failure is not found, we add a new error diagnostic.
 	planDiags := moduletest.ValidateExpectedFailures(run.Config, originalDiags)
 
+	if ctx.Strict() {
+		planDiags = moduletest.PromoteWarningsToErrors(planDiags)
+	}
+
 	if ctx.Verbose() {
 		// in verbose mode, we still add all the original diagnostics for
 		// display.
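(Reviewer note, not part of the patch: in the plan path the promotion runs after moduletest.ValidateExpectedFailures, so warning diagnostics already claimed by expect_failures have been filtered out before strict mode promotes whatever remains; as far as I can tell, that ordering is what keeps the strict_mode_expect_failures_still_pass case above green.)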
diff --git a/internal/moduletest/graph/variables.go b/internal/moduletest/graph/variables.go
index bed466d339b5..4d1f3f82cdc2 100644
--- a/internal/moduletest/graph/variables.go
+++ b/internal/moduletest/graph/variables.go
@@ -196,10 +196,7 @@ func GetVariables(ctx *EvalContext, run *configs.TestRun, module *configs.Config
 // This function is essentially the opposite of AddVariablesToConfig which
 // makes the config match the variables rather than the variables match the
 // config.
-//
-// This function can only return warnings, and the callers can rely on this so
-// please check the callers of this function if you add any error diagnostics.
-func FilterVariablesToModule(config *configs.Config, values terraform.InputValues) (moduleVars, testOnlyVars terraform.InputValues, diags tfdiags.Diagnostics) {
+func FilterVariablesToModule(config *configs.Config, values terraform.InputValues) (moduleVars, testOnlyVars terraform.InputValues) {
 	moduleVars = make(terraform.InputValues)
 	testOnlyVars = make(terraform.InputValues)
 	for name, value := range values {
@@ -212,5 +209,5 @@ func FilterVariablesToModule(config *configs.Config, values terraform.InputValue
 		moduleVars[name] = value
 	}
 
-	return moduleVars, testOnlyVars, diags
+	return moduleVars, testOnlyVars
 }
diff --git a/internal/moduletest/run.go b/internal/moduletest/run.go
index 79200e6a30eb..42d42055ecd6 100644
--- a/internal/moduletest/run.go
+++ b/internal/moduletest/run.go
@@ -441,6 +441,21 @@ func ValidateExpectedFailures(config *configs.TestRun, originals tfdiags.Diagnos
 	return diags
 }
 
+// PromoteWarningsToErrors converts all warning-severity diagnostics into
+// errors using tfdiags.Override. This is used by strict mode to make tests
+// fail when warnings are produced.
+func PromoteWarningsToErrors(diags tfdiags.Diagnostics) tfdiags.Diagnostics {
+	var result tfdiags.Diagnostics
+	for _, diag := range diags {
+		if diag.Severity() == tfdiags.Warning {
+			result = result.Append(tfdiags.Override(diag, tfdiags.Error, nil))
+		} else {
+			result = result.Append(diag)
+		}
+	}
+	return result
+}
+
 // DiagnosticExtraFromMissingExpectedFailure provides an interface for diagnostic ExtraInfo to
 // denote that a diagnostic was generated as a result of a missing expected failure.
 type DiagnosticExtraFromMissingExpectedFailure interface {
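(Not part of the patch: a behavioural sketch of the new helper, using the same imports as run_test.go below, where hcl is github.com/hashicorp/hcl/v2. The summary text is sample text for illustration.)

	var diags tfdiags.Diagnostics
	diags = diags.Append(&hcl.Diagnostic{
		Severity: hcl.DiagWarning,
		Summary:  "value for undeclared variable", // illustrative summary
	})
	diags = PromoteWarningsToErrors(diags)
	// diags.HasErrors() now reports true, which is what flips a strict-mode
	// run from merely warning to failing.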
diff --git a/internal/moduletest/run_test.go b/internal/moduletest/run_test.go
index a1670c9ca3f4..3953991b9345 100644
--- a/internal/moduletest/run_test.go
+++ b/internal/moduletest/run_test.go
@@ -794,6 +794,157 @@ func TestRun_ValidateExpectedFailures(t *testing.T) {
 	}
 }
 
+func TestPromoteWarningsToErrors(t *testing.T) {
+	type output struct {
+		Description tfdiags.Description
+		Severity    tfdiags.Severity
+	}
+
+	tcs := map[string]struct {
+		Input  tfdiags.Diagnostics
+		Output []output
+	}{
+		"empty": {
+			Input:  nil,
+			Output: nil,
+		},
+		"warnings_only": {
+			Input: createDiagnostics(func(diags tfdiags.Diagnostics) tfdiags.Diagnostics {
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagWarning,
+					Summary:  "first warning",
+					Detail:   "should become an error",
+				})
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagWarning,
+					Summary:  "second warning",
+					Detail:   "should also become an error",
+				})
+				return diags
+			}),
+			Output: []output{
+				{
+					Description: tfdiags.Description{
+						Summary: "first warning",
+						Detail:  "should become an error",
+					},
+					Severity: tfdiags.Error,
+				},
+				{
+					Description: tfdiags.Description{
+						Summary: "second warning",
+						Detail:  "should also become an error",
+					},
+					Severity: tfdiags.Error,
+				},
+			},
+		},
+		"errors_unchanged": {
+			Input: createDiagnostics(func(diags tfdiags.Diagnostics) tfdiags.Diagnostics {
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "existing error",
+					Detail:   "should pass through untouched",
+				})
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "another error",
+					Detail:   "should also pass through untouched",
+				})
+				return diags
+			}),
+			Output: []output{
+				{
+					Description: tfdiags.Description{
+						Summary: "existing error",
+						Detail:  "should pass through untouched",
+					},
+					Severity: tfdiags.Error,
+				},
+				{
+					Description: tfdiags.Description{
+						Summary: "another error",
+						Detail:  "should also pass through untouched",
+					},
+					Severity: tfdiags.Error,
+				},
+			},
+		},
+		"mixed": {
+			Input: createDiagnostics(func(diags tfdiags.Diagnostics) tfdiags.Diagnostics {
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagWarning,
+					Summary:  "first warning",
+					Detail:   "should become an error",
+				})
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "original error",
+					Detail:   "should pass through untouched",
+				})
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagWarning,
+					Summary:  "second warning",
+					Detail:   "should also become an error",
+				})
+				return diags
+			}),
+			Output: []output{
+				{
+					Description: tfdiags.Description{
+						Summary: "first warning",
+						Detail:  "should become an error",
+					},
+					Severity: tfdiags.Error,
+				},
+				{
+					Description: tfdiags.Description{
+						Summary: "original error",
+						Detail:  "should pass through untouched",
+					},
+					Severity: tfdiags.Error,
+				},
+				{
+					Description: tfdiags.Description{
+						Summary: "second warning",
+						Detail:  "should also become an error",
+					},
+					Severity: tfdiags.Error,
+				},
+			},
+		},
+	}
+
+	for name, tc := range tcs {
+		t.Run(name, func(t *testing.T) {
+			out := PromoteWarningsToErrors(tc.Input)
+			ix := 0
+			for ; ix < len(tc.Output); ix++ {
+				expected := tc.Output[ix]
+
+				if ix >= len(out) {
+					t.Errorf("missing diagnostic at %d, expected: [%s] %s, %s", ix, expected.Severity, expected.Description.Summary, expected.Description.Detail)
+					continue
+				}
+
+				actual := output{
+					Description: out[ix].Description(),
+					Severity:    out[ix].Severity(),
+				}
+
+				if diff := cmp.Diff(expected, actual); len(diff) > 0 {
+					t.Errorf("mismatched diagnostic at %d:\n%s", ix, diff)
+				}
+			}
+
+			for ; ix < len(out); ix++ {
+				actual := out[ix]
+				t.Errorf("additional diagnostic at %d: [%s] %s, %s", ix, actual.Severity(), actual.Description().Summary, actual.Description().Detail)
+			}
+		})
+	}
+}
+
 func createDiagnostics(populate func(diags tfdiags.Diagnostics) tfdiags.Diagnostics) tfdiags.Diagnostics {
 	var diags tfdiags.Diagnostics
 	diags = populate(diags)
diff --git a/testing/equivalence-tests/outputs/strict_mode_no_warnings/test b/testing/equivalence-tests/outputs/strict_mode_no_warnings/test
new file mode 100644
index 000000000000..8db2484f8160
--- /dev/null
+++ b/testing/equivalence-tests/outputs/strict_mode_no_warnings/test
@@ -0,0 +1,3 @@
+main.tftest.hcl... pass
+  run "test"... pass
+Success! 1 passed, 0 failed.
diff --git a/testing/equivalence-tests/tests/strict_mode_no_warnings/main.tf b/testing/equivalence-tests/tests/strict_mode_no_warnings/main.tf
new file mode 100644
index 000000000000..3123faa1c9f6
--- /dev/null
+++ b/testing/equivalence-tests/tests/strict_mode_no_warnings/main.tf
@@ -0,0 +1,18 @@
+terraform {
+  required_providers {
+    tfcoremock = {
+      source  = "hashicorp/tfcoremock"
+      version = "0.1.1"
+    }
+  }
+}
+
+provider "tfcoremock" {}
+
+variable "input" {
+  type = string
+}
+
+resource "tfcoremock_simple_resource" "resource" {
+  string = var.input
+}
diff --git a/testing/equivalence-tests/tests/strict_mode_no_warnings/main.tftest.hcl b/testing/equivalence-tests/tests/strict_mode_no_warnings/main.tftest.hcl
new file mode 100644
index 000000000000..9df1e1eebf09
--- /dev/null
+++ b/testing/equivalence-tests/tests/strict_mode_no_warnings/main.tftest.hcl
@@ -0,0 +1,15 @@
+# Copyright IBM Corp. 2014, 2026
+# SPDX-License-Identifier: BUSL-1.1
+
+run "test" {
+  command = plan
+
+  variables {
+    input = "Hello, world!"
+  }
+
+  assert {
+    condition     = tfcoremock_simple_resource.resource.string == "Hello, world!"
+    error_message = "expected string to be Hello, world!"
+  }
+}
diff --git a/testing/equivalence-tests/tests/strict_mode_no_warnings/spec.json b/testing/equivalence-tests/tests/strict_mode_no_warnings/spec.json
new file mode 100644
index 000000000000..d92132fcd9cb
--- /dev/null
+++ b/testing/equivalence-tests/tests/strict_mode_no_warnings/spec.json
@@ -0,0 +1,20 @@
+{
+  "description": "tests that terraform test -strict passes cleanly when no warnings are produced",
+  "include_files": [],
+  "ignore_fields": {},
+  "commands": [
+    {
+      "name": "init",
+      "arguments": ["init"],
+      "capture_output": false
+    },
+    {
+      "name": "test_strict",
+      "arguments": ["test", "-strict", "-no-color"],
+      "capture_output": true,
+      "output_file_name": "test",
+      "has_json_output": false,
+      "streams_json_output": false
+    }
+  ]
+}
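(Not part of the patch: reproducing the captured equivalence output by hand, assuming the hashicorp/tfcoremock provider is fetchable during init. The commands mirror spec.json; the expected lines are the contents of outputs/strict_mode_no_warnings/test.)

	$ cd testing/equivalence-tests/tests/strict_mode_no_warnings
	$ terraform init
	$ terraform test -strict -no-color
	main.tftest.hcl... pass
	  run "test"... pass
	Success! 1 passed, 0 failed.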