Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .changes/v1.15/ENHANCEMENTS-20260303-083543.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
kind: ENHANCEMENTS
body: 'Terraform Test: Add strict mode'
time: 2026-03-03T08:35:43.709504+01:00
custom:
Issue: "38224"
4 changes: 4 additions & 0 deletions internal/backend/local/test.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,9 @@ type TestSuiteRunner struct {

CommandMode moduletest.CommandMode

// Strict tells the runner to fail tests that produce warnings.
Strict bool

// Repair is used to indicate whether the test cleanup command should run in
// "repair" mode. In this mode, the cleanup command will only remove state
// files that are a result of failed destroy operations, leaving any
Expand Down Expand Up @@ -291,6 +294,7 @@ func (runner *TestFileRunner) Test(file *moduletest.File) {
CancelCtx: runner.Suite.CancelledCtx,
StopCtx: runner.Suite.StoppedCtx,
Verbose: runner.Suite.Verbose,
Strict: runner.Suite.Strict,
Render: runner.Suite.View,
UnparsedVariables: currentGlobalVariables,
FileStates: states,
Expand Down
5 changes: 5 additions & 0 deletions internal/command/arguments/test.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,10 @@ type Test struct {
// the same-named flag in the Operation struct.
DeferralAllowed bool

// Strict causes test runs to fail if any warnings are produced during
// plan or apply operations.
Strict bool

// These flags are only relevant to the "test cleanup" command.
Repair bool
}
Expand All @@ -73,6 +77,7 @@ func ParseTest(args []string) (*Test, tfdiags.Diagnostics) {
cmdFlags.IntVar(&test.OperationParallelism, "parallelism", DefaultParallelism, "parallelism")
cmdFlags.IntVar(&test.RunParallelism, "run-parallelism", DefaultParallelism, "run-parallelism")
cmdFlags.BoolVar(&test.DeferralAllowed, "allow-deferral", false, "allow-deferral")
cmdFlags.BoolVar(&test.Strict, "strict", false, "strict")
cmdFlags.BoolVar(&test.Repair, "repair", false, "repair")

// TODO: Finalise the name of this flag.
Expand Down
13 changes: 13 additions & 0 deletions internal/command/arguments/test_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -191,6 +191,19 @@ func TestParseTest(t *testing.T) {
},
wantDiags: nil,
},
"strict": {
args: []string{"-strict"},
want: &Test{
Filter: nil,
TestDirectory: "tests",
ViewType: ViewHuman,
Strict: true,
Vars: &Vars{},
OperationParallelism: 10,
RunParallelism: 10,
},
wantDiags: nil,
},
"unknown flag": {
args: []string{"-boop"},
want: &Test{
Expand Down
1 change: 1 addition & 0 deletions internal/command/test.go
Original file line number Diff line number Diff line change
Expand Up @@ -174,6 +174,7 @@ func (c *TestCommand) Run(rawArgs []string) int {
CancelledCtx: cancelCtx,
Filter: args.Filter,
Verbose: args.Verbose,
Strict: args.Strict,
Concurrency: args.RunParallelism,
DeferralAllowed: args.DeferralAllowed,
}
Expand Down
30 changes: 30 additions & 0 deletions internal/command/test_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -433,6 +433,36 @@ func TestTest_Runs(t *testing.T) {
expectedOut: []string{"2 passed, 0 failed."},
code: 0,
},
"strict_mode_no_warnings_passes": {
override: "simple_pass",
args: []string{"-strict"},
expectedOut: []string{"1 passed, 0 failed."},
code: 0,
description: "strict mode does not affect tests with no warnings",
},
"strict_mode_warnings_fail": {
override: "invalid-cleanup-warnings",
args: []string{"-strict"},
expectedOut: []string{"0 passed, 1 failed."},
expectedErr: []string{"Error: Value for undeclared variable"},
code: 1,
description: "strict mode causes warnings to fail the test",
},
"strict_mode_plan_warnings_fail": {
override: "strict-warnings-plan",
args: []string{"-strict"},
expectedOut: []string{"0 passed, 1 failed."},
expectedErr: []string{"Error: Value for undeclared variable"},
code: 1,
description: "strict mode causes warnings to fail a plan-only test",
},
"strict_mode_expect_failures_still_pass": {
override: "expect_failures_checks",
args: []string{"-strict"},
expectedOut: []string{"1 passed, 0 failed."},
code: 0,
description: "strict mode does not interfere with expected failures",
},
}
for name, tc := range tcs {
t.Run(name, func(t *testing.T) {
Expand Down
5 changes: 5 additions & 0 deletions internal/command/testdata/test/strict-warnings-plan/main.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Test fixture for strict mode: a minimal module whose single resource
# echoes the "input" variable. The companion .tftest.hcl sets an
# additional variable the module does not declare, which is what
# produces the warning that strict mode promotes to an error.
variable "input" {}

resource "test_resource" "resource" {
  value = var.input
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Plan-only run that deliberately sets a variable ("undeclared") the
# module under test does not declare, producing a "Value for undeclared
# variable" warning. Under -strict that warning is promoted to an error,
# so this run is expected to fail.
run "test" {
  command = plan

  # "input" is declared by the module; "undeclared" is not, which is
  # what triggers the warning this fixture exists to exercise.
  variables {
    input = "Hello, world!"
    undeclared = "this triggers a warning"
  }

  assert {
    condition = test_resource.resource.value == "Hello, world!"
    error_message = "bad value"
  }
}
11 changes: 9 additions & 2 deletions internal/moduletest/graph/apply.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,7 @@ func (n *NodeTestRun) testApply(ctx *EvalContext, variables terraform.InputValue

// FilterVariablesToModule only returns warnings, so we don't check the
// returned diags for errors.
setVariables, testOnlyVariables, setVariableDiags := FilterVariablesToModule(run.ModuleConfig, variables)
run.Diagnostics = run.Diagnostics.Append(setVariableDiags)
setVariables, testOnlyVariables := FilterVariablesToModule(run.ModuleConfig, variables)

// ignore diags because validate has covered it
tfCtx, _ := terraform.NewContext(n.opts.ContextOpts)
Expand All @@ -42,6 +41,9 @@ func (n *NodeTestRun) testApply(ctx *EvalContext, variables terraform.InputValue
// Any error during the planning prevents our apply from
// continuing which is an error.
planDiags = moduletest.ExplainExpectedFailures(run.Config, planDiags)
// Note: we intentionally do NOT promote warnings here in strict mode.
// The plan phase of an apply is a stepping stone — check block warnings
// from planning are filtered out below and re-evaluated during apply.
run.Diagnostics = run.Diagnostics.Append(planDiags)
if planDiags.HasErrors() {
run.Status = moduletest.Error
Expand All @@ -65,6 +67,11 @@ func (n *NodeTestRun) testApply(ctx *EvalContext, variables terraform.InputValue
// execute the apply operation
applyScope, updated, applyDiags := apply(tfCtx, run.Config, run.ModuleConfig, plan, moduletest.Running, variables, providers, waiter)

// Apply strictness to diags if needed
if ctx.Strict() {
applyDiags = moduletest.PromoteWarningsToErrors(applyDiags)
}

// Remove expected diagnostics, and add diagnostics in case anything that should have failed didn't.
// We'll also update the run status based on the presence of errors or missing expected failures.
status, applyDiags := checkForMissingExpectedFailures(ctx, run.Config, applyDiags)
Expand Down
9 changes: 9 additions & 0 deletions internal/moduletest/graph/eval_context.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@ type EvalContext struct {
config *configs.Config
renderer views.Test
verbose bool
strict bool

// mode and repair affect the behaviour of the cleanup process of the graph.
//
Expand All @@ -98,6 +99,7 @@ type EvalContext struct {

type EvalContextOpts struct {
Verbose bool
Strict bool
Repair bool
Render views.Test
CancelCtx context.Context
Expand Down Expand Up @@ -135,6 +137,7 @@ func NewEvalContext(opts EvalContextOpts) *EvalContext {
stopFunc: stop,
config: opts.Config,
verbose: opts.Verbose,
strict: opts.Strict,
repair: opts.Repair,
renderer: opts.Render,
mode: opts.Mode,
Expand Down Expand Up @@ -176,6 +179,12 @@ func (ec *EvalContext) Verbose() bool {
return ec.verbose
}

// Strict reports whether the test suite is running in strict mode. In
// strict mode, warning diagnostics produced by a run's plan/apply
// operations are promoted to errors (see PromoteWarningsToErrors) so
// that the affected run fails instead of passing with warnings.
func (ec *EvalContext) Strict() bool {
	return ec.strict
}

func (ec *EvalContext) HclContext(references []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics

Expand Down
4 changes: 2 additions & 2 deletions internal/moduletest/graph/node_state_cleanup.go
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ func (n *NodeStateCleanup) restore(ctx *EvalContext, file *configs.TestFile, run
// Anything that would have been reported here was already reported during
// the original plan, and a successful destroy operation is the only thing
// we care about.
setVariables, _, _ := FilterVariablesToModule(module, variables)
setVariables, _ := FilterVariablesToModule(module, variables)

planOpts := &terraform.PlanOpts{
Mode: plans.NormalMode,
Expand Down Expand Up @@ -174,7 +174,7 @@ func (n *NodeStateCleanup) destroy(ctx *EvalContext, file *configs.TestFile, run
// Anything that would have been reported here was already reported during
// the original plan, and a successful destroy operation is the only thing
// we care about.
setVariables, _, _ := FilterVariablesToModule(module, variables)
setVariables, _ := FilterVariablesToModule(module, variables)

planOpts := &terraform.PlanOpts{
Mode: plans.DestroyMode,
Expand Down
3 changes: 3 additions & 0 deletions internal/moduletest/graph/node_test_run.go
Original file line number Diff line number Diff line change
Expand Up @@ -186,6 +186,9 @@ func (n *NodeTestRun) execute(ctx *EvalContext, waiter *operationWaiter) {
}

variables, variableDiags := GetVariables(ctx, run.Config, run.ModuleConfig, true)
if ctx.Strict() {
variableDiags = moduletest.PromoteWarningsToErrors(variableDiags)
}
Comment on lines +189 to +191
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It may be important to clarify what kind of warnings we are turning into an error. In my opinion, the warnings here are not about the operation made by the run, but by declaring an unused variable within the test, and I would argue that should not cause the test to fail, even in strict mode. The warning we should be focused on are the ones produced by the test's execution of a terraform operation (plan/apply)

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hey @garvitarai1 would you mind chiming in here?
The original issue is this one.

We wonder what should constitute the strict mode - whether we should report every warning as an error (similar to some other toolings in other languages) or we should be selective with promotion of warnings to errors to the scope Sams proposed in the message above?

And the more important question is: does having one data point about the nice-to-have strict mode in "discuss" 2.5 years ago warrant introducing this mode?

Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I checked Jira and quickly scanned GH issues as well, so this seems like a one off request. I have the same instinct as Sams that not all warnings are equal, and this might be a noisy experience to promote all warnings to errors.

I think the path forward depends on where the implementation stands. If the work is done, then I'd vote for shipping this as an experiment in an alpha release. In this case, let's bring this to the team meeting to get more opinions on the behaviour. If it needs significant effort, then let's de-prioritize.

run.Diagnostics = run.Diagnostics.Append(variableDiags)
if variableDiags.HasErrors() {
run.Status = moduletest.Error
Expand Down
7 changes: 5 additions & 2 deletions internal/moduletest/graph/plan.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,7 @@ func (n *NodeTestRun) testPlan(ctx *EvalContext, variables terraform.InputValues

// FilterVariablesToModule only returns warnings, so we don't check the
// returned diags for errors.
setVariables, testOnlyVariables, setVariableDiags := FilterVariablesToModule(run.ModuleConfig, variables)
run.Diagnostics = run.Diagnostics.Append(setVariableDiags)
setVariables, testOnlyVariables := FilterVariablesToModule(run.ModuleConfig, variables)

// ignore diags because validate has covered it
tfCtx, _ := terraform.NewContext(n.opts.ContextOpts)
Expand All @@ -41,6 +40,10 @@ func (n *NodeTestRun) testPlan(ctx *EvalContext, variables terraform.InputValues
// diagnostics, and if an expected failure is not found, we add a new error diagnostic.
planDiags := moduletest.ValidateExpectedFailures(run.Config, originalDiags)

if ctx.Strict() {
planDiags = moduletest.PromoteWarningsToErrors(planDiags)
}

if ctx.Verbose() {
// in verbose mode, we still add all the original diagnostics for
// display.
Expand Down
7 changes: 2 additions & 5 deletions internal/moduletest/graph/variables.go
Original file line number Diff line number Diff line change
Expand Up @@ -196,10 +196,7 @@ func GetVariables(ctx *EvalContext, run *configs.TestRun, module *configs.Config
// This function is essentially the opposite of AddVariablesToConfig which
// makes the config match the variables rather than the variables match the
// config.
//
// This function can only return warnings, and the callers can rely on this so
// please check the callers of this function if you add any error diagnostics.
func FilterVariablesToModule(config *configs.Config, values terraform.InputValues) (moduleVars, testOnlyVars terraform.InputValues, diags tfdiags.Diagnostics) {
func FilterVariablesToModule(config *configs.Config, values terraform.InputValues) (moduleVars, testOnlyVars terraform.InputValues) {
moduleVars = make(terraform.InputValues)
testOnlyVars = make(terraform.InputValues)
for name, value := range values {
Expand All @@ -212,5 +209,5 @@ func FilterVariablesToModule(config *configs.Config, values terraform.InputValue

moduleVars[name] = value
}
return moduleVars, testOnlyVars, diags
return moduleVars, testOnlyVars
}
15 changes: 15 additions & 0 deletions internal/moduletest/run.go
Original file line number Diff line number Diff line change
Expand Up @@ -441,6 +441,21 @@ func ValidateExpectedFailures(config *configs.TestRun, originals tfdiags.Diagnos
return diags
}

// PromoteWarningsToErrors converts all warning-severity diagnostics in
// diags into error-severity diagnostics using tfdiags.Override, leaving
// every other diagnostic untouched. Strict mode uses this to make tests
// fail when their operations produce warnings.
//
// If diags contains no warnings, the input is returned unchanged so the
// common no-warning case does not allocate a fresh slice. Callers always
// reassign the result (diags = PromoteWarningsToErrors(diags)), so
// returning the input directly is safe.
func PromoteWarningsToErrors(diags tfdiags.Diagnostics) tfdiags.Diagnostics {
	// Fast path: scan for a warning before rebuilding anything.
	hasWarning := false
	for _, diag := range diags {
		if diag.Severity() == tfdiags.Warning {
			hasWarning = true
			break
		}
	}
	if !hasWarning {
		return diags
	}

	// Rebuild with every warning overridden to error severity; the
	// result has exactly one entry per input diagnostic, so pre-size it.
	result := make(tfdiags.Diagnostics, 0, len(diags))
	for _, diag := range diags {
		if diag.Severity() == tfdiags.Warning {
			result = result.Append(tfdiags.Override(diag, tfdiags.Error, nil))
		} else {
			result = result.Append(diag)
		}
	}
	return result
}

// DiagnosticExtraFromMissingExpectedFailure provides an interface for diagnostic ExtraInfo to
// denote that a diagnostic was generated as a result of a missing expected failure.
type DiagnosticExtraFromMissingExpectedFailure interface {
Expand Down
Loading
Loading