diff --git a/.changes/v1.15/BUG FIXES-20260309-084347.yaml b/.changes/v1.15/BUG FIXES-20260309-084347.yaml new file mode 100644 index 000000000000..437fc538a131 --- /dev/null +++ b/.changes/v1.15/BUG FIXES-20260309-084347.yaml @@ -0,0 +1,5 @@ +kind: BUG FIXES +body: 'test: fix dependency ordering in parallel cleanup' +time: 2026-03-09T08:43:47.688216+01:00 +custom: + Issue: "38247" diff --git a/internal/command/test_test.go b/internal/command/test_test.go index 01d3f548ded9..ad48e6b9d4fa 100644 --- a/internal/command/test_test.go +++ b/internal/command/test_test.go @@ -28,6 +28,7 @@ import ( "github.com/hashicorp/terraform/internal/addrs" testing_command "github.com/hashicorp/terraform/internal/command/testing" "github.com/hashicorp/terraform/internal/command/views" + viewsJson "github.com/hashicorp/terraform/internal/command/views/json" "github.com/hashicorp/terraform/internal/configs" "github.com/hashicorp/terraform/internal/configs/configload" "github.com/hashicorp/terraform/internal/getproviders" @@ -40,6 +41,11 @@ import ( "github.com/hashicorp/terraform/internal/tfdiags" ) +type jsonLine struct { + Type string `json:"type"` + TestRun *viewsJson.TestRunStatus `json:"test_run,omitempty"` +} + func TestTest_Runs(t *testing.T) { tcs := map[string]struct { override string @@ -1515,7 +1521,7 @@ func TestTest_ParallelTeardown(t *testing.T) { value = test_resource.foo.value } `, - // c2 => a1, b1 => a1, a2 => b1, b2 => c1 + // c2 => a2, b1 => a1, a2 => b2, b2 => c1 "parallel.tftest.hcl": ` test { parallel = true } @@ -1558,7 +1564,7 @@ func TestTest_ParallelTeardown(t *testing.T) { run "a2" { state_key = "a" variables { - foo = run.b1.value + foo = run.b2.value } providers = { @@ -1588,7 +1594,7 @@ func TestTest_ParallelTeardown(t *testing.T) { run "c2" { state_key = "c" variables { - foo = run.a1.value + foo = run.a2.value } } `, @@ -5674,6 +5680,126 @@ func TestTest_TeardownOrder(t *testing.T) { } } +func TestTest_ParallelDeps(t *testing.T) { + // This tests that
parallel dependencies are handled correctly during teardown. + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", "parallel_deps")), td) + t.Chdir(td) + + provider := testing_command.NewProvider(nil) + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.0.0"}, + }) + defer close() + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + ui := new(cli.MockUi) + + meta := Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + Ui: ui, + View: view, + Streams: streams, + ProviderSource: providerSource, + AllowExperimentalFeatures: true, + } + + init := &InitCommand{ + Meta: meta, + } + + output := done(t) + + if code := init.Run(nil); code != 0 { + t.Fatalf("expected status code 0 but got %d: %s", code, output.All()) + } + + // Reset the streams for the next command. + streams, done = terminal.StreamsForTesting(t) + meta.Streams = streams + meta.View = views.NewView(streams) + + c := &TestCommand{ + Meta: meta, + } + + code := c.Run([]string{"-no-color", "-json"}) + output = done(t) + + if code != 0 { + t.Errorf("expected status code 0 but got %d", code) + } + + actual := output.All() + + var teardownOrder []string + lines, err := parseJSONLines(t, actual) + if err != nil { + t.Fatal(err) + } + for _, parsed := range lines { + if parsed.Type != "test_run" || parsed.TestRun == nil { + continue + } + if parsed.TestRun.Progress != "teardown" { + continue + } + + // We only care about teardowns with elapsed time of 0, indicating the start + // of the teardown phase. + if parsed.TestRun.Elapsed == nil || *parsed.TestRun.Elapsed != 0 { + continue + } + teardownOrder = append(teardownOrder, parsed.TestRun.Run) + } + + // test_two depends on test_three (via run.test_three.id), so during + // teardown the dependency order should be reversed, i.e test_two must + // be torn down before test_three. 
+ testThreeIdx := -1 + testTwoIdx := -1 + for i, name := range teardownOrder { + switch name { + case "test_three": + testThreeIdx = i + case "test_two": + testTwoIdx = i + } + } + + if testThreeIdx == -1 { + t.Fatalf("expected test_three teardown (elapsed=0) in output but did not find it.\nteardown order: %v\nfull output:\n%s", teardownOrder, actual) + } + if testTwoIdx == -1 { + t.Fatalf("expected test_two teardown (elapsed=0) in output but did not find it.\nteardown order: %v\nfull output:\n%s", teardownOrder, actual) + } + if testThreeIdx <= testTwoIdx { + t.Errorf("expected test_two teardown to come before test_three teardown, but got test_three at index %d and test_two at index %d.\nteardown order: %v\nfull output:\n%s", + testThreeIdx, testTwoIdx, teardownOrder, actual) + } + + if provider.ResourceCount() != 0 { + t.Errorf("should have deleted all resources") + } +} + +func parseJSONLines(t *testing.T, actual string) ([]jsonLine, error) { + t.Helper() + var lines []jsonLine + for line := range strings.SplitSeq(strings.TrimSpace(actual), "\n") { + if line == "" { + continue + } + var parsed jsonLine + if err := json.Unmarshal([]byte(line), &parsed); err != nil { + return nil, err + } + lines = append(lines, parsed) + } + return lines, nil +} + // testModuleInline takes a map of path -> config strings and yields a config // structure with those files loaded from disk func testModuleInline(t *testing.T, sources map[string]string) (*configs.Config, string, func()) { diff --git a/internal/command/testdata/test/cleanup/main.tftest.hcl b/internal/command/testdata/test/cleanup/main.tftest.hcl index d1816814e044..b4dae88c1ac2 100644 --- a/internal/command/testdata/test/cleanup/main.tftest.hcl +++ b/internal/command/testdata/test/cleanup/main.tftest.hcl @@ -23,4 +23,4 @@ run "test_four" { variables { id = "test_four" } -} \ No newline at end of file +} diff --git a/internal/command/testdata/test/parallel_deps/main.tf 
b/internal/command/testdata/test/parallel_deps/main.tf new file mode 100644 index 000000000000..df92f1eb42a6 --- /dev/null +++ b/internal/command/testdata/test/parallel_deps/main.tf @@ -0,0 +1,20 @@ +variable "id" { + type = string +} + +variable "unused" { + type = string + default = "unused" +} + +resource "test_resource" "resource" { + value = var.id +} + +output "id" { + value = test_resource.resource.id +} + +output "unused" { + value = var.unused +} \ No newline at end of file diff --git a/internal/command/testdata/test/parallel_deps/main.tftest.hcl b/internal/command/testdata/test/parallel_deps/main.tftest.hcl new file mode 100644 index 000000000000..b9f66d1a51e2 --- /dev/null +++ b/internal/command/testdata/test/parallel_deps/main.tftest.hcl @@ -0,0 +1,30 @@ +test { + parallel = true +} + +run "test" { + variables { + id = "test" + unused = "unused" + } +} + +run "test_two" { + state_key = "state2" + variables { + // This dependency is a later run, but that should be fine because we are in parallel mode. + id = run.test_three.id + + // The output state data for this dependency will also be left behind, but the actual + // resource will have been destroyed by the cleanup step of test_three. 
+ unused = run.test.unused + } +} + +run "test_three" { + state_key = "state3" + variables { + id = "test_three" + unused = run.test.unused + } +} diff --git a/internal/moduletest/graph/test_graph_builder.go b/internal/moduletest/graph/test_graph_builder.go index d8a63bbba484..80858c9c8eb5 100644 --- a/internal/moduletest/graph/test_graph_builder.go +++ b/internal/moduletest/graph/test_graph_builder.go @@ -54,7 +54,7 @@ func (b *TestGraphBuilder) Steps() []terraform.GraphTransformer { &TestVariablesTransformer{File: b.File}, terraform.DynamicTransformer(validateRunConfigs), terraform.DynamicTransformer(func(g *terraform.Graph) error { - cleanup := &TeardownSubgraph{opts: opts, parent: g, mode: b.CommandMode} + cleanup := &TeardownSubgraph{opts: opts, runGraph: g, mode: b.CommandMode} g.Add(cleanup) // ensure that the teardown node runs after all the run nodes diff --git a/internal/moduletest/graph/transform_state_cleanup.go b/internal/moduletest/graph/transform_state_cleanup.go index f41c26fba783..1a6a0734e71c 100644 --- a/internal/moduletest/graph/transform_state_cleanup.go +++ b/internal/moduletest/graph/transform_state_cleanup.go @@ -24,31 +24,35 @@ type Subgrapher interface { // TeardownSubgraph is a subgraph for cleaning up the state of // resources defined in the state files created by the test runs. type TeardownSubgraph struct { - opts *graphOptions - parent *terraform.Graph - mode moduletest.CommandMode + opts *graphOptions + runGraph *terraform.Graph + mode moduletest.CommandMode } func (b *TeardownSubgraph) Execute(ctx *EvalContext) { ctx.Renderer().File(b.opts.File, moduletest.TearDown) - runRefMap := make(map[addrs.Run][]string) + runRefs := make(map[addrs.Run][]*moduletest.Run) + // Build a map of run nodes to other run nodes they depend on. + // In cleanup mode, the run node is the NodeTestRunCleanup struct. 
if b.mode == moduletest.CleanupMode { - for runNode := range dag.SelectSeq[*NodeTestRunCleanup](b.parent.VerticesSeq()) { - refs := b.parent.Ancestors(runNode) - for _, ref := range refs { - if ref, ok := ref.(*NodeTestRunCleanup); ok && ref.run.Config.StateKey != runNode.run.Config.StateKey { - runRefMap[runNode.run.Addr()] = append(runRefMap[runNode.run.Addr()], ref.run.Config.StateKey) + for runNode := range dag.SelectSeq[*NodeTestRunCleanup](b.runGraph.VerticesSeq()) { + addr := runNode.run.Addr() + parents := b.runGraph.Ancestors(runNode) + for _, ref := range parents { + if ref, ok := ref.(*NodeTestRunCleanup); ok { + runRefs[addr] = append(runRefs[addr], ref.run) } } } } else { - for runNode := range dag.SelectSeq[*NodeTestRun](b.parent.VerticesSeq()) { - refs := b.parent.Ancestors(runNode) - for _, ref := range refs { - if ref, ok := ref.(*NodeTestRun); ok && ref.run.Config.StateKey != runNode.run.Config.StateKey { - runRefMap[runNode.run.Addr()] = append(runRefMap[runNode.run.Addr()], ref.run.Config.StateKey) + for runNode := range dag.SelectSeq[*NodeTestRun](b.runGraph.VerticesSeq()) { + addr := runNode.run.Addr() + parents := b.runGraph.Ancestors(runNode) + for _, ref := range parents { + if ref, ok := ref.(*NodeTestRun); ok { + runRefs[addr] = append(runRefs[addr], ref.run) } } } @@ -57,7 +61,7 @@ func (b *TeardownSubgraph) Execute(ctx *EvalContext) { // Create a new graph for the cleanup nodes g, diags := (&terraform.BasicGraphBuilder{ Steps: []terraform.GraphTransformer{ - &TestStateCleanupTransformer{opts: b.opts, runStateRefs: runRefMap}, + &TestStateCleanupTransformer{opts: b.opts, runDependencyMap: runRefs}, &CloseTestGraphTransformer{}, &terraform.TransitiveReductionTransformer{}, }, @@ -78,20 +82,21 @@ func (b *TeardownSubgraph) isSubGrapher() {} // TestStateCleanupTransformer is a GraphTransformer that adds a cleanup node // for each state that is created by the test runs. 
type TestStateCleanupTransformer struct { - opts *graphOptions - runStateRefs map[addrs.Run][]string + opts *graphOptions + runDependencyMap map[addrs.Run][]*moduletest.Run } func (t *TestStateCleanupTransformer) Transform(g *terraform.Graph) error { - cleanupMap := make(map[string]*NodeStateCleanup) - arr := make([]*NodeStateCleanup, 0, len(t.opts.File.Runs)) + type cleanupObj struct { + node *NodeStateCleanup + dependencies []*moduletest.Run + } - // dependency map for state keys, which will be used to traverse - // the cleanup nodes in a depth-first manner. - depStateKeys := make(map[string][]string) + cleanupMap := make(map[string]cleanupObj) + runNodesUsedForCleanup := make(map[addrs.Run]bool) - // iterate in reverse order of the run index, so that the last run for each state key - // is attached to the cleanup node. + // iterate in reverse order of the run index, so that the dependency map of the last + // run for each state key is used for the cleanup node. for _, run := range slices.Backward(t.opts.File.Runs) { key := run.Config.StateKey @@ -100,42 +105,31 @@ func (t *TestStateCleanupTransformer) Transform(g *terraform.Graph) error { stateKey: key, opts: t.opts, } - cleanupMap[key] = node - arr = append(arr, node) + cleanupMap[key] = cleanupObj{ + node: node, + dependencies: t.runDependencyMap[run.Addr()], + } g.Add(node) - - // The dependency map for the state's last run will be used for the cleanup node. - depStateKeys[key] = t.runStateRefs[run.Addr()] + runNodesUsedForCleanup[run.Addr()] = true continue } } - // Depth-first traversal to connect the cleanup nodes based on their dependencies. - // If an edge would create a cycle, we skip it. 
- visited := make(map[string]bool) - for _, node := range arr { - t.depthFirstTraverse(g, node, visited, cleanupMap, depStateKeys) - } - return nil -} - -func (t *TestStateCleanupTransformer) depthFirstTraverse(g *terraform.Graph, node *NodeStateCleanup, visited map[string]bool, cleanupNodes map[string]*NodeStateCleanup, depStateKeys map[string][]string) { - if visited[node.stateKey] { - return - } - // don't mark the node as visited if it's a leaf node, this ensures that other dependencies are still added to it - if len(depStateKeys[node.stateKey]) == 0 { - return - } - visited[node.stateKey] = true - - for _, refStateKey := range depStateKeys[node.stateKey] { - // If the reference node has already been visited, skip it. - if visited[refStateKey] { - continue + // We connect the cleanup nodes to their dependencies in reverse order, + // i.e a cleanup node for a run will evaluate before its references. + // We only connect references that are also cleanup nodes. If a referenced run + // is not used by a cleanup node, it will not be connected. + for _, obj := range cleanupMap { + for _, dep := range obj.dependencies { + if _, exists := runNodesUsedForCleanup[dep.Addr()]; exists { + depCleanupNode := cleanupMap[dep.Config.StateKey].node + objCleanupNode := obj.node + if depCleanupNode == objCleanupNode { + continue + } + g.Connect(dag.BasicEdge(depCleanupNode, objCleanupNode)) + } } - refNode := cleanupNodes[refStateKey] - g.Connect(dag.BasicEdge(refNode, node)) - t.depthFirstTraverse(g, refNode, visited, cleanupNodes, depStateKeys) } + return nil }