-
Notifications
You must be signed in to change notification settings - Fork 10.3k
Expand file tree
/
Copy path: node_state_cleanup.go
More file actions
155 lines (130 loc) · 5.27 KB
/
node_state_cleanup.go
File metadata and controls
155 lines (130 loc) · 5.27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package graph
import (
"fmt"
"log"
"time"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/moduletest"
"github.com/hashicorp/terraform/internal/moduletest/mocking"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/terraform"
"github.com/hashicorp/terraform/internal/tfdiags"
)
var (
	// Compile-time assertion that *NodeStateCleanup satisfies the
	// GraphNodeExecutable interface.
	_ GraphNodeExecutable = (*NodeStateCleanup)(nil)
)
// NodeStateCleanup is a graph node responsible for destroying the resources
// recorded in a single test state file after the test run has finished.
type NodeStateCleanup struct {
	// stateKey identifies the state file within the EvalContext that this
	// node cleans up.
	stateKey string

	// opts carries the shared per-file options (test file, context options)
	// used when building and applying the destroy plan.
	opts *graphOptions

	// parallel is not referenced in this file — presumably consumed by the
	// graph builder when scheduling cleanup nodes. TODO confirm.
	parallel bool
}
// Name returns the unique graph-node identifier for this cleanup node,
// derived from the state key it is responsible for.
func (n *NodeStateCleanup) Name() string {
	return "cleanup." + n.stateKey
}
// Execute destroys the resources created in the state file.
//
// This function should never return non-fatal error diagnostics, as that
// would prevent further cleanup from happening. Instead, the diagnostics
// are rendered directly via the context's renderer.
func (n *NodeStateCleanup) Execute(evalCtx *EvalContext) {
	file := n.opts.File
	state := evalCtx.GetFileState(n.stateKey)
	log.Printf("[TRACE] TestStateManager: cleaning up state for %s", file.Name)

	if evalCtx.Cancelled() {
		// Don't try and clean anything up if the execution has been cancelled.
		log.Printf("[DEBUG] TestStateManager: skipping state cleanup for %s due to cancellation", file.Name)
		return
	}

	// Determine whether the state tracks any managed resources; data sources
	// don't need to be destroyed.
	empty := true
	if !state.State.Empty() {
	Search:
		for _, module := range state.State.Modules {
			for _, resource := range module.Resources {
				if resource.Addr.Resource.Mode == addrs.ManagedResourceMode {
					empty = false
					// Fixed: the plain break here only exited the inner
					// resource loop, so the module loop kept scanning after
					// the answer was already known.
					break Search
				}
			}
		}
	}

	if empty {
		// The state can be empty for a run block that just executed a plan
		// command, or a run block that only read data sources. We'll just
		// skip empty run blocks.
		return
	}

	if state.Run == nil {
		log.Printf("[ERROR] TestFileRunner: found inconsistent run block and state file in %s for module %s", file.Name, n.stateKey)

		// The state can have a nil run block if it only executed a plan
		// command. In which case, we shouldn't have reached here as the
		// state should also have been empty and this will have been skipped
		// above. If we do reach here, then something has gone badly wrong
		// and we can't really recover from it.

		diags := tfdiags.Diagnostics{tfdiags.Sourceless(tfdiags.Error, "Inconsistent state", fmt.Sprintf("Found inconsistent state while cleaning up %s. This is a bug in Terraform - please report it", file.Name))}
		file.UpdateStatus(moduletest.Error)
		evalCtx.Renderer().DestroySummary(diags, nil, file, state.State)

		// Intentionally return without error diagnostics so that cleanup of
		// other state files can still proceed.
		return
	}

	runNode := &NodeTestRun{run: state.Run, opts: n.opts}
	updated := state.State
	startTime := time.Now().UTC()
	waiter := NewOperationWaiter(nil, evalCtx, runNode, moduletest.Running, startTime.UnixMilli())

	var destroyDiags tfdiags.Diagnostics
	cancelled := waiter.Run(func() {
		updated, destroyDiags = n.destroy(evalCtx, runNode, waiter)
	})
	if cancelled {
		destroyDiags = destroyDiags.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources."))
	}

	if !updated.Empty() {
		// Then we failed to adequately clean up the state, so mark success
		// as false.
		file.UpdateStatus(moduletest.Error)
	}

	evalCtx.Renderer().DestroySummary(destroyDiags, state.Run, file, updated)
}
// destroy builds a destroy-mode plan against the state tracked for this
// node's state key and applies it, returning the updated (ideally empty)
// state together with any diagnostics produced along the way.
func (n *NodeStateCleanup) destroy(ctx *EvalContext, runNode *NodeTestRun, waiter *operationWaiter) (*states.State, tfdiags.Diagnostics) {
	file := n.opts.File
	fileState := ctx.GetFileState(n.stateKey)
	state := fileState.State
	run := runNode.run

	log.Printf("[TRACE] TestFileRunner: called destroy for %s/%s", file.Name, run.Name)

	if state.Empty() {
		// Nothing to do!
		return state, nil
	}

	variables, diags := runNode.GetVariables(ctx, false)
	if diags.HasErrors() {
		return state, diags
	}

	// we ignore the diagnostics from here, because we will have reported them
	// during the initial execution of the run block and we would not have
	// executed the run block if there were any errors.
	providers, mocks, _ := runNode.getProviders(ctx)

	// During the destroy operation, we don't add warnings from this operation.
	// Anything that would have been reported here was already reported during
	// the original plan, and a successful destroy operation is the only thing
	// we care about.
	setVariables, _, _ := runNode.FilterVariablesToModule(variables)

	planOpts := &terraform.PlanOpts{
		Mode:                   plans.DestroyMode,
		SetVariables:           setVariables,
		Overrides:              mocking.PackageOverrides(run.Config, file.Config, mocks),
		ExternalProviders:      providers,
		SkipRefresh:            true,
		OverridePreventDestroy: true,
	}

	// Fixed: the diagnostics returned by NewContext were previously
	// discarded with a blank identifier; surface them and stop before using
	// a context that failed to initialise.
	tfCtx, ctxDiags := terraform.NewContext(n.opts.ContextOpts)
	diags = diags.Append(ctxDiags)
	if diags.HasErrors() {
		return state, diags
	}

	ctx.Renderer().Run(run, file, moduletest.TearDown, 0)
	waiter.update(tfCtx, moduletest.TearDown, nil)

	plan, planDiags := tfCtx.Plan(run.ModuleConfig, state, planOpts)
	diags = diags.Append(planDiags)
	if diags.HasErrors() {
		return state, diags
	}

	_, updated, applyDiags := runNode.apply(tfCtx, plan, moduletest.TearDown, variables, providers, waiter)
	diags = diags.Append(applyDiags)
	return updated, diags
}