Skip to content

Commit b9b39e9

Browse files
core: Eliminate NodePlannableResource indirection
We previously did two levels of DynamicExpand to go from ConfigResource to AbsResource and then from AbsResource to AbsResourceInstance. We'll now do the full expansion from ConfigResource to AbsResourceInstance in a single DynamicExpand step inside nodeExpandPlannableResource. The new approach is essentially functionally equivalent to the old except that it fixes a bug in the previous implementation: we will now call checkState.ReportCheckableObjects only once for the entire set of instances for a particular resource, which is what the checkable objects infrastructure expects so that it can always mention all of the checkable objects in the check report even if we bail out partway through due to a downstream error. This is essentially the same code but now turned into additional methods on nodeExpandPlannableResource instead of having the extra graph node type. This has the further advantage of this now being straight-through code with standard control flow, instead of the unusual inversion of control we were doing before bouncing in and out of different Execute and DynamicExpand implementations to get this done.
1 parent c1f0ff5 commit b9b39e9

File tree

4 files changed

+212
-180
lines changed

4 files changed

+212
-180
lines changed

internal/dag/graph.go

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -230,6 +230,28 @@ func (g *Graph) Connect(edge Edge) {
230230
s.Add(source)
231231
}
232232

233+
// Subsume imports all of the nodes and edges from the given graph into the
// receiver, leaving the given graph unchanged.
//
// If any of the nodes in the given graph are already present in the receiver
// then the existing node will be retained and any new edges from the given
// graph will be connected with it.
//
// If the given graph has edges in common with the receiver then they will be
// ignored, because each pair of nodes can only be connected once.
func (g *Graph) Subsume(other *Graph) {
	// We're using Set.Filter just as a "visit each element" here, so we're
	// not doing anything with the result (which will always be empty).
	other.vertices.Filter(func(i interface{}) bool {
		g.Add(i)
		return false
	})
	other.edges.Filter(func(i interface{}) bool {
		g.Connect(i.(Edge))
		return false
	})
}
254+
233255
// String outputs some human-friendly output for the graph structure.
234256
func (g *Graph) StringWithNodeTypes() string {
235257
var buf bytes.Buffer

internal/terraform/context_plan2_test.go

Lines changed: 105 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -401,6 +401,111 @@ resource "test_resource" "b" {
401401
}
402402
}
403403

404+
func TestContext2Plan_resourceChecksInExpandedModule(t *testing.T) {
	// When a resource is in a nested module we have two levels of expansion
	// to do: first expand the module the resource is declared in, and then
	// expand the resource itself.
	//
	// In earlier versions of Terraform we did that expansion as two levels
	// of DynamicExpand, which led to a bug where we didn't have any central
	// location from which to register all of the instances of a checkable
	// resource.
	//
	// We now handle the full expansion all in one graph node and one dynamic
	// subgraph, which avoids the problem. This is a regression test for the
	// earlier bug. If this test is panicking with "duplicate checkable objects
	// report" then that suggests the bug is reintroduced and we're now back
	// to reporting each module instance separately again, which is incorrect.

	// Minimal mock provider: one resource type "test" with an empty schema,
	// whose read/plan/apply callbacks just echo state through unchanged.
	p := testProvider("test")
	p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
		Provider: providers.Schema{
			Block: &configschema.Block{},
		},
		ResourceTypes: map[string]providers.Schema{
			"test": {
				Block: &configschema.Block{},
			},
		},
	}
	p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
		resp.NewState = req.PriorState
		return resp
	}
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
		resp.PlannedState = cty.EmptyObjectVal
		return resp
	}
	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
		resp.NewState = req.PlannedState
		return resp
	}

	// A checkable resource (it has a postcondition) declared inside a module
	// that is itself expanded with count, giving the two expansion levels
	// this test is about.
	m := testModuleInline(t, map[string]string{
		"main.tf": `
			module "child" {
				source = "./child"
				count = 2 # must be at least 2 for this test to be valid
			}
		`,
		"child/child.tf": `
			locals {
				a = "a"
			}

			resource "test" "test" {
				lifecycle {
					postcondition {
						# It doesn't matter what this checks as long as it
						# passes, because if we don't handle expansion properly
						# then we'll crash before we even get to evaluating this.
						condition = local.a == local.a
						error_message = "Postcondition failed."
					}
				}
			}
		`,
	})

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	// Planning from an empty prior state, so every instance should be a
	// Create action below.
	priorState := states.NewState()
	plan, diags := ctx.Plan(m, priorState, DefaultPlanOpts)
	assertNoErrors(t, diags)

	// One instance per module.child index; both must appear in the plan and
	// in the check results.
	resourceInsts := []addrs.AbsResourceInstance{
		mustResourceInstanceAddr("module.child[0].test.test"),
		mustResourceInstanceAddr("module.child[1].test.test"),
	}

	for _, instAddr := range resourceInsts {
		t.Run(fmt.Sprintf("results for %s", instAddr), func(t *testing.T) {
			if rc := plan.Changes.ResourceInstance(instAddr); rc != nil {
				if got, want := rc.Action, plans.Create; got != want {
					t.Errorf("wrong action for %s\ngot: %s\nwant: %s", instAddr, got, want)
				}
				if got, want := rc.ActionReason, plans.ResourceInstanceChangeNoReason; got != want {
					t.Errorf("wrong action reason for %s\ngot: %s\nwant: %s", instAddr, got, want)
				}
			} else {
				t.Errorf("no planned change for %s", instAddr)
			}

			// The passing postcondition must have been evaluated and
			// recorded for each instance.
			if checkResult := plan.Checks.GetObjectResult(instAddr); checkResult != nil {
				if got, want := checkResult.Status, checks.StatusPass; got != want {
					t.Errorf("wrong check status for %s\ngot: %s\nwant: %s", instAddr, got, want)
				}
			} else {
				t.Errorf("no check result for %s", instAddr)
			}
		})
	}
}
508+
404509
func TestContext2Plan_dataResourceChecksManagedResourceChange(t *testing.T) {
405510
// This tests the situation where the remote system contains data that
406511
// isn't valid per a data resource postcondition, but that the

0 commit comments

Comments
 (0)