Skip to content

Commit 7f2dff1

Browse files
authored
Merge pull request #585 from Charliekenney23/fix-k8supgrade-test
refactor k8supgrade test to wait for all nodes ready
2 parents dd8b4a1 + 0af3049 commit 7f2dff1

File tree

1 file changed

+48
-0
lines changed

1 file changed

+48
-0
lines changed

linode/lke/resource_test.go

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import (
88
"sort"
99
"strconv"
1010
"testing"
11+
"time"
1112

1213
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
1314
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
@@ -102,6 +103,44 @@ func checkLKEExists(cluster *linodego.LKECluster) resource.TestCheckFunc {
102103
}
103104
}
104105

106+
// waitForAllNodesReady waits for every Node in every NodePool of the LKE Cluster to be in
107+
// a ready state.
108+
func waitForAllNodesReady(t *testing.T, cluster *linodego.LKECluster, pollInterval, timeout time.Duration) {
109+
t.Helper()
110+
111+
ctx := context.Background()
112+
client := acceptance.TestAccProvider.Meta().(*helper.ProviderMeta).Client
113+
114+
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(timeout))
115+
defer cancel()
116+
117+
for {
118+
select {
119+
case <-ctx.Done():
120+
t.Fatalf("timed out waiting for LKE Cluster (%d) Nodes to be ready", cluster.ID)
121+
122+
case <-time.NewTicker(pollInterval).C:
123+
nodePools, err := client.ListLKEClusterPools(ctx, cluster.ID, &linodego.ListOptions{})
124+
if err != nil {
125+
t.Fatalf("failed to get NodePools for LKE Cluster (%d): %s", cluster.ID, err)
126+
}
127+
128+
// Check that all NodePools are ready.
129+
for _, nodePool := range nodePools {
130+
for _, linode := range nodePool.Linodes {
131+
if linode.Status != linodego.LKELinodeReady {
132+
// This NodePool is not finished initializing; check again later.
133+
continue
134+
}
135+
}
136+
}
137+
138+
// If we get to this point, all NodePools must be ready.
139+
return
140+
}
141+
}
142+
}
143+
105144
func TestAccResourceLKECluster_basic(t *testing.T) {
106145
t.Parallel()
107146

@@ -137,6 +176,8 @@ func TestAccResourceLKECluster_basic(t *testing.T) {
137176
func TestAccResourceLKECluster_k8sUpgrade(t *testing.T) {
138177
t.Parallel()
139178

179+
var cluster linodego.LKECluster
180+
140181
clusterName := acctest.RandomWithPrefix("tf_test")
141182
resource.Test(t, resource.TestCase{
142183
PreCheck: func() { acceptance.PreCheck(t) },
@@ -146,12 +187,19 @@ func TestAccResourceLKECluster_k8sUpgrade(t *testing.T) {
146187
{
147188
Config: tmpl.ManyPools(t, clusterName, k8sVersionPrevious),
148189
Check: resource.ComposeTestCheckFunc(
190+
checkLKEExists(&cluster),
149191
resource.TestCheckResourceAttr(resourceClusterName, "label", clusterName),
150192
resource.TestCheckResourceAttr(resourceClusterName, "region", "us-central"),
151193
resource.TestCheckResourceAttr(resourceClusterName, "k8s_version", k8sVersionPrevious),
152194
),
153195
},
154196
{
197+
PreConfig: func() {
198+
// Before we upgrade the Cluster to a newer version of Kubernetes, we need to first
199+
// ensure that every Node in each of this cluster's NodePools is ready. Otherwise, the
200+
// recycle will not actually occur.
201+
waitForAllNodesReady(t, &cluster, time.Second*5, time.Minute*5)
202+
},
155203
Config: tmpl.ManyPools(t, clusterName, k8sVersionLatest),
156204
Check: resource.ComposeTestCheckFunc(
157205
resource.TestCheckResourceAttr(resourceClusterName, "label", clusterName),

0 commit comments

Comments
 (0)