Merge pull request #23560 from hashicorp/t-rds-acctest-failures
RDS Acceptance Test Failures
YakDriver committed Mar 14, 2022
2 parents 68ebcac + f539a8c commit 84ebd6c
Showing 31 changed files with 2,563 additions and 1,008 deletions.
15 changes: 15 additions & 0 deletions .changelog/23560.txt
@@ -0,0 +1,15 @@
```release-note:bug
resource/aws_rds_event_subscription: Fix issue where `enabled` was sometimes not updated
```

```release-note:bug
resource/aws_db_instance: Fix issues where the configured update timeout was not respected and where updates would fail while the instance was still being configured.
```

```release-note:bug
resource/aws_rds_global_cluster: Fix cluster version upgrades, including for member clusters in distinct regions, which previously failed with the error "Invalid database cluster identifier"
```

```release-note:enhancement
resource/aws_rds_global_cluster: Add configurable timeouts
```
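
Context for the configurable-timeouts entry above: Terraform Plugin SDK v2 resources expose configurable timeouts by attaching a `Timeouts` block to the resource schema, which CRUD handlers then read back with `d.Timeout(...)` — the same calls visible in the `cluster.go` hunks below. A minimal sketch of the pattern; the durations are illustrative assumptions, not the values this commit sets for `aws_rds_global_cluster`:

```go
package rds

import (
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// globalClusterTimeouts sketches how a resource declares configurable
// timeouts: defaults are declared once here, and practitioners override
// them with a timeouts {} block in configuration. The durations below are
// placeholders, not the committed values.
func globalClusterTimeouts() *schema.ResourceTimeout {
	return &schema.ResourceTimeout{
		Create: schema.DefaultTimeout(30 * time.Minute),
		Update: schema.DefaultTimeout(90 * time.Minute),
		Delete: schema.DefaultTimeout(30 * time.Minute),
	}
}
```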
2 changes: 2 additions & 0 deletions internal/conns/conns.go
@@ -1145,6 +1145,7 @@ type AWSClient struct {
 	ServiceDiscoveryConn *servicediscovery.ServiceDiscovery
 	ServiceQuotasConn    *servicequotas.ServiceQuotas
 	SESConn              *ses.SES
+	Session              *session.Session
 	SESV2Conn            *sesv2.SESV2
 	SFNConn              *sfn.SFN
 	ShieldConn           *shield.Shield
@@ -1545,6 +1546,7 @@ func (c *Config) Client(ctx context.Context) (interface{}, diag.Diagnostics) {
 		ServiceQuotasConn: servicequotas.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints[ServiceQuotas])})),
 		SESConn:           ses.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints[SES])})),
 		SESV2Conn:         sesv2.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints[SESV2])})),
+		Session:           sess,
 		SFNConn:           sfn.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints[SFN])})),
 		SignerConn:        signer.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints[Signer])})),
 		SimpleDBConn:      simpledb.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints[SimpleDB])})),
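
The new `Session` field is what lets service code build clients for regions other than the configured one — the cross-region piece of the `aws_rds_global_cluster` fix. A sketch of what it enables; the helper name here is hypothetical, not part of this commit:

```go
package conns

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// regionalRDSConn (hypothetical) derives an RDS client for an arbitrary
// region from the stored session. rds.New accepts the session as a
// client.ConfigProvider and applies the region override without mutating
// the session the default connections were built from.
func regionalRDSConn(client *AWSClient, region string) *rds.RDS {
	return rds.New(client.Session, aws.NewConfig().WithRegion(region))
}
```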
40 changes: 13 additions & 27 deletions internal/service/rds/cluster.go
@@ -23,9 +23,9 @@ import (
 )
 
 const (
-	rdsClusterScalingConfiguration_DefaultMinCapacity = 1
-	rdsClusterScalingConfiguration_DefaultMaxCapacity = 16
-	rdsClusterTimeoutDelete                           = 2 * time.Minute
+	clusterScalingConfiguration_DefaultMinCapacity = 1
+	clusterScalingConfiguration_DefaultMaxCapacity = 16
+	clusterTimeoutDelete                           = 2 * time.Minute
 )
 
 func ResourceCluster() *schema.Resource {
@@ -205,12 +205,12 @@ func ResourceCluster() *schema.Resource {
"max_capacity": {
Type: schema.TypeInt,
Optional: true,
Default: rdsClusterScalingConfiguration_DefaultMaxCapacity,
Default: clusterScalingConfiguration_DefaultMaxCapacity,
},
"min_capacity": {
Type: schema.TypeInt,
Optional: true,
Default: rdsClusterScalingConfiguration_DefaultMinCapacity,
Default: clusterScalingConfiguration_DefaultMinCapacity,
},
"seconds_until_auto_pause": {
Type: schema.TypeInt,
@@ -953,8 +953,7 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error {

 	log.Printf("[INFO] RDS Cluster ID: %s", d.Id())
 
-	log.Println(
-		"[INFO] Waiting for RDS Cluster to be available")
+	log.Println("[INFO] Waiting for RDS Cluster to be available")
 
 	stateConf := &resource.StateChangeConf{
 		Pending:    resourceClusterCreatePendingStates,
@@ -990,7 +989,7 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error {
 	}
 
 	log.Printf("[INFO] Waiting for RDS Cluster (%s) to be available", d.Id())
-	err = waitForRDSClusterUpdate(conn, d.Id(), d.Timeout(schema.TimeoutCreate))
+	err = waitForClusterUpdate(conn, d.Id(), d.Timeout(schema.TimeoutCreate))
 	if err != nil {
 		return fmt.Errorf("error waiting for RDS Cluster (%s) to be available: %s", d.Id(), err)
 	}
@@ -1081,7 +1080,7 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error {
 	d.Set("hosted_zone_id", dbc.HostedZoneId)
 	d.Set("iam_database_authentication_enabled", dbc.IAMDatabaseAuthenticationEnabled)
 
-	rdsClusterSetResourceDataEngineVersionFromCluster(d, dbc)
+	clusterSetResourceDataEngineVersionFromCluster(d, dbc)
 
 	var roles []string
 	for _, r := range dbc.AssociatedRoles {
@@ -1288,7 +1287,7 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error {
 	}
 
 	log.Printf("[INFO] Waiting for RDS Cluster (%s) to be available", d.Id())
-	err = waitForRDSClusterUpdate(conn, d.Id(), d.Timeout(schema.TimeoutUpdate))
+	err = waitForClusterUpdate(conn, d.Id(), d.Timeout(schema.TimeoutUpdate))
 	if err != nil {
 		return fmt.Errorf("error waiting for RDS Cluster (%s) to be available: %s", d.Id(), err)
 	}
@@ -1397,7 +1396,7 @@ func resourceClusterDelete(d *schema.ResourceData, meta interface{}) error {

 	log.Printf("[DEBUG] RDS Cluster delete options: %s", deleteOpts)
 
-	err := resource.Retry(rdsClusterTimeoutDelete, func() *resource.RetryError {
+	err := resource.Retry(clusterTimeoutDelete, func() *resource.RetryError {
 		_, err := conn.DeleteDBCluster(&deleteOpts)
 		if err != nil {
 			if tfawserr.ErrMessageContains(err, rds.ErrCodeInvalidDBClusterStateFault, "is not currently in the available state") {
@@ -1506,7 +1505,7 @@ var resourceClusterUpdatePendingStates = []string{
"upgrading",
}

func waitForRDSClusterUpdate(conn *rds.RDS, id string, timeout time.Duration) error {
func waitForClusterUpdate(conn *rds.RDS, id string, timeout time.Duration) error {
stateConf := &resource.StateChangeConf{
Pending: resourceClusterUpdatePendingStates,
Target: []string{"available"},
@@ -1515,6 +1514,7 @@ func waitForRDSClusterUpdate(conn *rds.RDS, id string, timeout time.Duration) error {
 		MinTimeout: 10 * time.Second,
 		Delay:      30 * time.Second, // Wait 30 secs before starting
 	}
+
 	_, err := stateConf.WaitForState()
 	return err
 }
@@ -1534,22 +1534,8 @@ func WaitForClusterDeletion(conn *rds.RDS, id string, timeout time.Duration) error {
 	return err
 }
 
-func rdsClusterSetResourceDataEngineVersionFromCluster(d *schema.ResourceData, c *rds.DBCluster) {
+func clusterSetResourceDataEngineVersionFromCluster(d *schema.ResourceData, c *rds.DBCluster) {
 	oldVersion := d.Get("engine_version").(string)
 	newVersion := aws.StringValue(c.EngineVersion)
 	compareActualEngineVersion(d, oldVersion, newVersion)
 }
-
-func compareActualEngineVersion(d *schema.ResourceData, oldVersion string, newVersion string) {
-	newVersionSubstr := newVersion
-
-	if len(newVersion) > len(oldVersion) {
-		newVersionSubstr = string([]byte(newVersion)[0 : len(oldVersion)+1])
-	}
-
-	if oldVersion != newVersion && string(append([]byte(oldVersion), []byte(".")...)) != newVersionSubstr {
-		d.Set("engine_version", newVersion)
-	}
-
-	d.Set("engine_version_actual", newVersion)
-}
12 changes: 6 additions & 6 deletions internal/service/rds/cluster_endpoint.go
@@ -18,9 +18,9 @@ import (
 )
 
 const (
-	AWSRDSClusterEndpointCreateTimeout = 30 * time.Minute
-	AWSRDSClusterEndpointRetryDelay    = 5 * time.Second
-	ClusterEndpointRetryMinTimeout     = 3 * time.Second
+	clusterEndpointCreateTimeout   = 30 * time.Minute
+	clusterEndpointRetryDelay      = 5 * time.Second
+	ClusterEndpointRetryMinTimeout = 3 * time.Second
 )
 
 func ResourceClusterEndpoint() *schema.Resource {
@@ -114,7 +114,7 @@ func resourceClusterEndpointCreate(d *schema.ResourceData, meta interface{}) error {

 	d.SetId(endpointId)
 
-	err = resourceClusterEndpointWaitForAvailable(AWSRDSClusterEndpointCreateTimeout, d.Id(), conn)
+	err = resourceClusterEndpointWaitForAvailable(clusterEndpointCreateTimeout, d.Id(), conn)
 	if err != nil {
 		return err
 	}
@@ -252,7 +252,7 @@ func resourceClusterEndpointWaitForDestroy(timeout time.Duration, id string, conn *rds.RDS) error {
 		Target:     []string{"destroyed"},
 		Refresh:    DBClusterEndpointStateRefreshFunc(conn, id),
 		Timeout:    timeout,
-		Delay:      AWSRDSClusterEndpointRetryDelay,
+		Delay:      clusterEndpointRetryDelay,
 		MinTimeout: ClusterEndpointRetryMinTimeout,
 	}
 	_, err := stateConf.WaitForState()
@@ -270,7 +270,7 @@ func resourceClusterEndpointWaitForAvailable(timeout time.Duration, id string, conn *rds.RDS) error {
 		Target:     []string{"available"},
 		Refresh:    DBClusterEndpointStateRefreshFunc(conn, id),
 		Timeout:    timeout,
-		Delay:      AWSRDSClusterEndpointRetryDelay,
+		Delay:      clusterEndpointRetryDelay,
 		MinTimeout: ClusterEndpointRetryMinTimeout,
 	}
 
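
Both waiters above drive `resource.StateChangeConf` with `DBClusterEndpointStateRefreshFunc`. A simplified sketch of that refresh-func pattern, with error classification trimmed (not the provider's exact implementation):

```go
package rds

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// endpointStatus polls a cluster endpoint and reports its status string to
// StateChangeConf; an empty result maps to the synthetic "destroyed" state
// that the destroy waiter targets.
func endpointStatus(conn *rds.RDS, id string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		out, err := conn.DescribeDBClusterEndpoints(&rds.DescribeDBClusterEndpointsInput{
			DBClusterEndpointIdentifier: aws.String(id),
		})
		if err != nil {
			return nil, "", err
		}
		if len(out.DBClusterEndpoints) == 0 {
			return nil, "destroyed", nil
		}
		ep := out.DBClusterEndpoints[0]
		return ep, aws.StringValue(ep.Status), nil
	}
}
```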
30 changes: 23 additions & 7 deletions internal/service/rds/cluster_endpoint_test.go
@@ -18,6 +18,10 @@ import (
 )
 
 func TestAccRDSClusterEndpoint_basic(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping long-running test in short mode")
+	}
+
 	rInt := sdkacctest.RandInt()
 	var customReaderEndpoint rds.DBClusterEndpoint
 	var customEndpoint rds.DBClusterEndpoint
@@ -61,6 +65,10 @@ func TestAccRDSClusterEndpoint_basic(t *testing.T) {
 }
 
 func TestAccRDSClusterEndpoint_tags(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping long-running test in short mode")
+	}
+
 	rInt := sdkacctest.RandInt()
 	var customReaderEndpoint rds.DBClusterEndpoint
 	resourceName := "aws_rds_cluster_endpoint.reader"
@@ -211,7 +219,9 @@ func testAccCheckClusterEndpointExistsWithProvider(resourceName string, endpoint
 }
 
 func testAccClusterEndpointBaseConfig(n int) string {
-	return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(`
+	return acctest.ConfigCompose(
+		acctest.ConfigAvailableAZsNoOptIn(),
+		fmt.Sprintf(`
 data "aws_rds_orderable_db_instance" "test" {
   engine         = aws_rds_cluster.default.engine
   engine_version = aws_rds_cluster.default.engine_version
@@ -249,7 +259,9 @@ resource "aws_rds_cluster_instance" "test2" {
 }
 
 func testAccClusterEndpointConfig(n int) string {
-	return testAccClusterEndpointBaseConfig(n) + fmt.Sprintf(`
+	return acctest.ConfigCompose(
+		testAccClusterEndpointBaseConfig(n),
+		fmt.Sprintf(`
 resource "aws_rds_cluster_endpoint" "reader" {
   cluster_identifier          = aws_rds_cluster.default.id
   cluster_endpoint_identifier = "reader-%[1]d"
@@ -265,11 +277,13 @@ resource "aws_rds_cluster_endpoint" "default" {
   excluded_members = [aws_rds_cluster_instance.test2.id]
 }
-`, n)
+`, n))
 }

 func testAccClusterEndpointTags1Config(n int, tagKey1, tagValue1 string) string {
-	return testAccClusterEndpointBaseConfig(n) + fmt.Sprintf(`
+	return acctest.ConfigCompose(
+		testAccClusterEndpointBaseConfig(n),
+		fmt.Sprintf(`
 resource "aws_rds_cluster_endpoint" "reader" {
   cluster_identifier          = aws_rds_cluster.default.id
   cluster_endpoint_identifier = "reader-%[1]d"
@@ -281,11 +295,13 @@ resource "aws_rds_cluster_endpoint" "reader" {
     %[2]q = %[3]q
   }
 }
-`, n, tagKey1, tagValue1)
+`, n, tagKey1, tagValue1))
 }

 func testAccClusterEndpointTags2Config(n int, tagKey1, tagValue1, tagKey2, tagValue2 string) string {
-	return testAccClusterEndpointBaseConfig(n) + fmt.Sprintf(`
+	return acctest.ConfigCompose(
+		testAccClusterEndpointBaseConfig(n),
+		fmt.Sprintf(`
 resource "aws_rds_cluster_endpoint" "reader" {
   cluster_identifier          = aws_rds_cluster.default.id
   cluster_endpoint_identifier = "reader-%[1]d"
@@ -298,5 +314,5 @@ resource "aws_rds_cluster_endpoint" "reader" {
     %[4]q = %[5]q
   }
 }
-`, n, tagKey1, tagValue1, tagKey2, tagValue2)
+`, n, tagKey1, tagValue1, tagKey2, tagValue2))
 }
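
The refactor above swaps plain string concatenation for `acctest.ConfigCompose`, which joins configuration fragments in order — handy when a fragment is itself composed from other fragments. A sketch of the helper's behavior (the provider's real implementation lives in `internal/acctest` and may differ in details):

```go
package acctest

import "strings"

// configCompose concatenates Terraform configuration fragments in the
// order given; a sketch of the ConfigCompose helper used in the tests
// above.
func configCompose(fragments ...string) string {
	var b strings.Builder
	for _, fragment := range fragments {
		b.WriteString(fragment)
	}
	return b.String()
}
```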