diff --git a/docs/data-sources/database.md b/docs/data-sources/database.md
new file mode 100644
index 00000000..de18addf
--- /dev/null
+++ b/docs/data-sources/database.md
@@ -0,0 +1,47 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "redshift_database Data Source - terraform-provider-redshift"
+subcategory: ""
+description: |-
+ Fetches information about a Redshift database.
+---
+
+# redshift_database (Data Source)
+
+Fetches information about a Redshift database.
+
+## Example Usage
+
+```terraform
+data "redshift_database" "database" {
+ name = "my_database"
+}
+```
+
+
+## Schema
+
+### Required
+
+- **name** (String) Name of the database
+
+### Optional
+
+- **datashare_source** (Block List, Max: 1) Configuration for a database created from a redshift datashare. (see [below for nested schema](#nestedblock--datashare_source))
+- **id** (String) The ID of this resource.
+
+### Read-Only
+
+- **connection_limit** (Number) The maximum number of concurrent connections that can be made to this database. A value of -1 means no limit.
+- **owner** (String) Owner of the database, usually the user who created it
+
+
+### Nested Schema for `datashare_source`
+
+Optional:
+
+- **account_id** (String) The AWS account ID of the producer cluster.
+- **namespace** (String) The namespace (guid) of the producer cluster
+- **share_name** (String) The name of the datashare on the producer cluster
+
+
diff --git a/docs/resources/database.md b/docs/resources/database.md
new file mode 100644
index 00000000..5c3d9f56
--- /dev/null
+++ b/docs/resources/database.md
@@ -0,0 +1,69 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "redshift_database Resource - terraform-provider-redshift"
+subcategory: ""
+description: |-
+ Defines a local database.
+---
+
+# redshift_database (Resource)
+
+Defines a local database.
+
+## Example Usage
+
+```terraform
+# Example resource declaration of a local database
+resource "redshift_database" "db" {
+ name = "my_database"
+ owner = "my_user"
+ connection_limit = 123456 # use -1 for unlimited
+
+ lifecycle {
+ prevent_destroy = true
+ }
+}
+
+
+# Example resource declaration of a database
+# created from a datashare of another redshift cluster
+resource "redshift_database" "datashare_db" {
+ name = "my_datashare_consumer_db"
+ owner = "my_user"
+ connection_limit = 123456 # use -1 for unlimited
+
+ datashare_source {
+ share_name = "my_datashare"
+ account_id = "123456789012" # 12 digit AWS account number of the producer cluster (optional, default is current account)
+ namespace = "00000000-0000-0000-0000-000000000000" # producer cluster namespace (uuid)
+ }
+}
+```
+
+
+## Schema
+
+### Required
+
+- **name** (String) Name of the database
+
+### Optional
+
+- **connection_limit** (Number) The maximum number of concurrent connections that can be made to this database. A value of -1 means no limit.
+- **datashare_source** (Block List, Max: 1) Configuration for creating a database from a redshift datashare. (see [below for nested schema](#nestedblock--datashare_source))
+- **id** (String) The ID of this resource.
+- **owner** (String) Owner of the database, usually the user who created it
+
+
+### Nested Schema for `datashare_source`
+
+Required:
+
+- **namespace** (String) The namespace (guid) of the producer cluster
+- **share_name** (String) The name of the datashare on the producer cluster
+
+Optional:
+
+- **account_id** (String) The AWS account ID of the producer cluster.
+
+
diff --git a/examples/data-sources/redshift_database/data-source.tf b/examples/data-sources/redshift_database/data-source.tf
new file mode 100644
index 00000000..4415db04
--- /dev/null
+++ b/examples/data-sources/redshift_database/data-source.tf
@@ -0,0 +1,3 @@
+data "redshift_database" "database" {
+ name = "my_database"
+}
diff --git a/examples/resources/redshift_database/resource.tf b/examples/resources/redshift_database/resource.tf
new file mode 100644
index 00000000..84114af6
--- /dev/null
+++ b/examples/resources/redshift_database/resource.tf
@@ -0,0 +1,25 @@
+# Example resource declaration of a local database
+resource "redshift_database" "db" {
+ name = "my_database"
+ owner = "my_user"
+ connection_limit = 123456 # use -1 for unlimited
+
+ lifecycle {
+ prevent_destroy = true
+ }
+}
+
+
+# Example resource declaration of a database
+# created from a datashare of another redshift cluster
+resource "redshift_database" "datashare_db" {
+ name = "my_datashare_consumer_db"
+ owner = "my_user"
+ connection_limit = 123456 # use -1 for unlimited
+
+ datashare_source {
+ share_name = "my_datashare"
+ account_id = "123456789012" # 12 digit AWS account number of the producer cluster (optional, default is current account)
+ namespace = "00000000-0000-0000-0000-000000000000" # producer cluster namespace (uuid)
+ }
+}
diff --git a/redshift/custom_diff.go b/redshift/custom_diff.go
new file mode 100644
index 00000000..58564c9f
--- /dev/null
+++ b/redshift/custom_diff.go
@@ -0,0 +1,16 @@
+package redshift
+
+import (
+ "context"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
// forceNewIfListSizeChanged returns a CustomizeDiffFunc that forces resource
// re-creation when the number of elements of the list attribute identified by
// key changes (i.e. a nested block is added or removed).
func forceNewIfListSizeChanged(key string) schema.CustomizeDiffFunc {
	return customdiff.ForceNewIfChange(key, listSizeChanged)
}
+
// listSizeChanged reports whether the two list values have different lengths.
// It is used as the change predicate for customdiff.ForceNewIfChange, so the
// signature (including the unused ctx and meta parameters) is fixed by the
// SDK. The second value parameter is named newValue to avoid shadowing the
// predeclared identifier new.
func listSizeChanged(ctx context.Context, oldValue, newValue, meta interface{}) bool {
	return len(oldValue.([]interface{})) != len(newValue.([]interface{}))
}
diff --git a/redshift/data_source_redshift_database.go b/redshift/data_source_redshift_database.go
new file mode 100644
index 00000000..c72c552c
--- /dev/null
+++ b/redshift/data_source_redshift_database.go
@@ -0,0 +1,119 @@
+package redshift
+
+import (
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
// dataSourceRedshiftDatabase defines the "redshift_database" data source.
// The caller supplies only the database name; owner, connection limit and any
// datashare source information are computed from the cluster.
func dataSourceRedshiftDatabase() *schema.Resource {
	return &schema.Resource{
		Description: `Fetches information about a Redshift database.`,
		Read:        RedshiftResourceFunc(dataSourceRedshiftDatabaseRead),
		Schema: map[string]*schema.Schema{
			databaseNameAttr: {
				Type:        schema.TypeString,
				Required:    true,
				Description: "Name of the database",
				// Normalize to lower case so state is stable regardless of
				// the case used in configuration.
				StateFunc: func(val interface{}) string {
					return strings.ToLower(val.(string))
				},
			},
			databaseOwnerAttr: {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "Owner of the database, usually the user who created it",
			},
			databaseConnLimitAttr: {
				Type:        schema.TypeInt,
				Computed:    true,
				Description: "The maximum number of concurrent connections that can be made to this database. A value of -1 means no limit.",
			},
			// Populated only when the database was created from a datashare;
			// otherwise the list is left empty by the read function.
			databaseDatashareSourceAttr: {
				Type:        schema.TypeList,
				Optional:    true,
				MaxItems:    1,
				Description: "Configuration for a database created from a redshift datashare.",
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						databaseDatashareSourceShareNameAttr: {
							Type:        schema.TypeString,
							Optional:    true,
							Computed:    true,
							Description: "The name of the datashare on the producer cluster",
							StateFunc: func(val interface{}) string {
								return strings.ToLower(val.(string))
							},
						},
						databaseDatashareSourceNamespaceAttr: {
							Type:        schema.TypeString,
							Optional:    true,
							Computed:    true,
							Description: "The namespace (guid) of the producer cluster",
							StateFunc: func(val interface{}) string {
								return strings.ToLower(val.(string))
							},
						},
						databaseDatashareSourceAccountAttr: {
							Type:        schema.TypeString,
							Optional:    true,
							Computed:    true,
							Description: "The AWS account ID of the producer cluster.",
						},
					},
				},
			},
		},
	}
}
+
+func dataSourceRedshiftDatabaseRead(db *DBConnection, d *schema.ResourceData) error {
+ var id, owner, connLimit, databaseType, shareName, producerAccount, producerNamespace string
+
+ err := db.QueryRow(`SELECT
+ pg_database_info.datid,
+ trim(pg_user_info.usename),
+ COALESCE(pg_database_info.datconnlimit::text, 'UNLIMITED'),
+ svv_redshift_databases.database_type,
+ trim(COALESCE(svv_datashares.share_name, '')),
+ trim(COALESCE(svv_datashares.producer_account, '')),
+ trim(COALESCE(svv_datashares.producer_namespace, ''))
+FROM
+ svv_redshift_databases
+LEFT JOIN pg_database_info
+ ON svv_redshift_databases.database_name=pg_database_info.datname
+LEFT JOIN pg_user_info
+ ON pg_user_info.usesysid = svv_redshift_databases.database_owner
+LEFT JOIN svv_datashares
+ ON (svv_redshift_databases.database_name = svv_datashares.consumer_database AND svv_redshift_databases.database_type = 'shared' AND svv_datashares.share_type = 'INBOUND')
+WHERE svv_redshift_databases.database_name = $1
+ `, d.Get(databaseNameAttr).(string)).Scan(&id, &owner, &connLimit, &databaseType, &shareName, &producerAccount, &producerNamespace)
+
+ if err != nil {
+ return err
+ }
+
+ connLimitNumber := -1
+ if connLimit != "UNLIMITED" {
+ if connLimitNumber, err = strconv.Atoi(connLimit); err != nil {
+ return err
+ }
+ }
+
+ d.SetId(id)
+ d.Set(databaseOwnerAttr, owner)
+ d.Set(databaseConnLimitAttr, connLimitNumber)
+
+ dataShareConfiguration := make([]map[string]interface{}, 0, 1)
+ if databaseType == "shared" {
+ config := make(map[string]interface{})
+ config[databaseDatashareSourceShareNameAttr] = &shareName
+ config[databaseDatashareSourceAccountAttr] = &producerAccount
+ config[databaseDatashareSourceNamespaceAttr] = &producerNamespace
+ dataShareConfiguration = append(dataShareConfiguration, config)
+ }
+ d.Set(databaseDatashareSourceAttr, dataShareConfiguration)
+
+ return nil
+}
diff --git a/redshift/data_source_redshift_database_test.go b/redshift/data_source_redshift_database_test.go
new file mode 100644
index 00000000..4cf2b28d
--- /dev/null
+++ b/redshift/data_source_redshift_database_test.go
@@ -0,0 +1,42 @@
+package redshift
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+)
+
// TestAccDataSourceRedshiftDatabase_basic is an acceptance test: it creates a
// database via the resource, reads it back through the data source, and
// verifies name, owner, connection limit and that no datashare block is set.
func TestAccDataSourceRedshiftDatabase_basic(t *testing.T) {
	// Redshift identifiers cannot contain dashes, so replace them.
	dbName := strings.ReplaceAll(acctest.RandomWithPrefix("tf_acc_data_basic"), "-", "_")
	resource.ParallelTest(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: testAccDataSourceRedshiftDatabaseConfig_basic(dbName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckDatabaseExists(dbName),
					resource.TestCheckResourceAttr("data.redshift_database.db", databaseNameAttr, dbName),
					resource.TestCheckResourceAttrSet("data.redshift_database.db", databaseOwnerAttr),
					resource.TestCheckResourceAttrSet("data.redshift_database.db", databaseConnLimitAttr),
					// A plain (non-datashare) database must have an empty datashare_source list.
					resource.TestCheckResourceAttr("data.redshift_database.db", fmt.Sprintf("%s.#", databaseDatashareSourceAttr), "0"),
				),
			},
		},
	})
}
+
// testAccDataSourceRedshiftDatabaseConfig_basic renders a minimal config that
// creates a database and reads it back through the data source.
func testAccDataSourceRedshiftDatabaseConfig_basic(dbName string) string {
	return fmt.Sprintf(`
resource "redshift_database" "db" {
  %[1]s = %[2]q
}

data "redshift_database" "db" {
  %[1]s = redshift_database.db.%[1]s
}
	`, databaseNameAttr, dbName)
}
diff --git a/redshift/provider.go b/redshift/provider.go
index 988ebfa6..29b9ced9 100644
--- a/redshift/provider.go
+++ b/redshift/provider.go
@@ -120,11 +120,13 @@ func Provider() *schema.Provider {
"redshift_group": redshiftGroup(),
"redshift_schema": redshiftSchema(),
"redshift_privilege": redshiftPrivilege(),
+ "redshift_database": redshiftDatabase(),
},
DataSourcesMap: map[string]*schema.Resource{
- "redshift_user": dataSourceRedshiftUser(),
- "redshift_group": dataSourceRedshiftGroup(),
- "redshift_schema": dataSourceRedshiftSchema(),
+ "redshift_user": dataSourceRedshiftUser(),
+ "redshift_group": dataSourceRedshiftGroup(),
+ "redshift_schema": dataSourceRedshiftSchema(),
+ "redshift_database": dataSourceRedshiftDatabase(),
},
ConfigureFunc: providerConfigure,
}
diff --git a/redshift/resource_redshift_database.go b/redshift/resource_redshift_database.go
new file mode 100644
index 00000000..c63d8d90
--- /dev/null
+++ b/redshift/resource_redshift_database.go
@@ -0,0 +1,342 @@
+package redshift
+
+import (
+ "database/sql"
+ "fmt"
+ "log"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+ "github.com/lib/pq"
+)
+
// Attribute names shared by the redshift_database resource and data source.
const databaseNameAttr = "name"
const databaseOwnerAttr = "owner"
const databaseConnLimitAttr = "connection_limit"
const databaseDatashareSourceAttr = "datashare_source"
const databaseDatashareSourceShareNameAttr = "share_name"
const databaseDatashareSourceNamespaceAttr = "namespace"
const databaseDatashareSourceAccountAttr = "account_id"

// awsAccountIdRegexp matches a 12-digit AWS account number.
var awsAccountIdRegexp = regexp.MustCompile(`^\d{12}$`)
+
// redshiftDatabase defines the "redshift_database" resource. A database can
// be created either locally or from an inbound datashare (via the
// datashare_source block); adding or removing that block forces re-creation,
// which is handled by the CustomizeDiff below.
func redshiftDatabase() *schema.Resource {
	return &schema.Resource{
		Description: `Defines a local database.`,
		Exists:      RedshiftResourceExistsFunc(resourceRedshiftDatabaseExists),
		Create:      RedshiftResourceFunc(resourceRedshiftDatabaseCreate),
		Read:        RedshiftResourceFunc(resourceRedshiftDatabaseRead),
		Update:      RedshiftResourceFunc(resourceRedshiftDatabaseUpdate),
		Delete:      RedshiftResourceFunc(resourceRedshiftDatabaseDelete),
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
		// ForceNew on the nested attributes alone does not catch the block
		// being added or removed, so force new when the list length changes.
		CustomizeDiff: forceNewIfListSizeChanged(databaseDatashareSourceAttr),
		Schema: map[string]*schema.Schema{
			databaseNameAttr: {
				Type:        schema.TypeString,
				Required:    true,
				Description: "Name of the database",
				// Normalize to lower case so state is stable regardless of
				// the case used in configuration.
				StateFunc: func(val interface{}) string {
					return strings.ToLower(val.(string))
				},
			},
			databaseOwnerAttr: {
				Type:        schema.TypeString,
				Optional:    true,
				Computed:    true,
				Description: "Owner of the database, usually the user who created it",
				StateFunc: func(val interface{}) string {
					return strings.ToLower(val.(string))
				},
			},
			databaseConnLimitAttr: {
				Type:         schema.TypeInt,
				Optional:     true,
				Description:  "The maximum number of concurrent connections that can be made to this database. A value of -1 means no limit.",
				Default:      -1,
				ValidateFunc: validation.IntAtLeast(-1),
			},
			databaseDatashareSourceAttr: {
				Type:        schema.TypeList,
				Optional:    true,
				MaxItems:    1,
				Description: "Configuration for creating a database from a redshift datashare.",
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						databaseDatashareSourceShareNameAttr: {
							Type:        schema.TypeString,
							Required:    true,
							ForceNew:    true,
							Description: "The name of the datashare on the producer cluster",
							StateFunc: func(val interface{}) string {
								return strings.ToLower(val.(string))
							},
						},
						databaseDatashareSourceNamespaceAttr: {
							Type:        schema.TypeString,
							Required:    true,
							ForceNew:    true,
							Description: "The namespace (guid) of the producer cluster",
							StateFunc: func(val interface{}) string {
								return strings.ToLower(val.(string))
							},
						},
						databaseDatashareSourceAccountAttr: {
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
							// Computed because Redshift defaults this to the
							// current account when not specified.
							Computed:     true,
							Description:  "The AWS account ID of the producer cluster.",
							ValidateFunc: validation.StringMatch(awsAccountIdRegexp, "AWS account id must be a 12-digit number"),
						},
					},
				},
			},
		},
	}
}
+
+func resourceRedshiftDatabaseExists(db *DBConnection, d *schema.ResourceData) (bool, error) {
+ var name string
+ query := "SELECT datname FROM pg_database WHERE oid = $1"
+ log.Printf("[DEBUG] check if database exists: %s\n", query)
+ err := db.QueryRow(query, d.Id()).Scan(&name)
+
+ switch {
+ case err == sql.ErrNoRows:
+ return false, nil
+ case err != nil:
+ return false, err
+ }
+
+ return true, nil
+}
+
+func resourceRedshiftDatabaseCreate(db *DBConnection, d *schema.ResourceData) error {
+ if _, isDataShare := d.GetOk(fmt.Sprintf("%s.0.%s", databaseDatashareSourceAttr, databaseDatashareSourceShareNameAttr)); isDataShare {
+ return resourceRedshiftDatabaseCreateFromDatashare(db, d)
+ }
+ return resourceRedshiftDatabaseCreateInternal(db, d)
+}
+
// resourceRedshiftDatabaseCreateFromDatashare creates a consumer database from
// an inbound datashare (CREATE DATABASE ... FROM DATASHARE ... OF [ACCOUNT]
// NAMESPACE ...) and then applies owner and connection-limit settings via
// ALTER DATABASE, since the datashare CREATE form does not accept them.
func resourceRedshiftDatabaseCreateFromDatashare(db *DBConnection, d *schema.ResourceData) error {
	dbName := d.Get(databaseNameAttr).(string)
	shareName := d.Get(fmt.Sprintf("%s.0.%s", databaseDatashareSourceAttr, databaseDatashareSourceShareNameAttr)).(string)
	query := fmt.Sprintf("CREATE DATABASE %s FROM DATASHARE %s OF", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(shareName))
	// ACCOUNT is optional; Redshift defaults to the current account.
	if sourceAccount, ok := d.GetOk(fmt.Sprintf("%s.0.%s", databaseDatashareSourceAttr, databaseDatashareSourceAccountAttr)); ok {
		query = fmt.Sprintf("%s ACCOUNT '%s'", query, pqQuoteLiteral(sourceAccount.(string)))
	}
	namespace := d.Get(fmt.Sprintf("%s.0.%s", databaseDatashareSourceAttr, databaseDatashareSourceNamespaceAttr))
	query = fmt.Sprintf("%s NAMESPACE '%s'", query, pqQuoteLiteral(namespace.(string)))

	if _, err := db.Exec(query); err != nil {
		return err
	}

	// Eagerly record the resource ID (the database oid) in case the ALTER
	// statements below fail for some reason.
	var oid string
	query = "SELECT oid FROM pg_database WHERE datname = $1"
	log.Printf("[DEBUG] get oid from database: %s\n", query)
	if err := db.QueryRow(query, strings.ToLower(dbName)).Scan(&oid); err != nil {
		return err
	}
	d.SetId(oid)

	// CREATE DATABASE isn't allowed to run inside a transaction, however
	// ALTER DATABASE can be.
	tx, err := startTransaction(db.client, "")
	if err != nil {
		return err
	}
	defer deferredRollback(tx)

	// CREATE DATABASE FROM DATASHARE... doesn't allow specifying an owner in
	// the create statement, so set the owner after creation with ALTER DATABASE.
	owner, ownerIsSet := d.GetOk(databaseOwnerAttr)
	if ownerIsSet {
		if _, err = tx.Exec(fmt.Sprintf("ALTER DATABASE %s OWNER TO %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner.(string)))); err != nil {
			return err
		}
	}

	// Likewise, the connection limit can only be applied after creation.
	// NOTE(review): d.GetOk returns false for the zero value, so an explicit
	// connection_limit of 0 is skipped here — confirm that is intended.
	connLimit, connLimitIsSet := d.GetOk(databaseConnLimitAttr)
	if connLimitIsSet {
		if _, err = tx.Exec(fmt.Sprintf("ALTER DATABASE %s CONNECTION LIMIT %d", pq.QuoteIdentifier(dbName), connLimit.(int))); err != nil {
			return err
		}
	}
	if err = tx.Commit(); err != nil {
		return err
	}

	return resourceRedshiftDatabaseRead(db, d)
}
+
// resourceRedshiftDatabaseCreateInternal creates a plain local database with
// optional OWNER and CONNECTION LIMIT clauses, then resolves and stores its
// oid as the resource ID.
func resourceRedshiftDatabaseCreateInternal(db *DBConnection, d *schema.ResourceData) error {
	dbName := d.Get(databaseNameAttr).(string)
	query := fmt.Sprintf("CREATE DATABASE %s", pq.QuoteIdentifier(dbName))

	if v, ok := d.GetOk(databaseOwnerAttr); ok {
		query = fmt.Sprintf("%s OWNER %s", query, pq.QuoteIdentifier(v.(string)))
	}
	// NOTE(review): d.GetOk returns false for the zero value, so an explicit
	// connection_limit of 0 would be omitted from the CREATE statement —
	// confirm that is intended. The schema default of -1 is handled here.
	if v, ok := d.GetOk(databaseConnLimitAttr); ok {
		query = fmt.Sprintf("%s CONNECTION LIMIT %d", query, v.(int))
	}
	log.Printf("[DEBUG] create database %s: %s\n", dbName, query)
	if _, err := db.Exec(query); err != nil {
		return err
	}

	// Look the oid up by the lower-cased name, since Redshift folds
	// identifiers to lower case.
	var oid string
	query = "SELECT oid FROM pg_database WHERE datname = $1"
	log.Printf("[DEBUG] get oid from database: %s\n", query)
	if err := db.QueryRow(query, strings.ToLower(dbName)).Scan(&oid); err != nil {
		return err
	}

	d.SetId(oid)

	return resourceRedshiftDatabaseRead(db, d)
}
+
+func resourceRedshiftDatabaseRead(db *DBConnection, d *schema.ResourceData) error {
+ var name, owner, connLimit, databaseType, shareName, producerAccount, producerNamespace string
+
+ query := `SELECT
+ trim(svv_redshift_databases.database_name),
+ trim(pg_user_info.usename),
+ COALESCE(pg_database_info.datconnlimit::text, 'UNLIMITED'),
+ svv_redshift_databases.database_type,
+ trim(COALESCE(svv_datashares.share_name, '')),
+ trim(COALESCE(svv_datashares.producer_account, '')),
+ trim(COALESCE(svv_datashares.producer_namespace, ''))
+FROM
+ svv_redshift_databases
+LEFT JOIN pg_database_info
+ ON svv_redshift_databases.database_name=pg_database_info.datname
+LEFT JOIN pg_user_info
+ ON pg_user_info.usesysid = svv_redshift_databases.database_owner
+LEFT JOIN svv_datashares
+ ON (svv_redshift_databases.database_name = svv_datashares.consumer_database AND svv_redshift_databases.database_type = 'shared' AND svv_datashares.share_type = 'INBOUND')
+WHERE pg_database_info.datid = $1
+`
+ log.Printf("[DEBUG] read database: %s\n", query)
+ err := db.QueryRow(query, d.Id()).Scan(&name, &owner, &connLimit, &databaseType, &shareName, &producerAccount, &producerNamespace)
+
+ if err != nil {
+ return err
+ }
+
+ connLimitNumber := -1
+ if connLimit != "UNLIMITED" {
+ if connLimitNumber, err = strconv.Atoi(connLimit); err != nil {
+ return err
+ }
+ }
+
+ d.Set(databaseNameAttr, name)
+ d.Set(databaseOwnerAttr, owner)
+ d.Set(databaseConnLimitAttr, connLimitNumber)
+
+ dataShareConfiguration := make([]map[string]interface{}, 0, 1)
+ if databaseType == "shared" {
+ config := make(map[string]interface{})
+ config[databaseDatashareSourceShareNameAttr] = &shareName
+ config[databaseDatashareSourceAccountAttr] = &producerAccount
+ config[databaseDatashareSourceNamespaceAttr] = &producerNamespace
+ dataShareConfiguration = append(dataShareConfiguration, config)
+ }
+ d.Set(databaseDatashareSourceAttr, dataShareConfiguration)
+
+ return nil
+}
+
+func resourceRedshiftDatabaseUpdate(db *DBConnection, d *schema.ResourceData) error {
+ tx, err := startTransaction(db.client, "")
+ if err != nil {
+ return err
+ }
+ defer deferredRollback(tx)
+
+ if err := setDatabaseName(tx, d); err != nil {
+ return err
+ }
+
+ if err := setDatabaseOwner(tx, d); err != nil {
+ return err
+ }
+
+ if err := setDatabaseConnLimit(tx, d); err != nil {
+ return err
+ }
+
+ if err = tx.Commit(); err != nil {
+ return fmt.Errorf("could not commit transaction: %w", err)
+ }
+
+ return resourceRedshiftDatabaseRead(db, d)
+}
+
+func setDatabaseName(tx *sql.Tx, d *schema.ResourceData) error {
+ if !d.HasChange(databaseNameAttr) {
+ return nil
+ }
+
+ oldRaw, newRaw := d.GetChange(databaseNameAttr)
+ oldValue := oldRaw.(string)
+ newValue := newRaw.(string)
+
+ if newValue == "" {
+ return fmt.Errorf("Error setting database name to an empty string")
+ }
+
+ query := fmt.Sprintf("ALTER DATABASE %s RENAME TO %s", pq.QuoteIdentifier(oldValue), pq.QuoteIdentifier(newValue))
+ log.Printf("[DEBUG] renaming database %s to %s: %s\n", oldValue, newValue, query)
+ if _, err := tx.Exec(query); err != nil {
+ return fmt.Errorf("Error updating database NAME: %w", err)
+ }
+
+ return nil
+}
+
+func setDatabaseOwner(tx *sql.Tx, d *schema.ResourceData) error {
+ if !d.HasChange(databaseOwnerAttr) {
+ return nil
+ }
+
+ databaseName := d.Get(databaseNameAttr).(string)
+ databaseOwner := d.Get(databaseOwnerAttr).(string)
+
+ query := fmt.Sprintf("ALTER DATABASE %s OWNER TO %s", pq.QuoteIdentifier(databaseName), pq.QuoteIdentifier(databaseOwner))
+ log.Printf("[DEBUG] changing database owner: %s\n", query)
+ _, err := tx.Exec(query)
+ return err
+}
+
+func setDatabaseConnLimit(tx *sql.Tx, d *schema.ResourceData) error {
+ if !d.HasChange(databaseConnLimitAttr) {
+ return nil
+ }
+
+ databaseName := d.Get(databaseNameAttr).(string)
+ connLimit := d.Get(databaseConnLimitAttr).(int)
+ query := fmt.Sprintf("ALTER DATABASE %s CONNECTION LIMIT %d", pq.QuoteIdentifier(databaseName), connLimit)
+ log.Printf("[DEBUG] changing database connection limit: %s\n", query)
+ _, err := tx.Exec(query)
+ return err
+}
+
+func resourceRedshiftDatabaseDelete(db *DBConnection, d *schema.ResourceData) error {
+ databaseName := d.Get(databaseNameAttr).(string)
+
+ query := fmt.Sprintf("DROP DATABASE %s", pqQuoteLiteral(databaseName))
+ log.Printf("[DEBUG] dropping database %s: %s\n", databaseName, query)
+ _, err := db.Exec(query)
+ return err
+}
diff --git a/redshift/resource_redshift_database_test.go b/redshift/resource_redshift_database_test.go
new file mode 100644
index 00000000..77dacc9d
--- /dev/null
+++ b/redshift/resource_redshift_database_test.go
@@ -0,0 +1,160 @@
+package redshift
+
+import (
+ "database/sql"
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+)
+
// TestAccResourceRedshiftDatabase_Basic creates a minimal database resource,
// verifies its attributes, and checks that import round-trips the state.
func TestAccResourceRedshiftDatabase_Basic(t *testing.T) {
	// Redshift identifiers cannot contain dashes, so replace them.
	dbName := strings.ReplaceAll(acctest.RandomWithPrefix("tf_acc_resource_basic"), "-", "_")
	resource.ParallelTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckRedshiftDatabaseDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccResourceRedshiftDatabaseConfig_Basic(dbName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckDatabaseExists(dbName),
					resource.TestCheckResourceAttr("redshift_database.db", databaseNameAttr, dbName),
					resource.TestCheckResourceAttrSet("redshift_database.db", databaseOwnerAttr),
					resource.TestCheckResourceAttrSet("redshift_database.db", databaseConnLimitAttr),
				),
			},
			{
				ResourceName:      "redshift_database.db",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
+
// testAccResourceRedshiftDatabaseConfig_Basic renders a minimal database
// resource configuration with only the name set.
func testAccResourceRedshiftDatabaseConfig_Basic(dbName string) string {
	return fmt.Sprintf(`
resource "redshift_database" "db" {
  %[1]s = %[2]q
}
	`, databaseNameAttr, dbName)
}
+
// TestAccResourceRedshiftDatabase_Update creates a database and then updates
// its name, owner and connection limit in a second apply, verifying each
// attribute afterwards.
func TestAccResourceRedshiftDatabase_Update(t *testing.T) {

	// Redshift identifiers cannot contain dashes, so replace them.
	dbNameOriginal := strings.ReplaceAll(acctest.RandomWithPrefix("tf_acc_resource_original"), "-", "_")
	dbNameNew := strings.ReplaceAll(acctest.RandomWithPrefix("tf_acc_resource_update"), "-", "_")
	userName := strings.ReplaceAll(acctest.RandomWithPrefix("tf_acc_resource_update"), "-", "_")

	configCreate := fmt.Sprintf(`
resource "redshift_database" "db" {
  %[1]s = %[2]q
}
`, databaseNameAttr, dbNameOriginal)

	// The update step renames the database, assigns a freshly created user as
	// owner, and sets an explicit connection limit of 0.
	configUpdate := fmt.Sprintf(`
resource "redshift_database" "db" {
  %[1]s = %[2]q
  %[3]s = redshift_user.user.%[4]s
  %[5]s = 0
}

resource "redshift_user" "user" {
  %[4]s = %[6]q
}
	`, databaseNameAttr, dbNameNew, databaseOwnerAttr, userNameAttr, databaseConnLimitAttr, userName)

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckRedshiftDatabaseDestroy,
		Steps: []resource.TestStep{
			{
				Config: configCreate,
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckDatabaseExists(dbNameOriginal),
					resource.TestCheckResourceAttr("redshift_database.db", databaseNameAttr, dbNameOriginal),
					resource.TestCheckResourceAttrSet("redshift_database.db", databaseOwnerAttr),
					resource.TestCheckResourceAttrSet("redshift_database.db", databaseConnLimitAttr),
				),
			},
			{
				Config: configUpdate,
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckDatabaseExists(dbNameNew),
					resource.TestCheckResourceAttr("redshift_database.db", databaseNameAttr, dbNameNew),
					resource.TestCheckResourceAttr("redshift_database.db", databaseOwnerAttr, userName),
					resource.TestCheckResourceAttr("redshift_database.db", databaseConnLimitAttr, "0"),
				),
			},
		},
	})
}
+
// testAccResourceRedshiftDatabaseConfig_basic renders a minimal database
// resource configuration.
// NOTE(review): this appears to duplicate testAccResourceRedshiftDatabaseConfig_Basic
// (capital B) and is not referenced in this change — consider removing it.
func testAccResourceRedshiftDatabaseConfig_basic(dbName string) string {
	return fmt.Sprintf(`
resource "redshift_database" "db" {
  %[1]s = %[2]q
}
	`, databaseNameAttr, dbName)
}
+
+func testAccCheckRedshiftDatabaseDestroy(s *terraform.State) error {
+ client := testAccProvider.Meta().(*Client)
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "redshift_database" {
+ continue
+ }
+
+ exists, err := checkDatabaseExists(client, rs.Primary.ID)
+
+ if err != nil {
+ return fmt.Errorf("Error checking database %s", err)
+ }
+
+ if exists {
+ return fmt.Errorf("Database still exists after destroy")
+ }
+ }
+
+ return nil
+}
+
+func testAccCheckDatabaseExists(dbName string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ client := testAccProvider.Meta().(*Client)
+
+ exists, err := checkDatabaseExists(client, dbName)
+ if err != nil {
+ return fmt.Errorf("Error checking database %w", err)
+ }
+
+ if !exists {
+ return fmt.Errorf("Database not found")
+ }
+
+ return nil
+ }
+}
+
+func checkDatabaseExists(client *Client, database string) (bool, error) {
+ db, err := client.Connect()
+ if err != nil {
+ return false, err
+ }
+ var _rez int
+ err = db.QueryRow("SELECT 1 from pg_database WHERE datname=$1", strings.ToLower(database)).Scan(&_rez)
+ switch {
+ case err == sql.ErrNoRows:
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("Error reading info about database: %s", err)
+ }
+
+ return true, nil
+}