This repository was archived by the owner on Jan 25, 2023. It is now read-only.
Merged
Changes from 5 commits
6 changes: 3 additions & 3 deletions examples/vault-consul-ami/vault-consul.json
@@ -1,10 +1,10 @@
{
"min_packer_version": "0.12.0",
"min_packer_version": "1.5.4",
"variables": {
"aws_region": "us-east-1",
"vault_version": "1.5.4",
"vault_version": "1.6.1",
"consul_module_version": "v0.8.0",
"consul_version": "1.5.3",
"consul_version": "1.9.2",
"consul_download_url": "{{env `CONSUL_DOWNLOAD_URL`}}",
"vault_download_url": "{{env `VAULT_DOWNLOAD_URL`}}",
"install_auth_signing_script": "true",
9 changes: 5 additions & 4 deletions examples/vault-s3-backend/main.tf
@@ -63,10 +63,11 @@ data "template_file" "user_data_vault_cluster" {
template = file("${path.module}/user-data-vault.sh")

vars = {
- aws_region               = data.aws_region.current.name
- s3_bucket_name           = var.s3_bucket_name
- consul_cluster_tag_key   = var.consul_cluster_tag_key
- consul_cluster_tag_value = var.consul_cluster_name
+ aws_region                                 = data.aws_region.current.name
+ s3_bucket_name                             = var.s3_bucket_name
+ consul_cluster_tag_key                     = var.consul_cluster_tag_key
+ consul_cluster_tag_value                   = var.consul_cluster_name
+ consul_agent_service_registration_address  = var.consul_agent_service_registration_address
}
}

2 changes: 1 addition & 1 deletion examples/vault-s3-backend/user-data-vault.sh
@@ -16,4 +16,4 @@ readonly VAULT_TLS_KEY_FILE="/opt/vault/tls/vault.key.pem"

# The variables below are filled in via Terraform interpolation
/opt/consul/bin/run-consul --client --cluster-tag-key "${consul_cluster_tag_key}" --cluster-tag-value "${consul_cluster_tag_value}"
- /opt/vault/bin/run-vault --tls-cert-file "$VAULT_TLS_CERT_FILE" --tls-key-file "$VAULT_TLS_KEY_FILE" --enable-s3-backend --s3-bucket "${s3_bucket_name}" --s3-bucket-region "${aws_region}"
+ /opt/vault/bin/run-vault --tls-cert-file "$VAULT_TLS_CERT_FILE" --tls-key-file "$VAULT_TLS_KEY_FILE" --enable-s3-backend --s3-bucket "${s3_bucket_name}" --s3-bucket-region "${aws_region}" --consul-agent-service-registration-address "${consul_agent_service_registration_address}"
5 changes: 5 additions & 0 deletions examples/vault-s3-backend/variables.tf
@@ -87,3 +87,8 @@ variable "force_destroy_s3_bucket" {
default = false
}

variable "consul_agent_service_registration_address" {
description = "Specifies the address of the Consul agent to communicate with. This can be an IP address, DNS record, or unix socket. It is recommended that you communicate with a local Consul agent; do not communicate directly with a server."
type = string
default = "127.0.0.1:8500"
}
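The default points Vault at the Consul agent running locally on each instance, which is the recommended setup. To register against a different agent, the variable can be overridden when applying the example, for instance with a hypothetical terraform.tfvars entry like the sketch below (the address value is purely illustrative):

```hcl
# Illustrative override only; any address the instance can reach is valid:
# an IP:port pair, a DNS name, or a unix socket path.
consul_agent_service_registration_address = "10.0.1.15:8500"
```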
37 changes: 29 additions & 8 deletions modules/run-vault/run-vault
@@ -44,6 +44,7 @@ function print_usage {
echo -e " --s3-bucket\tSpecifies the S3 bucket to use to store Vault data. Only used if '--enable-s3-backend' is set."
echo -e " --s3-bucket-path\tSpecifies the S3 bucket path to use to store Vault data. Only used if '--enable-s3-backend' is set."
echo -e " --s3-bucket-region\tSpecifies the AWS region where '--s3-bucket' lives. Only used if '--enable-s3-backend' is set."
echo -e " --consul-agent-service-registration-address\tSpecifies the address of the Consul agent to communicate with when using a different storage backend, in this case an S3 backend. Only used if '--enable-s3-backend' is set."
echo -e " --enable-dynamo-backend\tIf this flag is set, DynamoDB will be enabled as the backend storage (HA)"
echo -e " --dynamo-region\tSpecifies the AWS region where --dynamo-table lives. Only used if '--enable-dynamo-backend is on'"
echo -e " --dynamo--table\tSpecifies the DynamoDB table to use for HA Storage. Only used if '--enable-dynamo-backend is on'"
@@ -73,7 +74,7 @@ function print_usage {
echo
echo "Or"
echo
echo " run-vault --tls-cert-file /opt/vault/tls/vault.crt.pem --tls-key-file /opt/vault/tls/vault.key.pem --enable-s3-backend --s3-bucket my-vault-bucket --s3-bucket-region us-east-1"
echo " run-vault --tls-cert-file /opt/vault/tls/vault.crt.pem --tls-key-file /opt/vault/tls/vault.key.pem --enable-s3-backend --s3-bucket my-vault-bucket --s3-bucket-region us-east-1 --consul-agent-service-registration-address 127.0.0.1:8500"
}

function log {
@@ -237,13 +238,14 @@ function generate_vault_config {
local -r s3_bucket="$9"
local -r s3_bucket_path="${10}"
local -r s3_bucket_region="${11}"
local -r enable_dynamo_backend="${12}"
local -r dynamo_region="${13}"
local -r dynamo_table="${14}"
local -r consul_agent_service_registration_address="${12}"
local -r enable_dynamo_backend="${13}"
local -r dynamo_region="${14}"
local -r dynamo_table="${15}"
local -r enable_auto_unseal="${15}"
local -r auto_unseal_kms_key_id="${16}"
local -r auto_unseal_kms_key_region="${17}"
local -r auto_unseal_endpoint="${18}"
local -r auto_unseal_kms_key_id="${17}"
local -r auto_unseal_kms_key_region="${18}"
local -r auto_unseal_endpoint="${19}"
local -r config_path="$config_dir/$VAULT_CONFIG_FILE"

local instance_ip_address
@@ -288,6 +290,7 @@ EOF
local dynamodb_storage_type="storage"
local s3_config=""
local vault_storage_backend=""
local service_registration=""
if [[ "$enable_s3_backend" == "true" ]]; then
s3_config=$(cat <<EOF
storage "s3" {
@@ -299,9 +302,14 @@
)
consul_storage_type="ha_storage"
dynamodb_storage_type="ha_storage"
+ service_registration=$(cat <<EOF
+ service_registration "consul" {
+ address = "$consul_agent_service_registration_address"
+ }\n
+ EOF
+ )
fi


if [[ "$enable_dynamo_backend" == "true" ]]; then
vault_storage_backend=$(cat <<EOF
$dynamodb_storage_type "dynamodb" {
@@ -335,6 +343,7 @@ EOF
echo -e "$listener_config" >> "$config_path"
echo -e "$s3_config" >> "$config_path"
echo -e "$vault_storage_backend" >> "$config_path"
echo -e "$service_registration" >> "$config_path"

chown "$user:$user" "$config_path"
}
@@ -368,6 +377,8 @@ Documentation=https://www.vaultproject.io/docs/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=$config_path
+ StartLimitIntervalSec=60
+ StartLimitBurst=3

EOF
)
@@ -392,9 +403,12 @@ KillSignal=SIGINT
Restart=on-failure
RestartSec=5
TimeoutStopSec=30
StartLimitInterval=60
+ StartLimitIntervalSec=60
+ StartLimitBurst=3
LimitNOFILE=65536
LimitMEMLOCK=infinity

EOF
)

@@ -449,6 +463,7 @@ function run {
local s3_bucket=""
local s3_bucket_path=""
local s3_bucket_region=""
+ local consul_agent_service_registration_address=""
local enable_dynamo_backend="false"
local dynamo_region=""
local dynamo_table=""
@@ -547,6 +562,10 @@ function run {
s3_bucket_region="$2"
shift
;;
+ --consul-agent-service-registration-address)
+ consul_agent_service_registration_address="$2"
+ shift
+ ;;
--enable-dynamo-backend)
enable_dynamo_backend="true"
;;
@@ -639,6 +658,7 @@ function run {
if [[ "$enable_s3_backend" == "true" ]]; then
assert_not_empty "--s3-bucket" "$s3_bucket"
assert_not_empty "--s3-bucket-region" "$s3_bucket_region"
assert_not_empty "--consul-agent-service-registration-address" "$consul_agent_service_registration_address"
fi
fi

@@ -714,6 +734,7 @@ function run {
"$s3_bucket" \
"$s3_bucket_path" \
"$s3_bucket_region" \
"$consul_agent_service_registration_address" \
"$enable_dynamo_backend" \
"$dynamo_region" \
"$dynamo_table" \
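Taken together, the run-vault changes above mean that enabling the S3 backend now renders a service_registration stanza alongside the storage and ha_storage blocks. A rough sketch of the kind of Vault configuration the script ends up writing is shown below; the bucket, region, and address values are illustrative, and the real file also contains the listener block and other settings generated elsewhere in the script.

```hcl
# Sketch of the config generated with --enable-s3-backend (values illustrative).
# S3 holds Vault's data, Consul is used only for HA coordination, and the new
# service_registration stanza tells Vault which Consul agent to register with.
storage "s3" {
  bucket = "my-vault-bucket"
  region = "us-east-1"
}

ha_storage "consul" {
  address = "127.0.0.1:8500"
  path    = "vault/"
}

service_registration "consul" {
  address = "127.0.0.1:8500"
}
```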
1 change: 1 addition & 0 deletions test/go.sum
@@ -197,6 +197,7 @@ github.com/gruntwork-io/gruntwork-cli v0.5.1 h1:mVmVsFubUSLSCO8bGigI63HXzvzkC0uW
github.com/gruntwork-io/gruntwork-cli v0.5.1/go.mod h1:IBX21bESC1/LGoV7jhXKUnTQTZgQ6dYRsoj/VqxUSZQ=
github.com/gruntwork-io/terratest v0.28.15 h1:in1DRBq8/RjxMyb6Amr1SRrczOK/hGnPi+gQXOOtbZI=
github.com/gruntwork-io/terratest v0.28.15/go.mod h1:PkVylPuUNmItkfOTwSiFreYA4FkanK8AluBuNeGxQOw=
+ github.com/gruntwork-io/terratest v0.32.1 h1:Uho3H7VWD4tEulWov7pWW90V3XATLKxSh88AtrxTYvU=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
11 changes: 9 additions & 2 deletions test/vault_cluster_private_test.go
@@ -39,14 +39,21 @@ func runVaultPrivateClusterTest(t *testing.T, amiId string, awsRegion string, ss
VAR_CONSUL_CLUSTER_NAME: fmt.Sprintf("consul-test-%s", uniqueId),
VAR_CONSUL_CLUSTER_TAG_KEY: fmt.Sprintf("consul-test-%s", uniqueId),
}
- deployCluster(t, amiId, awsRegion, examplesDir, random.UniqueId(), terraformVars)
+ deployCluster(t, amiId, awsRegion, examplesDir, uniqueId, terraformVars)
})

test_structure.RunTestStage(t, "initialize_unseal", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)

initializeAndUnsealVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
})

test_structure.RunTestStage(t, "validate", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)

- cluster := initializeAndUnsealVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
+ cluster := getInitializedAndUnsealedVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
testVaultUsesConsulForDns(t, cluster)
})
}
12 changes: 10 additions & 2 deletions test/vault_cluster_public_test.go
@@ -46,14 +46,22 @@ func runVaultPublicClusterTest(t *testing.T, amiId string, awsRegion string, ssh
VAR_CONSUL_CLUSTER_NAME: fmt.Sprintf("consul-test-%s", uniqueId),
VAR_CONSUL_CLUSTER_TAG_KEY: fmt.Sprintf("consul-test-%s", uniqueId),
}
- deployCluster(t, amiId, awsRegion, examplesDir, random.UniqueId(), terraformVars)
+ deployCluster(t, amiId, awsRegion, examplesDir, uniqueId, terraformVars)
})

test_structure.RunTestStage(t, "validate", func() {
test_structure.RunTestStage(t, "initialize_unseal", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)

initializeAndUnsealVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
+ })
+
+ test_structure.RunTestStage(t, "validate", func() {
+ terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
+ keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)
+
+ cluster := getInitializedAndUnsealedVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
testVaultViaElb(t, terraformOptions)
+ testVaultUsesConsulForDns(t, cluster)
})
}
24 changes: 14 additions & 10 deletions test/vault_cluster_s3_backend_test.go
@@ -12,6 +12,7 @@ const VAULT_CLUSTER_S3_BACKEND_PATH = "examples/vault-s3-backend"

const VAR_S3_BUCKET_NAME = "s3_bucket_name"
const VAR_FORCE_DESTROY_S3_BUCKET = "force_destroy_s3_bucket"
+ const VAR_CONSUL_AGENT_SERVICE_REGISTRATION_ADDRESS = "consul_agent_service_registration_address"

// Test the Vault with S3 Backend example by:
//
@@ -39,24 +40,27 @@ func runVaultWithS3BackendClusterTest(t *testing.T, amiId string, awsRegion, ssh
test_structure.RunTestStage(t, "deploy", func() {
uniqueId := random.UniqueId()
terraformVars := map[string]interface{}{
- VAR_S3_BUCKET_NAME:          s3BucketName(uniqueId),
- VAR_FORCE_DESTROY_S3_BUCKET: true,
- VAR_CONSUL_CLUSTER_NAME:     fmt.Sprintf("consul-test-%s", uniqueId),
- VAR_CONSUL_CLUSTER_TAG_KEY:  fmt.Sprintf("consul-test-%s", uniqueId),
+ VAR_S3_BUCKET_NAME:                            s3BucketName(uniqueId),
+ VAR_FORCE_DESTROY_S3_BUCKET:                   true,
+ VAR_CONSUL_CLUSTER_NAME:                       fmt.Sprintf("consul-test-%s", uniqueId),
+ VAR_CONSUL_CLUSTER_TAG_KEY:                    fmt.Sprintf("consul-test-%s", uniqueId),
+ VAR_CONSUL_AGENT_SERVICE_REGISTRATION_ADDRESS: "127.0.0.1:8500",
}
deployCluster(t, amiId, awsRegion, examplesDir, uniqueId, terraformVars)
})

test_structure.RunTestStage(t, "validate", func() {
test_structure.RunTestStage(t, "initialize_unseal", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)

initializeAndUnsealVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
+ })
+
+ test_structure.RunTestStage(t, "validate", func() {
+ terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
+ keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)

- // TODO: temporarily disable DNS check until https://github.com/hashicorp/terraform-aws-consul/issues/155 is
- // fixed. See https://github.com/hashicorp/terraform-aws-vault/pull/222 for details.
- //
- // cluster := initializeAndUnsealVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, nil)
- // testVaultUsesConsulForDns(t, cluster)
+ cluster := getInitializedAndUnsealedVaultCluster(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair)
+ testVaultUsesConsulForDns(t, cluster)
})
}
62 changes: 61 additions & 1 deletion test/vault_helpers.go
@@ -191,6 +191,24 @@ func initializeAndUnsealVaultCluster(t *testing.T, asgNameOutputVar string, sshU
return cluster
}

+ // Find the initialized and unsealed Vault cluster,
+ // exit if cluster is not initialized and unsealed
+ func getInitializedAndUnsealedVaultCluster(t *testing.T, asgNameOutputVar string, sshUserName string, terraformOptions *terraform.Options, awsRegion string, keyPair *aws.Ec2Keypair) VaultCluster {
+ cluster := findVaultClusterNodes(t, asgNameOutputVar, sshUserName, terraformOptions, awsRegion, keyPair)
+
+ establishConnectionToCluster(t, cluster)
+ isInitializedAndUnsealed, err := isVaultClusterInitializedAndUnsealed(t, cluster)
+ if err != nil {
+ require.NoError(t, err, "Failed to check if the vault cluster is already initialized and unsealed")
+ }
+ // exit if cluster is not initialized and unsealed
+ if !isInitializedAndUnsealed {
+ t.Fatalf("Expected to find an initialized and unsealed cluster but it wasn't: [Leader: %s, Standby1: %s, Standby2: %s]", cluster.Leader.Hostname, cluster.Standby1.Hostname, cluster.Standby2.Hostname)
+ }
+
+ return cluster
+ }
+
// Find the nodes in the given Vault ASG and return them in a VaultCluster struct
func findVaultClusterNodes(t *testing.T, asgNameOutputVar string, sshUserName string, terraformOptions *terraform.Options, awsRegion string, keyPair *aws.Ec2Keypair) VaultCluster {
asgName := terraform.Output(t, terraformOptions, asgNameOutputVar)
@@ -401,7 +419,7 @@ func boolToTerraformVar(val bool) int {
}
}

- // Check that the Vault node at the given host has the given
+ // Check that the Vault node at the given host has the given status
func assertStatus(t *testing.T, host ssh.Host, expectedStatus VaultStatus) {
description := fmt.Sprintf("Check that the Vault node %s has status %d", host.Hostname, int(expectedStatus))
logger.Logf(t, description)
@@ -444,3 +462,45 @@ func checkStatus(t *testing.T, host ssh.Host, expectedStatus VaultStatus) (strin
return "", fmt.Errorf("Expected status code %d for host %s, but got %d", int(expectedStatus), host.Hostname, status)
}
}

+ // Check if the given Vault cluster has been initialized and unsealed.
+ func isVaultClusterInitializedAndUnsealed(t *testing.T, cluster VaultCluster) (bool, error) {
+ leader, err := hasExpectedStatus(t, cluster.Leader, Leader)
+ if err != nil {
+ return false, err
+ }
+ standby1, err := hasExpectedStatus(t, cluster.Standby1, Standby)
+ if err != nil {
+ return false, err
+ }
+ standby2, err := hasExpectedStatus(t, cluster.Standby2, Standby)
+ if err != nil {
+ return false, err
+ }
+ if leader && standby1 && standby2 {
+ return true, nil
+ } else {
+ return false, nil
+ }
+ }
+
+ // Check the status of the given Vault node and ensure it matches the expected status.
+ // Returns true if the status matches, false otherwise.
+ func hasExpectedStatus(t *testing.T, host ssh.Host, expectedStatus VaultStatus) (bool, error) {
+ curlCommand := "curl -s -o /dev/null -w '%{http_code}' https://127.0.0.1:8200/v1/sys/health"
+ logger.Logf(t, "Using curl to check status of Vault server %s: %s", host.Hostname, curlCommand)
+
+ output, err := ssh.CheckSshCommandE(t, host, curlCommand)
+ if err != nil {
+ return false, err
+ }
+ status, err := strconv.Atoi(output)
+ if err != nil {
+ return false, err
+ }
+ if status == int(expectedStatus) {
+ return true, nil
+ } else {
+ return false, nil
+ }
+ }
1 change: 1 addition & 0 deletions test/vault_main_test.go
@@ -89,6 +89,7 @@ func TestMainVaultCluster(t *testing.T) {

// os.Setenv("SKIP_setup_amis", "true")
// os.Setenv("SKIP_deploy", "true")
// os.Setenv("SKIP_initialize_unseal", "true")
// os.Setenv("SKIP_validate", "true")
// os.Setenv("SKIP_log", "true")
// os.Setenv("SKIP_teardown", "true")