-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: teardown.sh
More file actions
executable file
·90 lines (77 loc) · 4.11 KB
/
teardown.sh
File metadata and controls
executable file
·90 lines (77 loc) · 4.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
#!/usr/bin/env bash
# Tears down ALL AWS resources tagged with the project tag to avoid ongoing charges.
# Safe to run multiple times (idempotent). Also cleans up local state files.
set -euo pipefail

# Resolve the directory this script lives in so it can be invoked from anywhere.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# shellcheck disable=SC1091
source "${SCRIPT_DIR}/lib/common.sh"

activate_local_venv
ensure_dirs

# Restore identifiers recorded by a previous deploy, when present.
if [[ -f "${STATE_FILE}" ]]; then
  load_state_file
fi

log "Starting teardown for project tag ${PROJECT_TAG} in ${AWS_REGION}"
# Teardown order matters: instances first (releases ENIs), then EIPs, orphaned volumes,
# security groups (can't delete while instances reference them), and finally key pairs.
INSTANCE_IDS="$(find_managed_instances)"
if [ -n "${INSTANCE_IDS}" ] && [ "${INSTANCE_IDS}" != "None" ]; then
  # Split the whitespace-separated ID list into an array so each ID becomes its
  # own argument, instead of relying on unquoted word-splitting (which also
  # performs unwanted glob expansion — ShellCheck SC2086). Newlines are folded
  # to spaces first because `--output text` may emit one ID per line.
  read -r -a instance_ids <<< "${INSTANCE_IDS//$'\n'/ }"
  log "Terminating managed EC2 instance(s): ${INSTANCE_IDS}"
  aws_cli ec2 terminate-instances --instance-ids "${instance_ids[@]}" >/dev/null
  # Block until full termination so dependent resources (ENIs, SGs) are freed
  # before the later teardown steps run.
  aws_cli ec2 wait instance-terminated --instance-ids "${instance_ids[@]}"
else
  log "No managed EC2 instances are currently active."
fi
# Release any Elastic IPs so they stop incurring idle-EIP charges.
# The `|| true` keeps teardown going when the describe call fails (e.g. none exist).
EIP_ALLOCATIONS="$(aws_cli ec2 describe-addresses \
  --filters "Name=tag:Project,Values=${PROJECT_TAG}" "Name=tag:ManagedBy,Values=codex" \
  --query 'Addresses[].AllocationId' \
  --output text 2>/dev/null || true)"
if [ -n "${EIP_ALLOCATIONS}" ] && [ "${EIP_ALLOCATIONS}" != "None" ]; then
  # Intentional word-splitting: the text output is a whitespace-separated ID list.
  # shellcheck disable=SC2086
  for allocation_id in ${EIP_ALLOCATIONS}; do
    log "Releasing Elastic IP ${allocation_id}"
    aws_cli ec2 release-address --allocation-id "${allocation_id}"
  done
fi
# Delete orphaned EBS volumes. Restricting to status=available guarantees we
# only remove detached volumes; attached ones go away with their instance.
VOLUME_IDS="$(aws_cli ec2 describe-volumes \
  --filters "Name=tag:Project,Values=${PROJECT_TAG}" "Name=tag:ManagedBy,Values=codex" "Name=status,Values=available" \
  --query 'Volumes[].VolumeId' \
  --output text 2>/dev/null || true)"
if [ -n "${VOLUME_IDS}" ] && [ "${VOLUME_IDS}" != "None" ]; then
  # Intentional word-splitting: the text output is a whitespace-separated ID list.
  # shellcheck disable=SC2086
  for volume_id in ${VOLUME_IDS}; do
    log "Deleting available EBS volume ${volume_id}"
    aws_cli ec2 delete-volume --volume-id "${volume_id}"
  done
fi
# Delete the project's security group(s).
# NOTE: `describe-security-groups` with --filters exits 0 even when nothing
# matches, so the previous outer `if` guard (which also used a different,
# tag-less filter set) never short-circuited anything. Query once and skip on
# an empty result instead, matching the EIP/volume handling above.
SECURITY_GROUP_IDS="$(aws_cli ec2 describe-security-groups \
  --filters "Name=group-name,Values=${SECURITY_GROUP_NAME}" "Name=tag:Project,Values=${PROJECT_TAG}" \
  --query 'SecurityGroups[].GroupId' \
  --output text 2>/dev/null || true)"
if [ -n "${SECURITY_GROUP_IDS}" ] && [ "${SECURITY_GROUP_IDS}" != "None" ]; then
  # Intentional word-splitting: the text output is a whitespace-separated ID list.
  # shellcheck disable=SC2086
  for group_id in ${SECURITY_GROUP_IDS}; do
    log "Deleting security group ${group_id}"
    aws_cli ec2 delete-security-group --group-id "${group_id}"
  done
fi
# Key pairs: `describe-key-pairs --key-names` DOES exit non-zero when the key
# is absent, so this existence guard is meaningful.
if aws_cli ec2 describe-key-pairs --key-names "${KEY_NAME}" >/dev/null 2>&1; then
  log "Deleting key pair ${KEY_NAME}"
  aws_cli ec2 delete-key-pair --key-name "${KEY_NAME}"
fi

# Clean up local artifacts so the next deploy starts fresh.
rm -f "${KEY_PEM_PATH}" "${STATE_FILE}"

# Verification: print queries the user can run to confirm nothing was left behind.
log "Teardown verification queries:"
log " aws --region ${AWS_REGION} ec2 describe-instances --filters Name=tag:Project,Values=${PROJECT_TAG} Name=instance-state-name,Values=pending,running,stopping,stopped"
log " aws --region ${AWS_REGION} ec2 describe-security-groups --filters Name=tag:Project,Values=${PROJECT_TAG}"
log " aws --region ${AWS_REGION} ec2 describe-key-pairs --key-names ${KEY_NAME}"
log " aws --region ${AWS_REGION} ec2 describe-volumes --filters Name=tag:Project,Values=${PROJECT_TAG} Name=status,Values=available,in-use"
log " aws --region ${AWS_REGION} ec2 describe-addresses --filters Name=tag:Project,Values=${PROJECT_TAG}"

# Final safety check: fail loudly if any managed instances survived termination.
REMAINING_INSTANCES="$(find_managed_instances)"
if [ -n "${REMAINING_INSTANCES}" ] && [ "${REMAINING_INSTANCES}" != "None" ]; then
  die "Teardown incomplete: instances still active (${REMAINING_INSTANCES})"
fi

log "Teardown complete"