-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathalias
More file actions
208 lines (186 loc) · 6.79 KB
/
alias
File metadata and controls
208 lines (186 loc) · 6.79 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
[toplevel]
whoami =
  !f() {
    # Show which AWS profile is active, then dump the caller identity.
    printf 'Profile: %s\n' "${AWS_PROFILE:-default}"
    aws sts get-caller-identity --output=yaml
  }; f
# Print the bare account ID of the current caller.
id = sts get-caller-identity --query "Account" --output text
op-credentials =
  !f() {
    # credential_process helper: fetch AccessKeyId/SecretAccessKey from a
    # 1Password item and emit them as a Version-1 credentials JSON document.
    # $1 - 1Password item name; $2 - vault name (defaults to "Private")
    op --vault="${2:-Private}" item get "${1}" --format=json \
      --fields="label=AccessKeyId,label=SecretAccessKey" | \
      jq 'map({key: .label, value: .value}) | from_entries + {Version: 1}'
  }; f
op-credentials-mfa =
  !f() {
    # credential_process helper with caching: reuse MFA session credentials
    # stored per-profile under ~/.aws/cli/cache, refreshing them via
    # `aws sts op-mfa` when the cache is missing or expired.
    # $1 - 1Password item with base keys; $2 - 1Password item holding the OTP
    local CREDS CREDS_EXP EXP NOW
    local CACHE_DIR="${HOME}/.aws/cli/cache"
    local CACHE_FILE="${CACHE_DIR}/${AWS_PROFILE}.json"
    # make sure the cache directory exists before we try to write into it
    mkdir -p "$CACHE_DIR"
    # if the cache file exists, use it
    if [ -f "$CACHE_FILE" ]; then
      CREDS="$(cat "$CACHE_FILE")"
      CREDS_EXP="$(echo "$CREDS" | jq -r '.Credentials.Expiration')"
      # NOTE: BSD/macOS `date -jf` syntax; on parse failure fall back to 0
      # so an unreadable timestamp is treated as "expired" instead of
      # crashing the numeric comparison below.
      EXP="$(date -jf '%Y-%m-%dT%H:%M:%S' "$CREDS_EXP" "+%s" 2>/dev/null || echo 0)"
      EXP="${EXP:-0}"
      NOW="$(date +%s)"
      # regen cache if current one is expired
      if [ "$EXP" -lt "$NOW" ]; then
        CREDS="$(aws sts op-mfa "$1" "$2")"
        echo "$CREDS" > "$CACHE_FILE"
      fi
    else
      CREDS="$(aws sts op-mfa "$1" "$2")"
      echo "$CREDS" > "$CACHE_FILE"
    fi
    echo "$CREDS" | jq -r '.Credentials | . + {Version: 1}'
  }; f
jit =
  !f() {
    # Open a DuploCloud just-in-time web session; all arguments are passed
    # through to duploctl verbatim ("$@" keeps each one a separate word).
    duploctl jit web "$@"
  }; f
console =
  !f() {
    # Open the AWS web console sign-in page for the current account
    # (macOS `open`).
    local account
    account="$(aws id)"
    open "https://${account}.signin.aws.amazon.com/console"
  }; f
temp-session =
  !f() {
    # Print `export` lines for a temporary admin session; load them into the
    # current shell with: eval "$(aws temp-session)".
    local creds
    creds="$(aws sts temp)"
    cat <<EOF
    export AWS_ACCESS_KEY_ID="$(echo "$creds" | jq -r '.Credentials.AccessKeyId')"
    export AWS_SECRET_ACCESS_KEY="$(echo "$creds" | jq -r '.Credentials.SecretAccessKey')"
    export AWS_SESSION_TOKEN="$(echo "$creds" | jq -r '.Credentials.SessionToken')"
    EOF
  }; f
gcl-session =
  !f() {
    # Write temporary admin credentials into .gitlab-ci-local-variables.yml
    # for use with gitlab-ci-local.
    local creds
    creds="$(aws sts temp)"
    cat <<EOF > .gitlab-ci-local-variables.yml
    AWS_ACCESS_KEY_ID: "$(echo "$creds" | jq -r '.Credentials.AccessKeyId')"
    AWS_SECRET_ACCESS_KEY: "$(echo "$creds" | jq -r '.Credentials.SecretAccessKey')"
    AWS_SESSION_TOKEN: "$(echo "$creds" | jq -r '.Credentials.SessionToken')"
    AWS_ACCOUNT_ID: "$(aws id)"
    AWS_DEFAULT_REGION: "$AWS_DEFAULT_REGION"
    EOF
  }; f
act-session =
  !f() {
    # Print a ready-made string of `--env` flags for nektos/act, pre-loaded
    # with temporary credentials for the current account.
    local creds
    creds="$(aws sts temp)"
    local act_args=()
    act_args+=(--env AWS_ACCESS_KEY_ID="$(echo "$creds" | jq -r '.Credentials.AccessKeyId')")
    act_args+=(--env AWS_SECRET_ACCESS_KEY="$(echo "$creds" | jq -r '.Credentials.SecretAccessKey')")
    act_args+=(--env AWS_SESSION_TOKEN="$(echo "$creds" | jq -r '.Credentials.SessionToken')")
    act_args+=(--env AWS_ACCOUNT_ID="$(aws id)")
    act_args+=(--env AWS_DEFAULT_REGION="$AWS_DEFAULT_REGION")
    act_args+=(--env AWS_REGION="$AWS_DEFAULT_REGION")
    # [*] is intentional: emit one space-joined line for copy/paste into act
    echo "${act_args[*]}"
  }; f
[command sts]
temp =
  !f() {
    # Assume the hard-coded duplomaster admin role in the current account
    # and print the resulting temporary credentials JSON.
    aws sts assume-role \
      --role-arn "arn:aws:iam::$(aws id):role/duplomaster" \
      --role-session-name "AdminSession"
  }; f
op-mfa =
  !f() {
    # Exchange 1Password-stored base keys plus a one-time code for an MFA
    # session token (tagged with ProviderType for the caching alias).
    # $1 - 1Password item with the access keys; $2 - item holding the OTP
    local base_creds
    base_creds="$(aws op-credentials "$1")"
    export AWS_ACCESS_KEY_ID="$(echo "$base_creds" | jq -r '.AccessKeyId')"
    export AWS_SECRET_ACCESS_KEY="$(echo "$base_creds" | jq -r '.SecretAccessKey')"
    aws sts get-session-token \
      --serial-number "$(aws iam mymfa)" \
      --token-code "$(op item get "$2" --otp --vault=Private)" \
      | jq -r '. + {ProviderType: "opmfa"}'
  }; f
[command ssm]
proxy =
  !f() {
    # SSH ProxyCommand helper: push a one-time SSH public key to an EC2
    # instance via EC2 Instance Connect, then tunnel SSH over SSM.
    # $1 - OS user; $2 - instance id; $3 - SSH port
    # Use lowercase locals: the original assigned to USER/HOSTNAME, which
    # clobbered the exported environment seen by every child process.
    local os_user="$1" instance_id="$2" port="$3"
    local key_file az
    # Prepare temporary SSH key; it lives in the agent for 60 seconds
    key_file="$(mktemp "/tmp/${instance_id}.${os_user}.XXXXXX")"
    yes | ssh-keygen -t rsa -b 4096 -f "$key_file" -N ''
    ssh-add -t 60 "$key_file"
    # Send the SSH key to the target instance
    az="$(aws ec2 describe-instances \
      --instance-ids "$instance_id" \
      --output=text \
      --query 'Reservations[0].Instances[0].Placement.AvailabilityZone')"
    aws ec2-instance-connect send-ssh-public-key \
      --instance-id "$instance_id" \
      --availability-zone "$az" \
      --instance-os-user "$os_user" \
      --ssh-public-key "file://${key_file}.pub"
    # Key is now in the agent and on the instance; don't leave copies in /tmp
    rm -f -- "$key_file" "${key_file}.pub"
    # Start the SSM session
    aws ssm start-session \
      --target "$instance_id" \
      --document-name AWS-StartSSHSession \
      --parameters "portNumber=${port}" \
      --region "$AWS_DEFAULT_REGION"
  }; f
[command ec2]
select =
  !f() {
    # Interactively pick an EC2 instance with fzf and print its instance id.
    local selected=$(aws ec2 describe-instances \
      --query "Reservations[*].Instances[*].{name: Tags[?Key=='Name'] | [0].Value, instance_id: InstanceId, ip_address: PrivateIpAddress, state: State.Name}" \
      --output table | fzf --header="Select an EC2 instance" --tac)
    # Strip the table's '|' borders, then word-split the row; table columns
    # render alphabetically (instance_id, ip_address, name, state), so the
    # first token should be the instance id.
    # NOTE(review): depends on the CLI's table layout -- fragile, verify.
    local selected_parts=(${selected//|/})
    echo "${selected_parts[0]}"
  }; f
exec =
  !f() {
    # SSH into an interactively selected EC2 instance as $1 (default
    # ec2-user). Presumably the instance id resolves as a hostname via the
    # ssm proxy in ~/.ssh/config -- TODO confirm.
    local user="${1:-ec2-user}"
    local ec2id
    ec2id="$(aws ec2 select)"
    ssh "${user}@${ec2id}"
  }; f
[command eks]
add-config =
  !f() {
    # Pick an EKS cluster with fzf and merge it into the local kubeconfig.
    local cluster
    cluster="$(aws eks list-clusters --query "clusters[*]" | jq -r ".[]" | fzf)"
    aws eks update-kubeconfig --name "$cluster"
  }; f
[command iam]
# Serial number (ARN) of the caller's first registered MFA device.
mymfa = list-mfa-devices --query 'MFADevices[0].SerialNumber' --output text
[command ecr]
login =
  !f() {
    # Log docker in to this account's ECR registry in the default region.
    # Password goes via stdin so it never appears in `ps` output.
    local REGISTRY ACCOUNT_ID
    ACCOUNT_ID="$(aws id)"
    REGISTRY="${ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com"
    aws ecr get-login-password | docker login --username AWS --password-stdin "$REGISTRY"
  }; f
[command rds]
select =
  !f() {
    # Interactively pick an RDS cluster with fzf and print its identifier.
    local selected=$(aws rds describe-db-clusters \
      --output table \
      --query "DBClusters[*].{name: TagList[?Key=='Name'] | [0].Value, id: DBClusterIdentifier, tenant: TagList[?Key=='TENANT_NAME'] | [0].Value}" | fzf --header="Select an RDS cluster" --tac)
    # Strip '|' borders and word-split the row; table columns render
    # alphabetically (id, name, tenant), so the first token should be the
    # cluster identifier.
    # NOTE(review): depends on the CLI's table layout -- fragile, verify.
    local selected_parts=(${selected//|/})
    echo "${selected_parts[0]}"
  }; f
port-forward =
  !f() {
    # Forward a local port to the writer endpoint of an interactively
    # chosen RDS cluster, tunnelling through an interactively chosen EC2
    # instance via SSM.
    # $1 - optional "local:remote" port pair (default 5432:5432)
    local target_instance target_cluster rds_endpoint ports
    target_instance=$(aws ec2 select)
    target_cluster=$(aws rds select)
    ports=${1:-5432:5432}
    local local_port=${ports%%:*}
    local remote_port=${ports##*:}
    rds_endpoint=$(aws rds describe-db-cluster-endpoints \
      --output text \
      --db-cluster-identifier "${target_cluster}" \
      --query "DBClusterEndpoints[?EndpointType=='WRITER'] | [0].Endpoint")
    # Summarise the tunnel before starting it
    echo "
    ec2 instance: ${target_instance}
    rds cluster: ${target_cluster}
    rds endpoint: ${rds_endpoint}
    local port: ${local_port}
    remote port: ${remote_port}
    "
    aws ssm start-session \
      --target "${target_instance}" \
      --document-name AWS-StartPortForwardingSessionToRemoteHost \
      --parameters host="${rds_endpoint}",portNumber="${remote_port}",localPortNumber="${local_port}"
  }; f