gluster-create-pv.sh
#!/bin/bash
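#
# Create GlusterFS-backed PersistentVolumes for an OpenShift cluster: for each
# volume, a brick is created on every node, a replicated gluster volume is
# assembled from the bricks, and a matching PersistentVolume object (plus a
# service and endpoints) is registered in OpenShift.
#
# Assumptions: passwordless SSH as root to all gluster nodes, a volume group
# named vg_gluster on every node, and a logged-in oc client on this master.
#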
set -o pipefail
vol_nr=0
gluster_name="glusterfs-cluster"

# GlusterFS node hostnames and IP addresses (fill these in before running)
nodes[0]=
nodes[1]=
nodes_ip[0]=
nodes_ip[1]=

# OpenShift projects in which the endpoints will be created
projects[0]=default
projects[1]=openshift-infra
projects[2]=logging
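
# Create one brick on a single node: carve a logical volume out of vg_gluster,
# format it as XFS, register it in /etc/fstab and mount it under /data/<volume>.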
create_gluster_brick() {
  vol_name=$1
  vol_size=$2
  node=$3
  if ! ssh $node test -b "/dev/vg_gluster/$vol_name"; then
    ssh $node lvcreate --name $vol_name --size ${vol_size}G vg_gluster -y >/dev/null 2>&1
    ssh $node mkfs.xfs -f -i size=512 /dev/vg_gluster/$vol_name >/dev/null 2>&1
  else
    echo "ERROR: Logical volume /dev/vg_gluster/$vol_name already exists. Aborting."
    exit 1
  fi
  ssh $node "echo \"/dev/vg_gluster/${vol_name} /data/${vol_name} xfs noatime 0 0\" >> /etc/fstab"
  if ! ssh $node test -d "/data/$vol_name"; then
    ssh $node mkdir -p /data/$vol_name
  fi
  ssh $node mount /dev/vg_gluster/$vol_name /data/$vol_name
  # Create the brick directory on the freshly mounted filesystem, not beneath the mount point
  ssh $node mkdir -p /data/$vol_name/brick
}
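
# Create a replicated gluster volume with one brick per node, start it if it
# is not already running, and open up permissions on the brick mount points.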
create_gluster_volume() {
  vol_name=$1
  vol_size=$2
  # Create the gluster volume if it doesn't exist yet (exact-match grep so
  # e.g. gluster_vol_1 doesn't match gluster_vol_10)
  if ! ssh ${nodes[0]} gluster volume list | grep -x $vol_name >/dev/null 2>&1; then
    for node in "${nodes[@]}"; do
      # Create the gluster brick if it doesn't exist yet
      if ! create_gluster_brick $vol_name $vol_size $node; then
        echo "ERROR: Failed to create gluster brick on node ${node}. Aborting."
        exit 1
      fi
    done
    if ! ssh ${nodes[0]} gluster volume create $vol_name replica ${#nodes[@]} ${nodes[@]/%/:\/data\/$vol_name\/brick} >/dev/null 2>&1; then
      echo "ERROR: Failed to create gluster volume $vol_name. Aborting."
      exit 1
    fi
  fi
  if ! ssh ${nodes[0]} gluster volume info $vol_name | grep "Status: Started" >/dev/null 2>&1; then
    if ! ssh ${nodes[0]} gluster volume start $vol_name >/dev/null 2>&1; then
      echo "ERROR: Failed to start gluster volume $vol_name. Aborting."
      exit 1
    fi
  fi
  for node in "${nodes[@]}"; do
    ssh $node "chmod -R 777 /data/$vol_name/"
  done
}
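
# Create a gluster volume of the given size (in GB) and register it in
# OpenShift as a PersistentVolume, together with the service and endpoints
# it needs.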
create_persistent_volume() {
  vol_size=$1
  vol_nr=$((vol_nr+1))
  vol_name="gluster_vol_${vol_nr}"
  if ! create_gluster_volume $vol_name ${vol_size}; then
    echo "ERROR: Failed to create gluster volume. Aborting."
    exit 1
  else
    echo " Successfully created gluster volume $vol_name on ${#nodes[@]} nodes (${nodes[@]})."
  fi

  # Create the Gluster service in the default project
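  # The service publishes a dummy port (1); it exists only so that the
  # endpoints below are persisted by OpenShift.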
  if ! oc get -n default services ${gluster_name} >/dev/null 2>&1; then
    cat <<-EOF | oc create -n default -f -
{
  "apiVersion": "v1",
  "kind": "Service",
  "metadata": {
    "name": "${gluster_name}"
  },
  "spec": {
    "ports": [
      {
        "port": 1
      }
    ]
  }
}
EOF
  else
    echo "service ${gluster_name} already exists. Skipping."
  fi
  # Create the Gluster endpoints in each project
  for project in "${projects[@]}"; do
    if ! oc get -n ${project} endpoints ${gluster_name} >/dev/null 2>&1; then
      cat <<-EOF | oc create -n ${project} -f -
{
  "apiVersion": "v1",
  "kind": "Endpoints",
  "metadata": {
    "name": "${gluster_name}"
  },
  "subsets": [
    {
      "addresses": [
        {
          "ip": "${nodes_ip[0]}"
        },
        {
          "ip": "${nodes_ip[1]}"
        }
      ],
      "ports": [
        {
          "port": 1,
          "protocol": "TCP"
        }
      ]
    }
  ]
}
EOF
    else
      echo "endpoint ${gluster_name} already exists. Skipping."
    fi
  done
  # Create the PersistentVolume. Both access modes are listed so the PV can
  # bind to ReadWriteOnce or ReadWriteMany claims; the "Recycle" reclaim
  # policy scrubs the volume when its claim is deleted.
  if ! oc get -n default persistentvolumes gluster-pv${vol_nr} >/dev/null 2>&1; then
    cat <<-EOF | oc create -n default -f -
{
  "apiVersion": "v1",
  "kind": "PersistentVolume",
  "metadata": {
    "name": "gluster-pv${vol_nr}"
  },
  "spec": {
    "capacity": {
      "storage": "${vol_size}Gi"
    },
    "accessModes": [
      "ReadWriteOnce",
      "ReadWriteMany"
    ],
    "glusterfs": {
      "endpoints": "${gluster_name}",
      "path": "/gluster_vol_${vol_nr}",
      "readOnly": false
    },
    "persistentVolumeReclaimPolicy": "Recycle"
  }
}
EOF
  else
    echo "ERROR: OpenShift persistent volume gluster-pv${vol_nr} already exists. This should not happen. Aborting."
    exit 1
  fi
}

# Check if executed as root
if [[ $EUID -ne 0 ]]; then
  echo "ERROR: This script must be run as root. Aborting."
  exit 1
fi

# Check if executed on an OpenShift master
if ! systemctl status atomic-openshift-master >/dev/null 2>&1; then
  echo "ERROR: This script must be run on an OpenShift master. Aborting."
  exit 1
fi

# Create persistent volumes for:
# - Docker registry
create_persistent_volume 50
# - Logging
create_persistent_volume 20
# - Metrics
create_persistent_volume 20

# Fill up the remaining space with 1 GB volumes
for i in $(seq 1 25); do
  create_persistent_volume 1
done
create_persistent_volume 1

# Finally, restart the gluster daemon so the file permission changes take effect
for node in "${nodes[@]}"; do
  ssh $node systemctl restart glusterd
done