diff options
Diffstat (limited to 'scripts/disaster/gluster_endpoints')
8 files changed, 318 insertions, 0 deletions
diff --git a/scripts/disaster/gluster_endpoints/add_endpoints.sh b/scripts/disaster/gluster_endpoints/add_endpoints.sh new file mode 100644 index 0000000..4badee9 --- /dev/null +++ b/scripts/disaster/gluster_endpoints/add_endpoints.sh @@ -0,0 +1,17 @@ +[[ $# -ne 1 ]] && { echo "Usage: $0 <NEW_NODE_IP>"; exit 1; } + +NEW_IP="$1" + +oc get namespaces -o name | sed 's/namespaces\///' | \ +while read NS; do + if oc -n "$NS" get endpoints gfs &>/dev/null; then + echo "✓ Patching $NS/gfs with $NEW_IP" +# echo oc -n "$NS" patch endpoints gfs --type=strategic --patch="{\"subsets\":[{\"addresses\":[{\"ip\":\"$NEW_IP\"}]}]}" +# echo oc -n "$NS" patch ep gfs --type=strategic --patch='{"subsets":[{"addresses":[{"ip":"'"$NEW_IP"'"}]}]}' + oc -n "$NS" patch ep gfs --type=json -p='[{"op": "add", "path": "/subsets/0/addresses/-", "value": {"ip": "'"$NEW_IP"'"}}]' + else + echo "✗ No gfs endpoint in $NS (skipping)" + fi +done + +echo "Done. Verify: oc get ep gfs -A -o wide"
\ No newline at end of file diff --git a/scripts/disaster/gluster_endpoints/backups/ipekatrin1-edited.yaml b/scripts/disaster/gluster_endpoints/backups/ipekatrin1-edited.yaml new file mode 100644 index 0000000..6a8dc63 --- /dev/null +++ b/scripts/disaster/gluster_endpoints/backups/ipekatrin1-edited.yaml @@ -0,0 +1,85 @@ +apiVersion: v1 +kind: Node +metadata: + annotations: + alpha.kubernetes.io/provided-node-ip: 192.168.13.1 + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: 2018-03-23T04:20:04Z + labels: + beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux + compute_node: "0" + fat_memory: "0" + fqdn: ipekatrin1.ipe.kit.edu + gpu_node: "0" + hostid: "1" + hostname: ipekatrin1 + kubernetes.io/hostname: ipekatrin1.ipe.kit.edu + master: "1" + node-role.kubernetes.io/master: "true" + openshift-infra: apiserver + permanent: "1" + pod_node: "1" + production: "1" + region: infra + server: "1" + zone: default + name: ipekatrin1.ipe.kit.edu + resourceVersion: "1138908753" + selfLink: /api/v1/nodes/ipekatrin1.ipe.kit.edu + uid: 7616a958-2e51-11e8-969e-0cc47adef108 +spec: + externalID: ipekatrin1.ipe.kit.edu +status: + addresses: + - address: 192.168.13.1 + type: InternalIP + - address: ipekatrin1.ipe.kit.edu + type: Hostname + allocatable: + cpu: "40" + memory: 263757760Ki + pods: "250" + capacity: + cpu: "40" + memory: 263860160Ki + pods: "250" + conditions: + - lastHeartbeatTime: 2025-10-23T19:01:20Z + lastTransitionTime: 2025-10-23T19:02:02Z + message: Kubelet stopped posting node status. + reason: NodeStatusUnknown + status: Unknown + type: OutOfDisk + - lastHeartbeatTime: 2025-10-23T19:01:20Z + lastTransitionTime: 2025-10-23T19:02:02Z + message: Kubelet stopped posting node status. + reason: NodeStatusUnknown + status: Unknown + type: MemoryPressure + - lastHeartbeatTime: 2025-10-23T19:01:20Z + lastTransitionTime: 2025-10-23T19:02:02Z + message: Kubelet stopped posting node status. 
+ reason: NodeStatusUnknown + status: Unknown + type: DiskPressure + - lastHeartbeatTime: 2025-10-23T19:01:20Z + lastTransitionTime: 2025-10-23T19:02:02Z + message: Kubelet stopped posting node status. + reason: NodeStatusUnknown + status: Unknown + type: Ready + daemonEndpoints: + kubeletEndpoint: + Port: 10250 + nodeInfo: + architecture: amd64 + bootID: a87a0b63-abf8-4b1d-9a1a-49197b26817e + containerRuntimeVersion: docker://1.12.6 + kernelVersion: 3.10.0-693.21.1.el7.x86_64 + kubeProxyVersion: v1.7.6+a08f5eeb62 + kubeletVersion: v1.7.6+a08f5eeb62 + machineID: 73b3f7f0088b44adb16582623d7747b1 + operatingSystem: linux + osImage: CentOS Linux 7 (Core) + systemUUID: 00000000-0000-0000-0000-0CC47ADEF108 diff --git a/scripts/disaster/gluster_endpoints/backups/ipekatrin1.yaml b/scripts/disaster/gluster_endpoints/backups/ipekatrin1.yaml new file mode 100644 index 0000000..5e45f12 --- /dev/null +++ b/scripts/disaster/gluster_endpoints/backups/ipekatrin1.yaml @@ -0,0 +1,87 @@ +apiVersion: v1 +kind: Node +metadata: + annotations: + alpha.kubernetes.io/provided-node-ip: 192.168.13.1 + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: 2018-03-23T04:20:04Z + labels: + beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux + compute_node: "0" + fat_memory: "0" + fat_storage: "1" + fqdn: ipekatrin1.ipe.kit.edu + glusterfs: storage-host + gpu_node: "0" + hostid: "1" + hostname: ipekatrin1 + kubernetes.io/hostname: ipekatrin1.ipe.kit.edu + master: "1" + node-role.kubernetes.io/master: "true" + openshift-infra: apiserver + permanent: "1" + pod_node: "1" + production: "1" + region: infra + server: "1" + zone: default + name: ipekatrin1.ipe.kit.edu + resourceVersion: "1137118496" + selfLink: /api/v1/nodes/ipekatrin1.ipe.kit.edu + uid: 7616a958-2e51-11e8-969e-0cc47adef108 +spec: + externalID: ipekatrin1.ipe.kit.edu +status: + addresses: + - address: 192.168.13.1 + type: InternalIP + - address: ipekatrin1.ipe.kit.edu + type: Hostname + 
allocatable: + cpu: "40" + memory: 263757760Ki + pods: "250" + capacity: + cpu: "40" + memory: 263860160Ki + pods: "250" + conditions: + - lastHeartbeatTime: 2025-10-23T19:01:20Z + lastTransitionTime: 2025-10-23T19:02:02Z + message: Kubelet stopped posting node status. + reason: NodeStatusUnknown + status: Unknown + type: OutOfDisk + - lastHeartbeatTime: 2025-10-23T19:01:20Z + lastTransitionTime: 2025-10-23T19:02:02Z + message: Kubelet stopped posting node status. + reason: NodeStatusUnknown + status: Unknown + type: MemoryPressure + - lastHeartbeatTime: 2025-10-23T19:01:20Z + lastTransitionTime: 2025-10-23T19:02:02Z + message: Kubelet stopped posting node status. + reason: NodeStatusUnknown + status: Unknown + type: DiskPressure + - lastHeartbeatTime: 2025-10-23T19:01:20Z + lastTransitionTime: 2025-10-23T19:02:02Z + message: Kubelet stopped posting node status. + reason: NodeStatusUnknown + status: Unknown + type: Ready + daemonEndpoints: + kubeletEndpoint: + Port: 10250 + nodeInfo: + architecture: amd64 + bootID: a87a0b63-abf8-4b1d-9a1a-49197b26817e + containerRuntimeVersion: docker://1.12.6 + kernelVersion: 3.10.0-693.21.1.el7.x86_64 + kubeProxyVersion: v1.7.6+a08f5eeb62 + kubeletVersion: v1.7.6+a08f5eeb62 + machineID: 73b3f7f0088b44adb16582623d7747b1 + operatingSystem: linux + osImage: CentOS Linux 7 (Core) + systemUUID: 00000000-0000-0000-0000-0CC47ADEF108 diff --git a/scripts/disaster/gluster_endpoints/backups/storageclasses_backup_2025-10-29.yaml b/scripts/disaster/gluster_endpoints/backups/storageclasses_backup_2025-10-29.yaml new file mode 100644 index 0000000..77e3452 --- /dev/null +++ b/scripts/disaster/gluster_endpoints/backups/storageclasses_backup_2025-10-29.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +items: +- apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + creationTimestamp: 2018-03-23T04:24:52Z + name: glusterfs-storage + namespace: "" + resourceVersion: "6403" + selfLink: /apis/storage.k8s.io/v1/storageclasses/glusterfs-storage + uid: 
219550a3-2e52-11e8-969e-0cc47adef108 + parameters: + resturl: http://heketi-storage.glusterfs.svc.cluster.local:8080 + restuser: admin + secretName: heketi-storage-admin-secret + secretNamespace: glusterfs + provisioner: kubernetes.io/glusterfs +- apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + creationTimestamp: 2018-03-23T04:25:31Z + name: glusterfs-storage-block + namespace: "" + resourceVersion: "6528" + selfLink: /apis/storage.k8s.io/v1/storageclasses/glusterfs-storage-block + uid: 38ff5088-2e52-11e8-969e-0cc47adef108 + parameters: + chapauthenabled: "true" + hacount: "3" + restsecretname: heketi-storage-admin-secret-block + restsecretnamespace: glusterfs + resturl: http://heketi-storage.glusterfs.svc.cluster.local:8080 + restuser: admin + provisioner: gluster.org/glusterblock +kind: List +metadata: + resourceVersion: "" + selfLink: "" diff --git a/scripts/disaster/gluster_endpoints/check_pv.sh b/scripts/disaster/gluster_endpoints/check_pv.sh new file mode 100644 index 0000000..1f2a7e4 --- /dev/null +++ b/scripts/disaster/gluster_endpoints/check_pv.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +pvs=$(oc get pv -o json | jq -r ' + .items[] + | select(.spec.glusterfs?) + | select(.spec.glusterfs.endpoints != "gfs") + | "\(.metadata.name) → endpoints=\(.spec.glusterfs.endpoints // "NONE")"') + + +echo "PV usage:" +echo + +#pvs=$(oc get pv --no-headers | awk '{print $1}') + +for pv in $pvs; do + # Extract PVC and namespace bound to PV + pvc=$(oc get pv "$pv" -o jsonpath='{.spec.claimRef.name}' 2>/dev/null) + ns=$(oc get pv "$pv" -o jsonpath='{.spec.claimRef.namespace}' 2>/dev/null) + + if [[ -z "$pvc" || -z "$ns" ]]; then + echo "$pv → UNUSED" + echo + continue + fi + + echo "$pv → PVC: $ns/$pvc" + + # Grep instead of JSONPath filter — much safer + pods=$(oc get pods -n "$ns" -o name \ + | while read -r pod; do + oc get "$pod" -n "$ns" -o json \ + | jq -r --arg pvc "$pvc" ' + . as $pod | + .spec.volumes[]? + | select(.persistentVolumeClaim? 
and .persistentVolumeClaim.claimName == $pvc) | $pod.metadata.name ' 2>/dev/null done \ | sort -u ) if [[ -z "$pods" ]]; then echo " → PVC bound but no running Pod is using it" else echo " → Pods:" echo "$pods" | sed 's/^/ - /' fi echo done diff --git a/scripts/disaster/gluster_endpoints/find_inline_gluster_in_pods.sh b/scripts/disaster/gluster_endpoints/find_inline_gluster_in_pods.sh new file mode 100644 index 0000000..e116fb7 --- /dev/null +++ b/scripts/disaster/gluster_endpoints/find_inline_gluster_in_pods.sh @@ -0,0 +1,7 @@ +#! /bin/bash + +for p in $(oc get pods --all-namespaces --no-headers | awk '{print $2":"$1}'); do + pod=${p%:*}; ns=${p#*:}; + echo "=== $ns/$pod ===" + oc -n "$ns" get pod "$pod" -o json | grep gluster +done diff --git a/scripts/disaster/gluster_endpoints/remove_endpoints.sh b/scripts/disaster/gluster_endpoints/remove_endpoints.sh new file mode 100644 index 0000000..f4623f6 --- /dev/null +++ b/scripts/disaster/gluster_endpoints/remove_endpoints.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +TARGET_IP="192.168.12.1" + +for ns in $(oc get ns --no-headers | awk '{print $1}'); do + for epname in gfs glusterfs-dynamic-etcd glusterfs-dynamic-metrics-cassandra-1 glusterfs-dynamic-mongodb glusterfs-dynamic-registry-claim glusterfs-dynamic-sharelatex-docker; do + ep=$(oc get endpoints "$epname" -n "$ns" -o json 2>/dev/null) || continue + + modified="$(printf '%s' "$ep" | jq \ + --arg ip "$TARGET_IP" \ + '(.subsets[]?.addresses |= map(select(.ip != $ip)))' + )" + + if diff <(echo "$ep") <(echo "$modified") >/dev/null; then + continue + fi + + echo -n "Namespace: $ns/$epname:" + echo -n "$ep" | jq '.subsets[].addresses' + echo -n " ===> " + echo -n "$modified" | jq '.subsets[].addresses' + echo + + # WARNING: the following line APPLIES the change immediately (it is no longer commented out); re-comment it to dry-run: + echo "$modified" | oc replace -f - -n "$ns" + done +done diff --git a/scripts/disaster/gluster_endpoints/remove_storageclasses.sh 
b/scripts/disaster/gluster_endpoints/remove_storageclasses.sh new file mode 100644 index 0000000..063650d --- /dev/null +++ b/scripts/disaster/gluster_endpoints/remove_storageclasses.sh @@ -0,0 +1,7 @@ +# Backups provided +oc delete sc glusterfs-storage +oc delete sc glusterfs-storage-block + +# It was a single replica +oc scale dc/glusterblock-storage-provisioner-dc -n glusterfs --replicas=0 +oc scale dc/heketi-storage -n glusterfs --replicas=0
