cleanup

parent 663a460875
commit b3a5895897

1 changed file with 16 additions and 29 deletions
@@ -4,70 +4,57 @@ set -x

function main()
{
  date

-  local bucket_name="${1:-mybucket}"; shift

  # enable tls for k3s with cert-manager
  kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
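
  # localstack emulates AWS services locally; its S3 endpoint is the backup target exercised below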
  kubectl apply -f localstack.yaml

  date
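
  # certificate.yaml needs the CRDs that cert-manager installs, so the apply
  # below is retried until they are registered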
  until kubectl apply -f certificate.yaml
  do
    echo "[INFO] Waiting for certificate ..."
    sleep 30
  done

  echo
  # wait for ingress to be ready
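  # (the jsonpath expression extracts the load-balancer IP, which stays empty until k3s assigns one)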
  bash -c 'external_ip=""; while [ -z $external_ip ]; do echo "[INFO] Waiting for end point..."; external_ip=$(kubectl get ingress -o jsonpath="{$.items[*].status.loadBalancer.ingress[*].ip}"); [ -z "$external_ip" ] && sleep 10; done; echo "End point ready - $external_ip";'

  date

  echo
-  export ENDPOINT=$(kubectl get ingress ingress-localstack -o=jsonpath="{.status.loadBalancer.ingress[0].ip}")
-  sudo bash -c "echo \"$ENDPOINT k3stesthost cloudhost\" >> /etc/hosts" # Remove this, works for testing, but fills your /etc/hosts
+  export INGRESS_IP=$(kubectl get ingress ingress-localstack -o=jsonpath="{.status.loadBalancer.ingress[0].ip}")

  cd ../../../../ # c4k-nextcloud project root
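
  # build the c4k-nextcloud uberjar, render the manifests from the local configs, and apply them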
  lein uberjar
  java -jar target/uberjar/c4k-nextcloud-standalone.jar config-local.edn auth-local.edn | kubectl apply -f -
-  POD=$(kubectl get pod -l app=cloud-app -o name)
-  kubectl wait $POD --for=condition=Ready --timeout=240s
+  CLOUD_POD=$(kubectl get pod -l app=cloud-app -o name)
+  kubectl wait $CLOUD_POD --for=condition=Ready --timeout=240s

  # wait for nextcloud config file available
-  timeout 180 bash -c "kubectl exec -t $POD -- bash -c \"until [ -f /var/www/html/config/config.php ]; do sleep 10; done\""

  # ensure an instance of pod backup-restore
  kubectl scale deployment backup-restore --replicas 1

  date
  echo
  # wait for localstack health endpoint
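  # map the test hostnames to the ingress IP so the curl calls below can resolve them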
+  echo "$INGRESS_IP k3stesthost cloudhost" >> /etc/hosts
  until curl --fail --silent k3stesthost/health | grep -oe '"s3": "available"' -oe '"s3": "running"'
  do
    curl --fail k3stesthost/health
    echo "[INFO] Waiting for s3 running"
    sleep 10
  done
  echo

-  POD=$(kubectl get pod -l app=backup-restore -o name)
-  kubectl wait $POD --for=condition=Ready --timeout=240s
+  BACKUP_POD=$(kubectl get pod -l app=backup-restore -o name)
+  kubectl wait $BACKUP_POD --for=condition=Ready --timeout=240s

-  kubectl exec -t $POD -- bash -c "echo \"$ENDPOINT k3stesthost cloudhost\" >> /etc/hosts"
-  kubectl exec -t $POD -- /usr/local/bin/init.sh
+  kubectl exec -t $BACKUP_POD -- bash -c "echo \"$INGRESS_IP k3stesthost cloudhost\" >> /etc/hosts"
+  kubectl exec -t $BACKUP_POD -- /usr/local/bin/init.sh
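
  # run a backup and then a restore inside the backup-restore pod to verify the round trip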
  echo ================= BACKUP =================
-  kubectl exec -t $POD -- ls -l /var/backups/config
-  kubectl exec -t $POD -- /usr/local/bin/backup.sh
+  kubectl exec -t $BACKUP_POD -- /usr/local/bin/backup.sh

  date
  sleep 10 # avoid race conditions

  echo ================= RESTORE =================
-  kubectl exec -t $POD -- ls -l /var/backups/config
-  kubectl exec -t $POD -- /usr/local/bin/restore.sh
+  kubectl exec -t $BACKUP_POD -- /usr/local/bin/restore.sh
}

main "$@"
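
A possible follow-up cleanup for the unbounded inline ingress wait above, sketched as a helper with a timeout; the function name and default timeout are illustrative, not part of this commit:

# Sketch, not in the commit: poll for the ingress load-balancer IP with an upper bound.
function wait_for_ingress_ip() {
  local timeout_s="${1:-300}" ip=""
  local deadline=$((SECONDS + timeout_s))
  while [ -z "$ip" ] && [ "$SECONDS" -lt "$deadline" ]; do
    echo "[INFO] Waiting for end point..." >&2   # log to stderr so stdout carries only the IP
    ip=$(kubectl get ingress ingress-localstack -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
    [ -z "$ip" ] && sleep 10
  done
  [ -n "$ip" ] && echo "$ip"                     # fails (nonzero) if the deadline passed
}

# usage: INGRESS_IP=$(wait_for_ingress_ip 300) || exit 1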