diff --git a/.gitignore b/.gitignore index b3860cc..d3ed621 100644 --- a/.gitignore +++ b/.gitignore @@ -22,8 +22,15 @@ logs/ *.iml .idea/ -#valid-auth.edn -#valid-config.edn +# config files my-auth.edn +my-config.edn auth.edn config.edn + +# certificate +ca.crt + +# caches +.clj-kondo/.cache +.lsp/.cache/ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2cafe78..587f46d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -4,10 +4,17 @@ stages: - security - upload - image + - integrationtest services: - docker:19.03.12-dind +.only-master: &only-master + rules: + - if: '$CI_COMMIT_REF_NAME == "master"' + when: always + - when: never + .cljs-job: &cljs image: domaindrivenarchitecture/shadow-cljs cache: @@ -32,18 +39,21 @@ services: test-cljs: <<: *cljs + <<: *only-master stage: build_and_test script: - shadow-cljs compile test test-clj: <<: *clj + <<: *only-master stage: build_and_test script: - lein test test-schema: <<: *clj + <<: *only-master stage: build_and_test script: - lein uberjar @@ -77,6 +87,7 @@ test-schema: package-uberjar: <<: *clj + <<: *only-master stage: package script: - sha256sum target/uberjar/c4k-nextcloud-standalone.jar > target/uberjar/c4k-nextcloud-standalone.jar.sha256 @@ -86,6 +97,7 @@ package-uberjar: - target/uberjar sast: + <<: *only-master variables: SAST_EXCLUDED_ANALYZERS: bandit, brakeman, flawfinder, gosec, kubesec, phpcs-security-audit, @@ -138,4 +150,26 @@ backup-image-test-publish: rules: - if: '$CI_COMMIT_TAG != null' script: - - cd infrastructure/docker-backup && pyb image test publish \ No newline at end of file + - cd infrastructure/docker-backup && pyb image test publish + +nextcloud-integrationtest: + stage: integrationtest + image: registry.gitlab.com/gitlab-org/cluster-integration/helm-install-image/releases/3.7.1-kube-1.20.11-alpine-3.14 + services: + - name: registry.gitlab.com/gitlab-org/cluster-integration/test-utils/k3s-gitlab-ci/releases/v1.22.2-k3s2 + alias: k3s + script: + - apk add curl sudo bash + - apk add wget curl bash sudo openjdk8 + - wget -P /etc/apk/keys/ https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub + - apk add --no-cache --repository=https://apkproxy.herokuapp.com/sgerrand/alpine-pkg-leiningen leiningen + + - mkdir -p ${HOME}/.kube/ + - curl -f k3s:8081 > ${HOME}/.kube/config + - kubectl version + - kubectl cluster-info + - echo "---------- Integration test -------------" + - pwd + - cd ./src/test/resources/local-integration-test/ && ./setup-local-s3-on-k3d.sh + + diff --git a/auth-local.edn b/auth-local.edn new file mode 100644 index 0000000..d924c44 --- /dev/null +++ b/auth-local.edn @@ -0,0 +1,7 @@ +{:postgres-db-user "nextcloud" + :postgres-db-password "dbpass" + :nextcloud-admin-user "cloudadmin" + :nextcloud-admin-password "cloudpassword" + :aws-access-key-id "" + :aws-secret-access-key "" + :restic-password "test-password"} diff --git a/config-local.edn b/config-local.edn new file mode 100644 index 0000000..67b820b --- /dev/null +++ b/config-local.edn @@ -0,0 +1,6 @@ +{:fqdn "cloudhost" + :issuer :staging + :nextcloud-data-volume-path "/var/cloud" + :postgres-data-volume-path "/var/postgres" + :restic-repository "s3://k3stesthost/mybucket" + :local-integration-test true} diff --git a/infrastructure/README.md b/infrastructure/README.md new file mode 100644 index 0000000..5b0f2ed --- /dev/null +++ b/infrastructure/README.md @@ -0,0 +1,30 @@ +# Build images + +## Prerequisites + +See also https://pypi.org/project/ddadevops/ + +```bash +# Ensure that your python3 version is at least Python 3.7!
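+# Quick check (an optional sketch; assumes python3 and pip3 are already on PATH): +python3 --version +pip3 --version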
+ +sudo apt install python3-pip +pip3 install pip --upgrade --user +pip3 install pybuilder ddadevops deprecation --user +export PATH=$PATH:~/.local/bin + +# terraform +pip3 install dda-python-terraform --user + +# AwsMixin +pip3 install boto3 --user + +# AwsMfaMixin +pip3 install boto3 mfa --user +``` + +In the folder "docker-backup" or "docker-nextcloud" (depending on the image you want to build): + +```bash +# step test is optional +pyb image test publish +``` \ No newline at end of file diff --git a/infrastructure/docker-backup/image/resources/backup.sh b/infrastructure/docker-backup/image/resources/backup.sh index c28cf98..9803839 100755 --- a/infrastructure/docker-backup/image/resources/backup.sh +++ b/infrastructure/docker-backup/image/resources/backup.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -o pipefail +set -xo pipefail function main() { diff --git a/infrastructure/docker-backup/image/resources/end-maintenance.sh b/infrastructure/docker-backup/image/resources/end-maintenance.sh index 0013509..0976fe5 100644 --- a/infrastructure/docker-backup/image/resources/end-maintenance.sh +++ b/infrastructure/docker-backup/image/resources/end-maintenance.sh @@ -1,5 +1,7 @@ #!/bin/bash +set -x + if test -f "/var/backups/config/config.orig"; then rm /var/backups/config/config.php diff --git a/infrastructure/docker-backup/image/resources/restore.sh b/infrastructure/docker-backup/image/resources/restore.sh index 1ebef16..b536977 100755 --- a/infrastructure/docker-backup/image/resources/restore.sh +++ b/infrastructure/docker-backup/image/resources/restore.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -Eeo pipefail +set -Eeox pipefail function main() { @@ -19,10 +19,13 @@ function main() { restore-db restore-directory '/var/backups/' + end-maintenance.sh + } source /usr/local/lib/functions.sh source /usr/local/lib/pg-functions.sh source /usr/local/lib/file-functions.sh + main diff --git a/infrastructure/docker-backup/image/resources/start-maintenance.sh b/infrastructure/docker-backup/image/resources/start-maintenance.sh index 474ede1..e3bbace 100644 --- a/infrastructure/docker-backup/image/resources/start-maintenance.sh +++ b/infrastructure/docker-backup/image/resources/start-maintenance.sh @@ -1,10 +1,15 @@ #!/bin/bash +set -x + if [ !
-f "/var/backups/config/config.orig" ]; then rm -f /var/backups/config/config.orig cp /var/backups/config/config.php /var/backups/config/config.orig + + # put nextcloud in maintenance mode sed -i "s/);/ \'maintenance\' => true,\n);/g" /var/backups/config/config.php + chown www-data:root /var/backups/config/config.php touch /var/backups/config/config.php diff --git a/infrastructure/docker-backup/test/Dockerfile b/infrastructure/docker-backup/test/Dockerfile index eb92122..a8aa422 100644 --- a/infrastructure/docker-backup/test/Dockerfile +++ b/infrastructure/docker-backup/test/Dockerfile @@ -1,7 +1,7 @@ -FROM meissa-cloud-backup +FROM c4k-cloud-backup -RUN apt update -RUN apt -yqq --no-install-recommends --yes install curl default-jre-headless +RUN apt update > /dev/null +RUN apt -yqq --no-install-recommends --yes install curl default-jre-headless > /dev/null RUN curl -L -o /tmp/serverspec.jar https://github.com/DomainDrivenArchitecture/dda-serverspec-crate/releases/download/2.0.0/dda-serverspec-standalone.jar diff --git a/package.json b/package.json index 3dedd56..fd2cde0 100644 --- a/package.json +++ b/package.json @@ -2,7 +2,7 @@ "name": "c4k-nextcloud", "description": "Generate c4k yaml for a nextcloud deployment.", "author": "meissa GmbH", - "version": "1.0.3-SNAPSHOT", + "version": "2.0.1-SNAPSHOT", "homepage": "https://gitlab.com/domaindrivenarchitecture/c4k-nextcloud#readme", "repository": "https://www.npmjs.com/package/c4k-nextcloud", "license": "APACHE2", diff --git a/src/main/cljc/dda/c4k_nextcloud/backup.cljc b/src/main/cljc/dda/c4k_nextcloud/backup.cljc index 19973c1..4a499c9 100644 --- a/src/main/cljc/dda/c4k_nextcloud/backup.cljc +++ b/src/main/cljc/dda/c4k_nextcloud/backup.cljc @@ -17,6 +17,7 @@ "backup/config.yaml" (rc/inline "backup/config.yaml") "backup/cron.yaml" (rc/inline "backup/cron.yaml") "backup/secret.yaml" (rc/inline "backup/secret.yaml") + "backup/backup-restore-deployment.yaml" (rc/inline "backup/backup-restore-deployment.yaml") (throw (js/Error. "Undefined Resource!"))))) (defn generate-config [my-conf] @@ -28,6 +29,12 @@ (defn generate-cron [] (yaml/from-string (yaml/load-resource "backup/cron.yaml"))) +(defn generate-backup-restore-deployment [my-conf] + (let [backup-restore-yaml (yaml/from-string (yaml/load-resource "backup/backup-restore-deployment.yaml"))] + (if (and (contains? my-conf :local-integration-test) (= true (:local-integration-test my-conf))) + (cm/replace-named-value backup-restore-yaml "CERTIFICATE_FILE" "/var/run/secrets/localstack-secrets/ca.crt") + backup-restore-yaml))) + (defn generate-secret [my-auth] (let [{:keys [aws-access-key-id aws-secret-access-key restic-password]} my-auth] (-> diff --git a/src/main/cljc/dda/c4k_nextcloud/core.cljc b/src/main/cljc/dda/c4k_nextcloud/core.cljc index cf4d191..a0cc664 100644 --- a/src/main/cljc/dda/c4k_nextcloud/core.cljc +++ b/src/main/cljc/dda/c4k_nextcloud/core.cljc @@ -45,7 +45,8 @@ (when (contains? config :restic-repository) [(yaml/to-string (backup/generate-config config)) (yaml/to-string (backup/generate-secret config)) - (yaml/to-string (backup/generate-cron))])))) + (yaml/to-string (backup/generate-cron)) + (yaml/to-string (backup/generate-backup-restore-deployment config))])))) (defn-spec generate any? [my-config config? 
diff --git a/src/main/cljc/dda/c4k_nextcloud/nextcloud.cljc b/src/main/cljc/dda/c4k_nextcloud/nextcloud.cljc index c5b9031..9eddd08 100644 --- a/src/main/cljc/dda/c4k_nextcloud/nextcloud.cljc +++ b/src/main/cljc/dda/c4k_nextcloud/nextcloud.cljc @@ -7,7 +7,7 @@ [dda.c4k-common.predicate :as cp] [dda.c4k-common.common :as cm])) -(s/def ::fqdn cp/fqdn-string?) +(s/def ::fqdn any?) ; TODO: Fix fqdn-string? to include localhost (s/def ::issuer cp/letsencrypt-issuer?) (s/def ::restic-repository string?) (s/def ::nextcloud-data-volume-path string?) diff --git a/src/main/resources/backup/backup-restore-deployment.yaml b/src/main/resources/backup/backup-restore-deployment.yaml new file mode 100644 index 0000000..85787f6 --- /dev/null +++ b/src/main/resources/backup/backup-restore-deployment.yaml @@ -0,0 +1,85 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: backup-restore +spec: + replicas: 0 + selector: + matchLabels: + app: backup-restore + strategy: + type: Recreate + template: + metadata: + labels: + app: backup-restore + app.kubernetes.io/name: backup-restore + app.kubernetes.io/part-of: cloud + spec: + containers: + - name: backup-app + image: domaindrivenarchitecture/c4k-cloud-backup + imagePullPolicy: IfNotPresent + command: ["/entrypoint-start-and-wait.sh"] + env: + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: postgres-secret + key: postgres-user + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: postgres-secret + key: postgres-password + - name: POSTGRES_DB + valueFrom: + configMapKeyRef: + name: postgres-config + key: postgres-db + - name: POSTGRES_HOST + value: "postgresql-service:5432" + - name: POSTGRES_SERVICE + value: "postgresql-service" + - name: POSTGRES_PORT + value: "5432" + - name: AWS_DEFAULT_REGION + value: eu-central-1 + - name: AWS_ACCESS_KEY_ID_FILE + value: /var/run/secrets/backup-secrets/aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY_FILE + value: /var/run/secrets/backup-secrets/aws-secret-access-key + - name: RESTIC_REPOSITORY + valueFrom: + configMapKeyRef: + name: backup-config + key: restic-repository + - name: RESTIC_PASSWORD_FILE + value: /var/run/secrets/backup-secrets/restic-password + - name: CERTIFICATE_FILE + value: "" + volumeMounts: + - name: cloud-data-volume + mountPath: /var/backups + - name: backup-secret-volume + mountPath: /var/run/secrets/backup-secrets + readOnly: true + - name: cloud-secret-volume + mountPath: /var/run/secrets/cloud-secrets + readOnly: true + - name: localstack-secret-volume + mountPath: /var/run/secrets/localstack-secrets + readOnly: true + volumes: + - name: cloud-data-volume + persistentVolumeClaim: + claimName: cloud-pvc + - name: cloud-secret-volume + secret: + secretName: cloud-secret + - name: backup-secret-volume + secret: + secretName: backup-secret + - name: localstack-secret-volume + secret: + secretName: localstack-secret \ No newline at end of file diff --git a/src/main/resources/backup/backup-restore.yaml b/src/main/resources/backup/backup-restore.yaml deleted file mode 100644 index 2c6aafb..0000000 --- a/src/main/resources/backup/backup-restore.yaml +++ /dev/null @@ -1,68 +0,0 @@ -kind: Pod -apiVersion: v1 -metadata: - name: backup-restore - labels: - app.kubernetes.io/name: backup-restore - app.kubernetes.io/part-of: cloud -spec: - containers: - - name: backup-app - image: domaindrivenarchitecture/c4k-cloud-backup - imagePullPolicy: IfNotPresent - command: ["/entrypoint-start-and-wait.sh"] - env: - - name: POSTGRES_USER - valueFrom: - secretKeyRef: - name: postgres-secret - 
key: postgres-user - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: postgres-secret - key: postgres-password - - name: POSTGRES_DB - valueFrom: - configMapKeyRef: - name: postgres-config - key: postgres-db - - name: POSTGRES_HOST - value: "postgresql-service:5432" - - name: POSTGRES_SERVICE - value: "postgresql-service" - - name: POSTGRES_PORT - value: "5432" - - name: AWS_DEFAULT_REGION - value: eu-central-1 - - name: AWS_ACCESS_KEY_ID_FILE - value: /var/run/secrets/backup-secrets/aws-access-key-id - - name: AWS_SECRET_ACCESS_KEY_FILE - value: /var/run/secrets/backup-secrets/aws-secret-access-key - - name: RESTIC_REPOSITORY - valueFrom: - configMapKeyRef: - name: backup-config - key: restic-repository - - name: RESTIC_PASSWORD_FILE - value: /var/run/secrets/backup-secrets/restic-password - volumeMounts: - - name: cloud-data-volume - mountPath: /var/backups - - name: backup-secret-volume - mountPath: /var/run/secrets/backup-secrets - readOnly: true - - name: cloud-secret-volume - mountPath: /var/run/secrets/cloud-secrets - readOnly: true - volumes: - - name: cloud-data-volume - persistentVolumeClaim: - claimName: cloud-pvc - - name: cloud-secret-volume - secret: - secretName: cloud-secret - - name: backup-secret-volume - secret: - secretName: backup-secret - restartPolicy: OnFailure \ No newline at end of file diff --git a/src/main/resources/nextcloud/deployment.yaml b/src/main/resources/nextcloud/deployment.yaml index b825eef..4bca0a9 100644 --- a/src/main/resources/nextcloud/deployment.yaml +++ b/src/main/resources/nextcloud/deployment.yaml @@ -12,6 +12,7 @@ spec: template: metadata: labels: + app: cloud-app app.kubernetes.io/name: cloud-pod app.kubernetes.io/application: cloud redeploy: v3 diff --git a/src/test/cljc/dda/c4k_nextcloud/core_test.cljc b/src/test/cljc/dda/c4k_nextcloud/core_test.cljc index 27d3fbb..8203dd2 100644 --- a/src/test/cljc/dda/c4k_nextcloud/core_test.cljc +++ b/src/test/cljc/dda/c4k_nextcloud/core_test.cljc @@ -7,7 +7,7 @@ )) (deftest should-k8s-objects - (is (= 16 + (is (= 17 (count (cut/k8s-objects {:fqdn "nextcloud-neu.prod.meissa-gmbh.de" :postgres-db-user "nextcloud" :postgres-db-password "nextcloud-db-password" @@ -20,7 +20,7 @@ :aws-secret-access-key "aws-secret" :restic-password "restic-pw" :restic-repository "restic-repository"})))) - (is (= 14 + (is (= 15 (count (cut/k8s-objects {:fqdn "nextcloud-neu.prod.meissa-gmbh.de" :postgres-db-user "nextcloud" :postgres-db-password "nextcloud-db-password" diff --git a/src/test/resources/local-integration-test/README.md b/src/test/resources/local-integration-test/README.md new file mode 100644 index 0000000..c094aa1 --- /dev/null +++ b/src/test/resources/local-integration-test/README.md @@ -0,0 +1,84 @@ +# Usage + +`setup-local-s3.sh [BUCKET_NAME]`: +- [BUCKET_NAME] is optional, "mybucket" will be used if not specified +- sets up a k3s instance +- installs a localstack pod +- creates http and https routing to localstack via localhost +- saves the self-signed certificate as ca.crt +- uses the certificate to initialize a restic repo at `https://k3stesthost/BUCKET_NAME` + +Note: If you cannot connect to "k3stesthost/health", you might need to ensure that the ingress IP is mapped to the required host names k3stesthost and cloudhost. With `sudo k3s kubectl get ingress` you can view the ingress IP (e.g. 10.0.2.15); then add a line to "/etc/hosts", e.g.
`10.0.2.15 k3stesthost cloudhost` + +`start-k3s.sh`: +- creates and starts a k3s instance + +`k3s-uninstall.sh`: +- deletes everything k3s related + +## Other useful commands +- `sudo k3s kubectl get pods` +- `curl k3stesthost/health` + expected: `{"services": {"s3": "running"}, "features": {"persistence": "disabled", "initScripts": "initialized"}}` + +### Requires AWS CLI +- create bucket `aws --endpoint-url=http://k3stesthost s3 mb s3://mybucket` +- list buckets `aws --endpoint-url=http://k3stesthost s3 ls` +- upload something `aws --endpoint-url=http://k3stesthost s3 cp test.txt s3://mybucket` +- check files `aws --endpoint-url=http://k3stesthost s3 ls s3://mybucket` + +## Run docker locally + + +``` +docker pull docker:19.03.12-dind +docker run -d --privileged --name integration-test docker:19.03.12-dind +docker exec integration-test sh -c "apk add bash" + +``` + +Set up the docker container "integration-test": + +``` +docker cp ../../../../../c4k-nextcloud/ integration-test:/ +docker exec -it integration-test sh +cd /c4k-nextcloud/src/test/resources/local-integration-test +./setup-docker.sh +``` + +## Deploy nextcloud + +### Requirements + +* leiningen (install with `sudo apt install leiningen`) +* In the project's root execute: `lein uberjar` +* Change the file "valid-config.edn" according to your settings (e.g. `:fqdn "cloudhost"` and `:restic-repository "s3://k3stesthost:mybucket"`). + +### Deploy to k3s + +* Create and deploy the k8s yaml: +`java -jar target/uberjar/c4k-nextcloud-standalone.jar valid-config.edn valid-auth.edn | sudo k3s kubectl apply -f -` + +Some of the steps may take a few minutes to take effect, but eventually nextcloud should be available at https://cloudhost + +### Deploy to k3d + +k3d is a k3s distribution that runs inside a container. To install k3d, run `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash` or have a look at https://k3d.io/v5.0.3/.
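+ +To verify the installation, a quick check like the following should work (a minimal sketch; `k3d version` and `kubectl version --client` just print the installed client versions): + +```bash +k3d version +kubectl version --client +``` +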
+* Start a k3d cluster, deploy s3 and nextcloud on it, and test backup and restore: `./setup-local-s3-on-k3d.sh` + +Some steps may take a couple of minutes to take effect, but eventually nextcloud should be available at https://cloudhost + +#### Remove k3d cluster + +`k3d cluster delete nextcloud` + +## Test in local gitlab runner + +See https://stackoverflow.com/questions/32933174/use-gitlab-ci-to-run-tests-locally + +The following commands need to be run in the project root: + +`docker run -d --name gitlab-runner --restart always -v $PWD:$PWD -v /var/run/docker.sock:/var/run/docker.sock gitlab/gitlab-runner:latest` + +`docker exec -it -w $PWD gitlab-runner gitlab-runner exec docker nextcloud-integrationtest --docker-privileged --docker-volumes '/var/run/docker.sock:/var/run/docker.sock'` \ No newline at end of file diff --git a/src/test/resources/local-integration-test/certificate.yaml b/src/test/resources/local-integration-test/certificate.yaml new file mode 100644 index 0000000..4c39759 --- /dev/null +++ b/src/test/resources/local-integration-test/certificate.yaml @@ -0,0 +1,20 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: localstack-cert + namespace: default +spec: + secretName: localstack-secret + commonName: k3stesthost + dnsNames: + - k3stesthost + issuerRef: + name: selfsigning-issuer + kind: ClusterIssuer +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: selfsigning-issuer +spec: + selfSigned: {} \ No newline at end of file diff --git a/src/test/resources/local-integration-test/kubectl.sh b/src/test/resources/local-integration-test/kubectl.sh new file mode 100755 index 0000000..bb1311f --- /dev/null +++ b/src/test/resources/local-integration-test/kubectl.sh @@ -0,0 +1,17 @@ +# Set the default kube context if present +DEFAULT_KUBE_CONTEXTS="$HOME/.kube/config" +if test -f "${DEFAULT_KUBE_CONTEXTS}" +then + export KUBECONFIG="$DEFAULT_KUBE_CONTEXTS" +fi + +# Additional contexts should be in ~/.kube/custom-contexts/ +CUSTOM_KUBE_CONTEXTS="$HOME/.kube/custom-contexts" +mkdir -p "${CUSTOM_KUBE_CONTEXTS}" +OIFS="$IFS" +IFS=$'\n' +for contextFile in `find "${CUSTOM_KUBE_CONTEXTS}" -type f -name "*.yml"` +do + export KUBECONFIG="$contextFile:$KUBECONFIG" +done +IFS="$OIFS" diff --git a/src/test/resources/local-integration-test/localstack.yaml b/src/test/resources/local-integration-test/localstack.yaml new file mode 100644 index 0000000..325a4cf --- /dev/null +++ b/src/test/resources/local-integration-test/localstack.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: localstack +spec: + selector: + matchLabels: + app: localstack + strategy: + type: Recreate + template: + metadata: + labels: + app: localstack + spec: + containers: + - image: localstack/localstack + name: localstack-app + imagePullPolicy: IfNotPresent + env: + - name: SERVICES + value: s3 +--- +# service +apiVersion: v1 +kind: Service +metadata: + name: localstack-service +spec: + selector: + app: localstack + ports: + - port: 4566 +--- +apiVersion: v1 +kind: Secret +metadata: + name: localstack-secret +type: Opaque +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ingress-localstack + annotations: + cert-manager.io/cluster-issuer: selfsigning-issuer + kubernetes.io/ingress.class: traefik + traefik.ingress.kubernetes.io/redirect-entry-point: https + namespace: default +spec: + tls: + - hosts: + - k3stesthost + secretName: localstack-secret + rules: + - host: k3stesthost + http: + paths: + - path: / + pathType: Prefix + backend:
service: + name: localstack-service + port: + number: 4566 \ No newline at end of file diff --git a/src/test/resources/local-integration-test/setup-docker.sh b/src/test/resources/local-integration-test/setup-docker.sh new file mode 100755 index 0000000..8727339 --- /dev/null +++ b/src/test/resources/local-integration-test/setup-docker.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +set -x + +docker volume create k3s-server + +name='inttst' + +[[ $(docker ps -f "name=$name" --format '{{.Names}}') == $name ]] || docker run --name $name -d --privileged --tmpfs /run --tmpfs /var/run --restart always -e K3S_TOKEN=12345678901234 -e K3S_KUBECONFIG_OUTPUT=./kubeconfig.yaml -e K3S_KUBECONFIG_MODE=666 -v k3s-server:/var/lib/rancher/k3s:z -v $(pwd):/output:z -p 6443:6443 -p 80:80 -p 443:443 rancher/k3s server --cluster-init --tls-san k3stesthost --tls-san cloudhost + +docker ps + +export timeout=30; while ! docker exec $name sh -c "test -f /var/lib/rancher/k3s/server/kubeconfig.yaml"; do if [ "$timeout" == 0 ]; then echo "ERROR: Timeout while waiting for file."; break; fi; sleep 1; ((timeout--)); done + +mkdir -p $HOME/.kube/ + +docker cp $name:/var/lib/rancher/k3s/server/kubeconfig.yaml $HOME/.kube/config + +if [ "$timeout" == 0 ] +then + echo ------------------------------------------------------- + find / -name "kubeconfig.yaml"; + echo ------------------------------------------------------- + docker ps -a + echo ------------------------------------------------------- + exit 1 +fi + +echo "127.0.0.1 kubernetes" >> /etc/hosts + +apk add wget curl bash sudo openjdk8 + +wget -P /etc/apk/keys/ https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub +apk add --no-cache --repository=https://apkproxy.herokuapp.com/sgerrand/alpine-pkg-leiningen leiningen + +curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.22.0/bin/linux/amd64/kubectl +chmod +x ./kubectl +mv ./kubectl /usr/local/bin/kubectl + +sleep 20 #allow some time to startup k3s +docker ps -a + +swapoff -a # can this be removed ? + +export KUBECONFIG=$HOME/.kube/config + +pwd +cd ./c4k-nextcloud/src/test/resources/local-integration-test && ./setup-local-s3-on-k3d.sh \ No newline at end of file diff --git a/src/test/resources/local-integration-test/setup-local-s3-on-k3d.sh b/src/test/resources/local-integration-test/setup-local-s3-on-k3d.sh new file mode 100755 index 0000000..c12f51e --- /dev/null +++ b/src/test/resources/local-integration-test/setup-local-s3-on-k3d.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +set -x + +function main() +{ + # enable tls for k3s with cert-manager + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml + + kubectl apply -f localstack.yaml + + until kubectl apply -f certificate.yaml + do + echo "[INFO] Waiting for certificate ..." 
+ sleep 30 + done + + # wait for ingress to be ready + bash -c 'external_ip=""; while [ -z $external_ip ]; do echo "[INFO] Waiting for end point..."; external_ip=$(kubectl get ingress -o jsonpath="{$.items[*].status.loadBalancer.ingress[*].ip}"); [ -z "$external_ip" ] && sleep 10; done; echo "End point ready - $external_ip";' + + export INGRESS_IP=$(kubectl get ingress ingress-localstack -o=jsonpath="{.status.loadBalancer.ingress[0].ip}") + + cd ../../../../ # c4k-nextcloud project root + lein uberjar + java -jar target/uberjar/c4k-nextcloud-standalone.jar config-local.edn auth-local.edn | kubectl apply -f - + + CLOUD_POD=$(kubectl get pod -l app=cloud-app -o name) + kubectl wait $CLOUD_POD --for=condition=Ready --timeout=240s + + # wait for the nextcloud config file to become available + timeout 180 bash -c "kubectl exec -t $CLOUD_POD -- bash -c \"until [ -f /var/www/html/config/config.php ]; do sleep 10; done\"" + + # ensure one instance of the backup-restore pod is running + kubectl scale deployment backup-restore --replicas 1 + + # wait for localstack health endpoint + echo "$INGRESS_IP k3stesthost cloudhost" >> /etc/hosts + until curl --fail --silent k3stesthost/health | grep -oe '"s3": "available"' -oe '"s3": "running"' + do + curl --fail k3stesthost/health + echo "[INFO] Waiting for s3 running" + sleep 10 + done + + BACKUP_POD=$(kubectl get pod -l app=backup-restore -o name) + kubectl wait $BACKUP_POD --for=condition=Ready --timeout=240s + + kubectl exec -t $BACKUP_POD -- bash -c "echo \"$INGRESS_IP k3stesthost cloudhost\" >> /etc/hosts" + kubectl exec -t $BACKUP_POD -- /usr/local/bin/init.sh + + echo ================= BACKUP ================= + kubectl exec -t $BACKUP_POD -- /usr/local/bin/backup.sh + + sleep 10 # avoid race conditions + + echo ================= RESTORE ================= + kubectl exec -t $BACKUP_POD -- /usr/local/bin/restore.sh +} + +main "$@" diff --git a/src/test/resources/local-integration-test/setup-local-s3.sh b/src/test/resources/local-integration-test/setup-local-s3.sh new file mode 100755 index 0000000..206d569 --- /dev/null +++ b/src/test/resources/local-integration-test/setup-local-s3.sh @@ -0,0 +1,34 @@ +function main() +{ + local bucket_name="${1:-mybucket}"; shift + + ./start-k3s.sh + + sudo k3s kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml + + sudo k3s kubectl apply -f localstack.yaml + + until sudo k3s kubectl apply -f certificate.yaml + do + echo "*** Waiting for certificate ...
***" + sleep 10 + done + echo + + echo + echo "[INFO] Waiting for localstack health endpoint" + until curl --connect-timeout 3 -s -f -o /dev/null "k3stesthost/health" + do + sleep 5 + done + echo + + sudo k3s kubectl get secret localstack-secret -o jsonpath="{.data.ca\.crt}" | base64 --decode > ca.crt + + #aws --endpoint-url=http://localhost s3 mb s3://$bucket_name + export RESTIC_PASSWORD="test-password" + restic init --cacert ca.crt -r s3://k3stesthost/$bucket_name + +} + +main $@ diff --git a/src/test/resources/local-integration-test/setup-local.sh b/src/test/resources/local-integration-test/setup-local.sh new file mode 100755 index 0000000..6f70064 --- /dev/null +++ b/src/test/resources/local-integration-test/setup-local.sh @@ -0,0 +1,9 @@ +function main() +{ + ./start-k3s.sh + + sudo k3s kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml + +} + +main diff --git a/src/test/resources/local-integration-test/start-k3d.sh b/src/test/resources/local-integration-test/start-k3d.sh new file mode 100755 index 0000000..463dee3 --- /dev/null +++ b/src/test/resources/local-integration-test/start-k3d.sh @@ -0,0 +1 @@ +KUBECONFIG=~/.kube/custom-contexts/k3d-config.yml k3d cluster create nextcloud --k3s-arg '--tls-san cloudhost@loadbalancer' --port 80:80@loadbalancer --port 443:443@loadbalancer --api-port 6443 --kubeconfig-update-default \ No newline at end of file diff --git a/src/test/resources/local-integration-test/start-k3s.sh b/src/test/resources/local-integration-test/start-k3s.sh new file mode 100755 index 0000000..ea45aa1 --- /dev/null +++ b/src/test/resources/local-integration-test/start-k3s.sh @@ -0,0 +1 @@ +curl -sfL https://get.k3s.io | K3S_NODE_NAME=k3stesthost INSTALL_K3S_EXEC='--tls-san cloudhost' sh -