Merge branch 'local-integration-test' into 'master'

Local integration test

See merge request domaindrivenarchitecture/c4k-nextcloud!2

Commit d228fdfd2a: 28 changed files with 540 additions and 81 deletions
.gitignore (vendored, 11 changes)

@@ -22,8 +22,15 @@ logs/
 *.iml
 .idea/
 
-#valid-auth.edn
-#valid-config.edn
+# config files
 my-auth.edn
+my-config.edn
 auth.edn
 config.edn
+
+# certificate
+ca.crt
+
+# caches
+.clj-kondo/.cache
+.lsp/.cache/
.gitlab-ci.yml

@@ -4,10 +4,17 @@ stages:
   - security
   - upload
   - image
+  - integrationtest
 
 services:
   - docker:19.03.12-dind
 
+.only-master: &only-master
+  rules:
+    - if: '$CI_COMMIT_REF_NAME == "master"'
+      when: always
+    - when: never
+
 .cljs-job: &cljs
   image: domaindrivenarchitecture/shadow-cljs
   cache:

@@ -32,18 +39,21 @@ services:
 
 test-cljs:
   <<: *cljs
+  <<: *only-master
   stage: build_and_test
   script:
     - shadow-cljs compile test
 
 test-clj:
   <<: *clj
+  <<: *only-master
   stage: build_and_test
   script:
     - lein test
 
 test-schema:
   <<: *clj
+  <<: *only-master
   stage: build_and_test
   script:
     - lein uberjar

@@ -77,6 +87,7 @@ test-schema:
 
 package-uberjar:
   <<: *clj
+  <<: *only-master
   stage: package
   script:
     - sha256sum target/uberjar/c4k-nextcloud-standalone.jar > target/uberjar/c4k-nextcloud-standalone.jar.sha256

@@ -86,6 +97,7 @@ package-uberjar:
       - target/uberjar
 
 sast:
+  <<: *only-master
   variables:
     SAST_EXCLUDED_ANALYZERS:
       bandit, brakeman, flawfinder, gosec, kubesec, phpcs-security-audit,

@@ -138,4 +150,26 @@ backup-image-test-publish:
   rules:
     - if: '$CI_COMMIT_TAG != null'
   script:
     - cd infrastructure/docker-backup && pyb image test publish
+
+nextcloud-integrationtest:
+  stage: integrationtest
+  image: registry.gitlab.com/gitlab-org/cluster-integration/helm-install-image/releases/3.7.1-kube-1.20.11-alpine-3.14
+  services:
+    - name: registry.gitlab.com/gitlab-org/cluster-integration/test-utils/k3s-gitlab-ci/releases/v1.22.2-k3s2
+      alias: k3s
+  script:
+    - apk add curl sudo bash
+    - apk add wget curl bash sudo openjdk8
+    - wget -P /etc/apk/keys/ https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub
+    - apk add --no-cache --repository=https://apkproxy.herokuapp.com/sgerrand/alpine-pkg-leiningen leiningen
+
+    - mkdir -p ${HOME}/.kube/
+    - curl -f k3s:8081 > ${HOME}/.kube/config
+    - kubectl version
+    - kubectl cluster-info
+    - echo "---------- Integration test -------------"
+    - pwd
+    - cd ./src/test/resources/local-integration-test/ && ./setup-local-s3-on-k3d.sh
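The new `nextcloud-integrationtest` job can also be exercised outside of GitLab's shared runners. A minimal sketch, mirroring the commands from src/test/resources/local-integration-test/README.md later in this diff (run from the project root; the container name is just the one used there):

```bash
# start a local gitlab-runner container that can reach the host's docker daemon
docker run -d --name gitlab-runner --restart always \
  -v $PWD:$PWD -v /var/run/docker.sock:/var/run/docker.sock \
  gitlab/gitlab-runner:latest

# execute the integrationtest job defined in .gitlab-ci.yml inside that runner
docker exec -it -w $PWD gitlab-runner \
  gitlab-runner exec docker nextcloud-integrationtest \
  --docker-privileged \
  --docker-volumes '/var/run/docker.sock:/var/run/docker.sock'
```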
auth-local.edn (new file, 7 lines)

{:postgres-db-user "nextcloud"
 :postgres-db-password "dbpass"
 :nextcloud-admin-user "cloudadmin"
 :nextcloud-admin-password "cloudpassword"
 :aws-access-key-id ""
 :aws-secret-access-key ""
 :restic-password "test-password"}
config-local.edn (new file, 6 lines)

{:fqdn "cloudhost"
 :issuer :staging
 :nextcloud-data-volume-path "/var/cloud"
 :postgres-data-volume-path "/var/postgres"
 :restic-repository "s3://k3stesthost/mybucket"
 :local-integration-test true}
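These two edn files feed the local integration test: the generator is built as an uberjar and its output is piped straight into the cluster. A short sketch of how they are consumed, taken from setup-local-s3-on-k3d.sh further down in this diff (assumes leiningen and a reachable cluster):

```bash
# build the generator and apply the locally generated k8s resources
lein uberjar
java -jar target/uberjar/c4k-nextcloud-standalone.jar config-local.edn auth-local.edn \
  | kubectl apply -f -
```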
infrastructure/README.md (new file, 30 lines)

# Build images

## Prerequisites

See also https://pypi.org/project/ddadevops/

```bash
# Ensure that your python3 version is at least Python 3.7!

sudo apt install python3-pip
pip3 install pip --upgrade --user
pip3 install pybuilder ddadevops deprecation --user
export PATH=$PATH:~/.local/bin

# terraform
pip3 install dda-python-terraform --user

# AwsMixin
pip3 install boto3 --user

# AwsMfaMixin
pip3 install boto3 mfa --user
```

In the folder "docker-backup" or "docker-nextcloud" respectively:

```bash
# step test is optional
pyb image test publish
```
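A trivial sanity check before and after the installs above (a sketch; the exact pyb location depends on the pip --user setup):

```bash
# verify the interpreter meets the >= 3.7 requirement and that pyb is on PATH
export PATH=$PATH:~/.local/bin
python3 --version
which pyb    # should resolve to something like ~/.local/bin/pyb
```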
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-set -o pipefail
+set -xo pipefail
 
 function main() {
 
@@ -1,5 +1,7 @@
 #!/bin/bash
 
+set -x
+
 if test -f "/var/backups/config/config.orig"; then
 
 rm /var/backups/config/config.php
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-set -Eeo pipefail
+set -Eeox pipefail
 
 function main() {
 

@@ -19,10 +19,13 @@ function main() {
   restore-db
   restore-directory '/var/backups/'
+
+  end-maintenance.sh
 
 }
 
 source /usr/local/lib/functions.sh
 source /usr/local/lib/pg-functions.sh
 source /usr/local/lib/file-functions.sh
 
 main
+
@@ -1,10 +1,15 @@
 #!/bin/bash
 
+set -x
+
 if [ ! -f "/var/backups/config/config.orig" ]; then
+
 rm -f /var/backups/config/config.orig
 cp /var/backups/config/config.php /var/backups/config/config.orig
 
+# put nextcloud in maintenance mode
 sed -i "s/);/ \'maintenance\' => true,\n);/g" /var/backups/config/config.php
 
 chown www-data:root /var/backups/config/config.php
 touch /var/backups/config/config.php
+
@@ -1,7 +1,7 @@
-FROM meissa-cloud-backup
+FROM c4k-cloud-backup
 
-RUN apt update
-RUN apt -yqq --no-install-recommends --yes install curl default-jre-headless
+RUN apt update > /dev/null
+RUN apt -yqq --no-install-recommends --yes install curl default-jre-headless > /dev/null
 
 RUN curl -L -o /tmp/serverspec.jar https://github.com/DomainDrivenArchitecture/dda-serverspec-crate/releases/download/2.0.0/dda-serverspec-standalone.jar
 
package.json

@@ -2,7 +2,7 @@
   "name": "c4k-nextcloud",
   "description": "Generate c4k yaml for a nextcloud deployment.",
   "author": "meissa GmbH",
-  "version": "1.0.3-SNAPSHOT",
+  "version": "2.0.1-SNAPSHOT",
   "homepage": "https://gitlab.com/domaindrivenarchitecture/c4k-nextcloud#readme",
   "repository": "https://www.npmjs.com/package/c4k-nextcloud",
   "license": "APACHE2",
@@ -17,6 +17,7 @@
     "backup/config.yaml" (rc/inline "backup/config.yaml")
     "backup/cron.yaml" (rc/inline "backup/cron.yaml")
     "backup/secret.yaml" (rc/inline "backup/secret.yaml")
+    "backup/backup-restore-deployment.yaml" (rc/inline "backup/backup-restore-deployment.yaml")
     (throw (js/Error. "Undefined Resource!")))))
 
 (defn generate-config [my-conf]

@@ -28,6 +29,12 @@
 (defn generate-cron []
   (yaml/from-string (yaml/load-resource "backup/cron.yaml")))
 
+(defn generate-backup-restore-deployment [my-conf]
+  (let [backup-restore-yaml (yaml/from-string (yaml/load-resource "backup/backup-restore-deployment.yaml"))]
+    (if (and (contains? my-conf :local-integration-test) (= true (:local-integration-test my-conf)))
+      (cm/replace-named-value backup-restore-yaml "CERTIFICATE_FILE" "/var/run/secrets/localstack-secrets/ca.crt")
+      backup-restore-yaml)))
+
 (defn generate-secret [my-auth]
   (let [{:keys [aws-access-key-id aws-secret-access-key restic-password]} my-auth]
     (->
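generate-backup-restore-deployment only rewrites the CERTIFICATE_FILE value when :local-integration-test is set in the config. One way to see the effect on the generated output (a sketch; the grep pattern is illustrative and assumes the uberjar has been built):

```bash
# with :local-integration-test true in config-local.edn, CERTIFICATE_FILE should
# point at the localstack CA instead of being empty
java -jar target/uberjar/c4k-nextcloud-standalone.jar config-local.edn auth-local.edn \
  | grep -A 1 'name: CERTIFICATE_FILE'
```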
@@ -45,7 +45,8 @@
      (when (contains? config :restic-repository)
        [(yaml/to-string (backup/generate-config config))
         (yaml/to-string (backup/generate-secret config))
-        (yaml/to-string (backup/generate-cron))]))))
+        (yaml/to-string (backup/generate-cron))
+        (yaml/to-string (backup/generate-backup-restore-deployment config))]))))
 
 (defn-spec generate any?
   [my-config config?
@@ -7,7 +7,7 @@
    [dda.c4k-common.predicate :as cp]
    [dda.c4k-common.common :as cm]))
 
-(s/def ::fqdn cp/fqdn-string?)
+(s/def ::fqdn any?) ; TODO: Fix fqdn-string? to include localhost
 (s/def ::issuer cp/letsencrypt-issuer?)
 (s/def ::restic-repository string?)
 (s/def ::nextcloud-data-volume-path string?)
src/main/resources/backup/backup-restore-deployment.yaml (new file, 85 lines)

apiVersion: apps/v1
kind: Deployment
metadata:
  name: backup-restore
spec:
  replicas: 0
  selector:
    matchLabels:
      app: backup-restore
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: backup-restore
        app.kubernetes.io/name: backup-restore
        app.kubernetes.io/part-of: cloud
    spec:
      containers:
      - name: backup-app
        image: domaindrivenarchitecture/c4k-cloud-backup
        imagePullPolicy: IfNotPresent
        command: ["/entrypoint-start-and-wait.sh"]
        env:
        - name: POSTGRES_USER
          valueFrom:
            secretKeyRef:
              name: postgres-secret
              key: postgres-user
        - name: POSTGRES_PASSWORD
          valueFrom:
            secretKeyRef:
              name: postgres-secret
              key: postgres-password
        - name: POSTGRES_DB
          valueFrom:
            configMapKeyRef:
              name: postgres-config
              key: postgres-db
        - name: POSTGRES_HOST
          value: "postgresql-service:5432"
        - name: POSTGRES_SERVICE
          value: "postgresql-service"
        - name: POSTGRES_PORT
          value: "5432"
        - name: AWS_DEFAULT_REGION
          value: eu-central-1
        - name: AWS_ACCESS_KEY_ID_FILE
          value: /var/run/secrets/backup-secrets/aws-access-key-id
        - name: AWS_SECRET_ACCESS_KEY_FILE
          value: /var/run/secrets/backup-secrets/aws-secret-access-key
        - name: RESTIC_REPOSITORY
          valueFrom:
            configMapKeyRef:
              name: backup-config
              key: restic-repository
        - name: RESTIC_PASSWORD_FILE
          value: /var/run/secrets/backup-secrets/restic-password
        - name: CERTIFICATE_FILE
          value: ""
        volumeMounts:
        - name: cloud-data-volume
          mountPath: /var/backups
        - name: backup-secret-volume
          mountPath: /var/run/secrets/backup-secrets
          readOnly: true
        - name: cloud-secret-volume
          mountPath: /var/run/secrets/cloud-secrets
          readOnly: true
        - name: localstack-secret-volume
          mountPath: /var/run/secrets/localstack-secrets
          readOnly: true
      volumes:
      - name: cloud-data-volume
        persistentVolumeClaim:
          claimName: cloud-pvc
      - name: cloud-secret-volume
        secret:
          secretName: cloud-secret
      - name: backup-secret-volume
        secret:
          secretName: backup-secret
      - name: localstack-secret-volume
        secret:
          secretName: localstack-secret
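The deployment is created with replicas: 0, so no backup-restore pod runs during normal operation; the integration test scales it up on demand. A sketch of that step, as done in setup-local-s3-on-k3d.sh later in this diff:

```bash
# bring up a single backup-restore pod only for the test run
kubectl scale deployment backup-restore --replicas 1
BACKUP_POD=$(kubectl get pod -l app=backup-restore -o name)
kubectl wait $BACKUP_POD --for=condition=Ready --timeout=240s
```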
Deleted file (68 lines): the previous backup-restore Pod manifest, replaced by the Deployment above.

@@ -1,68 +0,0 @@
kind: Pod
apiVersion: v1
metadata:
  name: backup-restore
  labels:
    app.kubernetes.io/name: backup-restore
    app.kubernetes.io/part-of: cloud
spec:
  containers:
  - name: backup-app
    image: domaindrivenarchitecture/c4k-cloud-backup
    imagePullPolicy: IfNotPresent
    command: ["/entrypoint-start-and-wait.sh"]
    env:
    - name: POSTGRES_USER
      valueFrom:
        secretKeyRef:
          name: postgres-secret
          key: postgres-user
    - name: POSTGRES_PASSWORD
      valueFrom:
        secretKeyRef:
          name: postgres-secret
          key: postgres-password
    - name: POSTGRES_DB
      valueFrom:
        configMapKeyRef:
          name: postgres-config
          key: postgres-db
    - name: POSTGRES_HOST
      value: "postgresql-service:5432"
    - name: POSTGRES_SERVICE
      value: "postgresql-service"
    - name: POSTGRES_PORT
      value: "5432"
    - name: AWS_DEFAULT_REGION
      value: eu-central-1
    - name: AWS_ACCESS_KEY_ID_FILE
      value: /var/run/secrets/backup-secrets/aws-access-key-id
    - name: AWS_SECRET_ACCESS_KEY_FILE
      value: /var/run/secrets/backup-secrets/aws-secret-access-key
    - name: RESTIC_REPOSITORY
      valueFrom:
        configMapKeyRef:
          name: backup-config
          key: restic-repository
    - name: RESTIC_PASSWORD_FILE
      value: /var/run/secrets/backup-secrets/restic-password
    volumeMounts:
    - name: cloud-data-volume
      mountPath: /var/backups
    - name: backup-secret-volume
      mountPath: /var/run/secrets/backup-secrets
      readOnly: true
    - name: cloud-secret-volume
      mountPath: /var/run/secrets/cloud-secrets
      readOnly: true
  volumes:
  - name: cloud-data-volume
    persistentVolumeClaim:
      claimName: cloud-pvc
  - name: cloud-secret-volume
    secret:
      secretName: cloud-secret
  - name: backup-secret-volume
    secret:
      secretName: backup-secret
  restartPolicy: OnFailure
@@ -12,6 +12,7 @@ spec:
   template:
     metadata:
       labels:
+        app: cloud-app
         app.kubernetes.io/name: cloud-pod
         app.kubernetes.io/application: cloud
         redeploy: v3
@@ -7,7 +7,7 @@
    ))
 
 (deftest should-k8s-objects
-  (is (= 16
+  (is (= 17
          (count (cut/k8s-objects {:fqdn "nextcloud-neu.prod.meissa-gmbh.de"
                                   :postgres-db-user "nextcloud"
                                   :postgres-db-password "nextcloud-db-password"

@@ -20,7 +20,7 @@
                                   :aws-secret-access-key "aws-secret"
                                   :restic-password "restic-pw"
                                   :restic-repository "restic-repository"}))))
-  (is (= 14
+  (is (= 15
          (count (cut/k8s-objects {:fqdn "nextcloud-neu.prod.meissa-gmbh.de"
                                   :postgres-db-user "nextcloud"
                                   :postgres-db-password "nextcloud-db-password"
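The expected object counts go up by one because the backup-restore deployment is now part of the generated output. The counts are covered by the regular test jobs, which can also be run locally (a sketch, assuming leiningen and shadow-cljs are installed, mirroring the test-clj and test-cljs CI jobs):

```bash
# clj and cljs test suites
lein test
shadow-cljs compile test
```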
src/test/resources/local-integration-test/README.md (new file, 84 lines)

# Usage

`setup-local-s3.sh [BUCKET_NAME]`:

- [BUCKET_NAME] is optional; "mybucket" is used if not specified
- sets up a k3s instance
- installs a localstack pod
- creates http and https routing to localstack via localhost
- saves the self-signed certificate as ca.crt
- uses the certificate to initialize a restic repo at `https://k3stesthost/BUCKET_NAME`

Note: If you cannot connect to "k3stesthost/health", make sure the ingress IP matches the required host names k3stesthost and cloudhost. You can view the ingress IP (e.g. 10.0.2.15) with `sudo k3s kubectl get ingress`, then add a line to "/etc/hosts", e.g. `10.0.2.15 k3stesthost cloudhost`.

`start-k3s.sh`:

- creates and starts a k3s instance

`k3s-uninstall.sh`:

- deletes everything k3s related

## Other useful commands

- `sudo k3s kubectl get pods`
- `curl k3stesthost/health`
  expected: `{"services": {"s3": "running"}, "features": {"persistence": "disabled", "initScripts": "initialized"}}`

#### Requires AWS-CLI

- create bucket: `aws --endpoint-url=http://k3stesthost s3 mb s3://mybucket`
- list buckets: `aws --endpoint-url=http://k3stesthost s3 ls`
- upload something: `aws --endpoint-url=http://k3stesthost s3 cp test.txt s3://mybucket`
- check files: `aws --endpoint-url=http://k3stesthost s3 ls s3://mybucket`

## Run docker locally

```
docker pull docker:19.03.12-dind
docker run -d --privileged --name integration-test docker:19.03.12-dind
docker exec integration-test sh -c "apk add bash"
```

Set up the docker container integration-test:

```
docker cp ../../../../../c4k-nextcloud/ integration-test:/
docker exec -it integration-test sh
cd /c4k-nextcloud/src/test/resources/local-integration-test
./setup-docker.sh
```

## Deploy nextcloud

### Requirements

* leiningen (install with `sudo apt install leiningen`)
* In the project's root, execute: `lein uberjar`
* Change the file "valid-config.edn" according to your settings (e.g. `:fqdn "cloudhost"` and `:restic-repository "s3://k3stesthost:mybucket"`).

### Deploy to k3s

* Create and deploy the k8s yaml:
  `java -jar target/uberjar/c4k-nextcloud-standalone.jar valid-config.edn valid-auth.edn | sudo k3s kubectl apply -f -`

Some of the steps may take a few minutes to take effect, but eventually nextcloud should be available at: https://cloudhost

### Deploy to k3d

k3d is a k3s cluster that runs inside a container. To install k3d, run `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash` or have a look at https://k3d.io/v5.0.3/ .

* Start a k3d cluster to deploy s3 and nextcloud and to test backup and restore on it: `./setup-local-s3-on-k3d.sh`

Some steps may take a couple of minutes to take effect, but eventually nextcloud should be available at: https://cloudhost

#### Remove k3d cluster

`k3d cluster delete nextcloud`

## Test in local gitlab runner

See https://stackoverflow.com/questions/32933174/use-gitlab-ci-to-run-tests-locally

This needs to be done in the project root:

`docker run -d --name gitlab-runner --restart always -v $PWD:$PWD -v /var/run/docker.sock:/var/run/docker.sock gitlab/gitlab-runner:latest`

`docker exec -it -w $PWD gitlab-runner gitlab-runner exec docker nextcloud-integrationtest --docker-privileged --docker-volumes '/var/run/docker.sock:/var/run/docker.sock'`
src/test/resources/local-integration-test/certificate.yaml (new file, 20 lines)

apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: localstack-cert
  namespace: default
spec:
  secretName: localstack-secret
  commonName: k3stesthost
  dnsNames:
    - k3stesthost
  issuerRef:
    name: selfsigning-issuer
    kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: selfsigning-issuer
spec:
  selfSigned: {}
src/test/resources/local-integration-test/kubectl.sh (new executable file, 17 lines)

# Set the default kube context if present
DEFAULT_KUBE_CONTEXTS="$HOME/.kube/config"
if test -f "${DEFAULT_KUBE_CONTEXTS}"
then
    export KUBECONFIG="$DEFAULT_KUBE_CONTEXTS"
fi

# Additional contexts should be in ~/.kube/custom-contexts/
CUSTOM_KUBE_CONTEXTS="$HOME/.kube/custom-contexts"
mkdir -p "${CUSTOM_KUBE_CONTEXTS}"
OIFS="$IFS"
IFS=$'\n'
for contextFile in `find "${CUSTOM_KUBE_CONTEXTS}" -type f -name "*.yml"`
do
    export KUBECONFIG="$contextFile:$KUBECONFIG"
done
IFS="$OIFS"
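kubectl.sh only exports KUBECONFIG, so it is presumably meant to be sourced rather than executed in a subshell; a usage sketch (this is an assumption, the script itself does not state it):

```bash
# pick up ~/.kube/config plus any *.yml contexts from ~/.kube/custom-contexts/
source ./kubectl.sh
kubectl config get-contexts
```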
src/test/resources/local-integration-test/localstack.yaml (new file, 65 lines)

apiVersion: apps/v1
kind: Deployment
metadata:
  name: localstack
spec:
  selector:
    matchLabels:
      app: localstack
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: localstack
    spec:
      containers:
      - image: localstack/localstack
        name: localstack-app
        imagePullPolicy: IfNotPresent
        env:
        - name: SERVICES
          value: s3
---
# service
apiVersion: v1
kind: Service
metadata:
  name: localstack-service
spec:
  selector:
    app: localstack
  ports:
    - port: 4566
---
apiVersion: v1
kind: Secret
metadata:
  name: localstack-secret
type: Opaque
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-localstack
  annotations:
    cert-manager.io/cluster-issuer: selfsigning-issuer
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/redirect-entry-point: https
  namespace: default
spec:
  tls:
  - hosts:
    - k3stesthost
    secretName: localstack-secret
  rules:
  - host: k3stesthost
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: localstack-service
            port:
              number: 4566
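Once these manifests are applied and the ingress answers for k3stesthost, the localstack S3 service can be probed through it. A sketch based on the health check used elsewhere in this MR (the /etc/hosts entry is only needed when name resolution for k3stesthost is not set up yet):

```bash
# make k3stesthost resolve to the ingress, then poll the localstack health endpoint
echo "$(kubectl get ingress ingress-localstack -o=jsonpath='{.status.loadBalancer.ingress[0].ip}') k3stesthost" \
  | sudo tee -a /etc/hosts
curl --fail --silent k3stesthost/health | grep -oe '"s3": "available"' -oe '"s3": "running"'
```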
src/test/resources/local-integration-test/setup-docker.sh (new executable file, 48 lines)

#!/bin/bash

set -x

docker volume create k3s-server

name='inttst'

[[ $(docker ps -f "name=$name" --format '{{.Names}}') == $name ]] || docker run --name $name -d --privileged --tmpfs /run --tmpfs /var/run --restart always -e K3S_TOKEN=12345678901234 -e K3S_KUBECONFIG_OUTPUT=./kubeconfig.yaml -e K3S_KUBECONFIG_MODE=666 -v k3s-server:/var/lib/rancher/k3s:z -v $(pwd):/output:z -p 6443:6443 -p 80:80 -p 443:443 rancher/k3s server --cluster-init --tls-san k3stesthost --tls-san cloudhost

docker ps

export timeout=30; while ! docker exec $name sh -c "test -f /var/lib/rancher/k3s/server/kubeconfig.yaml"; do if [ "$timeout" == 0 ]; then echo "ERROR: Timeout while waiting for file."; break; fi; sleep 1; ((timeout--)); done

mkdir -p $HOME/.kube/

docker cp $name:/var/lib/rancher/k3s/server/kubeconfig.yaml $HOME/.kube/config

if [ "$timeout" == 0 ]
then
    echo -------------------------------------------------------
    find / -name "kubeconfig.yaml";
    echo -------------------------------------------------------
    docker ps -a
    echo -------------------------------------------------------
    exit 1
fi

echo "127.0.0.1 kubernetes" >> /etc/hosts

apk add wget curl bash sudo openjdk8

wget -P /etc/apk/keys/ https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub
apk add --no-cache --repository=https://apkproxy.herokuapp.com/sgerrand/alpine-pkg-leiningen leiningen

curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.22.0/bin/linux/amd64/kubectl
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl

sleep 20 # allow some time for k3s to start up
docker ps -a

swapoff -a # can this be removed?

export KUBECONFIG=$HOME/.kube/config

pwd
cd ./c4k-nextcloud/src/test/resources/local-integration-test && ./setup-local-s3-on-k3d.sh
src/test/resources/local-integration-test/setup-local-s3-on-k3d.sh (new executable file, 60 lines)

#!/bin/bash

set -x

function main()
{
    # enable tls for k3s with cert-manager
    kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml

    kubectl apply -f localstack.yaml

    until kubectl apply -f certificate.yaml
    do
        echo "[INFO] Waiting for certificate ..."
        sleep 30
    done

    # wait for ingress to be ready
    bash -c 'external_ip=""; while [ -z $external_ip ]; do echo "[INFO] Waiting for end point..."; external_ip=$(kubectl get ingress -o jsonpath="{$.items[*].status.loadBalancer.ingress[*].ip}"); [ -z "$external_ip" ] && sleep 10; done; echo "End point ready - $external_ip";'

    export INGRESS_IP=$(kubectl get ingress ingress-localstack -o=jsonpath="{.status.loadBalancer.ingress[0].ip}")

    cd ../../../../ # c4k-nextcloud project root
    lein uberjar
    java -jar target/uberjar/c4k-nextcloud-standalone.jar config-local.edn auth-local.edn | kubectl apply -f -

    CLOUD_POD=$(kubectl get pod -l app=cloud-app -o name)
    kubectl wait $CLOUD_POD --for=condition=Ready --timeout=240s

    # wait for the nextcloud config file to become available
    timeout 180 bash -c "kubectl exec -t $CLOUD_POD -- bash -c \"until [ -f /var/www/html/config/config.php ]; do sleep 10; done\""

    # ensure an instance of pod backup-restore
    kubectl scale deployment backup-restore --replicas 1

    # wait for localstack health endpoint
    echo "$INGRESS_IP k3stesthost cloudhost" >> /etc/hosts
    until curl --fail --silent k3stesthost/health | grep -oe '"s3": "available"' -oe '"s3": "running"'
    do
        curl --fail k3stesthost/health
        echo "[INFO] Waiting for s3 running"
        sleep 10
    done

    BACKUP_POD=$(kubectl get pod -l app=backup-restore -o name)
    kubectl wait $BACKUP_POD --for=condition=Ready --timeout=240s

    kubectl exec -t $BACKUP_POD -- bash -c "echo \"$INGRESS_IP k3stesthost cloudhost\" >> /etc/hosts"
    kubectl exec -t $BACKUP_POD -- /usr/local/bin/init.sh

    echo ================= BACKUP =================
    kubectl exec -t $BACKUP_POD -- /usr/local/bin/backup.sh

    sleep 10 # avoid race conditions

    echo ================= RESTORE =================
    kubectl exec -t $BACKUP_POD -- /usr/local/bin/restore.sh
}

main "$@"
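After the script has run through backup and restore, the deployed services can be checked by hand. A small sketch (the -k flag is needed because the staging or self-signed certificates are not trusted locally):

```bash
# the cloud pod should be Ready and nextcloud should answer on https://cloudhost
kubectl get pods -l app=cloud-app
curl -k -I https://cloudhost
```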
src/test/resources/local-integration-test/setup-local-s3.sh (new executable file, 34 lines)

function main()
{
    local bucket_name="${1:-mybucket}"; shift

    ./start-k3s.sh

    sudo k3s kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml

    sudo k3s kubectl apply -f localstack.yaml

    until sudo k3s kubectl apply -f certificate.yaml
    do
        echo "*** Waiting for certificate ... ***"
        sleep 10
    done
    echo

    echo
    echo "[INFO] Waiting for localstack health endpoint"
    until curl --connect-timeout 3 -s -f -o /dev/null "k3stesthost/health"
    do
        sleep 5
    done
    echo

    sudo k3s kubectl get secret localstack-secret -o jsonpath="{.data.ca\.crt}" | base64 --decode > ca.crt

    #aws --endpoint-url=http://localhost s3 mb s3://$bucket_name
    export RESTIC_PASSWORD="test-password"
    restic init --cacert ca.crt -r s3://k3stesthost/$bucket_name

}

main $@
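With the repository initialized against the self-signed localstack endpoint, the same certificate and password can be used to inspect it afterwards. A sketch using the default bucket name:

```bash
# list snapshots in the locally created restic repo (empty right after init)
export RESTIC_PASSWORD="test-password"
restic snapshots --cacert ca.crt -r s3://k3stesthost/mybucket
```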
src/test/resources/local-integration-test/setup-local.sh (new executable file, 9 lines)

function main()
{
    ./start-k3s.sh

    sudo k3s kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml

}

main
src/test/resources/local-integration-test/start-k3d.sh (new executable file, 1 line)

KUBECONFIG=~/.kube/custom-contexts/k3d-config.yml k3d cluster create nextcloud --k3s-arg '--tls-san cloudhost@loadbalancer' --port 80:80@loadbalancer --port 443:443@loadbalancer --api-port 6443 --kubeconfig-update-default
src/test/resources/local-integration-test/start-k3s.sh (new executable file, 1 line)

curl -sfL https://get.k3s.io | K3S_NODE_NAME=k3stesthost INSTALL_K3S_EXEC='--tls-san cloudhost' sh -
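After the install script finishes, the single-node cluster can be checked with the bundled kubectl; a quick sketch:

```bash
# the node should report Ready and carry the name set via K3S_NODE_NAME
sudo k3s kubectl get nodes
```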