Compare commits

...

90 commits

Author SHA1 Message Date
17364c3a79 further cleanup as merge preparation 2021-12-15 13:19:26 +01:00
b3a5895897 cleanup 2021-12-15 13:05:55 +01:00
663a460875 add wait for nc config.php 2021-12-15 12:45:55 +01:00
53938c0d82 add sleep 2021-12-15 12:30:42 +01:00
018aea4808 [skip ci] move k3s config to default path 2021-12-15 12:27:02 +01:00
e784ed80c6 [skip ci] move k3s config to default path 2021-12-15 12:23:14 +01:00
bom 57e497afe0 moved label to pod 2021-12-15 12:21:00 +01:00
bom 51a6341441 added label to cloud-deployment 2021-12-15 12:15:22 +01:00
bom df46d9e2b5 another try 2021-12-15 12:11:00 +01:00
bom 3b0ff10188 wip 2021-12-15 12:00:58 +01:00
bom c2628ac470 wip 2021-12-15 11:44:31 +01:00
bom e31b9ca74a moved some debug stuff 2021-12-15 11:13:22 +01:00
bom a2735ae9a7 2nd try 2021-12-15 10:24:15 +01:00
bom 2df41b6c64 Merge branch 'integration-test-w-o-db-backup' of gitlab.com:domaindrivenarchitecture/c4k-nextcloud into integration-test-w-o-db-backup 2021-12-15 10:17:36 +01:00
bom 4b781ce37a wip debug 2021-12-15 10:17:34 +01:00
49d2fdb058 Changes to setup-local-s3-on-k3d.sh 2021-12-15 10:09:58 +01:00
7e135ee4f3 [skip ci] add readme pyb 2021-12-10 12:21:55 +01:00
bb8bd63ebd add end-maintenance to restore.sh 2021-12-10 10:36:41 +01:00
0a7c2676e8 remove skipped db backup 2021-12-10 10:21:36 +01:00
22ed463637 [skip ci] add debugging info 2021-12-09 18:46:11 +01:00
f32a91826d [skip ci] add debugging info 2021-12-09 18:16:40 +01:00
f135aaff97 [skip ci] rm tmp file 2021-12-09 17:50:01 +01:00
215172660b fix ci 2021-12-09 15:35:05 +01:00
a5c0637e5e add timestamps 2021-12-09 15:32:11 +01:00
7e176d3ce1 [skip ci] remove further backups for fast execution 2021-12-09 15:01:10 +01:00
bab234d4bd [skip ci] fix local ci 2021-12-09 14:26:13 +01:00
6e6759b59d [skip ci] move run docker from ci to script 2021-12-09 14:02:52 +01:00
6871e48176 [skip ci] fix dir for local gitlab-runner 2021-12-09 13:35:23 +01:00
d34af06c4a [skip ci] pin docker version 2021-12-09 12:54:16 +01:00
c75a1e4587 [skip ci] add doc for local gitlab runner 2021-12-08 19:06:23 +01:00
30e07d4e3b start docker in ci outside script 2021-12-08 18:15:41 +01:00
fbfb34b60f add some debugs 2021-12-08 17:59:49 +01:00
bom a4ff34d291 try other bind-address 2021-12-08 12:28:47 +01:00
785eb2ace8 wip 2021-12-08 12:18:33 +01:00
79fc3d8b72 config path 2021-12-08 12:15:45 +01:00
34114720f8 add debug 2021-12-08 12:01:28 +01:00
39e18f9627 wip 2021-12-08 11:56:22 +01:00
89ed38f335 wip 2021-12-08 11:51:36 +01:00
253b961a52 chg path 2021-12-08 11:47:36 +01:00
c5199d9750 wip 2021-12-08 11:39:58 +01:00
214f5bc530 wip 2021-12-08 11:35:11 +01:00
3cb84b2e56 wip 2021-12-08 11:31:46 +01:00
cbd9bd3fd0 wip 2021-12-08 11:27:17 +01:00
7df7ae4b17 wip 2021-12-08 10:58:44 +01:00
ce1783cc14 wip 2021-12-08 10:55:51 +01:00
47155fb1e4 fix ci 2021-12-08 10:45:06 +01:00
c913f3b96d wip 2021-12-08 10:41:02 +01:00
7ea46bf520 remove docker build from ci 2021-12-08 10:24:44 +01:00
dc2720ab02 initial 2021-12-08 10:20:22 +01:00
az 5c42451610 expand find to root 2021-12-01 18:37:47 +01:00
az 6890cfe6c4 fix missing docker volume 2021-12-01 18:30:41 +01:00
az f59b3fca11 add error handling for missing config 2021-12-01 18:24:29 +01:00
az b266053f19 div fixes 2021-12-01 17:46:59 +01:00
az f4a250d592 fix wrong backup dir 2021-12-01 17:43:09 +01:00
az 222fa0a433 fix branch name local-integration-test 2021-12-01 17:31:00 +01:00
ansgarz 3959f10bbe Update .gitlab-ci.yml file 2021-12-01 16:26:03 +00:00
az 1b25d7ff28 Merge branch 'local-integration-test' of gitlab.com:domaindrivenarchitecture/c4k-nextcloud into local-integration-test 2021-12-01 17:24:35 +01:00
az 51f1903ade remove dependency 2021-12-01 17:21:26 +01:00
ansgarz 4f43120156 Update .gitlab-ci.yml file 2021-12-01 16:15:24 +00:00
az 82589b668b add info to ci 2021-12-01 17:12:01 +01:00
az 2bb5358245 short ci for branch integration-local 2021-12-01 17:10:38 +01:00
az e0890646d8 add bash to integrationtest 2021-12-01 13:31:33 +01:00
az a2cd12ef0a fix gitlab ci 2021-12-01 13:17:17 +01:00
az 21e5d84bbb add integrationtests to ci 2021-12-01 13:12:45 +01:00
az 4e876b202c update README 2021-12-01 12:54:52 +01:00
bom d44121975f added setup-docker script 2021-12-01 12:10:15 +01:00
bom efd71a689a changes to script 2021-12-01 12:02:22 +01:00
bom a7e1b2f882 changed setup script to be able to run in ci 2021-12-01 11:55:28 +01:00
leo 49587f46a7 added restore 2021-11-26 13:07:24 +01:00
bom 7e9a1823a7 included jar execution and backup and init 2021-11-26 12:26:38 +01:00
bom 316c802212 added yaml alteration for local tests 2021-11-26 10:35:22 +01:00
bom 4df204abb6 fixed endpoint retrieval 2021-11-20 15:01:56 +01:00
az 61e6578a81 improve check for running localstack 2021-11-17 16:51:02 +01:00
az a82cdf16de update .gitignore 2021-11-13 19:34:15 +01:00
bom 1852924fbc use http again 2021-11-12 11:53:17 +01:00
bom 22fe84cb45 update readme 2021-11-12 11:53:00 +01:00
bom 2cff7879ac use correct inherited docker image 2021-11-10 13:33:43 +01:00
bom 1fa6758b66 use custom-kubectl-context for k3d setup, connect to correct health endpoint 2021-11-10 12:18:17 +01:00
az 507a5f8913 update README 2021-11-04 21:32:55 +01:00
az c4dde09600 add scripts and update README for k3d 2021-11-04 21:29:09 +01:00
bom dd2ef8ec88 version bump 2021-11-04 18:09:24 +01:00
bom 4edce1c5c7 release 2021-11-04 18:07:49 +01:00
zwa 88fa59f54c update README 2021-10-30 14:10:33 +02:00
zwa abfa94b499 get nc working with localstack (use 2 different hostnames for localstack resp. nextcloud; upd README) 2021-10-30 13:59:42 +02:00
az 5e9db34b26 add comment to readme about local dns 2021-10-29 15:11:09 +02:00
bom 72fbe23a5d fixed lein test 2021-10-27 16:34:55 +02:00
bom d81814a33f temporary change to be able to use localhost as fqdn 2021-10-27 16:29:36 +02:00
bom 521e3a5040 added localstack infrastructure 2021-10-27 16:15:29 +02:00
bom 2126916a6a fixed backup-restore-deployment 2021-10-27 16:14:41 +02:00
bom 5fff91dba7 added backup-restore as deployment 2021-10-27 16:12:58 +02:00
28 changed files with 543 additions and 84 deletions

11
.gitignore vendored
View file

@@ -22,8 +22,15 @@ logs/
*.iml
.idea/
#valid-auth.edn
#valid-config.edn
# config files
my-auth.edn
my-config.edn
auth.edn
config.edn
# certificate
ca.crt
# caches
.clj-kondo/.cache
.lsp/.cache/

View file

@@ -4,10 +4,17 @@ stages:
- security
- upload
- image
- integrationtest
services:
- docker:19.03.12-dind
.only-master: &only-master
rules:
- if: '$CI_COMMIT_REF_NAME == "master"'
when: always
- when: never
.cljs-job: &cljs
image: domaindrivenarchitecture/shadow-cljs
cache:
@@ -32,18 +39,21 @@ services:
test-cljs:
<<: *cljs
<<: *only-master
stage: build_and_test
script:
- shadow-cljs compile test
test-clj:
<<: *clj
<<: *only-master
stage: build_and_test
script:
- lein test
test-schema:
<<: *clj
<<: *only-master
stage: build_and_test
script:
- lein uberjar
@@ -77,6 +87,7 @@ test-schema:
package-uberjar:
<<: *clj
<<: *only-master
stage: package
script:
- sha256sum target/uberjar/c4k-nextcloud-standalone.jar > target/uberjar/c4k-nextcloud-standalone.jar.sha256
@@ -86,6 +97,7 @@ package-uberjar:
- target/uberjar
sast:
<<: *only-master
variables:
SAST_EXCLUDED_ANALYZERS:
bandit, brakeman, flawfinder, gosec, kubesec, phpcs-security-audit,
@@ -108,7 +120,7 @@ release:
image: registry.gitlab.com/gitlab-org/release-cli:latest
stage: upload
rules:
- if: '$CI_COMMIT_TAG != null'
- if: '$CI_COMMIT_BRANCH == "master" && $CI_COMMIT_TAG == null'
artifacts:
paths:
- target/uberjar
@@ -128,7 +140,7 @@ nextcloud-image-test-publish:
image: domaindrivenarchitecture/devops-build:latest
stage: image
rules:
- if: '$CI_COMMIT_TAG != null'
- if: '$CI_COMMIT_BRANCH == "master" && $CI_COMMIT_TAG != null'
script:
- cd infrastructure/docker-nextcloud && pyb image test publish
@@ -136,6 +148,28 @@ backup-image-test-publish:
image: domaindrivenarchitecture/devops-build:latest
stage: image
rules:
- if: '$CI_COMMIT_TAG != null'
- if: '$CI_COMMIT_BRANCH == "master" && $CI_COMMIT_TAG != null'
script:
- cd infrastructure/docker-backup && pyb image test publish
- cd infrastructure/docker-backup && pyb image test publish
nextcloud-integrationtest:
stage: integrationtest
image: registry.gitlab.com/gitlab-org/cluster-integration/helm-install-image/releases/3.7.1-kube-1.20.11-alpine-3.14
services:
- name: registry.gitlab.com/gitlab-org/cluster-integration/test-utils/k3s-gitlab-ci/releases/v1.22.2-k3s2
alias: k3s
script:
- apk add curl sudo bash
- apk add wget curl bash sudo openjdk8
- wget -P /etc/apk/keys/ https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub
- apk add --no-cache --repository=https://apkproxy.herokuapp.com/sgerrand/alpine-pkg-leiningen leiningen
- mkdir -p ${HOME}/.kube/
- curl -f k3s:8081 > ${HOME}/.kube/config
- kubectl version
- kubectl cluster-info
- echo "---------- Integration test -------------"
- pwd
- cd ./src/test/resources/local-integration-test/ && ./setup-local-s3-on-k3d.sh
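
The integration-test job above pulls its kubeconfig from the k3s service container before running kubectl. A minimal hardening sketch (not part of the pipeline) that retries the fetch until the service answers:

```bash
# Hypothetical hardening: retry the kubeconfig fetch instead of failing on the first curl.
mkdir -p "${HOME}/.kube/"
for i in $(seq 1 30); do
  curl -sf k3s:8081 > "${HOME}/.kube/config" && break
  echo "[INFO] waiting for the k3s kubeconfig endpoint (${i}/30)"
  sleep 5
done
kubectl cluster-info
```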

7
auth-local.edn Normal file
View file

@@ -0,0 +1,7 @@
{:postgres-db-user "nextcloud"
:postgres-db-password "dbpass"
:nextcloud-admin-user "cloudadmin"
:nextcloud-admin-password "cloudpassword"
:aws-access-key-id ""
:aws-secret-access-key ""
:restic-password "test-password"}

6
config-local.edn Normal file
View file

@@ -0,0 +1,6 @@
{:fqdn "cloudhost"
:issuer :staging
:nextcloud-data-volume-path "/var/cloud"
:postgres-data-volume-path "/var/postgres"
:restic-repository "s3://k3stesthost/mybucket"
:local-integration-test true}

30
infrastructure/README.md Normal file
View file

@@ -0,0 +1,30 @@
# Build images
## Prerequisites
See also https://pypi.org/project/ddadevops/
```bash
# Ensure that your python3 version is at least Python 3.7!
sudo apt install python3-pip
pip3 install pip --upgrade --user
pip3 install pybuilder ddadevops deprecation --user
export PATH=$PATH:~/.local/bin
# terraform
pip3 install dda-python-terraform --user
# AwsMixin
pip3 install boto3 --user
# AwsMfaMixin
pip3 install boto3 mfa --user
```
In the "docker-backup" or "docker-nextcloud" folder:
```bash
# step test is optional
pyb image test publish
```
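
A minimal sketch of building both images in one pass, assuming the prerequisites above are installed and a docker daemon is running:

```bash
# Build and publish both images in sequence (run from the project root).
for dir in docker-nextcloud docker-backup; do
  (cd "infrastructure/${dir}" && pyb image test publish)
done
```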

View file

@@ -1,6 +1,6 @@
#!/bin/bash
set -o pipefail
set -xo pipefail
function main() {

View file

@@ -1,5 +1,7 @@
#!/bin/bash
set -x
if test -f "/var/backups/config/config.orig"; then
rm /var/backups/config/config.php

View file

@@ -1,6 +1,6 @@
#!/bin/bash
set -Eeo pipefail
set -Eeox pipefail
function main() {
@@ -19,10 +19,13 @@ function main() {
restore-db
restore-directory '/var/backups/'
end-maintenance.sh
}
source /usr/local/lib/functions.sh
source /usr/local/lib/pg-functions.sh
source /usr/local/lib/file-functions.sh
main

View file

@@ -1,10 +1,15 @@
#!/bin/bash
set -x
if [ ! -f "/var/backups/config/config.orig" ]; then
rm -f /var/backups/config/config.orig
cp /var/backups/config/config.php /var/backups/config/config.orig
# put nextcloud in maintenance mode
sed -i "s/);/ \'maintenance\' => true,\n);/g" /var/backups/config/config.php
chown www-data:root /var/backups/config/config.php
touch /var/backups/config/config.php

View file

@@ -1,7 +1,7 @@
FROM meissa-cloud-backup
FROM c4k-cloud-backup
RUN apt update
RUN apt -yqq --no-install-recommends --yes install curl default-jre-headless
RUN apt update > /dev/null
RUN apt -yqq --no-install-recommends --yes install curl default-jre-headless > /dev/null
RUN curl -L -o /tmp/serverspec.jar https://github.com/DomainDrivenArchitecture/dda-serverspec-crate/releases/download/2.0.0/dda-serverspec-standalone.jar

View file

@@ -2,7 +2,7 @@
"name": "c4k-nextcloud",
"description": "Generate c4k yaml for a nextcloud deployment.",
"author": "meissa GmbH",
"version": "0.1.3-SNAPSHOT",
"version": "2.0.1-SNAPSHOT",
"homepage": "https://gitlab.com/domaindrivenarchitecture/c4k-nextcloud#readme",
"repository": "https://www.npmjs.com/package/c4k-nextcloud",
"license": "APACHE2",

View file

@@ -17,6 +17,7 @@
"backup/config.yaml" (rc/inline "backup/config.yaml")
"backup/cron.yaml" (rc/inline "backup/cron.yaml")
"backup/secret.yaml" (rc/inline "backup/secret.yaml")
"backup/backup-restore-deployment.yaml" (rc/inline "backup/backup-restore-deployment.yaml")
(throw (js/Error. "Undefined Resource!")))))
(defn generate-config [my-conf]
@@ -28,6 +29,12 @@
(defn generate-cron []
(yaml/from-string (yaml/load-resource "backup/cron.yaml")))
(defn generate-backup-restore-deployment [my-conf]
(let [backup-restore-yaml (yaml/from-string (yaml/load-resource "backup/backup-restore-deployment.yaml"))]
(if (and (contains? my-conf :local-integration-test) (= true (:local-integration-test my-conf)))
(cm/replace-named-value backup-restore-yaml "CERTIFICATE_FILE" "/var/run/secrets/localstack-secrets/ca.crt")
backup-restore-yaml)))
(defn generate-secret [my-auth]
(let [{:keys [aws-access-key-id aws-secret-access-key restic-password]} my-auth]
(->

View file

@@ -42,7 +42,8 @@
(when (contains? config :restic-repository)
[(yaml/to-string (backup/generate-config config))
(yaml/to-string (backup/generate-secret config))
(yaml/to-string (backup/generate-cron))]))))
(yaml/to-string (backup/generate-cron))
(yaml/to-string (backup/generate-backup-restore-deployment config))]))))
(defn-spec generate any?
[my-config config?

View file

@@ -7,7 +7,7 @@
[dda.c4k-common.prefixes :as cp]
[dda.c4k-common.common :as cm]))
(s/def ::fqdn cp/fqdn-string?)
(s/def ::fqdn any?) ; TODO: Fix fqdn-string? to include localhost
(s/def ::issuer cp/letsencrypt-issuer?)
(s/def ::restic-repository string?)
(s/def ::nextcloud-data-volume-path string?)

View file

@@ -0,0 +1,85 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: backup-restore
spec:
replicas: 0
selector:
matchLabels:
app: backup-restore
strategy:
type: Recreate
template:
metadata:
labels:
app: backup-restore
app.kubernetes.io/name: backup-restore
app.kubernetes.io/part-of: cloud
spec:
containers:
- name: backup-app
image: domaindrivenarchitecture/c4k-cloud-backup
imagePullPolicy: IfNotPresent
command: ["/entrypoint-start-and-wait.sh"]
env:
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: postgres-secret
key: postgres-user
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: postgres-secret
key: postgres-password
- name: POSTGRES_DB
valueFrom:
configMapKeyRef:
name: postgres-config
key: postgres-db
- name: POSTGRES_HOST
value: "postgresql-service:5432"
- name: POSTGRES_SERVICE
value: "postgresql-service"
- name: POSTGRES_PORT
value: "5432"
- name: AWS_DEFAULT_REGION
value: eu-central-1
- name: AWS_ACCESS_KEY_ID_FILE
value: /var/run/secrets/backup-secrets/aws-access-key-id
- name: AWS_SECRET_ACCESS_KEY_FILE
value: /var/run/secrets/backup-secrets/aws-secret-access-key
- name: RESTIC_REPOSITORY
valueFrom:
configMapKeyRef:
name: backup-config
key: restic-repository
- name: RESTIC_PASSWORD_FILE
value: /var/run/secrets/backup-secrets/restic-password
- name: CERTIFICATE_FILE
value: ""
volumeMounts:
- name: cloud-data-volume
mountPath: /var/backups
- name: backup-secret-volume
mountPath: /var/run/secrets/backup-secrets
readOnly: true
- name: cloud-secret-volume
mountPath: /var/run/secrets/cloud-secrets
readOnly: true
- name: localstack-secret-volume
mountPath: /var/run/secrets/localstack-secrets
readOnly: true
volumes:
- name: cloud-data-volume
persistentVolumeClaim:
claimName: cloud-pvc
- name: cloud-secret-volume
secret:
secretName: cloud-secret
- name: backup-secret-volume
secret:
secretName: backup-secret
- name: localstack-secret-volume
secret:
secretName: localstack-secret
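
The deployment above starts with `replicas: 0`; the integration test scales it up on demand. A short sketch of doing the same by hand, mirroring the commands used in `setup-local-s3-on-k3d.sh`:

```bash
# Bring up one backup-restore replica and run a backup manually.
kubectl scale deployment backup-restore --replicas 1
BACKUP_POD=$(kubectl get pod -l app=backup-restore -o name)
kubectl wait $BACKUP_POD --for=condition=Ready --timeout=240s
kubectl exec -t $BACKUP_POD -- /usr/local/bin/init.sh
kubectl exec -t $BACKUP_POD -- /usr/local/bin/backup.sh
```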

View file

@@ -1,68 +0,0 @@
kind: Pod
apiVersion: v1
metadata:
name: backup-restore
labels:
app.kubernetes.io/name: backup-restore
app.kubernetes.io/part-of: cloud
spec:
containers:
- name: backup-app
image: domaindrivenarchitecture/c4k-cloud-backup
imagePullPolicy: IfNotPresent
command: ["/entrypoint-start-and-wait.sh"]
env:
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: postgres-secret
key: postgres-user
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: postgres-secret
key: postgres-password
- name: POSTGRES_DB
valueFrom:
configMapKeyRef:
name: postgres-config
key: postgres-db
- name: POSTGRES_HOST
value: "postgresql-service:5432"
- name: POSTGRES_SERVICE
value: "postgresql-service"
- name: POSTGRES_PORT
value: "5432"
- name: AWS_DEFAULT_REGION
value: eu-central-1
- name: AWS_ACCESS_KEY_ID_FILE
value: /var/run/secrets/backup-secrets/aws-access-key-id
- name: AWS_SECRET_ACCESS_KEY_FILE
value: /var/run/secrets/backup-secrets/aws-secret-access-key
- name: RESTIC_REPOSITORY
valueFrom:
configMapKeyRef:
name: backup-config
key: restic-repository
- name: RESTIC_PASSWORD_FILE
value: /var/run/secrets/backup-secrets/restic-password
volumeMounts:
- name: cloud-data-volume
mountPath: /var/backups
- name: backup-secret-volume
mountPath: /var/run/secrets/backup-secrets
readOnly: true
- name: cloud-secret-volume
mountPath: /var/run/secrets/cloud-secrets
readOnly: true
volumes:
- name: cloud-data-volume
persistentVolumeClaim:
claimName: cloud-pvc
- name: cloud-secret-volume
secret:
secretName: cloud-secret
- name: backup-secret-volume
secret:
secretName: backup-secret
restartPolicy: OnFailure

View file

@@ -12,6 +12,7 @@ spec:
template:
metadata:
labels:
app: cloud-app
app.kubernetes.io/name: cloud-pod
app.kubernetes.io/application: cloud
redeploy: v3

View file

@@ -5,7 +5,7 @@
[dda.c4k-nextcloud.core :as cut]))
(deftest should-k8s-objects
(is (= 16
(is (= 17
(count (cut/k8s-objects {:fqdn "nextcloud-neu.prod.meissa-gmbh.de"
:postgres-db-user "nextcloud"
:postgres-db-password "nextcloud-db-password"
@@ -18,7 +18,7 @@
:aws-secret-access-key "aws-secret"
:restic-password "restic-pw"
:restic-repository "restic-repository"}))))
(is (= 14
(is (= 15
(count (cut/k8s-objects {:fqdn "nextcloud-neu.prod.meissa-gmbh.de"
:postgres-db-user "nextcloud"
:postgres-db-password "nextcloud-db-password"

View file

@@ -0,0 +1,84 @@
# Usage
`setup-local-s3.sh [BUCKET_NAME]`:
- [BUCKET_NAME] is optional, "mybucket" will be used if not specified
- sets up a k3s instance
- installs a localstack pod
- creates http and https routing to localstack via localhost
- saves the self-signed certificate as ca.crt
- uses the certificate to initialize a restic repo at `https://k3stesthost/BUCKET_NAME`
Note: If you cannot connect to "k3stesthost/health", make sure the ingress IP is mapped to the required host names k3stesthost and cloudhost. View the ingress IP with `sudo k3s kubectl get ingress` (e.g. 10.0.2.15), then add a matching line to "/etc/hosts", e.g. `10.0.2.15 k3stesthost cloudhost`.
`start-k3s.sh`:
- creates and starts a k3s instance
`k3s-uninstall.sh`:
- deletes everything k3s related
## Other useful commands
- `sudo k3s kubectl get pods`
- `curl k3stesthost/health`
expected: `{"services": {"s3": "running"}, "features": {"persistence": "disabled", "initScripts": "initialized"}}`
#### Requires AWS-CLI
- create bucket `aws --endpoint-url=http://k3stesthost s3 mb s3://mybucket`
- list buckets `aws --endpoint-url=http://k3stesthost s3 ls`
- upload something `aws --endpoint-url=http://k3stesthost s3 cp test.txt s3://mybucket`
- check files `aws --endpoint-url=http://k3stesthost s3 ls s3://mybucket`
## Run docker locally
```
docker pull docker:19.03.12-dind
docker run -d --privileged --name integration-test docker:19.03.12-dind
docker exec integration-test sh -c "apk add bash"
```
Set up docker container integration-test:
```
docker cp ../../../../../c4k-nextcloud/ integration-test:/
docker exec -it integration-test sh
cd /c4k-nextcloud/src/test/resources/local-integration-test
./setup-docker.sh
```
## Deploy nextcloud
### Requirements
* leiningen (install with: `sudo apt install leiningen` )
* In the project's root execute: `lein uberjar`
* Change file "valid-config.edn" according to your settings (e.g. `:fqdn "cloudhost"` and `:restic-repository "s3://k3stesthost:mybucket"`).
### Deploy to k3s
* Create and deploy the k8s yaml:
`java -jar target/uberjar/c4k-nextcloud-standalone.jar valid-config.edn valid-auth.edn | sudo k3s kubectl apply -f -`
Some of the steps may take a few minutes to take effect, but eventually nextcloud should be available at https://cloudhost
### Deploy to k3d
k3d is k3s running inside a container. To install k3d, run `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash` or have a look at https://k3d.io/v5.0.3/.
* Start a k3d cluster to deploy s3, nextcloud and test backup and restore on it: `./setup-local-s3-on-k3d.sh`
Some steps may take a couple of minutes to take effect, but eventually nextcloud should be available at https://cloudhost
#### Remove k3d cluster
`k3d cluster delete nextcloud`
## Test in local gitlab runner
See https://stackoverflow.com/questions/32933174/use-gitlab-ci-to-run-tests-locally
This needs to be done in the project root
`docker run -d --name gitlab-runner --restart always -v $PWD:$PWD -v /var/run/docker.sock:/var/run/docker.sock gitlab/gitlab-runner:latest`
`docker exec -it -w $PWD gitlab-runner gitlab-runner exec docker nextcloud-integrationtest --docker-privileged --docker-volumes '/var/run/docker.sock:/var/run/docker.sock'`
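
A possible cleanup sequence after local experiments, assuming the container and cluster names used above:

```bash
# Remove the local test containers and clusters created by the steps above.
docker rm -f integration-test gitlab-runner   # dind and gitlab-runner containers
k3d cluster delete nextcloud                  # k3d variant
k3s-uninstall.sh                              # plain k3s variant
```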

View file

@@ -0,0 +1,20 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: localstack-cert
namespace: default
spec:
secretName: localstack-secret
commonName: k3stesthost
dnsNames:
- k3stesthost
issuerRef:
name: selfsigning-issuer
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: selfsigning-issuer
spec:
selfSigned: {}

View file

@@ -0,0 +1,17 @@
# Set the default kube context if present
DEFAULT_KUBE_CONTEXTS="$HOME/.kube/config"
if test -f "${DEFAULT_KUBE_CONTEXTS}"
then
export KUBECONFIG="$DEFAULT_KUBE_CONTEXTS"
fi
# Additional contexts should be in ~/.kube/custom-contexts/
CUSTOM_KUBE_CONTEXTS="$HOME/.kube/custom-contexts"
mkdir -p "${CUSTOM_KUBE_CONTEXTS}"
OIFS="$IFS"
IFS=$'\n'
for contextFile in `find "${CUSTOM_KUBE_CONTEXTS}" -type f -name "*.yml"`
do
export KUBECONFIG="$contextFile:$KUBECONFIG"
done
IFS="$OIFS"
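
A usage sketch for the snippet above, assuming it is sourced from your shell profile; `start-k3d.sh` writes its kubeconfig to `~/.kube/custom-contexts/k3d-config.yml`, so that context is picked up automatically:

```bash
# Re-read the snippet after a new context file has been written.
source ~/.bashrc
kubectl config get-contexts                # the k3d context should now be listed
kubectl config use-context k3d-nextcloud   # context name assumed from `k3d cluster create nextcloud`
```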

View file

@@ -0,0 +1,65 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: localstack
spec:
selector:
matchLabels:
app: localstack
strategy:
type: Recreate
template:
metadata:
labels:
app: localstack
spec:
containers:
- image: localstack/localstack
name: localstack-app
imagePullPolicy: IfNotPresent
env:
- name: SERVICES
value: s3
---
# service
apiVersion: v1
kind: Service
metadata:
name: localstack-service
spec:
selector:
app: localstack
ports:
- port: 4566
---
apiVersion: v1
kind: Secret
metadata:
name: localstack-secret
type: Opaque
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ingress-localstack
annotations:
cert-manager.io/cluster-issuer: selfsigning-issuer
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/redirect-entry-point: https
namespace: default
spec:
tls:
- hosts:
- k3stesthost
secretName: localstack-secret
rules:
- host: k3stesthost
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: localstack-service
port:
number: 4566
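
A quick smoke test for the localstack deployment above, assuming `k3stesthost` already resolves to the ingress IP (see the /etc/hosts note in the README):

```bash
sudo k3s kubectl get pods -l app=localstack
curl k3stesthost/health
# expected: {"services": {"s3": "running"}, ...}
```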

View file

@@ -0,0 +1,48 @@
#!/bin/bash
set -x
docker volume create k3s-server
name='inttst'
[[ $(docker ps -f "name=$name" --format '{{.Names}}') == $name ]] || docker run --name $name -d --privileged --tmpfs /run --tmpfs /var/run --restart always -e K3S_TOKEN=12345678901234 -e K3S_KUBECONFIG_OUTPUT=./kubeconfig.yaml -e K3S_KUBECONFIG_MODE=666 -v k3s-server:/var/lib/rancher/k3s:z -v $(pwd):/output:z -p 6443:6443 -p 80:80 -p 443:443 rancher/k3s server --cluster-init --tls-san k3stesthost --tls-san cloudhost
docker ps
export timeout=30; while ! docker exec $name sh -c "test -f /var/lib/rancher/k3s/server/kubeconfig.yaml"; do if [ "$timeout" == 0 ]; then echo "ERROR: Timeout while waiting for file."; break; fi; sleep 1; ((timeout--)); done
mkdir -p $HOME/.kube/
docker cp $name:/var/lib/rancher/k3s/server/kubeconfig.yaml $HOME/.kube/config
if [ "$timeout" == 0 ]
then
echo -------------------------------------------------------
find / -name "kubeconfig.yaml";
echo -------------------------------------------------------
docker ps -a
echo -------------------------------------------------------
exit 1
fi
echo "127.0.0.1 kubernetes" >> /etc/hosts
apk add wget curl bash sudo openjdk8
wget -P /etc/apk/keys/ https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub
apk add --no-cache --repository=https://apkproxy.herokuapp.com/sgerrand/alpine-pkg-leiningen leiningen
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.22.0/bin/linux/amd64/kubectl
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl
sleep 20 #allow some time to startup k3s
docker ps -a
swapoff -a # can this be removed ?
export KUBECONFIG=$HOME/.kube/config
pwd
cd ./c4k-nextcloud/src/test/resources/local-integration-test && ./setup-local-s3-on-k3d.sh

View file

@@ -0,0 +1,60 @@
#!/bin/bash
set -x
function main()
{
# enable tls for k3s with cert-manager
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
kubectl apply -f localstack.yaml
until kubectl apply -f certificate.yaml
do
echo "[INFO] Waiting for certificate ..."
sleep 30
done
# wait for ingress to be ready
bash -c 'external_ip=""; while [ -z $external_ip ]; do echo "[INFO] Waiting for end point..."; external_ip=$(kubectl get ingress -o jsonpath="{$.items[*].status.loadBalancer.ingress[*].ip}"); [ -z "$external_ip" ] && sleep 10; done; echo "End point ready - $external_ip";'
export INGRESS_IP=$(kubectl get ingress ingress-localstack -o=jsonpath="{.status.loadBalancer.ingress[0].ip}")
cd ../../../../ # c4k-nextcloud project root
lein uberjar
java -jar target/uberjar/c4k-nextcloud-standalone.jar config-local.edn auth-local.edn | kubectl apply -f -
CLOUD_POD=$(kubectl get pod -l app=cloud-app -o name)
kubectl wait $CLOUD_POD --for=condition=Ready --timeout=240s
# wait for nextcloud config file available
timeout 180 bash -c "kubectl exec -t $CLOUD_POD -- bash -c \"until [ -f /var/www/html/config/config.php ]; do sleep 10; done\""
# ensure an instance of pod backup-restore
kubectl scale deployment backup-restore --replicas 1
# wait for localstack health endpoint
echo "$INGRESS_IP k3stesthost cloudhost" >> /etc/hosts
until curl --fail --silent k3stesthost/health | grep -oe '"s3": "available"' -oe '"s3": "running"'
do
curl --fail k3stesthost/health
echo "[INFO] Waiting for s3 running"
sleep 10
done
BACKUP_POD=$(kubectl get pod -l app=backup-restore -o name)
kubectl wait $BACKUP_POD --for=condition=Ready --timeout=240s
kubectl exec -t $BACKUP_POD -- bash -c "echo \"$INGRESS_IP k3stesthost cloudhost\" >> /etc/hosts"
kubectl exec -t $BACKUP_POD -- /usr/local/bin/init.sh
echo ================= BACKUP =================
kubectl exec -t $BACKUP_POD -- /usr/local/bin/backup.sh
sleep 10 # avoid race conditions
echo ================= RESTORE =================
kubectl exec -t $BACKUP_POD -- /usr/local/bin/restore.sh
}
main "$@"

View file

@@ -0,0 +1,34 @@
function main()
{
local bucket_name="${1:-mybucket}"; shift
./start-k3s.sh
sudo k3s kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
sudo k3s kubectl apply -f localstack.yaml
until sudo k3s kubectl apply -f certificate.yaml
do
echo "*** Waiting for certificate ... ***"
sleep 10
done
echo
echo
echo "[INFO] Waiting for localstack health endpoint"
until curl --connect-timeout 3 -s -f -o /dev/null "k3stesthost/health"
do
sleep 5
done
echo
sudo k3s kubectl get secret localstack-secret -o jsonpath="{.data.ca\.crt}" | base64 --decode > ca.crt
#aws --endpoint-url=http://localhost s3 mb s3://$bucket_name
export RESTIC_PASSWORD="test-password"
restic init --cacert ca.crt -r s3://k3stesthost/$bucket_name
}
main $@
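
An optional follow-up check of the restic repository the script initializes, reusing the same password and CA file:

```bash
export RESTIC_PASSWORD="test-password"
restic --cacert ca.crt -r s3://k3stesthost/mybucket snapshots
```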

View file

@@ -0,0 +1,9 @@
function main()
{
./start-k3s.sh
sudo k3s kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
}
main

View file

@@ -0,0 +1 @@
KUBECONFIG=~/.kube/custom-contexts/k3d-config.yml k3d cluster create nextcloud --k3s-arg '--tls-san cloudhost@loadbalancer' --port 80:80@loadbalancer --port 443:443@loadbalancer --api-port 6443 --kubeconfig-update-default

View file

@@ -0,0 +1 @@
curl -sfL https://get.k3s.io | K3S_NODE_NAME=k3stesthost INSTALL_K3S_EXEC='--tls-san cloudhost' sh -