Compare commits
93 commits
Author | SHA1 | Date | |
---|---|---|---|
d1a8479598 | |||
c4832b1107 | |||
7e3312e285 | |||
f636f7ffc3 | |||
bba6bbe830 | |||
8388d72517 | |||
f2b583c060 | |||
5a3aca38cf | |||
4764d1db67 | |||
8aef785bdc | |||
5eb83f78a0 | |||
d997e470a0 | |||
dc86531454 | |||
ca0d4ac7b2 | |||
e4666a592e | |||
fa48d9762a | |||
6a278ece0d | |||
dda45d92d6 | |||
c69c9da659 | |||
de53b9b7a5 | |||
cc845af696 | |||
f5aa2295f0 | |||
1d344abc27 | |||
7cd4e4101d | |||
5d2a65079e | |||
b36808de7c | |||
3e588c082c | |||
4a74a1bec0 | |||
1ff8a0dc13 | |||
fd27c15ec7 | |||
192e053afc | |||
cab9b573c1 | |||
226046d278 | |||
f108b67e62 | |||
96343b9af5 | |||
f072ff027d | |||
e396880365 | |||
83080b30a8 | |||
4a78a35424 | |||
8727f16c75 | |||
ac3f2a455d | |||
c05ecfa427 | |||
fff2c939d9 | |||
e8cec9de8a | |||
1fb309f213 | |||
70d41ca532 | |||
a96cba8cb1 | |||
3bdf2ea553 | |||
fcf6d7783e | |||
ab4c6e0d76 | |||
ef8bfa11fc | |||
7d9ca203bb | |||
f8bcbe63ba | |||
7219533c86 | |||
9b13b96ff6 | |||
351b2295e3 | |||
3a9694d9a1 | |||
3b458b980b | |||
5f38fb7526 | |||
fa81908791 | |||
17ffd5a10e | |||
f765b17dad | |||
2119a8b037 | |||
c2574c481f | |||
b08831c477 | |||
212a56b1ef | |||
ee2751509b | |||
59391d96d4 | |||
3b8ae92b83 | |||
408376a5ca | |||
e6785784a7 | |||
69f5e31c56 | |||
2f5dd28c23 | |||
de69d87c87 | |||
76cb0b5182 | |||
76cbffe348 | |||
4a2afd65b5 | |||
84f6974175 | |||
b8d241a2c8 | |||
439385933e | |||
2e90bae8c9 | |||
2771fed341 | |||
fad3a4d07c | |||
b51363bead | |||
47fa110f02 | |||
dc3e14517b | |||
741f0ce716 | |||
7ccf587c00 | |||
96b8b6a448 | |||
c931e36a0a | |||
252f14b987 | |||
99407b70f3 | |||
680f364b24 |
58 changed files with 297 additions and 630 deletions
|
@ -4,10 +4,9 @@ stages:
|
||||||
- security
|
- security
|
||||||
- upload
|
- upload
|
||||||
- image
|
- image
|
||||||
#- integrationtest
|
|
||||||
|
|
||||||
.img: &img
|
.img: &img
|
||||||
image: "domaindrivenarchitecture/ddadevops-dind:4.7.4"
|
image: "domaindrivenarchitecture/ddadevops-dind:4.11.3"
|
||||||
services:
|
services:
|
||||||
- docker:dind
|
- docker:dind
|
||||||
before_script:
|
before_script:
|
||||||
|
@ -17,7 +16,7 @@ stages:
|
||||||
- export IMAGE_TAG=$CI_COMMIT_TAG
|
- export IMAGE_TAG=$CI_COMMIT_TAG
|
||||||
|
|
||||||
.cljs-job: &cljs
|
.cljs-job: &cljs
|
||||||
image: "domaindrivenarchitecture/ddadevops-clj-cljs:4.7.4"
|
image: "domaindrivenarchitecture/ddadevops-clj-cljs:4.11.3"
|
||||||
cache:
|
cache:
|
||||||
key: ${CI_COMMIT_REF_SLUG}
|
key: ${CI_COMMIT_REF_SLUG}
|
||||||
paths:
|
paths:
|
||||||
|
@ -30,7 +29,7 @@ stages:
|
||||||
- npm install
|
- npm install
|
||||||
|
|
||||||
.clj-job: &clj
|
.clj-job: &clj
|
||||||
image: "domaindrivenarchitecture/ddadevops-clj-cljs:4.7.4"
|
image: "domaindrivenarchitecture/ddadevops-clj:4.11.3"
|
||||||
cache:
|
cache:
|
||||||
key: ${CI_COMMIT_REF_SLUG}
|
key: ${CI_COMMIT_REF_SLUG}
|
||||||
paths:
|
paths:
|
||||||
|
@ -94,6 +93,15 @@ package-uberjar:
|
||||||
paths:
|
paths:
|
||||||
- target/uberjar
|
- target/uberjar
|
||||||
|
|
||||||
|
package-native:
|
||||||
|
<<: *clj
|
||||||
|
stage: package
|
||||||
|
script:
|
||||||
|
- pyb package_native
|
||||||
|
artifacts:
|
||||||
|
paths:
|
||||||
|
- target/graalvm
|
||||||
|
|
||||||
release-to-clojars:
|
release-to-clojars:
|
||||||
<<: *clj
|
<<: *clj
|
||||||
<<: *tag_only
|
<<: *tag_only
|
||||||
|
@ -121,23 +129,3 @@ nextcloud-image-publish:
|
||||||
stage: image
|
stage: image
|
||||||
script:
|
script:
|
||||||
- cd infrastructure/nextcloud && pyb image publish
|
- cd infrastructure/nextcloud && pyb image publish
|
||||||
|
|
||||||
#.nextcloud-integrationtest:
|
|
||||||
# stage: integrationtest
|
|
||||||
# image: registry.gitlab.com/gitlab-org/cluster-integration/helm-install-image/releases/3.7.1-kube-1.20.11-alpine-3.14
|
|
||||||
# services:
|
|
||||||
# - name: registry.gitlab.com/gitlab-org/cluster-integration/test-utils/k3s-gitlab-ci/releases/v1.22.2-k3s2
|
|
||||||
# alias: k3s
|
|
||||||
# script:
|
|
||||||
# - apk add curl sudo bash
|
|
||||||
# - apk add wget curl bash sudo openjdk8
|
|
||||||
# - wget -P /etc/apk/keys/ https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub
|
|
||||||
# - apk add --no-cache --repository=https://apkproxy.herokuapp.com/sgerrand/alpine-pkg-leiningen leiningen
|
|
||||||
#
|
|
||||||
# - mkdir -p ${HOME}/.kube/
|
|
||||||
# - curl -f k3s:8081 > ${HOME}/.kube/config
|
|
||||||
# - kubectl version
|
|
||||||
# - kubectl cluster-info
|
|
||||||
# - echo "---------- Integration test -------------"
|
|
||||||
# - pwd
|
|
||||||
# - cd ./src/test/resources/local-integration-test/ && ./setup-local-s3-on-k3d.sh
|
|
|
@ -41,7 +41,8 @@ Development happens at: https://repo.prod.meissa.de/meissa/c4k-nextcloud
|
||||||
|
|
||||||
Mirrors are:
|
Mirrors are:
|
||||||
|
|
||||||
* https://gitlab.com/domaindrivenarchitecture/c4k-nextcloud (issues and PR, CI)
|
* https://codeberg.org/meissa/c4k-nextcloud (Issues and PR)
|
||||||
|
* https://gitlab.com/domaindrivenarchitecture/c4k-nextcloud (CI)
|
||||||
* https://github.com/DomainDrivenArchitecture/c4k-nextcloud
|
* https://github.com/DomainDrivenArchitecture/c4k-nextcloud
|
||||||
|
|
||||||
For more details about our repository model see: https://repo.prod.meissa.de/meissa/federate-your-repos
|
For more details about our repository model see: https://repo.prod.meissa.de/meissa/federate-your-repos
|
||||||
|
@ -49,6 +50,6 @@ For more details about our repository model see: https://repo.prod.meissa.de/mei
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
Copyright © 2021 meissa GmbH
|
Copyright © 2021, 2022, 2023, 2024 meissa GmbH
|
||||||
Licensed under the [Apache License, Version 2.0](LICENSE) (the "License")
|
Licensed under the [Apache License, Version 2.0](LICENSE) (the "License")
|
||||||
Pls. find licenses of our subcomponents [here](doc/SUBCOMPONENT_LICENSE)
|
Pls. find licenses of our subcomponents [here](doc/SUBCOMPONENT_LICENSE)
|
||||||
|
|
|
@ -1,7 +0,0 @@
|
||||||
{:postgres-db-user "nextcloud"
|
|
||||||
:postgres-db-password "dbpass"
|
|
||||||
:nextcloud-admin-user "cloudadmin"
|
|
||||||
:nextcloud-admin-password "cloudpassword"
|
|
||||||
:aws-access-key-id ""
|
|
||||||
:aws-secret-access-key ""
|
|
||||||
:restic-password "test-password"}
|
|
81
build.py
81
build.py
|
@ -29,10 +29,11 @@ def initialize(project):
|
||||||
"release_organisation": "meissa",
|
"release_organisation": "meissa",
|
||||||
"release_repository_name": name,
|
"release_repository_name": name,
|
||||||
"release_artifacts": [
|
"release_artifacts": [
|
||||||
"target/uberjar/c4k-nextcloud-standalone.jar",
|
f"target/graalvm/{name}",
|
||||||
"target/frontend-build/c4k-nextcloud.js",
|
f"target/uberjar/{name}-standalone.jar",
|
||||||
|
f"target/frontend-build/{name}.js",
|
||||||
],
|
],
|
||||||
"release_main_branch": "master",
|
"release_main_branch": "main",
|
||||||
}
|
}
|
||||||
|
|
||||||
build = ReleaseMixin(project, input)
|
build = ReleaseMixin(project, input)
|
||||||
|
@ -40,18 +41,18 @@ def initialize(project):
|
||||||
|
|
||||||
|
|
||||||
@task
|
@task
|
||||||
def test_clj(project):
|
def test_clj():
|
||||||
run("lein test", shell=True, check=True)
|
run("lein test", shell=True, check=True)
|
||||||
|
|
||||||
|
|
||||||
@task
|
@task
|
||||||
def test_cljs(project):
|
def test_cljs():
|
||||||
run("shadow-cljs compile test", shell=True, check=True)
|
run("shadow-cljs compile test", shell=True, check=True)
|
||||||
run("node target/node-tests.js", shell=True, check=True)
|
run("node target/node-tests.js", shell=True, check=True)
|
||||||
|
|
||||||
|
|
||||||
@task
|
@task
|
||||||
def test_schema(project):
|
def test_schema():
|
||||||
run("lein uberjar", shell=True, check=True)
|
run("lein uberjar", shell=True, check=True)
|
||||||
run(
|
run(
|
||||||
"java -jar target/uberjar/c4k-nextcloud-standalone.jar "
|
"java -jar target/uberjar/c4k-nextcloud-standalone.jar "
|
||||||
|
@ -62,6 +63,11 @@ def test_schema(project):
|
||||||
check=True,
|
check=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@task
|
||||||
|
def test():
|
||||||
|
test_clj()
|
||||||
|
test_cljs()
|
||||||
|
test_schema()
|
||||||
|
|
||||||
@task
|
@task
|
||||||
def report_frontend(project):
|
def report_frontend(project):
|
||||||
|
@ -96,6 +102,7 @@ def package_frontend(project):
|
||||||
|
|
||||||
@task
|
@task
|
||||||
def package_uberjar(project):
|
def package_uberjar(project):
|
||||||
|
run("lein uberjar", shell=True, check=True)
|
||||||
run(
|
run(
|
||||||
"sha256sum target/uberjar/c4k-nextcloud-standalone.jar > target/uberjar/c4k-nextcloud-standalone.jar.sha256",
|
"sha256sum target/uberjar/c4k-nextcloud-standalone.jar > target/uberjar/c4k-nextcloud-standalone.jar.sha256",
|
||||||
shell=True,
|
shell=True,
|
||||||
|
@ -107,6 +114,37 @@ def package_uberjar(project):
|
||||||
check=True,
|
check=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@task
|
||||||
|
def package_native(project):
|
||||||
|
run(
|
||||||
|
"mkdir -p target/graalvm",
|
||||||
|
shell=True,
|
||||||
|
check=True,
|
||||||
|
)
|
||||||
|
run(
|
||||||
|
"native-image " +
|
||||||
|
"--native-image-info " +
|
||||||
|
"--report-unsupported-elements-at-runtime " +
|
||||||
|
"--no-server " +
|
||||||
|
"--no-fallback " +
|
||||||
|
"--features=clj_easy.graal_build_time.InitClojureClasses " +
|
||||||
|
f"-jar target/uberjar/{project.name}-standalone.jar " +
|
||||||
|
"-H:IncludeResources=.*.yaml " +
|
||||||
|
"-H:Log=registerResource:verbose " +
|
||||||
|
f"-H:Name=target/graalvm/{project.name}",
|
||||||
|
shell=True,
|
||||||
|
check=True,
|
||||||
|
)
|
||||||
|
run(
|
||||||
|
f"sha256sum target/graalvm/{project.name} > target/graalvm/{project.name}.sha256",
|
||||||
|
shell=True,
|
||||||
|
check=True,
|
||||||
|
)
|
||||||
|
run(
|
||||||
|
f"sha512sum target/graalvm/{project.name} > target/graalvm/{project.name}.sha512",
|
||||||
|
shell=True,
|
||||||
|
check=True,
|
||||||
|
)
|
||||||
|
|
||||||
@task
|
@task
|
||||||
def upload_clj(project):
|
def upload_clj(project):
|
||||||
|
@ -115,17 +153,32 @@ def upload_clj(project):
|
||||||
|
|
||||||
@task
|
@task
|
||||||
def lint(project):
|
def lint(project):
|
||||||
#run(
|
run(
|
||||||
# "lein eastwood",
|
"lein eastwood",
|
||||||
# shell=True,
|
shell=True,
|
||||||
# check=True,
|
check=True,
|
||||||
#)
|
)
|
||||||
run(
|
run(
|
||||||
"lein ancient check",
|
"lein ancient check",
|
||||||
shell=True,
|
shell=True,
|
||||||
check=True,
|
check=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@task
|
||||||
|
def inst(project):
|
||||||
|
package_uberjar(project)
|
||||||
|
package_native(project)
|
||||||
|
run(
|
||||||
|
f"sudo install -m=755 target/uberjar/{project.name}-standalone.jar /usr/local/bin/{project.name}-standalone.jar",
|
||||||
|
shell=True,
|
||||||
|
check=True,
|
||||||
|
)
|
||||||
|
run(
|
||||||
|
f"sudo install -m=755 target/graalvm/{project.name} /usr/local/bin/{project.name}",
|
||||||
|
shell=True,
|
||||||
|
check=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
@task
|
@task
|
||||||
def patch(project):
|
def patch(project):
|
||||||
|
@ -174,7 +227,7 @@ def release(project):
|
||||||
def linttest(project, release_type):
|
def linttest(project, release_type):
|
||||||
build = get_devops_build(project)
|
build = get_devops_build(project)
|
||||||
build.update_release_type(release_type)
|
build.update_release_type(release_type)
|
||||||
test_clj(project)
|
test_clj()
|
||||||
test_cljs(project)
|
test_cljs()
|
||||||
test_schema(project)
|
test_schema()
|
||||||
lint(project)
|
lint(project)
|
||||||
|
|
|
@ -1,6 +0,0 @@
|
||||||
{:fqdn "cloudhost"
|
|
||||||
:issuer :staging
|
|
||||||
:nextcloud-data-volume-path "/var/cloud"
|
|
||||||
:postgres-data-volume-path "/var/postgres"
|
|
||||||
:restic-repository "s3://k3stesthost/mybucket"
|
|
||||||
:local-integration-test true}
|
|
|
@ -10,37 +10,37 @@
|
||||||
## Manual init the restic repository for the first time
|
## Manual init the restic repository for the first time
|
||||||
|
|
||||||
1. Scale backup-restore deployment up:
|
1. Scale backup-restore deployment up:
|
||||||
`kubectl scale deployment backup-restore --replicas=1`
|
`kubectl -n nextcloud scale deployment backup-restore --replicas=1`
|
||||||
1. exec into pod and execute restore pod
|
1. exec into pod and execute restore pod
|
||||||
`kubectl exec -it backup-restore -- /usr/local/bin/init.sh`
|
`kubectl -n nextcloud exec -it backup-restore -- /usr/local/bin/init.sh`
|
||||||
1. Scale backup-restore deployment down:
|
1. Scale backup-restore deployment down:
|
||||||
`kubectl scale deployment backup-restore --replicas=0`
|
`kubectl -n nextcloud scale deployment backup-restore --replicas=0`
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Manual backup the restic repository for the first time
|
## Manual backup
|
||||||
|
|
||||||
1. Scale Cloud deployment down:
|
1. Scale Cloud deployment down:
|
||||||
`kubectl scale deployment cloud-deployment --replicas=0`
|
`kubectl -n nextcloud scale deployment cloud-deployment --replicas=0`
|
||||||
1. Scale backup-restore deployment up:
|
1. Scale backup-restore deployment up:
|
||||||
`kubectl scale deployment backup-restore --replicas=1`
|
`kubectl -n nextcloud scale deployment backup-restore --replicas=1`
|
||||||
1. exec into pod and execute restore pod
|
1. exec into pod and execute restore pod
|
||||||
`kubectl exec -it backup-restore -- /usr/local/bin/backup.sh`
|
`kubectl -n nextcloud exec -it backup-restore -- /usr/local/bin/backup.sh`
|
||||||
1. Scale backup-restore deployment down:
|
1. Scale backup-restore deployment down:
|
||||||
`kubectl scale deployment backup-restore --replicas=0`
|
`kubectl -n nextcloud scale deployment backup-restore --replicas=0`
|
||||||
1. Scale Cloud deployment up:
|
1. Scale Cloud deployment up:
|
||||||
`kubectl scale deployment cloud-deployment --replicas=1`
|
`kubectl -n nextcloud scale deployment cloud-deployment --replicas=1`
|
||||||
|
|
||||||
|
|
||||||
## Manual restore
|
## Manual restore
|
||||||
|
|
||||||
1. Scale Cloud deployment down:
|
1. Scale Cloud deployment down:
|
||||||
`kubectl scale deployment cloud-deployment --replicas=0`
|
`kubectl -n nextcloud scale deployment cloud-deployment --replicas=0`
|
||||||
2. Scale backup-restore deployment up:
|
2. Scale backup-restore deployment up:
|
||||||
`kubectl scale deployment backup-restore --replicas=1`
|
`kubectl -n nextcloud scale deployment backup-restore --replicas=1`
|
||||||
3. exec into pod and execute restore pod
|
3. exec into pod and execute restore pod
|
||||||
`kubectl exec -it backup-restore -- /usr/local/bin/restore.sh`
|
`kubectl -n nextcloud exec -it backup-restore -- /usr/local/bin/restore.sh`
|
||||||
4. Scale backup-restore deployment down:
|
4. Scale backup-restore deployment down:
|
||||||
`kubectl scale deployment backup-restore --replicas=0`
|
`kubectl -n nextcloud scale deployment backup-restore --replicas=0`
|
||||||
5. Scale Cloud deployment up:
|
5. Scale Cloud deployment up:
|
||||||
`kubectl scale deployment cloud-deployment --replicas=1`
|
`kubectl -n nextcloud scale deployment cloud-deployment --replicas=1`
|
||||||
|
|
|
@ -39,34 +39,31 @@ npx shadow-cljs release frontend
|
||||||
## graalvm-setup
|
## graalvm-setup
|
||||||
|
|
||||||
```
|
```
|
||||||
curl -LO https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.0.0.2/graalvm-ce-java11-linux-amd64-21.0.0.2.tar.gz
|
curl -LO https://github.com/graalvm/graalvm-ce-builds/releases/download/jdk-21.0.2/graalvm-community-jdk-21.0.2_linux-x64_bin.tar.gz
|
||||||
|
|
||||||
# unpack
|
# unpack
|
||||||
tar -xzf graalvm-ce-java11-linux-amd64-21.0.0.2.tar.gz
|
tar -xzf graalvm-community-jdk-21.0.2_linux-x64_bin.tar.gz
|
||||||
|
|
||||||
sudo mv graalvm-ce-java11-21.0.0.2 /usr/lib/jvm/
|
sudo mv graalvm-community-openjdk-21.0.2+13.1 /usr/lib/jvm/
|
||||||
sudo ln -s /usr/lib/jvm/graalvm-ce-java11-21.0.0.2 /usr/lib/jvm/graalvm
|
sudo ln -s /usr/lib/jvm/graalvm-community-openjdk-21.0.2+13.1 /usr/lib/jvm/graalvm-21
|
||||||
sudo ln -s /usr/lib/jvm/graalvm/bin/gu /usr/local/bin
|
sudo ln -s /usr/lib/jvm/graalvm-21/bin/gu /usr/local/bin
|
||||||
sudo update-alternatives --install /usr/bin/java java /usr/lib/jvm/graalvm/bin/java 2
|
sudo update-alternatives --install /usr/bin/java java /usr/lib/jvm/graalvm-21/bin/java 2
|
||||||
sudo update-alternatives --config java
|
sudo update-alternatives --config java
|
||||||
|
sudo ln -s /usr/lib/jvm/graalvm-21/bin/native-image /usr/local/bin
|
||||||
# install native-image in graalvm-ce-java11-linux-amd64-21.0.0.2/bin
|
|
||||||
sudo gu install native-image
|
|
||||||
sudo ln -s /usr/lib/jvm/graalvm/bin/native-image /usr/local/bin
|
|
||||||
|
|
||||||
# deps
|
# deps
|
||||||
sudo apt-get install build-essential libz-dev zlib1g-dev
|
sudo apt-get install build-essential libz-dev zlib1g-dev
|
||||||
|
|
||||||
# build
|
# build
|
||||||
cd ~/repo/dda/c4k-cloud
|
cd ~/repo/c4k/c4k-nextcloud
|
||||||
lein uberjar
|
lein uberjar
|
||||||
mkdir -p target/graalvm
|
mkdir -p target/graalvm
|
||||||
lein native
|
lein native
|
||||||
|
|
||||||
# execute
|
# execute
|
||||||
./target/graalvm/c4k-cloud -h
|
./target/graalvm/c4k-nextcloud -h
|
||||||
./target/graalvm/c4k-cloud src/test/resources/valid-config.edn src/test/resources/valid-auth.edn
|
./target/graalvm/c4k-nextcloud src/test/resources/nextcloud-test/valid-config.yaml src/test/resources/nextcloud-test/valid-auth.yaml
|
||||||
./target/graalvm/c4k-cloud src/test/resources/invalid-config.edn src/test/resources/invalid-auth.edn
|
./target/graalvm/c4k-nextcloud src/test/resources/nextcloud-test/invalid-config.yaml src/test/resources/nextcloud-test/invalid-auth.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
## c4k-setup
|
## c4k-setup
|
||||||
|
|
|
@ -5,12 +5,17 @@
|
||||||
- 4.0.3: nextcloud 22
|
- 4.0.3: nextcloud 22
|
||||||
- 5.0.0: nextcloud 23
|
- 5.0.0: nextcloud 23
|
||||||
- 6.0.0: nextcloud 24
|
- 6.0.0: nextcloud 24
|
||||||
- 7.0.0: nextcloud 25
|
- 7.0.7: nextcloud 25.0.13
|
||||||
|
- 7.1.1: nextcloud 26.0.0 (manual publish) => attention - only upgrade to 26.0.0 is working
|
||||||
|
- 7.1.0: nextcloud 26.0.13 (manual publish)
|
||||||
|
- 7.2.0: nextcloud 27 (manual publish)
|
||||||
|
- 10.0.0: nextcloud 28.0.5
|
||||||
|
- 10.1.0: nextcloud 29.0.0
|
||||||
|
|
||||||
## Uprgrading process
|
## Uprgrading process
|
||||||
|
|
||||||
1. Change the version of the docker image in the deployment to the next major version
|
1. Change the version of the docker image in the deployment to the next major version
|
||||||
- `kubectl edit deploy cloud-deployment`
|
- `kubectl -n=nextcloud edit deploy cloud-deployment`
|
||||||
- change `image: domaindrivenarchitecture/c4k-cloud:4.0.3`
|
- change `image: domaindrivenarchitecture/c4k-cloud:4.0.3`
|
||||||
2. Wait for the pod to finish restarting
|
2. Wait for the pod to finish restarting
|
||||||
3. Verify the website is working and https://URL/settings/admin/overview shows the correct version
|
3. Verify the website is working and https://URL/settings/admin/overview shows the correct version
|
||||||
|
|
41
doc/RenameDatabase.md
Normal file
41
doc/RenameDatabase.md
Normal file
|
@ -0,0 +1,41 @@
|
||||||
|
# Rename Database
|
||||||
|
|
||||||
|
## Start
|
||||||
|
|
||||||
|
1. Scale down cloud deployment
|
||||||
|
`k -n nextcloud scale deployment cloud-deployment --replicas 0`
|
||||||
|
|
||||||
|
## Change db-name in postgres
|
||||||
|
|
||||||
|
1. Connect to postgres-pod
|
||||||
|
`k -n nextcloud exec -it postgresql-... -- bash`
|
||||||
|
2. Connect to a database
|
||||||
|
`PGPASSWORD=$POSTGRES_PASSWORD psql -h postgresql-service -U $POSTGRES_USER postgres`
|
||||||
|
3. List available databases
|
||||||
|
`\l`
|
||||||
|
4. Rename database
|
||||||
|
`ALTER DATABASE cloud RENAME TO nextcloud;`
|
||||||
|
5. Verify
|
||||||
|
`\l`
|
||||||
|
6. Quit
|
||||||
|
`\q`
|
||||||
|
|
||||||
|
## Update postgres-config
|
||||||
|
|
||||||
|
1. Edit configmap
|
||||||
|
`k -n nextcloud edit configmap postgres-config`
|
||||||
|
2. Update postgres-db value
|
||||||
|
3. Save
|
||||||
|
|
||||||
|
## Update nextcloud db-name
|
||||||
|
|
||||||
|
1. Scale up nextcloud
|
||||||
|
`k -n nextcloud scale deployment cloud-deployment --replicas 1`
|
||||||
|
2. Connect
|
||||||
|
`k -n nextcloud exec -it cloud-deployment-... -- bash`
|
||||||
|
3. Update db value in config.php
|
||||||
|
`apt update`
|
||||||
|
`apt install vim`
|
||||||
|
`vim config/config.php`
|
||||||
|
4. Update dbname field
|
||||||
|
5. Verify server+website is working
|
|
@ -6,7 +6,7 @@ from ddadevops import *
|
||||||
name = "c4k-cloud"
|
name = "c4k-cloud"
|
||||||
MODULE = "backup"
|
MODULE = "backup"
|
||||||
PROJECT_ROOT_PATH = "../.."
|
PROJECT_ROOT_PATH = "../.."
|
||||||
version = "7.0.1"
|
version = "10.2.1-dev"
|
||||||
|
|
||||||
|
|
||||||
@init
|
@init
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
FROM domaindrivenarchitecture/dda-backup:1.0.7
|
FROM domaindrivenarchitecture/dda-backup:latest
|
||||||
|
|
||||||
# Prepare Entrypoint Script
|
# Prepare Entrypoint Script
|
||||||
ADD resources /tmp
|
ADD resources /tmp
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -o pipefail
|
set -Eexo pipefail
|
||||||
|
|
||||||
function main() {
|
function main() {
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -x
|
set -exo pipefail
|
||||||
|
|
||||||
if test -f "/var/backups/config/config.orig"; then
|
if test -f "/var/backups/config/config.orig"; then
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,7 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -exo pipefail
|
||||||
|
|
||||||
function main() {
|
function main() {
|
||||||
file_env POSTGRES_DB
|
file_env POSTGRES_DB
|
||||||
file_env POSTGRES_PASSWORD
|
file_env POSTGRES_PASSWORD
|
||||||
|
|
|
@ -1,5 +1,7 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -Eexo pipefail
|
||||||
|
|
||||||
function main() {
|
function main() {
|
||||||
file_env POSTGRES_DB
|
file_env POSTGRES_DB
|
||||||
file_env POSTGRES_PASSWORD
|
file_env POSTGRES_PASSWORD
|
||||||
|
|
|
@ -1,5 +1,7 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -Eexo pipefail
|
||||||
|
|
||||||
function main() {
|
function main() {
|
||||||
file_env AWS_ACCESS_KEY_ID
|
file_env AWS_ACCESS_KEY_ID
|
||||||
file_env AWS_SECRET_ACCESS_KEY
|
file_env AWS_SECRET_ACCESS_KEY
|
||||||
|
|
|
@ -1,11 +1,21 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
install -m 0700 /tmp/entrypoint.sh /
|
set -exo pipefail
|
||||||
install -m 0700 /tmp/entrypoint-start-and-wait.sh /
|
|
||||||
|
|
||||||
install -m 0700 /tmp/init.sh /usr/local/bin/
|
function main() {
|
||||||
install -m 0700 /tmp/backup.sh /usr/local/bin/
|
{
|
||||||
install -m 0700 /tmp/restore.sh /usr/local/bin/
|
install -m 0700 /tmp/entrypoint.sh /
|
||||||
install -m 0700 /tmp/list-snapshots.sh /usr/local/bin/
|
install -m 0700 /tmp/entrypoint-start-and-wait.sh /
|
||||||
install -m 0700 /tmp/start-maintenance.sh /usr/local/bin/
|
|
||||||
install -m 0700 /tmp/end-maintenance.sh /usr/local/bin/
|
install -m 0700 /tmp/init.sh /usr/local/bin/
|
||||||
|
install -m 0700 /tmp/backup.sh /usr/local/bin/
|
||||||
|
install -m 0700 /tmp/restore.sh /usr/local/bin/
|
||||||
|
install -m 0700 /tmp/list-snapshots.sh /usr/local/bin/
|
||||||
|
install -m 0700 /tmp/start-maintenance.sh /usr/local/bin/
|
||||||
|
install -m 0700 /tmp/end-maintenance.sh /usr/local/bin/
|
||||||
|
cleanupDocker
|
||||||
|
} > /dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
source /tmp/install_functions_debian.sh
|
||||||
|
main
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -o pipefail
|
set -exo pipefail
|
||||||
|
|
||||||
function list-snapshot-files() {
|
function list-snapshot-files() {
|
||||||
if [ -z ${CERTIFICATE_FILE} ];
|
if [ -z ${CERTIFICATE_FILE} ];
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -Eeox pipefail
|
set -Eexo pipefail
|
||||||
|
|
||||||
function main() {
|
function main() {
|
||||||
local role_snapshot_id="${1:-latest}"
|
local role_snapshot_id="${1:-latest}"
|
||||||
|
|
|
@ -1,7 +1,5 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -x
|
|
||||||
|
|
||||||
if [ ! -f "/var/backups/config/config.orig" ]; then
|
if [ ! -f "/var/backups/config/config.orig" ]; then
|
||||||
|
|
||||||
rm -f /var/backups/config/config.orig
|
rm -f /var/backups/config/config.orig
|
||||||
|
|
|
@ -1,10 +0,0 @@
|
||||||
FROM c4k-cloud-backup
|
|
||||||
|
|
||||||
RUN apt update > /dev/null
|
|
||||||
RUN apt -yqq --no-install-recommends --yes install curl default-jre-headless > /dev/null
|
|
||||||
|
|
||||||
RUN curl -L -o /tmp/serverspec.jar https://github.com/DomainDrivenArchitecture/dda-serverspec-crate/releases/download/2.0.0/dda-serverspec-standalone.jar
|
|
||||||
|
|
||||||
COPY serverspec.edn /tmp/serverspec.edn
|
|
||||||
|
|
||||||
RUN java -jar /tmp/serverspec.jar /tmp/serverspec.edn -v
|
|
|
@ -1,7 +0,0 @@
|
||||||
{:file [{:path "/usr/local/bin/init.sh" :mod "700"}
|
|
||||||
{:path "/usr/local/bin/backup.sh" :mod "700"}
|
|
||||||
{:path "/usr/local/bin/restore.sh" :mod "700"}
|
|
||||||
{:path "/usr/local/bin/start-maintenance.sh" :mod "700"}
|
|
||||||
{:path "/usr/local/bin/end-maintenance.sh" :mod "700"}
|
|
||||||
{:path "/entrypoint.sh" :mod "700"}
|
|
||||||
{:path "/entrypoint-start-and-wait.sh" :mod "700"}]}
|
|
|
@ -6,7 +6,7 @@ from ddadevops import *
|
||||||
name = 'c4k-cloud'
|
name = 'c4k-cloud'
|
||||||
MODULE = 'not_set'
|
MODULE = 'not_set'
|
||||||
PROJECT_ROOT_PATH = '../..'
|
PROJECT_ROOT_PATH = '../..'
|
||||||
version = "7.0.1"
|
version = "10.2.1-dev"
|
||||||
|
|
||||||
@init
|
@init
|
||||||
def initialize(project):
|
def initialize(project):
|
||||||
|
|
|
@ -1,4 +1,7 @@
|
||||||
FROM nextcloud:25
|
FROM nextcloud:29
|
||||||
|
|
||||||
|
# REQUIRES docker >= 2.10.10
|
||||||
|
# https://docs.docker.com/engine/release-notes/20.10/#201010
|
||||||
|
|
||||||
# Prepare Entrypoint Script
|
# Prepare Entrypoint Script
|
||||||
ADD resources /tmp
|
ADD resources /tmp
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
#!/bin/sh
|
#!/bin/bash
|
||||||
set -eu
|
|
||||||
|
set -exo pipefail
|
||||||
|
|
||||||
# version_greater A B returns whether A > B
|
# version_greater A B returns whether A > B
|
||||||
version_greater() {
|
version_greater() {
|
||||||
|
|
|
@ -1,11 +1,20 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
set -Eeo pipefail
|
set -exo pipefail
|
||||||
|
|
||||||
apt update && apt -qqy install postgresql-client > /dev/null
|
function main() {
|
||||||
|
{
|
||||||
|
upgradeSystem
|
||||||
|
apt-get install -qqy ca-certificates curl gnupg postgresql-client
|
||||||
|
mkdir /var/data
|
||||||
|
} > /dev/null
|
||||||
|
|
||||||
mkdir /var/data
|
install -m 0700 /tmp/install-debug.sh /usr/local/bin/
|
||||||
|
install -m 0544 /tmp/upload-max-limit.ini /usr/local/etc/php/conf.d/
|
||||||
|
install -m 0544 /tmp/memory-limit.ini /usr/local/etc/php/conf.d/
|
||||||
|
install -m 0755 /tmp/entrypoint.sh /
|
||||||
|
|
||||||
install -m 0700 /tmp/install-debug.sh /usr/local/bin/
|
cleanupDocker
|
||||||
install -m 0544 /tmp/upload-max-limit.ini /usr/local/etc/php/conf.d/
|
}
|
||||||
install -m 0544 /tmp/memory-limit.ini /usr/local/etc/php/conf.d/
|
|
||||||
install -m 0755 /tmp/entrypoint.sh /
|
source /tmp/install_functions_debian.sh
|
||||||
|
DEBIAN_FRONTEND=noninteractive DEBCONF_NOWARNINGS=yes main
|
|
@ -1,10 +0,0 @@
|
||||||
FROM c4k-cloud
|
|
||||||
|
|
||||||
RUN apt update
|
|
||||||
RUN apt -yqq install --no-install-recommends --yes curl default-jre-headless
|
|
||||||
|
|
||||||
RUN curl -L -o /tmp/serverspec.jar https://github.com/DomainDrivenArchitecture/dda-serverspec-crate/releases/download/2.0.0/dda-serverspec-standalone.jar
|
|
||||||
|
|
||||||
COPY serverspec.edn /tmp/serverspec.edn
|
|
||||||
|
|
||||||
RUN java -jar /tmp/serverspec.jar /tmp/serverspec.edn -v
|
|
|
@ -1,5 +0,0 @@
|
||||||
{:file [{:path "/var/data"}
|
|
||||||
{:path "/usr/local/bin/install-debug.sh" :mod "700"}
|
|
||||||
{:path "/usr/local/etc/php/conf.d/upload-max-limit.ini" :mod "544"}
|
|
||||||
{:path "/usr/local/etc/php/conf.d/memory-limit.ini" :mod "544"}
|
|
||||||
{:path "/entrypoint.sh" :mod "755"}]}
|
|
|
@ -2,7 +2,7 @@
|
||||||
"name": "c4k-nextcloud",
|
"name": "c4k-nextcloud",
|
||||||
"description": "Generate c4k yaml for a nextcloud deployment.",
|
"description": "Generate c4k yaml for a nextcloud deployment.",
|
||||||
"author": "meissa GmbH",
|
"author": "meissa GmbH",
|
||||||
"version": "7.0.1",
|
"version": "10.2.1-SNAPSHOT",
|
||||||
"homepage": "https://gitlab.com/domaindrivenarchitecture/c4k-nextcloud#readme",
|
"homepage": "https://gitlab.com/domaindrivenarchitecture/c4k-nextcloud#readme",
|
||||||
"repository": "https://www.npmjs.com/package/c4k-nextcloud",
|
"repository": "https://www.npmjs.com/package/c4k-nextcloud",
|
||||||
"license": "APACHE2",
|
"license": "APACHE2",
|
||||||
|
|
29
project.clj
29
project.clj
|
@ -1,11 +1,11 @@
|
||||||
(defproject org.domaindrivenarchitecture/c4k-nextcloud "7.0.1"
|
(defproject org.domaindrivenarchitecture/c4k-nextcloud "10.2.1-SNAPSHOT"
|
||||||
:description "nextcloud c4k-installation package"
|
:description "nextcloud c4k-installation package"
|
||||||
:url "https://domaindrivenarchitecture.org"
|
:url "https://domaindrivenarchitecture.org"
|
||||||
:license {:name "Apache License, Version 2.0"
|
:license {:name "Apache License, Version 2.0"
|
||||||
:url "https://www.apache.org/licenses/LICENSE-2.0.html"}
|
:url "https://www.apache.org/licenses/LICENSE-2.0.html"}
|
||||||
:dependencies [[org.clojure/clojure "1.11.1"]
|
:dependencies [[org.clojure/clojure "1.11.3"]
|
||||||
[org.clojure/tools.reader "1.3.6"]
|
[org.clojure/tools.reader "1.4.2"]
|
||||||
[org.domaindrivenarchitecture/c4k-common-clj "6.0.3"]
|
[org.domaindrivenarchitecture/c4k-common-clj "6.3.1"]
|
||||||
[hickory "0.7.1" :exclusions [viebel/codox-klipse-theme]]]
|
[hickory "0.7.1" :exclusions [viebel/codox-klipse-theme]]]
|
||||||
:target-path "target/%s/"
|
:target-path "target/%s/"
|
||||||
:source-paths ["src/main/cljc"
|
:source-paths ["src/main/cljc"
|
||||||
|
@ -22,25 +22,14 @@
|
||||||
:uberjar {:aot :all
|
:uberjar {:aot :all
|
||||||
:main dda.c4k-nextcloud.uberjar
|
:main dda.c4k-nextcloud.uberjar
|
||||||
:uberjar-name "c4k-nextcloud-standalone.jar"
|
:uberjar-name "c4k-nextcloud-standalone.jar"
|
||||||
:dependencies [[org.clojure/tools.cli "1.0.219"]
|
:dependencies [[org.clojure/tools.cli "1.1.230"]
|
||||||
[ch.qos.logback/logback-classic "1.4.11"
|
[ch.qos.logback/logback-classic "1.5.6"
|
||||||
:exclusions [com.sun.mail/javax.mail]]
|
:exclusions [com.sun.mail/javax.mail]]
|
||||||
[org.slf4j/jcl-over-slf4j "2.0.9"]]}}
|
[org.slf4j/jcl-over-slf4j "2.0.13"]
|
||||||
|
[com.github.clj-easy/graal-build-time "1.0.5"]]}}
|
||||||
:release-tasks [["test"]
|
:release-tasks [["test"]
|
||||||
["vcs" "assert-committed"]
|
["vcs" "assert-committed"]
|
||||||
["change" "version" "leiningen.release/bump-version" "release"]
|
["change" "version" "leiningen.release/bump-version" "release"]
|
||||||
["vcs" "commit"]
|
["vcs" "commit"]
|
||||||
["vcs" "tag" "v" "--no-sign"]
|
["vcs" "tag" "v" "--no-sign"]
|
||||||
["change" "version" "leiningen.release/bump-version"]]
|
["change" "version" "leiningen.release/bump-version"]])
|
||||||
:aliases {"native" ["shell"
|
|
||||||
"native-image"
|
|
||||||
"--report-unsupported-elements-at-runtime"
|
|
||||||
"--initialize-at-build-time"
|
|
||||||
"-jar" "target/uberjar/c4k-nextcloud-standalone.jar"
|
|
||||||
"-H:ResourceConfigurationFiles=graalvm-resource-config.json"
|
|
||||||
"-H:Log=registerResource"
|
|
||||||
"-H:Name=target/graalvm/${:name}"]
|
|
||||||
"inst" ["shell"
|
|
||||||
"sh"
|
|
||||||
"-c"
|
|
||||||
"lein uberjar && sudo install -m=755 target/uberjar/c4k-nextcloud-standalone.jar /usr/local/bin/c4k-nextcloud-standalone.jar"]})
|
|
|
@ -4,7 +4,7 @@
|
||||||
"src/test/cljc"
|
"src/test/cljc"
|
||||||
"src/test/cljs"
|
"src/test/cljs"
|
||||||
"src/test/resources"]
|
"src/test/resources"]
|
||||||
:dependencies [[org.domaindrivenarchitecture/c4k-common-cljs "6.0.3"]
|
:dependencies [[org.domaindrivenarchitecture/c4k-common-cljs "6.3.1"]
|
||||||
[hickory "0.7.1"]]
|
[hickory "0.7.1"]]
|
||||||
:builds {:frontend {:target :browser
|
:builds {:frontend {:target :browser
|
||||||
:modules {:main {:init-fn dda.c4k-nextcloud.browser/init}}
|
:modules {:main {:init-fn dda.c4k-nextcloud.browser/init}}
|
||||||
|
|
|
@ -1,24 +1,20 @@
|
||||||
(ns dda.c4k-nextcloud.backup
|
(ns dda.c4k-nextcloud.backup
|
||||||
(:require
|
(:require
|
||||||
[clojure.spec.alpha :as s]
|
[clojure.spec.alpha :as s]
|
||||||
#?(:cljs [shadow.resource :as rc])
|
|
||||||
[dda.c4k-common.yaml :as yaml]
|
[dda.c4k-common.yaml :as yaml]
|
||||||
[dda.c4k-common.base64 :as b64]
|
[dda.c4k-common.base64 :as b64]
|
||||||
[dda.c4k-common.common :as cm]))
|
[dda.c4k-common.common :as cm]
|
||||||
|
[dda.c4k-common.predicate :as cp]
|
||||||
|
#?(:cljs [dda.c4k-common.macros :refer-macros [inline-resources]])))
|
||||||
|
|
||||||
(s/def ::aws-access-key-id cm/bash-env-string?)
|
(s/def ::aws-access-key-id cp/bash-env-string?)
|
||||||
(s/def ::aws-secret-access-key cm/bash-env-string?)
|
(s/def ::aws-secret-access-key cp/bash-env-string?)
|
||||||
(s/def ::restic-password cm/bash-env-string?)
|
(s/def ::restic-password cp/bash-env-string?)
|
||||||
(s/def ::restic-repository cm/bash-env-string?)
|
(s/def ::restic-repository cp/bash-env-string?)
|
||||||
|
|
||||||
#?(:cljs
|
#?(:cljs
|
||||||
(defmethod yaml/load-resource :backup [resource-name]
|
(defmethod yaml/load-resource :backup [resource-name]
|
||||||
(case resource-name
|
(get (inline-resources "backup") resource-name)))
|
||||||
"backup/config.yaml" (rc/inline "backup/config.yaml")
|
|
||||||
"backup/cron.yaml" (rc/inline "backup/cron.yaml")
|
|
||||||
"backup/secret.yaml" (rc/inline "backup/secret.yaml")
|
|
||||||
"backup/backup-restore-deployment.yaml" (rc/inline "backup/backup-restore-deployment.yaml")
|
|
||||||
(throw (js/Error. "Undefined Resource!")))))
|
|
||||||
|
|
||||||
(defn generate-config [my-conf]
|
(defn generate-config [my-conf]
|
||||||
(let [{:keys [restic-repository]} my-conf]
|
(let [{:keys [restic-repository]} my-conf]
|
||||||
|
|
|
@ -1,6 +1,5 @@
|
||||||
(ns dda.c4k-nextcloud.core
|
(ns dda.c4k-nextcloud.core
|
||||||
(:require
|
(:require
|
||||||
[clojure.spec.alpha :as s]
|
|
||||||
#?(:clj [orchestra.core :refer [defn-spec]]
|
#?(:clj [orchestra.core :refer [defn-spec]]
|
||||||
:cljs [orchestra.core :refer-macros [defn-spec]])
|
:cljs [orchestra.core :refer-macros [defn-spec]])
|
||||||
[dda.c4k-common.common :as cm]
|
[dda.c4k-common.common :as cm]
|
||||||
|
@ -9,36 +8,36 @@
|
||||||
[dda.c4k-common.postgres :as postgres]
|
[dda.c4k-common.postgres :as postgres]
|
||||||
[dda.c4k-nextcloud.nextcloud :as nextcloud]
|
[dda.c4k-nextcloud.nextcloud :as nextcloud]
|
||||||
[dda.c4k-nextcloud.backup :as backup]
|
[dda.c4k-nextcloud.backup :as backup]
|
||||||
[dda.c4k-common.monitoring :as mon]))
|
[dda.c4k-common.monitoring :as mon]
|
||||||
|
[dda.c4k-common.namespace :as ns]))
|
||||||
|
|
||||||
(def default-storage-class :local-path)
|
(def config-defaults {:namespace "nextcloud"
|
||||||
|
:issuer "staging"
|
||||||
(def config-defaults {:issuer "staging"})
|
:pvc-storage-class-name "hcloud-volumes-encrypted"
|
||||||
|
:pv-storage-size-gb 200})
|
||||||
|
|
||||||
(defn-spec k8s-objects cp/map-or-seq?
|
(defn-spec k8s-objects cp/map-or-seq?
|
||||||
[config nextcloud/config?
|
[config nextcloud/config?
|
||||||
auth nextcloud/auth?]
|
auth nextcloud/auth?]
|
||||||
(let [nextcloud-default-storage-config {:pvc-storage-class-name default-storage-class
|
(let [resolved-config (merge config-defaults config)]
|
||||||
:pv-storage-size-gb 200}]
|
|
||||||
(map yaml/to-string
|
(map yaml/to-string
|
||||||
(filter
|
(filter
|
||||||
#(not (nil? %))
|
#(not (nil? %))
|
||||||
(cm/concat-vec
|
(cm/concat-vec
|
||||||
[(postgres/generate-config {:postgres-size :8gb :db-name "nextcloud"})
|
(ns/generate resolved-config)
|
||||||
(postgres/generate-secret auth)
|
(postgres/generate (merge resolved-config {:postgres-size :8gb
|
||||||
(postgres/generate-pvc {:pv-storage-size-gb 50
|
:db-name "cloud"
|
||||||
:pvc-storage-class-name default-storage-class})
|
:pv-storage-size-gb 50})
|
||||||
(postgres/generate-deployment)
|
auth)
|
||||||
(postgres/generate-service)
|
[(nextcloud/generate-secret auth)
|
||||||
(nextcloud/generate-secret auth)
|
(nextcloud/generate-pvc resolved-config)
|
||||||
(nextcloud/generate-pvc (merge nextcloud-default-storage-config config))
|
(nextcloud/generate-deployment resolved-config)
|
||||||
(nextcloud/generate-deployment config)
|
|
||||||
(nextcloud/generate-service)]
|
(nextcloud/generate-service)]
|
||||||
(nextcloud/generate-ingress-and-cert config)
|
(nextcloud/generate-ingress-and-cert resolved-config)
|
||||||
(when (:contains? config :restic-repository)
|
(when (:contains? resolved-config :restic-repository)
|
||||||
[(backup/generate-config config)
|
[(backup/generate-config resolved-config)
|
||||||
(backup/generate-secret auth)
|
(backup/generate-secret auth)
|
||||||
(backup/generate-cron)
|
(backup/generate-cron)
|
||||||
(backup/generate-backup-restore-deployment config)])
|
(backup/generate-backup-restore-deployment resolved-config)])
|
||||||
(when (:contains? config :mon-cfg)
|
(when (:contains? resolved-config :mon-cfg)
|
||||||
(mon/generate (:mon-cfg config) (:mon-auth auth))))))))
|
(mon/generate (:mon-cfg resolved-config) (:mon-auth auth))))))))
|
||||||
|
|
|
@ -1,7 +1,6 @@
|
||||||
(ns dda.c4k-nextcloud.nextcloud
|
(ns dda.c4k-nextcloud.nextcloud
|
||||||
(:require
|
(:require
|
||||||
[clojure.spec.alpha :as s]
|
[clojure.spec.alpha :as s]
|
||||||
#?(:cljs [shadow.resource :as rc])
|
|
||||||
#?(:clj [orchestra.core :refer [defn-spec]]
|
#?(:clj [orchestra.core :refer [defn-spec]]
|
||||||
:cljs [orchestra.core :refer-macros [defn-spec]])
|
:cljs [orchestra.core :refer-macros [defn-spec]])
|
||||||
[dda.c4k-common.yaml :as yaml]
|
[dda.c4k-common.yaml :as yaml]
|
||||||
|
@ -10,7 +9,8 @@
|
||||||
[dda.c4k-common.predicate :as cp]
|
[dda.c4k-common.predicate :as cp]
|
||||||
[dda.c4k-common.postgres :as postgres]
|
[dda.c4k-common.postgres :as postgres]
|
||||||
[dda.c4k-common.common :as cm]
|
[dda.c4k-common.common :as cm]
|
||||||
[dda.c4k-common.monitoring :as mon]))
|
[dda.c4k-common.monitoring :as mon]
|
||||||
|
#?(:cljs [dda.c4k-common.macros :refer-macros [inline-resources]])))
|
||||||
|
|
||||||
(s/def ::fqdn cp/fqdn-string?)
|
(s/def ::fqdn cp/fqdn-string?)
|
||||||
(s/def ::issuer cp/letsencrypt-issuer?)
|
(s/def ::issuer cp/letsencrypt-issuer?)
|
||||||
|
@ -35,18 +35,13 @@
|
||||||
|
|
||||||
#?(:cljs
|
#?(:cljs
|
||||||
(defmethod yaml/load-resource :nextcloud [resource-name]
|
(defmethod yaml/load-resource :nextcloud [resource-name]
|
||||||
(case resource-name
|
(get (inline-resources "nextcloud") resource-name)))
|
||||||
"nextcloud/deployment.yaml" (rc/inline "nextcloud/deployment.yaml")
|
|
||||||
"nextcloud/pvc.yaml" (rc/inline "nextcloud/pvc.yaml")
|
|
||||||
"nextcloud/service.yaml" (rc/inline "nextcloud/service.yaml")
|
|
||||||
"nextcloud/secret.yaml" (rc/inline "nextcloud/secret.yaml")
|
|
||||||
(throw (js/Error. "Undefined Resource!")))))
|
|
||||||
|
|
||||||
(defn-spec generate-deployment cp/map-or-seq?
|
(defn-spec generate-deployment cp/map-or-seq?
|
||||||
[config config?]
|
[config config?]
|
||||||
(let [{:keys [fqdn]} config]
|
(let [{:keys [fqdn]} config]
|
||||||
(-> (yaml/load-as-edn "nextcloud/deployment.yaml")
|
(-> (yaml/load-as-edn "nextcloud/deployment.yaml")
|
||||||
(cm/replace-all-matching-values-by-new-value "fqdn" fqdn))))
|
(cm/replace-all-matching "fqdn" fqdn))))
|
||||||
|
|
||||||
(defn-spec generate-ingress-and-cert cp/map-or-seq?
|
(defn-spec generate-ingress-and-cert cp/map-or-seq?
|
||||||
[config config?]
|
[config config?]
|
||||||
|
|
|
@ -2,6 +2,7 @@ apiVersion: apps/v1
|
||||||
kind: Deployment
|
kind: Deployment
|
||||||
metadata:
|
metadata:
|
||||||
name: backup-restore
|
name: backup-restore
|
||||||
|
namespace: nextcloud
|
||||||
spec:
|
spec:
|
||||||
replicas: 0
|
replicas: 0
|
||||||
selector:
|
selector:
|
||||||
|
@ -67,6 +68,9 @@ spec:
|
||||||
- name: cloud-secret-volume
|
- name: cloud-secret-volume
|
||||||
mountPath: /var/run/secrets/cloud-secrets
|
mountPath: /var/run/secrets/cloud-secrets
|
||||||
readOnly: true
|
readOnly: true
|
||||||
|
- name: rotation-credential-secret-volume
|
||||||
|
mountPath: /var/run/secrets/rotation-credential-secret
|
||||||
|
readOnly: true
|
||||||
volumes:
|
volumes:
|
||||||
- name: cloud-data-volume
|
- name: cloud-data-volume
|
||||||
persistentVolumeClaim:
|
persistentVolumeClaim:
|
||||||
|
@ -77,3 +81,7 @@ spec:
|
||||||
- name: backup-secret-volume
|
- name: backup-secret-volume
|
||||||
secret:
|
secret:
|
||||||
secretName: backup-secret
|
secretName: backup-secret
|
||||||
|
- name: rotation-credential-secret-volume
|
||||||
|
secret:
|
||||||
|
secretName: rotation-credential-secret
|
||||||
|
optional: true
|
||||||
|
|
|
@ -2,6 +2,7 @@ apiVersion: v1
|
||||||
kind: ConfigMap
|
kind: ConfigMap
|
||||||
metadata:
|
metadata:
|
||||||
name: backup-config
|
name: backup-config
|
||||||
|
namespace: nextcloud
|
||||||
labels:
|
labels:
|
||||||
app.kubernetes.io/name: backup
|
app.kubernetes.io/name: backup
|
||||||
app.kubernetes.io/part-of: cloud
|
app.kubernetes.io/part-of: cloud
|
||||||
|
|
8
src/main/resources/backup/credential-rotation.yaml
Normal file
8
src/main/resources/backup/credential-rotation.yaml
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Secret
|
||||||
|
metadata:
|
||||||
|
name: rotation-credential-secret
|
||||||
|
namespace: nextcloud
|
||||||
|
type: Opaque
|
||||||
|
data:
|
||||||
|
rotation-credential: "dGVzdAo="
|
|
@ -2,6 +2,7 @@ apiVersion: batch/v1
|
||||||
kind: CronJob
|
kind: CronJob
|
||||||
metadata:
|
metadata:
|
||||||
name: cloud-backup
|
name: cloud-backup
|
||||||
|
namespace: nextcloud
|
||||||
labels:
|
labels:
|
||||||
app.kubernetes.part-of: cloud
|
app.kubernetes.part-of: cloud
|
||||||
spec:
|
spec:
|
||||||
|
|
|
@ -2,6 +2,7 @@ apiVersion: v1
|
||||||
kind: Secret
|
kind: Secret
|
||||||
metadata:
|
metadata:
|
||||||
name: backup-secret
|
name: backup-secret
|
||||||
|
namespace: nextcloud
|
||||||
type: Opaque
|
type: Opaque
|
||||||
data:
|
data:
|
||||||
aws-access-key-id: "aws-access-key-id"
|
aws-access-key-id: "aws-access-key-id"
|
||||||
|
|
|
@ -2,6 +2,7 @@ apiVersion: apps/v1
|
||||||
kind: Deployment
|
kind: Deployment
|
||||||
metadata:
|
metadata:
|
||||||
name: cloud-deployment
|
name: cloud-deployment
|
||||||
|
namespace: nextcloud
|
||||||
spec:
|
spec:
|
||||||
selector:
|
selector:
|
||||||
matchLabels:
|
matchLabels:
|
||||||
|
@ -18,7 +19,7 @@ spec:
|
||||||
redeploy: v3
|
redeploy: v3
|
||||||
spec:
|
spec:
|
||||||
containers:
|
containers:
|
||||||
- image: domaindrivenarchitecture/c4k-cloud:7.0.0
|
- image: domaindrivenarchitecture/c4k-cloud:8.0.0
|
||||||
name: cloud-app
|
name: cloud-app
|
||||||
imagePullPolicy: IfNotPresent
|
imagePullPolicy: IfNotPresent
|
||||||
ports:
|
ports:
|
||||||
|
|
|
@ -2,6 +2,7 @@ apiVersion: v1
|
||||||
kind: PersistentVolumeClaim
|
kind: PersistentVolumeClaim
|
||||||
metadata:
|
metadata:
|
||||||
name: cloud-pvc
|
name: cloud-pvc
|
||||||
|
namespace: nextcloud
|
||||||
labels:
|
labels:
|
||||||
app.kubernetes.io/application: cloud
|
app.kubernetes.io/application: cloud
|
||||||
spec:
|
spec:
|
||||||
|
|
|
@ -2,6 +2,7 @@ apiVersion: v1
|
||||||
kind: Secret
|
kind: Secret
|
||||||
metadata:
|
metadata:
|
||||||
name: cloud-secret
|
name: cloud-secret
|
||||||
|
namespace: nextcloud
|
||||||
type: Opaque
|
type: Opaque
|
||||||
data:
|
data:
|
||||||
nextcloud-admin-user: "admin-user"
|
nextcloud-admin-user: "admin-user"
|
||||||
|
|
|
@ -2,6 +2,7 @@ apiVersion: v1
|
||||||
kind: Service
|
kind: Service
|
||||||
metadata:
|
metadata:
|
||||||
name: cloud-service
|
name: cloud-service
|
||||||
|
namespace: nextcloud
|
||||||
labels:
|
labels:
|
||||||
app.kubernetes.io/name: cloud-service
|
app.kubernetes.io/name: cloud-service
|
||||||
app.kubernetes.io/application: cloud
|
app.kubernetes.io/application: cloud
|
||||||
|
|
|
@ -8,7 +8,7 @@
|
||||||
(deftest should-generate-secret
|
(deftest should-generate-secret
|
||||||
(is (= {:apiVersion "v1"
|
(is (= {:apiVersion "v1"
|
||||||
:kind "Secret"
|
:kind "Secret"
|
||||||
:metadata {:name "backup-secret"}
|
:metadata {:name "backup-secret", :namespace "nextcloud"}
|
||||||
:type "Opaque"
|
:type "Opaque"
|
||||||
:data
|
:data
|
||||||
{:aws-access-key-id "YXdzLWlk", :aws-secret-access-key "YXdzLXNlY3JldA==", :restic-password "cmVzdGljLXB3"}}
|
{:aws-access-key-id "YXdzLWlk", :aws-secret-access-key "YXdzLXNlY3JldA==", :restic-password "cmVzdGljLXB3"}}
|
||||||
|
@ -18,6 +18,7 @@
|
||||||
(is (= {:apiVersion "v1"
|
(is (= {:apiVersion "v1"
|
||||||
:kind "ConfigMap"
|
:kind "ConfigMap"
|
||||||
:metadata {:name "backup-config"
|
:metadata {:name "backup-config"
|
||||||
|
:namespace "nextcloud"
|
||||||
:labels {:app.kubernetes.io/name "backup"
|
:labels {:app.kubernetes.io/name "backup"
|
||||||
:app.kubernetes.io/part-of "cloud"}}
|
:app.kubernetes.io/part-of "cloud"}}
|
||||||
:data
|
:data
|
||||||
|
@ -27,7 +28,7 @@
|
||||||
(deftest should-generate-cron
|
(deftest should-generate-cron
|
||||||
(is (= {:apiVersion "batch/v1"
|
(is (= {:apiVersion "batch/v1"
|
||||||
:kind "CronJob"
|
:kind "CronJob"
|
||||||
:metadata {:name "cloud-backup", :labels {:app.kubernetes.part-of "cloud"}}
|
:metadata {:name "cloud-backup", :namespace "nextcloud", :labels {:app.kubernetes.part-of "cloud"}}
|
||||||
:spec
|
:spec
|
||||||
{:schedule "10 23 * * *"
|
{:schedule "10 23 * * *"
|
||||||
:successfulJobsHistoryLimit 1
|
:successfulJobsHistoryLimit 1
|
||||||
|
|
|
@ -2,21 +2,17 @@
|
||||||
(:require
|
(:require
|
||||||
#?(:clj [clojure.test :refer [deftest is are testing run-tests]]
|
#?(:clj [clojure.test :refer [deftest is are testing run-tests]]
|
||||||
:cljs [cljs.test :refer-macros [deftest is are testing run-tests]])
|
:cljs [cljs.test :refer-macros [deftest is are testing run-tests]])
|
||||||
#?(:cljs [shadow.resource :as rc])
|
|
||||||
[clojure.spec.alpha :as s]
|
[clojure.spec.alpha :as s]
|
||||||
[clojure.spec.test.alpha :as st]
|
[clojure.spec.test.alpha :as st]
|
||||||
[dda.c4k-common.yaml :as yaml]
|
[dda.c4k-common.yaml :as yaml]
|
||||||
[dda.c4k-nextcloud.nextcloud :as cut]))
|
[dda.c4k-nextcloud.nextcloud :as cut]
|
||||||
|
#?(:cljs [dda.c4k-common.macros :refer-macros [inline-resources]])))
|
||||||
|
|
||||||
(st/instrument)
|
(st/instrument)
|
||||||
|
|
||||||
#?(:cljs
|
#?(:cljs
|
||||||
(defmethod yaml/load-resource :nextcloud-test [resource-name]
|
(defmethod yaml/load-resource :nextcloud-test [resource-name]
|
||||||
(case resource-name
|
(get (inline-resources "nextcloud-test") resource-name)))
|
||||||
"nextcloud-test/valid-auth.yaml" (rc/inline "nextcloud-test/valid-auth.yaml")
|
|
||||||
"nextcloud-test/valid-config.yaml" (rc/inline "nextcloud-test/valid-config.yaml")
|
|
||||||
"nextcloud-test/invalid-auth.yaml" (rc/inline "nextcloud-test/invalid-auth.yaml")
|
|
||||||
"nextcloud-test/invalid-config.yaml" (rc/inline "nextcloud-test/invalid-config.yaml"))))
|
|
||||||
|
|
||||||
(deftest validate-valid-resources
|
(deftest validate-valid-resources
|
||||||
(is (s/valid? cut/config? (yaml/load-as-edn "nextcloud-test/valid-config.yaml")))
|
(is (s/valid? cut/config? (yaml/load-as-edn "nextcloud-test/valid-config.yaml")))
|
||||||
|
@ -27,7 +23,7 @@
|
||||||
(deftest should-generate-secret
|
(deftest should-generate-secret
|
||||||
(is (= {:apiVersion "v1"
|
(is (= {:apiVersion "v1"
|
||||||
:kind "Secret"
|
:kind "Secret"
|
||||||
:metadata {:name "cloud-secret"}
|
:metadata {:name "cloud-secret", :namespace "nextcloud"}
|
||||||
:type "Opaque"
|
:type "Opaque"
|
||||||
:data
|
:data
|
||||||
{:nextcloud-admin-user "Y2xvdWRhZG1pbg=="
|
{:nextcloud-admin-user "Y2xvdWRhZG1pbg=="
|
||||||
|
@ -81,6 +77,7 @@
|
||||||
(is (= {:apiVersion "v1"
|
(is (= {:apiVersion "v1"
|
||||||
:kind "PersistentVolumeClaim"
|
:kind "PersistentVolumeClaim"
|
||||||
:metadata {:name "cloud-pvc"
|
:metadata {:name "cloud-pvc"
|
||||||
|
:namespace "nextcloud"
|
||||||
:labels {:app.kubernetes.io/application "cloud"}}
|
:labels {:app.kubernetes.io/application "cloud"}}
|
||||||
:spec {:storageClassName "local-path"
|
:spec {:storageClassName "local-path"
|
||||||
:accessModes ["ReadWriteOnce"]
|
:accessModes ["ReadWriteOnce"]
|
||||||
|
@ -90,7 +87,7 @@
|
||||||
(deftest should-generate-deployment
|
(deftest should-generate-deployment
|
||||||
(is (= {:apiVersion "apps/v1"
|
(is (= {:apiVersion "apps/v1"
|
||||||
:kind "Deployment"
|
:kind "Deployment"
|
||||||
:metadata {:name "cloud-deployment"}
|
:metadata {:name "cloud-deployment", :namespace "nextcloud"}
|
||||||
:spec
|
:spec
|
||||||
{:selector {:matchLabels #:app.kubernetes.io{:name "cloud-pod", :application "cloud"}}
|
{:selector {:matchLabels #:app.kubernetes.io{:name "cloud-pod", :application "cloud"}}
|
||||||
:strategy {:type "Recreate"}
|
:strategy {:type "Recreate"}
|
||||||
|
@ -98,7 +95,7 @@
|
||||||
{:metadata {:labels {:app "cloud-app", :app.kubernetes.io/name "cloud-pod", :app.kubernetes.io/application "cloud", :redeploy "v3"}}
|
{:metadata {:labels {:app "cloud-app", :app.kubernetes.io/name "cloud-pod", :app.kubernetes.io/application "cloud", :redeploy "v3"}}
|
||||||
:spec
|
:spec
|
||||||
{:containers
|
{:containers
|
||||||
[{:image "domaindrivenarchitecture/c4k-cloud:7.0.0"
|
[{:image "domaindrivenarchitecture/c4k-cloud:8.0.0"
|
||||||
:name "cloud-app"
|
:name "cloud-app"
|
||||||
:imagePullPolicy "IfNotPresent"
|
:imagePullPolicy "IfNotPresent"
|
||||||
:ports [{:containerPort 80}]
|
:ports [{:containerPort 80}]
|
||||||
|
|
|
@ -1,84 +0,0 @@
|
||||||
# Usage
|
|
||||||
|
|
||||||
`setup-local-s3.sh [BUCKET_NAME]`:
|
|
||||||
- [BUCKET_NAME] is optional, "mybucket" will be used if not specified
|
|
||||||
- sets up a k3s instance
|
|
||||||
- installs a localstack pod
|
|
||||||
- creates http and https routing to localstack via localhost
|
|
||||||
- saves the self-signed certificate as ca.crt
|
|
||||||
- uses the certificate to initialize a restic repo at `https://k3stesthost/BUCKET_NAME`
|
|
||||||
|
|
||||||
Note: In case of not being able to connect to "k3stesthost/health", you might need to ensure that the ingress' ip matches with the required host names: k3stesthost and cloudhost. With `sudo k3s kubectl get ingress` you can view the ingress' ip (e.g. 10.0.2.15), then add a line to file "/etc/hosts" e.g. `10.0.2.15 k3stesthost cloudhost`
|
|
||||||
|
|
||||||
`start-k3s.sh`:
|
|
||||||
- creates and starts a k3s instance
|
|
||||||
|
|
||||||
`k3s-uninstall.sh`:
|
|
||||||
- deletes everything k3s related
|
|
||||||
|
|
||||||
## Other useful commands
|
|
||||||
- `sudo k3s kubectl get pods`
|
|
||||||
- `curl k3stesthost/health`
|
|
||||||
expected: `{"services": {"s3": "running"}, "features": {"persistence": "disabled", "initScripts": "initialized"}}`
|
|
||||||
|
|
||||||
#### Requires AWS-CLI
|
|
||||||
- create bucket `aws --endpoint-url=http://k3stesthost s3 mb s3://mybucket`
|
|
||||||
- list buckets `aws --endpoint-url=http://k3stesthost s3 ls`
|
|
||||||
- upload something `aws --endpoint-url=http://k3stesthost s3 cp test.txt s3://mybucket`
|
|
||||||
- check files `aws --endpoint-url=http://k3stesthost s3 ls s3://mybucket`
|
|
||||||
|
|
||||||
## Run docker locally
|
|
||||||
|
|
||||||
|
|
||||||
```
|
|
||||||
docker pull docker:19.03.12-dind
|
|
||||||
docker run -d --privileged --name integration-test docker:19.03.12-dind
|
|
||||||
docker exec integration-test sh -c "apk add bash"
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
Set up docker container integration-test:
|
|
||||||
|
|
||||||
```
|
|
||||||
docker cp ../../../../../c4k-nextcloud/ integration-test:/
|
|
||||||
docker exec -it integration-test sh
|
|
||||||
cd /c4k-nextcloud/src/test/resources/local-integration-test
|
|
||||||
./setup-docker.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
## Deploy nextcloud
|
|
||||||
|
|
||||||
### Requirements
|
|
||||||
|
|
||||||
* leiningen (install with: `sudo apt install leiningen` )
|
|
||||||
* In the project's root execute: `lein uberjar`
|
|
||||||
* Change file "valid-config.edn" according to your settings (e.g. `:fqdn "cloudhost"` and `:restic-repository "s3://k3stesthost:mybucket"`).
|
|
||||||
|
|
||||||
### Deploy to k3s
|
|
||||||
|
|
||||||
* Create and deploy the k8s yaml:
|
|
||||||
`java -jar target/uberjar/c4k-nextcloud-standalone.jar valid-config.edn valid-auth.edn | sudo k3s kubectl apply -f -`
|
|
||||||
|
|
||||||
Some of the steps may take some min to be effective, but eventually nextcloud should be available at: https://cloudhost
|
|
||||||
|
|
||||||
### Deploy to k3d
|
|
||||||
|
|
||||||
k3d is a k3s system which is running inside of a container. To install k3d run `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash` or have a look at https://k3d.io/v5.0.3/ .
|
|
||||||
|
|
||||||
* Start a k3d cluster to deploy s3, nextcloud and test backup and restore on it: `./setup-local-s3-on-k3d.sh`
|
|
||||||
|
|
||||||
Some steps may take a couple of minutes to be effective, but eventually nextcloud should be available at: https://cloudhost
|
|
||||||
|
|
||||||
#### Remove k3d cluster
|
|
||||||
|
|
||||||
`k3d cluster delete nextcloud`
|
|
||||||
|
|
||||||
## Test in local gitlab runner
|
|
||||||
|
|
||||||
See https://stackoverflow.com/questions/32933174/use-gitlab-ci-to-run-tests-locally
|
|
||||||
|
|
||||||
This needs to be done in the project root
|
|
||||||
|
|
||||||
`docker run -d --name gitlab-runner --restart always -v $PWD:$PWD -v /var/run/docker.sock:/var/run/docker.sock gitlab/gitlab-runner:latest`
|
|
||||||
|
|
||||||
`docker exec -it -w $PWD gitlab-runner gitlab-runner exec docker nextcloud-integrationtest --docker-privileged --docker-volumes '/var/run/docker.sock:/var/run/docker.sock'`
|
|
|
@ -1,20 +0,0 @@
|
||||||
apiVersion: cert-manager.io/v1
|
|
||||||
kind: Certificate
|
|
||||||
metadata:
|
|
||||||
name: localstack-cert
|
|
||||||
namespace: default
|
|
||||||
spec:
|
|
||||||
secretName: localstack-secret
|
|
||||||
commonName: k3stesthost
|
|
||||||
dnsNames:
|
|
||||||
- k3stesthost
|
|
||||||
issuerRef:
|
|
||||||
name: selfsigning-issuer
|
|
||||||
kind: ClusterIssuer
|
|
||||||
---
|
|
||||||
apiVersion: cert-manager.io/v1
|
|
||||||
kind: ClusterIssuer
|
|
||||||
metadata:
|
|
||||||
name: selfsigning-issuer
|
|
||||||
spec:
|
|
||||||
selfSigned: {}
|
|
|
@ -1,44 +0,0 @@
|
||||||
@startuml
|
|
||||||
|
|
||||||
autonumber
|
|
||||||
|
|
||||||
skinparam sequenceBox {
|
|
||||||
borderColor White
|
|
||||||
}
|
|
||||||
|
|
||||||
participant gitlab_runner
|
|
||||||
|
|
||||||
box "outer container" #LightBlue
|
|
||||||
|
|
||||||
participant .gitlab_ci
|
|
||||||
participant PreparingCommands
|
|
||||||
participant test_script
|
|
||||||
|
|
||||||
end box
|
|
||||||
|
|
||||||
|
|
||||||
box "k3s" #CornSilk
|
|
||||||
|
|
||||||
participant k3s_api_server
|
|
||||||
participant backup_pod
|
|
||||||
|
|
||||||
end box
|
|
||||||
|
|
||||||
|
|
||||||
gitlab_runner -> k3s_api_server: run k3s as container
|
|
||||||
gitlab_runner -> .gitlab_ci : run
|
|
||||||
|
|
||||||
.gitlab_ci -> PreparingCommands : Install packages (curl bash ...)
|
|
||||||
.gitlab_ci -> PreparingCommands : get k3s_api_server config for k3s_api_server
|
|
||||||
|
|
||||||
.gitlab_ci -> test_script : run
|
|
||||||
|
|
||||||
test_script -> k3s_api_server: apply cert-manager
|
|
||||||
test_script -> k3s_api_server: apply localstack
|
|
||||||
test_script -> k3s_api_server: enable tls / create certificates
|
|
||||||
test_script -> k3s_api_server: apply cloud
|
|
||||||
test_script -> k3s_api_server: create backup_pod (by scale to 1)
|
|
||||||
test_script -> backup_pod: backup
|
|
||||||
test_script -> backup_pod: restore
|
|
||||||
|
|
||||||
@enduml
|
|
|
@ -1,17 +0,0 @@
|
||||||
# Set the default kube context if present
|
|
||||||
DEFAULT_KUBE_CONTEXTS="$HOME/.kube/config"
|
|
||||||
if test -f "${DEFAULT_KUBE_CONTEXTS}"
|
|
||||||
then
|
|
||||||
export KUBECONFIG="$DEFAULT_KUBE_CONTEXTS"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Additional contexts should be in ~/.kube/custom-contexts/
|
|
||||||
CUSTOM_KUBE_CONTEXTS="$HOME/.kube/custom-contexts"
|
|
||||||
mkdir -p "${CUSTOM_KUBE_CONTEXTS}"
|
|
||||||
OIFS="$IFS"
|
|
||||||
IFS=$'\n'
|
|
||||||
for contextFile in `find "${CUSTOM_KUBE_CONTEXTS}" -type f -name "*.yml"`
|
|
||||||
do
|
|
||||||
export KUBECONFIG="$contextFile:$KUBECONFIG"
|
|
||||||
done
|
|
||||||
IFS="$OIFS"
|
|
|
@ -1,65 +0,0 @@
|
||||||
apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
name: localstack
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app: localstack
|
|
||||||
strategy:
|
|
||||||
type: Recreate
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app: localstack
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- image: localstack/localstack
|
|
||||||
name: localstack-app
|
|
||||||
imagePullPolicy: IfNotPresent
|
|
||||||
env:
|
|
||||||
- name: SERVICES
|
|
||||||
value: s3
|
|
||||||
---
|
|
||||||
# service
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
metadata:
|
|
||||||
name: localstack-service
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
app: localstack
|
|
||||||
ports:
|
|
||||||
- port: 4566
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Secret
|
|
||||||
metadata:
|
|
||||||
name: localstack-secret
|
|
||||||
type: Opaque
|
|
||||||
---
|
|
||||||
apiVersion: networking.k8s.io/v1
|
|
||||||
kind: Ingress
|
|
||||||
metadata:
|
|
||||||
name: ingress-localstack
|
|
||||||
annotations:
|
|
||||||
cert-manager.io/cluster-issuer: selfsigning-issuer
|
|
||||||
kubernetes.io/ingress.class: traefik
|
|
||||||
traefik.ingress.kubernetes.io/redirect-entry-point: https
|
|
||||||
namespace: default
|
|
||||||
spec:
|
|
||||||
tls:
|
|
||||||
- hosts:
|
|
||||||
- k3stesthost
|
|
||||||
secretName: localstack-secret
|
|
||||||
rules:
|
|
||||||
- host: k3stesthost
|
|
||||||
http:
|
|
||||||
paths:
|
|
||||||
- path: /
|
|
||||||
pathType: Prefix
|
|
||||||
backend:
|
|
||||||
service:
|
|
||||||
name: localstack-service
|
|
||||||
port:
|
|
||||||
number: 4566
|
|
|
@ -1,48 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -x
|
|
||||||
|
|
||||||
docker volume create k3s-server
|
|
||||||
|
|
||||||
name='inttst'
|
|
||||||
|
|
||||||
[[ $(docker ps -f "name=$name" --format '{{.Names}}') == $name ]] || docker run --name $name -d --privileged --tmpfs /run --tmpfs /var/run --restart always -e K3S_TOKEN=12345678901234 -e K3S_KUBECONFIG_OUTPUT=./kubeconfig.yaml -e K3S_KUBECONFIG_MODE=666 -v k3s-server:/var/lib/rancher/k3s:z -v $(pwd):/output:z -p 6443:6443 -p 80:80 -p 443:443 rancher/k3s server --cluster-init --tls-san k3stesthost --tls-san cloudhost
|
|
||||||
|
|
||||||
docker ps
|
|
||||||
|
|
||||||
export timeout=30; while ! docker exec $name sh -c "test -f /var/lib/rancher/k3s/server/kubeconfig.yaml"; do if [ "$timeout" == 0 ]; then echo "ERROR: Timeout while waiting for file."; break; fi; sleep 1; ((timeout--)); done
|
|
||||||
|
|
||||||
mkdir -p $HOME/.kube/
|
|
||||||
|
|
||||||
docker cp $name:/var/lib/rancher/k3s/server/kubeconfig.yaml $HOME/.kube/config
|
|
||||||
|
|
||||||
if [ "$timeout" == 0 ]
|
|
||||||
then
|
|
||||||
echo -------------------------------------------------------
|
|
||||||
find / -name "kubeconfig.yaml";
|
|
||||||
echo -------------------------------------------------------
|
|
||||||
docker ps -a
|
|
||||||
echo -------------------------------------------------------
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "127.0.0.1 kubernetes" >> /etc/hosts
|
|
||||||
|
|
||||||
apk add wget curl bash sudo openjdk8
|
|
||||||
|
|
||||||
wget -P /etc/apk/keys/ https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub
|
|
||||||
apk add --no-cache --repository=https://apkproxy.herokuapp.com/sgerrand/alpine-pkg-leiningen leiningen
|
|
||||||
|
|
||||||
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.22.0/bin/linux/amd64/kubectl
|
|
||||||
chmod +x ./kubectl
|
|
||||||
mv ./kubectl /usr/local/bin/kubectl
|
|
||||||
|
|
||||||
sleep 20 #allow some time to startup k3s
|
|
||||||
docker ps -a
|
|
||||||
|
|
||||||
swapoff -a # can this be removed ?
|
|
||||||
|
|
||||||
export KUBECONFIG=$HOME/.kube/config
|
|
||||||
|
|
||||||
pwd
|
|
||||||
cd ./c4k-nextcloud/src/test/resources/local-integration-test && ./setup-local-s3-on-k3d.sh
|
|
|
@ -1,60 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -x
|
|
||||||
|
|
||||||
function main()
|
|
||||||
{
|
|
||||||
# enable tls for k3s with cert-manager
|
|
||||||
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
|
|
||||||
|
|
||||||
kubectl apply -f localstack.yaml
|
|
||||||
|
|
||||||
until kubectl apply -f certificate.yaml
|
|
||||||
do
|
|
||||||
echo "[INFO] Waiting for certificate ..."
|
|
||||||
sleep 30
|
|
||||||
done
|
|
||||||
|
|
||||||
# wait for ingress to be ready
|
|
||||||
bash -c 'external_ip=""; while [ -z $external_ip ]; do echo "[INFO] Waiting for end point..."; external_ip=$(kubectl get ingress -o jsonpath="{$.items[*].status.loadBalancer.ingress[*].ip}"); [ -z "$external_ip" ] && sleep 10; done; echo "End point ready - $external_ip";'
|
|
||||||
|
|
||||||
export INGRESS_IP=$(kubectl get ingress ingress-localstack -o=jsonpath="{.status.loadBalancer.ingress[0].ip}")
|
|
||||||
|
|
||||||
cd ../../../../ # c4k-nextcloud project root
|
|
||||||
lein uberjar
|
|
||||||
java -jar target/uberjar/c4k-nextcloud-standalone.jar config-local.edn auth-local.edn | kubectl apply -f -
|
|
||||||
|
|
||||||
CLOUD_POD=$(kubectl get pod -l app=cloud-app -o name)
|
|
||||||
kubectl wait $CLOUD_POD --for=condition=Ready --timeout=240s
|
|
||||||
|
|
||||||
# wait for nextcloud config file available
|
|
||||||
timeout 180 bash -c "kubectl exec -t $POD -- bash -c \"until [ -f /var/www/html/config/config.php ]; do sleep 10; done\""
|
|
||||||
|
|
||||||
# ensure an instance of pod backup-restore
|
|
||||||
kubectl scale deployment backup-restore --replicas 1
|
|
||||||
|
|
||||||
# wait for localstack health endpoint
|
|
||||||
echo "$INGRESS_IP k3stesthost cloudhost" >> /etc/hosts
|
|
||||||
until curl --fail --silent k3stesthost/health | grep -oe '"s3": "available"' -oe '"s3": "running"'
|
|
||||||
do
|
|
||||||
curl --fail k3stesthost/health
|
|
||||||
echo "[INFO] Waiting for s3 running"
|
|
||||||
sleep 10
|
|
||||||
done
|
|
||||||
|
|
||||||
BACKUP_POD=$(kubectl get pod -l app=backup-restore -o name)
|
|
||||||
kubectl wait $BACKUP_POD --for=condition=Ready --timeout=240s
|
|
||||||
|
|
||||||
kubectl exec -t $BACKUP_POD -- bash -c "echo \"$INGRESS_IP k3stesthost cloudhost\" >> /etc/hosts"
|
|
||||||
kubectl exec -t $BACKUP_POD -- /usr/local/bin/init.sh
|
|
||||||
|
|
||||||
echo ================= BACKUP =================
|
|
||||||
kubectl exec -t $BACKUP_POD -- /usr/local/bin/backup.sh
|
|
||||||
|
|
||||||
sleep 10 # avoid race conditions
|
|
||||||
|
|
||||||
echo ================= RESTORE =================
|
|
||||||
kubectl exec -t $BACKUP_POD -- /usr/local/bin/restore.sh
|
|
||||||
}
|
|
||||||
|
|
||||||
main "$@"
|
|
|
@ -1,34 +0,0 @@
|
||||||
# Provision a local k3s test environment (cert-manager + localstack) and
# initialise a restic repository inside the localstack S3 endpoint.
#
# Usage: <script> [bucket-name]    (default bucket: "mybucket")
function main()
{
    # Positional arg 1: name of the S3 bucket backing the restic repository.
    local bucket_name="${1:-mybucket}"

    ./start-k3s.sh

    # cert-manager v1.5.4 provides the Certificate CRDs applied below.
    sudo k3s kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml

    sudo k3s kubectl apply -f localstack.yaml

    # The Certificate resource is only accepted once cert-manager's CRDs and
    # webhook are up, so retry the apply until it succeeds.
    until sudo k3s kubectl apply -f certificate.yaml
    do
        echo "*** Waiting for certificate ... ***"
        sleep 10
    done
    echo

    echo
    echo "[INFO] Waiting for localstack health endpoint"
    until curl --connect-timeout 3 -s -f -o /dev/null "k3stesthost/health"
    do
        sleep 5
    done
    echo

    # Export the CA certificate so restic can trust localstack's
    # self-signed TLS endpoint.
    sudo k3s kubectl get secret localstack-secret -o jsonpath="{.data.ca\.crt}" | base64 --decode > ca.crt

    #aws --endpoint-url=http://localhost s3 mb s3://$bucket_name
    export RESTIC_PASSWORD="test-password"
    # Bucket name is quoted so unusual names cannot be word-split or globbed.
    restic init --cacert ca.crt -r "s3://k3stesthost/${bucket_name}"
}

# "$@" (quoted) preserves arguments that contain whitespace; the original
# unquoted $@ would split them into multiple words.
main "$@"
|
|
|
@@ -1,9 +0,0 @@
|
||||||
# Bootstrap a local k3s cluster and install cert-manager into it.
main() {
  ./start-k3s.sh

  # cert-manager v1.5.4 supplies the Certificate/Issuer CRDs the other
  # provisioning scripts rely on.
  sudo k3s kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
}

main
|
|
|
@@ -1 +0,0 @@
|
||||||
# Create a local k3d cluster "nextcloud" with ports 80/443 published on the
# loadbalancer, an extra TLS SAN "cloudhost", API on 6443, and the kubeconfig
# written to a custom context file and merged into the default config.
KUBECONFIG=~/.kube/custom-contexts/k3d-config.yml k3d cluster create nextcloud --k3s-arg '--tls-san cloudhost@loadbalancer' --port 80:80@loadbalancer --port 443:443@loadbalancer --api-port 6443 --kubeconfig-update-default
|
|
|
@@ -1 +0,0 @@
|
||||||
# Install k3s via the official installer, naming the node "k3stesthost" and
# adding "cloudhost" as an extra TLS SAN on the API server certificate.
curl -sfL https://get.k3s.io | K3S_NODE_NAME=k3stesthost INSTALL_K3S_EXEC='--tls-san cloudhost' sh -
|
|
|
@@ -1,9 +0,0 @@
|
||||||
;; Example auth/credentials configuration (auth-local.edn) for a local
;; c4k-nextcloud deployment; passed to the c4k-nextcloud jar together with
;; config-local.edn. All values are placeholders -- never commit real secrets.
{:postgres-db-user "nextcloud"
 :postgres-db-password "nextcloud-db-password"
 :nextcloud-admin-user "cloudadmin"
 :nextcloud-admin-password "cloudpassword"
 ;; S3-style credentials for the restic backup target (localstack in tests)
 :aws-access-key-id "aws-id"
 :aws-secret-access-key "aws-secret"
 :restic-password "restic-password"
 ;; Credentials for the Grafana Cloud monitoring remote-write endpoint
 :mon-auth {:grafana-cloud-user "user"
            :grafana-cloud-password "password"}}
|
|
|
@@ -1,8 +0,0 @@
|
||||||
;; Example main configuration (config-local.edn) for a c4k-nextcloud
;; deployment; passed to the c4k-nextcloud jar together with auth-local.edn.
{:fqdn "cloud.test.meissa-gmbh.de"       ; public DNS name of the instance
 :issuer "staging"                       ; cert-manager issuer to use
 :nextcloud-data-volume-path "/var/cloud"
 :postgres-data-volume-path "/var/postgres"
 ;; restic backup target in s3:<endpoint>/<bucket>/<folder> form
 :restic-repository "s3:s3.amazonaws.com/your-bucket/your-folder"
 ;; Monitoring labels/endpoint for Prometheus remote-write to Grafana Cloud.
 ;; NOTE(review): cluster name "jitsi" looks copy-pasted from another
 ;; project -- confirm the intended value for a nextcloud deployment.
 :mon-cfg {:grafana-cloud-url "url-for-your-prom-remote-write-endpoint"
           :k3s-cluster-name "jitsi"
           :k3s-cluster-stage "test"}}
|
|
Loading…
Reference in a new issue