Compare commits

..

No commits in common. "main" and "7.0.5" have entirely different histories.
main ... 7.0.5

43 changed files with 575 additions and 250 deletions

View file

@ -4,9 +4,10 @@ stages:
- security
- upload
- image
#- integrationtest
.img: &img
image: "domaindrivenarchitecture/ddadevops-dind:4.11.3"
image: "domaindrivenarchitecture/ddadevops-dind:4.10.5"
services:
- docker:dind
before_script:
@ -16,7 +17,7 @@ stages:
- export IMAGE_TAG=$CI_COMMIT_TAG
.cljs-job: &cljs
image: "domaindrivenarchitecture/ddadevops-clj-cljs:4.11.3"
image: "domaindrivenarchitecture/ddadevops-clj-cljs:4.10.7"
cache:
key: ${CI_COMMIT_REF_SLUG}
paths:
@ -29,7 +30,7 @@ stages:
- npm install
.clj-job: &clj
image: "domaindrivenarchitecture/ddadevops-clj:4.11.3"
image: "domaindrivenarchitecture/ddadevops-clj-cljs:4.10.7"
cache:
key: ${CI_COMMIT_REF_SLUG}
paths:
@ -93,15 +94,6 @@ package-uberjar:
paths:
- target/uberjar
package-native:
<<: *clj
stage: package
script:
- pyb package_native
artifacts:
paths:
- target/graalvm
release-to-clojars:
<<: *clj
<<: *tag_only
@ -129,3 +121,23 @@ nextcloud-image-publish:
stage: image
script:
- cd infrastructure/nextcloud && pyb image publish
#.nextcloud-integrationtest:
# stage: integrationtest
# image: registry.gitlab.com/gitlab-org/cluster-integration/helm-install-image/releases/3.7.1-kube-1.20.11-alpine-3.14
# services:
# - name: registry.gitlab.com/gitlab-org/cluster-integration/test-utils/k3s-gitlab-ci/releases/v1.22.2-k3s2
# alias: k3s
# script:
# - apk add curl sudo bash
# - apk add wget curl bash sudo openjdk8
# - wget -P /etc/apk/keys/ https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub
# - apk add --no-cache --repository=https://apkproxy.herokuapp.com/sgerrand/alpine-pkg-leiningen leiningen
#
# - mkdir -p ${HOME}/.kube/
# - curl -f k3s:8081 > ${HOME}/.kube/config
# - kubectl version
# - kubectl cluster-info
# - echo "---------- Integration test -------------"
# - pwd
# - cd ./src/test/resources/local-integration-test/ && ./setup-local-s3-on-k3d.sh

View file

@ -41,8 +41,7 @@ Development happens at: https://repo.prod.meissa.de/meissa/c4k-nextcloud
Mirrors are:
* https://codeberg.org/meissa/c4k-nextcloud (Issues and PR)
* https://gitlab.com/domaindrivenarchitecture/c4k-nextcloud (CI)
* https://gitlab.com/domaindrivenarchitecture/c4k-nextcloud (issues and PR, CI)
* https://github.com/DomainDrivenArchitecture/c4k-nextcloud
For more details about our repository model see: https://repo.prod.meissa.de/meissa/federate-your-repos
@ -50,6 +49,6 @@ For more details about our repository model see: https://repo.prod.meissa.de/mei
## License
Copyright © 2021, 2022, 2023, 2024 meissa GmbH
Copyright © 2021 meissa GmbH
Licensed under the [Apache License, Version 2.0](LICENSE) (the "License")
Pls. find licenses of our subcomponents [here](doc/SUBCOMPONENT_LICENSE)

7
auth-local.edn Normal file
View file

@ -0,0 +1,7 @@
{:postgres-db-user "nextcloud"
:postgres-db-password "dbpass"
:nextcloud-admin-user "cloudadmin"
:nextcloud-admin-password "cloudpassword"
:aws-access-key-id ""
:aws-secret-access-key ""
:restic-password "test-password"}

View file

@ -29,11 +29,10 @@ def initialize(project):
"release_organisation": "meissa",
"release_repository_name": name,
"release_artifacts": [
f"target/graalvm/{name}",
f"target/uberjar/{name}-standalone.jar",
f"target/frontend-build/{name}.js",
"target/uberjar/c4k-nextcloud-standalone.jar",
"target/frontend-build/c4k-nextcloud.js",
],
"release_main_branch": "main",
"release_main_branch": "master",
}
build = ReleaseMixin(project, input)
@ -41,18 +40,18 @@ def initialize(project):
@task
def test_clj():
def test_clj(project):
run("lein test", shell=True, check=True)
@task
def test_cljs():
def test_cljs(project):
run("shadow-cljs compile test", shell=True, check=True)
run("node target/node-tests.js", shell=True, check=True)
@task
def test_schema():
def test_schema(project):
run("lein uberjar", shell=True, check=True)
run(
"java -jar target/uberjar/c4k-nextcloud-standalone.jar "
@ -63,11 +62,6 @@ def test_schema():
check=True,
)
@task
def test():
test_clj()
test_cljs()
test_schema()
@task
def report_frontend(project):
@ -102,7 +96,6 @@ def package_frontend(project):
@task
def package_uberjar(project):
run("lein uberjar", shell=True, check=True)
run(
"sha256sum target/uberjar/c4k-nextcloud-standalone.jar > target/uberjar/c4k-nextcloud-standalone.jar.sha256",
shell=True,
@ -114,37 +107,6 @@ def package_uberjar(project):
check=True,
)
@task
def package_native(project):
run(
"mkdir -p target/graalvm",
shell=True,
check=True,
)
run(
"native-image " +
"--native-image-info " +
"--report-unsupported-elements-at-runtime " +
"--no-server " +
"--no-fallback " +
"--features=clj_easy.graal_build_time.InitClojureClasses " +
f"-jar target/uberjar/{project.name}-standalone.jar " +
"-H:IncludeResources=.*.yaml " +
"-H:Log=registerResource:verbose " +
f"-H:Name=target/graalvm/{project.name}",
shell=True,
check=True,
)
run(
f"sha256sum target/graalvm/{project.name} > target/graalvm/{project.name}.sha256",
shell=True,
check=True,
)
run(
f"sha512sum target/graalvm/{project.name} > target/graalvm/{project.name}.sha512",
shell=True,
check=True,
)
@task
def upload_clj(project):
@ -153,32 +115,17 @@ def upload_clj(project):
@task
def lint(project):
run(
"lein eastwood",
shell=True,
check=True,
)
#run(
# "lein eastwood",
# shell=True,
# check=True,
#)
run(
"lein ancient check",
shell=True,
check=True,
)
@task
def inst(project):
package_uberjar(project)
package_native(project)
run(
f"sudo install -m=755 target/uberjar/{project.name}-standalone.jar /usr/local/bin/{project.name}-standalone.jar",
shell=True,
check=True,
)
run(
f"sudo install -m=755 target/graalvm/{project.name} /usr/local/bin/{project.name}",
shell=True,
check=True,
)
@task
def patch(project):
@ -227,7 +174,7 @@ def release(project):
def linttest(project, release_type):
build = get_devops_build(project)
build.update_release_type(release_type)
test_clj()
test_cljs()
test_schema()
test_clj(project)
test_cljs(project)
test_schema(project)
lint(project)

6
config-local.edn Normal file
View file

@ -0,0 +1,6 @@
{:fqdn "cloudhost"
:issuer :staging
:nextcloud-data-volume-path "/var/cloud"
:postgres-data-volume-path "/var/postgres"
:restic-repository "s3://k3stesthost/mybucket"
:local-integration-test true}

View file

@ -10,37 +10,37 @@
## Manual init the restic repository for the first time
1. Scale backup-restore deployment up:
`kubectl -n nextcloud scale deployment backup-restore --replicas=1`
`kubectl scale deployment backup-restore --replicas=1`
1. exec into pod and execute init script
`kubectl -n nextcloud exec -it backup-restore -- /usr/local/bin/init.sh`
`kubectl exec -it backup-restore -- /usr/local/bin/init.sh`
1. Scale backup-restore deployment down:
`kubectl -n nextcloud scale deployment backup-restore --replicas=0`
`kubectl scale deployment backup-restore --replicas=0`
## Manual backup
## Manual backup the restic repository for the first time
1. Scale Cloud deployment down:
`kubectl -n nextcloud scale deployment cloud-deployment --replicas=0`
`kubectl scale deployment cloud-deployment --replicas=0`
1. Scale backup-restore deployment up:
`kubectl -n nextcloud scale deployment backup-restore --replicas=1`
`kubectl scale deployment backup-restore --replicas=1`
1. exec into pod and execute backup script
`kubectl -n nextcloud exec -it backup-restore -- /usr/local/bin/backup.sh`
`kubectl exec -it backup-restore -- /usr/local/bin/backup.sh`
1. Scale backup-restore deployment down:
`kubectl -n nextcloud scale deployment backup-restore --replicas=0`
`kubectl scale deployment backup-restore --replicas=0`
1. Scale Cloud deployment up:
`kubectl -n nextcloud scale deployment cloud-deployment --replicas=1`
`kubectl scale deployment cloud-deployment --replicas=1`
## Manual restore
1. Scale Cloud deployment down:
`kubectl -n nextcloud scale deployment cloud-deployment --replicas=0`
`kubectl scale deployment cloud-deployment --replicas=0`
2. Scale backup-restore deployment up:
`kubectl -n nextcloud scale deployment backup-restore --replicas=1`
`kubectl scale deployment backup-restore --replicas=1`
3. exec into pod and execute restore script
`kubectl -n nextcloud exec -it backup-restore -- /usr/local/bin/restore.sh`
`kubectl exec -it backup-restore -- /usr/local/bin/restore.sh`
4. Scale backup-restore deployment down:
`kubectl -n nextcloud scale deployment backup-restore --replicas=0`
`kubectl scale deployment backup-restore --replicas=0`
5. Scale Cloud deployment up:
`kubectl -n nextcloud scale deployment cloud-deployment --replicas=1`
`kubectl scale deployment cloud-deployment --replicas=1`

View file

@ -39,31 +39,34 @@ npx shadow-cljs release frontend
## graalvm-setup
```
curl -LO https://github.com/graalvm/graalvm-ce-builds/releases/download/jdk-21.0.2/graalvm-community-jdk-21.0.2_linux-x64_bin.tar.gz
curl -LO https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.0.0.2/graalvm-ce-java11-linux-amd64-21.0.0.2.tar.gz
# unpack
tar -xzf graalvm-community-jdk-21.0.2_linux-x64_bin.tar.gz
tar -xzf graalvm-ce-java11-linux-amd64-21.0.0.2.tar.gz
sudo mv graalvm-community-openjdk-21.0.2+13.1 /usr/lib/jvm/
sudo ln -s /usr/lib/jvm/graalvm-community-openjdk-21.0.2+13.1 /usr/lib/jvm/graalvm-21
sudo ln -s /usr/lib/jvm/graalvm-21/bin/gu /usr/local/bin
sudo update-alternatives --install /usr/bin/java java /usr/lib/jvm/graalvm-21/bin/java 2
sudo mv graalvm-ce-java11-21.0.0.2 /usr/lib/jvm/
sudo ln -s /usr/lib/jvm/graalvm-ce-java11-21.0.0.2 /usr/lib/jvm/graalvm
sudo ln -s /usr/lib/jvm/graalvm/bin/gu /usr/local/bin
sudo update-alternatives --install /usr/bin/java java /usr/lib/jvm/graalvm/bin/java 2
sudo update-alternatives --config java
sudo ln -s /usr/lib/jvm/graalvm-21/bin/native-image /usr/local/bin
# install native-image in graalvm-ce-java11-linux-amd64-21.0.0.2/bin
sudo gu install native-image
sudo ln -s /usr/lib/jvm/graalvm/bin/native-image /usr/local/bin
# deps
sudo apt-get install build-essential libz-dev zlib1g-dev
# build
cd ~/repo/c4k/c4k-nextcloud
cd ~/repo/dda/c4k-cloud
lein uberjar
mkdir -p target/graalvm
lein native
# execute
./target/graalvm/c4k-nextcloud -h
./target/graalvm/c4k-nextcloud src/test/resources/nextcloud-test/valid-config.yaml src/test/resources/nextcloud-test/valid-auth.yaml
./target/graalvm/c4k-nextcloud src/test/resources/nextcloud-test/invalid-config.yaml src/test/resources/nextcloud-test/invalid-auth.yaml
./target/graalvm/c4k-cloud -h
./target/graalvm/c4k-cloud src/test/resources/valid-config.edn src/test/resources/valid-auth.edn
./target/graalvm/c4k-cloud src/test/resources/invalid-config.edn src/test/resources/invalid-auth.edn
```
## c4k-setup

View file

@ -5,17 +5,12 @@
- 4.0.3: nextcloud 22
- 5.0.0: nextcloud 23
- 6.0.0: nextcloud 24
- 7.0.7: nextcloud 25.0.13
- 7.1.1: nextcloud 26.0.0 (manual publish) => attention - only upgrade to 26.0.0 is working
- 7.1.0: nextcloud 26.0.13 (manual publish)
- 7.2.0: nextcloud 27 (manual publish)
- 10.0.0: nextcloud 28.0.5
- 10.1.0: nextcloud 29.0.0
- 7.0.0: nextcloud 25
## Upgrading process
1. Change the version of the docker image in the deployment to the next major version
- `kubectl -n=nextcloud edit deploy cloud-deployment`
- `kubectl edit deploy cloud-deployment`
- change `image: domaindrivenarchitecture/c4k-cloud:4.0.3`
2. Wait for the pod to finish restarting
3. Verify the website is working and https://URL/settings/admin/overview shows the correct version

View file

@ -1,41 +0,0 @@
# Rename Database
## Start
1. Scale down cloud deployment
`k -n nextcloud scale deployment cloud-deployment --replicas 0`
## Change db-name in postgres
1. Connect to postgres-pod
`k -n nextcloud exec -it postgresql-... -- bash`
2. Connect to a database
`PGPASSWORD=$POSTGRES_PASSWORD psql -h postgresql-service -U $POSTGRES_USER postgres`
3. List available databases
`\l`
4. Rename database
`ALTER DATABASE cloud RENAME TO nextcloud;`
5. Verify
`\l`
6. Quit
`\q`
## Update postgres-config
1. Edit configmap
`k -n nextcloud edit configmap postgres-config`
2. Update postgres-db value
3. Save
## Update nextcloud db-name
1. Scale up nextcloud
`k -n nextcloud scale deployment cloud-deployment --replicas 1`
2. Connect
`k -n nextcloud exec -it cloud-deployment-... -- bash`
3. Update db value in config.php
`apt update`
`apt install vim`
`vim config/config.php`
4. Update dbname field
5. Verify server+website is working

View file

@ -6,7 +6,7 @@ from ddadevops import *
name = "c4k-cloud"
MODULE = "backup"
PROJECT_ROOT_PATH = "../.."
version = "10.2.1-dev"
version = "7.0.5"
@init

View file

@ -6,7 +6,7 @@ from ddadevops import *
name = 'c4k-cloud'
MODULE = 'not_set'
PROJECT_ROOT_PATH = '../..'
version = "10.2.1-dev"
version = "7.0.5"
@init
def initialize(project):

View file

@ -1,7 +1,4 @@
FROM nextcloud:29
# REQUIRES docker >= 2.10.10
# https://docs.docker.com/engine/release-notes/20.10/#201010
FROM nextcloud:27
# Prepare Entrypoint Script
ADD resources /tmp

View file

@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
set -exo pipefail

View file

@ -2,7 +2,7 @@
"name": "c4k-nextcloud",
"description": "Generate c4k yaml for a nextcloud deployment.",
"author": "meissa GmbH",
"version": "10.2.1-SNAPSHOT",
"version": "7.0.5",
"homepage": "https://gitlab.com/domaindrivenarchitecture/c4k-nextcloud#readme",
"repository": "https://www.npmjs.com/package/c4k-nextcloud",
"license": "APACHE2",

View file

@ -1,11 +1,11 @@
(defproject org.domaindrivenarchitecture/c4k-nextcloud "10.2.1-SNAPSHOT"
(defproject org.domaindrivenarchitecture/c4k-nextcloud "7.0.5"
:description "nextcloud c4k-installation package"
:url "https://domaindrivenarchitecture.org"
:license {:name "Apache License, Version 2.0"
:url "https://www.apache.org/licenses/LICENSE-2.0.html"}
:dependencies [[org.clojure/clojure "1.11.3"]
[org.clojure/tools.reader "1.4.2"]
[org.domaindrivenarchitecture/c4k-common-clj "6.3.1"]
:dependencies [[org.clojure/clojure "1.11.1"]
[org.clojure/tools.reader "1.3.7"]
[org.domaindrivenarchitecture/c4k-common-clj "6.1.2"]
[hickory "0.7.1" :exclusions [viebel/codox-klipse-theme]]]
:target-path "target/%s/"
:source-paths ["src/main/cljc"
@ -22,14 +22,25 @@
:uberjar {:aot :all
:main dda.c4k-nextcloud.uberjar
:uberjar-name "c4k-nextcloud-standalone.jar"
:dependencies [[org.clojure/tools.cli "1.1.230"]
[ch.qos.logback/logback-classic "1.5.6"
:dependencies [[org.clojure/tools.cli "1.0.219"]
[ch.qos.logback/logback-classic "1.4.14"
:exclusions [com.sun.mail/javax.mail]]
[org.slf4j/jcl-over-slf4j "2.0.13"]
[com.github.clj-easy/graal-build-time "1.0.5"]]}}
[org.slf4j/jcl-over-slf4j "2.0.11"]]}}
:release-tasks [["test"]
["vcs" "assert-committed"]
["change" "version" "leiningen.release/bump-version" "release"]
["vcs" "commit"]
["vcs" "tag" "v" "--no-sign"]
["change" "version" "leiningen.release/bump-version"]])
["change" "version" "leiningen.release/bump-version"]]
:aliases {"native" ["shell"
"native-image"
"--report-unsupported-elements-at-runtime"
"--initialize-at-build-time"
"-jar" "target/uberjar/c4k-nextcloud-standalone.jar"
"-H:ResourceConfigurationFiles=graalvm-resource-config.json"
"-H:Log=registerResource"
"-H:Name=target/graalvm/${:name}"]
"inst" ["shell"
"sh"
"-c"
"lein uberjar && sudo install -m=755 target/uberjar/c4k-nextcloud-standalone.jar /usr/local/bin/c4k-nextcloud-standalone.jar"]})

View file

@ -4,7 +4,7 @@
"src/test/cljc"
"src/test/cljs"
"src/test/resources"]
:dependencies [[org.domaindrivenarchitecture/c4k-common-cljs "6.3.1"]
:dependencies [[org.domaindrivenarchitecture/c4k-common-cljs "6.0.3"]
[hickory "0.7.1"]]
:builds {:frontend {:target :browser
:modules {:main {:init-fn dda.c4k-nextcloud.browser/init}}

View file

@ -1,20 +1,24 @@
(ns dda.c4k-nextcloud.backup
(:require
[clojure.spec.alpha :as s]
#?(:cljs [shadow.resource :as rc])
[dda.c4k-common.yaml :as yaml]
[dda.c4k-common.base64 :as b64]
[dda.c4k-common.common :as cm]
[dda.c4k-common.predicate :as cp]
#?(:cljs [dda.c4k-common.macros :refer-macros [inline-resources]])))
[dda.c4k-common.common :as cm]))
(s/def ::aws-access-key-id cp/bash-env-string?)
(s/def ::aws-secret-access-key cp/bash-env-string?)
(s/def ::restic-password cp/bash-env-string?)
(s/def ::restic-repository cp/bash-env-string?)
(s/def ::aws-access-key-id cm/bash-env-string?)
(s/def ::aws-secret-access-key cm/bash-env-string?)
(s/def ::restic-password cm/bash-env-string?)
(s/def ::restic-repository cm/bash-env-string?)
#?(:cljs
(defmethod yaml/load-resource :backup [resource-name]
(get (inline-resources "backup") resource-name)))
(case resource-name
"backup/config.yaml" (rc/inline "backup/config.yaml")
"backup/cron.yaml" (rc/inline "backup/cron.yaml")
"backup/secret.yaml" (rc/inline "backup/secret.yaml")
"backup/backup-restore-deployment.yaml" (rc/inline "backup/backup-restore-deployment.yaml")
(throw (js/Error. "Undefined Resource!")))))
(defn generate-config [my-conf]
(let [{:keys [restic-repository]} my-conf]

View file

@ -1,43 +1,44 @@
(ns dda.c4k-nextcloud.core
(:require
#?(:clj [orchestra.core :refer [defn-spec]]
:cljs [orchestra.core :refer-macros [defn-spec]])
[dda.c4k-common.common :as cm]
[dda.c4k-common.predicate :as cp]
[dda.c4k-common.yaml :as yaml]
[dda.c4k-common.postgres :as postgres]
[dda.c4k-nextcloud.nextcloud :as nextcloud]
[dda.c4k-nextcloud.backup :as backup]
[dda.c4k-common.monitoring :as mon]
[dda.c4k-common.namespace :as ns]))
(:require
[clojure.spec.alpha :as s]
#?(:clj [orchestra.core :refer [defn-spec]]
:cljs [orchestra.core :refer-macros [defn-spec]])
[dda.c4k-common.common :as cm]
[dda.c4k-common.predicate :as cp]
[dda.c4k-common.yaml :as yaml]
[dda.c4k-common.postgres :as postgres]
[dda.c4k-nextcloud.nextcloud :as nextcloud]
[dda.c4k-nextcloud.backup :as backup]
[dda.c4k-common.monitoring :as mon]))
(def config-defaults {:namespace "nextcloud"
:issuer "staging"
:pvc-storage-class-name "hcloud-volumes-encrypted"
:pv-storage-size-gb 200})
(def default-storage-class :local-path)
(def config-defaults {:issuer "staging"})
(defn-spec k8s-objects cp/map-or-seq?
[config nextcloud/config?
auth nextcloud/auth?]
(let [resolved-config (merge config-defaults config)]
(let [nextcloud-default-storage-config {:pvc-storage-class-name default-storage-class
:pv-storage-size-gb 200}]
(map yaml/to-string
(filter
#(not (nil? %))
(cm/concat-vec
(ns/generate resolved-config)
(postgres/generate (merge resolved-config {:postgres-size :8gb
:db-name "cloud"
:pv-storage-size-gb 50})
auth)
[(nextcloud/generate-secret auth)
(nextcloud/generate-pvc resolved-config)
(nextcloud/generate-deployment resolved-config)
[(postgres/generate-config {:postgres-size :8gb :db-name "nextcloud"})
(postgres/generate-secret auth)
(postgres/generate-pvc {:pv-storage-size-gb 50
:pvc-storage-class-name default-storage-class})
(postgres/generate-deployment)
(postgres/generate-service)
(nextcloud/generate-secret auth)
(nextcloud/generate-pvc (merge nextcloud-default-storage-config config))
(nextcloud/generate-deployment config)
(nextcloud/generate-service)]
(nextcloud/generate-ingress-and-cert resolved-config)
(when (:contains? resolved-config :restic-repository)
[(backup/generate-config resolved-config)
(nextcloud/generate-ingress-and-cert config)
(when (:contains? config :restic-repository)
[(backup/generate-config config)
(backup/generate-secret auth)
(backup/generate-cron)
(backup/generate-backup-restore-deployment resolved-config)])
(when (:contains? resolved-config :mon-cfg)
(mon/generate (:mon-cfg resolved-config) (:mon-auth auth))))))))
(backup/generate-backup-restore-deployment config)])
(when (:contains? config :mon-cfg)
(mon/generate (:mon-cfg config) (:mon-auth auth))))))))

View file

@ -1,6 +1,7 @@
(ns dda.c4k-nextcloud.nextcloud
(:require
[clojure.spec.alpha :as s]
#?(:cljs [shadow.resource :as rc])
#?(:clj [orchestra.core :refer [defn-spec]]
:cljs [orchestra.core :refer-macros [defn-spec]])
[dda.c4k-common.yaml :as yaml]
@ -9,8 +10,7 @@
[dda.c4k-common.predicate :as cp]
[dda.c4k-common.postgres :as postgres]
[dda.c4k-common.common :as cm]
[dda.c4k-common.monitoring :as mon]
#?(:cljs [dda.c4k-common.macros :refer-macros [inline-resources]])))
[dda.c4k-common.monitoring :as mon]))
(s/def ::fqdn cp/fqdn-string?)
(s/def ::issuer cp/letsencrypt-issuer?)
@ -35,13 +35,18 @@
#?(:cljs
(defmethod yaml/load-resource :nextcloud [resource-name]
(get (inline-resources "nextcloud") resource-name)))
(case resource-name
"nextcloud/deployment.yaml" (rc/inline "nextcloud/deployment.yaml")
"nextcloud/pvc.yaml" (rc/inline "nextcloud/pvc.yaml")
"nextcloud/service.yaml" (rc/inline "nextcloud/service.yaml")
"nextcloud/secret.yaml" (rc/inline "nextcloud/secret.yaml")
(throw (js/Error. "Undefined Resource!")))))
(defn-spec generate-deployment cp/map-or-seq?
[config config?]
(let [{:keys [fqdn]} config]
(-> (yaml/load-as-edn "nextcloud/deployment.yaml")
(cm/replace-all-matching "fqdn" fqdn))))
(cm/replace-all-matching-values-by-new-value "fqdn" fqdn))))
(defn-spec generate-ingress-and-cert cp/map-or-seq?
[config config?]

View file

@ -2,7 +2,6 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: backup-restore
namespace: nextcloud
spec:
replicas: 0
selector:
@ -68,9 +67,6 @@ spec:
- name: cloud-secret-volume
mountPath: /var/run/secrets/cloud-secrets
readOnly: true
- name: rotation-credential-secret-volume
mountPath: /var/run/secrets/rotation-credential-secret
readOnly: true
volumes:
- name: cloud-data-volume
persistentVolumeClaim:
@ -81,7 +77,3 @@ spec:
- name: backup-secret-volume
secret:
secretName: backup-secret
- name: rotation-credential-secret-volume
secret:
secretName: rotation-credential-secret
optional: true

View file

@ -2,7 +2,6 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: backup-config
namespace: nextcloud
labels:
app.kubernetes.io/name: backup
app.kubernetes.io/part-of: cloud

View file

@ -1,8 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: rotation-credential-secret
namespace: nextcloud
type: Opaque
data:
rotation-credential: "dGVzdAo="

View file

@ -2,7 +2,6 @@ apiVersion: batch/v1
kind: CronJob
metadata:
name: cloud-backup
namespace: nextcloud
labels:
app.kubernetes.part-of: cloud
spec:

View file

@ -2,7 +2,6 @@ apiVersion: v1
kind: Secret
metadata:
name: backup-secret
namespace: nextcloud
type: Opaque
data:
aws-access-key-id: "aws-access-key-id"

View file

@ -2,7 +2,6 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: cloud-deployment
namespace: nextcloud
spec:
selector:
matchLabels:
@ -19,7 +18,7 @@ spec:
redeploy: v3
spec:
containers:
- image: domaindrivenarchitecture/c4k-cloud:8.0.0
- image: domaindrivenarchitecture/c4k-cloud:7.0.0
name: cloud-app
imagePullPolicy: IfNotPresent
ports:

View file

@ -2,7 +2,6 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: cloud-pvc
namespace: nextcloud
labels:
app.kubernetes.io/application: cloud
spec:

View file

@ -2,7 +2,6 @@ apiVersion: v1
kind: Secret
metadata:
name: cloud-secret
namespace: nextcloud
type: Opaque
data:
nextcloud-admin-user: "admin-user"

View file

@ -2,7 +2,6 @@ apiVersion: v1
kind: Service
metadata:
name: cloud-service
namespace: nextcloud
labels:
app.kubernetes.io/name: cloud-service
app.kubernetes.io/application: cloud

View file

@ -8,7 +8,7 @@
(deftest should-generate-secret
(is (= {:apiVersion "v1"
:kind "Secret"
:metadata {:name "backup-secret", :namespace "nextcloud"}
:metadata {:name "backup-secret"}
:type "Opaque"
:data
{:aws-access-key-id "YXdzLWlk", :aws-secret-access-key "YXdzLXNlY3JldA==", :restic-password "cmVzdGljLXB3"}}
@ -18,7 +18,6 @@
(is (= {:apiVersion "v1"
:kind "ConfigMap"
:metadata {:name "backup-config"
:namespace "nextcloud"
:labels {:app.kubernetes.io/name "backup"
:app.kubernetes.io/part-of "cloud"}}
:data
@ -28,7 +27,7 @@
(deftest should-generate-cron
(is (= {:apiVersion "batch/v1"
:kind "CronJob"
:metadata {:name "cloud-backup", :namespace "nextcloud", :labels {:app.kubernetes.part-of "cloud"}}
:metadata {:name "cloud-backup", :labels {:app.kubernetes.part-of "cloud"}}
:spec
{:schedule "10 23 * * *"
:successfulJobsHistoryLimit 1

View file

@ -2,17 +2,21 @@
(:require
#?(:clj [clojure.test :refer [deftest is are testing run-tests]]
:cljs [cljs.test :refer-macros [deftest is are testing run-tests]])
#?(:cljs [shadow.resource :as rc])
[clojure.spec.alpha :as s]
[clojure.spec.test.alpha :as st]
[dda.c4k-common.yaml :as yaml]
[dda.c4k-nextcloud.nextcloud :as cut]
#?(:cljs [dda.c4k-common.macros :refer-macros [inline-resources]])))
[dda.c4k-nextcloud.nextcloud :as cut]))
(st/instrument)
#?(:cljs
(defmethod yaml/load-resource :nextcloud-test [resource-name]
(get (inline-resources "nextcloud-test") resource-name)))
(case resource-name
"nextcloud-test/valid-auth.yaml" (rc/inline "nextcloud-test/valid-auth.yaml")
"nextcloud-test/valid-config.yaml" (rc/inline "nextcloud-test/valid-config.yaml")
"nextcloud-test/invalid-auth.yaml" (rc/inline "nextcloud-test/invalid-auth.yaml")
"nextcloud-test/invalid-config.yaml" (rc/inline "nextcloud-test/invalid-config.yaml"))))
(deftest validate-valid-resources
(is (s/valid? cut/config? (yaml/load-as-edn "nextcloud-test/valid-config.yaml")))
@ -23,7 +27,7 @@
(deftest should-generate-secret
(is (= {:apiVersion "v1"
:kind "Secret"
:metadata {:name "cloud-secret", :namespace "nextcloud"}
:metadata {:name "cloud-secret"}
:type "Opaque"
:data
{:nextcloud-admin-user "Y2xvdWRhZG1pbg=="
@ -77,8 +81,7 @@
(is (= {:apiVersion "v1"
:kind "PersistentVolumeClaim"
:metadata {:name "cloud-pvc"
:namespace "nextcloud"
:labels {:app.kubernetes.io/application "cloud"}}
:labels {:app.kubernetes.io/application "cloud"}}
:spec {:storageClassName "local-path"
:accessModes ["ReadWriteOnce"]
:resources {:requests {:storage "50Gi"}}}}
@ -87,7 +90,7 @@
(deftest should-generate-deployment
(is (= {:apiVersion "apps/v1"
:kind "Deployment"
:metadata {:name "cloud-deployment", :namespace "nextcloud"}
:metadata {:name "cloud-deployment"}
:spec
{:selector {:matchLabels #:app.kubernetes.io{:name "cloud-pod", :application "cloud"}}
:strategy {:type "Recreate"}
@ -95,7 +98,7 @@
{:metadata {:labels {:app "cloud-app", :app.kubernetes.io/name "cloud-pod", :app.kubernetes.io/application "cloud", :redeploy "v3"}}
:spec
{:containers
[{:image "domaindrivenarchitecture/c4k-cloud:8.0.0"
[{:image "domaindrivenarchitecture/c4k-cloud:7.0.0"
:name "cloud-app"
:imagePullPolicy "IfNotPresent"
:ports [{:containerPort 80}]

View file

@ -0,0 +1,84 @@
# Usage
`setup-local-s3.sh [BUCKET_NAME]`:
- [BUCKET_NAME] is optional, "mybucket" will be used if not specified
- sets up a k3s instance
- installs a localstack pod
- creates http and https routing to localstack via localhost
- saves the self-signed certificate as ca.crt
- uses the certificate to initialize a restic repo at `https://k3stesthost/BUCKET_NAME`
Note: In case you are not able to connect to "k3stesthost/health", you might need to ensure that the ingress's IP matches the required host names: k3stesthost and cloudhost. With `sudo k3s kubectl get ingress` you can view the ingress's IP (e.g. 10.0.2.15), then add a line to the file "/etc/hosts", e.g. `10.0.2.15 k3stesthost cloudhost`
`start-k3s.sh`:
- creates and starts a k3s instance
`k3s-uninstall.sh`:
- deletes everything k3s related
## Other useful commands
- `sudo k3s kubectl get pods`
- `curl k3stesthost/health`
expected: `{"services": {"s3": "running"}, "features": {"persistence": "disabled", "initScripts": "initialized"}}`
#### Requires AWS-CLI
- create bucket `aws --endpoint-url=http://k3stesthost s3 mb s3://mybucket`
- list buckets `aws --endpoint-url=http://k3stesthost s3 ls`
- upload something `aws --endpoint-url=http://k3stesthost s3 cp test.txt s3://mybucket`
- check files `aws --endpoint-url=http://k3stesthost s3 ls s3://mybucket`
## Run docker locally
```
docker pull docker:19.03.12-dind
docker run -d --privileged --name integration-test docker:19.03.12-dind
docker exec integration-test sh -c "apk add bash"
```
Set up docker container integration-test:
```
docker cp ../../../../../c4k-nextcloud/ integration-test:/
docker exec -it integration-test sh
cd /c4k-nextcloud/src/test/resources/local-integration-test
./setup-docker.sh
```
## Deploy nextcloud
### Requirements
* leiningen (install with: `sudo apt install leiningen` )
* In the project's root execute: `lein uberjar`
* Change file "valid-config.edn" according to your settings (e.g. `:fqdn "cloudhost"` and `:restic-repository "s3://k3stesthost:mybucket"`).
### Deploy to k3s
* Create and deploy the k8s yaml:
`java -jar target/uberjar/c4k-nextcloud-standalone.jar valid-config.edn valid-auth.edn | sudo k3s kubectl apply -f -`
Some of the steps may take some min to be effective, but eventually nextcloud should be available at: https://cloudhost
### Deploy to k3d
k3d is a k3s system that runs inside a container. To install k3d run `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash` or have a look at https://k3d.io/v5.0.3/.
* Start a k3d cluster to deploy s3, nextcloud and test backup and restore on it: `./setup-local-s3-on-k3d.sh`
Some steps may take a couple of minutes to be effective, but eventually nextcloud should be available at: https://cloudhost
#### Remove k3d cluster
`k3d cluster delete nextcloud`
## Test in local gitlab runner
See https://stackoverflow.com/questions/32933174/use-gitlab-ci-to-run-tests-locally
This needs to be done in the project root
`docker run -d --name gitlab-runner --restart always -v $PWD:$PWD -v /var/run/docker.sock:/var/run/docker.sock gitlab/gitlab-runner:latest`
`docker exec -it -w $PWD gitlab-runner gitlab-runner exec docker nextcloud-integrationtest --docker-privileged --docker-volumes '/var/run/docker.sock:/var/run/docker.sock'`

View file

@ -0,0 +1,20 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: localstack-cert
namespace: default
spec:
secretName: localstack-secret
commonName: k3stesthost
dnsNames:
- k3stesthost
issuerRef:
name: selfsigning-issuer
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: selfsigning-issuer
spec:
selfSigned: {}

View file

@ -0,0 +1,44 @@
@startuml
autonumber
skinparam sequenceBox {
borderColor White
}
participant gitlab_runner
box "outer container" #LightBlue
participant .gitlab_ci
participant PreparingCommands
participant test_script
end box
box "k3s" #CornSilk
participant k3s_api_server
participant backup_pod
end box
gitlab_runner -> k3s_api_server: run k3s as container
gitlab_runner -> .gitlab_ci : run
.gitlab_ci -> PreparingCommands : Install packages (curl bash ...)
.gitlab_ci -> PreparingCommands : get k3s_api_server config for k3s_api_server
.gitlab_ci -> test_script : run
test_script -> k3s_api_server: apply cert-manager
test_script -> k3s_api_server: apply localstack
test_script -> k3s_api_server: enable tls / create certificates
test_script -> k3s_api_server: apply cloud
test_script -> k3s_api_server: create backup_pod (by scale to 1)
test_script -> backup_pod: backup
test_script -> backup_pod: restore
@enduml

View file

@ -0,0 +1,17 @@
# Set the default kube context if present
DEFAULT_KUBE_CONTEXTS="$HOME/.kube/config"
if test -f "${DEFAULT_KUBE_CONTEXTS}"
then
export KUBECONFIG="$DEFAULT_KUBE_CONTEXTS"
fi
# Additional contexts should be in ~/.kube/custom-contexts/
CUSTOM_KUBE_CONTEXTS="$HOME/.kube/custom-contexts"
mkdir -p "${CUSTOM_KUBE_CONTEXTS}"
OIFS="$IFS"
IFS=$'\n'
for contextFile in `find "${CUSTOM_KUBE_CONTEXTS}" -type f -name "*.yml"`
do
export KUBECONFIG="$contextFile:$KUBECONFIG"
done
IFS="$OIFS"

View file

@ -0,0 +1,65 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: localstack
spec:
selector:
matchLabels:
app: localstack
strategy:
type: Recreate
template:
metadata:
labels:
app: localstack
spec:
containers:
- image: localstack/localstack
name: localstack-app
imagePullPolicy: IfNotPresent
env:
- name: SERVICES
value: s3
---
# service
apiVersion: v1
kind: Service
metadata:
name: localstack-service
spec:
selector:
app: localstack
ports:
- port: 4566
---
apiVersion: v1
kind: Secret
metadata:
name: localstack-secret
type: Opaque
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ingress-localstack
annotations:
cert-manager.io/cluster-issuer: selfsigning-issuer
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/redirect-entry-point: https
namespace: default
spec:
tls:
- hosts:
- k3stesthost
secretName: localstack-secret
rules:
- host: k3stesthost
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: localstack-service
port:
number: 4566

View file

@ -0,0 +1,48 @@
#!/bin/bash
# Spin up a single-node k3s server inside Docker (docker-in-docker style),
# install the tooling the integration test needs (JDK, leiningen, kubectl),
# then hand off to the local S3 setup script.
set -x
# persistent volume for the k3s server state
docker volume create k3s-server
name='inttst'
# start the k3s server container only if one with this name is not already running
[[ $(docker ps -f "name=$name" --format '{{.Names}}') == $name ]] || docker run --name $name -d --privileged --tmpfs /run --tmpfs /var/run --restart always -e K3S_TOKEN=12345678901234 -e K3S_KUBECONFIG_OUTPUT=./kubeconfig.yaml -e K3S_KUBECONFIG_MODE=666 -v k3s-server:/var/lib/rancher/k3s:z -v $(pwd):/output:z -p 6443:6443 -p 80:80 -p 443:443 rancher/k3s server --cluster-init --tls-san k3stesthost --tls-san cloudhost
docker ps
# wait up to ~30s for k3s to write its kubeconfig inside the container
export timeout=30; while ! docker exec $name sh -c "test -f /var/lib/rancher/k3s/server/kubeconfig.yaml"; do if [ "$timeout" == 0 ]; then echo "ERROR: Timeout while waiting for file."; break; fi; sleep 1; ((timeout--)); done
mkdir -p $HOME/.kube/
# copy the kubeconfig out of the container so kubectl on this host can use it
docker cp $name:/var/lib/rancher/k3s/server/kubeconfig.yaml $HOME/.kube/config
# on timeout, dump debugging info and abort
if [ "$timeout" == 0 ]
then
echo -------------------------------------------------------
find / -name "kubeconfig.yaml";
echo -------------------------------------------------------
docker ps -a
echo -------------------------------------------------------
exit 1
fi
# make the API server hostname resolvable inside this (alpine) CI container
echo "127.0.0.1 kubernetes" >> /etc/hosts
# toolchain: JDK plus leiningen from sgerrand's community apk repository
apk add wget curl bash sudo openjdk8
wget -P /etc/apk/keys/ https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub
apk add --no-cache --repository=https://apkproxy.herokuapp.com/sgerrand/alpine-pkg-leiningen leiningen
# install a pinned kubectl release
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.22.0/bin/linux/amd64/kubectl
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl
sleep 20 #allow some time to startup k3s
docker ps -a
swapoff -a # can this be removed ?
export KUBECONFIG=$HOME/.kube/config
pwd
# run the actual integration-test setup against the fresh cluster
cd ./c4k-nextcloud/src/test/resources/local-integration-test && ./setup-local-s3-on-k3d.sh

View file

@ -0,0 +1,60 @@
#!/bin/bash
set -x

# End-to-end integration test: deploy c4k-nextcloud on a local k3s cluster
# backed by a localstack S3 endpoint, then exercise the backup and restore
# scripts inside the backup-restore pod. Expects kubectl to be configured
# and localstack.yaml / certificate.yaml next to this script.
function main()
{
    # enable tls for k3s with cert-manager
    kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
    kubectl apply -f localstack.yaml

    # cert-manager's webhook takes a while to come up; retry until the
    # Certificate resource is accepted.
    until kubectl apply -f certificate.yaml
    do
        echo "[INFO] Waiting for certificate ..."
        sleep 30
    done

    # wait for ingress to be ready
    bash -c 'external_ip=""; while [ -z $external_ip ]; do echo "[INFO] Waiting for end point..."; external_ip=$(kubectl get ingress -o jsonpath="{$.items[*].status.loadBalancer.ingress[*].ip}"); [ -z "$external_ip" ] && sleep 10; done; echo "End point ready - $external_ip";'

    export INGRESS_IP=$(kubectl get ingress ingress-localstack -o=jsonpath="{.status.loadBalancer.ingress[0].ip}")

    cd ../../../../ # c4k-nextcloud project root
    lein uberjar
    # render the k8s manifests from the local config and apply them
    java -jar target/uberjar/c4k-nextcloud-standalone.jar config-local.edn auth-local.edn | kubectl apply -f -

    CLOUD_POD=$(kubectl get pod -l app=cloud-app -o name)
    kubectl wait $CLOUD_POD --for=condition=Ready --timeout=240s

    # wait for nextcloud config file available
    # Fix: the original referenced undefined $POD here, so the exec targeted
    # no pod; use the cloud pod captured above.
    timeout 180 bash -c "kubectl exec -t $CLOUD_POD -- bash -c \"until [ -f /var/www/html/config/config.php ]; do sleep 10; done\""

    # ensure an instance of pod backup-restore
    kubectl scale deployment backup-restore --replicas 1

    # wait for localstack health endpoint
    echo "$INGRESS_IP k3stesthost cloudhost" >> /etc/hosts
    until curl --fail --silent k3stesthost/health | grep -oe '"s3": "available"' -oe '"s3": "running"'
    do
        curl --fail k3stesthost/health
        echo "[INFO] Waiting for s3 running"
        sleep 10
    done

    BACKUP_POD=$(kubectl get pod -l app=backup-restore -o name)
    kubectl wait $BACKUP_POD --for=condition=Ready --timeout=240s
    # make the localstack hostnames resolvable from inside the backup pod
    kubectl exec -t $BACKUP_POD -- bash -c "echo \"$INGRESS_IP k3stesthost cloudhost\" >> /etc/hosts"
    kubectl exec -t $BACKUP_POD -- /usr/local/bin/init.sh

    echo ================= BACKUP =================
    kubectl exec -t $BACKUP_POD -- /usr/local/bin/backup.sh

    sleep 10 # avoid race conditions

    echo ================= RESTORE =================
    kubectl exec -t $BACKUP_POD -- /usr/local/bin/restore.sh
}

main "$@"

View file

@ -0,0 +1,34 @@
# Bootstrap a local k3s cluster with cert-manager and localstack, then
# initialise a restic repository in the localstack-backed S3 bucket.
#
# $1 (optional): bucket name, defaults to "mybucket".
function main()
{
    local bucket_name="${1:-mybucket}"
    # Fix: guard the shift — the original unconditional "shift" fails with
    # "shift count out of range" when the script is called without arguments.
    [ $# -gt 0 ] && shift

    ./start-k3s.sh
    sudo k3s kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
    sudo k3s kubectl apply -f localstack.yaml

    # cert-manager's webhook needs time to come up; retry the Certificate.
    until sudo k3s kubectl apply -f certificate.yaml
    do
        echo "*** Waiting for certificate ... ***"
        sleep 10
    done

    echo
    echo
    echo "[INFO] Waiting for localstack health endpoint"
    until curl --connect-timeout 3 -s -f -o /dev/null "k3stesthost/health"
    do
        sleep 5
    done
    echo

    # export the self-signed CA so restic can verify the TLS endpoint
    sudo k3s kubectl get secret localstack-secret -o jsonpath="{.data.ca\.crt}" | base64 --decode > ca.crt

    #aws --endpoint-url=http://localhost s3 mb s3://$bucket_name
    export RESTIC_PASSWORD="test-password"
    restic init --cacert ca.crt -r s3://k3stesthost/$bucket_name
}

# Fix: quote "$@" so arguments containing spaces survive word splitting
# (consistent with the sibling integration-test script).
main "$@"

View file

@ -0,0 +1,9 @@
# Start a local k3s cluster (via start-k3s.sh) and install cert-manager
# so TLS certificates can be issued for the integration tests.
function main()
{
./start-k3s.sh
sudo k3s kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
}
main

View file

@ -0,0 +1 @@
# Create a k3d cluster named "nextcloud": map ports 80/443 through the k3d
# loadbalancer, expose the API on 6443, pass an extra TLS SAN "cloudhost"
# to k3s, and write the kubeconfig to ~/.kube/custom-contexts/k3d-config.yml
# while also updating the default kubeconfig.
KUBECONFIG=~/.kube/custom-contexts/k3d-config.yml k3d cluster create nextcloud --k3s-arg '--tls-san cloudhost@loadbalancer' --port 80:80@loadbalancer --port 443:443@loadbalancer --api-port 6443 --kubeconfig-update-default

View file

@ -0,0 +1 @@
# Install k3s via the official installer, naming the node "k3stesthost"
# and adding "cloudhost" as an extra TLS SAN on the API server certificate.
curl -sfL https://get.k3s.io | K3S_NODE_NAME=k3stesthost INSTALL_K3S_EXEC='--tls-san cloudhost' sh -

View file

@ -0,0 +1,9 @@
;; Local auth configuration for the c4k-nextcloud integration test:
;; database, admin, S3 and restic credentials plus monitoring auth.
;; NOTE(review): placeholder values for local testing only — never commit
;; real secrets in this file.
{:postgres-db-user "nextcloud"
:postgres-db-password "nextcloud-db-password"
:nextcloud-admin-user "cloudadmin"
:nextcloud-admin-password "cloudpassword"
:aws-access-key-id "aws-id"
:aws-secret-access-key "aws-secret"
:restic-password "restic-password"
:mon-auth {:grafana-cloud-user "user"
:grafana-cloud-password "password"}}

View file

@ -0,0 +1,8 @@
;; Local deployment configuration for the c4k-nextcloud integration test:
;; FQDN, cert-manager issuer, volume paths, restic repo and monitoring target.
{:fqdn "cloud.test.meissa-gmbh.de"
:issuer "staging"
:nextcloud-data-volume-path "/var/cloud"
:postgres-data-volume-path "/var/postgres"
:restic-repository "s3:s3.amazonaws.com/your-bucket/your-folder"
;; NOTE(review): :k3s-cluster-name "jitsi" looks like a copy-paste leftover
;; from another c4k project — confirm the intended cluster name.
:mon-cfg {:grafana-cloud-url "url-for-your-prom-remote-write-endpoint"
:k3s-cluster-name "jitsi"
:k3s-cluster-stage "test"}}