Compare commits
60 commits
d1a8479598
c4832b1107
7e3312e285
f636f7ffc3
bba6bbe830
8388d72517
f2b583c060
5a3aca38cf
4764d1db67
8aef785bdc
5eb83f78a0
d997e470a0
dc86531454
ca0d4ac7b2
e4666a592e
fa48d9762a
6a278ece0d
dda45d92d6
c69c9da659
de53b9b7a5
cc845af696
f5aa2295f0
1d344abc27
7cd4e4101d
5d2a65079e
b36808de7c
3e588c082c
4a74a1bec0
1ff8a0dc13
fd27c15ec7
192e053afc
cab9b573c1
226046d278
f108b67e62
96343b9af5
f072ff027d
e396880365
83080b30a8
4a78a35424
8727f16c75
ac3f2a455d
c05ecfa427
fff2c939d9
e8cec9de8a
1fb309f213
70d41ca532
a96cba8cb1
3bdf2ea553
fcf6d7783e
ab4c6e0d76
ef8bfa11fc
7d9ca203bb
f8bcbe63ba
7219533c86
9b13b96ff6
351b2295e3
3a9694d9a1
3b458b980b
5f38fb7526
fa81908791
38 changed files with 154 additions and 494 deletions
```diff
@@ -4,7 +4,6 @@ stages:
   - security
   - upload
   - image
-  #- integrationtest
 
 .img: &img
   image: "domaindrivenarchitecture/ddadevops-dind:4.11.3"
@@ -130,23 +129,3 @@ nextcloud-image-publish:
   stage: image
   script:
     - cd infrastructure/nextcloud && pyb image publish
-
-#.nextcloud-integrationtest:
-#  stage: integrationtest
-#  image: registry.gitlab.com/gitlab-org/cluster-integration/helm-install-image/releases/3.7.1-kube-1.20.11-alpine-3.14
-#  services:
-#    - name: registry.gitlab.com/gitlab-org/cluster-integration/test-utils/k3s-gitlab-ci/releases/v1.22.2-k3s2
-#      alias: k3s
-#  script:
-#    - apk add curl sudo bash
-#    - apk add wget curl bash sudo openjdk8
-#    - wget -P /etc/apk/keys/ https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub
-#    - apk add --no-cache --repository=https://apkproxy.herokuapp.com/sgerrand/alpine-pkg-leiningen leiningen
-#
-#    - mkdir -p ${HOME}/.kube/
-#    - curl -f k3s:8081 > ${HOME}/.kube/config
-#    - kubectl version
-#    - kubectl cluster-info
-#    - echo "---------- Integration test -------------"
-#    - pwd
-#    - cd ./src/test/resources/local-integration-test/ && ./setup-local-s3-on-k3d.sh
```
```diff
@@ -41,7 +41,8 @@ Development happens at: https://repo.prod.meissa.de/meissa/c4k-nextcloud
 
 Mirrors are:
 
-* https://gitlab.com/domaindrivenarchitecture/c4k-nextcloud (issues and PR, CI)
+* https://codeberg.org/meissa/c4k-nextcloud (Issues and PR)
+* https://gitlab.com/domaindrivenarchitecture/c4k-nextcloud (CI)
 * https://github.com/DomainDrivenArchitecture/c4k-nextcloud
 
 For more details about our repository model see: https://repo.prod.meissa.de/meissa/federate-your-repos
@@ -49,6 +50,6 @@ For more details about our repository model see: https://repo.prod.meissa.de/meissa/federate-your-repos
 
 ## License
 
-Copyright © 2021 meissa GmbH
+Copyright © 2021, 2022, 2023, 2024 meissa GmbH
 Licensed under the [Apache License, Version 2.0](LICENSE) (the "License")
 Pls. find licenses of our subcomponents [here](doc/SUBCOMPONENT_LICENSE)
```
```diff
@@ -1,7 +0,0 @@
-{:postgres-db-user "nextcloud"
- :postgres-db-password "dbpass"
- :nextcloud-admin-user "cloudadmin"
- :nextcloud-admin-password "cloudpassword"
- :aws-access-key-id ""
- :aws-secret-access-key ""
- :restic-password "test-password"}
```
build.py

```diff
@@ -33,7 +33,7 @@ def initialize(project):
             f"target/uberjar/{name}-standalone.jar",
             f"target/frontend-build/{name}.js",
         ],
-        "release_main_branch": "master",
+        "release_main_branch": "main",
     }
 
     build = ReleaseMixin(project, input)
@@ -41,18 +41,18 @@ def initialize(project):
 
 
 @task
-def test_clj(project):
+def test_clj():
     run("lein test", shell=True, check=True)
 
 
 @task
-def test_cljs(project):
+def test_cljs():
     run("shadow-cljs compile test", shell=True, check=True)
     run("node target/node-tests.js", shell=True, check=True)
 
 
 @task
-def test_schema(project):
+def test_schema():
     run("lein uberjar", shell=True, check=True)
     run(
         "java -jar target/uberjar/c4k-nextcloud-standalone.jar "
@@ -63,6 +63,11 @@ def test_schema(project):
         check=True,
     )
 
+@task
+def test():
+    test_clj()
+    test_cljs()
+    test_schema()
 
 @task
 def report_frontend(project):
@@ -148,11 +153,11 @@ def upload_clj(project):
 
 @task
 def lint(project):
-    #run(
-    #    "lein eastwood",
-    #    shell=True,
-    #    check=True,
-    #)
+    run(
+        "lein eastwood",
+        shell=True,
+        check=True,
+    )
     run(
         "lein ancient check",
         shell=True,
@@ -222,7 +227,7 @@ def release(project):
 def linttest(project, release_type):
     build = get_devops_build(project)
     build.update_release_type(release_type)
-    test_clj(project)
-    test_cljs(project)
-    test_schema(project)
+    test_clj()
+    test_cljs()
+    test_schema()
     lint(project)
```
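Since `test_clj`, `test_cljs` and `test_schema` no longer take a `project` argument, the new aggregate `test` task can call them directly. A minimal sketch of driving the tasks from the shell, assuming pybuilder's `pyb` CLI is on the PATH (the CI above already uses `pyb` this way):

```bash
# Run the aggregated test task added in this diff, then the lint task.
# Assumes the working directory contains this build.py.
pyb test
pyb lint
```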
```diff
@@ -1,6 +0,0 @@
-{:fqdn "cloudhost"
- :issuer :staging
- :nextcloud-data-volume-path "/var/cloud"
- :postgres-data-volume-path "/var/postgres"
- :restic-repository "s3://k3stesthost/mybucket"
- :local-integration-test true}
```
```diff
@@ -10,37 +10,37 @@
 ## Manual init the restic repository for the first time
 
 1. Scale backup-restore deployment up:
-   `kubectl scale deployment backup-restore --replicas=1`
+   `kubectl -n nextcloud scale deployment backup-restore --replicas=1`
 1. exec into pod and execute restore pod
-   `kubectl exec -it backup-restore -- /usr/local/bin/init.sh`
+   `kubectl -n nextcloud exec -it backup-restore -- /usr/local/bin/init.sh`
 1. Scale backup-restore deployment down:
-   `kubectl scale deployment backup-restore --replicas=0`
+   `kubectl -n nextcloud scale deployment backup-restore --replicas=0`
 
 
-## Manual backup the restic repository for the first time
+## Manual backup
 
 1. Scale Cloud deployment down:
-   `kubectl scale deployment cloud-deployment --replicas=0`
+   `kubectl -n nextcloud scale deployment cloud-deployment --replicas=0`
 1. Scale backup-restore deployment up:
-   `kubectl scale deployment backup-restore --replicas=1`
+   `kubectl -n nextcloud scale deployment backup-restore --replicas=1`
 1. exec into pod and execute restore pod
-   `kubectl exec -it backup-restore -- /usr/local/bin/backup.sh`
+   `kubectl -n nextcloud exec -it backup-restore -- /usr/local/bin/backup.sh`
 1. Scale backup-restore deployment down:
-   `kubectl scale deployment backup-restore --replicas=0`
+   `kubectl -n nextcloud scale deployment backup-restore --replicas=0`
 1. Scale Cloud deployment up:
-   `kubectl scale deployment cloud-deployment --replicas=1`
+   `kubectl -n nextcloud scale deployment cloud-deployment --replicas=1`
 
 
 ## Manual restore
 
 1. Scale Cloud deployment down:
-   `kubectl scale deployment cloud-deployment --replicas=0`
+   `kubectl -n nextcloud scale deployment cloud-deployment --replicas=0`
 2. Scale backup-restore deployment up:
-   `kubectl scale deployment backup-restore --replicas=1`
+   `kubectl -n nextcloud scale deployment backup-restore --replicas=1`
 3. exec into pod and execute restore pod
-   `kubectl exec -it backup-restore -- /usr/local/bin/restore.sh`
+   `kubectl -n nextcloud exec -it backup-restore -- /usr/local/bin/restore.sh`
 4. Scale backup-restore deployment down:
-   `kubectl scale deployment backup-restore --replicas=0`
+   `kubectl -n nextcloud scale deployment backup-restore --replicas=0`
 5. Scale Cloud deployment up:
-   `kubectl scale deployment cloud-deployment --replicas=1`
+   `kubectl -n nextcloud scale deployment cloud-deployment --replicas=1`
```
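The manual backup steps above chain naturally into a single script. A sketch under the doc's assumptions; the `kubectl wait` step and the `app=backup-restore` label (taken from the integration-test script removed later in this diff) are additions and may need adjusting for a real cluster:

```bash
#!/bin/bash
# Sketch of the "Manual backup" steps from doc/Backup.md.
set -e
kubectl -n nextcloud scale deployment cloud-deployment --replicas=0
kubectl -n nextcloud scale deployment backup-restore --replicas=1
# Wait until the backup pod is ready before exec'ing into it.
kubectl -n nextcloud wait pod -l app=backup-restore --for=condition=Ready --timeout=240s
# exec via the deployment reference so we don't need the generated pod name.
kubectl -n nextcloud exec -it deploy/backup-restore -- /usr/local/bin/backup.sh
kubectl -n nextcloud scale deployment backup-restore --replicas=0
kubectl -n nextcloud scale deployment cloud-deployment --replicas=1
```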
```diff
@@ -5,12 +5,17 @@
 - 4.0.3: nextcloud 22
 - 5.0.0: nextcloud 23
 - 6.0.0: nextcloud 24
 - 7.0.0: nextcloud 25
+- 7.0.7: nextcloud 25.0.13
+- 7.1.1: nextcloud 26.0.0 (manual publish) => attention - only upgrade to 26.0.0 is working
+- 7.1.0: nextcloud 26.0.13 (manual publish)
+- 7.2.0: nextcloud 27 (manual publish)
+- 10.0.0: nextcloud 28.0.5
+- 10.1.0: nextcloud 29.0.0
 
 ## Uprgrading process
 
 1. Change the version of the docker image in the deployment to the next major version
-   - `kubectl edit deploy cloud-deployment`
+   - `kubectl -n=nextcloud edit deploy cloud-deployment`
    - change `image: domaindrivenarchitecture/c4k-cloud:4.0.3`
 2. Wait for the pod to finish restarting
 3. Verify the website is working and https://URL/settings/admin/overview shows the correct version
```
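The same image bump can be done non-interactively. A sketch; `cloud-app` is a hypothetical container name that must be checked against the actual deployment:

```bash
# Verify the container name first ('cloud-app' below is an assumption):
kubectl -n nextcloud get deploy cloud-deployment \
  -o jsonpath='{.spec.template.spec.containers[*].name}'
# Step 1 without an editor: set the image to the next major version.
kubectl -n nextcloud set image deployment/cloud-deployment \
  cloud-app=domaindrivenarchitecture/c4k-cloud:5.0.0
# Step 2: wait for the rollout instead of watching pods by hand.
kubectl -n nextcloud rollout status deployment/cloud-deployment
```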
doc/RenameDatabase.md (new file)
```diff
@@ -0,0 +1,41 @@
+# Rename Database
+
+## Start
+
+1. Scale down cloud deployment
+   `k -n nextcloud scale deployment cloud-deployment --replicas 0`
+
+## Change db-name in postgres
+
+1. Connect to postgres-pod
+   `k -n nextcloud exec -it postgresql-... -- bash`
+2. Connect to a database
+   `PGPASSWORD=$POSTGRES_PASSWORD psql -h postgresql-service -U $POSTGRES_USER postgres`
+3. List available databases
+   `\l`
+4. Rename database
+   `ALTER DATABASE cloud RENAME TO nextcloud;`
+5. Verify
+   `\l`
+6. Quit
+   `\q`
+
+## Update postgres-config
+
+1. Edit configmap
+   `k -n nextcloud edit configmap postgres-config`
+2. Update postgres-db value
+3. Save
+
+## Update nextcloud db-name
+
+1. Scale up nextcloud
+   `k -n nextcloud scale deployment cloud-deployment --replicas 1`
+2. Connect
+   `k -n nextcloud exec -it cloud-deployment-... -- bash`
+3. Update db value in config.php
+   `apt update`
+   `apt install vim`
+   `vim config/config.php`
+4. Update dbname field
+5. Verify server+website is working
```
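The interactive psql steps above can also be issued in one shot. A sketch using the same pod and environment variables as the new doc (the `postgresql-...` pod-name suffix stays elided, exactly as there):

```bash
# Rename the database without an interactive psql session.
# POSTGRES_PASSWORD / POSTGRES_USER are the env vars referenced in the doc;
# complete the postgresql pod name by hand.
kubectl -n nextcloud exec -it postgresql-... -- \
  bash -c 'PGPASSWORD=$POSTGRES_PASSWORD psql -h postgresql-service \
    -U $POSTGRES_USER postgres -c "ALTER DATABASE cloud RENAME TO nextcloud;"'
```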
```diff
@@ -6,7 +6,7 @@ from ddadevops import *
 
 name = "c4k-cloud"
 MODULE = "backup"
 PROJECT_ROOT_PATH = "../.."
-version = "8.0.1"
+version = "10.2.1-dev"
 
 
 @init
```
```diff
@@ -6,7 +6,7 @@ from ddadevops import *
 
 name = 'c4k-cloud'
 MODULE = 'not_set'
 PROJECT_ROOT_PATH = '../..'
-version = "8.0.1"
+version = "10.2.1-dev"
 
 @init
 def initialize(project):
```
```diff
@@ -1,4 +1,4 @@
-FROM nextcloud:28
+FROM nextcloud:29
 
 # REQUIRES docker >= 2.10.10
 # https://docs.docker.com/engine/release-notes/20.10/#201010
```
```diff
@@ -2,7 +2,7 @@
   "name": "c4k-nextcloud",
   "description": "Generate c4k yaml for a nextcloud deployment.",
   "author": "meissa GmbH",
-  "version": "8.0.1",
+  "version": "10.2.1-SNAPSHOT",
   "homepage": "https://gitlab.com/domaindrivenarchitecture/c4k-nextcloud#readme",
   "repository": "https://www.npmjs.com/package/c4k-nextcloud",
   "license": "APACHE2",
```
project.clj
```diff
@@ -1,11 +1,11 @@
-(defproject org.domaindrivenarchitecture/c4k-nextcloud "8.0.1"
+(defproject org.domaindrivenarchitecture/c4k-nextcloud "10.2.1-SNAPSHOT"
   :description "nextcloud c4k-installation package"
   :url "https://domaindrivenarchitecture.org"
   :license {:name "Apache License, Version 2.0"
             :url "https://www.apache.org/licenses/LICENSE-2.0.html"}
-  :dependencies [[org.clojure/clojure "1.11.2"]
-                 [org.clojure/tools.reader "1.4.1"]
-                 [org.domaindrivenarchitecture/c4k-common-clj "6.2.2"]
+  :dependencies [[org.clojure/clojure "1.11.3"]
+                 [org.clojure/tools.reader "1.4.2"]
+                 [org.domaindrivenarchitecture/c4k-common-clj "6.3.1"]
                  [hickory "0.7.1" :exclusions [viebel/codox-klipse-theme]]]
   :target-path "target/%s/"
   :source-paths ["src/main/cljc"
@@ -23,9 +23,9 @@
   :main dda.c4k-nextcloud.uberjar
   :uberjar-name "c4k-nextcloud-standalone.jar"
   :dependencies [[org.clojure/tools.cli "1.1.230"]
-                 [ch.qos.logback/logback-classic "1.5.3"
+                 [ch.qos.logback/logback-classic "1.5.6"
                   :exclusions [com.sun.mail/javax.mail]]
-                 [org.slf4j/jcl-over-slf4j "2.0.12"]
+                 [org.slf4j/jcl-over-slf4j "2.0.13"]
                  [com.github.clj-easy/graal-build-time "1.0.5"]]}}
   :release-tasks [["test"]
                   ["vcs" "assert-committed"]
```
```diff
@@ -4,7 +4,7 @@
   "src/test/cljc"
   "src/test/cljs"
   "src/test/resources"]
- :dependencies [[org.domaindrivenarchitecture/c4k-common-cljs "6.1.3"]
+ :dependencies [[org.domaindrivenarchitecture/c4k-common-cljs "6.3.1"]
                 [hickory "0.7.1"]]
 :builds {:frontend {:target :browser
                     :modules {:main {:init-fn dda.c4k-nextcloud.browser/init}}
```
```diff
@@ -1,43 +1,43 @@
 (ns dda.c4k-nextcloud.core
-  (:require
-   #?(:clj [orchestra.core :refer [defn-spec]]
-      :cljs [orchestra.core :refer-macros [defn-spec]])
-   [dda.c4k-common.common :as cm]
-   [dda.c4k-common.predicate :as cp]
-   [dda.c4k-common.yaml :as yaml]
-   [dda.c4k-common.postgres :as postgres]
-   [dda.c4k-nextcloud.nextcloud :as nextcloud]
-   [dda.c4k-nextcloud.backup :as backup]
-   [dda.c4k-common.monitoring :as mon]))
+  (:require
+   #?(:clj [orchestra.core :refer [defn-spec]]
+      :cljs [orchestra.core :refer-macros [defn-spec]])
+   [dda.c4k-common.common :as cm]
+   [dda.c4k-common.predicate :as cp]
+   [dda.c4k-common.yaml :as yaml]
+   [dda.c4k-common.postgres :as postgres]
+   [dda.c4k-nextcloud.nextcloud :as nextcloud]
+   [dda.c4k-nextcloud.backup :as backup]
+   [dda.c4k-common.monitoring :as mon]
+   [dda.c4k-common.namespace :as ns]))
 
 (def default-storage-class :local-path)
 
-(def config-defaults {:issuer "staging"})
+(def config-defaults {:namespace "nextcloud"
+                      :issuer "staging"
+                      :pvc-storage-class-name "hcloud-volumes-encrypted"
+                      :pv-storage-size-gb 200})
 
 (defn-spec k8s-objects cp/map-or-seq?
   [config nextcloud/config?
    auth nextcloud/auth?]
-  (let [nextcloud-default-storage-config {:pvc-storage-class-name default-storage-class
-                                          :pv-storage-size-gb 200}]
+  (let [resolved-config (merge config-defaults config)]
     (map yaml/to-string
          (filter
           #(not (nil? %))
          (cm/concat-vec
-          [(postgres/generate-config {:postgres-size :8gb :db-name "nextcloud"})
-           (postgres/generate-secret auth)
-           (postgres/generate-pvc {:pv-storage-size-gb 50
-                                   :pvc-storage-class-name default-storage-class})
-           (postgres/generate-deployment)
-           (postgres/generate-service)
-           (nextcloud/generate-secret auth)
-           (nextcloud/generate-pvc (merge nextcloud-default-storage-config config))
-           (nextcloud/generate-deployment config)
+          (ns/generate resolved-config)
+          (postgres/generate (merge resolved-config {:postgres-size :8gb
+                                                     :db-name "cloud"
+                                                     :pv-storage-size-gb 50})
+                             auth)
+          [(nextcloud/generate-secret auth)
+           (nextcloud/generate-pvc resolved-config)
+           (nextcloud/generate-deployment resolved-config)
            (nextcloud/generate-service)]
-          (nextcloud/generate-ingress-and-cert config)
-          (when (:contains? config :restic-repository)
-            [(backup/generate-config config)
+          (nextcloud/generate-ingress-and-cert resolved-config)
+          (when (:contains? resolved-config :restic-repository)
+            [(backup/generate-config resolved-config)
             (backup/generate-secret auth)
             (backup/generate-cron)
-            (backup/generate-backup-restore-deployment config)])
-          (when (:contains? config :mon-cfg)
-            (mon/generate (:mon-cfg config) (:mon-auth auth))))))))
+            (backup/generate-backup-restore-deployment resolved-config)])
+          (when (:contains? resolved-config :mon-cfg)
+            (mon/generate (:mon-cfg resolved-config) (:mon-auth auth))))))))
```
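With `config-defaults` now carrying `:namespace "nextcloud"`, a user config that omits the namespace, storage class or PV size picks them up through `(merge config-defaults config)`. A usage sketch from the shell, following the invocation documented in the (now removed) integration-test README further down; `config.edn`/`auth.edn` stand in for the deleted example files above:

```bash
# Build the standalone jar, generate the (now namespaced) k8s objects
# from an edn config + auth pair, and apply them to the cluster.
lein uberjar
java -jar target/uberjar/c4k-nextcloud-standalone.jar config.edn auth.edn \
  | kubectl apply -f -
```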
```diff
@@ -41,7 +41,7 @@
   [config config?]
   (let [{:keys [fqdn]} config]
     (-> (yaml/load-as-edn "nextcloud/deployment.yaml")
-        (cm/replace-all-matching-values-by-new-value "fqdn" fqdn))))
+        (cm/replace-all-matching "fqdn" fqdn))))
 
 (defn-spec generate-ingress-and-cert cp/map-or-seq?
   [config config?]
```
```diff
@@ -2,6 +2,7 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: backup-restore
+  namespace: nextcloud
 spec:
   replicas: 0
   selector:
@@ -67,6 +68,9 @@ spec:
         - name: cloud-secret-volume
           mountPath: /var/run/secrets/cloud-secrets
           readOnly: true
+        - name: rotation-credential-secret-volume
+          mountPath: /var/run/secrets/rotation-credential-secret
+          readOnly: true
       volumes:
       - name: cloud-data-volume
         persistentVolumeClaim:
@@ -77,3 +81,7 @@ spec:
       - name: backup-secret-volume
         secret:
           secretName: backup-secret
+      - name: rotation-credential-secret-volume
+        secret:
+          secretName: rotation-credential-secret
+          optional: true
```
```diff
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: backup-config
+  namespace: nextcloud
   labels:
     app.kubernetes.io/name: backup
     app.kubernetes.io/part-of: cloud
```
src/main/resources/backup/credential-rotation.yaml (new file)
```diff
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: rotation-credential-secret
+  namespace: nextcloud
+type: Opaque
+data:
+  rotation-credential: "dGVzdAo="
```
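The `rotation-credential` value is base64-encoded; `"dGVzdAo="` decodes to the placeholder `test` plus a trailing newline. A sketch for supplying a real value; since the deployment above mounts this secret as `optional: true`, it can also be managed outside the generated yaml:

```bash
# "dGVzdAo=" is base64 of "test\n":
echo "test" | base64    # -> dGVzdAo=
# Create or update the secret directly in the cluster instead of editing yaml.
kubectl -n nextcloud create secret generic rotation-credential-secret \
  --from-literal=rotation-credential='some-real-credential' \
  --dry-run=client -o yaml | kubectl apply -f -
```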
```diff
@@ -2,6 +2,7 @@ apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: cloud-backup
+  namespace: nextcloud
   labels:
     app.kubernetes.part-of: cloud
 spec:
```
```diff
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Secret
 metadata:
   name: backup-secret
+  namespace: nextcloud
 type: Opaque
 data:
   aws-access-key-id: "aws-access-key-id"
```
```diff
@@ -2,6 +2,7 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: cloud-deployment
+  namespace: nextcloud
 spec:
   selector:
     matchLabels:
```
```diff
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
   name: cloud-pvc
+  namespace: nextcloud
   labels:
     app.kubernetes.io/application: cloud
 spec:
```
```diff
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Secret
 metadata:
   name: cloud-secret
+  namespace: nextcloud
 type: Opaque
 data:
   nextcloud-admin-user: "admin-user"
```
```diff
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: cloud-service
+  namespace: nextcloud
   labels:
     app.kubernetes.io/name: cloud-service
     app.kubernetes.io/application: cloud
```
```diff
@@ -8,7 +8,7 @@
 (deftest should-generate-secret
   (is (= {:apiVersion "v1"
           :kind "Secret"
-          :metadata {:name "backup-secret"}
+          :metadata {:name "backup-secret", :namespace "nextcloud"}
           :type "Opaque"
           :data
           {:aws-access-key-id "YXdzLWlk", :aws-secret-access-key "YXdzLXNlY3JldA==", :restic-password "cmVzdGljLXB3"}}
@@ -18,6 +18,7 @@
   (is (= {:apiVersion "v1"
           :kind "ConfigMap"
           :metadata {:name "backup-config"
+                     :namespace "nextcloud"
                      :labels {:app.kubernetes.io/name "backup"
                               :app.kubernetes.io/part-of "cloud"}}
           :data
@@ -27,7 +28,7 @@
 (deftest should-generate-cron
   (is (= {:apiVersion "batch/v1"
           :kind "CronJob"
-          :metadata {:name "cloud-backup", :labels {:app.kubernetes.part-of "cloud"}}
+          :metadata {:name "cloud-backup", :namespace "nextcloud", :labels {:app.kubernetes.part-of "cloud"}}
           :spec
           {:schedule "10 23 * * *"
            :successfulJobsHistoryLimit 1
```
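These expectation updates mirror the `:namespace "nextcloud"` metadata added throughout. A sketch for re-running just the affected tests on the JVM side; the namespace name `dda.c4k-nextcloud.backup-test` is assumed from the namespace under test and should be checked against the file:

```bash
# Run only the backup test namespace with leiningen.
lein test dda.c4k-nextcloud.backup-test
```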
```diff
@@ -23,7 +23,7 @@
 (deftest should-generate-secret
   (is (= {:apiVersion "v1"
           :kind "Secret"
-          :metadata {:name "cloud-secret"}
+          :metadata {:name "cloud-secret", :namespace "nextcloud"}
           :type "Opaque"
           :data
           {:nextcloud-admin-user "Y2xvdWRhZG1pbg=="
@@ -77,7 +77,8 @@
   (is (= {:apiVersion "v1"
           :kind "PersistentVolumeClaim"
           :metadata {:name "cloud-pvc"
-                     :labels {:app.kubernetes.io/application "cloud"}}
+                     :namespace "nextcloud"
+                     :labels {:app.kubernetes.io/application "cloud"}}
           :spec {:storageClassName "local-path"
                  :accessModes ["ReadWriteOnce"]
                  :resources {:requests {:storage "50Gi"}}}}
@@ -86,7 +87,7 @@
 (deftest should-generate-deployment
   (is (= {:apiVersion "apps/v1"
           :kind "Deployment"
-          :metadata {:name "cloud-deployment"}
+          :metadata {:name "cloud-deployment", :namespace "nextcloud"}
           :spec
           {:selector {:matchLabels #:app.kubernetes.io{:name "cloud-pod", :application "cloud"}}
            :strategy {:type "Recreate"}
```
````diff
@@ -1,84 +0,0 @@
-# Usage
-
-`setup-local-s3.sh [BUCKET_NAME]`:
-- [BUCKET_NAME] is optional, "mybucket" will be used if not specified
-- sets up a k3s instance
-- installs a localstack pod
-- creates http and https routing to localstack via localhost
-- saves the self-signed certificate as ca.crt
-- uses the certificate to initialize a restic repo at `https://k3stesthost/BUCKET_NAME`
-
-Note: In case of not being able to connect to "k3stesthost/health", you might need to ensure that the ingress' ip matches with the required host names: k3stesthost and cloudhost. With `sudo k3s kubectl get ingress` you can view the ingress' ip (e.g. 10.0.2.15), then add a line to file "/etc/hosts" e.g. `10.0.2.15 k3stesthost cloudhost`
-
-`start-k3s.sh`:
-- creates and starts a k3s instance
-
-`k3s-uninstall.sh`:
-- deletes everything k3s related
-
-## Other useful commands
-- `sudo k3s kubectl get pods`
-- `curl k3stesthost/health`
-  expected: `{"services": {"s3": "running"}, "features": {"persistence": "disabled", "initScripts": "initialized"}}`
-
-#### Requires AWS-CLI
-- create bucket `aws --endpoint-url=http://k3stesthost s3 mb s3://mybucket`
-- list buckets `aws --endpoint-url=http://k3stesthost s3 ls`
-- upload something `aws --endpoint-url=http://k3stesthost s3 cp test.txt s3://mybucket`
-- check files `aws --endpoint-url=http://k3stesthost s3 ls s3://mybucket`
-
-## Run docker locally
-
-```
-docker pull docker:19.03.12-dind
-docker run -d --privileged --name integration-test docker:19.03.12-dind
-docker exec integration-test sh -c "apk add bash"
-```
-
-Set up docker container integration-test:
-
-```
-docker cp ../../../../../c4k-nextcloud/ integration-test:/
-docker exec -it integration-test sh
-cd /c4k-nextcloud/src/test/resources/local-integration-test
-./setup-docker.sh
-```
-
-## Deploy nextcloud
-
-### Requirements
-
-* leiningen (install with: `sudo apt install leiningen` )
-* In the project's root execute: `lein uberjar`
-* Change file "valid-config.edn" according to your settings (e.g. `:fqdn "cloudhost"` and `:restic-repository "s3://k3stesthost:mybucket"`).
-
-### Deploy to k3s
-
-* Create and deploy the k8s yaml:
-  `java -jar target/uberjar/c4k-nextcloud-standalone.jar valid-config.edn valid-auth.edn | sudo k3s kubectl apply -f -`
-
-Some of the steps may take some min to be effective, but eventually nextcloud should be available at: https://cloudhost
-
-### Deploy to k3d
-
-k3d is a k3s system which is running inside of a container. To install k3d run `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash` or have a look at https://k3d.io/v5.0.3/ .
-
-* Start a k3d cluster to deploy s3, nextcloud and test backup and restore on it: `./setup-local-s3-on-k3d.sh`
-
-Some steps may take a couple of minutes to be effective, but eventually nextcloud should be available at: https://cloudhost
-
-#### Remove k3d cluster
-
-`k3d cluster delete nextcloud`
-
-## Test in local gitlab runner
-
-See https://stackoverflow.com/questions/32933174/use-gitlab-ci-to-run-tests-locally
-
-This needs to be done in the project root
-
-`docker run -d --name gitlab-runner --restart always -v $PWD:$PWD -v /var/run/docker.sock:/var/run/docker.sock gitlab/gitlab-runner:latest`
-
-`docker exec -it -w $PWD gitlab-runner gitlab-runner exec docker nextcloud-integrationtest --docker-privileged --docker-volumes '/var/run/docker.sock:/var/run/docker.sock'`
````
```diff
@@ -1,20 +0,0 @@
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: localstack-cert
-  namespace: default
-spec:
-  secretName: localstack-secret
-  commonName: k3stesthost
-  dnsNames:
-    - k3stesthost
-  issuerRef:
-    name: selfsigning-issuer
-    kind: ClusterIssuer
----
-apiVersion: cert-manager.io/v1
-kind: ClusterIssuer
-metadata:
-  name: selfsigning-issuer
-spec:
-  selfSigned: {}
```
```diff
@@ -1,44 +0,0 @@
-@startuml
-
-autonumber
-
-skinparam sequenceBox {
-    borderColor White
-}
-
-participant gitlab_runner
-
-box "outer container" #LightBlue
-
-participant .gitlab_ci
-participant PreparingCommands
-participant test_script
-
-end box
-
-
-box "k3s" #CornSilk
-
-participant k3s_api_server
-participant backup_pod
-
-end box
-
-
-gitlab_runner -> k3s_api_server: run k3s as container
-gitlab_runner -> .gitlab_ci : run
-
-.gitlab_ci -> PreparingCommands : Install packages (curl bash ...)
-.gitlab_ci -> PreparingCommands : get k3s_api_server config for k3s_api_server
-
-.gitlab_ci -> test_script : run
-
-test_script -> k3s_api_server: apply cert-manager
-test_script -> k3s_api_server: apply localstack
-test_script -> k3s_api_server: enable tls / create certificates
-test_script -> k3s_api_server: apply cloud
-test_script -> k3s_api_server: create backup_pod (by scale to 1)
-test_script -> backup_pod: backup
-test_script -> backup_pod: restore
-
-@enduml
```
```diff
@@ -1,17 +0,0 @@
-# Set the default kube context if present
-DEFAULT_KUBE_CONTEXTS="$HOME/.kube/config"
-if test -f "${DEFAULT_KUBE_CONTEXTS}"
-then
-  export KUBECONFIG="$DEFAULT_KUBE_CONTEXTS"
-fi
-
-# Additional contexts should be in ~/.kube/custom-contexts/
-CUSTOM_KUBE_CONTEXTS="$HOME/.kube/custom-contexts"
-mkdir -p "${CUSTOM_KUBE_CONTEXTS}"
-OIFS="$IFS"
-IFS=$'\n'
-for contextFile in `find "${CUSTOM_KUBE_CONTEXTS}" -type f -name "*.yml"`
-do
-    export KUBECONFIG="$contextFile:$KUBECONFIG"
-done
-IFS="$OIFS"
```
```diff
@@ -1,65 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: localstack
-spec:
-  selector:
-    matchLabels:
-      app: localstack
-  strategy:
-    type: Recreate
-  template:
-    metadata:
-      labels:
-        app: localstack
-    spec:
-      containers:
-        - image: localstack/localstack
-          name: localstack-app
-          imagePullPolicy: IfNotPresent
-          env:
-            - name: SERVICES
-              value: s3
----
-# service
-apiVersion: v1
-kind: Service
-metadata:
-  name: localstack-service
-spec:
-  selector:
-    app: localstack
-  ports:
-    - port: 4566
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: localstack-secret
-type: Opaque
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: ingress-localstack
-  annotations:
-    cert-manager.io/cluster-issuer: selfsigning-issuer
-    kubernetes.io/ingress.class: traefik
-    traefik.ingress.kubernetes.io/redirect-entry-point: https
-  namespace: default
-spec:
-  tls:
-  - hosts:
-    - k3stesthost
-    secretName: localstack-secret
-  rules:
-  - host: k3stesthost
-    http:
-      paths:
-      - path: /
-        pathType: Prefix
-        backend:
-          service:
-            name: localstack-service
-            port:
-              number: 4566
```
```diff
@@ -1,48 +0,0 @@
-#!/bin/bash
-
-set -x
-
-docker volume create k3s-server
-
-name='inttst'
-
-[[ $(docker ps -f "name=$name" --format '{{.Names}}') == $name ]] || docker run --name $name -d --privileged --tmpfs /run --tmpfs /var/run --restart always -e K3S_TOKEN=12345678901234 -e K3S_KUBECONFIG_OUTPUT=./kubeconfig.yaml -e K3S_KUBECONFIG_MODE=666 -v k3s-server:/var/lib/rancher/k3s:z -v $(pwd):/output:z -p 6443:6443 -p 80:80 -p 443:443 rancher/k3s server --cluster-init --tls-san k3stesthost --tls-san cloudhost
-
-docker ps
-
-export timeout=30; while ! docker exec $name sh -c "test -f /var/lib/rancher/k3s/server/kubeconfig.yaml"; do if [ "$timeout" == 0 ]; then echo "ERROR: Timeout while waiting for file."; break; fi; sleep 1; ((timeout--)); done
-
-mkdir -p $HOME/.kube/
-
-docker cp $name:/var/lib/rancher/k3s/server/kubeconfig.yaml $HOME/.kube/config
-
-if [ "$timeout" == 0 ]
-then
-  echo -------------------------------------------------------
-  find / -name "kubeconfig.yaml";
-  echo -------------------------------------------------------
-  docker ps -a
-  echo -------------------------------------------------------
-  exit 1
-fi
-
-echo "127.0.0.1 kubernetes" >> /etc/hosts
-
-apk add wget curl bash sudo openjdk8
-
-wget -P /etc/apk/keys/ https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub
-apk add --no-cache --repository=https://apkproxy.herokuapp.com/sgerrand/alpine-pkg-leiningen leiningen
-
-curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.22.0/bin/linux/amd64/kubectl
-chmod +x ./kubectl
-mv ./kubectl /usr/local/bin/kubectl
-
-sleep 20 #allow some time to startup k3s
-docker ps -a
-
-swapoff -a # can this be removed ?
-
-export KUBECONFIG=$HOME/.kube/config
-
-pwd
-cd ./c4k-nextcloud/src/test/resources/local-integration-test && ./setup-local-s3-on-k3d.sh
```
```diff
@@ -1,60 +0,0 @@
-#!/bin/bash
-
-set -x
-
-function main()
-{
-    # enable tls for k3s with cert-manager
-    kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
-
-    kubectl apply -f localstack.yaml
-
-    until kubectl apply -f certificate.yaml
-    do
-        echo "[INFO] Waiting for certificate ..."
-        sleep 30
-    done
-
-    # wait for ingress to be ready
-    bash -c 'external_ip=""; while [ -z $external_ip ]; do echo "[INFO] Waiting for end point..."; external_ip=$(kubectl get ingress -o jsonpath="{$.items[*].status.loadBalancer.ingress[*].ip}"); [ -z "$external_ip" ] && sleep 10; done; echo "End point ready - $external_ip";'
-
-    export INGRESS_IP=$(kubectl get ingress ingress-localstack -o=jsonpath="{.status.loadBalancer.ingress[0].ip}")
-
-    cd ../../../../ # c4k-nextcloud project root
-    lein uberjar
-    java -jar target/uberjar/c4k-nextcloud-standalone.jar config-local.edn auth-local.edn | kubectl apply -f -
-
-    CLOUD_POD=$(kubectl get pod -l app=cloud-app -o name)
-    kubectl wait $CLOUD_POD --for=condition=Ready --timeout=240s
-
-    # wait for nextcloud config file available
-    timeout 180 bash -c "kubectl exec -t $POD -- bash -c \"until [ -f /var/www/html/config/config.php ]; do sleep 10; done\""
-
-    # ensure an instance of pod backup-restore
-    kubectl scale deployment backup-restore --replicas 1
-
-    # wait for localstack health endpoint
-    echo "$INGRESS_IP k3stesthost cloudhost" >> /etc/hosts
-    until curl --fail --silent k3stesthost/health | grep -oe '"s3": "available"' -oe '"s3": "running"'
-    do
-        curl --fail k3stesthost/health
-        echo "[INFO] Waiting for s3 running"
-        sleep 10
-    done
-
-    BACKUP_POD=$(kubectl get pod -l app=backup-restore -o name)
-    kubectl wait $BACKUP_POD --for=condition=Ready --timeout=240s
-
-    kubectl exec -t $BACKUP_POD -- bash -c "echo \"$INGRESS_IP k3stesthost cloudhost\" >> /etc/hosts"
-    kubectl exec -t $BACKUP_POD -- /usr/local/bin/init.sh
-
-    echo ================= BACKUP =================
-    kubectl exec -t $BACKUP_POD -- /usr/local/bin/backup.sh
-
-    sleep 10 # avoid race conditions
-
-    echo ================= RESTORE =================
-    kubectl exec -t $BACKUP_POD -- /usr/local/bin/restore.sh
-}
-
-main "$@"
```
```diff
@@ -1,34 +0,0 @@
-function main()
-{
-    local bucket_name="${1:-mybucket}"; shift
-
-    ./start-k3s.sh
-
-    sudo k3s kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
-
-    sudo k3s kubectl apply -f localstack.yaml
-
-    until sudo k3s kubectl apply -f certificate.yaml
-    do
-        echo "*** Waiting for certificate ... ***"
-        sleep 10
-    done
-    echo
-
-    echo
-    echo "[INFO] Waiting for localstack health endpoint"
-    until curl --connect-timeout 3 -s -f -o /dev/null "k3stesthost/health"
-    do
-        sleep 5
-    done
-    echo
-
-    sudo k3s kubectl get secret localstack-secret -o jsonpath="{.data.ca\.crt}" | base64 --decode > ca.crt
-
-    #aws --endpoint-url=http://localhost s3 mb s3://$bucket_name
-    export RESTIC_PASSWORD="test-password"
-    restic init --cacert ca.crt -r s3://k3stesthost/$bucket_name
-
-}
-
-main $@
```
```diff
@@ -1,9 +0,0 @@
-function main()
-{
-    ./start-k3s.sh
-
-    sudo k3s kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
-
-}
-
-main
```
```diff
@@ -1 +0,0 @@
-KUBECONFIG=~/.kube/custom-contexts/k3d-config.yml k3d cluster create nextcloud --k3s-arg '--tls-san cloudhost@loadbalancer' --port 80:80@loadbalancer --port 443:443@loadbalancer --api-port 6443 --kubeconfig-update-default
```
```diff
@@ -1 +0,0 @@
-curl -sfL https://get.k3s.io | K3S_NODE_NAME=k3stesthost INSTALL_K3S_EXEC='--tls-san cloudhost' sh -
```