Compare commits


193 commits
0.25.0 ... main

Author SHA1 Message Date
ansgarz
e6db481ac9 add possibility for monthly reboot of host system of k3s server 2024-11-23 18:48:25 +01:00
ansgarz
1118bc7d07 add createCronJob 2024-11-22 11:27:04 +01:00
ansgarz
c0985afacc [skip ci] correct task name binariesInstall 2024-10-25 09:13:52 +02:00
ansgarz
e8d4826708 version bump: 0.38.6-SNAPSHOT 2024-10-21 19:08:24 +02:00
ansgarz
ca2176f67a release: 0.38.5 2024-10-21 19:08:24 +02:00
ansgarz
445d12c849 refactorings & mark tests as ExtensiveContainerTests & add EnvSecretSource.kt 2024-10-21 19:08:09 +02:00
ansgarz
9e1023b4b8 [skip ci] reduce size native-images 2024-10-09 22:02:23 +02:00
ansgarz
f0e7d6518f snapshot: 0.38.5-SNAPSHOT 2024-10-09 18:31:28 +02:00
ansgarz
0d00f6f8c0 release: 0.38.4 2024-10-09 18:31:26 +02:00
ansgarz
c649010491 [skip ci] fix binary creation with native-image 2024-10-09 18:31:06 +02:00
ansgarz
659521e944 snapshot: 0.38.4-SNAPSHOT 2024-10-08 17:17:33 +02:00
ansgarz
ffee8aaff4 release: 0.38.3 2024-10-08 17:17:31 +02:00
ansgarz
b5a7b12794 improve serialization by using serializer<T>() 2024-10-08 17:16:37 +02:00
ansgarz
7f8d4c8b97 [skip ci] remove reflection dependency 2024-10-05 17:37:43 +02:00
ansgarz
e16c445b44 bump version to: 0.38.3-SNAPSHOT 2024-10-04 23:57:15 +02:00
ansgarz
9a68de9135 release: 0.38.2 2024-10-04 23:57:15 +02:00
ansgarz
98644199b5 snapshot: 0.38.2-SNAPSHOT 2024-10-04 23:47:36 +02:00
ansgarz
3f181e07b4 release: 0.38.1 2024-10-04 23:47:34 +02:00
ansgarz
ce93292336 snapshot: 0.38.1-SNAPSHOT 2024-10-04 22:30:10 +02:00
ansgarz
10633eb856 release: version = 0.38.0 2024-10-04 22:30:08 +02:00
ansgarz
da912dacf3 [skip ci] remove s3 verification from syspec & aws sdk s3 dependency 2024-10-04 22:29:26 +02:00
ansgarz
01ef388663 snapshot: 0.37.2-SNAPSHOT 2024-10-04 15:04:49 +02:00
ansgarz
4cf36605c1 release: version = 0.37.1 2024-10-04 15:04:48 +02:00
ansgarz
9affdbe04f [skip ci] add gradle task installbinaries 2024-10-04 15:04:13 +02:00
ansgarz
b546b94410 bump version to: 0.37.1-SNAPSHOT 2024-09-30 22:08:56 +02:00
ansgarz
2ee577a36f release: 0.37.0 2024-09-30 22:08:56 +02:00
ansgarz
74f56aed9c update some deps 2024-09-30 22:06:46 +02:00
78b238928b [skip ci] remove upower and dependencies to improve vm performance 2024-09-20 14:39:49 +02:00
ansgarz
bef0fff652 bump version to: 0.36.1-SNAPSHOT 2024-09-12 22:00:42 +02:00
ansgarz
8f27fde09c release: 0.36.0 2024-09-12 22:00:42 +02:00
ansgarz
9630e23ede add outTrimmed & useHomeDirAsWorkingDir for LocalProcessor 2024-09-12 20:33:10 +02:00
ansgarz
e8c0c97dbe [skip ci] fix test_incl_containtertests.run.xml 2024-09-12 20:12:25 +02:00
ansgarz
72af2db838 various refactorings 2024-09-06 22:43:55 +02:00
ansgarz
bb9146b542 [skip ci] add doc about modules 2024-09-06 22:30:01 +02:00
ansgarz
343c339a5a bump version to: 0.35.3-SNAPSHOT 2024-09-05 09:27:45 +02:00
ansgarz
44f788eb08 release: 0.35.2 2024-09-05 09:27:45 +02:00
ansgarz
41ab214a43 chg UbuntuProv to open class with public constructor 2024-09-05 09:27:20 +02:00
ansgarz
34c5101689 [skip ci] update ForDevelopers.md 2024-08-29 19:18:26 +02:00
ansgarz
30d12734fb refactor unsafe operators & method installHugoByDeb 2024-08-27 09:14:18 +02:00
ansgarz
d31ffd07b7 refactor installGraalVM 2024-08-20 17:30:33 +02:00
ansgarz
09c6de5318 add test installK9s 2024-08-20 17:15:51 +02:00
ansgarz
6f5560274d [skip ci] update README.md and docs 2024-08-16 17:31:12 +02:00
ansgarz
de7c1225b9 move VSCode installation from IDE desktop to office desktop 2024-08-16 15:41:54 +02:00
ansgarz
e14db18eb7 bump version to: 0.35.2-SNAPSHOT 2024-08-16 11:56:38 +02:00
ansgarz
2ebe84d42a release: 0.35.1 2024-08-16 11:56:38 +02:00
ansgarz
469f864339 refactorings, reformats, minor fixes, update docs 2024-08-16 11:53:46 +02:00
277302d0ee add k9s to server 2024-08-09 17:58:18 +02:00
43b7e83187 Merge branch 'main' of ssh://repo.prod.meissa.de:2222/meissa/provs 2024-08-06 14:39:22 +02:00
90d5a96ce8 [Skip-CI] Fix mastodon add website link 2024-08-06 14:39:13 +02:00
4befbd1017 [skip ci] Fix in logic 2024-07-31 10:11:58 +02:00
4bd18fcdf8 [skip ci] Added GraalVM installation to DevOps section, Test DevOpsKtTest/installGraalVM integration prooven 2024-07-29 19:48:09 +02:00
ansgarz
1b7e2824ce [skip ci] update README.md 2024-07-12 20:05:19 +02:00
ansgarz
fd0440fc2f [skip ci] improve test_install_and_configure_Gopass_and_GopassBridgeJsonApi 2024-07-12 19:55:20 +02:00
ansgarz
3217fa95bd [skip ci] update doc 2024-07-12 19:15:23 +02:00
ansgarz
ff331a45ee update gopassBridgeJsonApi version 1.15.5 to 1.15.13 2024-07-12 10:38:24 +02:00
ansgarz
812ae47d80 update gopass version 1.15.5 to 1.15.13 2024-07-12 10:08:25 +02:00
ansgarz
6e58053e1b remove openjdk-8-jdk and openjdk-11-jdk from IDE installation 2024-07-12 09:54:56 +02:00
ansgarz
4fff5257e0 [skip ci] tmp remove nextcloud-client installation 2024-07-05 22:46:04 +02:00
ansgarz
4e50537b39 fix tests 2024-07-05 22:23:59 +02:00
ansgarz
582a830a80 [skip ci] add cmts 2024-07-05 22:02:36 +02:00
ansgarz
d1693268f3 fix test_reopeing_ssh_session_succeeds 2024-07-05 21:59:48 +02:00
ansgarz
b24e4ba36c fix sha256sum for installKubectl 2024-07-05 21:45:02 +02:00
bom
ccea3c1c53 Add traefik annotation 2024-06-28 10:06:54 +02:00
a336838af8 Merge branch 'main' of ssh://repo.prod.meissa.de:2222/meissa/provs 2024-05-30 13:20:52 +02:00
6a0027fb64 Install in IDE 2024-05-30 13:20:42 +02:00
494e1bd8d6 Finish hugo install and test 2024-05-30 13:20:23 +02:00
49ec8462f0 Add Hugo install and test 2024-05-30 13:19:43 +02:00
ansgarz
7307e39ff6 update images for integration tests to ubuntu 22.04 2024-05-24 11:34:45 +02:00
ansgarz
8f90fa9d86 update images for integration tests to ubuntu 22.04 2024-05-24 11:23:47 +02:00
ansgarz
726cd5c01a [skip ci] adjust module names for tests 2024-05-24 11:04:21 +02:00
ansgarz
e2b7732a5e add installation of adduser for integration tests 2024-05-24 10:49:13 +02:00
bom
b8b7091ec2 Update deprecated ingress.class 2024-05-17 13:15:34 +02:00
bom
e568b5aa82 Server: Fix hetzner csi resource paths 2024-05-10 14:46:29 +02:00
bom
80476532d7 Server: Remove obsolete resources 2024-05-10 14:15:59 +02:00
bom
5bd824dee5 Server: Add support for hetzner csi with encryption 2024-05-10 13:44:20 +02:00
f0fa8d5ca5 [Skip-CI] Added doc, "Howto update gradle wrapper" 2024-03-15 16:16:15 +01:00
ansgarz
9d3f43975b update Kotlin version 2024-03-08 23:02:15 +01:00
989f80c41f bump version to: 0.35.1-SNAPSHOT 2024-03-08 11:14:14 +01:00
965f2e3101 release: 0.35.0 2024-03-08 11:14:14 +01:00
a20b1a9144 refactor kubeconform, installpath, check, test 2024-03-08 11:12:02 +01:00
ansgarz
771d7f7b89 bump version to: 0.34.1-SNAPSHOT 2024-03-05 23:34:15 +01:00
ansgarz
055d302022 release: 0.34.0 2024-03-05 23:34:15 +01:00
ansgarz
d187a8423c add kubeconform 2024-03-05 23:33:54 +01:00
7a506e07d4 Merge remote-tracking branch 'origin/main' 2024-02-28 10:12:07 +01:00
497fd9a45d fix scripts 2024-02-28 10:11:58 +01:00
ansgarz
c0e64096a8 bump version to: 0.33.2-SNAPSHOT 2024-02-24 20:29:03 +01:00
ansgarz
ba0f58b02d release: 0.33.1 2024-02-24 20:29:03 +01:00
ansgarz
028543df53 update logback 2024-02-24 20:28:39 +01:00
ansgarz
dd9e7b71b2 bump version to: 0.33.1-SNAPSHOT 2024-02-24 19:33:59 +01:00
ansgarz
8c6f25598d release: 0.33.0 2024-02-24 19:33:59 +01:00
ansgarz
d4115992b0 bump version to: 0.32.3-SNAPSHOT 2024-02-24 18:57:32 +01:00
ansgarz
4cf3ab358c release: 0.32.2 2024-02-24 18:57:32 +01:00
ansgarz
d0e88c3bf4 bump version to: 0.32.2-SNAPSHOT 2024-02-24 18:51:08 +01:00
ansgarz
f9c25a0e1a release: 0.32.1 2024-02-24 18:51:08 +01:00
ansgarz
8191096794 add release_main_branch to build.py 2024-02-24 18:50:40 +01:00
ansgarz
1d64b4400e bump version to: 0.32.1-SNAPSHOT 2024-02-24 18:43:38 +01:00
ansgarz
d43057bf9d release: 0.32.0 2024-02-24 18:43:38 +01:00
ansgarz
bc38779b25 bump version to: 0.31.2-SNAPSHOT 2024-02-24 18:31:40 +01:00
ansgarz
80ae171052 release: 0.31.1 2024-02-24 18:31:40 +01:00
ansgarz
2bb986f80f make deprovisionK3sInfra idempotent 2024-02-24 18:31:07 +01:00
bom
bcc89ef408 bump version to: 0.31.1-SNAPSHOT 2024-02-16 12:08:59 +01:00
bom
20db8c4aca release: 0.31.0 2024-02-16 12:08:59 +01:00
ansgarz
e85fa88bc4 update traefik.yaml 2024-02-16 11:58:26 +01:00
ansgarz
7f2ebcd6e9 update traefik.yaml 2024-02-16 11:32:32 +01:00
ansgarz
06a7cab974 bump version to: 0.30.1-SNAPSHOT 2024-02-16 10:40:07 +01:00
ansgarz
1a1d7c2f6f release: 0.30.0 2024-02-16 10:40:07 +01:00
ansgarz
eccf61b3d6 remove k3d 2024-02-16 10:37:57 +01:00
ansgarz
02ce6336a2 update k3s-install.sh 2024-02-16 10:33:43 +01:00
bom
507475f40e Update k3s version 2024-02-16 09:59:09 +01:00
33b38081d2 Add pytest to python provisioning 2024-02-16 09:59:09 +01:00
ansgarz
38ff640b00 bump version to: 0.29.14-SNAPSHOT 2024-02-02 12:03:20 +01:00
ansgarz
fa9c570186 release: 0.29.13 2024-02-02 12:03:20 +01:00
ansgarz
2cb39a82b8 fix missing function run in build.py 2024-02-02 12:03:04 +01:00
ansgarz
a4c561649a fix workaround for pyb / gopass prompt 2024-02-02 11:59:11 +01:00
ansgarz
1ae1a931b5 bump version to: 0.29.13-SNAPSHOT 2024-02-02 11:57:53 +01:00
ansgarz
3be59e887d release: 0.29.12 2024-02-02 11:57:53 +01:00
ansgarz
88e2cb5962 add info & workaround for pyb / gopass prompt 2024-02-02 11:57:20 +01:00
ansgarz
e039a68241 pin image: domaindrivenarchitecture/ddadevops-kotlin:4.10.7 & cleanup 2024-01-12 10:24:26 +01:00
ansgarz
2fa6e106c7 bump version to: 0.29.12-SNAPSHOT 2024-01-12 09:56:02 +01:00
ansgarz
97b5fdceb9 release: 0.29.11 2024-01-12 09:56:02 +01:00
ansgarz
6b89e4e928 bump version to: 0.29.11-SNAPSHOT 2023-12-15 10:41:01 +01:00
ansgarz
6cf5e75284 release: 0.29.10 2023-12-15 10:41:01 +01:00
ansgarz
099a6f1cee [skip ci] re-order steps 2023-12-15 10:38:52 +01:00
ansgarz
948516aee7 set RELEASE_ARTIFACT_TOKEN generic 2023-12-15 10:37:17 +01:00
ansgarz
647cfe5335 update .gitlab-ci.yml pin docker 2023-12-15 10:25:15 +01:00
ansgarz
73f4d31459 update .gitlab-ci.yml using docker:latest add - "export RELEASE_ARTIFACT_TOKEN" to stage package 2023-12-15 10:20:21 +01:00
ansgarz
42db8d8c92 update .gitlab-ci.yml 2023-12-15 10:07:45 +01:00
ansgarz
5b6d94851e bump version to: 0.29.10-SNAPSHOT 2023-12-15 09:55:07 +01:00
ansgarz
b268843ad4 release: 0.29.9 2023-12-15 09:55:07 +01:00
ansgarz
790a5e7957 update .gitlab-ci.yml 2023-12-15 09:54:52 +01:00
ansgarz
6b851cf783 bump version to: 0.29.9-SNAPSHOT 2023-12-15 09:42:33 +01:00
ansgarz
84132db8f3 release: 0.29.8 2023-12-15 09:42:33 +01:00
ansgarz
35f81320c1 update .gitlab-ci.yml 2023-12-15 09:42:16 +01:00
ansgarz
14787b6f0f bump version to: 0.29.8-SNAPSHOT 2023-12-15 08:58:42 +01:00
ansgarz
56334208be release: 0.29.7 2023-12-15 08:58:42 +01:00
ansgarz
52a6582abe add RELEASE_ARTIFACT_TOKEN to build 2023-12-15 08:58:15 +01:00
ansgarz
ac903c9f37 bump version to: 0.29.7-SNAPSHOT 2023-12-13 19:34:40 +01:00
ansgarz
804fe0c83c release: 0.29.6 2023-12-13 19:34:40 +01:00
ansgarz
40e2e3cd97 Revert "remove ci workaround manually updating ddadevopsbuild"
This reverts commit fc9cb72f1b.
2023-12-13 19:34:23 +01:00
ansgarz
727b53aff9 bump version to: 0.29.6-SNAPSHOT 2023-12-13 19:31:24 +01:00
ansgarz
1d12ea9c99 release: 0.29.5 2023-12-13 19:31:24 +01:00
ansgarz
fc9cb72f1b remove ci workaround manually updating ddadevopsbuild 2023-12-13 19:30:43 +01:00
ansgarz
d4f08cedc2 bump version to: 0.29.5-SNAPSHOT 2023-11-25 18:39:46 +01:00
ansgarz
873a10c76f release: 0.29.4 2023-11-25 18:39:46 +01:00
ansgarz
03a05a990a rename RELEASE_TOKEN to MEISSA_PUBLISH_PACKAGE_TOKEN 2023-11-25 18:39:27 +01:00
ansgarz
7f47c07b4d bump version to: 0.29.4-SNAPSHOT 2023-11-25 17:49:54 +01:00
ansgarz
828b2684c7 release: 0.29.3 2023-11-25 17:49:54 +01:00
ansgarz
ebea6dfad3 [skip ci] re-enable test in ci 2023-11-25 17:49:36 +01:00
ansgarz
717ddc01ae set RELEASE_ARTIFACT_TOKEN in .gitlab-ci.yml 2023-11-25 17:41:46 +01:00
ansgarz
37d2f4ff71 bump version to: 0.29.3-SNAPSHOT 2023-11-25 13:18:23 +01:00
ansgarz
d66a79b299 release: 0.29.2 2023-11-25 13:18:23 +01:00
ansgarz
d12633e43f enforce upgrade ddadevops in gitlab ci 2023-11-25 13:18:11 +01:00
ansgarz
9878aef9ae bump version to: 0.29.2-SNAPSHOT 2023-11-25 13:05:45 +01:00
ansgarz
7107fa7e5d release: 0.29.1 2023-11-25 13:05:45 +01:00
ansgarz
5cc9b32bf4 add tmp upgrade ddadevops to gitlab ci 2023-11-25 13:05:17 +01:00
ansgarz
c22f943dee bump version to: 0.29.1-SNAPSHOT 2023-11-25 12:09:27 +01:00
ansgarz
7ffff3ab13 release: 0.29.0 2023-11-25 12:09:27 +01:00
ansgarz
9eef8e9f04 [skip ci] update comments 2023-11-25 12:07:50 +01:00
ansgarz
35e849783b replace gradle release task by pyb publish_release in gitlab ci 2023-11-25 12:04:32 +01:00
ansgarz
5ed6187172 temporarily disable test on gitlab ci 2023-11-25 11:41:24 +01:00
ansgarz
997e6d8407 fix build.py 2023-11-25 11:39:13 +01:00
ansgarz
f6ba3c9117 bump version to: 0.28.4-SNAPSHOT 2023-11-25 11:25:16 +01:00
ansgarz
3903cf4a71 release: 0.28.3 2023-11-25 11:25:16 +01:00
ansgarz
4eafbce5f4 bump version to: 0.28.3-SNAPSHOT 2023-11-25 11:21:38 +01:00
ansgarz
98aa1306bf release: 0.28.2 2023-11-25 11:21:38 +01:00
ansgarz
48e9b74b37 update build.py 2023-11-24 23:10:02 +01:00
ansgarz
e060d584e9 version bump 2023-11-15 18:30:16 +01:00
ansgarz
a29d024ea3 Version 0.28.1 2023-11-15 18:28:31 +01:00
ansgarz
42c92915f8 move task package from build.gradle to build.py 2023-11-15 18:06:42 +01:00
ansgarz
59163710af fix tests 2023-11-09 09:41:11 +01:00
ansgarz
e373d327f3 remove ci debugging code 2023-11-08 18:22:09 +01:00
fe6e48f6dd [skip-ci]install asciinema with pip 2023-11-07 09:44:22 +01:00
07f7b5a6de improve doc 2023-10-20 14:53:07 +02:00
ansgarz
05450fed46 add python package inflection 2023-09-09 13:05:39 +02:00
ansgarz
419bdcd5fc [skip ci] chg docs 2023-09-09 13:01:28 +02:00
ansgarz
5572aa87ba [skip ci] chg docs 2023-09-09 12:47:03 +02:00
ansgarz
a457c1d05e [skip ci] chg/add cmts git clone 2023-08-31 23:28:52 +02:00
e56abd0c47 [skip-ci] Update traefik version as old version was not installable anymore 2023-08-29 14:58:24 +02:00
ansgarz
57adb756ad [skip ci] 0.28.1-SNAPSHOT 2023-08-27 13:01:21 +02:00
ansgarz
1ead864760 0.28.0 2023-08-27 12:59:44 +02:00
ansgarz
efb8fc8f8d [skip ci] 0.27.1-SNAPSHOT 2023-08-27 12:56:30 +02:00
ansgarz
7bcba91fd9 [skip ci] 0.27.0 2023-08-27 12:55:37 +02:00
ansgarz
2fff923539 add port to KnownHost 2023-08-27 12:54:21 +02:00
ansgarz
111d9951ed [skip ci] rename isHostKnown -> isKnownHost 2023-08-27 12:51:57 +02:00
ansgarz
87b56fb0d2 remove old metallb-0.10.2-manifest.yaml 2023-08-27 10:22:50 +02:00
ansgarz
17d3eb3491 further refactor onlyModules 2023-08-25 13:15:51 +02:00
ansgarz
3b18318921 set version k3s and kubectl to 1.27.0 and add test 2023-08-21 22:45:52 +02:00
ansgarz
11b13feb86 0.26.2 2023-08-20 15:15:40 +02:00
ansgarz
9ceb74515d refactor & fix execution of onlyModules, add tests 2023-08-20 10:51:13 +02:00
ansgarz
f4da33dcb5 [skip ci] 0.26.2-SNAPSHOT 2023-08-19 12:08:55 +02:00
ansgarz
77e063842d 0.26.1 2023-08-19 11:53:47 +02:00
ansgarz
e86efbc888 [skip ci] 0.26.0 2023-08-18 23:26:25 +02:00
ansgarz
4452cf5d01 refactor addKnownHost 2023-08-18 23:25:21 +02:00
101 changed files with 2353 additions and 1329 deletions

.gitignore

@ -9,3 +9,4 @@
/server-config.yaml /server-config.yaml
/desktop-config.yaml /desktop-config.yaml
/syspec-config.yaml /syspec-config.yaml
/.kotlin/


@ -6,7 +6,7 @@ stages:
- release - release
.kotlin-job: &kotlin .kotlin-job: &kotlin
image: domaindrivenarchitecture/ddadevops-kotlin image: domaindrivenarchitecture/ddadevops-kotlin:4.10.7
cache: cache:
key: ${CI_COMMIT_REF_SLUG} key: ${CI_COMMIT_REF_SLUG}
paths: paths:
@ -16,6 +16,7 @@ stages:
- echo "---------- Start CI ----------" - echo "---------- Start CI ----------"
- export GRADLE_USER_HOME=`pwd`/.gradle - export GRADLE_USER_HOME=`pwd`/.gradle
- chmod +x gradlew - chmod +x gradlew
- export RELEASE_ARTIFACT_TOKEN=$MEISSA_REPO_BUERO_RW
- echo "------ commit info ---------------" - echo "------ commit info ---------------"
- echo $CI_COMMIT_TAG - echo $CI_COMMIT_TAG
- echo $CI_COMMIT_REF_NAME - echo $CI_COMMIT_REF_NAME
@ -39,19 +40,23 @@ build:
expire_in: 1 week expire_in: 1 week
variables:
DOCKER_TLS_CERTDIR: "/certs"
test: test:
stage: test stage: test
image: docker:latest image: docker:24.0.5
services: services:
- docker:dind - docker:24.0.5-dind
dependencies: dependencies:
- build - build
before_script: before_script:
- echo "---------- BEFORE -------------" - echo "---------- BEFORE -------------"
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - echo "$CI_REGISTRY_PASSWORD" | docker login $CI_REGISTRY --username $CI_REGISTRY_USER --password-stdin
script: script:
- echo "---------- TEST -------------" - echo "---------- TEST -------------"
- apk update && apk add bash openjdk11 - apk update && apk add bash openjdk11 git
- export JAVA_HOME=/usr/lib/jvm/java-11-openjdk - export JAVA_HOME=/usr/lib/jvm/java-11-openjdk
- docker build --pull -t "$CI_REGISTRY_IMAGE" . - docker build --pull -t "$CI_REGISTRY_IMAGE" .
- docker run --privileged -dit --name provs_test -v /var/run/docker.sock:/var/run/docker.sock $CI_REGISTRY_IMAGE - docker run --privileged -dit --name provs_test -v /var/run/docker.sock:/var/run/docker.sock $CI_REGISTRY_IMAGE
@ -60,7 +65,7 @@ test:
artifacts: artifacts:
when: on_failure when: on_failure
paths: paths:
- build/reports/tests/test - build/reports/*
reports: reports:
junit: build/test-results/test/TEST-*.xml junit: build/test-results/test/TEST-*.xml
@ -69,13 +74,7 @@ package:
<<: *kotlin <<: *kotlin
stage: package stage: package
script: script:
- ./gradlew -x assemble -x test jar - pyb package
- ./gradlew -x assemble -x test -x jar uberjarDesktop
- ./gradlew -x assemble -x test -x jar uberjarServer
- ./gradlew -x assemble -x test -x jar uberjarSyspec
- cd build/libs/
- find . -type f -exec sha256sum {} \; | sort > sha256sum.lst
- find . -type f -exec sha512sum {} \; | sort > sha512sum.lst
artifacts: artifacts:
paths: paths:
- build/libs/*.jar - build/libs/*.jar
@ -96,43 +95,16 @@ publish-maven-package-to-meissa:
stage: publish stage: publish
allow_failure: true allow_failure: true
script: script:
- apt-get update -y
- apt-get install -y iputils-ping ssh
- ping -c 2 repo.prod.meissa.de
- ssh-keyscan repo.prod.meissa.de
- ./gradlew -x assemble -x test publishLibraryPublicationToMeissaRepository - ./gradlew -x assemble -x test publishLibraryPublicationToMeissaRepository
release-to-gitlab:
<<: *tag_only
image: registry.gitlab.com/gitlab-org/release-cli:latest
stage: release
artifacts:
paths:
- 'build/libs/provs-desktop.jar'
- 'build/libs/provs-server.jar'
- 'build/libs/provs-syspec.jar'
- 'build/libs/sha256sum.lst'
- 'build/libs/sha512sum.lst'
script:
- apk --no-cache add curl
- |
release-cli create --name "Release $CI_COMMIT_TAG" --tag-name $CI_COMMIT_TAG \
--assets-link "{\"name\":\"provs-desktop.jar\",\"url\":\"https://gitlab.com/domaindrivenarchitecture/provs/-/jobs/${CI_JOB_ID}/artifacts/file/build/libs/provs-desktop.jar\"}" \
--assets-link "{\"name\":\"provs-server.jar\",\"url\":\"https://gitlab.com/domaindrivenarchitecture/provs/-/jobs/${CI_JOB_ID}/artifacts/file/build/libs/provs-server.jar\"}" \
--assets-link "{\"name\":\"provs-syspec.jar\",\"url\":\"https://gitlab.com/domaindrivenarchitecture/provs/-/jobs/${CI_JOB_ID}/artifacts/file/build/libs/provs-syspec.jar\"}" \
--assets-link "{\"name\":\"sha256sum.lst\",\"url\":\"https://gitlab.com/domaindrivenarchitecture/provs/-/jobs/${CI_JOB_ID}/artifacts/file/build/libs/sha256sum.lst\"}" \
--assets-link "{\"name\":\"sha512sum.lst\",\"url\":\"https://gitlab.com/domaindrivenarchitecture/provs/-/jobs/${CI_JOB_ID}/artifacts/file/build/libs/sha512sum.lst\"}" \
release-to-meissa: release-to-meissa:
<<: *kotlin <<: *kotlin
<<: *tag_only <<: *tag_only
stage: release stage: release
allow_failure: true allow_failure: true
script: script:
- ./gradlew createReleaseAndUploadAssets - pyb publish_release
after_script: after_script:
- echo "---------- End CI ----------" - echo "---------- End CI ----------"


@ -1,6 +1,6 @@
<component name="ProjectRunConfigurationManager"> <component name="ProjectRunConfigurationManager">
<configuration default="false" name="test_incl_extensive_container_tests" type="JUnit" factoryName="JUnit"> <configuration default="false" name="test_incl_extensive_container_tests" type="JUnit" factoryName="JUnit">
<module name="provs.test" /> <module name="org.domaindrivenarchitecture.provs.provs.test" />
<option name="PACKAGE_NAME" value="org" /> <option name="PACKAGE_NAME" value="org" />
<option name="MAIN_CLASS_NAME" value="" /> <option name="MAIN_CLASS_NAME" value="" />
<option name="METHOD_NAME" value="" /> <option name="METHOD_NAME" value="" />


@ -1,10 +1,11 @@
FROM ubuntu:latest # image for usage in ci pipeline
FROM ubuntu:22.04
ARG DEBIAN_FRONTEND=noninteractive ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get -y install apt-utils sudo RUN apt-get update && apt-get -y install apt-utils sudo
RUN useradd -m testuser && echo "testuser:testuserpw" | chpasswd && adduser testuser sudo RUN useradd -m testuser && echo "testuser:testuserpw" | chpasswd && usermod -aG sudo testuser
RUN echo "testuser ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/testuser RUN echo "testuser ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/testuser
USER testuser USER testuser


@ -1,20 +1,20 @@
# provs # provs
[![pipeline status](https://gitlab.com/domaindrivenarchitecture/provs/badges/master/pipeline.svg)](https://gitlab.com/domaindrivenarchitecture/provs/-/commits/master) [![pipeline status](https://gitlab.com/domaindrivenarchitecture/provs/badges/master/pipeline.svg)](https://gitlab.com/domaindrivenarchitecture/provs/-/commits/master)
[<img src="https://domaindrivenarchitecture.org/img/delta-chat.svg" width=20 alt="DeltaChat"> chat over e-mail](mailto:buero@meissa-gmbh.de?subject=community-chat) | [<img src="https://meissa-gmbh.de/img/community/Mastodon_Logotype.svg" width=20 alt="team@social.meissa-gmbh.de"> team@social.meissa-gmbh.de](https://social.meissa-gmbh.de/@team) | [Website & Blog](https://domaindrivenarchitecture.org) [<img src="https://domaindrivenarchitecture.org/img/delta-chat.svg" width=20 alt="DeltaChat"> chat over e-mail](mailto:buero@meissa-gmbh.de?subject=community-chat) | [<img src="https://meissa.de/images/parts/contact/mastodon36_hue9b2464f10b18e134322af482b9c915e_5501_filter_14705073121015236177.png" width=20 alt="M"> meissa@social.meissa-gmbh.de](https://social.meissa-gmbh.de/@meissa) | [Blog](https://domaindrivenarchitecture.org) | [Website](https://meissa.de)
## Purpose ## Purpose
provs provides cli-based tools for provs provides cli-based tools for
* provisioning a desktop (various kinds) * provisioning desktop software for different desktop types:
* basic
* office
* IDE
* provisioning a k3s server * provisioning a k3s server
* performing system checks * performing system checks
Tasks can be run locally or remotely. Tasks can be run locally or remotely.
## Status
under development - though we already set up a few IDEs and servers with provs.
## Try out ## Try out
### Prerequisites ### Prerequisites
@ -28,8 +28,9 @@ under development - though we already set up a few IDEs and servers with provs.
* Download the latest `provs-desktop.jar`,`provs-server.jar` and/or `provs-syspec.jar` from: https://gitlab.com/domaindrivenarchitecture/provs/-/releases * Download the latest `provs-desktop.jar`,`provs-server.jar` and/or `provs-syspec.jar` from: https://gitlab.com/domaindrivenarchitecture/provs/-/releases
* Preferably into `/usr/local/bin` or any other folder where executables can be found by the system * Preferably into `/usr/local/bin` or any other folder where executables can be found by the system
* Make the jar-file executable e.g. by `chmod +x provs-desktop.jar` * Make the jar-file executable e.g. by `chmod +x provs-desktop.jar`
* Check with `provs-desktop.jar -h` to show help information
#### Build the binaries ###### Build the binaries
Instead of downloading the binaries you can build them yourself Instead of downloading the binaries you can build them yourself
@ -106,6 +107,24 @@ To provision the grafana agent only to an existing k8s system, ensure that the c
provs-server.jar k3s myuser@myhost.com -o grafana provs-server.jar k3s myuser@myhost.com -o grafana
``` ```
To add the hetzner csi driver and encrypted volumes to your k3s installation, add the following to the config:
```yaml
hetzner:
hcloudApiToken:
source: "PLAIN" # PLAIN, GOPASS or PROMPT
parameter: "mypassword" # the api key for the hetzner cloud
encryptionPassphrase:
source: "PLAIN" # PLAIN, GOPASS or PROMPT
parameter: "mypassword" # the encryption passphrase for created volumes
```
To provision the grafana agent only to an existing k8s system, ensure that the config (as above) is available and execute:
```bash
provs-server.jar k3s myuser@myhost.com -o grafana
```
Reprovisioning the server can easily be done using the -r or --reprovision option. Reprovisioning the server can easily be done using the -r or --reprovision option.
```bash ```bash
@ -144,7 +163,9 @@ Or to get help for subcommands e.g.
provs-desktop.jar ide -h provs-desktop.jar ide -h
provs-server.jar k3s -h provs-server.jar k3s -h
``` ```
## Development & mirrors ## Development & mirrors
Development happens at: https://repo.prod.meissa.de/meissa/provs Development happens at: https://repo.prod.meissa.de/meissa/provs
Mirrors are: Mirrors are:
@ -152,3 +173,17 @@ Mirrors are:
* https://github.com/DomainDrivenArchitecture/provs * https://github.com/DomainDrivenArchitecture/provs
For more details about our repository model see: https://repo.prod.meissa.de/meissa/federate-your-repos For more details about our repository model see: https://repo.prod.meissa.de/meissa/federate-your-repos
## Developer information
To use the provs framework, add the required dependency to your project; then you can implement your own tasks, e.g.:
```kotlin
import org.domaindrivenarchitecture.provs.framework.core.Prov
fun Prov.myCustomTask() = task {
cmd("echo \"Hello world!\"")
}
```
See also [ForDevelopers.md](doc/ForDevelopers.md)
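As a hedged usage sketch (not part of the original README): once such a task is defined, it can be triggered from a `main` function. `local()` is the factory used elsewhere in the provs documentation; the exact import path for it is an assumption here and may differ between provs versions.
```kotlin
import org.domaindrivenarchitecture.provs.framework.core.Prov
// the import for local() is omitted - its package may differ depending on the provs version

fun Prov.myCustomTask() = task {
    cmd("echo \"Hello world!\"")
}

fun main() {
    // runs the custom task on the local machine and prints the per-task results
    local().myCustomTask()
}
```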


@ -1,5 +1,5 @@
buildscript { buildscript {
ext.kotlin_version = "1.7.20" ext.kotlin_version_no = "1.8.20"
ext.CI_PROJECT_ID = System.env.CI_PROJECT_ID ext.CI_PROJECT_ID = System.env.CI_PROJECT_ID
repositories { repositories {
@ -8,17 +8,16 @@ buildscript {
} }
plugins { plugins {
id "org.jetbrains.kotlin.jvm" version "$kotlin_version" id "org.jetbrains.kotlin.jvm" version "$kotlin_version_no"
id 'org.jetbrains.kotlin.plugin.serialization' version "$kotlin_version" id 'org.jetbrains.kotlin.plugin.serialization' version "$kotlin_version_no"
id "java" id "java"
id "java-test-fixtures" id "java-test-fixtures"
} }
apply plugin: "maven-publish" apply plugin: "maven-publish"
version = "0.38.6-SNAPSHOT"
group = "org.domaindrivenarchitecture.provs" group = "org.domaindrivenarchitecture.provs"
version = "0.25.1-SNAPSHOT"
repositories { repositories {
@ -36,11 +35,6 @@ java {
} }
} }
jar {
duplicatesStrategy(DuplicatesStrategy.EXCLUDE)
}
test { test {
// set properties for the tests // set properties for the tests
def propertiesForTests = ["testdockerwithoutsudo"] def propertiesForTests = ["testdockerwithoutsudo"]
@ -69,25 +63,20 @@ compileTestJava.options.debugOptions.debugLevel = "source,lines,vars"
dependencies { dependencies {
api("org.jetbrains.kotlin:kotlin-stdlib:$kotlin_version") api("org.jetbrains.kotlin:kotlin-stdlib:$kotlin_version_no")
api("org.jetbrains.kotlinx:kotlinx-serialization-json:1.3.2")
api("org.jetbrains.kotlinx:kotlinx-serialization-core:1.3.2")
api("org.jetbrains.kotlinx:kotlinx-cli:0.3.4") api("org.jetbrains.kotlinx:kotlinx-cli:0.3.4")
api('com.charleskorn.kaml:kaml:0.54.0') api('com.charleskorn.kaml:kaml:0.54.0')
api("org.slf4j:slf4j-api:1.7.36") api("org.slf4j:slf4j-api:1.7.36")
api('ch.qos.logback:logback-classic:1.2.11') api('ch.qos.logback:logback-classic:1.4.14')
api('ch.qos.logback:logback-core:1.2.11') api('ch.qos.logback:logback-core:1.4.14')
implementation("org.jetbrains.kotlin:kotlin-reflect:$kotlin_version") implementation("com.hierynomus:sshj:0.38.0")
implementation("com.hierynomus:sshj:0.32.0")
implementation("aws.sdk.kotlin:s3:0.17.1-beta")
testFixturesApi("org.junit.jupiter:junit-jupiter-api:5.8.2")
testFixturesApi('io.mockk:mockk:1.12.3') testFixturesApi('io.mockk:mockk:1.12.3')
testFixturesApi("org.junit.jupiter:junit-jupiter-api:5.8.2")
testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine:5.8.2") testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine:5.8.2")
} }
@ -172,6 +161,22 @@ tasks.register('installlocally') {
} }
// create binaries and install into /usr/local/bin
// PREREQUISITE: graalvm / native-image must be installed - see https://www.graalvm.org/
tasks.register('binariesInstall') {
dependsOn(uberjarServer, uberjarDesktop, uberjarSyspec)
doLast {
println "Building binaries ..."
exec { commandLine("sh", "-c", "cd build/libs/ && native-image --no-fallback --initialize-at-build-time=kotlin.DeprecationLevel -H:+UnlockExperimentalVMOptions -H:IncludeResources=\".*org/domaindrivenarchitecture/provs/.*(conf|ssh_config|sshd_config|sh|vimrc|xml|yaml)\" -jar provs-desktop.jar") }
exec { commandLine("sh", "-c", "cd build/libs/ && native-image --no-fallback --initialize-at-build-time=kotlin.DeprecationLevel -H:+UnlockExperimentalVMOptions -H:IncludeResources=\".*org/domaindrivenarchitecture/provs/.*(conf|ssh_config|sshd_config|sh|vimrc|xml|yaml)\" -jar provs-server.jar") }
exec { commandLine("sh", "-c", "cd build/libs/ && native-image --no-fallback --initialize-at-build-time=kotlin.DeprecationLevel -H:+UnlockExperimentalVMOptions -H:IncludeResources=\".*org/domaindrivenarchitecture/provs/.*(conf|ssh_config|sshd_config|sh|vimrc|xml|yaml)\" -jar provs-syspec.jar") }
exec { commandLine("sh", "-c", "sudo cp build/libs/provs-desktop /usr/local/bin/") }
exec { commandLine("sh", "-c", "sudo cp build/libs/provs-server /usr/local/bin/") }
exec { commandLine("sh", "-c", "sudo cp build/libs/provs-syspec /usr/local/bin/") }
}
}
// publish to repo.prod.meissa.de with task "publishLibraryPublicationToMeissaRepository" -- (using pattern "publishLibraryPublicationTo<MAVEN REPOSITORY NAME>Repository") // publish to repo.prod.meissa.de with task "publishLibraryPublicationToMeissaRepository" -- (using pattern "publishLibraryPublicationTo<MAVEN REPOSITORY NAME>Repository")
publishing { publishing {
publications { publications {
@ -200,23 +205,25 @@ publishing {
credentials(HttpHeaderCredentials) { credentials(HttpHeaderCredentials) {
name = "Authorization" name = "Authorization"
def publishPackageTokenName = "MEISSA_PUBLISH_PACKAGE_TOKEN"
if (System.getenv("CI_JOB_TOKEN") != null) { if (System.getenv("CI_JOB_TOKEN") != null) {
def tokenFromEnv = System.getenv("RELEASE_TOKEN") def tokenFromEnv = System.getenv(publishPackageTokenName)
if (tokenFromEnv == null) { if (tokenFromEnv == null) {
println "Error: RELEASE_TOKEN not found" println "Error: $publishPackageTokenName not found"
} else { } else {
value = "token " + tokenFromEnv value = "token " + tokenFromEnv
println "RELEASE_TOKEN found - " println "$publishPackageTokenName found - "
} }
} else { } else {
// use project-property (define e.g. in "~/.gradle/gradle.properties") when not running in ci // use project-property (define e.g. in "~/.gradle/gradle.properties") when not running in ci
// you can create a token in gitea "Profile and Settings ... > Settings > Applications", Token Name, Select scopes (write:package) > "Generate Token" // you can create a token in gitea "Profile and Settings ... > Settings > Applications", Token Name, Select scopes (write:package) > "Generate Token"
if (!project.hasProperty("RELEASE_TOKEN")) { if (!project.hasProperty(publishPackageTokenName)) {
// if RELEASE_TOKEN is missing, provide a dummy in order to avoid error "Could not get unknown property 'RELEASE_TOKEN' for Credentials [header: Authorization]" for other gradle tasks // if token is missing, provide a dummy in order to avoid error "Could not get unknown property 'MEISSA_PUBLISH_PACKAGE_TOKEN' for Credentials [header: Authorization]" for other gradle tasks
ext.RELEASE_TOKEN = "RELEASE_TOKEN not provided in file \".gradle/gradle.properties\"" ext.MEISSA_PUBLISH_PACKAGE_TOKEN = "Token $publishPackageTokenName not provided in file \".gradle/gradle.properties\""
println "Error: RELEASE_TOKEN not found" println "Error: Token $publishPackageTokenName not found"
} else {
value = "token " + project.property(publishPackageTokenName)
} }
value = "token $RELEASE_TOKEN"
} }
} }
@ -226,39 +233,3 @@ publishing {
} }
} }
} }
tasks.register('createReleaseAndUploadAssets') {
dependsOn(uberjarServer, uberjarDesktop, uberjarSyspec)
doLast {
def token = project.properties.get("RELEASE_TOKEN") ?: System.getenv("RELEASE_TOKEN")
if (token == null) {
throw new GradleException('No token found.')
}
def output1 = new ByteArrayOutputStream()
exec {
standardOutput = output1
def TAG = project.version
commandLine("sh", "-c", "curl -X 'POST' 'https://repo.prod.meissa.de/api/v1/repos/meissa/provs/releases' -H 'accept: application/json' -H 'Content-Type: application/json' -d '{ \"body\": \"Provides jar-files for release $TAG\\nAttention: The \\\"Source Code\\\"-files below are not up-to-date!\", \"tag_name\": \"$TAG\" }' -H \"Authorization: token $token\"")
}
def matches = output1 =~ /\{"id":(\d+?),/
if (!matches) {
throw new GradleException('id of release could not be parsed in: ' + output1)
}
def releaseId = matches.group(1)
println "Release=$releaseId"
def releaseApiUrl = "https://repo.prod.meissa.de/api/v1/repos/meissa/provs/releases"
exec { commandLine("sh", "-c", "find build/libs/ -type f -exec sha256sum {} \\; | sort > build/libs/sha256sum.lst") }
exec { commandLine("sh", "-c", "find build/libs/ -type f -exec sha512sum {} \\; | sort > build/libs/sha512sum.lst") }
exec { commandLine("sh", "-c", "curl -X 'POST' '$releaseApiUrl/$releaseId/assets' -H 'accept: application/json' -H \"Authorization: token $token\" -H 'Content-Type: multipart/form-data' -F 'attachment=@build/libs/provs-desktop.jar;type=application/x-java-archive'") }
exec { commandLine("sh", "-c", "curl -X 'POST' '$releaseApiUrl/$releaseId/assets' -H 'accept: application/json' -H \"Authorization: token $token\" -H 'Content-Type: multipart/form-data' -F 'attachment=@build/libs/provs-server.jar;type=application/x-java-archive'") }
exec { commandLine("sh", "-c", "curl -X 'POST' '$releaseApiUrl/$releaseId/assets' -H 'accept: application/json' -H \"Authorization: token $token\" -H 'Content-Type: multipart/form-data' -F 'attachment=@build/libs/provs-syspec.jar;type=application/x-java-archive'") }
exec { commandLine("sh", "-c", "curl -X 'POST' '$releaseApiUrl/$releaseId/assets' -H 'accept: application/json' -H \"Authorization: token $token\" -H 'Content-Type: multipart/form-data' -F 'attachment=@build/libs/sha256sum.lst;type=text/plain'") }
exec { commandLine("sh", "-c", "curl -X 'POST' '$releaseApiUrl/$releaseId/assets' -H 'accept: application/json' -H \"Authorization: token $token\" -H 'Content-Type: multipart/form-data' -F 'attachment=@build/libs/sha512sum.lst;type=text/plain'") }
}
}

build.py

@ -1,16 +1,36 @@
from os import environ import os
from subprocess import run from subprocess import run
from pybuilder.core import init, task from pybuilder.core import task, init
from ddadevops import * from ddadevops import *
default_task = "dev"
name = "provs" name = "provs"
PROJECT_ROOT_PATH = "." PROJECT_ROOT_PATH = "."
version = "0.38.3-dev"
@init @init
def initialize(project): def initialize0(project):
"""
workaround to avoid prompt for gopass if no artifacts need to be uploaded
usage: with option "-E ng" , e.g. "pyb -E artifacts patch_local"
"""
os.environ["RELEASE_ARTIFACT_TOKEN"] = "dummy" # avoids prompt for RELEASE_ARTIFACT_TOKEN
@init(environments=["artifacts"])
def initialize1(project):
"""
prompt for gopass if artifacts need to be uploaded
usage: with option "-E artifacts" , e.g. "pyb -E artifacts dev"
"""
del os.environ["RELEASE_ARTIFACT_TOKEN"]
@init
def initialize2(project):
input = { input = {
"name": name, "name": name,
"module": "notused", "module": "notused",
@ -18,55 +38,104 @@ def initialize(project):
"project_root_path": PROJECT_ROOT_PATH, "project_root_path": PROJECT_ROOT_PATH,
"build_types": [], "build_types": [],
"mixin_types": ["RELEASE"], "mixin_types": ["RELEASE"],
"release_main_branch": "main",
"release_primary_build_file": "build.gradle", "release_primary_build_file": "build.gradle",
"release_secondary_build_files": [], "release_secondary_build_files": ["build.py"],
# release artifacts
"release_artifact_server_url": "https://repo.prod.meissa.de",
"release_organisation": "meissa",
"release_repository_name": name,
"release_artifacts": [
"build/libs/provs-server.jar",
"build/libs/provs-desktop.jar",
"build/libs/provs-syspec.jar",
"build/libs/sha256sum.lst",
"build/libs/sha512sum.lst",
],
} }
build = ReleaseMixin(project, input) build = ReleaseMixin(project, input)
build.initialize_build_dir() build.initialize_build_dir()
@task
def dev(project):
"""
to avoid gopass prompt set RELEASE_ARTIFACT_TOKEN e.g.:
RELEASE_ARTIFACT_TOKEN=xxx pyb dev
"""
run("./gradlew assemble", shell=True)
@task
def build(project):
run("./gradlew assemble", shell=True)
@task @task
def patch(project): def patch(project):
linttest(project, "PATCH") """
updates version to next patch level, creates a tag, creates new SNAPSHOT version,
commits primary build file (build.gradle) and pushes to remote
"""
increase_version_number(project, "PATCH")
release(project) release(project)
@task @task
def minor(project): def minor(project):
linttest(project, "MINOR") """
updates version to next minor level, creates a tag, creates new SNAPSHOT version,
commits primary build file (build.gradle) and pushes to remote
"""
increase_version_number(project, "MINOR")
release(project) release(project)
@task @task
def major(project): def major(project):
linttest(project, "MAJOR") """
updates version to next major level, creates a tag, creates new SNAPSHOT version,
commits primary build file (build.gradle) and pushes to remote
"""
increase_version_number(project, "MAJOR")
release(project) release(project)
@task
def dev(project):
linttest(project, "NONE")
@task
def prepare(project):
build = get_devops_build(project)
build.prepare_release()
@task @task
def tag(project): def tag(project):
build = get_devops_build(project) build = get_devops_build(project)
build.tag_bump_and_push_release() build.tag_bump_and_push_release()
@task
def build(project):
print("---------- build stage ----------")
run("./gradlew assemble", shell=True)
@task
def release(project): def release(project):
prepare(project) build = get_devops_build(project)
build.prepare_release()
tag(project) tag(project)
def linttest(project, release_type):
build(project) @task
def package(project):
run("./gradlew assemble -x test jar", shell=True)
run("./gradlew assemble -x test uberjarDesktop", shell=True)
run("./gradlew assemble -x test uberjarServer", shell=True)
run("./gradlew assemble -x test uberjarSyspec", shell=True)
run("cd build/libs/ && find . -type f -exec sha256sum {} \; | sort > sha256sum.lst", shell=True)
run("cd build/libs/ && find . -type f -exec sha512sum {} \; | sort > sha512sum.lst", shell=True)
@task
def publish_release(project):
""" creates a release in repo.meissa and uploads artifacts (jar-files and checksum files) """
build = get_devops_build(project)
build.publish_artifacts()
@task
def inst(project):
run("./gradlew inst", shell=True)
def increase_version_number(project, release_type):
build = get_devops_build(project)
build.update_release_type(release_type)


@ -1,22 +1,8 @@
# Information for developers This page provides information for developers.
## Create a provs jar-file # Tasks
* Clone this repo ## What is a task ?
* Build the jar-file by `./gradlew uberjarDesktop`
* In folder build/libs you'll find the file `provs-desktop.jar`
This uberjar is a Java jar-file including all required dependencies.
## Task
```kotlin
fun Prov.provisionK8s() = task { /* ... code and subtasks come here ... */ }
```
If you're having a deeper look into the provs code, you'll see regularly a task definition like this and might wonder ...
### What is a task ?
A task is the **basic execution unit** in provs. When executed, each task produces exactly one result (line) with either success or failure. A task is the **basic execution unit** in provs. When executed, each task produces exactly one result (line) with either success or failure.
@ -26,9 +12,108 @@ The success or failure is computed automatically in the following way:
* a task defined with **optional** (i.e. `= optional { /* ... */ }` always returns success (even if there are failing subtasks) * a task defined with **optional** (i.e. `= optional { /* ... */ }` always returns success (even if there are failing subtasks)
* **requireLast** defines a task which must provide an explicit result and solely this result counts for success calculation * **requireLast** defines a task which must provide an explicit result and solely this result counts for success calculation
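To make these three rules concrete, here is a minimal sketch (not from the original docs): the shell commands are placeholders, and it assumes `optional` and `requireLast` are used in the same position as `task`, as described above.
```kotlin
// succeeds only if both commands succeed (and also if no sub-task is called at all)
fun Prov.installTools() = task {
    cmd("sudo apt-get update")
    cmd("sudo apt-get install -y curl")
}

// always reports success, even if the nested installation fails
fun Prov.installNiceToHaveTools() = optional {
    cmd("sudo apt-get install -y some-optional-tool")   // hypothetical package name
}

// only the explicit last result counts for the success calculation
fun Prov.checkCurlInstalled() = requireLast {
    cmd("echo checking for curl ...")   // the result of this call is not decisive on its own
    cmd("which curl")                   // this final, explicit result decides success or failure
}
```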
## Task declaration
### Recommended way
A task can be declared by
```kotlin
fun Prov.myCustomTask() = task { /* ... code and subtasks come here ... */ }
// e.g.
fun Prov.myEchoTask() = task {
cmd("echo hello world!")
}
```
The task will succeed if all sub-tasks (i.e. tasks called during execution) have succeeded, or if no sub-task was called at all.
### Alternative ways
The following ways are equivalent but are more verbose:
```kotlin
// Redundant declaration of the return type (ProvResult), which is already declared by task
fun Prov.myCustomTask(): ProvResult = task { /* ... code and subtasks come here ... */ }
// Redundant parentheses behind task
fun Prov.myCustomTask() = task() { /* ... code and subtasks come here ... */ }
// Redundant definition of the task name, but could be used to output a different task name
fun Prov.myCustomTask() = task("myCustomTask") { /* ... code and subtasks come here ... */ }
// Functionally equal, but with additional curly brackets
fun Prov.myCustomTask() { task { /* ... code and subtasks come here ... */ } }
```
Btw, the following lines WILL NOT work as expected.
Due to too much lambda nesting, the code within the task is NOT executed:
```kotlin
fun Prov.myCustomTask() = { task { /* ... code and subtasks come here ... */ } }
fun Prov.myCustomTask() {{ task { /* ... code and subtasks come here ... */ } }}
```
### Add custom results
If you want to add a result explicitly, you can use method `addResultToEval`.
This may be used e.g. to explicitly add an error line, like in:
```kotlin
fun Prov.myCustomTask() = task {
/* some other code ... */
addResultToEval(ProvResult(false, err = "my error msg"))
/* some other code ... */
}
```
or alternatively you can use `taskWithResult`.
#### TaskWithResult
In case you want the return value (of type `ProvResult`) of a task to be added to the evaluation,
you can use `taskWithResult` instead of `task` and return the value, e.g. like:
```kotlin
fun Prov.myEchoTask() = taskWithResult {
cmd("echo hello world!")
// ...
ProvResult(false, "Error: ... error message ...") // will be returned as the return value and included in the evaluation
}
```
IMPORTANT: the value you want to return must be placed at the end of the lambda code (as usual in functional programming)!
The following will NOT work as expected:
```kotlin
fun Prov.myEchoTask() = taskWithResult {
ProvResult(false, "Error: ... error message ...") // will be ignored
// the result from the call below (i.e. from task "cmd") will be returned by myEchoTask,
// which is redundant as its result is already included in the evaluation anyway.
cmd("echo hello world!")
}
```
### Task output
If a task is run e.g. with `local().myEchoTask()`, it will produce output like
```
> Success -- myEchoTask
---> Success -- cmd [/bin/bash, -c, echo hello world!]
```
## Call hierarchy ## Call hierarchy
In the following link you can find an example of a sequence diagram when provisioning a desktop: In the following link you can find an example of a sequence diagram when provisioning a desktop:
[ProvisionDesktopSequence.md](ProvisionDesktopSequence.md) [ProvisionDesktopSequence.md](ProvisionDesktopSequence.md)
## Create a provs jar-file
* Clone this repo
* Build the jar-file by `./gradlew uberjarDesktop`
* In folder build/libs you'll find the file `provs-desktop.jar`
This uberjar is a Java jar-file including all required dependencies.


@ -4,18 +4,20 @@
#### remove old version #### remove old version
sudo rm -rf ~/go sudo rm -rf ~/go
### download latest version and configure ### download latest version and configure
curl -OL https://go.dev/dl/$(curl 'https://go.dev/VERSION?m=text').linux-amd64.tar.gz curl -OL https://go.dev/dl/go1.21.3.linux-amd64.tar.gz
extract latest version to ~/go # extract latest version to ~/go
tar -C ~ -xzf go*.linux-amd64.tar.gz tar -C ~ -xzf go*.linux-amd64.tar.gz
APPEND='export PATH=$PATH:$HOME/go/bin' # append path
```
echo $APPEND >> $HOME/.profile (meissa) jem@meissa-ide-2023:~$ cat .bashrc.d/go.sh
PATH=$PATH:$HOME/go/bin
export PATH
```
## VScode optional - TODO!?! ## VScode optional - TODO!?!
Go extension autoinstall "Go for VS Code v0.39.1"
install gpls, div, etc.
## Testing forgejo ## Testing forgejo
full: full:

doc/Modularization.md

@ -0,0 +1,20 @@
# Modules
## Modules and their possible relations
![modularization.png](resources/modularization.png)
#### Modules
A,B,C: Modules with both domain and infrastructure layer code - common type of modules
D: Module with only domain: can sometimes make sense if only domain logic and no infrastructure logic is required
E: Module with only infrastructure: usually utility modules that just provide a collection of infrastructure functionality
#### Interactions
1. Domain calls (a function in) the infrastructure of the same module - common practice within a module
1. Domain calls (a function in) the domain of another module - common practice between modules (see the sketch below)
1. Infrastructure calls infrastructure of another module - usually not recommended
1. Domain calls infrastructure in another module - can make sense in some cases, e.g. if module D just needs some low-level function of module C. However, where possible, calling the domain of module C should be preferred
1. Domain calls infrastructure in another module, which only has infrastructure - common practice for calling utility modules, which don't have a domain.
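As a purely illustrative sketch of interactions 1 and 2 (module and function names are hypothetical, not taken from the provs code base):
```kotlin
import org.domaindrivenarchitecture.provs.framework.core.Prov

// Module A - domain layer
fun Prov.provisionEditor() = task {
    installEditor()          // (1) domain calls infrastructure of the same module
    configureSpellcheck()    // (2) domain calls the domain of another module (module B)
}

// Module A - infrastructure layer
fun Prov.installEditor() = task {
    cmd("sudo apt-get install -y vim")
}

// Module B - domain layer (details omitted)
fun Prov.configureSpellcheck() = task {
    cmd("echo 'configuring spellcheck ...'")
}
```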


@ -1,35 +0,0 @@
@startuml
autonumber
Application -> Prov: create
activate Prov
Application -> DesktopService.kt: provisionDesktop(prov, ...)
DesktopService.kt -> Install.kt: aptInstall(prov, lambda=cmd "apt install", ..)
Install.kt -> Prov: taskWithResult
activate Prov
Prov -> Prov: evaluate
activate Prov
Prov -> Prov: initProgress (bei level 0)
Prov -> Prov: progress
activate Prov
Prov -> Prov: lambda
activate Prov
Prov -> Processor: exec
deactivate Prov
Prov <-- Prov: ProvResult
deactivate Prov
Prov -> Prov: endProgress (bei level 0)
Prov -> Prov: printResults (bei level 0)
deactivate Prov
deactivate Prov
Install.kt <-- Prov: ProvResult
@enduml


@ -0,0 +1,47 @@
```plantuml
@startuml
autonumber
participant Application
participant DesktopService
participant Install
participant Prov
participant Processor
Application -> Prov: create
activate Prov
Application -> DesktopService: provisionDesktop(prov, ...)
DesktopService -> Install: prov.aptInstall()
Install -> Prov: taskWithResult( lambda = cmd("sudo apt install ...") )
activate Prov
Prov -> Prov: evaluate
activate Prov
Prov -> Prov: initProgress (if level 0)
Prov -> Prov: progress
activate Prov
Prov -> Prov: lambda
activate Prov
Prov -> Processor: exec
Prov <-- Processor: exec
deactivate Prov
deactivate Prov
Prov -> Prov: endProgress (if level 0)
Prov -> Prov: printResults (if level 0)
deactivate Prov
deactivate Prov
Install <-- Prov: ProvResult
DesktopService <-- Install
Application <-- DesktopService
@enduml
```


@ -13,6 +13,7 @@ box "application" #LightBlue
participant Application participant Application
participant CliArgumentsParser participant CliArgumentsParser
participant DesktopCliCommand participant DesktopCliCommand
participant ProvWithSudo
end box end box
box #White box #White
@ -21,8 +22,7 @@ participant "Prov (local or remote...)" as ProvInstance
end box end box
box "domain" #LightGreen box "domain" #LightGreen
participant "DesktopService\n.provisionDesktopCommand" as DesktopService1 participant "DesktopService"
participant "DesktopService\n.provisionDesktop" as DesktopService2
end box end box
box "infrastructure" #CornSilk box "infrastructure" #CornSilk
@ -36,14 +36,18 @@ Application -> CliArgumentsParser : parseCommand
Application -> DesktopCliCommand : isValid ? Application -> DesktopCliCommand : isValid ?
Application -> CliUtils : createProvInstance Application -> CliUtils : createProvInstance
ProvInstance <- CliUtils : create ProvInstance <- CliUtils : create
Application -> DesktopService1 : provisionDesktopCommand ( provInstance, desktopCliCommand )
DesktopService1 -> ConfigRepository : getConfig
DesktopService1 -> DesktopService2 : provisionDesktop( config )
DesktopService2 -> Infrastructure_functions: Various calls like: Application -> ProvWithSudo : ensureSudoWithoutPassword
DesktopService2 -> Infrastructure_functions: install ssh, gpg, git ... Application -> DesktopService : provisionDesktopCommand ( provInstance, desktopCliCommand )
DesktopService2 -> Infrastructure_functions: installVirtualBoxGuestAdditions
DesktopService2 -> Infrastructure_functions: configureNoSwappiness, ... DesktopService -> ConfigRepository : getConfig
DesktopService -> DesktopService : provisionDesktop( config )
DesktopService -> Infrastructure_functions: Various calls like:
DesktopService -> Infrastructure_functions: install ssh, gpg, git ...
DesktopService -> Infrastructure_functions: installVirtualBoxGuestAdditions
DesktopService -> Infrastructure_functions: configureNoSwappiness, ...
@enduml @enduml
``` ```


@ -2,8 +2,6 @@ This repository holds the documentation of the provs framework.
# Design principles # Design principles
For usage examples it is recommended to have a look at [provs-scripts](https://gitlab.com/domaindrivenarchitecture/provs-scripts) or [provs-ubuntu-extensions](https://gitlab.com/domaindrivenarchitecture/provs-ubuntu-extensions).
## "Implarative" ## "Implarative"
Configuration management tools are usually classified as either **imperative** or **declarative**. Configuration management tools are usually classified as either **imperative** or **declarative**.


@ -5,7 +5,7 @@ release-1.2 or release-1.2.3
I.e.: release-X.X.Z where X, Y, Z are the major, minor resp. the patch level of the release. Z can be omitted. I.e.: release-X.X.Z where X, Y, Z are the major, minor resp. the patch level of the release. Z can be omitted.
**Note:** Such kind of release tags should only be applied to commits in the master branch. **Note:** Such kind of release tags should only be applied to commits in the main branch.
``` ```
#adjust [version] #adjust [version]


@ -0,0 +1,10 @@
### Howto update gradle wrapper
1. To the *latest* version (be aware of deprecated parts in future versions):
```shell
./gradlew wrapper --gradle-version latest
```
2. To a *specific* version:
```shell
./gradlew wrapper --gradle-version 8.6
```

(binary image file added, 45 KiB - not shown)


@ -38,8 +38,8 @@ fun main(args: Array<String>) {
null null
} catch (e: FileNotFoundException) { } catch (e: FileNotFoundException) {
println( println(
"Error: File\u001b[31m ${configFileName} \u001b[0m was not found.\n" + "Error: File\u001b[31m $configFileName \u001b[0m was not found.\n" +
"Pls copy file \u001B[31m desktop-config-example.yaml \u001B[0m to file \u001B[31m ${configFileName} \u001B[0m " + "Pls copy file \u001B[31m desktop-config-example.yaml \u001B[0m to file \u001B[31m $configFileName \u001B[0m " +
"and change the content according to your needs." "and change the content according to your needs."
) )
null null


@ -2,4 +2,9 @@ package org.domaindrivenarchitecture.provs.desktop.domain
enum class DesktopOnlyModule { enum class DesktopOnlyModule {
FIREFOX, VERIFY FIREFOX, VERIFY
;
fun isIn(list: List<String>): Boolean {
return list.any { it.equals(this.name, ignoreCase = true) }
}
} }


@ -1,5 +1,7 @@
package org.domaindrivenarchitecture.provs.desktop.domain package org.domaindrivenarchitecture.provs.desktop.domain
import org.domaindrivenarchitecture.provs.desktop.domain.DesktopOnlyModule.FIREFOX
import org.domaindrivenarchitecture.provs.desktop.domain.DesktopOnlyModule.VERIFY
import org.domaindrivenarchitecture.provs.desktop.infrastructure.* import org.domaindrivenarchitecture.provs.desktop.infrastructure.*
import org.domaindrivenarchitecture.provs.framework.core.Prov import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.ubuntu.git.provisionGit import org.domaindrivenarchitecture.provs.framework.ubuntu.git.provisionGit
@ -14,14 +16,21 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.user.base.whoami
internal fun Prov.provisionDesktopCommand(cmd: DesktopCliCommand, conf: DesktopConfig) = task { internal fun Prov.provisionDesktopCommand(cmd: DesktopCliCommand, conf: DesktopConfig) = task {
validatePrecondition()
val only = cmd.onlyModules
if (only == null) {
provisionDesktop( provisionDesktop(
cmd.type, cmd.type,
conf.ssh?.keyPair(), conf.ssh?.keyPair(),
conf.gpg?.keyPair(), conf.gpg?.keyPair(),
conf.gitUserName, conf.gitUserName,
conf.gitEmail, conf.gitEmail,
cmd.onlyModules
) )
} else {
provisionOnlyModules(cmd.type, only)
}
} }
@ -39,24 +48,34 @@ internal fun Prov.provisionDesktop(
gpg: KeyPair? = null, gpg: KeyPair? = null,
gitUserName: String? = null, gitUserName: String? = null,
gitEmail: String? = null, gitEmail: String? = null,
onlyModules: List<String>?
) = task { ) = task {
validatePrecondition()
provisionBasicDesktop(gpg, ssh, gitUserName, gitEmail, onlyModules) provisionBasicDesktop(gpg, ssh, gitUserName, gitEmail)
if (desktopType == DesktopType.OFFICE) { if (desktopType == DesktopType.OFFICE) {
provisionOfficeDesktop(onlyModules) provisionOfficeDesktop()
if (onlyModules == null) {
verifyOfficeSetup() verifyOfficeSetup()
} }
}
if (desktopType == DesktopType.IDE) { if (desktopType == DesktopType.IDE) {
if (onlyModules == null) {
provisionOfficeDesktop() provisionOfficeDesktop()
provisionIdeDesktop() provisionIdeDesktop()
verifyIdeSetup() verifyIdeSetup()
} else { }
provisionIdeDesktop(onlyModules) }
internal fun Prov.provisionOnlyModules(
desktopType: DesktopType = DesktopType.BASIC,
onlyModules: List<String>
) = task {
if (FIREFOX.isIn(onlyModules)) {
installPpaFirefox()
}
if (VERIFY.isIn(onlyModules)) {
if (desktopType == DesktopType.OFFICE) {
verifyOfficeSetup()
} else if (desktopType == DesktopType.IDE) {
verifyIdeSetup()
} }
} }
} }
@ -67,61 +86,13 @@ fun Prov.validatePrecondition() {
} }
} }
fun Prov.provisionIdeDesktop(onlyModules: List<String>? = null) {
if (onlyModules == null) {
aptInstall(OPEN_VPM)
aptInstall(OPENCONNECT)
aptInstall(VPNC)
// DevEnvs
installDocker()
aptInstall(JAVA)
aptInstall(CLOJURE_TOOLS)
installShadowCljs()
installDevOps()
provisionPython()
// IDEs
installVSC("python", "clojure")
installIntelliJ()
} else if (onlyModules.contains(DesktopOnlyModule.VERIFY.name.lowercase())) {
verifyIdeSetup()
} else if (onlyModules.contains(DesktopOnlyModule.FIREFOX.name.lowercase())) {
installPpaFirefox()
}
}
fun Prov.provisionOfficeDesktop(onlyModules: List<String>? = null) {
if (onlyModules == null) {
aptInstall(ZIP_UTILS)
aptInstall(SPELLCHECKING_DE)
aptInstall(BROWSER)
aptInstall(EMAIL_CLIENT)
installDeltaChat()
aptInstall(OFFICE_SUITE)
installZimWiki()
installNextcloudClient()
aptInstall(COMPARE_TOOLS)
// optional as installation of these tools often fail and they are not considered mandatory
optional {
aptInstall(DRAWING_TOOLS)
}
} else if (onlyModules.contains(DesktopOnlyModule.VERIFY.name.lowercase())) {
verifyOfficeSetup()
} else if (onlyModules.contains(DesktopOnlyModule.FIREFOX.name.lowercase())) {
installPpaFirefox()
}
}
 fun Prov.provisionBasicDesktop(
     gpg: KeyPair?,
     ssh: SshKeyPair?,
     gitUserName: String?,
     gitEmail: String?,
-    onlyModules: List<String>?
 ) {
-    if (onlyModules == null) {
     aptInstall(KEY_MANAGEMENT)
     aptInstall(VERSION_MANAGEMENT)
     aptInstall(NETWORK_TOOLS)
@@ -133,7 +104,8 @@ fun Prov.provisionBasicDesktop(
     aptInstall(CLIP_TOOLS)
     aptPurge(
         "remove-power-management xfce4-power-manager " +
-            "xfce4-power-manager-plugins xfce4-power-manager-data"
+            "xfce4-power-manager-plugins xfce4-power-manager-data" +
+            "upower libimobiledevice6 libplist3 libusbmuxd6 usbmuxd bluez-cups"
     )
     aptPurge("abiword gnumeric")
     aptPurge("popularity-contest")
@@ -153,7 +125,45 @@ fun Prov.provisionBasicDesktop(
     configureNoSwappiness()
     configureBash()
     installVirtualBoxGuestAdditions()
-    } else if (onlyModules.contains(DesktopOnlyModule.FIREFOX.name.lowercase())) {
-        installPpaFirefox()
-    }
 }
+
+fun Prov.provisionOfficeDesktop() {
+    aptInstall(ZIP_UTILS)
+    aptInstall(SPELLCHECKING_DE)
+    aptInstall(BROWSER)
+    aptInstall(EMAIL_CLIENT)
+    installDeltaChat()
+    aptInstall(OFFICE_SUITE)
+    installZimWiki()
+    // installNextcloudClient() might not install - might need fix and working test
+    aptInstall(COMPARE_TOOLS)
+    // VSCode is also required in office VM (not only in IDE desktop) e.g. as editor
+    installVSCode("python", "clojure")
+    // optional, as installation of these tools often fail and as they are not mandatory
+    optional {
+        aptInstall(DRAWING_TOOLS)
+    }
+}
fun Prov.provisionIdeDesktop() {
aptInstall(OPEN_VPM)
aptInstall(OPENCONNECT)
aptInstall(VPNC)
// DevEnvs
installDocker()
aptInstall(JAVA)
aptInstall(CLOJURE_TOOLS)
installShadowCljs()
installDevOps()
provisionPython()
installHugoByDeb()
// IDEs
installIntelliJ()
installKubeconform()
}
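As a usage illustration only (not part of the diff): the IDE desktop builds on top of the office desktop, so a full IDE provisioning run could look roughly like the sketch below. Prov.newInstance and the task wrapper appear elsewhere in this changeset; the concrete entry point and git identity values are assumptions.

    // Hypothetical sketch: provisioning an IDE desktop locally.
    fun main() {
        val prov = Prov.newInstance(name = "ide-desktop")
        prov.task("provision IDE desktop") {
            provisionBasicDesktop(gpg = null, ssh = null, gitUserName = "jane", gitEmail = "jane@example.org")
            provisionOfficeDesktop()   // office tools are a prerequisite of the IDE desktop
            provisionIdeDesktop()
            verifyIdeSetup()
        }
    }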

View file

@@ -1,26 +1,35 @@
 package org.domaindrivenarchitecture.provs.desktop.domain
+typealias HostKey = String
 /**
- * A HostKey should contain space-separated: keytype, key and (optionally) a comment
+ * Represents a known host for ssh connections.
+ *
+ * @param hostName domain name or ip
+ * @param port (optional) to be specified if different from default port 22
+ * @param hostKeys list of keys, where each should contain separated by space: 1. keytype, 2. key and 3. (optionally) a comment
  *
  * See: https://man7.org/linux/man-pages/man8/sshd.8.html#SSH_KNOWN_HOSTS_FILE_FORMAT
  */
-typealias HostKey = String
-open class KnownHost protected constructor(
+open class KnownHost(
     val hostName: String,
+    val port: Int? = null,
     val hostKeys: List<HostKey>
 ) {
+    constructor(hostName: String, hostKeys: List<HostKey>) : this(hostName, null, hostKeys)
     companion object {
         val GITHUB = KnownHost(
-            "github.com", listOf(
+            "github.com",
+            listOf(
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl", "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl",
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=", "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=",
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=",
             )
         )
         val GITLAB = KnownHost(
-            "gitlab.com", listOf(
+            "gitlab.com",
+            listOf(
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf", "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf",
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9",
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=", "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=",

View file

@@ -6,6 +6,6 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.addKnownHos
 fun Prov.addKnownHosts(knownHosts: List<KnownHost> = KnownHost.values()) = task {
     for (knownHost in knownHosts) {
-        addKnownHost(knownHost.hostName, knownHost.hostKeys, verifyKeys = true)
+        addKnownHost(knownHost, verifyKeys = true)
     }
 }

View file

@@ -15,13 +15,13 @@ fun Prov.installDevOps() = task {
     installTerraform()
     installKubectlAndTools()
     installYq()
+    installGraalVM()
 }
 fun Prov.installYq(
     version: String = "4.13.2",
     sha256sum: String = "d7c89543d1437bf80fee6237eadc608d1b121c21a7cbbe79057d5086d74f8d79"
-): ProvResult = task {
+) = task {
     val path = "/usr/bin/"
     val filename = "yq"
     if (!checkFile(path + filename)) {
@@ -38,7 +38,7 @@ fun Prov.installYq(
     }
 }
-fun Prov.installKubectlAndTools(): ProvResult = task {
+fun Prov.installKubectlAndTools() = task {
     task("installKubectl") {
         if (!checkFile(KUBE_CONFIG_CONTEXT_SCRIPT)) {
@@ -49,13 +49,37 @@ fun Prov.installKubectlAndTools(): ProvResult = task {
         }
     }
+    task("installKubeconform") {
+        installKubeconform()
+    }
     installDevopsScripts()
 }
-fun Prov.installKubectl(): ProvResult = task {
+fun Prov.installKubeconform() = task {
// check for latest stable release on: https://github.com/yannh/kubeconform/releases
val version = "0.6.4"
val installationPath = "/usr/local/bin/"
val tmpDir = "~/tmp"
val filename = "kubeconform-linux-amd64"
val packedFilename = "$filename.tar.gz"
if ( !chk("kubeconform -v") || "v$version" != cmd("kubeconform -v").out?.trim() ) {
downloadFromURL(
"https://github.com/yannh/kubeconform/releases/download/v$version/$packedFilename",
path = tmpDir,
sha256sum = "2b4ebeaa4d5ac4843cf8f7b7e66a8874252b6b71bc7cbfc4ef1cbf85acec7c07"
)
cmd("sudo tar -xzf $packedFilename -C $installationPath", tmpDir)
} else {
ProvResult(true, out = "Kubeconform $version already installed")
}
}
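installKubeconform above follows the pattern used throughout this file: check the currently installed version, download a pinned release with a sha256 checksum, then unpack into the installation path. A hedged sketch of the same pattern for an arbitrary single-binary tool; tool name, URL, version and checksum below are placeholders, not real values.

    // Hypothetical sketch of the pinned-version install pattern.
    fun Prov.installSomeTool() = task {
        val version = "1.2.3"                                 // placeholder version
        val installationPath = "/usr/local/bin/"
        val tmpDir = "~/tmp"
        val packedFilename = "sometool-linux-amd64.tar.gz"    // placeholder artifact name

        if (!chk("sometool --version") || !cmd("sometool --version").out.orEmpty().contains(version)) {
            downloadFromURL(
                "https://example.org/releases/v$version/$packedFilename",   // placeholder URL
                path = tmpDir,
                sha256sum = "0000000000000000000000000000000000000000000000000000000000000000"  // placeholder
            )
            cmd("sudo tar -xzf $packedFilename -C $installationPath", tmpDir)
        } else {
            ProvResult(true, out = "sometool $version already installed")
        }
    }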
+fun Prov.installKubectl() = task {
     // see https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/
-    val kubectlVersion = "1.23.0"
+    val kubectlVersion = "1.27.4"
     val tmpDir = "~/tmp"
     // prerequisites -- see https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/
@@ -67,19 +91,19 @@ fun Prov.installKubectl(): ProvResult = task {
     downloadFromURL(
         "https://dl.k8s.io/release/v$kubectlVersion/bin/linux/amd64/kubectl",
         path = tmpDir,
-        // from https://dl.k8s.io/v1.23.0/bin/linux/amd64/kubectl.sha256
-        sha256sum = "2d0f5ba6faa787878b642c151ccb2c3390ce4c1e6c8e2b59568b3869ba407c4f"
+        // from https://dl.k8s.io/v1.27.4/bin/linux/amd64/kubectl.sha256
+        sha256sum = "4685bfcf732260f72fce58379e812e091557ef1dfc1bc8084226c7891dd6028f"
     )
     cmd("sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl", dir = tmpDir)
 }
-fun Prov.configureKubectlBashCompletion(): ProvResult = task {
+fun Prov.configureKubectlBashCompletion() = task {
     cmd("kubectl completion bash >> /etc/bash_completion.d/kubernetes", sudo = true)
     createDir(".bashrc.d")
     createFileFromResource(KUBE_CONFIG_CONTEXT_SCRIPT, "kubectl.sh", RESOURCE_PATH)
 }
-fun Prov.installDevopsScripts() {
+fun Prov.installDevopsScripts() = task {
     task("install ssh helper") {
         createFileFromResource(
@@ -121,7 +145,7 @@ fun Prov.installDevopsScripts() {
         }
     }
 }
-fun Prov.installTerraform(): ProvResult = task {
+fun Prov.installTerraform() = task {
     val dir = "/usr/lib/tfenv/"
     if (!checkDir(dir)) {
@@ -134,34 +158,3 @@ fun Prov.installTerraform(): ProvResult = task {
     cmd("tfenv install latest:^1.4.6", sudo = true)
     cmd("tfenv use latest:^1.4.6", sudo = true)
 }
// -------------------------------------------- AWS credentials file -----------------------------------------------
fun Prov.installAwsCredentials(id: String = "REPLACE_WITH_YOUR_ID", key: String = "REPLACE_WITH_YOUR_KEY"): ProvResult =
task {
val dir = "~/.aws"
if (!checkDir(dir)) {
createDirs(dir)
createFile("~/.aws/config", awsConfig())
createFile("~/.aws/credentials", awsCredentials(id, key))
} else {
ProvResult(true, "aws credential folder already installed")
}
}
fun awsConfig(): String {
return """
[default]
region = eu-central-1
output = json
""".trimIndent()
}
fun awsCredentials(id: String, key: String): String {
return """
[default]
aws_access_key_id = $id
aws_secret_access_key = $key
""".trimIndent()
}

View file

@@ -11,9 +11,10 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.web.base.downloadFrom
 fun Prov.installGopass(
-    version: String = "1.15.5",
+    version: String = "1.15.13",  // NOTE: when adjusting, pls also adjust checksum below and version of gopass bridge json api
     enforceVersion: Boolean = false,
-    sha256sum: String = "23ec10015c2643f22cb305859eb36d671094d463d2eb1798cc675e7bb06f4b39"
+    // from https://github.com/gopasspw/gopass/releases/tag/v1.15.13
+    sha256sum: String = "409ed5617e64fa2c781d5e2807ba7fcd65bc383a4e110f410f90b590e51aec55"
 ) = taskWithResult {
     if (isPackageInstalled("gopass") && !enforceVersion) {

View file

@@ -22,10 +22,10 @@ fun Prov.downloadGopassBridge() = task {
 }
 fun Prov.installGopassJsonApi() = taskWithResult {
-    // see https://github.com/gopasspw/gopass-jsonapi
-    val sha256sum = "ec9976e39a468428ae2eb1e2e0b9ceccba7f60d66b8097e2425b0c07f4fed108"
-    val gopassJsonApiVersion = "1.15.5"
-    val requiredGopassVersion = "1.15.5"
+    // from https://github.com/gopasspw/gopass-jsonapi/releases/tag/v1.15.13
+    val sha256sum = "3162ab558301645024325ce2e419c1d67900e1faf95dc1774a36f1ebfc76389f"
+    val gopassJsonApiVersion = "1.15.13"
+    val requiredGopassVersion = "1.15.13"
     val filename = "gopass-jsonapi_${gopassJsonApiVersion}_linux_amd64.deb"
     val downloadUrl = "-L https://github.com/gopasspw/gopass-jsonapi/releases/download/v$gopassJsonApiVersion/$filename"
     val downloadDir = "${userHome()}Downloads"

View file

@ -0,0 +1,33 @@
package org.domaindrivenarchitecture.provs.desktop.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createDirs
import org.domaindrivenarchitecture.provs.framework.ubuntu.web.base.downloadFromURL
const val GRAAL_VM_VERSION = "21.0.2"
fun Prov.installGraalVM() = task {
val tmpDir = "~/tmp"
val filename = "graalvm-community-jdk-"
val additionalPartFilename = "_linux-x64_bin"
val packedFilename = "$filename$GRAAL_VM_VERSION$additionalPartFilename.tar.gz"
val extractedFilenameHunch = "graalvm-community-openjdk-"
val installationPath = "/usr/lib/jvm/"
if ( GRAAL_VM_VERSION != graalVMVersion() || !chk("ls -d $installationPath$extractedFilenameHunch$GRAAL_VM_VERSION*")) {
downloadFromURL(
"https://github.com/graalvm/graalvm-ce-builds/releases/download/jdk-$GRAAL_VM_VERSION/$packedFilename",
path = tmpDir,
sha256sum = "b048069aaa3a99b84f5b957b162cc181a32a4330cbc35402766363c5be76ae48"
)
createDirs(installationPath, sudo = true)
cmd("sudo tar -C $installationPath -xzf $packedFilename", tmpDir)
val graalInstPath = installationPath + (cmd("ls /usr/lib/jvm/|grep -e graalvm-community-openjdk-$GRAAL_VM_VERSION").out?.replace("\n", ""))
cmd("sudo ln -sf $graalInstPath/lib/svm/bin/native-image /usr/local/bin/native-image")
}
}
fun Prov.graalVMVersion(): String {
return cmdNoEval("/usr/local/bin/native-image --version|awk 'NR==1 {print $2}'").out?.trim() ?: ""
}

View file

@ -0,0 +1,85 @@
package org.domaindrivenarchitecture.provs.desktop.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.userHome
import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptInstall
import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptPurge
import org.domaindrivenarchitecture.provs.framework.ubuntu.web.base.downloadFromURL
fun Prov.installHugoByDeb() = task {
val sha256sum = "46692ac9b79d5bc01b0f847f6dcf651d8630476de63e598ef61a8da9461d45cd"
val requiredHugoVersion = "0.125.5"
val filename = "hugo_extended_0.125.5_linux-amd64.deb"
val downloadUrl = "-L https://github.com/gohugoio/hugo/releases/download/v$requiredHugoVersion/$filename"
val downloadDir = "${userHome()}Downloads"
val currentHugoVersion = cmdNoEval("hugo version").out ?: ""
if (needsHugoInstall(currentHugoVersion, requiredHugoVersion)) {
if (isHugoInstalled(currentHugoVersion)) {
if (currentHugoVersion.contains("snap")) {
cmd("snap remove hugo", sudo = true)
} else {
aptPurge("hugo")
}
}
aptInstall("gnupg2")
downloadFromURL(downloadUrl, filename, downloadDir, sha256sum = sha256sum)
cmd("dpkg -i $downloadDir/$filename", sudo = true)
}
}
fun needsHugoInstall(currentHugoVersion: String?, requiredHugoVersion: String) : Boolean {
if (currentHugoVersion == null) {
return true
}
if (!isHugoInstalled(currentHugoVersion)) {
return true
}
if (!isHugoExtended(currentHugoVersion)) {
return true
}
if (isLowerHugoVersion(requiredHugoVersion, currentHugoVersion)) {
return true
}
return false
}
fun isHugoInstalled(hugoVersion: String?) : Boolean {
if (hugoVersion == null) {
return false
}
return hugoVersion.contains("hugo")
}
fun isHugoExtended(hugoVersion: String) : Boolean {
return hugoVersion.contains("extended")
}
fun isLowerHugoVersion(requiredHugoVersion: String, currentHugoVersion: String ) : Boolean {
val reqVersionNo = getHugoVersionNo(requiredHugoVersion)
val currentVersionNo = getHugoVersionNo(currentHugoVersion)
return when {
compareVersions(currentVersionNo, reqVersionNo).contains("lower") -> true
else -> false
}
}
fun compareVersions(firstVersion : List<Int>, secondVersion: List<Int>) : String {
var result = ""
for (i in 0..2) {
when {
firstVersion[i] > secondVersion[i] -> result += " higher"
firstVersion[i] < secondVersion[i] -> result += " lower"
firstVersion[i] == secondVersion[i] -> result += " equal"
}
}
return result
}
fun getHugoVersionNo(hugoVersion: String) : List<Int> {
// hugo v0.126.1-3d40ab+extended linux/amd64 BuildDate=2024-05-15T10:42:34Z VendorInfo=snap:0.126.1
var result = hugoVersion.split(" ")[1]
result = result.split("-")[0].removePrefix("v")
return result.split(".").map { it.toInt() }
}

View file

@ -30,7 +30,7 @@ val OPENCONNECT = "openconnect network-manager-openconnect network-manager-openc
val VPNC = "vpnc network-manager-vpnc network-manager-vpnc-gnome vpnc-scripts" val VPNC = "vpnc network-manager-vpnc network-manager-vpnc-gnome vpnc-scripts"
val JAVA = "openjdk-8-jdk openjdk-11-jdk openjdk-17-jdk jarwrapper" val JAVA = "openjdk-17-jdk jarwrapper"
val DRAWING_TOOLS = "inkscape dia openboard graphviz" val DRAWING_TOOLS = "inkscape dia openboard graphviz"

View file

@ -14,6 +14,8 @@ fun Prov.provisionPython(venvHome: String? = "~/.venv/meissa") = task {
installRestClient(venvHome) installRestClient(venvHome)
installJupyterlab(venvHome) installJupyterlab(venvHome)
installLinters(venvHome) installLinters(venvHome)
installAsciinema(venvHome)
installPyTest(venvHome)
} }
fun Prov.installPython3(): ProvResult = task { fun Prov.installPython3(): ProvResult = task {
@ -28,7 +30,7 @@ fun Prov.configureVenv(venvHome: String): ProvResult = task {
fun Prov.installPybuilder(venvHome: String? = null): ProvResult = task { fun Prov.installPybuilder(venvHome: String? = null): ProvResult = task {
pipInstall("pybuilder ddadevops pypandoc mockito coverage unittest-xml-reporting deprecation" + pipInstall("pybuilder ddadevops pypandoc mockito coverage unittest-xml-reporting deprecation" +
" python_terraform dda_python_terraform boto3 pyyaml packaging", " python_terraform dda_python_terraform boto3 pyyaml packaging inflection",
venvHome venvHome
) )
pipInstall("--upgrade ddadevops", venvHome) pipInstall("--upgrade ddadevops", venvHome)
@ -45,6 +47,13 @@ fun Prov.installJupyterlab(venvHome: String? = null): ProvResult = task {
fun Prov.installLinters(venvHome: String? = null): ProvResult = task { fun Prov.installLinters(venvHome: String? = null): ProvResult = task {
pipInstall("flake8 mypy pylint", venvHome) pipInstall("flake8 mypy pylint", venvHome)
} }
fun Prov.installAsciinema(venvHome: String? = null): ProvResult = task {
pipInstall("asciinema", venvHome)
}
fun Prov.installPyTest(venvHome: String? = null): ProvResult = task {
pipInstall("pytest", venvHome)
}
private fun Prov.pipInstall(pkg: String, venvHome: String? = null) { private fun Prov.pipInstall(pkg: String, venvHome: String? = null) {
cmd(activateVenvCommandPrefix(venvHome) + "pip3 install $pkg") cmd(activateVenvCommandPrefix(venvHome) + "pip3 install $pkg")

View file

@@ -6,33 +6,33 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptInsta
 import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.isPackageInstalled
-fun Prov.installVSC(vararg options: String) = task {
+fun Prov.installVSCode(vararg options: String) = task {
     val clojureExtensions = setOf("betterthantomorrow.calva", "DavidAnson.vscode-markdownlint")
     val pythonExtensions = setOf("ms-python.python")
-    prerequisitesVSCinstall()
+    installVSCodePrerequisites()
     installVSCPackage()
     installVSCodiumPackage()
     if (options.contains("clojure")) {
-        installExtensionsCode(clojureExtensions)
-        installExtensionsCodium(clojureExtensions)
+        installVSCodeExtensions(clojureExtensions)
+        installVSCodiumExtensions(clojureExtensions)
     }
     if (options.contains("python")) {
-        installExtensionsCode(pythonExtensions)
-        installExtensionsCodium(pythonExtensions)
+        installVSCodeExtensions(pythonExtensions)
+        installVSCodiumExtensions(pythonExtensions)
     }
 }
-private fun Prov.prerequisitesVSCinstall() = task {
+private fun Prov.installVSCodePrerequisites() = task {
     aptInstall("curl gpg unzip apt-transport-https")
 }
 @Suppress("unused") // only required for installation of vscode via apt
-private fun Prov.installVscWithApt() = task {
+private fun Prov.installVSCodeWithApt() = task {
     val packageName = "code"
     if (!isPackageInstalled(packageName)) {
         // see https://code.visualstudio.com/docs/setup/linux
@@ -62,7 +62,7 @@ private fun Prov.installVSCodiumPackage() = task {
 }
-private fun Prov.installExtensionsCode(extensions: Set<String>) = optional {
+private fun Prov.installVSCodeExtensions(extensions: Set<String>) = optional {
     var res = ProvResult(true)
     for (ext in extensions) {
         res = cmd("code --install-extension $ext")
@@ -71,11 +71,11 @@ private fun Prov.installExtensionsCode(extensions: Set<String>) = optional {
     // Settings can be found at $HOME/.config/Code/User/settings.json
 }
-private fun Prov.installExtensionsCodium(extensions: Set<String>) = optional {
+private fun Prov.installVSCodiumExtensions(extensions: Set<String>) = optional {
     var res = ProvResult(true)
     for (ext in extensions) {
-        res = cmd("codium --install-extension $ext")
+        res = ProvResult(res.success && cmd("codium --install-extension $ext").success)
     }
     res
-    // Settings can be found at $HOME/.config/Code/User/settings.json
+    // Settings can be found at $HOME/.config/VSCodium/User/settings.json
 }
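After the rename, callers use installVSCode with the same vararg options as before; a brief, hypothetical usage sketch (the desktop provisioning in this changeset calls it with "python" and "clojure"):

    // Hypothetical call sites after the rename.
    fun Prov.provisionEditors() = task {
        installVSCode("python", "clojure")  // VS Code + VSCodium with python and clojure extensions
        installVSCode()                     // editors only, no extra extensions
    }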

View file

@@ -80,8 +80,8 @@ open class Prov protected constructor(
     }
     /**
-     * A task is the base execution unit in provs. In the results overview it is represented by one line resp. result (of either success or failure).
-     * Returns success if no sub-tasks are called or if all subtasks finish with success.
+     * A task is the fundamental execution unit. In the results overview it is represented by one line with a success or failure result.
+     * Returns success if all sub-tasks finished with success or if no sub-tasks are called at all.
     */
    fun task(name: String? = null, taskLambda: Prov.() -> Unit): ProvResult {
        printDeprecationWarningIfLevel0("task")
@@ -89,8 +89,10 @@
    }
    /**
-     * Same as task but the provided lambda is explicitly required to provide a ProvResult to be returned.
-     * The returned result is included in the evaluation.
+     * Same as task above but the lambda parameter must have a ProvResult as return type.
+     * The returned ProvResult is included in the success resp. failure evaluation,
+     * i.e. if the returned ProvResult from the lambda fails, the returned ProvResult from
+     * taskWithResult also fails, else success depends on potentially called sub-tasks.
     */
    fun taskWithResult(name: String? = null, taskLambda: Prov.() -> ProvResult): ProvResult {
        printDeprecationWarningIfLevel0("taskWithResult")
@@ -98,27 +100,27 @@
    }
    /**
-     * defines a task, which returns the returned result, the results of sub-tasks are not considered
+     * defines a task, which returns the returned result from the lambda, the results of sub-tasks are not considered
     */
-    fun requireLast(name: String? = null, a: Prov.() -> ProvResult): ProvResult {
+    fun requireLast(name: String? = null, taskLambda: Prov.() -> ProvResult): ProvResult {
        printDeprecationWarningIfLevel0("requireLast")
-        return evaluate(ResultMode.LAST, name) { a() }
+        return evaluate(ResultMode.LAST, name) { taskLambda() }
    }
    /**
-     * defines a task, which always returns success
+     * Defines a task, which always returns success.
     */
-    fun optional(name: String? = null, a: Prov.() -> ProvResult): ProvResult {
+    fun optional(name: String? = null, taskLambda: Prov.() -> ProvResult): ProvResult {
        printDeprecationWarningIfLevel0("optional")
-        return evaluate(ResultMode.OPTIONAL, name) { a() }
+        return evaluate(ResultMode.OPTIONAL, name) { taskLambda() }
    }
    /**
-     * defines a task, which exits the overall execution on failure
+     * Defines a task, which exits the overall execution on failure result of the taskLambda.
     */
-    fun exitOnFailure(a: Prov.() -> ProvResult): ProvResult {
+    fun exitOnFailure(taskLambda: Prov.() -> ProvResult): ProvResult {
        printDeprecationWarningIfLevel0("exitOnFailure")
-        return evaluate(ResultMode.FAILEXIT) { a() }
+        return evaluate(ResultMode.FAILEXIT) { taskLambda() }
    }
    /**
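A hedged illustration of the result semantics documented above (the shell commands are placeholders): task aggregates its sub-results, taskWithResult additionally folds in the lambda's own return value, optional never fails, and requireLast only looks at the last returned result.

    // Hypothetical sketch of how the result modes compose.
    fun Prov.resultModeDemo() = task {                  // success iff all sub-tasks succeed
        optional {                                      // always reported as success
            cmd("which some-optional-tool")             // placeholder command
        }
        taskWithResult {                                // lambda must return a ProvResult
            val res = cmd("echo hello")
            ProvResult(res.success && res.out?.contains("hello") == true)
        }
        requireLast {                                   // only the last result counts
            cmd("false")                                // would normally fail ...
            cmd("true")                                 // ... but the last result decides
        }
    }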

View file

@@ -8,6 +8,8 @@ data class ProvResult(val success: Boolean,
                       val exception: Exception? = null,
                       val exit: String? = null) {
+    val outTrimmed: String? = out?.trim()
     constructor(returnCode : Int) : this(returnCode == 0)
     override fun toString(): String {

View file

@@ -2,7 +2,6 @@ package org.domaindrivenarchitecture.provs.framework.core
 import com.charleskorn.kaml.Yaml
 import com.charleskorn.kaml.YamlConfiguration
-import kotlinx.serialization.InternalSerializationApi
 import kotlinx.serialization.serializer
 import java.io.BufferedReader
 import java.io.File
@@ -18,15 +17,13 @@ fun writeToFile(fileName: String, text: String) {
 }
-@OptIn(InternalSerializationApi::class)
 inline fun <reified T : Any> String.yamlToType() = Yaml(configuration = YamlConfiguration(strictMode = false)).decodeFromString(
-    T::class.serializer(),
+    serializer<T>(),
     this
 )
-@OptIn(InternalSerializationApi::class)
 inline fun <reified T : Any> T.toYaml() = Yaml(configuration = YamlConfiguration(strictMode = false, encodeDefaults = false)).encodeToString(
-    T::class.serializer(),
+    serializer<T>(),
     this
 )
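The switch from T::class.serializer() to serializer&lt;T&gt;() keeps the reified helpers usable without the InternalSerializationApi opt-in. A minimal, hypothetical round-trip with a @Serializable data class (the class itself is an example, not part of the repo):

    import kotlinx.serialization.Serializable

    // Hypothetical config class for illustration only.
    @Serializable
    data class DemoConfig(val host: String, val port: Int = 22)

    fun demoRoundTrip() {
        val yaml = DemoConfig("example.org", 2222).toYaml()
        val parsed: DemoConfig = yaml.yamlToType()
        check(parsed == DemoConfig("example.org", 2222))
    }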

View file

@ -6,7 +6,7 @@ import org.domaindrivenarchitecture.provs.framework.core.docker.dockerimages.Doc
import org.domaindrivenarchitecture.provs.framework.core.docker.platforms.* import org.domaindrivenarchitecture.provs.framework.core.docker.platforms.*
import org.domaindrivenarchitecture.provs.framework.core.platforms.UbuntuProv import org.domaindrivenarchitecture.provs.framework.core.platforms.UbuntuProv
import org.domaindrivenarchitecture.provs.framework.core.processors.ContainerStartMode import org.domaindrivenarchitecture.provs.framework.core.processors.ContainerStartMode
import org.domaindrivenarchitecture.provs.framework.core.docker.platforms.*
private const val DOCKER_NOT_SUPPORTED = "docker not yet supported for " private const val DOCKER_NOT_SUPPORTED = "docker not yet supported for "
@ -17,7 +17,7 @@ fun Prov.dockerProvideImage(image: DockerImage, skipIfExisting: Boolean = true,
if (this is UbuntuProv) { if (this is UbuntuProv) {
return this.dockerProvideImagePlatform(image, skipIfExisting, sudo) return this.dockerProvideImagePlatform(image, skipIfExisting, sudo)
} else { } else {
throw RuntimeException(DOCKER_NOT_SUPPORTED + (this as UbuntuProv).javaClass) throw RuntimeException(DOCKER_NOT_SUPPORTED + this.javaClass)
} }
} }
@ -28,7 +28,7 @@ fun Prov.dockerImageExists(imageName: String, sudo: Boolean = true) : Boolean {
if (this is UbuntuProv) { if (this is UbuntuProv) {
return this.dockerImageExistsPlatform(imageName, sudo) return this.dockerImageExistsPlatform(imageName, sudo)
} else { } else {
throw RuntimeException(DOCKER_NOT_SUPPORTED + (this as UbuntuProv).javaClass) throw RuntimeException(DOCKER_NOT_SUPPORTED + this.javaClass)
} }
} }
@ -50,7 +50,7 @@ fun Prov.provideContainer(
if (this is UbuntuProv) { if (this is UbuntuProv) {
return this.provideContainerPlatform(containerName, imageName, startMode, sudo, options, command) return this.provideContainerPlatform(containerName, imageName, startMode, sudo, options, command)
} else { } else {
throw RuntimeException(DOCKER_NOT_SUPPORTED + (this as UbuntuProv).javaClass) throw RuntimeException(DOCKER_NOT_SUPPORTED + this.javaClass)
} }
} }
@ -59,7 +59,7 @@ fun Prov.containerRuns(containerName: String, sudo: Boolean = true) : Boolean {
if (this is UbuntuProv) { if (this is UbuntuProv) {
return this.containerRunsPlatform(containerName, sudo) return this.containerRunsPlatform(containerName, sudo)
} else { } else {
throw RuntimeException(DOCKER_NOT_SUPPORTED + (this as UbuntuProv).javaClass) throw RuntimeException(DOCKER_NOT_SUPPORTED + this.javaClass)
} }
} }
@ -72,7 +72,7 @@ fun Prov.runContainer(
if (this is UbuntuProv) { if (this is UbuntuProv) {
return this.runContainerPlatform(containerName, imageName, sudo) return this.runContainerPlatform(containerName, imageName, sudo)
} else { } else {
throw RuntimeException(DOCKER_NOT_SUPPORTED + (this as UbuntuProv).javaClass) throw RuntimeException(DOCKER_NOT_SUPPORTED + this.javaClass)
} }
} }
@ -84,16 +84,17 @@ fun Prov.exitAndRmContainer(
if (this is UbuntuProv) { if (this is UbuntuProv) {
return this.exitAndRmContainerPlatform(containerName, sudo) return this.exitAndRmContainerPlatform(containerName, sudo)
} else { } else {
throw RuntimeException(DOCKER_NOT_SUPPORTED + (this as UbuntuProv).javaClass) throw RuntimeException(DOCKER_NOT_SUPPORTED + this.javaClass)
} }
} }
@Suppress("unused")
fun Prov.containerExec(containerName: String, cmd: String, sudo: Boolean = true): ProvResult { fun Prov.containerExec(containerName: String, cmd: String, sudo: Boolean = true): ProvResult {
if (this is UbuntuProv) { if (this is UbuntuProv) {
return this.containerExecPlatform(containerName, cmd, sudo) return this.containerExecPlatform(containerName, cmd, sudo)
} else { } else {
throw RuntimeException(DOCKER_NOT_SUPPORTED + (this as UbuntuProv).javaClass) throw RuntimeException(DOCKER_NOT_SUPPORTED + this.javaClass)
} }
} }

View file

@ -17,12 +17,12 @@ class UbuntuPlusUser(private val userName: String = "testuser") : DockerImage {
override fun imageText(): String { override fun imageText(): String {
return """ return """
FROM ubuntu:20.04 FROM ubuntu:22.04
ARG DEBIAN_FRONTEND=noninteractive ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get -y install sudo RUN apt-get update && apt-get -y install sudo
RUN useradd -m $userName && echo "$userName:$userName" | chpasswd && adduser $userName sudo RUN useradd -m $userName && echo "$userName:$userName" | chpasswd && usermod -aG sudo $userName
RUN echo "$userName ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/$userName RUN echo "$userName ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/$userName
USER $userName USER $userName

View file

@@ -7,10 +7,10 @@ import org.domaindrivenarchitecture.provs.framework.core.processors.Processor
 const val SHELL = "/bin/bash"
-class UbuntuProv internal constructor(
+open class UbuntuProv(
     processor: Processor = LocalProcessor(),
     name: String? = null,
-    progressType: ProgressType
+    progressType: ProgressType = ProgressType.BASIC
 ) : Prov(processor, name, progressType) {
     override fun cmd(cmd: String, dir: String?, sudo: Boolean): ProvResult = taskWithResult {
@@ -30,14 +30,16 @@ class UbuntuProv internal constructor(
     }
     private fun buildCommand(vararg args: String): String {
-        return if (args.size == 1)
+        return if (args.size == 1) {
             args[0].escapeAndEncloseByDoubleQuoteForShell()
-        else
-            if (args.size == 3 && SHELL.equals(args[0]) && "-c".equals(args[1]))
+        } else {
+            if (args.size == 3 && SHELL == args[0] && "-c" == args[1]) {
                 SHELL + " -c " + args[2].escapeAndEncloseByDoubleQuoteForShell()
-            else
+            } else {
                 args.joinToString(separator = " ")
             }
+        }
+    }
 }
private fun commandWithDirAndSudo(cmd: String, dir: String?, sudo: Boolean): String { private fun commandWithDirAndSudo(cmd: String, dir: String?, sudo: Boolean): String {

View file

@@ -5,13 +5,14 @@ import org.slf4j.LoggerFactory
 import java.io.File
 import java.io.IOException
 import java.nio.charset.Charset
+import java.nio.file.Paths
 private fun getOsName(): String {
     return System.getProperty("os.name")
 }
-open class LocalProcessor : Processor {
+open class LocalProcessor(val useHomeDirAsWorkingDir: Boolean = true) : Processor {
     companion object {
         @Suppress("JAVA_CLASS_ON_COMPANION")
@@ -26,7 +27,12 @@ open class LocalProcessor : Processor {
     private fun workingDir() : String
     {
-        return System.getProperty("user.home") ?: File.separator
+        return if (useHomeDirAsWorkingDir) {
+            System.getProperty("user.home") ?: File.separator
+        } else {
+            // folder in which program was started
+            Paths.get("").toAbsolutePath().toString()
+        }
     }
     override fun exec(vararg args: String): ProcessResult {
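With the new flag, locally executed commands can run relative to the directory the program was started in instead of $HOME. Combined with the now-public UbuntuProv constructor from this changeset, a hypothetical local instance could look like this (the name and command are placeholders):

    // Hypothetical: run commands relative to the start directory instead of $HOME.
    fun demoLocalRun() {
        val localProv = UbuntuProv(
            processor = LocalProcessor(useHomeDirAsWorkingDir = false),
            name = "local-build"
        )
        localProv.task("show working dir") {
            cmd("pwd")   // executes in the directory the program was started from
        }
    }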

View file

@@ -93,9 +93,9 @@ class RemoteProcessor(val host: InetAddress, val user: String, val password: Sec
         var session: Session? = null
         try {
-            session = ssh.startSession()
-            val cmd: Command = session!!.exec(cmdString)
+            session = ssh.startSession() ?: throw IllegalStateException("ERROR: Could not start ssh session.")
+            val cmd: Command = session.exec(cmdString)
             val out = BufferedReader(InputStreamReader(cmd.inputStream)).use { it.readText() }
             val err = BufferedReader(InputStreamReader(cmd.errorStream)).use { it.readText() }
             cmd.join(100, TimeUnit.SECONDS)

View file

@ -0,0 +1,40 @@
package org.domaindrivenarchitecture.provs.framework.ubuntu.cron.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.core.ProvResult
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.checkFile
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createDirs
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createFile
import org.domaindrivenarchitecture.provs.framework.ubuntu.user.base.whoami
/**
* Creates a cron job.
* @param cronFilename e.g. "90_my_cron"; file is created in folder /etc/cron.d/
* @param schedule in the usual cron-format, examples: "0 * * * *" for each hour, "0 3 1-7 * 1" for the first Monday each month at 3:00, etc
* @param command the executed command
* @param user the user with whom the command will be executed, if null the current user is used
*/
fun Prov.createCronJob(cronFilename: String, schedule: String, command: String, user: String? = null) = task {
val cronUser = user ?: whoami()
val cronLine = "$schedule $cronUser $command\n"
createDirs("/etc/cron.d/", sudo = true)
createFile("/etc/cron.d/$cronFilename", cronLine, "644", sudo = true, overwriteIfExisting = true)
}
/**
* Adds a cronJob for a monthly reboot of the (Linux) system.
* ATTENTION: Use with care!!
*/
fun Prov.scheduleMonthlyReboot() = task {
val shutdown = "/sbin/shutdown"
if (checkFile(shutdown, sudo = true)) {
// reboot each first Tuesday in a month at 3:00
// use controlled "shutdown" instead of direct "reboot"
createCronJob("50_monthly_reboot", "0 3 1-7 * 2", "shutdown -r now", "root")
} else {
addResultToEval(ProvResult(false, err = "$shutdown not found."))
}
}
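A short usage sketch for the new helpers; the file names, schedules and commands below are examples, not defaults from the commit.

    // Hypothetical cron examples.
    fun Prov.provisionMaintenanceJobs() = task {
        // run a backup script every day at 02:30 as the current user
        createCronJob("60_backup", "30 2 * * *", "/usr/local/bin/backup.sh")

        // renew certificates hourly as root
        createCronJob("70_cert_renew", "0 * * * *", "/usr/local/bin/renew-certs.sh", "root")

        // reboot on the first Tuesday of each month at 03:00 (use with care)
        scheduleMonthlyReboot()
    }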

View file

@@ -201,7 +201,7 @@ fun Prov.fileContentLargeFile(file: String, sudo: Boolean = false, chunkSize: In
         // check first chunk
         if (resultString == null) {
             if (!chunkResult.success) {
-                return resultString
+                return null
             } else {
                 resultString = ""
             }
@@ -329,12 +329,16 @@ fun Prov.deleteDir(dir: String, path: String, sudo: Boolean = false): ProvResult
     if ("" == path) {
         throw RuntimeException("In deleteDir: path must not be empty.")
     }
+    return if (checkDir(dir, path, sudo)) {
         val cmd = "cd $path && rmdir $dir"
-    return if (!sudo) {
+        if (!sudo) {
             cmd(cmd)
         } else {
             cmd(cmd.sudoizeCommand())
         }
+    } else {
+        ProvResult(true, out = "Dir to delete did not exist: $dir")
+    }
 }
@@ -403,7 +407,7 @@ fun Prov.fileSize(filename: String, sudo: Boolean = false): Int? {
 private fun ensureValidPosixFilePermission(posixFilePermission: String) {
-    if (!Regex("^[0-7]{3}$").matches(posixFilePermission)) throw IllegalArgumentException("Wrong file permission ($posixFilePermission), permission must consist of 3 digits as e.g. 664")
+    if (!Regex("^0?[0-7]{3}$").matches(posixFilePermission)) throw IllegalArgumentException("Wrong file permission ($posixFilePermission), permission must consist of 3 digits as e.g. 664")
 }
 /**

View file

@@ -30,10 +30,13 @@ fun Prov.gitClone(
             ProvResult(true, out = "Repo [$pathWithBasename] already exists, but might not be up-to-date.")
         }
     } else {
-        // create targetPath (if not yet existing)
+        // create targetPath if not yet existing
         if (!checkDir(targetPath)) {
             createDirs(targetPath)
         }
+        // Note that all output of git clone on Linux is shown in stderr (normal progress info AND errors),
+        // which might be confusing in the logfile.
         cmd("cd $targetPath && git clone $repoSource ${targetFolderName ?: ""}")
     }
 }

View file

@@ -1,5 +1,6 @@
 package org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base
+import org.domaindrivenarchitecture.provs.desktop.domain.KnownHost
 import org.domaindrivenarchitecture.provs.framework.core.Prov
 import org.domaindrivenarchitecture.provs.framework.core.ProvResult
 import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.*
@@ -7,8 +8,6 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.SshKeyPair
 import java.io.File
-const val KNOWN_HOSTS_FILE = "~/.ssh/known_hosts"
 /**
  * Installs ssh keys for active user; ssh filenames depend on the ssh key type, e.g. for public key file: "id_rsa.pub", "id_id_ed25519.pub", etc
  */
@@ -20,38 +19,45 @@ fun Prov.configureSshKeys(sshKeys: SshKeyPair) = task {
 /**
- * Checks if the specified hostname or Ip is in a known_hosts file
+ * Checks if the specified host (domain name or IP) and (optional) port is contained in the known_hosts file
+ *
+ * @return whether if was found
  */
-fun Prov.isHostKnown(hostOrIp: String) : Boolean {
-    return cmdNoEval("ssh-keygen -F $hostOrIp").out?.isNotEmpty() ?: false
+fun Prov.isKnownHost(hostOrIp: String, port: Int? = null): Boolean {
+    val hostWithPotentialPort = port?.let { hostInKnownHostsFileFormat(hostOrIp, port) } ?: hostOrIp
+    return cmdNoEval("ssh-keygen -F $hostWithPotentialPort").out?.isNotEmpty() ?: false
+}
+fun hostInKnownHostsFileFormat(hostOrIp: String, port: Int? = null): String {
+    return port?.let { "[$hostOrIp]:$port" } ?: hostOrIp
 }
 /**
- * Adds ssh keys for specified host (which also can be an ip-address) to ssh-file "known_hosts"
- * Either add the specified keys or - if null - add keys automatically retrieved.
- * Note: adding keys automatically is vulnerable to a man-in-the-middle attack, thus considered insecure and not recommended.
+ * Adds ssh keys for specified host (which also can be an ip-address) to the ssh-file "known_hosts".
+ * If parameter verifyKeys is true, the keys are checked against the live keys of the host and added only if valid.
  */
-fun Prov.addKnownHost(host: String, keysToBeAdded: List<String>?, verifyKeys: Boolean = false) = task {
-    if (!checkFile(KNOWN_HOSTS_FILE)) {
+fun Prov.addKnownHost(knownHost: KnownHost, verifyKeys: Boolean = false) = task {
+    val knownHostsFile = "~/.ssh/known_hosts"
+    if (!checkFile(knownHostsFile)) {
         createDir(".ssh")
-        createFile(KNOWN_HOSTS_FILE, null)
+        createFile(knownHostsFile, null)
     }
-    if (keysToBeAdded == null) {
-        // auto add keys
-        cmd("ssh-keyscan $host >> $KNOWN_HOSTS_FILE")
-    } else {
-        for (key in keysToBeAdded) {
+    with(knownHost) {
+        for (key in hostKeys) {
             if (!verifyKeys) {
-                addTextToFile("\n$host $key\n", File(KNOWN_HOSTS_FILE))
+                addTextToFile("\n$hostName $key\n", File(knownHostsFile))
             } else {
-                val validKeys = getSshKeys(host)
+                val validKeys = findSshKeys(hostName, port)
                 if (validKeys?.contains(key) == true) {
-                    addTextToFile("\n$host $key\n", File(KNOWN_HOSTS_FILE))
+                    val formattedHost = hostInKnownHostsFileFormat(hostName, port)
+                    addTextToFile("\n$formattedHost $key\n", File(knownHostsFile))
                 } else {
-                    addResultToEval(ProvResult(false, err = "The following key of host [$host] could not be verified successfully: " + key))
+                    addResultToEval(
+                        ProvResult(
+                            false,
+                            err = "The following key of host [$hostName] could not be verified successfully: " + key
+                        )
+                    )
                 }
             }
         }
@@ -60,10 +66,14 @@ fun Prov.addKnownHost(host: String, keysToBeAdded: List<String>?, verifyKeys: Bo
 /**
- * Returns a list of valid ssh keys for the given host (host can also be an ip address), keys are returned as keytype and key BUT WITHOUT the host name
+ * Returns a list of valid ssh keys for the given host (host can also be an ip address),
+ * keys are returned (space-separated) as keytype and key, but WITHOUT the host name.*
+ * If no port is specified, the keys for the default port (22) are returned.
+ * If no keytype is specified, keys are returned for all keytypes.
  */
-private fun Prov.getSshKeys(host: String, keytype: String? = null): List<String>? {
+fun Prov.findSshKeys(host: String, port: Int? = null, keytype: String? = null): List<String>? {
+    val portOption = port?.let { " -p $port " } ?: ""
     val keytypeOption = keytype?.let { " -t $keytype " } ?: ""
-    val output = cmd("ssh-keyscan $keytypeOption $host 2>/dev/null").out?.trim()
+    val output = cmd("ssh-keyscan $portOption $keytypeOption $host 2>/dev/null").out?.trim()
     return output?.split("\n")?.filter { x -> "" != x }?.map { x -> x.substringAfter(" ") }
 }
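A hedged example combining the reworked helpers: add a verified host entry and check it afterwards. The custom host, port and key below are placeholders; KnownHost.GITHUB is defined in this changeset.

    // Hypothetical usage of addKnownHost / isKnownHost.
    fun Prov.ensureGitHosts() = task {
        addKnownHost(KnownHost.GITHUB, verifyKeys = true)

        val custom = KnownHost("git.example.org", 2222, listOf("ssh-ed25519 AAAA...placeholder..."))
        addKnownHost(custom, verifyKeys = true)

        if (!isKnownHost("git.example.org", 2222)) {
            addResultToEval(ProvResult(false, err = "git.example.org:2222 was not added to known_hosts"))
        }
    }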

View file

@@ -6,24 +6,25 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.secretSources.
 @Serializable
-abstract class SecretSource(protected val input: String) {
+abstract class SecretSource(protected val parameter: String) {
     abstract fun secret() : Secret
     abstract fun secretNullable() : Secret?
 }
 @Serializable
-enum class SecretSourceType() {
+enum class SecretSourceType {
-    PLAIN, FILE, PROMPT, PASS, GOPASS;
+    PLAIN, FILE, PROMPT, PASS, GOPASS, ENV;
-    fun secret(input: String) : Secret {
+    fun secret(parameter: String) : Secret {
         return when (this) {
-            PLAIN -> PlainSecretSource(input).secret()
-            FILE -> FileSecretSource(input).secret()
+            PLAIN -> PlainSecretSource(parameter).secret()
+            FILE -> FileSecretSource(parameter).secret()
             PROMPT -> PromptSecretSource().secret()
-            PASS -> PassSecretSource(input).secret()
-            GOPASS -> GopassSecretSource(input).secret()
+            PASS -> PassSecretSource(parameter).secret()
+            GOPASS -> GopassSecretSource(parameter).secret()
+            ENV -> EnvSecretSource(parameter).secret()
         }
     }
 }
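A hypothetical example of resolving a secret through the new ENV branch; the environment variable name is a placeholder.

    // Hypothetical: read a secret from an environment variable.
    fun readHcloudToken(): Secret =
        SecretSourceType.ENV.secret("HCLOUD_API_TOKEN")        // throws if the variable is not set

    fun readHcloudTokenOrNull(): Secret? =
        EnvSecretSource("HCLOUD_API_TOKEN").secretNullable()   // null if the variable is not set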

View file

@ -0,0 +1,18 @@
package org.domaindrivenarchitecture.provs.framework.ubuntu.secret.secretSources
import org.domaindrivenarchitecture.provs.framework.core.Secret
import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSource
/**
* Reads secret from a local environment variable
*/
class EnvSecretSource(varName: String) : SecretSource(varName) {
override fun secret(): Secret {
return secretNullable() ?: throw Exception("Failed to get secret from environment variable: $parameter")
}
override fun secretNullable(): Secret? {
val secret = System.getenv(parameter)
return if (secret == null) null else Secret(secret)
}
}

View file

@@ -13,11 +13,11 @@ class FileSecretSource(fqFileName: String) : SecretSource(fqFileName) {
     override fun secret(): Secret {
         val p = Prov.newInstance(name = "FileSecretSource", progressType = ProgressType.NONE)
-        return p.getSecret("cat " + input) ?: throw Exception("Failed to get secret.")
+        return p.getSecret("cat " + parameter) ?: throw Exception("Failed to get secret.")
     }
     override fun secretNullable(): Secret? {
         val p = Prov.newInstance(name = "FileSecretSource", progressType = ProgressType.NONE)
-        return p.getSecret("cat " + input)
+        return p.getSecret("cat " + parameter)
     }
 }

View file

@@ -11,10 +11,10 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSource
  */
 class GopassSecretSource(path: String) : SecretSource(path) {
     override fun secret(): Secret {
-        return secretNullable() ?: throw Exception("Failed to get \"$input\" secret from gopass.")
+        return secretNullable() ?: throw Exception("Failed to get \"$parameter\" secret from gopass.")
     }
     override fun secretNullable(): Secret? {
-        val p = Prov.newInstance(name = "GopassSecretSource for $input", progressType = ProgressType.NONE)
-        return p.getSecret("gopass show -f $input", true)
+        val p = Prov.newInstance(name = "GopassSecretSource for $parameter", progressType = ProgressType.NONE)
+        return p.getSecret("gopass show -f $parameter", true)
     }
 }

View file

@@ -12,10 +12,10 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSource
 class PassSecretSource(path: String) : SecretSource(path) {
     override fun secret(): Secret {
         val p = Prov.newInstance(name = "PassSecretSource", progressType = ProgressType.NONE)
-        return p.getSecret("pass " + input) ?: throw Exception("Failed to get secret.")
+        return p.getSecret("pass " + parameter) ?: throw Exception("Failed to get secret.")
     }
     override fun secretNullable(): Secret? {
         val p = Prov.newInstance(name = "PassSecretSource", progressType = ProgressType.NONE)
-        return p.getSecret("pass " + input)
+        return p.getSecret("pass " + parameter)
     }
 }

View file

@@ -6,9 +6,9 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSource
 class PlainSecretSource(plainSecret: String) : SecretSource(plainSecret) {
     override fun secret(): Secret {
-        return Secret(input)
+        return Secret(parameter)
     }
     override fun secretNullable(): Secret {
-        return Secret(input)
+        return Secret(parameter)
     }
 }

View file

@@ -47,7 +47,7 @@ class PasswordPanel : JPanel(FlowLayout()) {
 class PromptSecretSource(text: String = "Secret/Password") : SecretSource(text) {
     override fun secret(): Secret {
-        val password = PasswordPanel.requestPassword(input)
+        val password = PasswordPanel.requestPassword(parameter)
         if (password == null) {
             throw IllegalArgumentException("Failed to retrieve secret from prompting.")
         } else {
@@ -56,7 +56,7 @@ class PromptSecretSource(text: String = "Secret/Password") : SecretSource(text)
     }
     override fun secretNullable(): Secret? {
-        val password = PasswordPanel.requestPassword(input)
+        val password = PasswordPanel.requestPassword(parameter)
         return if(password == null) {
             null

View file

@ -0,0 +1,7 @@
package org.domaindrivenarchitecture.provs.server.domain.hetzner_csi
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.server.infrastructure.provisionHetznerCSIForK8s
fun Prov.provisionHetznerCSI(configResolved: HetznerCSIConfigResolved) =
provisionHetznerCSIForK8s(configResolved.hcloudApiToken, configResolved.encryptionPassphrase)

View file

@ -0,0 +1,23 @@
package org.domaindrivenarchitecture.provs.server.domain.hetzner_csi
import kotlinx.serialization.Serializable
import org.domaindrivenarchitecture.provs.framework.core.Secret
import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSupplier
@Serializable
data class HetznerCSIConfig (
val hcloudApiToken: SecretSupplier,
val encryptionPassphrase: SecretSupplier,
) {
fun resolveSecret(): HetznerCSIConfigResolved = HetznerCSIConfigResolved(this)
}
data class HetznerCSIConfigResolved(val configUnresolved: HetznerCSIConfig) {
val hcloudApiToken: Secret = configUnresolved.hcloudApiToken.secret()
val encryptionPassphrase: Secret = configUnresolved.encryptionPassphrase.secret()
}
@Serializable
data class HetznerCSIConfigHolder(
val hetzner: HetznerCSIConfig
)

View file

@@ -10,7 +10,8 @@ data class K3sConfig(
     val loopback: Loopback = Loopback(ipv4 = "192.168.5.1", ipv6 = "fc00::5:1"),
     val certmanager: Certmanager? = null,
     val echo: Echo? = null,
-    val reprovision: Reprovision = false
+    val reprovision: Reprovision = false,
+    val monthlyReboot: Boolean = false,
 ) {
     fun isDualStack(): Boolean {
         return node.ipv6 != null && loopback.ipv6 != null
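With the new flag, enabling the monthly host reboot is a one-line change on an existing config instance; a hedged Kotlin sketch that only uses fields visible in this diff:

    // Hypothetical: enable the monthly reboot on an existing K3sConfig value.
    fun withMonthlyReboot(base: K3sConfig): K3sConfig =
        base.copy(monthlyReboot = true)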

View file

@@ -2,6 +2,9 @@ package org.domaindrivenarchitecture.provs.server.domain.k3s
 import org.domaindrivenarchitecture.provs.configuration.infrastructure.DefaultConfigFileRepository
 import org.domaindrivenarchitecture.provs.framework.core.Prov
+import org.domaindrivenarchitecture.provs.framework.ubuntu.cron.infrastructure.scheduleMonthlyReboot
+import org.domaindrivenarchitecture.provs.server.domain.hetzner_csi.HetznerCSIConfigResolved
+import org.domaindrivenarchitecture.provs.server.domain.hetzner_csi.provisionHetznerCSI
 import org.domaindrivenarchitecture.provs.server.domain.k8s_grafana_agent.GrafanaAgentConfigResolved
 import org.domaindrivenarchitecture.provs.server.domain.k8s_grafana_agent.provisionGrafanaAgent
 import org.domaindrivenarchitecture.provs.server.infrastructure.*
@@ -11,6 +14,7 @@ import kotlin.system.exitProcess
 fun Prov.provisionK3sCommand(cli: K3sCliCommand) = task {
     val grafanaConfigResolved: GrafanaAgentConfigResolved? = findK8sGrafanaConfig(cli.configFileName)?.resolveSecret()
+    val hcloudConfigResolved: HetznerCSIConfigResolved? = findHetznerCSIConfig(cli.configFileName)?.resolveSecret()
     if (cli.onlyModules == null) {
         val k3sConfig: K3sConfig = getK3sConfig(cli.configFileName)
@@ -18,9 +22,10 @@ fun Prov.provisionK3sCommand(cli: K3sCliCommand) = task {
         val k3sConfigReprovision = k3sConfig.copy(reprovision = cli.reprovision || k3sConfig.reprovision)
         val applicationFile = cli.applicationFileName?.let { DefaultApplicationFileRepository(cli.applicationFileName).getFile() }
-        provisionK3s(k3sConfigReprovision, grafanaConfigResolved, applicationFile)
+        provisionK3s(k3sConfigReprovision, grafanaConfigResolved, hcloudConfigResolved, applicationFile)
     } else {
         provisionGrafana(cli.onlyModules, grafanaConfigResolved)
+        provisionHetznerCSI(cli.onlyModules, hcloudConfigResolved)
     }
 }
@@ -30,6 +35,7 @@ fun Prov.provisionK3sCommand(cli: K3sCliCommand) = task {
 fun Prov.provisionK3s(
     k3sConfig: K3sConfig,
     grafanaConfigResolved: GrafanaAgentConfigResolved? = null,
+    hetznerCSIConfigResolved: HetznerCSIConfigResolved? = null,
     applicationFile: ApplicationFile? = null
 ) = task {
@@ -53,6 +59,10 @@ fun Prov.provisionK3s(
         provisionGrafanaAgent(grafanaConfigResolved)
     }
+    if (hetznerCSIConfigResolved != null) {
+        provisionHetznerCSI(hetznerCSIConfigResolved)
+    }
     if (applicationFile != null) {
         provisionK3sApplication(applicationFile)
     }
@@ -60,6 +70,12 @@ fun Prov.provisionK3s(
     if (!k3sConfig.reprovision) {
         provisionServerCliConvenience()
     }
+    if (k3sConfig.monthlyReboot) {
+        scheduleMonthlyReboot()
+    }
+    installK9s()
 }
 private fun Prov.provisionGrafana(
@@ -75,3 +91,18 @@ private fun Prov.provisionGrafana(
         provisionGrafanaAgent(grafanaConfigResolved)
     }
 }
private fun Prov.provisionHetznerCSI(
onlyModules: List<String>?,
hetznerCSIConfigResolved: HetznerCSIConfigResolved?
) = task {
if (onlyModules != null && onlyModules.contains(ServerOnlyModule.HETZNER_CSI.name.lowercase())) {
if (hetznerCSIConfigResolved == null) {
println("ERROR: Could not find hetzner CSI config.")
exitProcess(7)
}
provisionHetznerCSI(hetznerCSIConfigResolved)
}
}
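
Below the changed functions, a minimal usage sketch (not part of the change set) of how the new optional parameter can be passed when provisionK3s is called directly; it assumes a K3sConfig and an already resolved HetznerCSIConfigResolved are at hand and leaves the Grafana agent and application file at their null defaults:

    // Sketch only: wires a resolved Hetzner CSI config into the regular k3s provisioning.
    fun Prov.provisionK3sWithHetznerCsiSketch(
        k3sConfig: K3sConfig,
        csiConfig: HetznerCSIConfigResolved
    ) = task {
        // grafanaConfigResolved and applicationFile keep their defaults here
        provisionK3s(k3sConfig, hetznerCSIConfigResolved = csiConfig)
    }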

@ -1,5 +1,6 @@
package org.domaindrivenarchitecture.provs.server.domain.k3s

enum class ServerOnlyModule {
-   GRAFANA
+   GRAFANA,
+   HETZNER_CSI
}
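
For reference, the onlyModules dispatch above matches list entries against the lowercase enum name, so the new module is addressed by the string "hetzner_csi"; a small illustrative check (variable names are only for illustration):

    // Sketch only: the string form expected in cli.onlyModules for each module.
    val hetznerCsiModuleKey = ServerOnlyModule.HETZNER_CSI.name.lowercase() // "hetzner_csi"
    val grafanaModuleKey = ServerOnlyModule.GRAFANA.name.lowercase()        // "grafana"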

@ -2,7 +2,6 @@ package org.domaindrivenarchitecture.provs.server.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.core.ProvResult
-import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createFile
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createFileFromResource

private const val resourcePath = "org/domaindrivenarchitecture/provs/desktop/infrastructure"
@ -16,7 +15,8 @@ fun Prov.provisionServerCliConvenience() = task {

fun Prov.provisionKubectlCompletionAndAlias(): ProvResult = task {
    cmd("kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl > /dev/null")
    cmd("echo 'alias k=kubectl' >> ~/.bashrc")
-   cmd("echo 'complete -o default -F __start_kubectl k' >>~/.bashrc")
+   cmd("echo 'alias k9=\"k9s --kubeconfig /etc/kubernetes/admin.conf\"' >> ~/.bashrc")
+   cmd("echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc")
}

fun Prov.provisionVimrc(): ProvResult = task {

@ -0,0 +1,53 @@
package org.domaindrivenarchitecture.provs.server.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.core.Secret
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createFileFromResource
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createFileFromResourceTemplate
import org.domaindrivenarchitecture.provs.server.domain.k3s.FileMode
import java.io.File
private const val hetznerCSIResourceDir = "org/domaindrivenarchitecture/provs/server/infrastructure/hetznerCSI/"
fun Prov.provisionHetznerCSIForK8s(hetznerApiToken: Secret, encryptionPassphrase: Secret) {
// CSI Driver
createFileFromResourceTemplate(
k3sManualManifestsDir + "hcloud-api-token-secret.yaml",
"hcloud-api-token-secret.template.yaml",
resourcePath = hetznerCSIResourceDir,
posixFilePermission = "644",
values = mapOf(
"HETZNER_API_TOKEN" to hetznerApiToken.plain()
))
cmd("kubectl apply -f hcloud-api-token-secret.yaml", k3sManualManifestsDir)
applyHetznerCSIFileFromResource(File(k3sManualManifestsDir, "hcloud-csi.yaml"))
// Encryption
createFileFromResourceTemplate(
k3sManualManifestsDir + "hcloud-encryption-secret.yaml",
"hcloud-encryption-secret.template.yaml",
resourcePath = hetznerCSIResourceDir,
posixFilePermission = "644",
values = mapOf(
"HETZNER_ENCRYPTION_PASSPHRASE" to encryptionPassphrase.plain()
))
cmd("kubectl apply -f hcloud-encryption-secret.yaml", k3sManualManifestsDir)
applyHetznerCSIFileFromResource(File(k3sManualManifestsDir, "hcloud-encrypted-storage-class.yaml"))
}
private fun Prov.createHetznerCSIFileFromResource(
file: File,
posixFilePermission: FileMode? = "644"
) = task {
createFileFromResource(
file.path,
file.name,
hetznerCSIResourceDir,
posixFilePermission,
sudo = true
)
}
private fun Prov.applyHetznerCSIFileFromResource(file: File, posixFilePermission: FileMode? = "644") = task {
createHetznerCSIFileFromResource(file, posixFilePermission)
cmd("kubectl apply -f ${file.path}", sudo = true)
}
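
A hedged usage sketch of the new entry point; it assumes the two secrets are already available as provs Secret values (building a Secret directly from a plain string, as below, is for illustration only and is not how secrets should normally be sourced):

    // Sketch only: applies the Hetzner CSI driver plus the encrypted storage class.
    fun Prov.provisionHetznerCsiSketch() = task {
        val apiToken = Secret("hcloud-api-token-placeholder")                    // placeholder value
        val encryptionPassphrase = Secret("encryption-passphrase-placeholder")   // placeholder value
        provisionHetznerCSIForK8s(apiToken, encryptionPassphrase)
    }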

@ -0,0 +1,31 @@
package org.domaindrivenarchitecture.provs.server.infrastructure
import com.charleskorn.kaml.MissingRequiredPropertyException
import org.domaindrivenarchitecture.provs.configuration.domain.ConfigFileName
import org.domaindrivenarchitecture.provs.framework.core.readFromFile
import org.domaindrivenarchitecture.provs.framework.core.toYaml
import org.domaindrivenarchitecture.provs.framework.core.yamlToType
import org.domaindrivenarchitecture.provs.server.domain.hetzner_csi.HetznerCSIConfig
import org.domaindrivenarchitecture.provs.server.domain.hetzner_csi.HetznerCSIConfigHolder
import java.io.File
import java.io.FileWriter
private const val DEFAULT_CONFIG_FILE = "server-config.yaml"
fun findHetznerCSIConfig(fileName: ConfigFileName? = null): HetznerCSIConfig? {
val filePath = fileName?.fileName ?: DEFAULT_CONFIG_FILE
return if(File(filePath).exists()) {
try {
readFromFile(filePath).yamlToType<HetznerCSIConfigHolder>().hetzner
} catch (e: MissingRequiredPropertyException) {
if (e.message.contains("Property 'hetzner'")) null else throw e
}
} else {
null
}
}
@Suppress("unused")
internal fun writeConfig(config: HetznerCSIConfigHolder, fileName: String = "hetzner-config.yaml") =
FileWriter(fileName).use { it.write(config.toYaml()) }
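
As a sketch of how this repository function is meant to be combined with secret resolution (mirroring the CLI wiring earlier in this change), assuming ConfigFileName simply wraps a plain file name:

    // Sketch only: read the optional hetzner section and resolve its secrets; null means "section absent".
    fun loadHetznerCsiConfigSketch(): HetznerCSIConfigResolved? =
        findHetznerCSIConfig(ConfigFileName("server-config.yaml"))?.resolveSecret()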

@ -1,39 +0,0 @@
package org.domaindrivenarchitecture.provs.server.domain
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.core.docker.provideContainer
import org.domaindrivenarchitecture.provs.framework.core.echoCommandForTextWithNewlinesReplaced
import org.domaindrivenarchitecture.provs.framework.core.repeatTaskUntilSuccess
/**
* Runs a k3s server and a k3s agent as containers.
* Copies the kubeconfig from container to the default location: $HOME/.kube/config
*/
fun Prov.installK3sAsContainers(token: String = "12345678901234") = task {
cmd("docker volume create k3s-server")
provideContainer("k3s-server", "rancher/k3s", command = "server --cluster-init", options =
"-d --privileged --tmpfs /run --tmpfs /var/run " +
"-e K3S_TOKEN=$token -e K3S_KUBECONFIG_OUTPUT=./kubeconfig.yaml -e K3S_KUBECONFIG_MODE=666 " +
"-v k3s-server:/var/lib/rancher/k3s:z -p 6443:6443 -p 80:80 -p 443:443 " +
"--ulimit nproc=65535 --ulimit nofile=65535:65535")
// wait for config file
cmd("export timeout=60; while [ ! -f /var/lib/docker/volumes/k3s-server/_data/server/kubeconfig.yaml ]; do if [ \"${'$'}timeout\" == 0 ]; then echo \"ERROR: Timeout while waiting for file.\"; break; fi; sleep 1; ((timeout--)); done")
sh("""
mkdir -p ${'$'}HOME/.kube/
cp /var/lib/docker/volumes/k3s-server/_data/server/kubeconfig.yaml ${'$'}HOME/.kube/config
""".trimIndent())
}
/**
* Apply a config to kubernetes.
* Prerequisite: Kubectl has to be installed
*/
fun Prov.applyK8sConfig(configAsYaml: String, kubectlCommand: String = "kubectl") = task {
repeatTaskUntilSuccess(6, 10) {
cmd(echoCommandForTextWithNewlinesReplaced(configAsYaml) + " | $kubectlCommand apply -f -")
}
}

@ -10,7 +10,9 @@ import java.io.File
// ----------------------------------- versions --------------------------------

-const val K3S_VERSION = "v1.23.6+k3s1"
+// when updating this version, it is recommended to update also file k3s-install.sh as well as traefik.yaml in this repo
+// (both files in: src/main/resources/org/domaindrivenarchitecture/provs/server/infrastructure/k3s/)
+const val K3S_VERSION = "v1.29.1+k3s2"

// ----------------------------------- directories --------------------------------

const val k3sManualManifestsDir = "/etc/rancher/k3s/manifests/"
@ -31,12 +33,12 @@ private val k3sMiddleWareHttpsRedirect = File(k3sManualManifestsDir, "middleware
private val certManagerDeployment = File(k3sManualManifestsDir, "cert-manager.yaml")
private val certManagerIssuer = File(k3sManualManifestsDir, "le-issuer.yaml")

-private val k3sEcho = File(k3sManualManifestsDir, "echo.yaml")
+private val k3sEchoWithTls = File(k3sManualManifestsDir, "echo-tls.yaml")
+private val k3sEchoNoTls = File(k3sManualManifestsDir, "echo-no-tls.yaml")

private val selfSignedCertificate = File(k3sManualManifestsDir, "selfsigned-certificate.yaml")
private val localPathProvisionerConfig = File(k3sManualManifestsDir, "local-path-provisioner-config.yaml")

// ----------------------------------- public functions --------------------------------

fun Prov.testConfigExists(): Boolean {
@ -49,7 +51,11 @@ fun Prov.deprovisionK3sInfra() = task {
    deleteFile(certManagerDeployment.path, sudo = true)
    deleteFile(certManagerIssuer.path, sudo = true)
    deleteFile(k3sKubeConfig.path, sudo = true)
-   cmd("k3s-uninstall.sh")
+   val k3sUninstallScript = "k3s-uninstall.sh"
+   if (chk("which $k3sUninstallScript")) {
+       cmd(k3sUninstallScript)
+   }
}
@ -95,7 +101,7 @@ fun Prov.installK3s(k3sConfig: K3sConfig): ProvResult {
    // metallb
    applyK3sFileFromResource(File(k3sManualManifestsDir, "metallb-0.13.7-native-manifest.yaml"))
-   repeatTaskUntilSuccess(6, 10) {
+   repeatTaskUntilSuccess(10, 10) {
        applyK3sFileFromResourceTemplate(
            File(k3sManualManifestsDir, "metallb-config.yaml"),
            k3sConfigMap,
@ -117,8 +123,9 @@ fun Prov.installK3s(k3sConfig: K3sConfig): ProvResult {
        applyK3sFileFromResource(k3sMiddleWareHttpsRedirect)
    }

+   // other
    applyK3sFileFromResource(localPathProvisionerConfig)
-   // TODO: jem 2022-11-25: Why do we need sudo here??
    cmd("kubectl set env deployment -n kube-system local-path-provisioner DEPLOY_DATE=\"$(date)\"", sudo = true)

    cmd("ln -sf $k3sKubeConfig " + k8sCredentialsDir + "admin.conf", sudo = true)
@ -144,7 +151,8 @@ fun Prov.provisionK3sCertManager(certmanager: Certmanager) = task {
    }
}

-fun Prov.provisionK3sEcho(fqdn: String, endpoint: CertmanagerEndpoint? = null) = task {
+fun Prov.provisionK3sEcho(fqdn: String, endpoint: CertmanagerEndpoint? = null, withTls: Boolean = false) = task {
+   if (withTls) {
        val endpointName = endpoint?.name?.lowercase()
        val issuer = if (endpointName == null) {
@ -153,8 +161,10 @@ fun Prov.provisionK3sEcho(fqdn: String, endpoint: CertmanagerEndpoint? = null) =
        } else {
            endpointName
        }
-       applyK3sFileFromResourceTemplate(k3sEcho, mapOf("fqdn" to fqdn, "issuer_name" to issuer))
+       applyK3sFileFromResourceTemplate(k3sEchoWithTls, mapOf("fqdn" to fqdn, "issuer_name" to issuer))
+   } else {
+       applyK3sFileFromResource(k3sEchoNoTls)
+   }
}

fun Prov.provisionK3sApplication(applicationFile: ApplicationFile) = task {
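
A short, hedged sketch of the reworked echo provisioning: withTls = false applies the plain echo manifest, while withTls = true renders the TLS template and appears to fall back to a default issuer when no endpoint is given:

    // Sketch only: both variants of the echo deployment.
    fun Prov.provisionEchoSketch(fqdn: String, endpoint: CertmanagerEndpoint?) = task {
        provisionK3sEcho(fqdn, withTls = false)            // plain HTTP echo, no certificate
        provisionK3sEcho(fqdn, endpoint, withTls = true)   // TLS echo via cert-manager issuer
    }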

@ -0,0 +1,15 @@
package org.domaindrivenarchitecture.provs.server.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.ubuntu.web.base.downloadFromURL
const val K9S_VERSION = "v0.32.5"
fun Prov.installK9s() = task {
if (cmdNoEval("k9s version").out?.contains(K9S_VERSION) != true) {
downloadFromURL("https://github.com/derailed/k9s/releases/download/$K9S_VERSION/k9s_linux_amd64.deb", "k9s_linux_amd64.deb", "/tmp")
cmd("sudo dpkg -i k9s_linux_amd64.deb", "/tmp")
}
}
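
The guard keeps the task idempotent: the download and dpkg install only run when "k9s version" does not already report the pinned release. A parameterized variant, shown purely as a sketch and using the same calls that appear above, would look like this:

    // Sketch only: same pattern as installK9s, with the version passed in.
    fun Prov.installK9sVersionSketch(version: String) = task {
        if (cmdNoEval("k9s version").out?.contains(version) != true) {
            downloadFromURL(
                "https://github.com/derailed/k9s/releases/download/$version/k9s_linux_amd64.deb",
                "k9s_linux_amd64.deb",
                "/tmp"
            )
            cmd("sudo dpkg -i k9s_linux_amd64.deb", "/tmp")
        }
    }

Together with the new k9 alias from the CLI-convenience change, this makes the pinned k9s release usable against the admin kubeconfig on the server.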

@ -1,10 +1,5 @@
package org.domaindrivenarchitecture.provs.syspec.infrastructure

-import aws.sdk.kotlin.services.s3.S3Client
-import aws.sdk.kotlin.services.s3.model.ListObjectsRequest
-import aws.sdk.kotlin.services.s3.model.ListObjectsResponse
-import aws.smithy.kotlin.runtime.time.Instant
-import kotlinx.coroutines.runBlocking
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.core.ProvResult
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.checkDir
@ -13,7 +8,6 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.isPackag
import org.domaindrivenarchitecture.provs.syspec.domain.*
import java.text.ParseException
import java.text.SimpleDateFormat
-import java.time.Duration
import java.util.*
import java.util.concurrent.TimeUnit
@ -29,7 +23,6 @@ fun Prov.verifySpecConfig(conf: SyspecConfig) = task {
    conf.netcat?.let { task("NetcatSpecs") { for (spec in conf.netcat) verify(spec) } }
    conf.socket?.let { task("SocketSpecs") { for (spec in conf.socket) verify(spec) } }
    conf.certificate?.let { task("CertificateFileSpecs") { for (spec in conf.certificate) verify(spec) } }
-   conf.s3?.let { task("CertificateFileSpecs") { for (spec in conf.s3) verify(spec) } }
}

// ------------------------------- verification functions for individual specs --------------------------------
@ -112,27 +105,6 @@ fun Prov.verify(cert: CertificateFileSpec) {
    }
}

-fun Prov.verify(s3ObjectSpec: S3ObjectSpec) {
-    val (bucket, prefix, maxAge) = s3ObjectSpec
-    val expectedAge = Duration.ofHours(s3ObjectSpec.age)
-    val latestObject = getS3Objects(bucket, prefix).contents?.maxByOrNull { it.lastModified ?: Instant.fromEpochSeconds(0) }
-
-    if (latestObject == null) {
-        verify(false, "Could not retrieve an s3 object with prefix $prefix")
-    } else {
-        // convert to java.time.Instant for easier comparison
-        val lastModified = java.time.Instant.ofEpochSecond(latestObject.lastModified?.epochSeconds ?: 0)
-        val actualAge = Duration.between(lastModified, java.time.Instant.now())
-        verify(
-            actualAge <= expectedAge,
-            "Age is ${actualAge.toHours()} h (expected: <= $maxAge) for latest file with prefix \"$prefix\" " +
-                "--- modified date: $lastModified - size: ${(latestObject.size)} B - key: ${latestObject.key}"
-        )
-    }
-}

// -------------------------- helper functions ---------------------------------
@ -215,14 +187,3 @@ private fun Prov.verifyCertExpiration(enddate: String?, certName: String, expira
        )
    }
}

-private fun getS3Objects(bucketName: String, prefixIn: String): ListObjectsResponse {
-    val request = ListObjectsRequest { bucket = bucketName; prefix = prefixIn }
-    return runBlocking {
-        S3Client { region = "eu-central-1" }.use { s3 ->
-            s3.listObjects(request)
-        }
-    }
-}

@ -8,7 +8,7 @@ function usage() {
function main() {
    local cluster_name="${1}";
-   local domain_name="${2:-meissa-gmbh.de}";
+   local domain_name="${2:-meissa.de}";

    /usr/local/bin/k3s-create-context.sh ${cluster_name} ${domain_name}
    kubectl config use-context ${cluster_name}

@ -4,8 +4,9 @@ set -o noglob
function main() {
    local cluster_name="${1}"; shift
+   local domain_name="${1:-meissa.de}"; shift

-   ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@${cluster_name}.meissa-gmbh.de -L 8002:localhost:8002 -L 6443:192.168.5.1:6443
+   ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@${cluster_name}.${domain_name} -L 8002:localhost:8002 -L 6443:192.168.5.1:6443
}

main $1

@ -4,8 +4,9 @@ set -o noglob
function main() {
    local cluster_name="${1}"; shift
+   local domain_name="${1:-meissa.de}"; shift

-   ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@${cluster_name}.meissa-gmbh.de
+   ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@${cluster_name}.${domain_name}
}

main $1

@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: hcloud
namespace: kube-system
stringData:
token: $HETZNER_API_TOKEN

@ -0,0 +1,401 @@
# Version 2.6.0
# Source: hcloud-csi/templates/controller/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: hcloud-csi-controller
namespace: "kube-system"
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: controller
automountServiceAccountToken: true
---
# Source: hcloud-csi/templates/core/storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: hcloud-volumes
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: csi.hetzner.cloud
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
reclaimPolicy: "Delete"
---
# Source: hcloud-csi/templates/controller/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hcloud-csi-controller
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: controller
rules:
# attacher
- apiGroups: [""]
resources: [persistentvolumes]
verbs: [get, list, watch, update, patch]
- apiGroups: [""]
resources: [nodes]
verbs: [get, list, watch]
- apiGroups: [csi.storage.k8s.io]
resources: [csinodeinfos]
verbs: [get, list, watch]
- apiGroups: [storage.k8s.io]
resources: [csinodes]
verbs: [get, list, watch]
- apiGroups: [storage.k8s.io]
resources: [volumeattachments]
verbs: [get, list, watch, update, patch]
- apiGroups: [storage.k8s.io]
resources: [volumeattachments/status]
verbs: [patch]
# provisioner
- apiGroups: [""]
resources: [secrets]
verbs: [get, list]
- apiGroups: [""]
resources: [persistentvolumes]
verbs: [get, list, watch, create, delete, patch]
- apiGroups: [""]
resources: [persistentvolumeclaims, persistentvolumeclaims/status]
verbs: [get, list, watch, update, patch]
- apiGroups: [storage.k8s.io]
resources: [storageclasses]
verbs: [get, list, watch]
- apiGroups: [""]
resources: [events]
verbs: [list, watch, create, update, patch]
- apiGroups: [snapshot.storage.k8s.io]
resources: [volumesnapshots]
verbs: [get, list]
- apiGroups: [snapshot.storage.k8s.io]
resources: [volumesnapshotcontents]
verbs: [get, list]
# resizer
- apiGroups: [""]
resources: [pods]
verbs: [get, list, watch]
# node
- apiGroups: [""]
resources: [events]
verbs: [get, list, watch, create, update, patch]
---
# Source: hcloud-csi/templates/controller/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hcloud-csi-controller
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hcloud-csi-controller
subjects:
- kind: ServiceAccount
name: hcloud-csi-controller
namespace: "kube-system"
---
# Source: hcloud-csi/templates/controller/service.yaml
apiVersion: v1
kind: Service
metadata:
name: hcloud-csi-controller-metrics
namespace: "kube-system"
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: controller
spec:
ports:
- name: metrics
port: 9189
selector:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: controller
---
# Source: hcloud-csi/templates/node/service.yaml
apiVersion: v1
kind: Service
metadata:
name: hcloud-csi-node-metrics
namespace: "kube-system"
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: node
spec:
ports:
- name: metrics
port: 9189
selector:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: node
---
# Source: hcloud-csi/templates/node/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: hcloud-csi-node
namespace: "kube-system"
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: node
app: hcloud-csi
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app: hcloud-csi
template:
metadata:
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: node
app: hcloud-csi
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: instance.hetzner.cloud/is-root-server
operator: NotIn
values:
- "true"
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
securityContext:
fsGroup: 1001
initContainers:
containers:
- name: csi-node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0
imagePullPolicy: IfNotPresent
args:
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi.hetzner.cloud/socket
volumeMounts:
- name: plugin-dir
mountPath: /run/csi
- name: registration-dir
mountPath: /registration
resources:
limits: {}
requests: {}
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.9.0
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /run/csi
name: plugin-dir
resources:
limits: {}
requests: {}
- name: hcloud-csi-driver
image: docker.io/hetznercloud/hcloud-csi-driver:v2.6.0 # x-release-please-version
imagePullPolicy: IfNotPresent
command: [/bin/hcloud-csi-driver-node]
volumeMounts:
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: plugin-dir
mountPath: /run/csi
- name: device-dir
mountPath: /dev
securityContext:
privileged: true
env:
- name: CSI_ENDPOINT
value: unix:///run/csi/socket
- name: METRICS_ENDPOINT
value: "0.0.0.0:9189"
- name: ENABLE_METRICS
value: "true"
ports:
- containerPort: 9189
name: metrics
- name: healthz
protocol: TCP
containerPort: 9808
resources:
limits: {}
requests: {}
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 10
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 3
httpGet:
path: /healthz
port: healthz
volumes:
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/csi.hetzner.cloud/
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: device-dir
hostPath:
path: /dev
type: Directory
---
# Source: hcloud-csi/templates/controller/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: hcloud-csi-controller
namespace: "kube-system"
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: controller
app: hcloud-csi-controller
spec:
replicas: 1
strategy:
type: RollingUpdate
selector:
matchLabels:
app: hcloud-csi-controller
template:
metadata:
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: controller
app: hcloud-csi-controller
spec:
serviceAccountName: hcloud-csi-controller
securityContext:
fsGroup: 1001
initContainers:
containers:
- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v4.1.0
imagePullPolicy: IfNotPresent
resources:
limits: {}
requests: {}
args:
- --default-fstype=ext4
volumeMounts:
- name: socket-dir
mountPath: /run/csi
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.7.0
imagePullPolicy: IfNotPresent
resources:
limits: {}
requests: {}
volumeMounts:
- name: socket-dir
mountPath: /run/csi
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0
imagePullPolicy: IfNotPresent
resources:
limits: {}
requests: {}
args:
- --feature-gates=Topology=true
- --default-fstype=ext4
volumeMounts:
- name: socket-dir
mountPath: /run/csi
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.9.0
imagePullPolicy: IfNotPresent
resources:
limits: {}
requests: {}
volumeMounts:
- mountPath: /run/csi
name: socket-dir
- name: hcloud-csi-driver
image: docker.io/hetznercloud/hcloud-csi-driver:v2.6.0 # x-release-please-version
imagePullPolicy: IfNotPresent
command: [/bin/hcloud-csi-driver-controller]
env:
- name: CSI_ENDPOINT
value: unix:///run/csi/socket
- name: METRICS_ENDPOINT
value: "0.0.0.0:9189"
- name: ENABLE_METRICS
value: "true"
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: HCLOUD_TOKEN
valueFrom:
secretKeyRef:
name: hcloud
key: token
resources:
limits: {}
requests: {}
ports:
- name: metrics
containerPort: 9189
- name: healthz
protocol: TCP
containerPort: 9808
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 10
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 3
httpGet:
path: /healthz
port: healthz
volumeMounts:
- name: socket-dir
mountPath: /run/csi
volumes:
- name: socket-dir
emptyDir: {}
---
# Source: hcloud-csi/templates/core/csidriver.yaml
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: csi.hetzner.cloud
spec:
attachRequired: true
fsGroupPolicy: File
podInfoOnMount: true
volumeLifecycleModes:
- Persistent

@ -0,0 +1,11 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: hcloud-volumes-encrypted
provisioner: csi.hetzner.cloud
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
csi.storage.k8s.io/node-publish-secret-name: encryption-secret
csi.storage.k8s.io/node-publish-secret-namespace: kube-system

@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: encryption-secret
namespace: kube-system
stringData:
encryption-passphrase: $HETZNER_ENCRYPTION_PASSPHRASE

@ -0,0 +1,40 @@
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
name: echo-ingress
spec:
ingressClassName: traefik
rules:
- http:
paths:
- pathType: Exact
path: /echo/ # traefik echo pod needs the trailing slash, otherwise it'll return bad request
backend:
service:
name: echo-service
port:
number: 80
---
kind: Pod
apiVersion: v1
metadata:
name: echo-app
labels:
app: echo
spec:
containers:
- name: echo-app
image: traefik/whoami
---
kind: Service
apiVersion: v1
metadata:
name: echo-service
spec:
selector:
app: echo
ports:
- port: 80 # Default port for image

@ -3,15 +3,15 @@ apiVersion: networking.k8s.io/v1
metadata:
  name: echo-ingress
  annotations:
-    kubernetes.io/ingress.class: "traefik"
    cert-manager.io/cluster-issuer: ${issuer_name}
spec:
+  ingressClassName: traefik
  rules:
  - host: ${fqdn}
    http:
      paths:
-      - pathType: Prefix
-        path: /echo
+      - pathType: Exact
+        path: /echo/ # traefik echo pod needs the trailing slash, otherwise it'll return bad request
        backend:
          service:
            name: echo-service

@ -1,4 +1,6 @@
#!/bin/sh #!/bin/sh
# File taken from https://github.com/k3s-io/k3s/blob/master/install.sh
set -e set -e
set -o noglob set -o noglob
@ -18,7 +20,7 @@ set -o noglob
# Environment variables which begin with K3S_ will be preserved for the # Environment variables which begin with K3S_ will be preserved for the
# systemd service to use. Setting K3S_URL without explicitly setting # systemd service to use. Setting K3S_URL without explicitly setting
# a systemd exec command will default the command to "agent", and we # a systemd exec command will default the command to "agent", and we
# enforce that K3S_TOKEN or K3S_CLUSTER_SECRET is also set. # enforce that K3S_TOKEN is also set.
# #
# - INSTALL_K3S_SKIP_DOWNLOAD # - INSTALL_K3S_SKIP_DOWNLOAD
# If set to true will not download k3s hash or binary. # If set to true will not download k3s hash or binary.
@ -44,6 +46,10 @@ set -o noglob
# Commit of k3s to download from temporary cloud storage. # Commit of k3s to download from temporary cloud storage.
# * (for developer & QA use) # * (for developer & QA use)
# #
# - INSTALL_K3S_PR
# PR build of k3s to download from Github Artifacts.
# * (for developer & QA use)
#
# - INSTALL_K3S_BIN_DIR # - INSTALL_K3S_BIN_DIR
# Directory to install k3s binary, links, and uninstall script to, or use # Directory to install k3s binary, links, and uninstall script to, or use
# /usr/local/bin as the default # /usr/local/bin as the default
@ -92,7 +98,8 @@ set -o noglob
# Defaults to 'stable'. # Defaults to 'stable'.
GITHUB_URL=https://github.com/k3s-io/k3s/releases GITHUB_URL=https://github.com/k3s-io/k3s/releases
STORAGE_URL=https://storage.googleapis.com/k3s-ci-builds GITHUB_PR_URL=""
STORAGE_URL=https://k3s-ci-builds.s3.amazonaws.com
DOWNLOADER= DOWNLOADER=
# --- helper functions for logs --- # --- helper functions for logs ---
@ -170,8 +177,8 @@ setup_env() {
if [ -z "${K3S_URL}" ]; then if [ -z "${K3S_URL}" ]; then
CMD_K3S=server CMD_K3S=server
else else
if [ -z "${K3S_TOKEN}" ] && [ -z "${K3S_TOKEN_FILE}" ] && [ -z "${K3S_CLUSTER_SECRET}" ]; then if [ -z "${K3S_TOKEN}" ] && [ -z "${K3S_TOKEN_FILE}" ]; then
fatal "Defaulted k3s exec command to 'agent' because K3S_URL is defined, but K3S_TOKEN, K3S_TOKEN_FILE or K3S_CLUSTER_SECRET is not defined." fatal "Defaulted k3s exec command to 'agent' because K3S_URL is defined, but K3S_TOKEN or K3S_TOKEN_FILE is not defined."
fi fi
CMD_K3S=agent CMD_K3S=agent
fi fi
@ -217,11 +224,7 @@ setup_env() {
if [ -n "${INSTALL_K3S_TYPE}" ]; then if [ -n "${INSTALL_K3S_TYPE}" ]; then
SYSTEMD_TYPE=${INSTALL_K3S_TYPE} SYSTEMD_TYPE=${INSTALL_K3S_TYPE}
else else
if [ "${CMD_K3S}" = server ]; then
SYSTEMD_TYPE=notify SYSTEMD_TYPE=notify
else
SYSTEMD_TYPE=exec
fi
fi fi
# --- use binary install directory if defined or create default --- # --- use binary install directory if defined or create default ---
@ -273,8 +276,14 @@ setup_env() {
} }
# --- check if skip download environment variable set --- # --- check if skip download environment variable set ---
can_skip_download() { can_skip_download_binary() {
if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ]; then if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ] && [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != binary ]; then
return 1
fi
}
can_skip_download_selinux() {
if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ] && [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != selinux ]; then
return 1 return 1
fi fi
} }
@ -304,6 +313,10 @@ setup_verify_arch() {
ARCH=arm64 ARCH=arm64
SUFFIX=-${ARCH} SUFFIX=-${ARCH}
;; ;;
s390x)
ARCH=s390x
SUFFIX=-${ARCH}
;;
aarch64) aarch64)
ARCH=arm64 ARCH=arm64
SUFFIX=-${ARCH} SUFFIX=-${ARCH}
@ -331,6 +344,7 @@ verify_downloader() {
setup_tmp() { setup_tmp() {
TMP_DIR=$(mktemp -d -t k3s-install.XXXXXXXXXX) TMP_DIR=$(mktemp -d -t k3s-install.XXXXXXXXXX)
TMP_HASH=${TMP_DIR}/k3s.hash TMP_HASH=${TMP_DIR}/k3s.hash
TMP_ZIP=${TMP_DIR}/k3s.zip
TMP_BIN=${TMP_DIR}/k3s.bin TMP_BIN=${TMP_DIR}/k3s.bin
cleanup() { cleanup() {
code=$? code=$?
@ -344,7 +358,10 @@ setup_tmp() {
# --- use desired k3s version if defined or find version from channel --- # --- use desired k3s version if defined or find version from channel ---
get_release_version() { get_release_version() {
if [ -n "${INSTALL_K3S_COMMIT}" ]; then if [ -n "${INSTALL_K3S_PR}" ]; then
VERSION_K3S="PR ${INSTALL_K3S_PR}"
get_pr_artifact_url
elif [ -n "${INSTALL_K3S_COMMIT}" ]; then
VERSION_K3S="commit ${INSTALL_K3S_COMMIT}" VERSION_K3S="commit ${INSTALL_K3S_COMMIT}"
elif [ -n "${INSTALL_K3S_VERSION}" ]; then elif [ -n "${INSTALL_K3S_VERSION}" ]; then
VERSION_K3S=${INSTALL_K3S_VERSION} VERSION_K3S=${INSTALL_K3S_VERSION}
@ -366,10 +383,49 @@ get_release_version() {
info "Using ${VERSION_K3S} as release" info "Using ${VERSION_K3S} as release"
} }
# --- get k3s-selinux version ---
get_k3s_selinux_version() {
available_version="k3s-selinux-1.2-2.${rpm_target}.noarch.rpm"
info "Finding available k3s-selinux versions"
# run verify_downloader in case the binary installation was skipped
verify_downloader curl || verify_downloader wget || fatal 'Can not find curl or wget for downloading files'
case $DOWNLOADER in
curl)
DOWNLOADER_OPTS="-s"
;;
wget)
DOWNLOADER_OPTS="-q -O -"
;;
*)
fatal "Incorrect downloader executable '$DOWNLOADER'"
;;
esac
for i in {1..3}; do
set +e
if [ "${rpm_channel}" = "testing" ]; then
version=$(timeout 5 ${DOWNLOADER} ${DOWNLOADER_OPTS} https://api.github.com/repos/k3s-io/k3s-selinux/releases | grep browser_download_url | awk '{ print $2 }' | grep -oE "[^\/]+${rpm_target}\.noarch\.rpm" | head -n 1)
else
version=$(timeout 5 ${DOWNLOADER} ${DOWNLOADER_OPTS} https://api.github.com/repos/k3s-io/k3s-selinux/releases/latest | grep browser_download_url | awk '{ print $2 }' | grep -oE "[^\/]+${rpm_target}\.noarch\.rpm")
fi
set -e
if [ "${version}" != "" ]; then
break
fi
sleep 1
done
if [ "${version}" == "" ]; then
warn "Failed to get available versions of k3s-selinux..defaulting to ${available_version}"
return
fi
available_version=${version}
}
# --- download from github url --- # --- download from github url ---
download() { download() {
[ $# -eq 2 ] || fatal 'download needs exactly 2 arguments' [ $# -eq 2 ] || fatal 'download needs exactly 2 arguments'
set +e
case $DOWNLOADER in case $DOWNLOADER in
curl) curl)
curl -o $1 -sfL $2 curl -o $1 -sfL $2
@ -384,10 +440,17 @@ download() {
# Abort if download command failed # Abort if download command failed
[ $? -eq 0 ] || fatal 'Download failed' [ $? -eq 0 ] || fatal 'Download failed'
set -e
} }
# --- download hash from github url --- # --- download hash from github url ---
download_hash() { download_hash() {
if [ -n "${INSTALL_K3S_PR}" ]; then
info "Downloading hash ${GITHUB_PR_URL}"
curl -o ${TMP_ZIP} -H "Authorization: Bearer $GITHUB_TOKEN" -L ${GITHUB_PR_URL}
unzip -p ${TMP_ZIP} k3s.sha256sum > ${TMP_HASH}
sed -i 's/dist\/artifacts\/k3s/k3s/g' ${TMP_HASH}
else
if [ -n "${INSTALL_K3S_COMMIT}" ]; then if [ -n "${INSTALL_K3S_COMMIT}" ]; then
HASH_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}.sha256sum HASH_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}.sha256sum
else else
@ -395,6 +458,7 @@ download_hash() {
fi fi
info "Downloading hash ${HASH_URL}" info "Downloading hash ${HASH_URL}"
download ${TMP_HASH} ${HASH_URL} download ${TMP_HASH} ${HASH_URL}
fi
HASH_EXPECTED=$(grep " k3s${SUFFIX}$" ${TMP_HASH}) HASH_EXPECTED=$(grep " k3s${SUFFIX}$" ${TMP_HASH})
HASH_EXPECTED=${HASH_EXPECTED%%[[:blank:]]*} HASH_EXPECTED=${HASH_EXPECTED%%[[:blank:]]*}
} }
@ -411,9 +475,48 @@ installed_hash_matches() {
return 1 return 1
} }
# Use the GitHub API to identify the artifact associated with a given PR
get_pr_artifact_url() {
GITHUB_API_URL=https://api.github.com/repos/k3s-io/k3s
# Check if jq is installed
if ! [ -x "$(command -v jq)" ]; then
echo "jq is required to use INSTALL_K3S_PR. Please install jq and try again"
exit 1
fi
if [ -z "${GITHUB_TOKEN}" ]; then
fatal "Installing PR builds requires GITHUB_TOKEN with k3s-io/k3s repo authorization"
fi
# GET request to the GitHub API to retrieve the latest commit SHA from the pull request
COMMIT_ID=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" "$GITHUB_API_URL/pulls/$INSTALL_K3S_PR" | jq -r '.head.sha')
# GET request to the GitHub API to retrieve the Build workflow associated with the commit
wf_raw=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" "$GITHUB_API_URL/commits/$COMMIT_ID/check-runs")
build_workflow=$(printf "%s" "$wf_raw" | jq -r '.check_runs[] | select(.name == "build / Build")')
# Extract the Run ID from the build workflow and lookup artifacts associated with the run
RUN_ID=$(echo "$build_workflow" | jq -r ' .details_url' | awk -F'/' '{print $(NF-2)}')
# Extract the artifact ID for the "k3s" artifact
artifacts=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" "$GITHUB_API_URL/actions/runs/$RUN_ID/artifacts")
artifacts_url=$(echo "$artifacts" | jq -r '.artifacts[] | select(.name == "k3s") | .archive_download_url')
GITHUB_PR_URL=$artifacts_url
}
# --- download binary from github url --- # --- download binary from github url ---
download_binary() { download_binary() {
if [ -n "${INSTALL_K3S_COMMIT}" ]; then if [ -n "${INSTALL_K3S_PR}" ]; then
# Since Binary and Hash are zipped together, check if TMP_ZIP already exists
if ! [ -f ${TMP_ZIP} ]; then
info "Downloading K3s artifact ${GITHUB_PR_URL}"
curl -o ${TMP_ZIP} -H "Authorization: Bearer $GITHUB_TOKEN" -L ${GITHUB_PR_URL}
fi
# extract k3s binary from zip
unzip -p ${TMP_ZIP} k3s > ${TMP_BIN}
return
elif [ -n "${INSTALL_K3S_COMMIT}" ]; then
BIN_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT} BIN_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}
else else
BIN_URL=${GITHUB_URL}/download/${VERSION_K3S}/k3s${SUFFIX} BIN_URL=${GITHUB_URL}/download/${VERSION_K3S}/k3s${SUFFIX}
@ -460,18 +563,35 @@ setup_selinux() {
fi fi
[ -r /etc/os-release ] && . /etc/os-release [ -r /etc/os-release ] && . /etc/os-release
if [ "${ID_LIKE%%[ ]*}" = "suse" ]; then if [ `expr "${ID_LIKE}" : ".*suse.*"` != 0 ]; then
rpm_target=sle rpm_target=sle
rpm_site_infix=microos rpm_site_infix=microos
package_installer=zypper package_installer=zypper
if [ "${ID_LIKE:-}" = suse ] && ( [ "${VARIANT_ID:-}" = sle-micro ] || [ "${ID:-}" = sle-micro ] ); then
rpm_target=sle
rpm_site_infix=slemicro
package_installer=zypper
fi
elif [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ]; then
rpm_target=coreos
rpm_site_infix=coreos
package_installer=rpm-ostree
elif [ "${VERSION_ID%%.*}" = "7" ]; then elif [ "${VERSION_ID%%.*}" = "7" ]; then
rpm_target=el7 rpm_target=el7
rpm_site_infix=centos/7 rpm_site_infix=centos/7
package_installer=yum package_installer=yum
else elif [ "${VERSION_ID%%.*}" = "8" ] || [ "${VERSION_ID%%.*}" -gt "36" ]; then
rpm_target=el8 rpm_target=el8
rpm_site_infix=centos/8 rpm_site_infix=centos/8
package_installer=yum package_installer=yum
else
rpm_target=el9
rpm_site_infix=centos/9
package_installer=yum
fi
if [ "${package_installer}" = "rpm-ostree" ] && [ -x /bin/yum ]; then
package_installer=yum
fi fi
if [ "${package_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then if [ "${package_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then
@ -480,15 +600,17 @@ setup_selinux() {
policy_hint="please install: policy_hint="please install:
${package_installer} install -y container-selinux ${package_installer} install -y container-selinux
${package_installer} install -y https://${rpm_site}/k3s/${rpm_channel}/common/${rpm_site_infix}/noarch/k3s-selinux-0.4-1.${rpm_target}.noarch.rpm ${package_installer} install -y https://${rpm_site}/k3s/${rpm_channel}/common/${rpm_site_infix}/noarch/${available_version}
" "
if [ "$INSTALL_K3S_SKIP_SELINUX_RPM" = true ] || can_skip_download || [ ! -d /usr/share/selinux ]; then if [ "$INSTALL_K3S_SKIP_SELINUX_RPM" = true ] || can_skip_download_selinux || [ ! -d /usr/share/selinux ]; then
info "Skipping installation of SELinux RPM" info "Skipping installation of SELinux RPM"
elif [ "${ID_LIKE:-}" != coreos ] && [ "${VARIANT_ID:-}" != coreos ]; then return
install_selinux_rpm ${rpm_site} ${rpm_channel} ${rpm_target} ${rpm_site_infix}
fi fi
get_k3s_selinux_version
install_selinux_rpm ${rpm_site} ${rpm_channel} ${rpm_target} ${rpm_site_infix}
policy_error=fatal policy_error=fatal
if [ "$INSTALL_K3S_SELINUX_WARN" = true ] || [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ]; then if [ "$INSTALL_K3S_SELINUX_WARN" = true ] || [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ]; then
policy_error=warn policy_error=warn
@ -499,7 +621,7 @@ setup_selinux() {
$policy_error "Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, ${policy_hint}" $policy_error "Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, ${policy_hint}"
fi fi
elif [ ! -f /usr/share/selinux/packages/k3s.pp ]; then elif [ ! -f /usr/share/selinux/packages/k3s.pp ]; then
if [ -x /usr/sbin/transactional-update ]; then if [ -x /usr/sbin/transactional-update ] || [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ]; then
warn "Please reboot your machine to activate the changes and avoid data loss." warn "Please reboot your machine to activate the changes and avoid data loss."
else else
$policy_error "Failed to find the k3s-selinux policy, ${policy_hint}" $policy_error "Failed to find the k3s-selinux policy, ${policy_hint}"
@ -508,7 +630,7 @@ setup_selinux() {
} }
install_selinux_rpm() { install_selinux_rpm() {
if [ -r /etc/redhat-release ] || [ -r /etc/centos-release ] || [ -r /etc/oracle-release ] || [ "${ID_LIKE%%[ ]*}" = "suse" ]; then if [ -r /etc/redhat-release ] || [ -r /etc/centos-release ] || [ -r /etc/oracle-release ] || [ -r /etc/fedora-release ] || [ "${ID_LIKE%%[ ]*}" = "suse" ]; then
repodir=/etc/yum.repos.d repodir=/etc/yum.repos.d
if [ -d /etc/zypp/repos.d ]; then if [ -d /etc/zypp/repos.d ]; then
repodir=/etc/zypp/repos.d repodir=/etc/zypp/repos.d
@ -533,9 +655,17 @@ EOF
sle) sle)
rpm_installer="zypper --gpg-auto-import-keys" rpm_installer="zypper --gpg-auto-import-keys"
if [ "${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then if [ "${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then
transactional_update_run="transactional-update --no-selfupdate -d run"
rpm_installer="transactional-update --no-selfupdate -d run ${rpm_installer}" rpm_installer="transactional-update --no-selfupdate -d run ${rpm_installer}"
: "${INSTALL_K3S_SKIP_START:=true}" : "${INSTALL_K3S_SKIP_START:=true}"
fi fi
# create the /var/lib/rpm-state in SLE systems to fix the prein selinux macro
${transactional_update_run} mkdir -p /var/lib/rpm-state
;;
coreos)
rpm_installer="rpm-ostree --idempotent"
# rpm_install_extra_args="--apply-live"
: "${INSTALL_K3S_SKIP_START:=true}"
;; ;;
*) *)
rpm_installer="yum" rpm_installer="yum"
@ -543,6 +673,15 @@ EOF
esac esac
if [ "${rpm_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then if [ "${rpm_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then
rpm_installer=dnf rpm_installer=dnf
fi
if rpm -q --quiet k3s-selinux; then
# remove k3s-selinux module before upgrade to allow container-selinux to upgrade safely
if check_available_upgrades container-selinux ${3} && check_available_upgrades k3s-selinux ${3}; then
MODULE_PRIORITY=$($SUDO semodule --list=full | grep k3s | cut -f1 -d" ")
if [ -n "${MODULE_PRIORITY}" ]; then
$SUDO semodule -X $MODULE_PRIORITY -r k3s || true
fi
fi
fi fi
# shellcheck disable=SC2086 # shellcheck disable=SC2086
$SUDO ${rpm_installer} install -y "k3s-selinux" $SUDO ${rpm_installer} install -y "k3s-selinux"
@ -550,9 +689,28 @@ EOF
return return
} }
check_available_upgrades() {
set +e
case ${2} in
sle)
available_upgrades=$($SUDO zypper -q -t -s 11 se -s -u --type package $1 | tail -n 1 | grep -v "No matching" | awk '{print $3}')
;;
coreos)
# currently rpm-ostree does not support search functionality https://github.com/coreos/rpm-ostree/issues/1877
;;
*)
available_upgrades=$($SUDO yum -q --refresh list $1 --upgrades | tail -n 1 | awk '{print $2}')
;;
esac
set -e
if [ -n "${available_upgrades}" ]; then
return 0
fi
return 1
}
# --- download and verify k3s --- # --- download and verify k3s ---
download_and_verify() { download_and_verify() {
if can_skip_download; then if can_skip_download_binary; then
info 'Skipping k3s download and verify' info 'Skipping k3s download and verify'
verify_k3s_is_executable verify_k3s_is_executable
return return
@ -640,6 +798,27 @@ killtree() {
) 2>/dev/null ) 2>/dev/null
} }
remove_interfaces() {
# Delete network interface(s) that match 'master cni0'
ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do
iface=${iface%%@*}
[ -z "$iface" ] || ip link delete $iface
done
# Delete cni related interfaces
ip link delete cni0
ip link delete flannel.1
ip link delete flannel-v6.1
ip link delete kube-ipvs0
ip link delete flannel-wg
ip link delete flannel-wg-v6
# Restart tailscale
if [ -n "$(command -v tailscale)" ]; then
tailscale set --advertise-routes=
fi
}
getshims() { getshims() {
ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1 ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1
} }
@ -650,7 +829,7 @@ do_unmount_and_remove() {
set +x set +x
while read -r _ path _; do while read -r _ path _; do
case "$path" in $1*) echo "$path" ;; esac case "$path" in $1*) echo "$path" ;; esac
done < /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount "$0" && rm -rf "$0"' done < /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount -f "$0" && rm -rf "$0"'
set -x set -x
} }
@ -663,17 +842,11 @@ do_unmount_and_remove '/run/netns/cni-'
# Remove CNI namespaces # Remove CNI namespaces
ip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns delete ip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns delete
# Delete network interface(s) that match 'master cni0' remove_interfaces
ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do
iface=${iface%%@*}
[ -z "$iface" ] || ip link delete $iface
done
ip link delete cni0
ip link delete flannel.1
ip link delete flannel-v6.1
rm -rf /var/lib/cni/ rm -rf /var/lib/cni/
iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore iptables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | iptables-restore
ip6tables-save | grep -v KUBE- | grep -v CNI- | ip6tables-restore ip6tables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | ip6tables-restore
EOF EOF
$SUDO chmod 755 ${KILLALL_K3S_SH} $SUDO chmod 755 ${KILLALL_K3S_SH}
$SUDO chown root:root ${KILLALL_K3S_SH} $SUDO chown root:root ${KILLALL_K3S_SH}
@ -729,6 +902,9 @@ rm -f ${KILLALL_K3S_SH}
if type yum >/dev/null 2>&1; then if type yum >/dev/null 2>&1; then
yum remove -y k3s-selinux yum remove -y k3s-selinux
rm -f /etc/yum.repos.d/rancher-k3s-common*.repo rm -f /etc/yum.repos.d/rancher-k3s-common*.repo
elif type rpm-ostree >/dev/null 2>&1; then
rpm-ostree uninstall k3s-selinux
rm -f /etc/yum.repos.d/rancher-k3s-common*.repo
elif type zypper >/dev/null 2>&1; then elif type zypper >/dev/null 2>&1; then
uninstall_cmd="zypper remove -y k3s-selinux" uninstall_cmd="zypper remove -y k3s-selinux"
if [ "\${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then if [ "\${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then
@ -787,7 +963,7 @@ TasksMax=infinity
TimeoutStartSec=0 TimeoutStartSec=0
Restart=always Restart=always
RestartSec=5s RestartSec=5s
ExecStartPre=/bin/sh -xc '! /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service' ExecStartPre=/bin/sh -xc '! /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service 2>/dev/null'
ExecStartPre=-/sbin/modprobe br_netfilter ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay ExecStartPre=-/sbin/modprobe overlay
ExecStart=${BIN_DIR}/k3s \\ ExecStart=${BIN_DIR}/k3s \\
@ -827,8 +1003,8 @@ respawn_delay=5
respawn_max=0 respawn_max=0
set -o allexport set -o allexport
if [ -f /etc/environment ]; then source /etc/environment; fi if [ -f /etc/environment ]; then . /etc/environment; fi
if [ -f ${FILE_K3S_ENV} ]; then source ${FILE_K3S_ENV}; fi if [ -f ${FILE_K3S_ENV} ]; then . ${FILE_K3S_ENV}; fi
set +o allexport set +o allexport
EOF EOF
$SUDO chmod 0755 ${FILE_K3S_SERVICE} $SUDO chmod 0755 ${FILE_K3S_SERVICE}
@ -844,11 +1020,16 @@ EOF
# --- write systemd or openrc service file --- # --- write systemd or openrc service file ---
create_service_file() { create_service_file() {
[ "${HAS_SYSTEMD}" = true ] && create_systemd_service_file [ "${HAS_SYSTEMD}" = true ] && create_systemd_service_file && restore_systemd_service_file_context
[ "${HAS_OPENRC}" = true ] && create_openrc_service_file [ "${HAS_OPENRC}" = true ] && create_openrc_service_file
return 0 return 0
} }
restore_systemd_service_file_context() {
$SUDO restorecon -R -i ${FILE_K3S_SERVICE} 2>/dev/null || true
$SUDO restorecon -R -i ${FILE_K3S_ENV} 2>/dev/null || true
}
# --- get hashes of the current k3s bin and service files # --- get hashes of the current k3s bin and service files
get_installed_hashes() { get_installed_hashes() {
$SUDO sha256sum ${BIN_DIR}/k3s ${FILE_K3S_SERVICE} ${FILE_K3S_ENV} 2>&1 || true $SUDO sha256sum ${BIN_DIR}/k3s ${FILE_K3S_SERVICE} ${FILE_K3S_ENV} 2>&1 || true
@ -877,6 +1058,19 @@ openrc_start() {
$SUDO ${FILE_K3S_SERVICE} restart $SUDO ${FILE_K3S_SERVICE} restart
} }
has_working_xtables() {
if $SUDO sh -c "command -v \"$1-save\"" 1> /dev/null && $SUDO sh -c "command -v \"$1-restore\"" 1> /dev/null; then
if $SUDO $1-save 2>/dev/null | grep -q '^-A CNI-HOSTPORT-MASQ -j MASQUERADE$'; then
warn "Host $1-save/$1-restore tools are incompatible with existing rules"
else
return 0
fi
else
info "Host $1-save/$1-restore tools not found"
fi
return 1
}
# --- startup systemd or openrc service --- # --- startup systemd or openrc service ---
service_enable_and_start() { service_enable_and_start() {
if [ -f "/proc/cgroups" ] && [ "$(grep memory /proc/cgroups | while read -r n n n enabled; do echo $enabled; done)" -eq 0 ]; if [ -f "/proc/cgroups" ] && [ "$(grep memory /proc/cgroups | while read -r n n n enabled; do echo $enabled; done)" -eq 0 ];
@ -897,6 +1091,12 @@ service_enable_and_start() {
return return
fi fi
for XTABLES in iptables ip6tables; do
if has_working_xtables ${XTABLES}; then
$SUDO ${XTABLES}-save 2>/dev/null | grep -v KUBE- | grep -iv flannel | $SUDO ${XTABLES}-restore
fi
done
[ "${HAS_SYSTEMD}" = true ] && systemd_start [ "${HAS_SYSTEMD}" = true ] && systemd_start
[ "${HAS_OPENRC}" = true ] && openrc_start [ "${HAS_OPENRC}" = true ] && openrc_start
return 0 return 0
@ -919,3 +1119,4 @@ eval set -- $(escape "${INSTALL_K3S_EXEC}") $(quote "$@")
create_service_file create_service_file
service_enable_and_start service_enable_and_start
} }

@ -1,480 +0,0 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
labels:
app: metallb
name: controller
spec:
allowPrivilegeEscalation: false
allowedCapabilities: []
allowedHostPaths: []
defaultAddCapabilities: []
defaultAllowPrivilegeEscalation: false
fsGroup:
ranges:
- max: 65535
min: 1
rule: MustRunAs
hostIPC: false
hostNetwork: false
hostPID: false
privileged: false
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
ranges:
- max: 65535
min: 1
rule: MustRunAs
seLinux:
rule: RunAsAny
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
volumes:
- configMap
- secret
- emptyDir
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
labels:
app: metallb
name: speaker
spec:
allowPrivilegeEscalation: false
allowedCapabilities:
- NET_RAW
allowedHostPaths: []
defaultAddCapabilities: []
defaultAllowPrivilegeEscalation: false
fsGroup:
rule: RunAsAny
hostIPC: false
hostNetwork: true
hostPID: false
hostPorts:
- max: 7472
min: 7472
- max: 7946
min: 7946
privileged: true
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- secret
- emptyDir
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: metallb
name: speaker
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:controller
rules:
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- services/status
verbs:
- update
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resourceNames:
- controller
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:speaker
rules:
- apiGroups:
- ''
resources:
- services
- endpoints
- nodes
verbs:
- get
- list
- watch
- apiGroups: ["discovery.k8s.io"]
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resourceNames:
- speaker
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: config-watcher
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: pod-lister
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- pods
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- secrets
verbs:
- create
- apiGroups:
- ''
resources:
- secrets
resourceNames:
- memberlist
verbs:
- list
- apiGroups:
- apps
resources:
- deployments
resourceNames:
- controller
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:controller
subjects:
- kind: ServiceAccount
name: controller
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:speaker
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:speaker
subjects:
- kind: ServiceAccount
name: speaker
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: config-watcher
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: config-watcher
subjects:
- kind: ServiceAccount
name: controller
- kind: ServiceAccount
name: speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: pod-lister
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: pod-lister
subjects:
- kind: ServiceAccount
name: speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: controller
subjects:
- kind: ServiceAccount
name: controller
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: metallb
component: speaker
name: speaker
namespace: metallb-system
spec:
selector:
matchLabels:
app: metallb
component: speaker
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
app: metallb
component: speaker
spec:
containers:
- args:
- --port=7472
- --config=config
- --log-level=info
env:
- name: METALLB_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: METALLB_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: METALLB_ML_BIND_ADDR
valueFrom:
fieldRef:
fieldPath: status.podIP
# needed when another software is also using memberlist / port 7946
# when changing this default you also need to update the container ports definition
# and the PodSecurityPolicy hostPorts definition
#- name: METALLB_ML_BIND_PORT
# value: "7946"
- name: METALLB_ML_LABELS
value: "app=metallb,component=speaker"
- name: METALLB_ML_SECRET_KEY
valueFrom:
secretKeyRef:
name: memberlist
key: secretkey
image: quay.io/metallb/speaker:v0.12.1
name: speaker
ports:
- containerPort: 7472
name: monitoring
- containerPort: 7946
name: memberlist-tcp
- containerPort: 7946
name: memberlist-udp
protocol: UDP
livenessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_RAW
drop:
- ALL
readOnlyRootFilesystem: true
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: speaker
terminationGracePeriodSeconds: 2
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: metallb
component: controller
name: controller
namespace: metallb-system
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app: metallb
component: controller
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
app: metallb
component: controller
spec:
containers:
- args:
- --port=7472
- --config=config
- --log-level=info
env:
- name: METALLB_ML_SECRET_NAME
value: memberlist
- name: METALLB_DEPLOYMENT
value: controller
image: quay.io/metallb/controller:v0.12.1
name: controller
ports:
- containerPort: 7472
name: monitoring
livenessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: true
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 65534
fsGroup: 65534
serviceAccountName: controller
terminationGracePeriodSeconds: 0
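
Note on the manifest above: both the speaker DaemonSet and the controller Deployment are started with --port=7472 --config=config, so this MetalLB v0.12.1 installation only assigns addresses once a ConfigMap named "config" with at least one address pool exists in the metallb-system namespace. A minimal sketch of creating such a pool with provs' task/cmd helpers; the pool name "public" and the address range are illustrative placeholders, not values taken from this repository.

    import org.domaindrivenarchitecture.provs.framework.core.Prov
    import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createFile

    // Hypothetical sketch: apply a legacy (v0.12.x) Layer2 address-pool ConfigMap for MetalLB.
    // Pool name and IP range are placeholders; adjust them to the target network.
    fun Prov.configureMetalLbAddressPool() = task {
        val config = """
            apiVersion: v1
            kind: ConfigMap
            metadata:
              namespace: metallb-system
              name: config
            data:
              config: |
                address-pools:
                - name: public
                  protocol: layer2
                  addresses:
                  - 192.168.56.240-192.168.56.250
        """.trimIndent()
        createFile("/tmp/metallb-config.yaml", config)
        cmd("kubectl apply -f /tmp/metallb-config.yaml")
    }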


@ -1,4 +1,5 @@
 # based on https://github.com/k3s-io/k3s/blob/master/manifests/traefik.yaml
+# required changes: global.systemDefaultRegistry must be set to ""
 ---
 apiVersion: helm.cattle.io/v1
 kind: HelmChart
@ -6,7 +7,7 @@ metadata:
 name: traefik-crd
 namespace: kube-system
 spec:
-chart: https://%{KUBERNETES_API}%/static/charts/traefik-crd-10.19.300.tgz
+chart: https://%{KUBERNETES_API}%/static/charts/traefik-crd-25.0.2+up25.0.0.tgz
 ---
 apiVersion: helm.cattle.io/v1
 kind: HelmChart
@ -14,16 +15,10 @@ metadata:
 name: traefik
 namespace: kube-system
 spec:
-chart: https://%{KUBERNETES_API}%/static/charts/traefik-10.19.300.tgz
+chart: https://%{KUBERNETES_API}%/static/charts/traefik-25.0.2+up25.0.0.tgz
 set:
 global.systemDefaultRegistry: ""
 valuesContent: |-
-rbac:
-enabled: true
-ports:
-websecure:
-tls:
-enabled: true
 podAnnotations:
 prometheus.io/port: "8082"
 prometheus.io/scrape: "true"
@ -33,8 +28,8 @@ spec:
 enabled: true
 priorityClassName: "system-cluster-critical"
 image:
-name: "rancher/mirrored-library-traefik"
-tag: "2.6.2"
+repository: "rancher/mirrored-library-traefik"
+tag: "2.10.5"
 tolerations:
 - key: "CriticalAddonsOnly"
 operator: "Exists"
@ -47,9 +42,4 @@ spec:
 service:
 ipFamilyPolicy: "PreferDualStack"
 annotations:
-metallb.universe.tf/allow-shared-ip: "shared-ip-service-group"
-metallb.universe.tf/address-pool: public
-spec:
-type: LoadBalancer
-externalTrafficPolicy: Cluster
+metallb.universe.tf/allow-shared-ip: shared-ip-service-group


@ -2,7 +2,7 @@ package org.domaindrivenarchitecture.provs.configuration.application
 import io.mockk.every
 import io.mockk.mockkStatic
-import io.mockk.unmockkStatic
+import io.mockk.unmockkAll
 import org.domaindrivenarchitecture.provs.framework.core.*
 import org.domaindrivenarchitecture.provs.framework.core.cli.getPasswordToConfigureSudoWithoutPassword
 import org.domaindrivenarchitecture.provs.framework.core.docker.provideContainer
@ -24,7 +24,7 @@ class ProvWithSudoKtTest {
 fun test_ensureSudoWithoutPassword_local_Prov() {
 mockkStatic(::getPasswordToConfigureSudoWithoutPassword)
-every { getPasswordToConfigureSudoWithoutPassword() } returns Secret("testuserpw")
+every { getPasswordToConfigureSudoWithoutPassword() } returns Secret("testuser")
 // given
 val containerName = "prov-test-sudo-no-pw"
@ -49,7 +49,8 @@ class ProvWithSudoKtTest {
 assertFalse(canSudo1)
 assertTrue(canSudo2)
-unmockkStatic(::getPasswordToConfigureSudoWithoutPassword)
+// cleanup
+unmockkAll()
 }
 @ExtensiveContainerTest
@ -57,7 +58,7 @@ class ProvWithSudoKtTest {
 // given
 val containerName = "prov-test-sudo-no-pw-ssh"
-val password = Secret("testuserpw")
+val password = Secret("testuser")
 val prov = Prov.newInstance(
 ContainerUbuntuHostProcessor(


@ -31,10 +31,8 @@ internal class TargetCliCommandKtTest {
 @AfterAll
 @JvmStatic
 internal fun afterAll() {
-unmockkObject(Prov)
-unmockkStatic(::local)
-unmockkStatic(::remote)
-unmockkStatic(::getPasswordToConfigureSudoWithoutPassword)
+// cleanup
+unmockkAll()
 }
 }


@ -40,7 +40,7 @@ internal class ApplicationKtTest {
 val dummyProv = Prov.newInstance(DummyProcessor())
 mockkObject(Prov)
-every { Prov.newInstance(any(), any(), any(), any(), ) } returns dummyProv
+every { Prov.newInstance(any(), any(), any(), any()) } returns dummyProv
 mockkStatic(::local)
 every { local() } returns dummyProv
@ -52,7 +52,7 @@ internal class ApplicationKtTest {
 every { getConfig("testconfig.yaml") } returns testConfig
 mockkStatic(Prov::provisionDesktop)
-every { any<Prov>().provisionDesktop(any(), any(), any(), any(), any(),any()) } returns ProvResult(
+every { any<Prov>().provisionDesktop(any(), any(), any(), any(), any()) } returns ProvResult(
 true,
 cmd = "mocked command"
 )
@ -65,12 +65,8 @@ internal class ApplicationKtTest {
 @AfterAll
 @JvmStatic
 internal fun afterAll() {
-unmockkObject(Prov)
-unmockkStatic(::local)
-unmockkStatic(::remote)
-unmockkStatic(::getConfig)
-unmockkStatic(Prov::provisionDesktop)
-unmockkStatic(::getPasswordToConfigureSudoWithoutPassword)
+// cleanup
+unmockkAll()
 }
 }
@ -89,7 +85,6 @@ internal class ApplicationKtTest {
 null,
 testConfig.gitUserName,
 testConfig.gitEmail,
-null
 )
 }
 }
@ -123,7 +118,7 @@ internal class ApplicationKtTest {
 "Error: File\u001B[31m idontexist.yaml \u001B[0m was not found.Pls copy file \u001B[31m desktop-config-example.yaml \u001B[0m to file \u001B[31m idontexist.yaml \u001B[0m and change the content according to your needs.No suitable config found."
 assertEquals(expectedOutput, outContent.toString().replace("\r", "").replace("\n", ""))
-verify(exactly = 0) { any<Prov>().provisionDesktop(any(), any(), any(), any(), any(), any()) }
+verify(exactly = 0) { any<Prov>().provisionDesktop(any(), any(), any(), any(), any()) }
 unmockkStatic(::quit)
 }
@ -157,7 +152,7 @@ internal class ApplicationKtTest {
 "Error: File \"src/test/resources/invalid-desktop-config.yaml\" has an invalid format and or invalid data.No suitable config found."
 assertEquals(expectedOutput, outContent.toString().replace("\r", "").replace("\n", ""))
-verify(exactly = 0) { any<Prov>().provisionDesktop(any(), any(), any(), any(), any(), any()) }
+verify(exactly = 0) { any<Prov>().provisionDesktop(any(), any(), any(), any(), any()) }
 unmockkStatic(::quit)
 }
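
The common thread in the test changes above is mock cleanup: the hand-maintained lists of unmockkObject/unmockkStatic calls in the @AfterAll blocks are collapsed into a single unmockkAll(). A minimal, self-contained sketch of that pattern; the greeting() function and the test class are illustrative and not part of this repository.

    import io.mockk.every
    import io.mockk.mockkStatic
    import io.mockk.unmockkAll
    import org.junit.jupiter.api.AfterAll
    import org.junit.jupiter.api.Assertions.assertEquals
    import org.junit.jupiter.api.BeforeAll
    import org.junit.jupiter.api.Test

    fun greeting(): String = "hello"   // stand-in for a mocked top-level function such as local() or getConfig()

    internal class MockkCleanupPatternTest {
        companion object {
            @BeforeAll
            @JvmStatic
            fun beforeAll() {
                mockkStatic(::greeting)               // mock a top-level function, as the tests above do
                every { greeting() } returns "mocked"
            }

            @AfterAll
            @JvmStatic
            fun afterAll() {
                unmockkAll()                          // one call replaces the per-mock unmockk* list
            }
        }

        @Test
        fun usesTheMock() = assertEquals("mocked", greeting())
    }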


@ -1,12 +1,15 @@
 package org.domaindrivenarchitecture.provs.desktop.domain
-import org.domaindrivenarchitecture.provs.framework.core.ProgressType
-import org.domaindrivenarchitecture.provs.framework.core.Prov
+import io.mockk.*
+import org.domaindrivenarchitecture.provs.configuration.domain.TargetCliCommand
+import org.domaindrivenarchitecture.provs.desktop.infrastructure.installPpaFirefox
+import org.domaindrivenarchitecture.provs.desktop.infrastructure.verifyIdeSetup
+import org.domaindrivenarchitecture.provs.desktop.infrastructure.verifyOfficeSetup
+import org.domaindrivenarchitecture.provs.framework.core.*
 import org.domaindrivenarchitecture.provs.framework.core.docker.provideContainer
-import org.domaindrivenarchitecture.provs.framework.core.local
 import org.domaindrivenarchitecture.provs.framework.core.processors.ContainerStartMode
 import org.domaindrivenarchitecture.provs.framework.core.processors.ContainerUbuntuHostProcessor
-import org.domaindrivenarchitecture.provs.framework.core.remote
+import org.domaindrivenarchitecture.provs.framework.core.processors.DummyProcessor
 import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.deleteFile
 import org.domaindrivenarchitecture.provs.test.defaultTestContainer
 import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest
@ -35,15 +38,76 @@ internal class DesktopServiceKtTest {
 // when
 Assertions.assertThrows(Exception::class.java) {
-prov.provisionDesktop(
-DesktopType.BASIC,
-gitUserName = "testuser",
-gitEmail = "testuser@test.org",
-onlyModules = null
+prov.provisionDesktopCommand(
+DesktopCliCommand(DesktopType.BASIC, TargetCliCommand("testuser@somehost"), null), DesktopConfig() // dummy data
 )
 }
 }
+@Test
+fun provisionDesktop_with_onlyModules_firefox_installs_firefox() {
+// given
+val prov = Prov.newInstance(DummyProcessor())
+mockkStatic(Prov::installPpaFirefox)
+every { any<Prov>().installPpaFirefox() } returns ProvResult(true, cmd = "mocked")
+// when
+prov.provisionOnlyModules(DesktopType.IDE, onlyModules = listOf("firefox"))
+// then
+verify(exactly = 1) { any<Prov>().installPpaFirefox() }
+// cleanup
+unmockkAll()
+}
+@Test
+fun provisionDesktop_ide_with_onlyModules_verify_performs_verification() {
+// given
+val prov = Prov.newInstance(DummyProcessor())
+mockkStatic(Prov::verifyIdeSetup)
+mockkStatic(Prov::verifyOfficeSetup)
+mockkStatic(Prov::provisionBasicDesktop)
+every { any<Prov>().verifyIdeSetup() } returns ProvResult(true, cmd = "mocked")
+every { any<Prov>().verifyOfficeSetup() } returns ProvResult(true, cmd = "mocked")
+every { any<Prov>().provisionBasicDesktop(any(), any(), any(), any()) }
+// when
+prov.provisionOnlyModules(DesktopType.IDE, onlyModules = listOf("verify"))
+// then
+verify(exactly = 1) { any<Prov>().verifyIdeSetup() }
+verify(exactly = 0) { any<Prov>().verifyOfficeSetup() }
+verify(exactly = 0) { any<Prov>().provisionBasicDesktop(any(), any(), any(), any()) }
+// cleanup
+unmockkAll()
+}
+@Test
+fun provisionDesktop_office_with_onlyModules_verify_performs_verification() {
+// given
+val prov = Prov.newInstance(DummyProcessor())
+mockkStatic(Prov::verifyIdeSetup)
+mockkStatic(Prov::verifyOfficeSetup)
+mockkStatic(Prov::provisionBasicDesktop)
+every { any<Prov>().verifyIdeSetup() } returns ProvResult(true, cmd = "mocked")
+every { any<Prov>().verifyOfficeSetup() } returns ProvResult(true, cmd = "mocked")
+every { any<Prov>().provisionBasicDesktop(any(), any(), any(), any()) }
+// when
+prov.provisionOnlyModules(DesktopType.OFFICE, onlyModules = listOf("verify"))
+// then
+verify(exactly = 0) { any<Prov>().verifyIdeSetup() }
+verify(exactly = 1) { any<Prov>().verifyOfficeSetup() }
+verify(exactly = 0) { any<Prov>().provisionBasicDesktop(any(), any(), any(), any()) }
+// cleanup
+unmockkAll()
+}
 @ExtensiveContainerTest
 @Disabled("Takes very long, enable if you want to test a desktop setup")
 fun provisionDesktop() {
@ -56,7 +120,6 @@ internal class DesktopServiceKtTest {
 DesktopType.BASIC,
 gitUserName = "testuser",
 gitEmail = "testuser@test.org",
-onlyModules = null
 )
 // then
@ -81,7 +144,6 @@ internal class DesktopServiceKtTest {
 DesktopType.IDE,
 gitUserName = "testuser",
 gitEmail = "testuser@test.org",
-onlyModules = null
 )
 // then


@ -2,11 +2,11 @@ package org.domaindrivenarchitecture.provs.desktop.domain
 import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.deleteFile
 import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptInstall
-import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.KNOWN_HOSTS_FILE
+import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.addKnownHost
+import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.isKnownHost
 import org.domaindrivenarchitecture.provs.test.defaultTestContainer
 import org.domaindrivenarchitecture.provs.test.tags.ContainerTest
-import org.junit.jupiter.api.Assertions.assertEquals
-import org.junit.jupiter.api.Assertions.assertTrue
+import org.junit.jupiter.api.Assertions.*
 import org.junit.jupiter.api.Test
@ -18,7 +18,7 @@ class KnownHostTest {
 val prov = defaultTestContainer()
 prov.task {
 aptInstall("ssh")
-deleteFile(KNOWN_HOSTS_FILE)
+deleteFile("~/.ssh/known_hosts")
 }
 // when
@ -30,10 +30,10 @@ class KnownHostTest {
 // Subclass of KnownHost for test knownHostSubclass_includes_additional_host
-class KnownHostsSubclass(hostName: String, hostKeys: List<HostKey>): KnownHost(hostName, hostKeys) {
+class KnownHostsSubclass(hostName: String, port: Int?, hostKeys: List<HostKey>): KnownHost(hostName, port, hostKeys) {
 companion object {
-val ANOTHER_HOST = KnownHostsSubclass("anotherhost.com", listOf("key1"))
+val ANOTHER_HOST = KnownHostsSubclass("anotherhost.com", 2222, listOf("key1"))
 fun values(): List<KnownHost> {
 return values + ANOTHER_HOST
@ -50,5 +50,44 @@
 assertTrue(hosts.size > 1)
 assertEquals("key1", hosts.last().hostKeys[0])
 }
+@ContainerTest
+fun knownHost_with_port_verified_successfully() {
+// given
+val prov = defaultTestContainer()
+prov.task {
+aptInstall("ssh")
+deleteFile("~/.ssh/known_hosts")
+}
+// when
+assertFalse(prov.isKnownHost(KnownHost.GITHUB.hostName))
+assertFalse(prov.isKnownHost(KnownHost.GITHUB.hostName, 22))
+val res = prov.addKnownHost(KnownHost(KnownHost.GITHUB.hostName, 22, KnownHost.GITHUB.hostKeys), verifyKeys = true)
+// then
+assertTrue(res.success)
+assertFalse(prov.isKnownHost(KnownHost.GITHUB.hostName))
+assertTrue(prov.isKnownHost(KnownHost.GITHUB.hostName, 22))
+}
+@ContainerTest
+fun knownHost_with_port_verification_failing() {
+// given
+val prov = defaultTestContainer()
+prov.task {
+aptInstall("ssh")
+deleteFile("~/.ssh/known_hosts")
+}
+// when
+assertFalse(prov.isKnownHost(KnownHost.GITHUB.hostName, 80))
+val res2 = prov.addKnownHost(KnownHost(KnownHost.GITHUB.hostName, 80, KnownHost.GITHUB.hostKeys), verifyKeys = true)
+// then
+assertFalse(res2.success)
+assertFalse(prov.isKnownHost(KnownHost.GITHUB.hostName))
+assertFalse(prov.isKnownHost(KnownHost.GITHUB.hostName, 80))
+}
 }
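
The new tests above exercise the port-aware KnownHost API: the constructor now takes an optional port, and isKnownHost/addKnownHost distinguish entries with and without one. A small usage sketch based only on the signatures visible in this diff; the host name, port and key are placeholders.

    import org.domaindrivenarchitecture.provs.desktop.domain.KnownHost
    import org.domaindrivenarchitecture.provs.framework.core.*
    import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.addKnownHost
    import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.isKnownHost

    // Sketch: trust a git server that serves ssh on a non-standard port; host, port and key are placeholders.
    fun Prov.trustInternalGitServer() = task {
        val gitHost = KnownHost("git.example.com", 2222, listOf("ssh-ed25519 AAAA...placeholderkey"))
        if (!isKnownHost(gitHost.hostName, 2222)) {
            // verifyKeys = true lets addKnownHost check the supplied keys against the live host, as in the tests above
            addKnownHost(gitHost, verifyKeys = true)
        } else {
            ProvResult(true, out = "host already known")
        }
    }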


@ -1,10 +1,7 @@
 package org.domaindrivenarchitecture.provs.desktop.infrastructure
 import org.domaindrivenarchitecture.provs.framework.core.getResourceAsText
-import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.checkFile
-import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createDir
-import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createDirs
-import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.fileContainsText
+import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.*
 import org.domaindrivenarchitecture.provs.test.defaultTestContainer
 import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest
 import org.junit.jupiter.api.Assertions.assertTrue
@ -48,4 +45,17 @@ internal class DevOpsKtTest {
 // then
 assertTrue(res.success)
 }
+@ExtensiveContainerTest
+fun installKubeconform() {
+// given
+val prov = defaultTestContainer()
+// when
+val res = prov.installKubeconform()
+// then
+assertTrue(res.success)
+assertTrue(prov.checkFile("/usr/local/bin/kubeconform"))
+}
 }


@ -1,17 +1,21 @@
 package org.domaindrivenarchitecture.provs.desktop.infrastructure
+import org.domaindrivenarchitecture.provs.framework.core.Secret
 import org.domaindrivenarchitecture.provs.framework.core.remote
 import org.domaindrivenarchitecture.provs.test.defaultTestContainer
 import org.domaindrivenarchitecture.provs.test.tags.ContainerTest
 import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.KeyPair
 import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.configureGpgKeys
 import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.gpgFingerprint
-import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.secretSources.GopassSecretSource
 import org.junit.jupiter.api.Assertions.assertTrue
 import org.junit.jupiter.api.Disabled
 import org.junit.jupiter.api.Test
 import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.*
+import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.privateGPGSnakeoilKey
+import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.secretSources.PromptSecretSource
+import org.domaindrivenarchitecture.provs.framework.ubuntu.user.base.makeCurrentUserSudoerWithoutPasswordRequired
 import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest
+import org.domaindrivenarchitecture.provs.test_keys.publicGPGSnakeoilKey
 import org.junit.jupiter.api.Assertions.assertFalse
@ -56,24 +60,32 @@ internal class GopassKtTest {
 }
 @Test
-@Disabled // Integrationtest; change user, host and keys, then remove this line to run this test
+@Disabled // This is an integration test, which needs preparation:
+// Pls change user, host and remote connection (choose connection either by password or by ssh key)
+// then remove tag @Disabled to be able to run this test.
+// PREREQUISITE: remote machine needs openssh-server installed
 fun test_install_and_configure_Gopass_and_GopassBridgeJsonApi() {
-// settings to change
-val host = "192.168.56.135"
+// host and user
+val host = "192.168.56.154"
 val user = "xxx"
-val pubKey = GopassSecretSource("path-to/pub.key").secret()
-val privateKey = GopassSecretSource("path-to/priv.key").secret()
-// given
-val prov = remote(host, user)
+// connection by password
+val pw = PromptSecretSource("Pw for $user").secret()
+val prov = remote(host, user, pw)
+prov.makeCurrentUserSudoerWithoutPasswordRequired(pw) // may be commented out if user can already sudo without password
+// or alternatively use connection by ssh key if the public key is already available remotely
+// val prov = remote(host, user)
+val pubKey = Secret(publicGPGSnakeoilKey())
+val privateKey = Secret(privateGPGSnakeoilKey())
 // when
 val res = prov.task {
 configureGpgKeys(
-KeyPair(
-pubKey,
-privateKey
-),
+KeyPair(pubKey, privateKey),
 trust = true,
 skipIfExistin = true
 )


@ -0,0 +1,27 @@
package org.domaindrivenarchitecture.provs.desktop.infrastructure
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.checkFile
import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest
import org.junit.jupiter.api.Assertions.*
class GraalVMKtTest {
@ExtensiveContainerTest
fun installGraalVM() {
// given
val prov = defaultTestContainer()
// when
val res = prov.task {
installGraalVM()
installGraalVM() // test repeatability
}
// then
assertTrue(res.success)
assertTrue(GRAAL_VM_VERSION == prov.graalVMVersion())
assertTrue(prov.checkFile("/usr/local/bin/native-image"))
}
}


@ -0,0 +1,42 @@
package org.domaindrivenarchitecture.provs.desktop.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.docker.exitAndRmContainer
import org.domaindrivenarchitecture.provs.framework.core.local
import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest
import org.junit.jupiter.api.Assertions.assertFalse
import org.junit.jupiter.api.Assertions.assertTrue
import org.junit.jupiter.api.Test
internal class HugoTest {
@ExtensiveContainerTest
fun test_installHugoByDeb() {
// given
local().exitAndRmContainer("provs_test")
val prov = defaultTestContainer()
// when
val res = prov.installHugoByDeb()
// then
assertTrue(res.success)
}
@Test
fun test_needsHugoInstall() {
// given
val hugoNull = null
val hugoLow = "hugo v0.0.0-abc+extended linux/amd64 BuildDate=0000-00-00 VendorInfo=snap:0.0.0"
val hugoMajHigh = "hugo v1.0.0-abc+extended linux/amd64 BuildDate=0000-00-00 VendorInfo=snap:1.0.0"
val hugoMinHigh = "hugo v0.1.0-abc+extended linux/amd64 BuildDate=0000-00-00 VendorInfo=snap:0.1.0"
val hugoPatHigh = "hugo v0.0.1-abc+extended linux/amd64 BuildDate=0000-00-00 VendorInfo=snap:0.0.1"
assertTrue(needsHugoInstall(hugoNull, hugoPatHigh))
assertTrue(needsHugoInstall(hugoLow, hugoPatHigh))
assertTrue(needsHugoInstall(hugoLow, hugoMinHigh))
assertTrue(needsHugoInstall(hugoLow, hugoMajHigh))
assertFalse(needsHugoInstall(hugoMajHigh, hugoLow))
assertFalse(needsHugoInstall(hugoMinHigh, hugoLow))
assertFalse(needsHugoInstall(hugoPatHigh, hugoLow))
}
}
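
needsHugoInstall above takes the installed hugo version string (or null) first and the required version second, returning true when an install or upgrade is needed. A hedged sketch combining it with installHugoByDeb from the same file; the import paths are assumed from the test's package, and how the installed version string is obtained is left to the caller.

    import org.domaindrivenarchitecture.provs.desktop.infrastructure.installHugoByDeb
    import org.domaindrivenarchitecture.provs.desktop.infrastructure.needsHugoInstall
    import org.domaindrivenarchitecture.provs.framework.core.*

    // Sketch: (re)install hugo only when it is missing or older than the required version.
    // installedVersion may be null (hugo not installed); requiredVersion uses the format shown in the test above.
    fun Prov.ensureHugo(installedVersion: String?, requiredVersion: String) =
        if (needsHugoInstall(installedVersion, requiredVersion)) {
            installHugoByDeb()
        } else {
            ProvResult(true, out = "hugo is recent enough: $installedVersion")
        }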


@ -1,9 +1,7 @@
 package org.domaindrivenarchitecture.provs.desktop.infrastructure
 import com.charleskorn.kaml.InvalidPropertyValueException
-import org.domaindrivenarchitecture.provs.configuration.domain.ConfigFileName
 import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSourceType
-import org.domaindrivenarchitecture.provs.server.infrastructure.getK3sConfig
 import org.junit.jupiter.api.Assertions.assertEquals
 import org.junit.jupiter.api.Test
 import org.junit.jupiter.api.assertThrows
@ -34,7 +32,7 @@ internal class K3SDesktopConfigRepositoryKtTest {
 val exception = assertThrows<InvalidPropertyValueException> {
 getConfig("src/test/resources/invalid-desktop-config.yaml")
 }
-assertEquals("Value for 'sourceType' is invalid: Value 'xxx' is not a valid option, permitted choices are: FILE, GOPASS, PASS, PLAIN, PROMPT", exception.message)
+assertEquals("Value for 'sourceType' is invalid: Value 'xxx' is not a valid option, permitted choices are: ENV, FILE, GOPASS, PASS, PLAIN, PROMPT", exception.message)
 }
 @Test


@ -3,8 +3,10 @@ package org.domaindrivenarchitecture.provs.desktop.infrastructure
 import org.domaindrivenarchitecture.provs.test.defaultTestContainer
 import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest
 import org.junit.jupiter.api.Assertions.assertTrue
+import org.junit.jupiter.api.Disabled
 internal class NextcloudClientTest {
+@Disabled // test is hanging sometimes
 @ExtensiveContainerTest
 fun test_installNextcloudClient() {
 // when


@ -9,9 +9,9 @@ internal class PythonKtTest {
 @ExtensiveContainerTest
 fun test_provisionPython() {
 // when
-val res = defaultTestContainer().provisionPython()
+val result = defaultTestContainer().provisionPython()
 // then
-assertTrue(res.success)
+assertTrue(result.success)
 }
 }


@ -1,7 +1,8 @@
 package org.domaindrivenarchitecture.provs.desktop.infrastructure
+import org.domaindrivenarchitecture.provs.framework.core.remote
 import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptInstall
-import org.domaindrivenarchitecture.provs.test.defaultTestContainer
+import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.secretSources.PromptSecretSource
 import org.junit.jupiter.api.Assertions.assertTrue
 import org.junit.jupiter.api.Disabled
 import org.junit.jupiter.api.Test
@ -9,14 +10,19 @@ import org.junit.jupiter.api.Test
 internal class VSCodeKtTest {
 @Test
-@Disabled("Test currently not working, needs fix. VSC is installed by snapd which is not currently supported to run inside docker")
-fun installVSC() {
+@Disabled("Remote testing by updating connection details below, then enable test and run it manually.")
+// Remark: VSCode installs with snap, which does not run in container and cannot be tested by container test.
+fun installVSCode() {
 // given
-val a = defaultTestContainer()
-a.aptInstall("xvfb libgbm-dev libasound2")
+val prov = remote("192.168.56.153", "xx", PromptSecretSource("Remote password").secret()) // machine needs openssh-server installed and sudo possible without pw
+prov.aptInstall("xvfb libgbm-dev libasound2")
 // when
-val res = a.installVSC("python", "clojure")
+val res = prov.task {
+installVSCode("python", "clojure")
+cmd("code -v")
+cmd("codium -v")
+}
 // then
 assertTrue(res.success)


@ -33,33 +33,34 @@ internal class ProvTest {
 assertTrue(res)
 }
-@ContainerTest
+@Test
 fun sh() {
 // given
 val script = """
 # test some script commands
-ping -c1 hetzner.com
-echo something
+echo something1
+pwd
+echo something3
 """
 // when
-val res = Prov.newInstance(name = "testing").sh(script).success
+val res = Prov.newInstance(name = "provs_test").sh(script).success
 // then
 assertTrue(res)
 }
 @ContainerTest
-@NonCi
+@NonCi // sudo might not be available
 fun sh_with_dir_and_sudo() {
 // given
 val script = """
 # test some script commands
-ping -c1 hetzner.com
-echo something
-echo 1 # comment behind command
+pwd
+echo something1
+echo something2 # with comment behind command
 """
 // when


@ -103,7 +103,8 @@ internal class UbuntuProvTest {
 // then
 assertFalse(result.success)
-assertEquals("sudo: no tty present and no askpass program specified\n", result.err)
+val expectedMsg = "a password is required"
+assertTrue(result.err?.contains(expectedMsg) ?: false, "Error: [$expectedMsg] is not found in [${result.err}]")
 }
 }
@ -119,12 +120,12 @@ class UbuntuUserNeedsPasswordForSudo(private val userName: String = "testuser")
 override fun imageText(): String {
 return """
-FROM ubuntu:18.04
+FROM ubuntu:22.04
 ARG DEBIAN_FRONTEND=noninteractive
 RUN apt-get update && apt-get -y install sudo
-RUN useradd -m $userName && echo "$userName:$userName" | chpasswd && adduser $userName sudo
+RUN useradd -m $userName && echo "$userName:$userName" | chpasswd && usermod -aG sudo $userName
 USER $userName
 CMD /bin/bash


@ -34,7 +34,7 @@ class ContainerUbuntuHostProcessorTest {
 // given
 val containerName = "prov-test-ssh-with-container"
-val password = Secret("testuserpw")
+val password = Secret("testuser")
 val prov = Prov.newInstance(
 ContainerUbuntuHostProcessor(
@ -57,8 +57,8 @@ class ContainerUbuntuHostProcessorTest {
 val remoteProvBySsh = remote(ipOfContainer, "testuser", password)
 // when
-val firstSessionResult = remoteProvBySsh.cmd("echo 1")
-val secondSessionResult = remoteProvBySsh.cmd("echo 1")
+val firstSessionResult = remoteProvBySsh.cmd("echo 1") // connect (to container) by ssh via ip
+val secondSessionResult = remoteProvBySsh.cmd("echo 2") // second connect after first connection has been closed
 // then
 assertTrue(firstSessionResult.success)


@ -6,6 +6,7 @@ import org.domaindrivenarchitecture.provs.framework.core.escapeProcentForPrintf
 import org.domaindrivenarchitecture.provs.framework.core.escapeSingleQuoteForShell
 import org.junit.jupiter.api.Assertions.*
 import org.junit.jupiter.api.Test
+import java.nio.file.Paths
 internal class LocalProcessorTest {
@ -24,6 +25,18 @@ internal class LocalProcessorTest {
 assertTrue(res.out == text)
 }
+@Test
+fun cmd_in_folder_where_program_was_started() {
+// given
+val prov = Prov.newInstance(LocalProcessor(false))
+// when
+val pwd = prov.cmd("pwd").outTrimmed
+// then
+assertEquals(Paths.get("").toAbsolutePath().toString(), pwd)
+}
 @Test
 fun cmd_with_nested_shell_and_printf() {
@ -59,7 +72,7 @@ internal class LocalProcessorTest {
 @Test
-fun cmd_forUnkownCommand_resultWithError() {
+fun cmd_forUnknownCommand_resultWithError() {
 // given
 val prov = Prov.newInstance()
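
The new cmd_in_folder_where_program_was_started test pins down the effect of the LocalProcessor flag: with false, commands apparently run in the directory the program was started from instead of the home directory. A one-line sketch; the import path of LocalProcessor is assumed to match the other processors in framework.core.processors.

    import org.domaindrivenarchitecture.provs.framework.core.Prov
    import org.domaindrivenarchitecture.provs.framework.core.processors.LocalProcessor

    // Sketch: a local Prov instance that keeps the JVM's working directory for its commands.
    val localProvInStartDir: Prov = Prov.newInstance(LocalProcessor(false))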


@ -0,0 +1,95 @@
package org.domaindrivenarchitecture.provs.framework.ubuntu.cron.infrastructure
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.checkFile
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createDirs
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createFile
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.fileContent
import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptInstall
import org.domaindrivenarchitecture.provs.framework.ubuntu.user.base.whoami
import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.junit.jupiter.api.Assertions.*
import org.junit.jupiter.api.Disabled
import org.junit.jupiter.api.Test
class CronJobsKtTest {
@Test
fun createCronJob_creates_cron_file() {
// given
val prov = defaultTestContainer()
val cronFilename = "50_test_cron"
// when
val result = prov.createCronJob(cronFilename, "0 * * * *", "echo hello > /dev/null 2>&1")
// then
assertTrue(result.success)
val fqFilename = "/etc/cron.d/$cronFilename"
assertTrue(prov.checkFile(fqFilename), "")
val actualFileContent = prov.fileContent(fqFilename, sudo = true)
val expectedUser = prov.whoami()
assertEquals("0 * * * * $expectedUser echo hello > /dev/null 2>&1\n", actualFileContent)
}
@Test
@Disabled // only for manual execution and manual check for the created files
// Test if cron-job is actually running, but needs manual checks
fun createCronJob_which_creates_files_with_timestamp() {
// given
val prov = defaultTestContainer()
val cronFilename = "90_time_cron"
// for cron in docker see e.g. https://serverfault.com/questions/924779/docker-cron-not-working
prov.task {
aptInstall("cron")
cmd("sudo touch /var/log/cron.log") // Create the log file
optional { // may already be running
cmd("sudo cron") // Run cron
}
cmd("pgrep cron") // Ensure cron is running
}
prov.createDirs("tmp")
val user = prov.whoami()
// when
val result = prov.createCronJob(
cronFilename,
"*/1 * * * *",
"echo \"xxx\" > /home/$user/tmp/\$(/usr/bin/date +\\%Y_\\%m_\\%d-\\%H_\\%M)"
)
// then
assertTrue(result.success)
val fqFilename = "/etc/cron.d/$cronFilename"
assertTrue(prov.checkFile(fqFilename), "File does not exist: $fqFilename")
// after a minute check manually if files exist, e.g. with: sudo docker exec provs_test /bin/bash -c "ls -l tmp"
// each minute a new file should be created with the timestamp
}
@Test
fun scheduleMonthlyReboot() {
// given
val prov = defaultTestContainer()
// create dummy shutdown in test container if missing (containers do usually not have shutdown installed)
prov.createFile(
"/sbin/shutdown",
"dummy file for test of scheduleMonthlyReboot",
sudo = true,
overwriteIfExisting = false
)
// when
val result = prov.scheduleMonthlyReboot()
// then
assertTrue(result.success)
val fqFilename = "/etc/cron.d/50_monthly_reboot"
assertTrue(prov.checkFile(fqFilename), "")
val actualFileContent = prov.fileContent(fqFilename, sudo = true)
assertEquals("0 3 1-7 * 2 root shutdown -r now\n", actualFileContent)
}
}
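
From the assertions above, createCronJob(fileName, schedule, command) writes /etc/cron.d/<fileName> as "<schedule> <current user> <command>", and scheduleMonthlyReboot() is a canned root job built on top of it. A small hedged sketch; the job name, schedule and command are placeholders, and the import path is assumed from this test's package.

    import org.domaindrivenarchitecture.provs.framework.core.Prov
    import org.domaindrivenarchitecture.provs.framework.ubuntu.cron.infrastructure.createCronJob

    // Sketch: a nightly cleanup job, expected to end up in /etc/cron.d/60_tmp_cleanup as
    // "30 2 * * * <current user> find /tmp -type f -mtime +7 -delete"
    fun Prov.scheduleNightlyTmpCleanup() =
        createCronJob("60_tmp_cleanup", "30 2 * * *", "find /tmp -type f -mtime +7 -delete")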


@ -184,6 +184,7 @@ internal class FilesystemKtTest {
 val res7 = prov.createDirs("test/testdir")
 val res8 = prov.checkDir("testdir", "~/test")
 prov.deleteDir("testdir", "~/test/")
+val res9 = prov.deleteDir("notexistingdirdir", "~/")
 // then
 assertFalse(res1)
@ -194,6 +195,7 @@
 assertFalse(res6)
 assertTrue(res7.success)
 assertTrue(res8)
+assertTrue(res9.success)
 }


@ -3,7 +3,7 @@ package org.domaindrivenarchitecture.provs.framework.ubuntu.git.base
 import org.domaindrivenarchitecture.provs.desktop.domain.addKnownHosts
 import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.checkDir
 import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptInstall
-import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.isHostKnown
+import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.isKnownHost
 import org.domaindrivenarchitecture.provs.test.defaultTestContainer
 import org.domaindrivenarchitecture.provs.test.tags.ContainerTest
 import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest
@ -24,8 +24,8 @@ internal class GitKtTest {
 // then
 assertTrue(res.success)
-assertTrue(a.isHostKnown("github.com"), "github.com does not seem to be a known host")
-assertTrue(a.isHostKnown("gitlab.com"), "gitlab.com does not seem to be a known host")
+assertTrue(a.isKnownHost("github.com"), "github.com does not seem to be a known host")
+assertTrue(a.isKnownHost("gitlab.com"), "gitlab.com does not seem to be a known host")
 }
 @ExtensiveContainerTest


@ -1,5 +1,6 @@
 package org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base
+import org.domaindrivenarchitecture.provs.desktop.domain.KnownHost
 import org.domaindrivenarchitecture.provs.framework.core.Secret
 import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.deleteFile
 import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.fileContainsText
@ -10,6 +11,8 @@ import org.domaindrivenarchitecture.provs.test.defaultTestContainer
 import org.domaindrivenarchitecture.provs.test.tags.ContainerTest
 import org.junit.jupiter.api.Assertions.*
+const val KNOWN_HOSTS_FILE = "~/.ssh/known_hosts"
 internal class SshKtTest {
 @ContainerTest
 fun configureSshKeys_for_ssh_type_rsa() {
@ -48,7 +51,7 @@ internal class SshKtTest {
 }
 @ContainerTest
-fun addKnownHost() {
+fun addKnownHost_without_verification() {
 // given
 val prov = defaultTestContainer()
 prov.task {
@ -57,10 +60,8 @@ internal class SshKtTest {
 }
 // when
-val res = prov.addKnownHost("github.com", listOf("dummyProtocol dummyKey", "dummyProtocol2 dummyKey2", ))
-val res2 = prov.addKnownHost("github.com", listOf("dummyProtocol dummyKey", "dummyProtocol2 dummyKey2", ))
-val res3 = prov.addKnownHost("github.com", listOf("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl", ), verifyKeys = true)
-val res4 = prov.addKnownHost("github.com", listOf("ssh-ed25519 AAAAC3Nzalwrongkey!!!", ), verifyKeys = true)
+val res = prov.addKnownHost(KnownHost("github.com", listOf("dummyProtocol dummyKey", "dummyProtocol2 dummyKey2", )))
+val res2 = prov.addKnownHost(KnownHost("github.com", listOf("dummyProtocol dummyKey", "dummyProtocol2 dummyKey2", )))
 // then
 assertTrue(res.success)
@ -70,8 +71,32 @@ internal class SshKtTest {
 assertTrue(res2.success)
 val keyCount = prov.cmd("grep -o -i dummyKey2 $KNOWN_HOSTS_FILE | wc -l").out?.trim()
 assertEquals("1", keyCount)
-assertTrue(res3.success)
-assertFalse(res4.success)
+}
+@ContainerTest
+fun addKnownHost_with_verifications() {
+// given
+val prov = defaultTestContainer()
+prov.task {
+aptInstall("ssh")
+deleteFile(KNOWN_HOSTS_FILE)
+}
+// when
+val res1 = prov.addKnownHost(KnownHost.GITHUB, verifyKeys = true)
+val res2 = prov.addKnownHost(KnownHost.GITHUB, verifyKeys = true)
+val invalidKey = "ssh-ed25519 AAAAC3Nzalwrongkey!!!"
+val res3 = prov.addKnownHost(KnownHost("github.com", listOf(invalidKey )), verifyKeys = true)
+// then
+assertTrue(res1.success)
+assertTrue(prov.fileContainsText(KNOWN_HOSTS_FILE, KnownHost.GITHUB.hostKeys[0]))
+assertTrue(res2.success)
+assertTrue(prov.fileContainsText(KNOWN_HOSTS_FILE, KnownHost.GITHUB.hostKeys[0]))
+assertFalse(res3.success)
+assertFalse(prov.fileContainsText(KNOWN_HOSTS_FILE, invalidKey))
+}
 }


@ -0,0 +1,14 @@
package org.domaindrivenarchitecture.provs.framework.ubuntu.secret.secretSources
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.Disabled
import org.junit.jupiter.api.Test
internal class EnvSecretSourceTest {
@Test
@Disabled // set env variable "envtest=envtestval" externally e.g. in IDE and run manually
fun secret() {
assertEquals("envtestval", EnvSecretSource("envtest").secret().plain())
}
}
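
Together with the widened error message in K3SDesktopConfigRepositoryKtTest above (permitted choices now include ENV), this test introduces environment variables as a secret source. A hedged sketch of resolving one programmatically; the variable name is a placeholder and has to be exported before the program runs.

    import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.secretSources.EnvSecretSource

    // Sketch: read a secret from the environment, e.g. after `export MY_API_TOKEN=...`.
    fun main() {
        val token = EnvSecretSource("MY_API_TOKEN").secret()
        println(token)   // Secret is expected to mask its value when printed; token.plain() would expose it, as in the test above
    }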


@ -0,0 +1,35 @@
package org.domaindrivenarchitecture.provs.server.infrastructure
import org.domaindrivenarchitecture.provs.configuration.domain.ConfigFileName
import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSourceType
import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSupplier
import org.domaindrivenarchitecture.provs.server.domain.hetzner_csi.HetznerCSIConfig
import org.domaindrivenarchitecture.provs.server.domain.k8s_grafana_agent.GrafanaAgentConfig
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.Test
internal class HetznerCSIRepositoryKtTest {
@Test
fun findHetznerCSIConfig_returns_config() {
// when
val config = findHetznerCSIConfig(ConfigFileName("src/test/resources/k3s-server-config-with-hetzner.yaml"))
// then
assertEquals(
HetznerCSIConfig(
hcloudApiToken = SecretSupplier(SecretSourceType.GOPASS, "path/to/apitoken"),
encryptionPassphrase = SecretSupplier(SecretSourceType.GOPASS, "path/to/encryption"),
), config
)
}
@Test
fun findHetznerCSIConfig_returns_null_if_no_hetzner_data_available() {
// when
val config = findHetznerCSIConfig(ConfigFileName("src/test/resources/k3s-server-config.yaml"))
// then
assertEquals(null, config)
}
}


@ -1,126 +0,0 @@
package org.domaindrivenarchitecture.provs.server.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.*
import org.domaindrivenarchitecture.provs.framework.core.docker.containerExec
import org.domaindrivenarchitecture.provs.framework.core.docker.provideContainer
import org.domaindrivenarchitecture.provs.framework.core.processors.ContainerStartMode
import org.domaindrivenarchitecture.provs.server.domain.applyK8sConfig
import org.domaindrivenarchitecture.provs.server.domain.installK3sAsContainers
import org.domaindrivenarchitecture.provs.test.tags.ContainerTest
import org.domaindrivenarchitecture.provs.test.tags.NonCi
import org.junit.jupiter.api.Assertions.assertTrue
import org.junit.jupiter.api.Disabled
internal class K3dKtTest {
@Disabled // remove line and execute manually as this test may take several minutes
@ContainerTest
@NonCi
fun installK3sAsContainers() {
// given
val containerName = "alpine-docker-dind"
local().task {
provideContainer(
containerName,
"yobasystems/alpine-docker:dind-amd64",
ContainerStartMode.CREATE_NEW_KILL_EXISTING, // for re-create a potentially existing container
sudo = false,
options = "--privileged"
)
// alpine does not have bash pre-installed - but bash is currently required for provs
containerExec(containerName, "sh -c \"apk add bash\"", sudo = false)
}
val result = docker(containerName, sudo = false).task {
cmd("apk update")
cmd("apk add sudo curl")
task(
"Install kubectl"
) {
sh("""
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl
kubectl version --client
""".trimIndent())
}
// when
installK3sAsContainers()
applyK8sConfig(appleConfig())
cmd("kubectl wait --for=condition=ready --timeout=600s pod apple-app")
checkAppleService()
}
// then
assertTrue(result.success)
}
}
/**
* Checks if URL "$host/apple" is available and return text "apple"
*/
private fun Prov.checkAppleService(host: String = "127.0.0.1") = requireLast {
// repeat required as curl may return with "empty reply from server" or with "Recv failure: Connection reset by peer"
val res = repeatTaskUntilSuccess(12, 10) {
cmd("curl -m 30 $host/apple")
}.out?.trim()
if ("apple" == res) {
ProvResult(true, out = res)
} else {
ProvResult(false, err = "Url $host/apple did not return text \"apple\" but returned: $res")
}
}
fun appleConfig() =
"""
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
name: apple-ingress
annotations:
kubernetes.io/ingress.class: "traefik"
spec:
rules:
- http:
paths:
- path: /apple
pathType: Prefix
backend:
service:
name: apple-service
port:
number: 5678
---
kind: Pod
apiVersion: v1
metadata:
name: apple-app
labels:
app: apple
spec:
containers:
- name: apple-app
image: hashicorp/http-echo
args:
- "-text=apple"
---
kind: Service
apiVersion: v1
metadata:
name: apple-service
spec:
selector:
app: apple
ports:
- port: 5678 # Default port for image
"""


@ -0,0 +1,45 @@
package org.domaindrivenarchitecture.provs.server.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.local
import org.domaindrivenarchitecture.provs.framework.core.remote
import org.domaindrivenarchitecture.provs.server.domain.k3s.K3sConfig
import org.domaindrivenarchitecture.provs.server.domain.k3s.Node
import org.domaindrivenarchitecture.provs.server.domain.k3s.provisionK3s
import org.junit.jupiter.api.Assertions.assertTrue
import org.junit.jupiter.api.Disabled
import org.junit.jupiter.api.Test
import java.lang.Thread.sleep
class K3sKtTest {
@Test // Extensive test, takes several minutes
@Disabled("1. update remoteIp and user, 2. enable remote ssh connection and 3. enable this test and then 4. run manually")
fun installK3s() {
// given
val remoteIp = "192.168.56.146"
val user = "xxx"
// enable remote ssh connection either manually or by the commented-out code below to copy local authorized_keys to remote
// remote(remoteIp, user, PromptSecretSource("PW for $user on $remoteIp").secret()).task {
// val authorizedKeysFilename = ".ssh/authorized_keys"
// val publicSshKey = local().getSecret("cat $authorizedKeysFilename") ?: Secret("") // or set directly by: val publicSshKey = Secret("public ssh key")
// createDir(".ssh")
// createSecretFile(authorizedKeysFilename, publicSshKey, posixFilePermission = "0644")
// }
// when
val res = remote(remoteIp, user).task { // connect by ssh
provisionK3s(K3sConfig(remoteIp, Node(remoteIp), echo = true, reprovision = false))
}
// then
assertTrue(res.success)
// check response echo pod
sleep(10000) // if time too short, increase or check curl manually
val echoResponse = local().cmd("curl http://$remoteIp/echo/").out
assertTrue(echoResponse?.contains("Hostname: echo-app") ?: false)
assertTrue(echoResponse?.contains("Host: $remoteIp") ?: false)
}
}


@ -0,0 +1,23 @@
package org.domaindrivenarchitecture.provs.server.infrastructure
import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.domaindrivenarchitecture.provs.test.tags.ContainerTest
import org.junit.jupiter.api.Assertions.assertTrue
class K9sKtTest {
@ContainerTest
fun installK9s() {
// given
val prov = defaultTestContainer()
// when
val res = prov.task {
installK9s()
installK9s() // test repeatability
}
// then
assertTrue(res.success)
}
}


@ -0,0 +1,18 @@
fqdn: statistics.test.meissa-gmbh.de
node:
ipv4: 162.55.164.138
ipv6: 2a01:4f8:c010:672f::1
certmanager:
email: admin@meissa-gmbh.de
letsencryptEndpoint: prod
echo: true
reprovision: true
hetzner:
hcloudApiToken:
source: "GOPASS" # PLAIN, GOPASS or PROMPT
parameter: "path/to/apitoken" # the api key for the hetzner cloud
encryptionPassphrase:
source: "GOPASS" # PLAIN, GOPASS or PROMPT
parameter: "path/to/encryption" # the encryption passphrase for created volumes

Some files were not shown because too many files have changed in this diff.