Compare commits


No commits in common. "main" and "0.25.0" have entirely different histories.
main ... 0.25.0

101 changed files with 1329 additions and 2353 deletions

.gitignore

@@ -9,4 +9,3 @@
 /server-config.yaml
 /desktop-config.yaml
 /syspec-config.yaml
-/.kotlin/

.gitlab-ci.yml

@@ -6,7 +6,7 @@ stages:
   - release
 .kotlin-job: &kotlin
-  image: domaindrivenarchitecture/ddadevops-kotlin:4.10.7
+  image: domaindrivenarchitecture/ddadevops-kotlin
   cache:
     key: ${CI_COMMIT_REF_SLUG}
     paths:
@@ -16,7 +16,6 @@ stages:
     - echo "---------- Start CI ----------"
     - export GRADLE_USER_HOME=`pwd`/.gradle
     - chmod +x gradlew
-    - export RELEASE_ARTIFACT_TOKEN=$MEISSA_REPO_BUERO_RW
     - echo "------ commit info ---------------"
     - echo $CI_COMMIT_TAG
     - echo $CI_COMMIT_REF_NAME
@@ -40,23 +39,19 @@ build:
     expire_in: 1 week
-  variables:
-    DOCKER_TLS_CERTDIR: "/certs"
 test:
   stage: test
-  image: docker:24.0.5
+  image: docker:latest
   services:
-    - docker:24.0.5-dind
+    - docker:dind
   dependencies:
     - build
   before_script:
     - echo "---------- BEFORE -------------"
-    - echo "$CI_REGISTRY_PASSWORD" | docker login $CI_REGISTRY --username $CI_REGISTRY_USER --password-stdin
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
   script:
     - echo "---------- TEST -------------"
-    - apk update && apk add bash openjdk11 git
+    - apk update && apk add bash openjdk11
     - export JAVA_HOME=/usr/lib/jvm/java-11-openjdk
     - docker build --pull -t "$CI_REGISTRY_IMAGE" .
     - docker run --privileged -dit --name provs_test -v /var/run/docker.sock:/var/run/docker.sock $CI_REGISTRY_IMAGE
@@ -65,7 +60,7 @@ test:
   artifacts:
     when: on_failure
     paths:
-      - build/reports/*
+      - build/reports/tests/test
     reports:
       junit: build/test-results/test/TEST-*.xml
@@ -74,7 +69,13 @@ package:
   <<: *kotlin
   stage: package
   script:
-    - pyb package
+    - ./gradlew -x assemble -x test jar
+    - ./gradlew -x assemble -x test -x jar uberjarDesktop
+    - ./gradlew -x assemble -x test -x jar uberjarServer
+    - ./gradlew -x assemble -x test -x jar uberjarSyspec
+    - cd build/libs/
+    - find . -type f -exec sha256sum {} \; | sort > sha256sum.lst
+    - find . -type f -exec sha512sum {} \; | sort > sha512sum.lst
   artifacts:
     paths:
       - build/libs/*.jar
@@ -95,16 +96,43 @@ publish-maven-package-to-meissa:
   stage: publish
   allow_failure: true
   script:
+    - apt-get update -y
+    - apt-get install -y iputils-ping ssh
+    - ping -c 2 repo.prod.meissa.de
+    - ssh-keyscan repo.prod.meissa.de
     - ./gradlew -x assemble -x test publishLibraryPublicationToMeissaRepository
+release-to-gitlab:
+  <<: *tag_only
+  image: registry.gitlab.com/gitlab-org/release-cli:latest
+  stage: release
+  artifacts:
+    paths:
+      - 'build/libs/provs-desktop.jar'
+      - 'build/libs/provs-server.jar'
+      - 'build/libs/provs-syspec.jar'
+      - 'build/libs/sha256sum.lst'
+      - 'build/libs/sha512sum.lst'
+  script:
+    - apk --no-cache add curl
+    - |
+      release-cli create --name "Release $CI_COMMIT_TAG" --tag-name $CI_COMMIT_TAG \
+        --assets-link "{\"name\":\"provs-desktop.jar\",\"url\":\"https://gitlab.com/domaindrivenarchitecture/provs/-/jobs/${CI_JOB_ID}/artifacts/file/build/libs/provs-desktop.jar\"}" \
+        --assets-link "{\"name\":\"provs-server.jar\",\"url\":\"https://gitlab.com/domaindrivenarchitecture/provs/-/jobs/${CI_JOB_ID}/artifacts/file/build/libs/provs-server.jar\"}" \
+        --assets-link "{\"name\":\"provs-syspec.jar\",\"url\":\"https://gitlab.com/domaindrivenarchitecture/provs/-/jobs/${CI_JOB_ID}/artifacts/file/build/libs/provs-syspec.jar\"}" \
+        --assets-link "{\"name\":\"sha256sum.lst\",\"url\":\"https://gitlab.com/domaindrivenarchitecture/provs/-/jobs/${CI_JOB_ID}/artifacts/file/build/libs/sha256sum.lst\"}" \
+        --assets-link "{\"name\":\"sha512sum.lst\",\"url\":\"https://gitlab.com/domaindrivenarchitecture/provs/-/jobs/${CI_JOB_ID}/artifacts/file/build/libs/sha512sum.lst\"}" \
 release-to-meissa:
   <<: *kotlin
   <<: *tag_only
   stage: release
   allow_failure: true
   script:
-    - pyb publish_release
+    - ./gradlew createReleaseAndUploadAssets
 after_script:
   - echo "---------- End CI ----------"

(IntelliJ run configuration: test_incl_extensive_container_tests)

@@ -1,6 +1,6 @@
 <component name="ProjectRunConfigurationManager">
   <configuration default="false" name="test_incl_extensive_container_tests" type="JUnit" factoryName="JUnit">
-    <module name="org.domaindrivenarchitecture.provs.provs.test" />
+    <module name="provs.test" />
     <option name="PACKAGE_NAME" value="org" />
     <option name="MAIN_CLASS_NAME" value="" />
     <option name="METHOD_NAME" value="" />

(Dockerfile for the CI test container)

@@ -1,11 +1,10 @@
-# image for usage in ci pipeline
-FROM ubuntu:22.04
+FROM ubuntu:latest
 ARG DEBIAN_FRONTEND=noninteractive
 RUN apt-get update && apt-get -y install apt-utils sudo
-RUN useradd -m testuser && echo "testuser:testuserpw" | chpasswd && usermod -aG sudo testuser
+RUN useradd -m testuser && echo "testuser:testuserpw" | chpasswd && adduser testuser sudo
 RUN echo "testuser ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/testuser
 USER testuser

README.md

@@ -1,20 +1,20 @@
 # provs
 [![pipeline status](https://gitlab.com/domaindrivenarchitecture/provs/badges/master/pipeline.svg)](https://gitlab.com/domaindrivenarchitecture/provs/-/commits/master)
-[<img src="https://domaindrivenarchitecture.org/img/delta-chat.svg" width=20 alt="DeltaChat"> chat over e-mail](mailto:buero@meissa-gmbh.de?subject=community-chat) | [<img src="https://meissa.de/images/parts/contact/mastodon36_hue9b2464f10b18e134322af482b9c915e_5501_filter_14705073121015236177.png" width=20 alt="M"> meissa@social.meissa-gmbh.de](https://social.meissa-gmbh.de/@meissa) | [Blog](https://domaindrivenarchitecture.org) | [Website](https://meissa.de)
+[<img src="https://domaindrivenarchitecture.org/img/delta-chat.svg" width=20 alt="DeltaChat"> chat over e-mail](mailto:buero@meissa-gmbh.de?subject=community-chat) | [<img src="https://meissa-gmbh.de/img/community/Mastodon_Logotype.svg" width=20 alt="team@social.meissa-gmbh.de"> team@social.meissa-gmbh.de](https://social.meissa-gmbh.de/@team) | [Website & Blog](https://domaindrivenarchitecture.org)
 ## Purpose
 provs provides cli-based tools for
-* provisioning desktop software for different desktop types:
-  * basic
-  * office
-  * IDE
+* provisioning a desktop (various kinds)
 * provisioning a k3s server
 * performing system checks
 Tasks can be run locally or remotely.
+## Status
+under development - though we already set up a few IDEs and servers with provs.
 ## Try out
 ### Prerequisites
@@ -28,9 +28,8 @@ Tasks can be run locally or remotely.
 * Download the latest `provs-desktop.jar`,`provs-server.jar` and/or `provs-syspec.jar` from: https://gitlab.com/domaindrivenarchitecture/provs/-/releases
 * Preferably into `/usr/local/bin` or any other folder where executables can be found by the system
 * Make the jar-file executable e.g. by `chmod +x provs-desktop.jar`
-* Check with `provs-desktop.jar -h` to show help information
-###### Build the binaries
+#### Build the binaries
 Instead of downloading the binaries you can build them yourself
@@ -107,24 +106,6 @@ To provision the grafana agent only to an existing k8s system, ensure that the c
 provs-server.jar k3s myuser@myhost.com -o grafana
 ```
-To add the hetzner csi driver and encrypted volumes to your k3s installation add the following to the config:
-```yaml
-hetzner:
-  hcloudApiToken:
-    source: "PLAIN"         # PLAIN, GOPASS or PROMPT
-    parameter: "mypassword" # the api key for the hetzner cloud
-  encryptionPassphrase:
-    source: "PLAIN"         # PLAIN, GOPASS or PROMPT
-    parameter: "mypassword" # the encryption passphrase for created volumes
-```
-To provision the grafana agent only to an existing k8s system, ensure that the config (as above) is available and execute:
-```bash
-provs-server.jar k3s myuser@myhost.com -o grafana
-```
 Reprovisioning the server can easily be done using the -r or --reprovision option.
 ```bash
@@ -163,9 +144,7 @@ Or to get help for subcommands e.g.
 provs-desktop.jar ide -h
 provs-server.jar k3s -h
 ```
 ## Development & mirrors
 Development happens at: https://repo.prod.meissa.de/meissa/provs
 Mirrors are:
@@ -173,17 +152,3 @@ Mirrors are:
 * https://github.com/DomainDrivenArchitecture/provs
 For more details about our repository model see: https://repo.prod.meissa.de/meissa/federate-your-repos
-## Developer information
-For using provs framework, add the required dependency to your project, then you can implement your own tasks e.g. by:
-```kotlin
-import org.domaindrivenarchitecture.provs.framework.core.Prov
-fun Prov.myCustomTask() = task {
-    cmd("echo \"Hello world!\"")
-}
-```
-See also [ForDevelopers.md](doc/ForDevelopers.md)

build.gradle

@@ -1,5 +1,5 @@
 buildscript {
-    ext.kotlin_version_no = "1.8.20"
+    ext.kotlin_version = "1.7.20"
     ext.CI_PROJECT_ID = System.env.CI_PROJECT_ID
     repositories {
@@ -8,16 +8,17 @@ buildscript {
 }
 plugins {
-    id "org.jetbrains.kotlin.jvm" version "$kotlin_version_no"
-    id 'org.jetbrains.kotlin.plugin.serialization' version "$kotlin_version_no"
+    id "org.jetbrains.kotlin.jvm" version "$kotlin_version"
+    id 'org.jetbrains.kotlin.plugin.serialization' version "$kotlin_version"
     id "java"
     id "java-test-fixtures"
 }
 apply plugin: "maven-publish"
-version = "0.38.6-SNAPSHOT"
 group = "org.domaindrivenarchitecture.provs"
+version = "0.25.1-SNAPSHOT"
 repositories {
@@ -35,6 +36,11 @@ java {
     }
 }
+jar {
+    duplicatesStrategy(DuplicatesStrategy.EXCLUDE)
+}
 test {
     // set properties for the tests
     def propertiesForTests = ["testdockerwithoutsudo"]
@@ -63,20 +69,25 @@ compileTestJava.options.debugOptions.debugLevel = "source,lines,vars"
 dependencies {
-    api("org.jetbrains.kotlin:kotlin-stdlib:$kotlin_version_no")
+    api("org.jetbrains.kotlin:kotlin-stdlib:$kotlin_version")
+    api("org.jetbrains.kotlinx:kotlinx-serialization-json:1.3.2")
+    api("org.jetbrains.kotlinx:kotlinx-serialization-core:1.3.2")
     api("org.jetbrains.kotlinx:kotlinx-cli:0.3.4")
     api('com.charleskorn.kaml:kaml:0.54.0')
     api("org.slf4j:slf4j-api:1.7.36")
-    api('ch.qos.logback:logback-classic:1.4.14')
-    api('ch.qos.logback:logback-core:1.4.14')
-    implementation("com.hierynomus:sshj:0.38.0")
-    testFixturesApi('io.mockk:mockk:1.12.3')
+    api('ch.qos.logback:logback-classic:1.2.11')
+    api('ch.qos.logback:logback-core:1.2.11')
+    implementation("org.jetbrains.kotlin:kotlin-reflect:$kotlin_version")
+    implementation("com.hierynomus:sshj:0.32.0")
+    implementation("aws.sdk.kotlin:s3:0.17.1-beta")
     testFixturesApi("org.junit.jupiter:junit-jupiter-api:5.8.2")
+    testFixturesApi('io.mockk:mockk:1.12.3')
     testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine:5.8.2")
 }
@@ -161,22 +172,6 @@ tasks.register('installlocally') {
 }
-// create binaries and install into /usr/local/bin
-// PREREQUISITE: graalvm / native-image must be installed - see https://www.graalvm.org/
-tasks.register('binariesInstall') {
-    dependsOn(uberjarServer, uberjarDesktop, uberjarSyspec)
-    doLast {
-        println "Building binaries ..."
-        exec { commandLine("sh", "-c", "cd build/libs/ && native-image --no-fallback --initialize-at-build-time=kotlin.DeprecationLevel -H:+UnlockExperimentalVMOptions -H:IncludeResources=\".*org/domaindrivenarchitecture/provs/.*(conf|ssh_config|sshd_config|sh|vimrc|xml|yaml)\" -jar provs-desktop.jar") }
-        exec { commandLine("sh", "-c", "cd build/libs/ && native-image --no-fallback --initialize-at-build-time=kotlin.DeprecationLevel -H:+UnlockExperimentalVMOptions -H:IncludeResources=\".*org/domaindrivenarchitecture/provs/.*(conf|ssh_config|sshd_config|sh|vimrc|xml|yaml)\" -jar provs-server.jar") }
-        exec { commandLine("sh", "-c", "cd build/libs/ && native-image --no-fallback --initialize-at-build-time=kotlin.DeprecationLevel -H:+UnlockExperimentalVMOptions -H:IncludeResources=\".*org/domaindrivenarchitecture/provs/.*(conf|ssh_config|sshd_config|sh|vimrc|xml|yaml)\" -jar provs-syspec.jar") }
-        exec { commandLine("sh", "-c", "sudo cp build/libs/provs-desktop /usr/local/bin/") }
-        exec { commandLine("sh", "-c", "sudo cp build/libs/provs-server /usr/local/bin/") }
-        exec { commandLine("sh", "-c", "sudo cp build/libs/provs-syspec /usr/local/bin/") }
-    }
-}
 // publish to repo.prod.meissa.de with task "publishLibraryPublicationToMeissaRepository" -- (using pattern "publishLibraryPublicationTo<MAVEN REPOSITORY NAME>Repository")
 publishing {
     publications {
@@ -205,25 +200,23 @@ publishing {
             credentials(HttpHeaderCredentials) {
                 name = "Authorization"
-                def publishPackageTokenName = "MEISSA_PUBLISH_PACKAGE_TOKEN"
                 if (System.getenv("CI_JOB_TOKEN") != null) {
-                    def tokenFromEnv = System.getenv(publishPackageTokenName)
+                    def tokenFromEnv = System.getenv("RELEASE_TOKEN")
                     if (tokenFromEnv == null) {
-                        println "Error: $publishPackageTokenName not found"
+                        println "Error: RELEASE_TOKEN not found"
                     } else {
                         value = "token " + tokenFromEnv
-                        println "$publishPackageTokenName found - "
+                        println "RELEASE_TOKEN found - "
                     }
                 } else {
                     // use project-property (define e.g. in "~/.gradle/gradle.properties") when not running in ci
                     // you can create a token in gitea "Profile and Settings ... > Settings > Applications", Token Name, Select scopes (write:package) > "Generate Token"
-                    if (!project.hasProperty(publishPackageTokenName)) {
-                        // if token is missing, provide a dummy in order to avoid error "Could not get unknown property 'MEISSA_PUBLISH_PACKAGE_TOKEN' for Credentials [header: Authorization]" for other gradle tasks
-                        ext.MEISSA_PUBLISH_PACKAGE_TOKEN = "Token $publishPackageTokenName not provided in file \".gradle/gradle.properties\""
-                        println "Error: Token $publishPackageTokenName not found"
-                    } else {
-                        value = "token " + project.property(publishPackageTokenName)
+                    if (!project.hasProperty("RELEASE_TOKEN")) {
+                        // if RELEASE_TOKEN is missing, provide a dummy in order to avoid error "Could not get unknown property 'RELEASE_TOKEN' for Credentials [header: Authorization]" for other gradle tasks
+                        ext.RELEASE_TOKEN = "RELEASE_TOKEN not provided in file \".gradle/gradle.properties\""
+                        println "Error: RELEASE_TOKEN not found"
                     }
+                    value = "token $RELEASE_TOKEN"
                 }
             }
@@ -233,3 +226,39 @@ publishing {
         }
     }
 }
+tasks.register('createReleaseAndUploadAssets') {
+    dependsOn(uberjarServer, uberjarDesktop, uberjarSyspec)
+    doLast {
+        def token = project.properties.get("RELEASE_TOKEN") ?: System.getenv("RELEASE_TOKEN")
+        if (token == null) {
+            throw new GradleException('No token found.')
+        }
+        def output1 = new ByteArrayOutputStream()
+        exec {
+            standardOutput = output1
+            def TAG = project.version
+            commandLine("sh", "-c", "curl -X 'POST' 'https://repo.prod.meissa.de/api/v1/repos/meissa/provs/releases' -H 'accept: application/json' -H 'Content-Type: application/json' -d '{ \"body\": \"Provides jar-files for release $TAG\\nAttention: The \\\"Source Code\\\"-files below are not up-to-date!\", \"tag_name\": \"$TAG\" }' -H \"Authorization: token $token\"")
+        }
+        def matches = output1 =~ /\{"id":(\d+?),/
+        if (!matches) {
+            throw new GradleException('id of release could not be parsed in: ' + output1)
+        }
+        def releaseId = matches.group(1)
+        println "Release=$releaseId"
+        def releaseApiUrl = "https://repo.prod.meissa.de/api/v1/repos/meissa/provs/releases"
+        exec { commandLine("sh", "-c", "find build/libs/ -type f -exec sha256sum {} \\; | sort > build/libs/sha256sum.lst") }
+        exec { commandLine("sh", "-c", "find build/libs/ -type f -exec sha512sum {} \\; | sort > build/libs/sha512sum.lst") }
+        exec { commandLine("sh", "-c", "curl -X 'POST' '$releaseApiUrl/$releaseId/assets' -H 'accept: application/json' -H \"Authorization: token $token\" -H 'Content-Type: multipart/form-data' -F 'attachment=@build/libs/provs-desktop.jar;type=application/x-java-archive'") }
+        exec { commandLine("sh", "-c", "curl -X 'POST' '$releaseApiUrl/$releaseId/assets' -H 'accept: application/json' -H \"Authorization: token $token\" -H 'Content-Type: multipart/form-data' -F 'attachment=@build/libs/provs-server.jar;type=application/x-java-archive'") }
+        exec { commandLine("sh", "-c", "curl -X 'POST' '$releaseApiUrl/$releaseId/assets' -H 'accept: application/json' -H \"Authorization: token $token\" -H 'Content-Type: multipart/form-data' -F 'attachment=@build/libs/provs-syspec.jar;type=application/x-java-archive'") }
+        exec { commandLine("sh", "-c", "curl -X 'POST' '$releaseApiUrl/$releaseId/assets' -H 'accept: application/json' -H \"Authorization: token $token\" -H 'Content-Type: multipart/form-data' -F 'attachment=@build/libs/sha256sum.lst;type=text/plain'") }
+        exec { commandLine("sh", "-c", "curl -X 'POST' '$releaseApiUrl/$releaseId/assets' -H 'accept: application/json' -H \"Authorization: token $token\" -H 'Content-Type: multipart/form-data' -F 'attachment=@build/libs/sha512sum.lst;type=text/plain'") }
+    }
+}

build.py

@@ -1,36 +1,16 @@
-import os
+from os import environ
 from subprocess import run
-from pybuilder.core import task, init
+from pybuilder.core import init, task
 from ddadevops import *
-default_task = "dev"
 name = "provs"
 PROJECT_ROOT_PATH = "."
-version = "0.38.3-dev"
 @init
-def initialize0(project):
-    """
-    workaround to avoid prompt for gopass if no artifacts need to be uploaded
-    usage: with option "-E ng" , e.g. "pyb -E artifacts patch_local"
-    """
-    os.environ["RELEASE_ARTIFACT_TOKEN"] = "dummy"  # avoids prompt for RELEASE_ARTIFACT_TOKEN
-@init(environments=["artifacts"])
-def initialize1(project):
-    """
-    prompt for gopass if artifacts need to be uploaded
-    usage: with option "-E artifacts" , e.g. "pyb -E artifacts dev"
-    """
-    del os.environ["RELEASE_ARTIFACT_TOKEN"]
-@init
-def initialize2(project):
+def initialize(project):
     input = {
         "name": name,
         "module": "notused",
@@ -38,104 +18,55 @@ def initialize2(project):
         "project_root_path": PROJECT_ROOT_PATH,
         "build_types": [],
         "mixin_types": ["RELEASE"],
-        "release_main_branch": "main",
         "release_primary_build_file": "build.gradle",
-        "release_secondary_build_files": ["build.py"],
-        # release artifacts
-        "release_artifact_server_url": "https://repo.prod.meissa.de",
-        "release_organisation": "meissa",
-        "release_repository_name": name,
-        "release_artifacts": [
-            "build/libs/provs-server.jar",
-            "build/libs/provs-desktop.jar",
-            "build/libs/provs-syspec.jar",
-            "build/libs/sha256sum.lst",
-            "build/libs/sha512sum.lst",
-        ],
+        "release_secondary_build_files": [],
     }
     build = ReleaseMixin(project, input)
     build.initialize_build_dir()
-@task
-def dev(project):
-    """
-    to avoid gopass prompt set RELEASE_ARTIFACT_TOKEN e.g.:
-    RELEASE_ARTIFACT_TOKEN=xxx pyb dev
-    """
-    run("./gradlew assemble", shell=True)
-@task
-def build(project):
-    run("./gradlew assemble", shell=True)
 @task
 def patch(project):
-    """
-    updates version to next patch level, creates a tag, creates new SNAPSHOT version,
-    commits primary build file (build.gradle) and pushes to remote
-    """
-    increase_version_number(project, "PATCH")
+    linttest(project, "PATCH")
     release(project)
 @task
 def minor(project):
-    """
-    updates version to next minor level, creates a tag, creates new SNAPSHOT version,
-    commits primary build file (build.gradle) and pushes to remote
-    """
-    increase_version_number(project, "MINOR")
+    linttest(project, "MINOR")
    release(project)
 @task
 def major(project):
-    """
-    updates version to next major level, creates a tag, creates new SNAPSHOT version,
-    commits primary build file (build.gradle) and pushes to remote
-    """
-    increase_version_number(project, "MAJOR")
+    linttest(project, "MAJOR")
     release(project)
+@task
+def dev(project):
+    linttest(project, "NONE")
+@task
+def prepare(project):
+    build = get_devops_build(project)
+    build.prepare_release()
 @task
 def tag(project):
     build = get_devops_build(project)
     build.tag_bump_and_push_release()
 @task
+def build(project):
+    print("---------- build stage ----------")
+    run("./gradlew assemble", shell=True)
 def release(project):
-    build = get_devops_build(project)
-    build.prepare_release()
+    prepare(project)
     tag(project)
+def linttest(project, release_type):
+    build(project)
-@task
-def package(project):
-    run("./gradlew assemble -x test jar", shell=True)
-    run("./gradlew assemble -x test uberjarDesktop", shell=True)
-    run("./gradlew assemble -x test uberjarServer", shell=True)
-    run("./gradlew assemble -x test uberjarSyspec", shell=True)
-    run("cd build/libs/ && find . -type f -exec sha256sum {} \; | sort > sha256sum.lst", shell=True)
-    run("cd build/libs/ && find . -type f -exec sha512sum {} \; | sort > sha512sum.lst", shell=True)
-@task
-def publish_release(project):
-    """ creates a release in repo.meissa and uploads artifacts (jar-files and checksum files) """
-    build = get_devops_build(project)
-    build.publish_artifacts()
-@task
-def inst(project):
-    run("./gradlew inst", shell=True)
-def increase_version_number(project, release_type):
-    build = get_devops_build(project)
-    build.update_release_type(release_type)

doc/ForDevelopers.md

@@ -1,8 +1,22 @@
-This page provides information for developers.
-# Tasks
-## What is a task ?
+# Information for developers
+## Create a provs jar-file
+* Clone this repo
+* Build the jar-file by `./gradlew uberjarDesktop`
+* In folder build/libs you'll find the file `provs-desktop.jar`
+This uberjar is a Java jar-file including all required dependencies.
+## Task
+```kotlin
+fun Prov.provisionK8s() = task { /* ... code and subtasks come here ... */ }
+```
+If you're having a deeper look into the provs code, you'll regularly see a task definition like this and might wonder ...
+### What is a task ?
 A task is the **basic execution unit** in provs. When executed, each task produces exactly one result (line) with either success or failure.
@@ -12,108 +26,9 @@ The success or failure is computed automatically in the following way:
 * a task defined with **optional** (i.e. `= optional { /* ... */ }` always returns success (even if there are failing subtasks)
 * **requireLast** defines a task which must provide an explicit result and solely this result counts for success calculation
-## Task declaration
-### Recommended way
-A task can be declared by
-```kotlin
-fun Prov.myCustomTask() = task { /* ... code and subtasks come here ... */ }
-// e.g.
-fun Prov.myEchoTask() = task {
-    cmd("echo hello world!")
-}
-```
-The task will succeed if all sub-tasks (called tasks during execution) have succeeded resp. if no sub-task was called.
-### Alternative ways
-The following ways are equivalent but are more verbose:
-```kotlin
-// Redundant declaration of the return type (ProvResult), which is already declared by task
-fun Prov.myCustomTask(): ProvResult = task { /* ... code and subtasks come here ... */ }
-// Redundant parentheses behind task
-fun Prov.myCustomTask() = task() { /* ... code and subtasks come here ... */ }
-// Redundant definition of the task name, but could be used to output a different task name
-fun Prov.myCustomTask() = task("myCustomTask") { /* ... code and subtasks come here ... */ }
-// Functionally equal, but with additional curly brackets
-fun Prov.myCustomTask() { task { /* ... code and subtasks come here ... */ } }
-```
-Btw, the following lines WILL NOT work as expected.
-Due to too much lambda nesting, the code within the task is NOT executed:
-```kotlin
-fun Prov.myCustomTask() = { task { /* ... code and subtasks come here ... */ } }
-fun Prov.myCustomTask() {{ task { /* ... code and subtasks come here ... */ } }}
-```
-### Add custom results
-If you want to add a result explicitly, you can use method `addResultToEval`.
-This may be used e.g. to add explicitly an error line, like in:
-```kotlin
-fun Prov.myCustomTask() = task {
-    /* some other code ... */
-    addResultToEval(ProvResult(false, err = "my error msg"))
-    /* some other code ... */
-}
-```
-or alternatively you can use `taskWithResult`.
-#### TaskWithResult
-In case you want to include the return value (of type `ProvResult`) of a task to be added to the evaluation,
-you can use `taskWithResult` instead of `task` and return the value, e.g. like
-```kotlin
-fun Prov.myEchoTask() = taskWithResult {
-    cmd("echo hello world!")
-    // ...
-    ProvResult(false, "Error: ... error message ...") // will be returned as return value and included in the evaluation
-}
-```
-IMPORTANT: the value you want to return must be placed at the end of the lambda code (as usual in functional programming)!
-The following will NOT work as expected:
-```kotlin
-fun Prov.myEchoTask() = taskWithResult {
-    ProvResult(false, "Error: ... error message ...") // will be ignored
-    // the result from the call below (i.e. from task "cmd") will be returned by myEchoTask,
-    // which is redundant as its result is already included in the evaluation anyway.
-    cmd("echo hello world!")
-}
-```
-### Task output
-If a task is run e.g. with `local().myEchoTask()`, it will produce output like
-```
-> Success -- myEchoTask
----> Success -- cmd [/bin/bash, -c, echo hello world!]
-```
 ## Call hierarchy
 In the following link you can find an example of a sequence diagram when provisioning a desktop:
 [ProvisionDesktopSequence.md](ProvisionDesktopSequence.md)
-## Create a provs jar-file
-* Clone this repo
-* Build the jar-file by `./gradlew uberjarDesktop`
-* In folder build/libs you'll find the file `provs-desktop.jar`
-This uberjar is a Java jar-file including all required dependencies.
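Not part of the diff, for orientation only: the two bullets above on `optional` and `requireLast` are easiest to read with a small example. A minimal sketch, assuming the Prov API shown in this file (`task`, `optional`, `cmd`):

```kotlin
// Illustrative sketch only - success/failure evaluation with an optional subtask.
fun Prov.installBaseAndNiceToHaveTools() = task {
    cmd("sudo apt-get install -y git")          // a failure here fails the surrounding task
    optional {
        cmd("sudo apt-get install -y inkscape") // a failure here is ignored for the success calculation
    }
}
```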

(doc: Go installation and testing notes)

@@ -4,20 +4,18 @@
 #### remove old version
 sudo rm -rf ~/go
 ### download latest version and configure
-curl -OL https://go.dev/dl/go1.21.3.linux-amd64.tar.gz
+curl -OL https://go.dev/dl/$(curl 'https://go.dev/VERSION?m=text').linux-amd64.tar.gz
-# extract latest version to ~/go
+extract latest version to ~/go
 tar -C ~ -xzf go*.linux-amd64.tar.gz
-# append path
-```
-(meissa) jem@meissa-ide-2023:~$ cat .bashrc.d/go.sh
-PATH=$PATH:$HOME/go/bin
-export PATH
-```
+APPEND='export PATH=$PATH:$HOME/go/bin'
+echo $APPEND >> $HOME/.profile
 ## VScode optional - TODO!?!
-"Go for VS Code v0.39.1"
+Go extension autoinstall
+install gpls, div, etc.
 ## Testing forgejo
 full:

(doc: Modules)

@@ -1,20 +0,0 @@
-# Modules
-## Modules and their possible relations
-![modularization.png](resources/modularization.png)
-#### Modules
-A,B,C: Modules with both domain and infrastructure layer code - common type of modules
-D: Module with only domain: can sometimes make sense if only domain logic and no infrastructure logic is required
-E: Module with only infrastructure: usually utility modules that just provide a collection of infrastructure functionality
-#### Interactions
-1. Domain calls (a function in) the infrastructure of the same module - common practice within a module
-1. Domain calls (a function in) the domain of another module - common practice between modules
-1. Infrastructure calls infrastructure of another module - usually not recommended
-1. Domain calls infrastructure in another module - can make sense in some cases e.g. if module D just needs some low-level function of module D. However where possible calling domain of module C should be preferred
-1. Domain calls infrastructure in another module, which only has infrastructure - common practice for calling utility modules, which don't have a domain.

doc/ProvSequence.puml

@@ -0,0 +1,35 @@
+@startuml
+autonumber
+Application -> Prov: create
+activate Prov
+Application -> DesktopService.kt: provisionDesktop(prov, ...)
+DesktopService.kt -> Install.kt: aptInstall(prov, lambda=cmd "apt install", ..)
+Install.kt -> Prov: taskWithResult
+activate Prov
+Prov -> Prov: evaluate
+activate Prov
+Prov -> Prov: initProgress (at level 0)
+Prov -> Prov: progress
+activate Prov
+Prov -> Prov: lambda
+activate Prov
+Prov -> Processor: exec
+deactivate Prov
+Prov <-- Prov: ProvResult
+deactivate Prov
+Prov -> Prov: endProgress (at level 0)
+Prov -> Prov: printResults (at level 0)
+deactivate Prov
+deactivate Prov
+Install.kt <-- Prov: ProvResult
+@enduml

(doc: Prov sequence diagram, PlantUML in Markdown)

@@ -1,47 +0,0 @@
-```plantuml
-@startuml
-autonumber
-participant Application
-participant DesktopService
-participant Install
-participant Prov
-participant Processor
-Application -> Prov: create
-activate Prov
-Application -> DesktopService: provisionDesktop(prov, ...)
-DesktopService -> Install: prov.aptInstall()
-Install -> Prov: taskWithResult( lambda = cmd("sudo apt install ...") )
-activate Prov
-Prov -> Prov: evaluate
-activate Prov
-Prov -> Prov: initProgress (if level 0)
-Prov -> Prov: progress
-activate Prov
-Prov -> Prov: lambda
-activate Prov
-Prov -> Processor: exec
-Prov <-- Processor: exec
-deactivate Prov
-deactivate Prov
-Prov -> Prov: endProgress (if level 0)
-Prov -> Prov: printResults (if level 0)
-deactivate Prov
-deactivate Prov
-Install <-- Prov: ProvResult
-DesktopService <-- Install
-Application <-- DesktopService
-@enduml
-```

doc/ProvisionDesktopSequence.md

@@ -13,7 +13,6 @@ box "application" #LightBlue
 participant Application
 participant CliArgumentsParser
 participant DesktopCliCommand
-participant ProvWithSudo
 end box
 box #White
@@ -22,7 +21,8 @@ participant "Prov (local or remote...)" as ProvInstance
 end box
 box "domain" #LightGreen
-participant "DesktopService"
+participant "DesktopService\n.provisionDesktopCommand" as DesktopService1
+participant "DesktopService\n.provisionDesktop" as DesktopService2
 end box
 box "infrastructure" #CornSilk
@@ -36,18 +36,14 @@ Application -> CliArgumentsParser : parseCommand
 Application -> DesktopCliCommand : isValid ?
 Application -> CliUtils : createProvInstance
 ProvInstance <- CliUtils : create
-Application -> ProvWithSudo : ensureSudoWithoutPassword
-Application -> DesktopService : provisionDesktopCommand ( provInstance, desktopCliCommand )
-DesktopService -> ConfigRepository : getConfig
-DesktopService -> DesktopService : provisionDesktop( config )
-DesktopService -> Infrastructure_functions: Various calls like:
-DesktopService -> Infrastructure_functions: install ssh, gpg, git ...
-DesktopService -> Infrastructure_functions: installVirtualBoxGuestAdditions
-DesktopService -> Infrastructure_functions: configureNoSwappiness, ...
+Application -> DesktopService1 : provisionDesktopCommand ( provInstance, desktopCliCommand )
+DesktopService1 -> ConfigRepository : getConfig
+DesktopService1 -> DesktopService2 : provisionDesktop( config )
+DesktopService2 -> Infrastructure_functions: Various calls like:
+DesktopService2 -> Infrastructure_functions: install ssh, gpg, git ...
+DesktopService2 -> Infrastructure_functions: installVirtualBoxGuestAdditions
+DesktopService2 -> Infrastructure_functions: configureNoSwappiness, ...
 @enduml
 ```

(doc: design principles)

@@ -2,6 +2,8 @@ This repository holds the documentation of the provs framework.
 # Design principles
+For usage examples it is recommended to have a look at [provs-scripts](https://gitlab.com/domaindrivenarchitecture/provs-scripts) or [provs-ubuntu-extensions](https://gitlab.com/domaindrivenarchitecture/provs-ubuntu-extensions).
 ## "Implarative"
 Configuration management tools are usually classified as either **imperative** or **declarative**.

(doc: release process)

@@ -5,7 +5,7 @@ release-1.2 or release-1.2.3
 I.e.: release-X.Y.Z where X, Y, Z are the major, minor resp. the patch level of the release. Z can be omitted.
-**Note:** Such kind of release tags should only be applied to commits in the main branch.
+**Note:** Such kind of release tags should only be applied to commits in the master branch.
 ```
 #adjust [version]

(doc: Howto update gradle wrapper)

@@ -1,10 +0,0 @@
-### Howto update gradle wrapper
-1. To *latest* version (be aware of deprecated parts in future versions):
-```shell
-./gradlew wrapper --gradle-version latest
-```
-2. To a *specific* version:
-```shell
-./gradlew wrapper --gradle-version 8.6
-```

Binary file not shown.


(desktop application: config-file error message)

@@ -38,8 +38,8 @@ fun main(args: Array<String>) {
         null
     } catch (e: FileNotFoundException) {
         println(
-            "Error: File\u001b[31m $configFileName \u001b[0m was not found.\n" +
-                    "Pls copy file \u001B[31m desktop-config-example.yaml \u001B[0m to file \u001B[31m $configFileName \u001B[0m " +
+            "Error: File\u001b[31m ${configFileName} \u001b[0m was not found.\n" +
+                    "Pls copy file \u001B[31m desktop-config-example.yaml \u001B[0m to file \u001B[31m ${configFileName} \u001B[0m " +
                     "and change the content according to your needs."
         )
         null

DesktopOnlyModule.kt

@@ -2,9 +2,4 @@ package org.domaindrivenarchitecture.provs.desktop.domain
 enum class DesktopOnlyModule {
     FIREFOX, VERIFY
-    ;
-    fun isIn(list: List<String>): Boolean {
-        return list.any { it.equals(this.name, ignoreCase = true) }
-    }
 }

DesktopService.kt

@@ -1,7 +1,5 @@
 package org.domaindrivenarchitecture.provs.desktop.domain
-import org.domaindrivenarchitecture.provs.desktop.domain.DesktopOnlyModule.FIREFOX
-import org.domaindrivenarchitecture.provs.desktop.domain.DesktopOnlyModule.VERIFY
 import org.domaindrivenarchitecture.provs.desktop.infrastructure.*
 import org.domaindrivenarchitecture.provs.framework.core.Prov
 import org.domaindrivenarchitecture.provs.framework.ubuntu.git.provisionGit
@@ -16,21 +14,14 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.user.base.whoami
 internal fun Prov.provisionDesktopCommand(cmd: DesktopCliCommand, conf: DesktopConfig) = task {
-    validatePrecondition()
-    val only = cmd.onlyModules
-    if (only == null) {
         provisionDesktop(
             cmd.type,
             conf.ssh?.keyPair(),
             conf.gpg?.keyPair(),
             conf.gitUserName,
             conf.gitEmail,
+            cmd.onlyModules
         )
-    } else {
-        provisionOnlyModules(cmd.type, only)
-    }
 }
@@ -48,34 +39,24 @@ internal fun Prov.provisionDesktop(
     gpg: KeyPair? = null,
     gitUserName: String? = null,
     gitEmail: String? = null,
+    onlyModules: List<String>?
 ) = task {
+    validatePrecondition()
-    provisionBasicDesktop(gpg, ssh, gitUserName, gitEmail)
+    provisionBasicDesktop(gpg, ssh, gitUserName, gitEmail, onlyModules)
     if (desktopType == DesktopType.OFFICE) {
-        provisionOfficeDesktop()
+        provisionOfficeDesktop(onlyModules)
+        if (onlyModules == null) {
             verifyOfficeSetup()
         }
+    }
     if (desktopType == DesktopType.IDE) {
+        if (onlyModules == null) {
             provisionOfficeDesktop()
             provisionIdeDesktop()
             verifyIdeSetup()
-        }
-    }
+        } else {
+            provisionIdeDesktop(onlyModules)
-internal fun Prov.provisionOnlyModules(
-    desktopType: DesktopType = DesktopType.BASIC,
-    onlyModules: List<String>
-) = task {
-    if (FIREFOX.isIn(onlyModules)) {
-        installPpaFirefox()
-    }
-    if (VERIFY.isIn(onlyModules)) {
-        if (desktopType == DesktopType.OFFICE) {
-            verifyOfficeSetup()
-        } else if (desktopType == DesktopType.IDE) {
-            verifyIdeSetup()
-        }
-    }
         }
     }
 }
@@ -86,13 +67,61 @@ fun Prov.validatePrecondition() {
     }
 }
+fun Prov.provisionIdeDesktop(onlyModules: List<String>? = null) {
+    if (onlyModules == null) {
+        aptInstall(OPEN_VPM)
+        aptInstall(OPENCONNECT)
+        aptInstall(VPNC)
+        // DevEnvs
+        installDocker()
+        aptInstall(JAVA)
+        aptInstall(CLOJURE_TOOLS)
+        installShadowCljs()
+        installDevOps()
+        provisionPython()
+        // IDEs
+        installVSC("python", "clojure")
+        installIntelliJ()
+    } else if (onlyModules.contains(DesktopOnlyModule.VERIFY.name.lowercase())) {
+        verifyIdeSetup()
+    } else if (onlyModules.contains(DesktopOnlyModule.FIREFOX.name.lowercase())) {
+        installPpaFirefox()
+    }
+}
+fun Prov.provisionOfficeDesktop(onlyModules: List<String>? = null) {
+    if (onlyModules == null) {
+        aptInstall(ZIP_UTILS)
+        aptInstall(SPELLCHECKING_DE)
+        aptInstall(BROWSER)
+        aptInstall(EMAIL_CLIENT)
+        installDeltaChat()
+        aptInstall(OFFICE_SUITE)
+        installZimWiki()
+        installNextcloudClient()
+        aptInstall(COMPARE_TOOLS)
+        // optional as installation of these tools often fail and they are not considered mandatory
+        optional {
+            aptInstall(DRAWING_TOOLS)
+        }
+    } else if (onlyModules.contains(DesktopOnlyModule.VERIFY.name.lowercase())) {
+        verifyOfficeSetup()
+    } else if (onlyModules.contains(DesktopOnlyModule.FIREFOX.name.lowercase())) {
+        installPpaFirefox()
+    }
+}
 fun Prov.provisionBasicDesktop(
     gpg: KeyPair?,
     ssh: SshKeyPair?,
     gitUserName: String?,
     gitEmail: String?,
+    onlyModules: List<String>?
 ) {
+    if (onlyModules == null) {
     aptInstall(KEY_MANAGEMENT)
     aptInstall(VERSION_MANAGEMENT)
     aptInstall(NETWORK_TOOLS)
@@ -104,8 +133,7 @@ fun Prov.provisionBasicDesktop(
     aptInstall(CLIP_TOOLS)
     aptPurge(
         "remove-power-management xfce4-power-manager " +
-                "xfce4-power-manager-plugins xfce4-power-manager-data" +
-                "upower libimobiledevice6 libplist3 libusbmuxd6 usbmuxd bluez-cups"
+                "xfce4-power-manager-plugins xfce4-power-manager-data"
     )
     aptPurge("abiword gnumeric")
     aptPurge("popularity-contest")
@@ -125,45 +153,7 @@ fun Prov.provisionBasicDesktop(
     configureNoSwappiness()
     configureBash()
     installVirtualBoxGuestAdditions()
-}
+    } else if (onlyModules.contains(DesktopOnlyModule.FIREFOX.name.lowercase())) {
+        installPpaFirefox()
-fun Prov.provisionOfficeDesktop() {
-    aptInstall(ZIP_UTILS)
-    aptInstall(SPELLCHECKING_DE)
-    aptInstall(BROWSER)
-    aptInstall(EMAIL_CLIENT)
-    installDeltaChat()
-    aptInstall(OFFICE_SUITE)
-    installZimWiki()
-    // installNextcloudClient() might not install - might need fix and working test
-    aptInstall(COMPARE_TOOLS)
-    // VSCode is also required in office VM (not only in IDE desktop) e.g. as editor
-    installVSCode("python", "clojure")
-    // optional, as installation of these tools often fail and as they are not mandatory
-    optional {
-        aptInstall(DRAWING_TOOLS)
     }
 }
-fun Prov.provisionIdeDesktop() {
-    aptInstall(OPEN_VPM)
-    aptInstall(OPENCONNECT)
-    aptInstall(VPNC)
-    // DevEnvs
-    installDocker()
-    aptInstall(JAVA)
-    aptInstall(CLOJURE_TOOLS)
-    installShadowCljs()
-    installDevOps()
-    provisionPython()
-    installHugoByDeb()
-    // IDEs
-    installIntelliJ()
-    installKubeconform()
-}
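Not part of the diff, for orientation only: on the main side the restricted "only modules" path is a separate entry point (`provisionOnlyModules`), while the 0.25.0 side threads `onlyModules` through each provisioning function. A caller-side sketch against the main-side signatures shown above (both functions are `internal`, so such code would have to live in the same module):

```kotlin
// Illustrative sketch only - based on the main-side signatures visible in the diff above.
fun Prov.verifyIdeDesktopOnly() = task {
    // runs only the VERIFY module for an IDE desktop instead of a full provisioning run
    provisionOnlyModules(DesktopType.IDE, listOf("verify"))
}
```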

KnownHost.kt

@@ -1,35 +1,26 @@
 package org.domaindrivenarchitecture.provs.desktop.domain
-typealias HostKey = String
 /**
- * Represents a known host for ssh connections.
- *
- * @param hostName domain name or ip
- * @param port (optional) to be specified if different from default port 22
- * @param hostKeys list of keys, where each should contain separated by space: 1. keytype, 2. key and 3. (optionally) a comment
+ * A HostKey should contain space-separated: keytype, key and (optionally) a comment
  *
  * See: https://man7.org/linux/man-pages/man8/sshd.8.html#SSH_KNOWN_HOSTS_FILE_FORMAT
  */
-open class KnownHost(
+typealias HostKey = String
+open class KnownHost protected constructor(
     val hostName: String,
-    val port: Int? = null,
     val hostKeys: List<HostKey>
 ) {
-    constructor(hostName: String, hostKeys: List<HostKey>) : this(hostName, null, hostKeys)
     companion object {
         val GITHUB = KnownHost(
-            "github.com",
-            listOf(
+            "github.com", listOf(
                 "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl",
                 "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=",
                 "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=",
             )
         )
         val GITLAB = KnownHost(
-            "gitlab.com",
-            listOf(
+            "gitlab.com", listOf(
                 "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf",
                 "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9",
                 "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=",

(keys provisioning: addKnownHosts)

@@ -6,6 +6,6 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.addKnownHos
 fun Prov.addKnownHosts(knownHosts: List<KnownHost> = KnownHost.values()) = task {
     for (knownHost in knownHosts) {
-        addKnownHost(knownHost, verifyKeys = true)
+        addKnownHost(knownHost.hostName, knownHost.hostKeys, verifyKeys = true)
     }
 }
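Not part of the diff, for orientation only: the two sides differ merely in whether a `KnownHost` object or its individual fields are passed on. A usage sketch under the main-side API shown above (`KnownHost.GITHUB`/`KnownHost.GITLAB` and `addKnownHosts`):

```kotlin
// Illustrative sketch only - assumes the main-side API shown above.
fun Prov.trustDefaultGitHosts() = task {
    // verifies the configured host keys and adds them to the user's known_hosts
    addKnownHosts(listOf(KnownHost.GITHUB, KnownHost.GITLAB))
}
```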

(desktop infrastructure: DevOps tooling)

@@ -15,13 +15,13 @@ fun Prov.installDevOps() = task {
     installTerraform()
     installKubectlAndTools()
     installYq()
-    installGraalVM()
 }
 fun Prov.installYq(
     version: String = "4.13.2",
     sha256sum: String = "d7c89543d1437bf80fee6237eadc608d1b121c21a7cbbe79057d5086d74f8d79"
-) = task {
+): ProvResult = task {
     val path = "/usr/bin/"
     val filename = "yq"
     if (!checkFile(path + filename)) {
@@ -38,7 +38,7 @@ fun Prov.installYq(
     }
 }
-fun Prov.installKubectlAndTools() = task {
+fun Prov.installKubectlAndTools(): ProvResult = task {
     task("installKubectl") {
         if (!checkFile(KUBE_CONFIG_CONTEXT_SCRIPT)) {
@@ -49,37 +49,13 @@ fun Prov.installKubectlAndTools() = task {
         }
     }
-    task("installKubeconform") {
-        installKubeconform()
-    }
     installDevopsScripts()
 }
-fun Prov.installKubeconform() = task {
-    // check for latest stable release on: https://github.com/yannh/kubeconform/releases
-    val version = "0.6.4"
-    val installationPath = "/usr/local/bin/"
-    val tmpDir = "~/tmp"
-    val filename = "kubeconform-linux-amd64"
-    val packedFilename = "$filename.tar.gz"
-    if ( !chk("kubeconform -v") || "v$version" != cmd("kubeconform -v").out?.trim() ) {
-        downloadFromURL(
-            "https://github.com/yannh/kubeconform/releases/download/v$version/$packedFilename",
-            path = tmpDir,
-            sha256sum = "2b4ebeaa4d5ac4843cf8f7b7e66a8874252b6b71bc7cbfc4ef1cbf85acec7c07"
-        )
-        cmd("sudo tar -xzf $packedFilename -C $installationPath", tmpDir)
-    } else {
-        ProvResult(true, out = "Kubeconform $version already installed")
-    }
-}
-fun Prov.installKubectl() = task {
+fun Prov.installKubectl(): ProvResult = task {
     // see https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/
-    val kubectlVersion = "1.27.4"
+    val kubectlVersion = "1.23.0"
     val tmpDir = "~/tmp"
     // prerequisites -- see https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/
@@ -91,19 +67,19 @@ fun Prov.installKubectl() = task {
     downloadFromURL(
         "https://dl.k8s.io/release/v$kubectlVersion/bin/linux/amd64/kubectl",
         path = tmpDir,
-        // from https://dl.k8s.io/v1.27.4/bin/linux/amd64/kubectl.sha256
-        sha256sum = "4685bfcf732260f72fce58379e812e091557ef1dfc1bc8084226c7891dd6028f"
+        // from https://dl.k8s.io/v1.23.0/bin/linux/amd64/kubectl.sha256
+        sha256sum = "2d0f5ba6faa787878b642c151ccb2c3390ce4c1e6c8e2b59568b3869ba407c4f"
     )
     cmd("sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl", dir = tmpDir)
 }
-fun Prov.configureKubectlBashCompletion() = task {
+fun Prov.configureKubectlBashCompletion(): ProvResult = task {
     cmd("kubectl completion bash >> /etc/bash_completion.d/kubernetes", sudo = true)
     createDir(".bashrc.d")
     createFileFromResource(KUBE_CONFIG_CONTEXT_SCRIPT, "kubectl.sh", RESOURCE_PATH)
 }
-fun Prov.installDevopsScripts() = task {
+fun Prov.installDevopsScripts() {
     task("install ssh helper") {
         createFileFromResource(
@@ -145,7 +121,7 @@ fun Prov.installDevopsScripts() = task {
         }
     }
-fun Prov.installTerraform() = task {
+fun Prov.installTerraform(): ProvResult = task {
     val dir = "/usr/lib/tfenv/"
     if (!checkDir(dir)) {
@@ -158,3 +134,34 @@ fun Prov.installTerraform() = task {
     cmd("tfenv install latest:^1.4.6", sudo = true)
     cmd("tfenv use latest:^1.4.6", sudo = true)
 }
+// -------------------------------------------- AWS credentials file -----------------------------------------------
+fun Prov.installAwsCredentials(id: String = "REPLACE_WITH_YOUR_ID", key: String = "REPLACE_WITH_YOUR_KEY"): ProvResult =
+    task {
+        val dir = "~/.aws"
+        if (!checkDir(dir)) {
+            createDirs(dir)
+            createFile("~/.aws/config", awsConfig())
+            createFile("~/.aws/credentials", awsCredentials(id, key))
+        } else {
+            ProvResult(true, "aws credential folder already installed")
+        }
+    }
+fun awsConfig(): String {
+    return """
+        [default]
+        region = eu-central-1
+        output = json
+    """.trimIndent()
+}
+fun awsCredentials(id: String, key: String): String {
+    return """
+        [default]
+        aws_access_key_id = $id
+        aws_secret_access_key = $key
+    """.trimIndent()
+}
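Not part of the diff, for orientation only: a hedged usage sketch for the 0.25.0-side helper above, assuming the `installAwsCredentials(id, key)` signature as shown; real credentials should come from the environment or a secret store, never from source code:

```kotlin
// Illustrative sketch only - assumes installAwsCredentials(id, key) as shown above.
fun Prov.provisionAwsCliAccess() = task {
    val id = System.getenv("AWS_ACCESS_KEY_ID") ?: "REPLACE_WITH_YOUR_ID"
    val key = System.getenv("AWS_SECRET_ACCESS_KEY") ?: "REPLACE_WITH_YOUR_KEY"
    installAwsCredentials(id, key)
}
```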

(desktop infrastructure: installGopass)

@@ -11,10 +11,9 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.web.base.downloadFrom
 fun Prov.installGopass(
-    version: String = "1.15.13", // NOTE: when adjusting, pls also adjust checksum below and version of gopass bridge json api
+    version: String = "1.15.5",
     enforceVersion: Boolean = false,
-    // from https://github.com/gopasspw/gopass/releases/tag/v1.15.13
-    sha256sum: String = "409ed5617e64fa2c781d5e2807ba7fcd65bc383a4e110f410f90b590e51aec55"
+    sha256sum: String = "23ec10015c2643f22cb305859eb36d671094d463d2eb1798cc675e7bb06f4b39"
 ) = taskWithResult {
     if (isPackageInstalled("gopass") && !enforceVersion) {

(desktop infrastructure: installGopassJsonApi)

@@ -22,10 +22,10 @@ fun Prov.downloadGopassBridge() = task {
 }
 fun Prov.installGopassJsonApi() = taskWithResult {
-    // from https://github.com/gopasspw/gopass-jsonapi/releases/tag/v1.15.13
-    val sha256sum = "3162ab558301645024325ce2e419c1d67900e1faf95dc1774a36f1ebfc76389f"
-    val gopassJsonApiVersion = "1.15.13"
-    val requiredGopassVersion = "1.15.13"
+    // see https://github.com/gopasspw/gopass-jsonapi
+    val sha256sum = "ec9976e39a468428ae2eb1e2e0b9ceccba7f60d66b8097e2425b0c07f4fed108"
+    val gopassJsonApiVersion = "1.15.5"
+    val requiredGopassVersion = "1.15.5"
     val filename = "gopass-jsonapi_${gopassJsonApiVersion}_linux_amd64.deb"
     val downloadUrl = "-L https://github.com/gopasspw/gopass-jsonapi/releases/download/v$gopassJsonApiVersion/$filename"
     val downloadDir = "${userHome()}Downloads"

(desktop infrastructure: installGraalVM)

@@ -1,33 +0,0 @@
-package org.domaindrivenarchitecture.provs.desktop.infrastructure
-import org.domaindrivenarchitecture.provs.framework.core.Prov
-import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createDirs
-import org.domaindrivenarchitecture.provs.framework.ubuntu.web.base.downloadFromURL
-const val GRAAL_VM_VERSION = "21.0.2"
-fun Prov.installGraalVM() = task {
-    val tmpDir = "~/tmp"
-    val filename = "graalvm-community-jdk-"
-    val additionalPartFilename = "_linux-x64_bin"
-    val packedFilename = "$filename$GRAAL_VM_VERSION$additionalPartFilename.tar.gz"
-    val extractedFilenameHunch = "graalvm-community-openjdk-"
-    val installationPath = "/usr/lib/jvm/"
-    if ( GRAAL_VM_VERSION != graalVMVersion() || !chk("ls -d $installationPath$extractedFilenameHunch$GRAAL_VM_VERSION*")) {
-        downloadFromURL(
-            "https://github.com/graalvm/graalvm-ce-builds/releases/download/jdk-$GRAAL_VM_VERSION/$packedFilename",
-            path = tmpDir,
-            sha256sum = "b048069aaa3a99b84f5b957b162cc181a32a4330cbc35402766363c5be76ae48"
-        )
-        createDirs(installationPath, sudo = true)
-        cmd("sudo tar -C $installationPath -xzf $packedFilename", tmpDir)
-        val graalInstPath = installationPath + (cmd("ls /usr/lib/jvm/|grep -e graalvm-community-openjdk-$GRAAL_VM_VERSION").out?.replace("\n", ""))
-        cmd("sudo ln -sf $graalInstPath/lib/svm/bin/native-image /usr/local/bin/native-image")
-    }
-}
-fun Prov.graalVMVersion(): String {
-    return cmdNoEval("/usr/local/bin/native-image --version|awk 'NR==1 {print $2}'").out?.trim() ?: ""
-}

(desktop infrastructure: installHugoByDeb)

@ -1,85 +0,0 @@
package org.domaindrivenarchitecture.provs.desktop.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.userHome
import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptInstall
import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptPurge
import org.domaindrivenarchitecture.provs.framework.ubuntu.web.base.downloadFromURL
fun Prov.installHugoByDeb() = task {
val sha256sum = "46692ac9b79d5bc01b0f847f6dcf651d8630476de63e598ef61a8da9461d45cd"
val requiredHugoVersion = "0.125.5"
val filename = "hugo_extended_0.125.5_linux-amd64.deb"
val downloadUrl = "-L https://github.com/gohugoio/hugo/releases/download/v$requiredHugoVersion/$filename"
val downloadDir = "${userHome()}Downloads"
val currentHugoVersion = cmdNoEval("hugo version").out ?: ""
if (needsHugoInstall(currentHugoVersion, requiredHugoVersion)) {
if (isHugoInstalled(currentHugoVersion)) {
if (currentHugoVersion.contains("snap")) {
cmd("snap remove hugo", sudo = true)
} else {
aptPurge("hugo")
}
}
aptInstall("gnupg2")
downloadFromURL(downloadUrl, filename, downloadDir, sha256sum = sha256sum)
cmd("dpkg -i $downloadDir/$filename", sudo = true)
}
}
fun needsHugoInstall(currentHugoVersion: String?, requiredHugoVersion: String) : Boolean {
if (currentHugoVersion == null) {
return true
}
if (!isHugoInstalled(currentHugoVersion)) {
return true
}
if (!isHugoExtended(currentHugoVersion)) {
return true
}
if (isLowerHugoVersion(requiredHugoVersion, currentHugoVersion)) {
return true
}
return false
}
fun isHugoInstalled(hugoVersion: String?) : Boolean {
if (hugoVersion == null) {
return false
}
return hugoVersion.contains("hugo")
}
fun isHugoExtended(hugoVersion: String) : Boolean {
return hugoVersion.contains("extended")
}
fun isLowerHugoVersion(requiredHugoVersion: String, currentHugoVersion: String ) : Boolean {
val reqVersionNo = getHugoVersionNo(requiredHugoVersion)
val currentVersionNo = getHugoVersionNo(currentHugoVersion)
return when {
compareVersions(currentVersionNo, reqVersionNo).contains("lower") -> true
else -> false
}
}
fun compareVersions(firstVersion : List<Int>, secondVersion: List<Int>) : String {
var result = ""
for (i in 0..2) {
when {
firstVersion[i] > secondVersion[i] -> result += " higher"
firstVersion[i] < secondVersion[i] -> result += " lower"
firstVersion[i] == secondVersion[i] -> result += " equal"
}
}
return result
}
fun getHugoVersionNo(hugoVersion: String) : List<Int> {
// hugo v0.126.1-3d40ab+extended linux/amd64 BuildDate=2024-05-15T10:42:34Z VendorInfo=snap:0.126.1
var result = hugoVersion.split(" ")[1]
result = result.split("-")[0].removePrefix("v")
return result.split(".").map { it.toInt() }
}
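For reference, a standalone sketch of the parsing and comparison steps above, runnable without provs. The two helpers mirror getHugoVersionNo and compareVersions from this file (renamed only to keep the sketch self-contained); the version strings are made up.

// Mirrors getHugoVersionNo: "hugo v0.126.1-3d40ab+extended linux/amd64 ..." -> [0, 126, 1]
fun parseHugoVersionNo(hugoVersion: String): List<Int> {
    val numeric = hugoVersion.split(" ")[1].split("-")[0].removePrefix("v")
    return numeric.split(".").map { it.toInt() }
}

// Mirrors compareVersions: one word ("higher"/"lower"/"equal") per version component
fun compareVersionNos(first: List<Int>, second: List<Int>): String {
    var result = ""
    for (i in 0..2) {
        result += when {
            first[i] > second[i] -> " higher"
            first[i] < second[i] -> " lower"
            else -> " equal"
        }
    }
    return result
}

fun main() {
    val current = parseHugoVersionNo("hugo v0.124.1-3d40ab+extended linux/amd64")
    val required = listOf(0, 125, 5)
    println(current)                               // [0, 124, 1]
    println(compareVersionNos(current, required))  // " equal lower lower" -> contains "lower" -> upgrade needed
}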

View file

@ -30,7 +30,7 @@ val OPENCONNECT = "openconnect network-manager-openconnect network-manager-openc
val VPNC = "vpnc network-manager-vpnc network-manager-vpnc-gnome vpnc-scripts" val VPNC = "vpnc network-manager-vpnc network-manager-vpnc-gnome vpnc-scripts"
val JAVA = "openjdk-17-jdk jarwrapper" val JAVA = "openjdk-8-jdk openjdk-11-jdk openjdk-17-jdk jarwrapper"
val DRAWING_TOOLS = "inkscape dia openboard graphviz" val DRAWING_TOOLS = "inkscape dia openboard graphviz"

View file

@ -14,8 +14,6 @@ fun Prov.provisionPython(venvHome: String? = "~/.venv/meissa") = task {
installRestClient(venvHome) installRestClient(venvHome)
installJupyterlab(venvHome) installJupyterlab(venvHome)
installLinters(venvHome) installLinters(venvHome)
installAsciinema(venvHome)
installPyTest(venvHome)
} }
fun Prov.installPython3(): ProvResult = task { fun Prov.installPython3(): ProvResult = task {
@ -30,7 +28,7 @@ fun Prov.configureVenv(venvHome: String): ProvResult = task {
fun Prov.installPybuilder(venvHome: String? = null): ProvResult = task { fun Prov.installPybuilder(venvHome: String? = null): ProvResult = task {
pipInstall("pybuilder ddadevops pypandoc mockito coverage unittest-xml-reporting deprecation" + pipInstall("pybuilder ddadevops pypandoc mockito coverage unittest-xml-reporting deprecation" +
" python_terraform dda_python_terraform boto3 pyyaml packaging inflection", " python_terraform dda_python_terraform boto3 pyyaml packaging",
venvHome venvHome
) )
pipInstall("--upgrade ddadevops", venvHome) pipInstall("--upgrade ddadevops", venvHome)
@ -47,13 +45,6 @@ fun Prov.installJupyterlab(venvHome: String? = null): ProvResult = task {
fun Prov.installLinters(venvHome: String? = null): ProvResult = task { fun Prov.installLinters(venvHome: String? = null): ProvResult = task {
pipInstall("flake8 mypy pylint", venvHome) pipInstall("flake8 mypy pylint", venvHome)
} }
fun Prov.installAsciinema(venvHome: String? = null): ProvResult = task {
pipInstall("asciinema", venvHome)
}
fun Prov.installPyTest(venvHome: String? = null): ProvResult = task {
pipInstall("pytest", venvHome)
}
private fun Prov.pipInstall(pkg: String, venvHome: String? = null) { private fun Prov.pipInstall(pkg: String, venvHome: String? = null) {
cmd(activateVenvCommandPrefix(venvHome) + "pip3 install $pkg") cmd(activateVenvCommandPrefix(venvHome) + "pip3 install $pkg")
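The hunk above shows how individual tools are pip-installed into the venv. A hedged usage sketch follows; it assumes provisionPython lives in the desktop infrastructure package like the neighbouring desktop files in this diff, and the venv path is illustrative.

import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.desktop.infrastructure.provisionPython

// Provision the Python tooling into a custom venv instead of the default ~/.venv/meissa
fun Prov.provisionPythonDevEnv() = task {
    provisionPython(venvHome = "~/.venv/dev")
}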

View file

@ -6,33 +6,33 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptInsta
import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.isPackageInstalled import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.isPackageInstalled
fun Prov.installVSCode(vararg options: String) = task { fun Prov.installVSC(vararg options: String) = task {
val clojureExtensions = setOf("betterthantomorrow.calva", "DavidAnson.vscode-markdownlint") val clojureExtensions = setOf("betterthantomorrow.calva", "DavidAnson.vscode-markdownlint")
val pythonExtensions = setOf("ms-python.python") val pythonExtensions = setOf("ms-python.python")
installVSCodePrerequisites() prerequisitesVSCinstall()
installVSCPackage() installVSCPackage()
installVSCodiumPackage() installVSCodiumPackage()
if (options.contains("clojure")) { if (options.contains("clojure")) {
installVSCodeExtensions(clojureExtensions) installExtensionsCode(clojureExtensions)
installVSCodiumExtensions(clojureExtensions) installExtensionsCodium(clojureExtensions)
} }
if (options.contains("python")) { if (options.contains("python")) {
installVSCodeExtensions(pythonExtensions) installExtensionsCode(pythonExtensions)
installVSCodiumExtensions(pythonExtensions) installExtensionsCodium(pythonExtensions)
} }
} }
private fun Prov.installVSCodePrerequisites() = task { private fun Prov.prerequisitesVSCinstall() = task {
aptInstall("curl gpg unzip apt-transport-https") aptInstall("curl gpg unzip apt-transport-https")
} }
@Suppress("unused") // only required for installation of vscode via apt @Suppress("unused") // only required for installation of vscode via apt
private fun Prov.installVSCodeWithApt() = task { private fun Prov.installVscWithApt() = task {
val packageName = "code" val packageName = "code"
if (!isPackageInstalled(packageName)) { if (!isPackageInstalled(packageName)) {
// see https://code.visualstudio.com/docs/setup/linux // see https://code.visualstudio.com/docs/setup/linux
@ -62,7 +62,7 @@ private fun Prov.installVSCodiumPackage() = task {
} }
private fun Prov.installVSCodeExtensions(extensions: Set<String>) = optional { private fun Prov.installExtensionsCode(extensions: Set<String>) = optional {
var res = ProvResult(true) var res = ProvResult(true)
for (ext in extensions) { for (ext in extensions) {
res = cmd("code --install-extension $ext") res = cmd("code --install-extension $ext")
@ -71,11 +71,11 @@ private fun Prov.installVSCodeExtensions(extensions: Set<String>) = optional {
// Settings can be found at $HOME/.config/Code/User/settings.json // Settings can be found at $HOME/.config/Code/User/settings.json
} }
private fun Prov.installVSCodiumExtensions(extensions: Set<String>) = optional { private fun Prov.installExtensionsCodium(extensions: Set<String>) = optional {
var res = ProvResult(true) var res = ProvResult(true)
for (ext in extensions) { for (ext in extensions) {
res = ProvResult(res.success && cmd("codium --install-extension $ext").success) res = cmd("codium --install-extension $ext")
} }
res res
// Settings can be found at $HOME/.config/VSCodium/User/settings.json // Settings can be found at $HOME/.config/Code/User/settings.json
} }
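A hedged usage sketch for the function above: "clojure" and "python" are the only option strings the hunk evaluates, installVSCode is the name used in the left-hand column (the right-hand column still names it installVSC), and the import path assumes the file sits in the desktop infrastructure package like the other desktop files in this diff.

import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.desktop.infrastructure.installVSCode

// Installs VS Code and VSCodium plus the Clojure and Python extension sets
fun Prov.provisionIdeExample() = task {
    installVSCode("clojure", "python")
}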

View file

@ -80,8 +80,8 @@ open class Prov protected constructor(
} }
/** /**
* A task is the fundamental execution unit. In the results overview it is represented by one line with a success or failure result. * A task is the base execution unit in provs. In the results overview it is represented by one line resp. result (of either success or failure).
* Returns success if all sub-tasks finished with success or if no sub-tasks are called at all. * Returns success if no sub-tasks are called or if all subtasks finish with success.
*/ */
fun task(name: String? = null, taskLambda: Prov.() -> Unit): ProvResult { fun task(name: String? = null, taskLambda: Prov.() -> Unit): ProvResult {
printDeprecationWarningIfLevel0("task") printDeprecationWarningIfLevel0("task")
@ -89,10 +89,8 @@ open class Prov protected constructor(
} }
/** /**
* Same as task above but the lambda parameter must have a ProvResult as return type. * Same as task but the provided lambda is explicitly required to provide a ProvResult to be returned.
* The returned ProvResult is included in the success resp. failure evaluation, * The returned result is included in the evaluation.
* i.e. if the returned ProvResult from the lambda fails, the returned ProvResult from
* taskWithResult also fails, else success depends on potentially called sub-tasks.
*/ */
fun taskWithResult(name: String? = null, taskLambda: Prov.() -> ProvResult): ProvResult { fun taskWithResult(name: String? = null, taskLambda: Prov.() -> ProvResult): ProvResult {
printDeprecationWarningIfLevel0("taskWithResult") printDeprecationWarningIfLevel0("taskWithResult")
@ -100,27 +98,27 @@ open class Prov protected constructor(
} }
/** /**
* defines a task, which returns the returned result from the lambda, the results of sub-tasks are not considered * defines a task, which returns the returned result, the results of sub-tasks are not considered
*/ */
fun requireLast(name: String? = null, taskLambda: Prov.() -> ProvResult): ProvResult { fun requireLast(name: String? = null, a: Prov.() -> ProvResult): ProvResult {
printDeprecationWarningIfLevel0("requireLast") printDeprecationWarningIfLevel0("requireLast")
return evaluate(ResultMode.LAST, name) { taskLambda() } return evaluate(ResultMode.LAST, name) { a() }
} }
/** /**
* Defines a task, which always returns success. * defines a task, which always returns success
*/ */
fun optional(name: String? = null, taskLambda: Prov.() -> ProvResult): ProvResult { fun optional(name: String? = null, a: Prov.() -> ProvResult): ProvResult {
printDeprecationWarningIfLevel0("optional") printDeprecationWarningIfLevel0("optional")
return evaluate(ResultMode.OPTIONAL, name) { taskLambda() } return evaluate(ResultMode.OPTIONAL, name) { a() }
} }
/** /**
* Defines a task, which exits the overall execution on failure result of the taskLambda. * defines a task, which exits the overall execution on failure
*/ */
fun exitOnFailure(taskLambda: Prov.() -> ProvResult): ProvResult { fun exitOnFailure(a: Prov.() -> ProvResult): ProvResult {
printDeprecationWarningIfLevel0("exitOnFailure") printDeprecationWarningIfLevel0("exitOnFailure")
return evaluate(ResultMode.FAILEXIT) { taskLambda() } return evaluate(ResultMode.FAILEXIT) { a() }
} }
/** /**
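A hedged sketch of how the combinators documented above compose, based only on the descriptions in this hunk; the shell commands are arbitrary.

import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.core.ProvResult

fun Prov.combinatorDemo() = task {        // success iff all sub-tasks succeed (or none are called)
    optional {                            // result is always success, even though "false" fails
        cmd("false")
    }
    taskWithResult {                      // the returned ProvResult takes part in the evaluation
        val res = cmd("echo hello")
        ProvResult(res.success)
    }
    requireLast {                         // only the last result counts, so this block succeeds
        cmd("false")
        cmd("echo recovered")
    }
}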

View file

@ -8,8 +8,6 @@ data class ProvResult(val success: Boolean,
val exception: Exception? = null, val exception: Exception? = null,
val exit: String? = null) { val exit: String? = null) {
val outTrimmed: String? = out?.trim()
constructor(returnCode : Int) : this(returnCode == 0) constructor(returnCode : Int) : this(returnCode == 0)
override fun toString(): String { override fun toString(): String {

View file

@ -2,6 +2,7 @@ package org.domaindrivenarchitecture.provs.framework.core
import com.charleskorn.kaml.Yaml import com.charleskorn.kaml.Yaml
import com.charleskorn.kaml.YamlConfiguration import com.charleskorn.kaml.YamlConfiguration
import kotlinx.serialization.InternalSerializationApi
import kotlinx.serialization.serializer import kotlinx.serialization.serializer
import java.io.BufferedReader import java.io.BufferedReader
import java.io.File import java.io.File
@ -17,13 +18,15 @@ fun writeToFile(fileName: String, text: String) {
} }
@OptIn(InternalSerializationApi::class)
inline fun <reified T : Any> String.yamlToType() = Yaml(configuration = YamlConfiguration(strictMode = false)).decodeFromString( inline fun <reified T : Any> String.yamlToType() = Yaml(configuration = YamlConfiguration(strictMode = false)).decodeFromString(
serializer<T>(), T::class.serializer(),
this this
) )
@OptIn(InternalSerializationApi::class)
inline fun <reified T : Any> T.toYaml() = Yaml(configuration = YamlConfiguration(strictMode = false, encodeDefaults = false)).encodeToString( inline fun <reified T : Any> T.toYaml() = Yaml(configuration = YamlConfiguration(strictMode = false, encodeDefaults = false)).encodeToString(
serializer<T>(), T::class.serializer(),
this this
) )
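A small round-trip sketch for the two helpers above; the Example class and the YAML snippet are illustrative only.

import kotlinx.serialization.Serializable
import org.domaindrivenarchitecture.provs.framework.core.toYaml
import org.domaindrivenarchitecture.provs.framework.core.yamlToType

@Serializable
data class Example(val host: String, val port: Int = 22)

fun main() {
    // strictMode = false: unknown YAML keys are ignored rather than rejected
    val parsed = "host: repo.example.org\nport: 2222\n".yamlToType<Example>()
    println(parsed)           // Example(host=repo.example.org, port=2222)
    println(parsed.toYaml())  // encodeDefaults = false: properties left at their default are omitted
}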

View file

@ -6,7 +6,7 @@ import org.domaindrivenarchitecture.provs.framework.core.docker.dockerimages.Doc
import org.domaindrivenarchitecture.provs.framework.core.docker.platforms.* import org.domaindrivenarchitecture.provs.framework.core.docker.platforms.*
import org.domaindrivenarchitecture.provs.framework.core.platforms.UbuntuProv import org.domaindrivenarchitecture.provs.framework.core.platforms.UbuntuProv
import org.domaindrivenarchitecture.provs.framework.core.processors.ContainerStartMode import org.domaindrivenarchitecture.provs.framework.core.processors.ContainerStartMode
import org.domaindrivenarchitecture.provs.framework.core.docker.platforms.*
private const val DOCKER_NOT_SUPPORTED = "docker not yet supported for " private const val DOCKER_NOT_SUPPORTED = "docker not yet supported for "
@ -17,7 +17,7 @@ fun Prov.dockerProvideImage(image: DockerImage, skipIfExisting: Boolean = true,
if (this is UbuntuProv) { if (this is UbuntuProv) {
return this.dockerProvideImagePlatform(image, skipIfExisting, sudo) return this.dockerProvideImagePlatform(image, skipIfExisting, sudo)
} else { } else {
throw RuntimeException(DOCKER_NOT_SUPPORTED + this.javaClass) throw RuntimeException(DOCKER_NOT_SUPPORTED + (this as UbuntuProv).javaClass)
} }
} }
@ -28,7 +28,7 @@ fun Prov.dockerImageExists(imageName: String, sudo: Boolean = true) : Boolean {
if (this is UbuntuProv) { if (this is UbuntuProv) {
return this.dockerImageExistsPlatform(imageName, sudo) return this.dockerImageExistsPlatform(imageName, sudo)
} else { } else {
throw RuntimeException(DOCKER_NOT_SUPPORTED + this.javaClass) throw RuntimeException(DOCKER_NOT_SUPPORTED + (this as UbuntuProv).javaClass)
} }
} }
@ -50,7 +50,7 @@ fun Prov.provideContainer(
if (this is UbuntuProv) { if (this is UbuntuProv) {
return this.provideContainerPlatform(containerName, imageName, startMode, sudo, options, command) return this.provideContainerPlatform(containerName, imageName, startMode, sudo, options, command)
} else { } else {
throw RuntimeException(DOCKER_NOT_SUPPORTED + this.javaClass) throw RuntimeException(DOCKER_NOT_SUPPORTED + (this as UbuntuProv).javaClass)
} }
} }
@ -59,7 +59,7 @@ fun Prov.containerRuns(containerName: String, sudo: Boolean = true) : Boolean {
if (this is UbuntuProv) { if (this is UbuntuProv) {
return this.containerRunsPlatform(containerName, sudo) return this.containerRunsPlatform(containerName, sudo)
} else { } else {
throw RuntimeException(DOCKER_NOT_SUPPORTED + this.javaClass) throw RuntimeException(DOCKER_NOT_SUPPORTED + (this as UbuntuProv).javaClass)
} }
} }
@ -72,7 +72,7 @@ fun Prov.runContainer(
if (this is UbuntuProv) { if (this is UbuntuProv) {
return this.runContainerPlatform(containerName, imageName, sudo) return this.runContainerPlatform(containerName, imageName, sudo)
} else { } else {
throw RuntimeException(DOCKER_NOT_SUPPORTED + this.javaClass) throw RuntimeException(DOCKER_NOT_SUPPORTED + (this as UbuntuProv).javaClass)
} }
} }
@ -84,17 +84,16 @@ fun Prov.exitAndRmContainer(
if (this is UbuntuProv) { if (this is UbuntuProv) {
return this.exitAndRmContainerPlatform(containerName, sudo) return this.exitAndRmContainerPlatform(containerName, sudo)
} else { } else {
throw RuntimeException(DOCKER_NOT_SUPPORTED + this.javaClass) throw RuntimeException(DOCKER_NOT_SUPPORTED + (this as UbuntuProv).javaClass)
} }
} }
@Suppress("unused")
fun Prov.containerExec(containerName: String, cmd: String, sudo: Boolean = true): ProvResult { fun Prov.containerExec(containerName: String, cmd: String, sudo: Boolean = true): ProvResult {
if (this is UbuntuProv) { if (this is UbuntuProv) {
return this.containerExecPlatform(containerName, cmd, sudo) return this.containerExecPlatform(containerName, cmd, sudo)
} else { } else {
throw RuntimeException(DOCKER_NOT_SUPPORTED + this.javaClass) throw RuntimeException(DOCKER_NOT_SUPPORTED + (this as UbuntuProv).javaClass)
} }
} }

View file

@ -17,12 +17,12 @@ class UbuntuPlusUser(private val userName: String = "testuser") : DockerImage {
override fun imageText(): String { override fun imageText(): String {
return """ return """
FROM ubuntu:22.04 FROM ubuntu:20.04
ARG DEBIAN_FRONTEND=noninteractive ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get -y install sudo RUN apt-get update && apt-get -y install sudo
RUN useradd -m $userName && echo "$userName:$userName" | chpasswd && usermod -aG sudo $userName RUN useradd -m $userName && echo "$userName:$userName" | chpasswd && adduser $userName sudo
RUN echo "$userName ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/$userName RUN echo "$userName ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/$userName
USER $userName USER $userName

View file

@ -7,10 +7,10 @@ import org.domaindrivenarchitecture.provs.framework.core.processors.Processor
const val SHELL = "/bin/bash" const val SHELL = "/bin/bash"
open class UbuntuProv( class UbuntuProv internal constructor(
processor: Processor = LocalProcessor(), processor: Processor = LocalProcessor(),
name: String? = null, name: String? = null,
progressType: ProgressType = ProgressType.BASIC progressType: ProgressType
) : Prov(processor, name, progressType) { ) : Prov(processor, name, progressType) {
override fun cmd(cmd: String, dir: String?, sudo: Boolean): ProvResult = taskWithResult { override fun cmd(cmd: String, dir: String?, sudo: Boolean): ProvResult = taskWithResult {
@ -30,16 +30,14 @@ open class UbuntuProv(
} }
private fun buildCommand(vararg args: String): String { private fun buildCommand(vararg args: String): String {
return if (args.size == 1) { return if (args.size == 1)
args[0].escapeAndEncloseByDoubleQuoteForShell() args[0].escapeAndEncloseByDoubleQuoteForShell()
} else { else
if (args.size == 3 && SHELL == args[0] && "-c" == args[1]) { if (args.size == 3 && SHELL.equals(args[0]) && "-c".equals(args[1]))
SHELL + " -c " + args[2].escapeAndEncloseByDoubleQuoteForShell() SHELL + " -c " + args[2].escapeAndEncloseByDoubleQuoteForShell()
} else { else
args.joinToString(separator = " ") args.joinToString(separator = " ")
} }
}
}
} }
private fun commandWithDirAndSudo(cmd: String, dir: String?, sudo: Boolean): String { private fun commandWithDirAndSudo(cmd: String, dir: String?, sudo: Boolean): String {

View file

@ -5,14 +5,13 @@ import org.slf4j.LoggerFactory
import java.io.File import java.io.File
import java.io.IOException import java.io.IOException
import java.nio.charset.Charset import java.nio.charset.Charset
import java.nio.file.Paths
private fun getOsName(): String { private fun getOsName(): String {
return System.getProperty("os.name") return System.getProperty("os.name")
} }
open class LocalProcessor(val useHomeDirAsWorkingDir: Boolean = true) : Processor { open class LocalProcessor : Processor {
companion object { companion object {
@Suppress("JAVA_CLASS_ON_COMPANION") @Suppress("JAVA_CLASS_ON_COMPANION")
@ -27,12 +26,7 @@ open class LocalProcessor(val useHomeDirAsWorkingDir: Boolean = true) : Processo
private fun workingDir() : String private fun workingDir() : String
{ {
return if (useHomeDirAsWorkingDir) { return System.getProperty("user.home") ?: File.separator
System.getProperty("user.home") ?: File.separator
} else {
// folder in which program was started
Paths.get("").toAbsolutePath().toString()
}
} }
override fun exec(vararg args: String): ProcessResult { override fun exec(vararg args: String): ProcessResult {

View file

@ -93,9 +93,9 @@ class RemoteProcessor(val host: InetAddress, val user: String, val password: Sec
var session: Session? = null var session: Session? = null
try { try {
session = ssh.startSession() ?: throw IllegalStateException("ERROR: Could not start ssh session.") session = ssh.startSession()
val cmd: Command = session.exec(cmdString) val cmd: Command = session!!.exec(cmdString)
val out = BufferedReader(InputStreamReader(cmd.inputStream)).use { it.readText() } val out = BufferedReader(InputStreamReader(cmd.inputStream)).use { it.readText() }
val err = BufferedReader(InputStreamReader(cmd.errorStream)).use { it.readText() } val err = BufferedReader(InputStreamReader(cmd.errorStream)).use { it.readText() }
cmd.join(100, TimeUnit.SECONDS) cmd.join(100, TimeUnit.SECONDS)

View file

@ -1,40 +0,0 @@
package org.domaindrivenarchitecture.provs.framework.ubuntu.cron.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.core.ProvResult
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.checkFile
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createDirs
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createFile
import org.domaindrivenarchitecture.provs.framework.ubuntu.user.base.whoami
/**
* Creates a cron job.
* @param cronFilename e.g. "90_my_cron"; file is created in folder /etc/cron.d/
* @param schedule in the usual cron-format, examples: "0 * * * *" for each hour, "0 3 1-7 * 1" for the first Monday each month at 3:00, etc
* @param command the executed command
* @param user the user with whom the command will be executed, if null the current user is used
*/
fun Prov.createCronJob(cronFilename: String, schedule: String, command: String, user: String? = null) = task {
val cronUser = user ?: whoami()
val cronLine = "$schedule $cronUser $command\n"
createDirs("/etc/cron.d/", sudo = true)
createFile("/etc/cron.d/$cronFilename", cronLine, "644", sudo = true, overwriteIfExisting = true)
}
/**
* Adds a cronJob for a monthly reboot of the (Linux) system.
* ATTENTION: Use with care!!
*/
fun Prov.scheduleMonthlyReboot() = task {
val shutdown = "/sbin/shutdown"
if (checkFile(shutdown, sudo = true)) {
// reboot each first Tuesday in a month at 3:00
// use controlled "shutdown" instead of direct "reboot"
createCronJob("50_monthly_reboot", "0 3 1-7 * 2", "shutdown -r now", "root")
} else {
addResultToEval(ProvResult(false, err = "$shutdown not found."))
}
}
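A hedged usage sketch for createCronJob above; the file name, schedule and command are illustrative.

import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.ubuntu.cron.infrastructure.createCronJob

// Runs /usr/local/bin/backup.sh daily at 02:30 as root via /etc/cron.d/60_nightly_backup
fun Prov.scheduleNightlyBackup() = task {
    createCronJob("60_nightly_backup", "30 2 * * *", "/usr/local/bin/backup.sh", "root")
}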

View file

@ -201,7 +201,7 @@ fun Prov.fileContentLargeFile(file: String, sudo: Boolean = false, chunkSize: In
// check first chunk // check first chunk
if (resultString == null) { if (resultString == null) {
if (!chunkResult.success) { if (!chunkResult.success) {
return null return resultString
} else { } else {
resultString = "" resultString = ""
} }
@ -329,16 +329,12 @@ fun Prov.deleteDir(dir: String, path: String, sudo: Boolean = false): ProvResult
if ("" == path) { if ("" == path) {
throw RuntimeException("In deleteDir: path must not be empty.") throw RuntimeException("In deleteDir: path must not be empty.")
} }
return if (checkDir(dir, path, sudo)) {
val cmd = "cd $path && rmdir $dir" val cmd = "cd $path && rmdir $dir"
if (!sudo) { return if (!sudo) {
cmd(cmd) cmd(cmd)
} else { } else {
cmd(cmd.sudoizeCommand()) cmd(cmd.sudoizeCommand())
} }
} else {
ProvResult(true, out = "Dir to delete did not exist: $dir")
}
} }
@ -407,7 +403,7 @@ fun Prov.fileSize(filename: String, sudo: Boolean = false): Int? {
private fun ensureValidPosixFilePermission(posixFilePermission: String) { private fun ensureValidPosixFilePermission(posixFilePermission: String) {
if (!Regex("^0?[0-7]{3}$").matches(posixFilePermission)) throw IllegalArgumentException("Wrong file permission ($posixFilePermission), permission must consist of 3 digits as e.g. 664") if (!Regex("^[0-7]{3}$").matches(posixFilePermission)) throw IllegalArgumentException("Wrong file permission ($posixFilePermission), permission must consist of 3 digits as e.g. 664")
} }
/** /**
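A quick standalone check of the relaxed permission pattern in the left-hand column (^0?[0-7]{3}$), which additionally accepts a leading zero:

fun main() {
    val pattern = Regex("^0?[0-7]{3}$")
    for (p in listOf("644", "0644", "978", "64", "2775")) {
        println("$p -> ${pattern.matches(p)}")   // true, true, false, false, false
    }
}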

View file

@ -30,13 +30,10 @@ fun Prov.gitClone(
ProvResult(true, out = "Repo [$pathWithBasename] already exists, but might not be up-to-date.") ProvResult(true, out = "Repo [$pathWithBasename] already exists, but might not be up-to-date.")
} }
} else { } else {
// create targetPath if not yet existing // create targetPath (if not yet existing)
if (!checkDir(targetPath)) { if (!checkDir(targetPath)) {
createDirs(targetPath) createDirs(targetPath)
} }
// Note that all output of git clone on Linux is shown in stderr (normal progress info AND errors),
// which might be confusing in the logfile.
cmd("cd $targetPath && git clone $repoSource ${targetFolderName ?: ""}") cmd("cd $targetPath && git clone $repoSource ${targetFolderName ?: ""}")
} }
} }

View file

@ -1,6 +1,5 @@
package org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base package org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base
import org.domaindrivenarchitecture.provs.desktop.domain.KnownHost
import org.domaindrivenarchitecture.provs.framework.core.Prov import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.core.ProvResult import org.domaindrivenarchitecture.provs.framework.core.ProvResult
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.* import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.*
@ -8,6 +7,8 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.SshKeyPair
import java.io.File import java.io.File
const val KNOWN_HOSTS_FILE = "~/.ssh/known_hosts"
/** /**
* Installs ssh keys for active user; ssh filenames depend on the ssh key type, e.g. for public key file: "id_rsa.pub", "id_ed25519.pub", etc * Installs ssh keys for active user; ssh filenames depend on the ssh key type, e.g. for public key file: "id_rsa.pub", "id_ed25519.pub", etc
*/ */
@ -19,45 +20,38 @@ fun Prov.configureSshKeys(sshKeys: SshKeyPair) = task {
/** /**
* Checks if the specified host (domain name or IP) and (optional) port is contained in the known_hosts file * Checks if the specified hostname or Ip is in a known_hosts file
*
* @return whether it was found
*/ */
fun Prov.isKnownHost(hostOrIp: String, port: Int? = null): Boolean { fun Prov.isHostKnown(hostOrIp: String) : Boolean {
val hostWithPotentialPort = port?.let { hostInKnownHostsFileFormat(hostOrIp, port) } ?: hostOrIp return cmdNoEval("ssh-keygen -F $hostOrIp").out?.isNotEmpty() ?: false
return cmdNoEval("ssh-keygen -F $hostWithPotentialPort").out?.isNotEmpty() ?: false
}
fun hostInKnownHostsFileFormat(hostOrIp: String, port: Int? = null): String {
return port?.let { "[$hostOrIp]:$port" } ?: hostOrIp
} }
/** /**
* Adds ssh keys for specified host (which also can be an ip-address) to the ssh-file "known_hosts". * Adds ssh keys for specified host (which also can be an ip-address) to ssh-file "known_hosts"
* If parameter verifyKeys is true, the keys are checked against the live keys of the host and added only if valid. * Either add the specified keys or - if null - add keys automatically retrieved.
* Note: adding keys automatically is vulnerable to a man-in-the-middle attack, thus considered insecure and not recommended.
*/ */
fun Prov.addKnownHost(knownHost: KnownHost, verifyKeys: Boolean = false) = task { fun Prov.addKnownHost(host: String, keysToBeAdded: List<String>?, verifyKeys: Boolean = false) = task {
val knownHostsFile = "~/.ssh/known_hosts" if (!checkFile(KNOWN_HOSTS_FILE)) {
if (!checkFile(knownHostsFile)) {
createDir(".ssh") createDir(".ssh")
createFile(knownHostsFile, null) createFile(KNOWN_HOSTS_FILE, null)
} }
with(knownHost) { if (keysToBeAdded == null) {
for (key in hostKeys) { // auto add keys
cmd("ssh-keyscan $host >> $KNOWN_HOSTS_FILE")
} else {
for (key in keysToBeAdded) {
if (!verifyKeys) { if (!verifyKeys) {
addTextToFile("\n$hostName $key\n", File(knownHostsFile)) addTextToFile("\n$host $key\n", File(KNOWN_HOSTS_FILE))
} else { } else {
val validKeys = findSshKeys(hostName, port) val validKeys = getSshKeys(host)
if (validKeys?.contains(key) == true) { if (validKeys?.contains(key) == true) {
val formattedHost = hostInKnownHostsFileFormat(hostName, port) addTextToFile("\n$host $key\n", File(KNOWN_HOSTS_FILE))
addTextToFile("\n$formattedHost $key\n", File(knownHostsFile))
} else { } else {
addResultToEval( addResultToEval(ProvResult(false, err = "The following key of host [$host] could not be verified successfully: " + key))
ProvResult(
false,
err = "The following key of host [$hostName] could not be verified successfully: " + key
)
)
} }
} }
} }
@ -66,14 +60,10 @@ fun Prov.addKnownHost(knownHost: KnownHost, verifyKeys: Boolean = false) = task
/** /**
* Returns a list of valid ssh keys for the given host (host can also be an ip address), * Returns a list of valid ssh keys for the given host (host can also be an ip address), keys are returned as keytype and key BUT WITHOUT the host name
* keys are returned (space-separated) as keytype and key, but WITHOUT the host name.
* If no port is specified, the keys for the default port (22) are returned.
* If no keytype is specified, keys are returned for all keytypes.
*/ */
fun Prov.findSshKeys(host: String, port: Int? = null, keytype: String? = null): List<String>? { private fun Prov.getSshKeys(host: String, keytype: String? = null): List<String>? {
val portOption = port?.let { " -p $port " } ?: ""
val keytypeOption = keytype?.let { " -t $keytype " } ?: "" val keytypeOption = keytype?.let { " -t $keytype " } ?: ""
val output = cmd("ssh-keyscan $portOption $keytypeOption $host 2>/dev/null").out?.trim() val output = cmd("ssh-keyscan $keytypeOption $host 2>/dev/null").out?.trim()
return output?.split("\n")?.filter { x -> "" != x }?.map { x -> x.substringAfter(" ") } return output?.split("\n")?.filter { x -> "" != x }?.map { x -> x.substringAfter(" ") }
} }
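A hedged usage sketch for the left-hand variants of isKnownHost and findSshKeys above; the host and port are illustrative.

import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.findSshKeys
import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.isKnownHost

fun Prov.printMissingHostKeys() = task {
    val host = "repo.example.org"
    if (!isKnownHost(host, port = 2222)) {
        // entries come back as "keytype key", without the host name (see doc above)
        findSshKeys(host, port = 2222)?.forEach { println(it) }
    }
}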

View file

@ -6,25 +6,24 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.secretSources.
@Serializable @Serializable
abstract class SecretSource(protected val parameter: String) { abstract class SecretSource(protected val input: String) {
abstract fun secret() : Secret abstract fun secret() : Secret
abstract fun secretNullable() : Secret? abstract fun secretNullable() : Secret?
} }
@Serializable @Serializable
enum class SecretSourceType { enum class SecretSourceType() {
PLAIN, FILE, PROMPT, PASS, GOPASS, ENV; PLAIN, FILE, PROMPT, PASS, GOPASS;
fun secret(parameter: String) : Secret { fun secret(input: String) : Secret {
return when (this) { return when (this) {
PLAIN -> PlainSecretSource(parameter).secret() PLAIN -> PlainSecretSource(input).secret()
FILE -> FileSecretSource(parameter).secret() FILE -> FileSecretSource(input).secret()
PROMPT -> PromptSecretSource().secret() PROMPT -> PromptSecretSource().secret()
PASS -> PassSecretSource(parameter).secret() PASS -> PassSecretSource(input).secret()
GOPASS -> GopassSecretSource(parameter).secret() GOPASS -> GopassSecretSource(input).secret()
ENV -> EnvSecretSource(parameter).secret()
} }
} }
} }
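A hedged sketch of resolving secrets through the enum above. The import path assumes SecretSourceType sits next to SecretSource in the ubuntu.secret package, ENV exists only in the left-hand column (backed by the EnvSecretSource shown further down in this diff), and the parameters are illustrative.

import org.domaindrivenarchitecture.provs.framework.core.Secret
import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSourceType

// Each call returns a Secret or throws if resolution fails
fun resolveApiToken(): Secret = SecretSourceType.ENV.secret("MY_TOKEN")             // local env var (left-hand column only)
fun resolveDbPassword(): Secret = SecretSourceType.GOPASS.secret("path/to/entry")   // runs `gopass show -f path/to/entry`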

View file

@ -1,18 +0,0 @@
package org.domaindrivenarchitecture.provs.framework.ubuntu.secret.secretSources
import org.domaindrivenarchitecture.provs.framework.core.Secret
import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSource
/**
* Reads secret from a local environment variable
*/
class EnvSecretSource(varName: String) : SecretSource(varName) {
override fun secret(): Secret {
return secretNullable() ?: throw Exception("Failed to get secret from environment variable: $parameter")
}
override fun secretNullable(): Secret? {
val secret = System.getenv(parameter)
return if (secret == null) null else Secret(secret)
}
}

View file

@ -13,11 +13,11 @@ class FileSecretSource(fqFileName: String) : SecretSource(fqFileName) {
override fun secret(): Secret { override fun secret(): Secret {
val p = Prov.newInstance(name = "FileSecretSource", progressType = ProgressType.NONE) val p = Prov.newInstance(name = "FileSecretSource", progressType = ProgressType.NONE)
return p.getSecret("cat " + parameter) ?: throw Exception("Failed to get secret.") return p.getSecret("cat " + input) ?: throw Exception("Failed to get secret.")
} }
override fun secretNullable(): Secret? { override fun secretNullable(): Secret? {
val p = Prov.newInstance(name = "FileSecretSource", progressType = ProgressType.NONE) val p = Prov.newInstance(name = "FileSecretSource", progressType = ProgressType.NONE)
return p.getSecret("cat " + parameter) return p.getSecret("cat " + input)
} }
} }

View file

@ -11,10 +11,10 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSource
*/ */
class GopassSecretSource(path: String) : SecretSource(path) { class GopassSecretSource(path: String) : SecretSource(path) {
override fun secret(): Secret { override fun secret(): Secret {
return secretNullable() ?: throw Exception("Failed to get \"$parameter\" secret from gopass.") return secretNullable() ?: throw Exception("Failed to get \"$input\" secret from gopass.")
} }
override fun secretNullable(): Secret? { override fun secretNullable(): Secret? {
val p = Prov.newInstance(name = "GopassSecretSource for $parameter", progressType = ProgressType.NONE) val p = Prov.newInstance(name = "GopassSecretSource for $input", progressType = ProgressType.NONE)
return p.getSecret("gopass show -f $parameter", true) return p.getSecret("gopass show -f $input", true)
} }
} }

View file

@ -12,10 +12,10 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSource
class PassSecretSource(path: String) : SecretSource(path) { class PassSecretSource(path: String) : SecretSource(path) {
override fun secret(): Secret { override fun secret(): Secret {
val p = Prov.newInstance(name = "PassSecretSource", progressType = ProgressType.NONE) val p = Prov.newInstance(name = "PassSecretSource", progressType = ProgressType.NONE)
return p.getSecret("pass " + parameter) ?: throw Exception("Failed to get secret.") return p.getSecret("pass " + input) ?: throw Exception("Failed to get secret.")
} }
override fun secretNullable(): Secret? { override fun secretNullable(): Secret? {
val p = Prov.newInstance(name = "PassSecretSource", progressType = ProgressType.NONE) val p = Prov.newInstance(name = "PassSecretSource", progressType = ProgressType.NONE)
return p.getSecret("pass " + parameter) return p.getSecret("pass " + input)
} }
} }

View file

@ -6,9 +6,9 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSource
class PlainSecretSource(plainSecret: String) : SecretSource(plainSecret) { class PlainSecretSource(plainSecret: String) : SecretSource(plainSecret) {
override fun secret(): Secret { override fun secret(): Secret {
return Secret(parameter) return Secret(input)
} }
override fun secretNullable(): Secret { override fun secretNullable(): Secret {
return Secret(parameter) return Secret(input)
} }
} }

View file

@ -47,7 +47,7 @@ class PasswordPanel : JPanel(FlowLayout()) {
class PromptSecretSource(text: String = "Secret/Password") : SecretSource(text) { class PromptSecretSource(text: String = "Secret/Password") : SecretSource(text) {
override fun secret(): Secret { override fun secret(): Secret {
val password = PasswordPanel.requestPassword(parameter) val password = PasswordPanel.requestPassword(input)
if (password == null) { if (password == null) {
throw IllegalArgumentException("Failed to retrieve secret from prompting.") throw IllegalArgumentException("Failed to retrieve secret from prompting.")
} else { } else {
@ -56,7 +56,7 @@ class PromptSecretSource(text: String = "Secret/Password") : SecretSource(text)
} }
override fun secretNullable(): Secret? { override fun secretNullable(): Secret? {
val password = PasswordPanel.requestPassword(parameter) val password = PasswordPanel.requestPassword(input)
return if(password == null) { return if(password == null) {
null null

View file

@ -1,7 +0,0 @@
package org.domaindrivenarchitecture.provs.server.domain.hetzner_csi
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.server.infrastructure.provisionHetznerCSIForK8s
fun Prov.provisionHetznerCSI(configResolved: HetznerCSIConfigResolved) =
provisionHetznerCSIForK8s(configResolved.hcloudApiToken, configResolved.encryptionPassphrase)

View file

@ -1,23 +0,0 @@
package org.domaindrivenarchitecture.provs.server.domain.hetzner_csi
import kotlinx.serialization.Serializable
import org.domaindrivenarchitecture.provs.framework.core.Secret
import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSupplier
@Serializable
data class HetznerCSIConfig (
val hcloudApiToken: SecretSupplier,
val encryptionPassphrase: SecretSupplier,
) {
fun resolveSecret(): HetznerCSIConfigResolved = HetznerCSIConfigResolved(this)
}
data class HetznerCSIConfigResolved(val configUnresolved: HetznerCSIConfig) {
val hcloudApiToken: Secret = configUnresolved.hcloudApiToken.secret()
val encryptionPassphrase: Secret = configUnresolved.encryptionPassphrase.secret()
}
@Serializable
data class HetznerCSIConfigHolder(
val hetzner: HetznerCSIConfig
)

View file

@ -10,8 +10,7 @@ data class K3sConfig(
val loopback: Loopback = Loopback(ipv4 = "192.168.5.1", ipv6 = "fc00::5:1"), val loopback: Loopback = Loopback(ipv4 = "192.168.5.1", ipv6 = "fc00::5:1"),
val certmanager: Certmanager? = null, val certmanager: Certmanager? = null,
val echo: Echo? = null, val echo: Echo? = null,
val reprovision: Reprovision = false, val reprovision: Reprovision = false
val monthlyReboot: Boolean = false,
) { ) {
fun isDualStack(): Boolean { fun isDualStack(): Boolean {
return node.ipv6 != null && loopback.ipv6 != null return node.ipv6 != null && loopback.ipv6 != null

View file

@ -2,9 +2,6 @@ package org.domaindrivenarchitecture.provs.server.domain.k3s
import org.domaindrivenarchitecture.provs.configuration.infrastructure.DefaultConfigFileRepository import org.domaindrivenarchitecture.provs.configuration.infrastructure.DefaultConfigFileRepository
import org.domaindrivenarchitecture.provs.framework.core.Prov import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.ubuntu.cron.infrastructure.scheduleMonthlyReboot
import org.domaindrivenarchitecture.provs.server.domain.hetzner_csi.HetznerCSIConfigResolved
import org.domaindrivenarchitecture.provs.server.domain.hetzner_csi.provisionHetznerCSI
import org.domaindrivenarchitecture.provs.server.domain.k8s_grafana_agent.GrafanaAgentConfigResolved import org.domaindrivenarchitecture.provs.server.domain.k8s_grafana_agent.GrafanaAgentConfigResolved
import org.domaindrivenarchitecture.provs.server.domain.k8s_grafana_agent.provisionGrafanaAgent import org.domaindrivenarchitecture.provs.server.domain.k8s_grafana_agent.provisionGrafanaAgent
import org.domaindrivenarchitecture.provs.server.infrastructure.* import org.domaindrivenarchitecture.provs.server.infrastructure.*
@ -14,7 +11,6 @@ import kotlin.system.exitProcess
fun Prov.provisionK3sCommand(cli: K3sCliCommand) = task { fun Prov.provisionK3sCommand(cli: K3sCliCommand) = task {
val grafanaConfigResolved: GrafanaAgentConfigResolved? = findK8sGrafanaConfig(cli.configFileName)?.resolveSecret() val grafanaConfigResolved: GrafanaAgentConfigResolved? = findK8sGrafanaConfig(cli.configFileName)?.resolveSecret()
val hcloudConfigResolved: HetznerCSIConfigResolved? = findHetznerCSIConfig(cli.configFileName)?.resolveSecret()
if (cli.onlyModules == null) { if (cli.onlyModules == null) {
val k3sConfig: K3sConfig = getK3sConfig(cli.configFileName) val k3sConfig: K3sConfig = getK3sConfig(cli.configFileName)
@ -22,10 +18,9 @@ fun Prov.provisionK3sCommand(cli: K3sCliCommand) = task {
val k3sConfigReprovision = k3sConfig.copy(reprovision = cli.reprovision || k3sConfig.reprovision) val k3sConfigReprovision = k3sConfig.copy(reprovision = cli.reprovision || k3sConfig.reprovision)
val applicationFile = cli.applicationFileName?.let { DefaultApplicationFileRepository(cli.applicationFileName).getFile() } val applicationFile = cli.applicationFileName?.let { DefaultApplicationFileRepository(cli.applicationFileName).getFile() }
provisionK3s(k3sConfigReprovision, grafanaConfigResolved, hcloudConfigResolved, applicationFile) provisionK3s(k3sConfigReprovision, grafanaConfigResolved, applicationFile)
} else { } else {
provisionGrafana(cli.onlyModules, grafanaConfigResolved) provisionGrafana(cli.onlyModules, grafanaConfigResolved)
provisionHetznerCSI(cli.onlyModules, hcloudConfigResolved)
} }
} }
@ -35,7 +30,6 @@ fun Prov.provisionK3sCommand(cli: K3sCliCommand) = task {
fun Prov.provisionK3s( fun Prov.provisionK3s(
k3sConfig: K3sConfig, k3sConfig: K3sConfig,
grafanaConfigResolved: GrafanaAgentConfigResolved? = null, grafanaConfigResolved: GrafanaAgentConfigResolved? = null,
hetznerCSIConfigResolved: HetznerCSIConfigResolved? = null,
applicationFile: ApplicationFile? = null applicationFile: ApplicationFile? = null
) = task { ) = task {
@ -59,10 +53,6 @@ fun Prov.provisionK3s(
provisionGrafanaAgent(grafanaConfigResolved) provisionGrafanaAgent(grafanaConfigResolved)
} }
if (hetznerCSIConfigResolved != null) {
provisionHetznerCSI(hetznerCSIConfigResolved)
}
if (applicationFile != null) { if (applicationFile != null) {
provisionK3sApplication(applicationFile) provisionK3sApplication(applicationFile)
} }
@ -70,12 +60,6 @@ fun Prov.provisionK3s(
if (!k3sConfig.reprovision) { if (!k3sConfig.reprovision) {
provisionServerCliConvenience() provisionServerCliConvenience()
} }
if (k3sConfig.monthlyReboot) {
scheduleMonthlyReboot()
}
installK9s()
} }
private fun Prov.provisionGrafana( private fun Prov.provisionGrafana(
@ -91,18 +75,3 @@ private fun Prov.provisionGrafana(
provisionGrafanaAgent(grafanaConfigResolved) provisionGrafanaAgent(grafanaConfigResolved)
} }
} }
private fun Prov.provisionHetznerCSI(
onlyModules: List<String>?,
hetznerCSIConfigResolved: HetznerCSIConfigResolved?
) = task {
if (onlyModules != null && onlyModules.contains(ServerOnlyModule.HETZNER_CSI.name.lowercase())) {
if (hetznerCSIConfigResolved == null) {
println("ERROR: Could not find grafana config.")
exitProcess(7)
}
provisionHetznerCSI(hetznerCSIConfigResolved)
}
}

View file

@ -1,6 +1,5 @@
package org.domaindrivenarchitecture.provs.server.domain.k3s package org.domaindrivenarchitecture.provs.server.domain.k3s
enum class ServerOnlyModule { enum class ServerOnlyModule {
GRAFANA, GRAFANA
HETZNER_CSI
} }

View file

@ -2,6 +2,7 @@ package org.domaindrivenarchitecture.provs.server.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.Prov import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.core.ProvResult import org.domaindrivenarchitecture.provs.framework.core.ProvResult
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createFile
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createFileFromResource import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createFileFromResource
private const val resourcePath = "org/domaindrivenarchitecture/provs/desktop/infrastructure" private const val resourcePath = "org/domaindrivenarchitecture/provs/desktop/infrastructure"
@ -15,8 +16,7 @@ fun Prov.provisionServerCliConvenience() = task {
fun Prov.provisionKubectlCompletionAndAlias(): ProvResult = task { fun Prov.provisionKubectlCompletionAndAlias(): ProvResult = task {
cmd("kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl > /dev/null") cmd("kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl > /dev/null")
cmd("echo 'alias k=kubectl' >> ~/.bashrc") cmd("echo 'alias k=kubectl' >> ~/.bashrc")
cmd("echo 'alias k9=\"k9s --kubeconfig /etc/kubernetes/admin.conf\"' >> ~/.bashrc") cmd("echo 'complete -o default -F __start_kubectl k' >>~/.bashrc")
cmd("echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc")
} }
fun Prov.provisionVimrc(): ProvResult = task { fun Prov.provisionVimrc(): ProvResult = task {

View file

@ -1,53 +0,0 @@
package org.domaindrivenarchitecture.provs.server.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.core.Secret
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createFileFromResource
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createFileFromResourceTemplate
import org.domaindrivenarchitecture.provs.server.domain.k3s.FileMode
import java.io.File
private const val hetznerCSIResourceDir = "org/domaindrivenarchitecture/provs/server/infrastructure/hetznerCSI/"
fun Prov.provisionHetznerCSIForK8s(hetznerApiToken: Secret, encryptionPassphrase: Secret) {
// CSI Driver
createFileFromResourceTemplate(
k3sManualManifestsDir + "hcloud-api-token-secret.yaml",
"hcloud-api-token-secret.template.yaml",
resourcePath = hetznerCSIResourceDir,
posixFilePermission = "644",
values = mapOf(
"HETZNER_API_TOKEN" to hetznerApiToken.plain()
))
cmd("kubectl apply -f hcloud-api-token-secret.yaml", k3sManualManifestsDir)
applyHetznerCSIFileFromResource(File(k3sManualManifestsDir, "hcloud-csi.yaml"))
// Encryption
createFileFromResourceTemplate(
k3sManualManifestsDir + "hcloud-encryption-secret.yaml",
"hcloud-encryption-secret.template.yaml",
resourcePath = hetznerCSIResourceDir,
posixFilePermission = "644",
values = mapOf(
"HETZNER_ENCRYPTION_PASSPHRASE" to encryptionPassphrase.plain()
))
cmd("kubectl apply -f hcloud-encryption-secret.yaml", k3sManualManifestsDir)
applyHetznerCSIFileFromResource(File(k3sManualManifestsDir, "hcloud-encrypted-storage-class.yaml"))
}
private fun Prov.createHetznerCSIFileFromResource(
file: File,
posixFilePermission: FileMode? = "644"
) = task {
createFileFromResource(
file.path,
file.name,
hetznerCSIResourceDir,
posixFilePermission,
sudo = true
)
}
private fun Prov.applyHetznerCSIFileFromResource(file: File, posixFilePermission: FileMode? = "644") = task {
createHetznerCSIFileFromResource(file, posixFilePermission)
cmd("kubectl apply -f ${file.path}", sudo = true)
}

View file

@ -1,31 +0,0 @@
package org.domaindrivenarchitecture.provs.server.infrastructure
import com.charleskorn.kaml.MissingRequiredPropertyException
import org.domaindrivenarchitecture.provs.configuration.domain.ConfigFileName
import org.domaindrivenarchitecture.provs.framework.core.readFromFile
import org.domaindrivenarchitecture.provs.framework.core.toYaml
import org.domaindrivenarchitecture.provs.framework.core.yamlToType
import org.domaindrivenarchitecture.provs.server.domain.hetzner_csi.HetznerCSIConfig
import org.domaindrivenarchitecture.provs.server.domain.hetzner_csi.HetznerCSIConfigHolder
import java.io.File
import java.io.FileWriter
private const val DEFAULT_CONFIG_FILE = "server-config.yaml"
fun findHetznerCSIConfig(fileName: ConfigFileName? = null): HetznerCSIConfig? {
val filePath = fileName?.fileName ?: DEFAULT_CONFIG_FILE
return if(File(filePath).exists()) {
try {
readFromFile(filePath).yamlToType<HetznerCSIConfigHolder>().hetzner
} catch (e: MissingRequiredPropertyException) {
if (e.message.contains("Property 'hetzner'")) null else throw e
}
} else {
null
}
}
@Suppress("unused")
internal fun writeConfig(config: HetznerCSIConfigHolder, fileName: String = "hetzner-config.yaml") =
FileWriter(fileName).use { it.write(config.toYaml()) }

View file

@ -0,0 +1,39 @@
package org.domaindrivenarchitecture.provs.server.domain
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.core.docker.provideContainer
import org.domaindrivenarchitecture.provs.framework.core.echoCommandForTextWithNewlinesReplaced
import org.domaindrivenarchitecture.provs.framework.core.repeatTaskUntilSuccess
/**
* Runs a k3s server and a k3s agent as containers.
* Copies the kubeconfig from container to the default location: $HOME/.kube/config
*/
fun Prov.installK3sAsContainers(token: String = "12345678901234") = task {
cmd("docker volume create k3s-server")
provideContainer("k3s-server", "rancher/k3s", command = "server --cluster-init", options =
"-d --privileged --tmpfs /run --tmpfs /var/run " +
"-e K3S_TOKEN=$token -e K3S_KUBECONFIG_OUTPUT=./kubeconfig.yaml -e K3S_KUBECONFIG_MODE=666 " +
"-v k3s-server:/var/lib/rancher/k3s:z -p 6443:6443 -p 80:80 -p 443:443 " +
"--ulimit nproc=65535 --ulimit nofile=65535:65535")
// wait for config file
cmd("export timeout=60; while [ ! -f /var/lib/docker/volumes/k3s-server/_data/server/kubeconfig.yaml ]; do if [ \"${'$'}timeout\" == 0 ]; then echo \"ERROR: Timeout while waiting for file.\"; break; fi; sleep 1; ((timeout--)); done")
sh("""
mkdir -p ${'$'}HOME/.kube/
cp /var/lib/docker/volumes/k3s-server/_data/server/kubeconfig.yaml ${'$'}HOME/.kube/config
""".trimIndent())
}
/**
* Apply a config to kubernetes.
* Prerequisite: Kubectl has to be installed
*/
fun Prov.applyK8sConfig(configAsYaml: String, kubectlCommand: String = "kubectl") = task {
repeatTaskUntilSuccess(6, 10) {
cmd(echoCommandForTextWithNewlinesReplaced(configAsYaml) + " | $kubectlCommand apply -f -")
}
}
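A hedged sketch combining the two tasks above; the namespace manifest is illustrative, and (as noted in the doc of applyK8sConfig) kubectl is assumed to be installed already.

import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.server.domain.applyK8sConfig
import org.domaindrivenarchitecture.provs.server.domain.installK3sAsContainers

fun Prov.k3sInDockerDemo() = task {
    installK3sAsContainers()                 // default token; kubeconfig is copied to ~/.kube/config
    applyK8sConfig("""
        apiVersion: v1
        kind: Namespace
        metadata:
          name: demo
    """.trimIndent())                        // applyK8sConfig retries the apply via repeatTaskUntilSuccess
}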

View file

@ -10,9 +10,7 @@ import java.io.File
// ----------------------------------- versions -------------------------------- // ----------------------------------- versions --------------------------------
// when updating this version, it is recommended to update also file k3s-install.sh as well as traefik.yaml in this repo const val K3S_VERSION = "v1.23.6+k3s1"
// (both files in: src/main/resources/org/domaindrivenarchitecture/provs/server/infrastructure/k3s/)
const val K3S_VERSION = "v1.29.1+k3s2"
// ----------------------------------- directories -------------------------------- // ----------------------------------- directories --------------------------------
const val k3sManualManifestsDir = "/etc/rancher/k3s/manifests/" const val k3sManualManifestsDir = "/etc/rancher/k3s/manifests/"
@ -33,12 +31,12 @@ private val k3sMiddleWareHttpsRedirect = File(k3sManualManifestsDir, "middleware
private val certManagerDeployment = File(k3sManualManifestsDir, "cert-manager.yaml") private val certManagerDeployment = File(k3sManualManifestsDir, "cert-manager.yaml")
private val certManagerIssuer = File(k3sManualManifestsDir, "le-issuer.yaml") private val certManagerIssuer = File(k3sManualManifestsDir, "le-issuer.yaml")
private val k3sEchoWithTls = File(k3sManualManifestsDir, "echo-tls.yaml") private val k3sEcho = File(k3sManualManifestsDir, "echo.yaml")
private val k3sEchoNoTls = File(k3sManualManifestsDir, "echo-no-tls.yaml")
private val selfSignedCertificate = File(k3sManualManifestsDir, "selfsigned-certificate.yaml") private val selfSignedCertificate = File(k3sManualManifestsDir, "selfsigned-certificate.yaml")
private val localPathProvisionerConfig = File(k3sManualManifestsDir, "local-path-provisioner-config.yaml") private val localPathProvisionerConfig = File(k3sManualManifestsDir, "local-path-provisioner-config.yaml")
// ----------------------------------- public functions -------------------------------- // ----------------------------------- public functions --------------------------------
fun Prov.testConfigExists(): Boolean { fun Prov.testConfigExists(): Boolean {
@ -51,11 +49,7 @@ fun Prov.deprovisionK3sInfra() = task {
deleteFile(certManagerDeployment.path, sudo = true) deleteFile(certManagerDeployment.path, sudo = true)
deleteFile(certManagerIssuer.path, sudo = true) deleteFile(certManagerIssuer.path, sudo = true)
deleteFile(k3sKubeConfig.path, sudo = true) deleteFile(k3sKubeConfig.path, sudo = true)
cmd("k3s-uninstall.sh")
val k3sUninstallScript = "k3s-uninstall.sh"
if (chk("which $k3sUninstallScript")) {
cmd(k3sUninstallScript)
}
} }
@ -101,7 +95,7 @@ fun Prov.installK3s(k3sConfig: K3sConfig): ProvResult {
// metallb // metallb
applyK3sFileFromResource(File(k3sManualManifestsDir, "metallb-0.13.7-native-manifest.yaml")) applyK3sFileFromResource(File(k3sManualManifestsDir, "metallb-0.13.7-native-manifest.yaml"))
repeatTaskUntilSuccess(10, 10) { repeatTaskUntilSuccess(6, 10) {
applyK3sFileFromResourceTemplate( applyK3sFileFromResourceTemplate(
File(k3sManualManifestsDir, "metallb-config.yaml"), File(k3sManualManifestsDir, "metallb-config.yaml"),
k3sConfigMap, k3sConfigMap,
@ -123,9 +117,8 @@ fun Prov.installK3s(k3sConfig: K3sConfig): ProvResult {
applyK3sFileFromResource(k3sMiddleWareHttpsRedirect) applyK3sFileFromResource(k3sMiddleWareHttpsRedirect)
} }
// other
applyK3sFileFromResource(localPathProvisionerConfig) applyK3sFileFromResource(localPathProvisionerConfig)
// TODO: jem 2022-11-25: Why do we need sudo here??
cmd("kubectl set env deployment -n kube-system local-path-provisioner DEPLOY_DATE=\"$(date)\"", sudo = true) cmd("kubectl set env deployment -n kube-system local-path-provisioner DEPLOY_DATE=\"$(date)\"", sudo = true)
cmd("ln -sf $k3sKubeConfig " + k8sCredentialsDir + "admin.conf", sudo = true) cmd("ln -sf $k3sKubeConfig " + k8sCredentialsDir + "admin.conf", sudo = true)
@ -151,8 +144,7 @@ fun Prov.provisionK3sCertManager(certmanager: Certmanager) = task {
} }
} }
fun Prov.provisionK3sEcho(fqdn: String, endpoint: CertmanagerEndpoint? = null, withTls: Boolean = false) = task { fun Prov.provisionK3sEcho(fqdn: String, endpoint: CertmanagerEndpoint? = null) = task {
if (withTls) {
val endpointName = endpoint?.name?.lowercase() val endpointName = endpoint?.name?.lowercase()
val issuer = if (endpointName == null) { val issuer = if (endpointName == null) {
@ -161,10 +153,8 @@ fun Prov.provisionK3sEcho(fqdn: String, endpoint: CertmanagerEndpoint? = null, w
} else { } else {
endpointName endpointName
} }
applyK3sFileFromResourceTemplate(k3sEchoWithTls, mapOf("fqdn" to fqdn, "issuer_name" to issuer))
} else { applyK3sFileFromResourceTemplate(k3sEcho, mapOf("fqdn" to fqdn, "issuer_name" to issuer))
applyK3sFileFromResource(k3sEchoNoTls)
}
} }
fun Prov.provisionK3sApplication(applicationFile: ApplicationFile) = task { fun Prov.provisionK3sApplication(applicationFile: ApplicationFile) = task {

View file

@ -1,15 +0,0 @@
package org.domaindrivenarchitecture.provs.server.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.ubuntu.web.base.downloadFromURL
const val K9S_VERSION = "v0.32.5"
fun Prov.installK9s() = task {
if (cmdNoEval("k9s version").out?.contains(K9S_VERSION) != true) {
downloadFromURL("https://github.com/derailed/k9s/releases/download/$K9S_VERSION/k9s_linux_amd64.deb", "k9s_linux_amd64.deb", "/tmp")
cmd("sudo dpkg -i k9s_linux_amd64.deb", "/tmp")
}
}

View file

@ -1,5 +1,10 @@
package org.domaindrivenarchitecture.provs.syspec.infrastructure package org.domaindrivenarchitecture.provs.syspec.infrastructure
import aws.sdk.kotlin.services.s3.S3Client
import aws.sdk.kotlin.services.s3.model.ListObjectsRequest
import aws.sdk.kotlin.services.s3.model.ListObjectsResponse
import aws.smithy.kotlin.runtime.time.Instant
import kotlinx.coroutines.runBlocking
import org.domaindrivenarchitecture.provs.framework.core.Prov import org.domaindrivenarchitecture.provs.framework.core.Prov
import org.domaindrivenarchitecture.provs.framework.core.ProvResult import org.domaindrivenarchitecture.provs.framework.core.ProvResult
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.checkDir import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.checkDir
@ -8,6 +13,7 @@ import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.isPackag
import org.domaindrivenarchitecture.provs.syspec.domain.* import org.domaindrivenarchitecture.provs.syspec.domain.*
import java.text.ParseException import java.text.ParseException
import java.text.SimpleDateFormat import java.text.SimpleDateFormat
import java.time.Duration
import java.util.* import java.util.*
import java.util.concurrent.TimeUnit import java.util.concurrent.TimeUnit
@ -23,6 +29,7 @@ fun Prov.verifySpecConfig(conf: SyspecConfig) = task {
conf.netcat?.let { task("NetcatSpecs") { for (spec in conf.netcat) verify(spec) } } conf.netcat?.let { task("NetcatSpecs") { for (spec in conf.netcat) verify(spec) } }
conf.socket?.let { task("SocketSpecs") { for (spec in conf.socket) verify(spec) } } conf.socket?.let { task("SocketSpecs") { for (spec in conf.socket) verify(spec) } }
conf.certificate?.let { task("CertificateFileSpecs") { for (spec in conf.certificate) verify(spec) } } conf.certificate?.let { task("CertificateFileSpecs") { for (spec in conf.certificate) verify(spec) } }
conf.s3?.let { task("CertificateFileSpecs") { for (spec in conf.s3) verify(spec) } }
} }
// ------------------------------- verification functions for individual specs -------------------------------- // ------------------------------- verification functions for individual specs --------------------------------
@ -105,6 +112,27 @@ fun Prov.verify(cert: CertificateFileSpec) {
}
}
fun Prov.verify(s3ObjectSpec: S3ObjectSpec) {
val (bucket, prefix, maxAge) = s3ObjectSpec
val expectedAge = Duration.ofHours(s3ObjectSpec.age)
val latestObject = getS3Objects(bucket, prefix).contents?.maxByOrNull { it.lastModified ?: Instant.fromEpochSeconds(0) }
if (latestObject == null) {
verify(false, "Could not retrieve an s3 object with prefix $prefix")
} else {
// convert to java.time.Instant for easier comparison
val lastModified = java.time.Instant.ofEpochSecond(latestObject.lastModified?.epochSeconds ?: 0)
val actualAge = Duration.between(lastModified, java.time.Instant.now())
verify(
actualAge <= expectedAge,
"Age is ${actualAge.toHours()} h (expected: <= $maxAge) for latest file with prefix \"$prefix\" " +
"--- modified date: $lastModified - size: ${(latestObject.size)} B - key: ${latestObject.key}"
)
}
}
// -------------------------- helper functions ---------------------------------
@ -187,3 +215,14 @@ private fun Prov.verifyCertExpiration(enddate: String?, certName: String, expira
)
}
}
private fun getS3Objects(bucketName: String, prefixIn: String): ListObjectsResponse {
val request = ListObjectsRequest { bucket = bucketName; prefix = prefixIn }
return runBlocking {
S3Client { region = "eu-central-1" }.use { s3 ->
s3.listObjects(request)
}
}
}
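The new S3 check destructures the spec into bucket, prefix and a maximum age in hours, fetches the newest matching object via the AWS SDK for Kotlin (region hard-coded to eu-central-1) and compares its lastModified timestamp using java.time.Duration. The exact shape of S3ObjectSpec is not part of this diff; a plausible sketch consistent with the destructuring and the s3ObjectSpec.age access would be:

// assumption: the spec is a simple data class referenced from SyspecConfig.s3
data class S3ObjectSpec(
    val bucket: String,
    val prefix: String = "",
    val age: Long = 24,  // maximum allowed age of the newest matching object, in hours
)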


@ -8,7 +8,7 @@ function usage() {
function main() {
local cluster_name="${1}";
-local domain_name="${2:-meissa.de}";
+local domain_name="${2:-meissa-gmbh.de}";
/usr/local/bin/k3s-create-context.sh ${cluster_name} ${domain_name}
kubectl config use-context ${cluster_name}


@ -4,9 +4,8 @@ set -o noglob
function main() {
local cluster_name="${1}"; shift
-local domain_name="${1:-meissa.de}"; shift
-ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@${cluster_name}.${domain_name} -L 8002:localhost:8002 -L 6443:192.168.5.1:6443
+ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@${cluster_name}.meissa-gmbh.de -L 8002:localhost:8002 -L 6443:192.168.5.1:6443
}
main $1


@ -4,9 +4,8 @@ set -o noglob
function main() {
local cluster_name="${1}"; shift
-local domain_name="${1:-meissa.de}"; shift
-ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@${cluster_name}.${domain_name}
+ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@${cluster_name}.meissa-gmbh.de
}
main $1


@ -1,7 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: hcloud
namespace: kube-system
stringData:
token: $HETZNER_API_TOKEN


@ -1,401 +0,0 @@
# Version 2.6.0
# Source: hcloud-csi/templates/controller/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: hcloud-csi-controller
namespace: "kube-system"
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: controller
automountServiceAccountToken: true
---
# Source: hcloud-csi/templates/core/storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: hcloud-volumes
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: csi.hetzner.cloud
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
reclaimPolicy: "Delete"
---
# Source: hcloud-csi/templates/controller/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hcloud-csi-controller
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: controller
rules:
# attacher
- apiGroups: [""]
resources: [persistentvolumes]
verbs: [get, list, watch, update, patch]
- apiGroups: [""]
resources: [nodes]
verbs: [get, list, watch]
- apiGroups: [csi.storage.k8s.io]
resources: [csinodeinfos]
verbs: [get, list, watch]
- apiGroups: [storage.k8s.io]
resources: [csinodes]
verbs: [get, list, watch]
- apiGroups: [storage.k8s.io]
resources: [volumeattachments]
verbs: [get, list, watch, update, patch]
- apiGroups: [storage.k8s.io]
resources: [volumeattachments/status]
verbs: [patch]
# provisioner
- apiGroups: [""]
resources: [secrets]
verbs: [get, list]
- apiGroups: [""]
resources: [persistentvolumes]
verbs: [get, list, watch, create, delete, patch]
- apiGroups: [""]
resources: [persistentvolumeclaims, persistentvolumeclaims/status]
verbs: [get, list, watch, update, patch]
- apiGroups: [storage.k8s.io]
resources: [storageclasses]
verbs: [get, list, watch]
- apiGroups: [""]
resources: [events]
verbs: [list, watch, create, update, patch]
- apiGroups: [snapshot.storage.k8s.io]
resources: [volumesnapshots]
verbs: [get, list]
- apiGroups: [snapshot.storage.k8s.io]
resources: [volumesnapshotcontents]
verbs: [get, list]
# resizer
- apiGroups: [""]
resources: [pods]
verbs: [get, list, watch]
# node
- apiGroups: [""]
resources: [events]
verbs: [get, list, watch, create, update, patch]
---
# Source: hcloud-csi/templates/controller/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hcloud-csi-controller
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hcloud-csi-controller
subjects:
- kind: ServiceAccount
name: hcloud-csi-controller
namespace: "kube-system"
---
# Source: hcloud-csi/templates/controller/service.yaml
apiVersion: v1
kind: Service
metadata:
name: hcloud-csi-controller-metrics
namespace: "kube-system"
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: controller
spec:
ports:
- name: metrics
port: 9189
selector:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: controller
---
# Source: hcloud-csi/templates/node/service.yaml
apiVersion: v1
kind: Service
metadata:
name: hcloud-csi-node-metrics
namespace: "kube-system"
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: node
spec:
ports:
- name: metrics
port: 9189
selector:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: node
---
# Source: hcloud-csi/templates/node/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: hcloud-csi-node
namespace: "kube-system"
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: node
app: hcloud-csi
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app: hcloud-csi
template:
metadata:
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: node
app: hcloud-csi
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: instance.hetzner.cloud/is-root-server
operator: NotIn
values:
- "true"
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
securityContext:
fsGroup: 1001
initContainers:
containers:
- name: csi-node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0
imagePullPolicy: IfNotPresent
args:
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi.hetzner.cloud/socket
volumeMounts:
- name: plugin-dir
mountPath: /run/csi
- name: registration-dir
mountPath: /registration
resources:
limits: {}
requests: {}
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.9.0
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /run/csi
name: plugin-dir
resources:
limits: {}
requests: {}
- name: hcloud-csi-driver
image: docker.io/hetznercloud/hcloud-csi-driver:v2.6.0 # x-release-please-version
imagePullPolicy: IfNotPresent
command: [/bin/hcloud-csi-driver-node]
volumeMounts:
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: plugin-dir
mountPath: /run/csi
- name: device-dir
mountPath: /dev
securityContext:
privileged: true
env:
- name: CSI_ENDPOINT
value: unix:///run/csi/socket
- name: METRICS_ENDPOINT
value: "0.0.0.0:9189"
- name: ENABLE_METRICS
value: "true"
ports:
- containerPort: 9189
name: metrics
- name: healthz
protocol: TCP
containerPort: 9808
resources:
limits: {}
requests: {}
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 10
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 3
httpGet:
path: /healthz
port: healthz
volumes:
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/csi.hetzner.cloud/
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: device-dir
hostPath:
path: /dev
type: Directory
---
# Source: hcloud-csi/templates/controller/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: hcloud-csi-controller
namespace: "kube-system"
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: controller
app: hcloud-csi-controller
spec:
replicas: 1
strategy:
type: RollingUpdate
selector:
matchLabels:
app: hcloud-csi-controller
template:
metadata:
labels:
app.kubernetes.io/name: hcloud-csi
app.kubernetes.io/instance: hcloud-csi
app.kubernetes.io/component: controller
app: hcloud-csi-controller
spec:
serviceAccountName: hcloud-csi-controller
securityContext:
fsGroup: 1001
initContainers:
containers:
- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v4.1.0
imagePullPolicy: IfNotPresent
resources:
limits: {}
requests: {}
args:
- --default-fstype=ext4
volumeMounts:
- name: socket-dir
mountPath: /run/csi
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.7.0
imagePullPolicy: IfNotPresent
resources:
limits: {}
requests: {}
volumeMounts:
- name: socket-dir
mountPath: /run/csi
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0
imagePullPolicy: IfNotPresent
resources:
limits: {}
requests: {}
args:
- --feature-gates=Topology=true
- --default-fstype=ext4
volumeMounts:
- name: socket-dir
mountPath: /run/csi
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.9.0
imagePullPolicy: IfNotPresent
resources:
limits: {}
requests: {}
volumeMounts:
- mountPath: /run/csi
name: socket-dir
- name: hcloud-csi-driver
image: docker.io/hetznercloud/hcloud-csi-driver:v2.6.0 # x-release-please-version
imagePullPolicy: IfNotPresent
command: [/bin/hcloud-csi-driver-controller]
env:
- name: CSI_ENDPOINT
value: unix:///run/csi/socket
- name: METRICS_ENDPOINT
value: "0.0.0.0:9189"
- name: ENABLE_METRICS
value: "true"
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: HCLOUD_TOKEN
valueFrom:
secretKeyRef:
name: hcloud
key: token
resources:
limits: {}
requests: {}
ports:
- name: metrics
containerPort: 9189
- name: healthz
protocol: TCP
containerPort: 9808
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 10
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 3
httpGet:
path: /healthz
port: healthz
volumeMounts:
- name: socket-dir
mountPath: /run/csi
volumes:
- name: socket-dir
emptyDir: {}
---
# Source: hcloud-csi/templates/core/csidriver.yaml
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: csi.hetzner.cloud
spec:
attachRequired: true
fsGroupPolicy: File
podInfoOnMount: true
volumeLifecycleModes:
- Persistent
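The hcloud-csi bundle above (driver v2.6.0 plus RBAC, node DaemonSet, controller Deployment, StorageClass and CSIDriver) exists only on the main side and depends on the hcloud secret defined earlier from $HETZNER_API_TOKEN. How it is wired into provisioning is not shown in this diff; a hedged guess, reusing the applyK3sFileFromResource helper seen in the echo provisioning and a hypothetical resource constant:

// illustrative sketch, not the repository's actual wiring; hcloudCsiManifest is assumed
fun Prov.provisionHetznerCsiExample() = task {
    applyK3sFileFromResource(hcloudCsiManifest)
}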


@ -1,11 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: hcloud-volumes-encrypted
provisioner: csi.hetzner.cloud
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
csi.storage.k8s.io/node-publish-secret-name: encryption-secret
csi.storage.k8s.io/node-publish-secret-namespace: kube-system


@ -1,7 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: encryption-secret
namespace: kube-system
stringData:
encryption-passphrase: $HETZNER_ENCRYPTION_PASSPHRASE


@ -1,40 +0,0 @@
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
name: echo-ingress
spec:
ingressClassName: traefik
rules:
- http:
paths:
- pathType: Exact
path: /echo/ # traefik echo pod needs the trailing slash, otherwise it'll return bad request
backend:
service:
name: echo-service
port:
number: 80
---
kind: Pod
apiVersion: v1
metadata:
name: echo-app
labels:
app: echo
spec:
containers:
- name: echo-app
image: traefik/whoami
---
kind: Service
apiVersion: v1
metadata:
name: echo-service
spec:
selector:
app: echo
ports:
- port: 80 # Default port for image


@ -3,15 +3,15 @@ apiVersion: networking.k8s.io/v1
metadata:
name: echo-ingress
annotations:
+kubernetes.io/ingress.class: "traefik"
cert-manager.io/cluster-issuer: ${issuer_name}
spec:
-ingressClassName: traefik
rules:
- host: ${fqdn}
http:
paths:
-- pathType: Exact
+- pathType: Prefix
-path: /echo/ # traefik echo pod needs the trailing slash, otherwise it'll return bad request
+path: /echo
backend:
service:
name: echo-service


@ -1,6 +1,4 @@
#!/bin/sh #!/bin/sh
# File taken from https://github.com/k3s-io/k3s/blob/master/install.sh
set -e set -e
set -o noglob set -o noglob
@ -20,7 +18,7 @@ set -o noglob
# Environment variables which begin with K3S_ will be preserved for the # Environment variables which begin with K3S_ will be preserved for the
# systemd service to use. Setting K3S_URL without explicitly setting # systemd service to use. Setting K3S_URL without explicitly setting
# a systemd exec command will default the command to "agent", and we # a systemd exec command will default the command to "agent", and we
-# enforce that K3S_TOKEN is also set.
+# enforce that K3S_TOKEN or K3S_CLUSTER_SECRET is also set.
# #
# - INSTALL_K3S_SKIP_DOWNLOAD # - INSTALL_K3S_SKIP_DOWNLOAD
# If set to true will not download k3s hash or binary. # If set to true will not download k3s hash or binary.
@ -46,10 +44,6 @@ set -o noglob
# Commit of k3s to download from temporary cloud storage. # Commit of k3s to download from temporary cloud storage.
# * (for developer & QA use) # * (for developer & QA use)
# #
# - INSTALL_K3S_PR
# PR build of k3s to download from Github Artifacts.
# * (for developer & QA use)
#
# - INSTALL_K3S_BIN_DIR # - INSTALL_K3S_BIN_DIR
# Directory to install k3s binary, links, and uninstall script to, or use # Directory to install k3s binary, links, and uninstall script to, or use
# /usr/local/bin as the default # /usr/local/bin as the default
@ -98,8 +92,7 @@ set -o noglob
# Defaults to 'stable'. # Defaults to 'stable'.
GITHUB_URL=https://github.com/k3s-io/k3s/releases GITHUB_URL=https://github.com/k3s-io/k3s/releases
-GITHUB_PR_URL=""
-STORAGE_URL=https://k3s-ci-builds.s3.amazonaws.com
+STORAGE_URL=https://storage.googleapis.com/k3s-ci-builds
DOWNLOADER= DOWNLOADER=
# --- helper functions for logs --- # --- helper functions for logs ---
@ -177,8 +170,8 @@ setup_env() {
if [ -z "${K3S_URL}" ]; then if [ -z "${K3S_URL}" ]; then
CMD_K3S=server CMD_K3S=server
else else
-if [ -z "${K3S_TOKEN}" ] && [ -z "${K3S_TOKEN_FILE}" ]; then
+if [ -z "${K3S_TOKEN}" ] && [ -z "${K3S_TOKEN_FILE}" ] && [ -z "${K3S_CLUSTER_SECRET}" ]; then
-fatal "Defaulted k3s exec command to 'agent' because K3S_URL is defined, but K3S_TOKEN or K3S_TOKEN_FILE is not defined."
+fatal "Defaulted k3s exec command to 'agent' because K3S_URL is defined, but K3S_TOKEN, K3S_TOKEN_FILE or K3S_CLUSTER_SECRET is not defined."
fi fi
CMD_K3S=agent CMD_K3S=agent
fi fi
@ -224,7 +217,11 @@ setup_env() {
if [ -n "${INSTALL_K3S_TYPE}" ]; then if [ -n "${INSTALL_K3S_TYPE}" ]; then
SYSTEMD_TYPE=${INSTALL_K3S_TYPE} SYSTEMD_TYPE=${INSTALL_K3S_TYPE}
else else
if [ "${CMD_K3S}" = server ]; then
SYSTEMD_TYPE=notify SYSTEMD_TYPE=notify
else
SYSTEMD_TYPE=exec
fi
fi fi
# --- use binary install directory if defined or create default --- # --- use binary install directory if defined or create default ---
@ -276,14 +273,8 @@ setup_env() {
} }
# --- check if skip download environment variable set --- # --- check if skip download environment variable set ---
-can_skip_download_binary() {
+can_skip_download() {
-if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ] && [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != binary ]; then
+if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ]; then
return 1
fi
}
can_skip_download_selinux() {
if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ] && [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != selinux ]; then
return 1 return 1
fi fi
} }
@ -313,10 +304,6 @@ setup_verify_arch() {
ARCH=arm64 ARCH=arm64
SUFFIX=-${ARCH} SUFFIX=-${ARCH}
;; ;;
s390x)
ARCH=s390x
SUFFIX=-${ARCH}
;;
aarch64) aarch64)
ARCH=arm64 ARCH=arm64
SUFFIX=-${ARCH} SUFFIX=-${ARCH}
@ -344,7 +331,6 @@ verify_downloader() {
setup_tmp() { setup_tmp() {
TMP_DIR=$(mktemp -d -t k3s-install.XXXXXXXXXX) TMP_DIR=$(mktemp -d -t k3s-install.XXXXXXXXXX)
TMP_HASH=${TMP_DIR}/k3s.hash TMP_HASH=${TMP_DIR}/k3s.hash
TMP_ZIP=${TMP_DIR}/k3s.zip
TMP_BIN=${TMP_DIR}/k3s.bin TMP_BIN=${TMP_DIR}/k3s.bin
cleanup() { cleanup() {
code=$? code=$?
@ -358,10 +344,7 @@ setup_tmp() {
# --- use desired k3s version if defined or find version from channel --- # --- use desired k3s version if defined or find version from channel ---
get_release_version() { get_release_version() {
-if [ -n "${INSTALL_K3S_PR}" ]; then
+if [ -n "${INSTALL_K3S_COMMIT}" ]; then
VERSION_K3S="PR ${INSTALL_K3S_PR}"
get_pr_artifact_url
elif [ -n "${INSTALL_K3S_COMMIT}" ]; then
VERSION_K3S="commit ${INSTALL_K3S_COMMIT}" VERSION_K3S="commit ${INSTALL_K3S_COMMIT}"
elif [ -n "${INSTALL_K3S_VERSION}" ]; then elif [ -n "${INSTALL_K3S_VERSION}" ]; then
VERSION_K3S=${INSTALL_K3S_VERSION} VERSION_K3S=${INSTALL_K3S_VERSION}
@ -383,49 +366,10 @@ get_release_version() {
info "Using ${VERSION_K3S} as release" info "Using ${VERSION_K3S} as release"
} }
# --- get k3s-selinux version ---
get_k3s_selinux_version() {
available_version="k3s-selinux-1.2-2.${rpm_target}.noarch.rpm"
info "Finding available k3s-selinux versions"
# run verify_downloader in case it binary installation was skipped
verify_downloader curl || verify_downloader wget || fatal 'Can not find curl or wget for downloading files'
case $DOWNLOADER in
curl)
DOWNLOADER_OPTS="-s"
;;
wget)
DOWNLOADER_OPTS="-q -O -"
;;
*)
fatal "Incorrect downloader executable '$DOWNLOADER'"
;;
esac
for i in {1..3}; do
set +e
if [ "${rpm_channel}" = "testing" ]; then
version=$(timeout 5 ${DOWNLOADER} ${DOWNLOADER_OPTS} https://api.github.com/repos/k3s-io/k3s-selinux/releases | grep browser_download_url | awk '{ print $2 }' | grep -oE "[^\/]+${rpm_target}\.noarch\.rpm" | head -n 1)
else
version=$(timeout 5 ${DOWNLOADER} ${DOWNLOADER_OPTS} https://api.github.com/repos/k3s-io/k3s-selinux/releases/latest | grep browser_download_url | awk '{ print $2 }' | grep -oE "[^\/]+${rpm_target}\.noarch\.rpm")
fi
set -e
if [ "${version}" != "" ]; then
break
fi
sleep 1
done
if [ "${version}" == "" ]; then
warn "Failed to get available versions of k3s-selinux..defaulting to ${available_version}"
return
fi
available_version=${version}
}
# --- download from github url --- # --- download from github url ---
download() { download() {
[ $# -eq 2 ] || fatal 'download needs exactly 2 arguments' [ $# -eq 2 ] || fatal 'download needs exactly 2 arguments'
set +e
case $DOWNLOADER in case $DOWNLOADER in
curl) curl)
curl -o $1 -sfL $2 curl -o $1 -sfL $2
@ -440,17 +384,10 @@ download() {
# Abort if download command failed # Abort if download command failed
[ $? -eq 0 ] || fatal 'Download failed' [ $? -eq 0 ] || fatal 'Download failed'
set -e
} }
# --- download hash from github url --- # --- download hash from github url ---
download_hash() { download_hash() {
if [ -n "${INSTALL_K3S_PR}" ]; then
info "Downloading hash ${GITHUB_PR_URL}"
curl -o ${TMP_ZIP} -H "Authorization: Bearer $GITHUB_TOKEN" -L ${GITHUB_PR_URL}
unzip -p ${TMP_ZIP} k3s.sha256sum > ${TMP_HASH}
sed -i 's/dist\/artifacts\/k3s/k3s/g' ${TMP_HASH}
else
if [ -n "${INSTALL_K3S_COMMIT}" ]; then if [ -n "${INSTALL_K3S_COMMIT}" ]; then
HASH_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}.sha256sum HASH_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}.sha256sum
else else
@ -458,7 +395,6 @@ download_hash() {
fi fi
info "Downloading hash ${HASH_URL}" info "Downloading hash ${HASH_URL}"
download ${TMP_HASH} ${HASH_URL} download ${TMP_HASH} ${HASH_URL}
fi
HASH_EXPECTED=$(grep " k3s${SUFFIX}$" ${TMP_HASH}) HASH_EXPECTED=$(grep " k3s${SUFFIX}$" ${TMP_HASH})
HASH_EXPECTED=${HASH_EXPECTED%%[[:blank:]]*} HASH_EXPECTED=${HASH_EXPECTED%%[[:blank:]]*}
} }
@ -475,48 +411,9 @@ installed_hash_matches() {
return 1 return 1
} }
# Use the GitHub API to identify the artifact associated with a given PR
get_pr_artifact_url() {
GITHUB_API_URL=https://api.github.com/repos/k3s-io/k3s
# Check if jq is installed
if ! [ -x "$(command -v jq)" ]; then
echo "jq is required to use INSTALL_K3S_PR. Please install jq and try again"
exit 1
fi
if [ -z "${GITHUB_TOKEN}" ]; then
fatal "Installing PR builds requires GITHUB_TOKEN with k3s-io/k3s repo authorization"
fi
# GET request to the GitHub API to retrieve the latest commit SHA from the pull request
COMMIT_ID=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" "$GITHUB_API_URL/pulls/$INSTALL_K3S_PR" | jq -r '.head.sha')
# GET request to the GitHub API to retrieve the Build workflow associated with the commit
wf_raw=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" "$GITHUB_API_URL/commits/$COMMIT_ID/check-runs")
build_workflow=$(printf "%s" "$wf_raw" | jq -r '.check_runs[] | select(.name == "build / Build")')
# Extract the Run ID from the build workflow and lookup artifacts associated with the run
RUN_ID=$(echo "$build_workflow" | jq -r ' .details_url' | awk -F'/' '{print $(NF-2)}')
# Extract the artifat ID for the "k3s" artifact
artifacts=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" "$GITHUB_API_URL/actions/runs/$RUN_ID/artifacts")
artifacts_url=$(echo "$artifacts" | jq -r '.artifacts[] | select(.name == "k3s") | .archive_download_url')
GITHUB_PR_URL=$artifacts_url
}
# --- download binary from github url --- # --- download binary from github url ---
download_binary() { download_binary() {
-if [ -n "${INSTALL_K3S_PR}" ]; then
+if [ -n "${INSTALL_K3S_COMMIT}" ]; then
# Since Binary and Hash are zipped together, check if TMP_ZIP already exists
if ! [ -f ${TMP_ZIP} ]; then
info "Downloading K3s artifact ${GITHUB_PR_URL}"
curl -o ${TMP_ZIP} -H "Authorization: Bearer $GITHUB_TOKEN" -L ${GITHUB_PR_URL}
fi
# extract k3s binary from zip
unzip -p ${TMP_ZIP} k3s > ${TMP_BIN}
return
elif [ -n "${INSTALL_K3S_COMMIT}" ]; then
BIN_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT} BIN_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}
else else
BIN_URL=${GITHUB_URL}/download/${VERSION_K3S}/k3s${SUFFIX} BIN_URL=${GITHUB_URL}/download/${VERSION_K3S}/k3s${SUFFIX}
@ -563,35 +460,18 @@ setup_selinux() {
fi fi
[ -r /etc/os-release ] && . /etc/os-release [ -r /etc/os-release ] && . /etc/os-release
-if [ `expr "${ID_LIKE}" : ".*suse.*"` != 0 ]; then
+if [ "${ID_LIKE%%[ ]*}" = "suse" ]; then
rpm_target=sle rpm_target=sle
rpm_site_infix=microos rpm_site_infix=microos
package_installer=zypper package_installer=zypper
if [ "${ID_LIKE:-}" = suse ] && ( [ "${VARIANT_ID:-}" = sle-micro ] || [ "${ID:-}" = sle-micro ] ); then
rpm_target=sle
rpm_site_infix=slemicro
package_installer=zypper
fi
elif [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ]; then
rpm_target=coreos
rpm_site_infix=coreos
package_installer=rpm-ostree
elif [ "${VERSION_ID%%.*}" = "7" ]; then elif [ "${VERSION_ID%%.*}" = "7" ]; then
rpm_target=el7 rpm_target=el7
rpm_site_infix=centos/7 rpm_site_infix=centos/7
package_installer=yum package_installer=yum
-elif [ "${VERSION_ID%%.*}" = "8" ] || [ "${VERSION_ID%%.*}" -gt "36" ]; then
+else
rpm_target=el8 rpm_target=el8
rpm_site_infix=centos/8 rpm_site_infix=centos/8
package_installer=yum package_installer=yum
else
rpm_target=el9
rpm_site_infix=centos/9
package_installer=yum
fi
if [ "${package_installer}" = "rpm-ostree" ] && [ -x /bin/yum ]; then
package_installer=yum
fi fi
if [ "${package_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then if [ "${package_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then
@ -600,16 +480,14 @@ setup_selinux() {
policy_hint="please install: policy_hint="please install:
${package_installer} install -y container-selinux ${package_installer} install -y container-selinux
-${package_installer} install -y https://${rpm_site}/k3s/${rpm_channel}/common/${rpm_site_infix}/noarch/${available_version}
+${package_installer} install -y https://${rpm_site}/k3s/${rpm_channel}/common/${rpm_site_infix}/noarch/k3s-selinux-0.4-1.${rpm_target}.noarch.rpm
" "
-if [ "$INSTALL_K3S_SKIP_SELINUX_RPM" = true ] || can_skip_download_selinux || [ ! -d /usr/share/selinux ]; then
+if [ "$INSTALL_K3S_SKIP_SELINUX_RPM" = true ] || can_skip_download || [ ! -d /usr/share/selinux ]; then
info "Skipping installation of SELinux RPM"
-return
+elif [ "${ID_LIKE:-}" != coreos ] && [ "${VARIANT_ID:-}" != coreos ]; then
-fi
-get_k3s_selinux_version
install_selinux_rpm ${rpm_site} ${rpm_channel} ${rpm_target} ${rpm_site_infix}
fi
policy_error=fatal policy_error=fatal
if [ "$INSTALL_K3S_SELINUX_WARN" = true ] || [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ]; then if [ "$INSTALL_K3S_SELINUX_WARN" = true ] || [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ]; then
@ -621,7 +499,7 @@ setup_selinux() {
$policy_error "Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, ${policy_hint}" $policy_error "Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, ${policy_hint}"
fi fi
elif [ ! -f /usr/share/selinux/packages/k3s.pp ]; then elif [ ! -f /usr/share/selinux/packages/k3s.pp ]; then
-if [ -x /usr/sbin/transactional-update ] || [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ]; then
+if [ -x /usr/sbin/transactional-update ]; then
warn "Please reboot your machine to activate the changes and avoid data loss." warn "Please reboot your machine to activate the changes and avoid data loss."
else else
$policy_error "Failed to find the k3s-selinux policy, ${policy_hint}" $policy_error "Failed to find the k3s-selinux policy, ${policy_hint}"
@ -630,7 +508,7 @@ setup_selinux() {
} }
install_selinux_rpm() { install_selinux_rpm() {
-if [ -r /etc/redhat-release ] || [ -r /etc/centos-release ] || [ -r /etc/oracle-release ] || [ -r /etc/fedora-release ] || [ "${ID_LIKE%%[ ]*}" = "suse" ]; then
+if [ -r /etc/redhat-release ] || [ -r /etc/centos-release ] || [ -r /etc/oracle-release ] || [ "${ID_LIKE%%[ ]*}" = "suse" ]; then
repodir=/etc/yum.repos.d repodir=/etc/yum.repos.d
if [ -d /etc/zypp/repos.d ]; then if [ -d /etc/zypp/repos.d ]; then
repodir=/etc/zypp/repos.d repodir=/etc/zypp/repos.d
@ -655,17 +533,9 @@ EOF
sle) sle)
rpm_installer="zypper --gpg-auto-import-keys" rpm_installer="zypper --gpg-auto-import-keys"
if [ "${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then if [ "${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then
transactional_update_run="transactional-update --no-selfupdate -d run"
rpm_installer="transactional-update --no-selfupdate -d run ${rpm_installer}" rpm_installer="transactional-update --no-selfupdate -d run ${rpm_installer}"
: "${INSTALL_K3S_SKIP_START:=true}" : "${INSTALL_K3S_SKIP_START:=true}"
fi fi
# create the /var/lib/rpm-state in SLE systems to fix the prein selinux macro
${transactional_update_run} mkdir -p /var/lib/rpm-state
;;
coreos)
rpm_installer="rpm-ostree --idempotent"
# rpm_install_extra_args="--apply-live"
: "${INSTALL_K3S_SKIP_START:=true}"
;; ;;
*) *)
rpm_installer="yum" rpm_installer="yum"
@ -673,15 +543,6 @@ EOF
esac esac
if [ "${rpm_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then if [ "${rpm_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then
rpm_installer=dnf rpm_installer=dnf
fi
if rpm -q --quiet k3s-selinux; then
# remove k3s-selinux module before upgrade to allow container-selinux to upgrade safely
if check_available_upgrades container-selinux ${3} && check_available_upgrades k3s-selinux ${3}; then
MODULE_PRIORITY=$($SUDO semodule --list=full | grep k3s | cut -f1 -d" ")
if [ -n "${MODULE_PRIORITY}" ]; then
$SUDO semodule -X $MODULE_PRIORITY -r k3s || true
fi
fi
fi fi
# shellcheck disable=SC2086 # shellcheck disable=SC2086
$SUDO ${rpm_installer} install -y "k3s-selinux" $SUDO ${rpm_installer} install -y "k3s-selinux"
@ -689,28 +550,9 @@ EOF
return return
} }
check_available_upgrades() {
set +e
case ${2} in
sle)
available_upgrades=$($SUDO zypper -q -t -s 11 se -s -u --type package $1 | tail -n 1 | grep -v "No matching" | awk '{print $3}')
;;
coreos)
# currently rpm-ostree does not support search functionality https://github.com/coreos/rpm-ostree/issues/1877
;;
*)
available_upgrades=$($SUDO yum -q --refresh list $1 --upgrades | tail -n 1 | awk '{print $2}')
;;
esac
set -e
if [ -n "${available_upgrades}" ]; then
return 0
fi
return 1
}
# --- download and verify k3s --- # --- download and verify k3s ---
download_and_verify() { download_and_verify() {
-if can_skip_download_binary; then
+if can_skip_download; then
info 'Skipping k3s download and verify' info 'Skipping k3s download and verify'
verify_k3s_is_executable verify_k3s_is_executable
return return
@ -798,27 +640,6 @@ killtree() {
) 2>/dev/null ) 2>/dev/null
} }
remove_interfaces() {
# Delete network interface(s) that match 'master cni0'
ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do
iface=${iface%%@*}
[ -z "$iface" ] || ip link delete $iface
done
# Delete cni related interfaces
ip link delete cni0
ip link delete flannel.1
ip link delete flannel-v6.1
ip link delete kube-ipvs0
ip link delete flannel-wg
ip link delete flannel-wg-v6
# Restart tailscale
if [ -n "$(command -v tailscale)" ]; then
tailscale set --advertise-routes=
fi
}
getshims() { getshims() {
ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1 ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1
} }
@ -829,7 +650,7 @@ do_unmount_and_remove() {
set +x set +x
while read -r _ path _; do while read -r _ path _; do
case "$path" in $1*) echo "$path" ;; esac case "$path" in $1*) echo "$path" ;; esac
-done < /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount -f "$0" && rm -rf "$0"'
+done < /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount "$0" && rm -rf "$0"'
set -x set -x
} }
@ -842,11 +663,17 @@ do_unmount_and_remove '/run/netns/cni-'
# Remove CNI namespaces # Remove CNI namespaces
ip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns delete ip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns delete
-remove_interfaces
+# Delete network interface(s) that match 'master cni0'
ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do
iface=${iface%%@*}
[ -z "$iface" ] || ip link delete $iface
done
ip link delete cni0
ip link delete flannel.1
ip link delete flannel-v6.1
rm -rf /var/lib/cni/ rm -rf /var/lib/cni/
-iptables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | iptables-restore
+iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore
-ip6tables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | ip6tables-restore
+ip6tables-save | grep -v KUBE- | grep -v CNI- | ip6tables-restore
EOF EOF
$SUDO chmod 755 ${KILLALL_K3S_SH} $SUDO chmod 755 ${KILLALL_K3S_SH}
$SUDO chown root:root ${KILLALL_K3S_SH} $SUDO chown root:root ${KILLALL_K3S_SH}
@ -902,9 +729,6 @@ rm -f ${KILLALL_K3S_SH}
if type yum >/dev/null 2>&1; then if type yum >/dev/null 2>&1; then
yum remove -y k3s-selinux yum remove -y k3s-selinux
rm -f /etc/yum.repos.d/rancher-k3s-common*.repo rm -f /etc/yum.repos.d/rancher-k3s-common*.repo
elif type rpm-ostree >/dev/null 2>&1; then
rpm-ostree uninstall k3s-selinux
rm -f /etc/yum.repos.d/rancher-k3s-common*.repo
elif type zypper >/dev/null 2>&1; then elif type zypper >/dev/null 2>&1; then
uninstall_cmd="zypper remove -y k3s-selinux" uninstall_cmd="zypper remove -y k3s-selinux"
if [ "\${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then if [ "\${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then
@ -963,7 +787,7 @@ TasksMax=infinity
TimeoutStartSec=0 TimeoutStartSec=0
Restart=always Restart=always
RestartSec=5s RestartSec=5s
-ExecStartPre=/bin/sh -xc '! /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service 2>/dev/null'
+ExecStartPre=/bin/sh -xc '! /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service'
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=${BIN_DIR}/k3s \\
@ -1003,8 +827,8 @@ respawn_delay=5
respawn_max=0 respawn_max=0
set -o allexport set -o allexport
-if [ -f /etc/environment ]; then . /etc/environment; fi
+if [ -f /etc/environment ]; then source /etc/environment; fi
-if [ -f ${FILE_K3S_ENV} ]; then . ${FILE_K3S_ENV}; fi
+if [ -f ${FILE_K3S_ENV} ]; then source ${FILE_K3S_ENV}; fi
set +o allexport set +o allexport
EOF EOF
$SUDO chmod 0755 ${FILE_K3S_SERVICE} $SUDO chmod 0755 ${FILE_K3S_SERVICE}
@ -1020,16 +844,11 @@ EOF
# --- write systemd or openrc service file --- # --- write systemd or openrc service file ---
create_service_file() { create_service_file() {
-[ "${HAS_SYSTEMD}" = true ] && create_systemd_service_file && restore_systemd_service_file_context
+[ "${HAS_SYSTEMD}" = true ] && create_systemd_service_file
[ "${HAS_OPENRC}" = true ] && create_openrc_service_file
return 0
}
restore_systemd_service_file_context() {
$SUDO restorecon -R -i ${FILE_K3S_SERVICE} 2>/dev/null || true
$SUDO restorecon -R -i ${FILE_K3S_ENV} 2>/dev/null || true
}
# --- get hashes of the current k3s bin and service files # --- get hashes of the current k3s bin and service files
get_installed_hashes() { get_installed_hashes() {
$SUDO sha256sum ${BIN_DIR}/k3s ${FILE_K3S_SERVICE} ${FILE_K3S_ENV} 2>&1 || true $SUDO sha256sum ${BIN_DIR}/k3s ${FILE_K3S_SERVICE} ${FILE_K3S_ENV} 2>&1 || true
@ -1058,19 +877,6 @@ openrc_start() {
$SUDO ${FILE_K3S_SERVICE} restart $SUDO ${FILE_K3S_SERVICE} restart
} }
has_working_xtables() {
if $SUDO sh -c "command -v \"$1-save\"" 1> /dev/null && $SUDO sh -c "command -v \"$1-restore\"" 1> /dev/null; then
if $SUDO $1-save 2>/dev/null | grep -q '^-A CNI-HOSTPORT-MASQ -j MASQUERADE$'; then
warn "Host $1-save/$1-restore tools are incompatible with existing rules"
else
return 0
fi
else
info "Host $1-save/$1-restore tools not found"
fi
return 1
}
# --- startup systemd or openrc service --- # --- startup systemd or openrc service ---
service_enable_and_start() { service_enable_and_start() {
if [ -f "/proc/cgroups" ] && [ "$(grep memory /proc/cgroups | while read -r n n n enabled; do echo $enabled; done)" -eq 0 ]; if [ -f "/proc/cgroups" ] && [ "$(grep memory /proc/cgroups | while read -r n n n enabled; do echo $enabled; done)" -eq 0 ];
@ -1091,12 +897,6 @@ service_enable_and_start() {
return return
fi fi
for XTABLES in iptables ip6tables; do
if has_working_xtables ${XTABLES}; then
$SUDO ${XTABLES}-save 2>/dev/null | grep -v KUBE- | grep -iv flannel | $SUDO ${XTABLES}-restore
fi
done
[ "${HAS_SYSTEMD}" = true ] && systemd_start [ "${HAS_SYSTEMD}" = true ] && systemd_start
[ "${HAS_OPENRC}" = true ] && openrc_start [ "${HAS_OPENRC}" = true ] && openrc_start
return 0 return 0
@ -1119,4 +919,3 @@ eval set -- $(escape "${INSTALL_K3S_EXEC}") $(quote "$@")
create_service_file create_service_file
service_enable_and_start service_enable_and_start
} }


@ -0,0 +1,480 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
labels:
app: metallb
name: controller
spec:
allowPrivilegeEscalation: false
allowedCapabilities: []
allowedHostPaths: []
defaultAddCapabilities: []
defaultAllowPrivilegeEscalation: false
fsGroup:
ranges:
- max: 65535
min: 1
rule: MustRunAs
hostIPC: false
hostNetwork: false
hostPID: false
privileged: false
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
ranges:
- max: 65535
min: 1
rule: MustRunAs
seLinux:
rule: RunAsAny
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
volumes:
- configMap
- secret
- emptyDir
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
labels:
app: metallb
name: speaker
spec:
allowPrivilegeEscalation: false
allowedCapabilities:
- NET_RAW
allowedHostPaths: []
defaultAddCapabilities: []
defaultAllowPrivilegeEscalation: false
fsGroup:
rule: RunAsAny
hostIPC: false
hostNetwork: true
hostPID: false
hostPorts:
- max: 7472
min: 7472
- max: 7946
min: 7946
privileged: true
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- secret
- emptyDir
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: metallb
name: speaker
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:controller
rules:
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- services/status
verbs:
- update
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resourceNames:
- controller
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:speaker
rules:
- apiGroups:
- ''
resources:
- services
- endpoints
- nodes
verbs:
- get
- list
- watch
- apiGroups: ["discovery.k8s.io"]
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resourceNames:
- speaker
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: config-watcher
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: pod-lister
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- pods
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
rules:
- apiGroups:
- ''
resources:
- secrets
verbs:
- create
- apiGroups:
- ''
resources:
- secrets
resourceNames:
- memberlist
verbs:
- list
- apiGroups:
- apps
resources:
- deployments
resourceNames:
- controller
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:controller
subjects:
- kind: ServiceAccount
name: controller
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:speaker
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:speaker
subjects:
- kind: ServiceAccount
name: speaker
namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: config-watcher
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: config-watcher
subjects:
- kind: ServiceAccount
name: controller
- kind: ServiceAccount
name: speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: pod-lister
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: pod-lister
subjects:
- kind: ServiceAccount
name: speaker
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: controller
subjects:
- kind: ServiceAccount
name: controller
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: metallb
component: speaker
name: speaker
namespace: metallb-system
spec:
selector:
matchLabels:
app: metallb
component: speaker
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
app: metallb
component: speaker
spec:
containers:
- args:
- --port=7472
- --config=config
- --log-level=info
env:
- name: METALLB_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: METALLB_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: METALLB_ML_BIND_ADDR
valueFrom:
fieldRef:
fieldPath: status.podIP
# needed when another software is also using memberlist / port 7946
# when changing this default you also need to update the container ports definition
# and the PodSecurityPolicy hostPorts definition
#- name: METALLB_ML_BIND_PORT
# value: "7946"
- name: METALLB_ML_LABELS
value: "app=metallb,component=speaker"
- name: METALLB_ML_SECRET_KEY
valueFrom:
secretKeyRef:
name: memberlist
key: secretkey
image: quay.io/metallb/speaker:v0.12.1
name: speaker
ports:
- containerPort: 7472
name: monitoring
- containerPort: 7946
name: memberlist-tcp
- containerPort: 7946
name: memberlist-udp
protocol: UDP
livenessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_RAW
drop:
- ALL
readOnlyRootFilesystem: true
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: speaker
terminationGracePeriodSeconds: 2
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: metallb
component: controller
name: controller
namespace: metallb-system
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app: metallb
component: controller
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
app: metallb
component: controller
spec:
containers:
- args:
- --port=7472
- --config=config
- --log-level=info
env:
- name: METALLB_ML_SECRET_NAME
value: memberlist
- name: METALLB_DEPLOYMENT
value: controller
image: quay.io/metallb/controller:v0.12.1
name: controller
ports:
- containerPort: 7472
name: monitoring
livenessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /metrics
port: monitoring
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: true
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 65534
fsGroup: 65534
serviceAccountName: controller
terminationGracePeriodSeconds: 0


@ -1,5 +1,4 @@
# based on https://github.com/k3s-io/k3s/blob/master/manifests/traefik.yaml
-# required changes: global.systemDefaultRegistry must be set to ""
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
@ -7,7 +6,7 @@ metadata:
name: traefik-crd
namespace: kube-system
spec:
-chart: https://%{KUBERNETES_API}%/static/charts/traefik-crd-25.0.2+up25.0.0.tgz
+chart: https://%{KUBERNETES_API}%/static/charts/traefik-crd-10.19.300.tgz
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
@ -15,10 +14,16 @@ metadata:
name: traefik
namespace: kube-system
spec:
-chart: https://%{KUBERNETES_API}%/static/charts/traefik-25.0.2+up25.0.0.tgz
+chart: https://%{KUBERNETES_API}%/static/charts/traefik-10.19.300.tgz
set:
global.systemDefaultRegistry: ""
valuesContent: |-
rbac:
enabled: true
ports:
websecure:
tls:
enabled: true
podAnnotations:
prometheus.io/port: "8082"
prometheus.io/scrape: "true"
@ -28,8 +33,8 @@ spec:
enabled: true
priorityClassName: "system-cluster-critical"
image:
-repository: "rancher/mirrored-library-traefik"
+name: "rancher/mirrored-library-traefik"
-tag: "2.10.5"
+tag: "2.6.2"
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
@ -42,4 +47,9 @@ spec:
service:
ipFamilyPolicy: "PreferDualStack"
annotations:
-metallb.universe.tf/allow-shared-ip: shared-ip-service-group
+metallb.universe.tf/allow-shared-ip: "shared-ip-service-group"
metallb.universe.tf/address-pool: public
spec:
type: LoadBalancer
externalTrafficPolicy: Cluster


@ -2,7 +2,7 @@ package org.domaindrivenarchitecture.provs.configuration.application
import io.mockk.every
import io.mockk.mockkStatic
-import io.mockk.unmockkAll
+import io.mockk.unmockkStatic
import org.domaindrivenarchitecture.provs.framework.core.*
import org.domaindrivenarchitecture.provs.framework.core.cli.getPasswordToConfigureSudoWithoutPassword
import org.domaindrivenarchitecture.provs.framework.core.docker.provideContainer
@ -24,7 +24,7 @@ class ProvWithSudoKtTest {
fun test_ensureSudoWithoutPassword_local_Prov() {
mockkStatic(::getPasswordToConfigureSudoWithoutPassword)
-every { getPasswordToConfigureSudoWithoutPassword() } returns Secret("testuser")
+every { getPasswordToConfigureSudoWithoutPassword() } returns Secret("testuserpw")
// given
val containerName = "prov-test-sudo-no-pw"
@ -49,8 +49,7 @@ class ProvWithSudoKtTest {
assertFalse(canSudo1)
assertTrue(canSudo2)
-// cleanup
-unmockkAll()
+unmockkStatic(::getPasswordToConfigureSudoWithoutPassword)
}
@ExtensiveContainerTest
@ -58,7 +57,7 @@ class ProvWithSudoKtTest {
// given
val containerName = "prov-test-sudo-no-pw-ssh"
-val password = Secret("testuser")
+val password = Secret("testuserpw")
val prov = Prov.newInstance(
ContainerUbuntuHostProcessor(


@ -31,8 +31,10 @@ internal class TargetCliCommandKtTest {
@AfterAll
@JvmStatic
internal fun afterAll() {
-// cleanup
-unmockkAll()
+unmockkObject(Prov)
+unmockkStatic(::local)
+unmockkStatic(::remote)
+unmockkStatic(::getPasswordToConfigureSudoWithoutPassword)
}
}
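The teardown style differs between the branches: main collapses cleanup into a single unmockkAll(), while 0.25.0 unmocks each mockkObject/mockkStatic registration individually. Both are standard MockK idioms; a compact sketch of the main-side pattern as it appears in these tests:

// main-side style: one call clears every object, static and constructor mock
@AfterAll
@JvmStatic
internal fun afterAll() {
    unmockkAll()
}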


@ -40,7 +40,7 @@ internal class ApplicationKtTest {
val dummyProv = Prov.newInstance(DummyProcessor())
mockkObject(Prov)
-every { Prov.newInstance(any(), any(), any(), any()) } returns dummyProv
+every { Prov.newInstance(any(), any(), any(), any(), ) } returns dummyProv
mockkStatic(::local)
every { local() } returns dummyProv
@ -52,7 +52,7 @@ internal class ApplicationKtTest {
every { getConfig("testconfig.yaml") } returns testConfig
mockkStatic(Prov::provisionDesktop)
-every { any<Prov>().provisionDesktop(any(), any(), any(), any(), any()) } returns ProvResult(
+every { any<Prov>().provisionDesktop(any(), any(), any(), any(), any(),any()) } returns ProvResult(
true,
cmd = "mocked command"
)
@ -65,8 +65,12 @@ internal class ApplicationKtTest {
@AfterAll
@JvmStatic
internal fun afterAll() {
-// cleanup
-unmockkAll()
+unmockkObject(Prov)
+unmockkStatic(::local)
+unmockkStatic(::remote)
+unmockkStatic(::getConfig)
+unmockkStatic(Prov::provisionDesktop)
+unmockkStatic(::getPasswordToConfigureSudoWithoutPassword)
}
}
@ -85,6 +89,7 @@ internal class ApplicationKtTest {
null,
testConfig.gitUserName,
testConfig.gitEmail,
+null
)
}
}
@ -118,7 +123,7 @@ internal class ApplicationKtTest {
"Error: File\u001B[31m idontexist.yaml \u001B[0m was not found.Pls copy file \u001B[31m desktop-config-example.yaml \u001B[0m to file \u001B[31m idontexist.yaml \u001B[0m and change the content according to your needs.No suitable config found."
assertEquals(expectedOutput, outContent.toString().replace("\r", "").replace("\n", ""))
-verify(exactly = 0) { any<Prov>().provisionDesktop(any(), any(), any(), any(), any()) }
+verify(exactly = 0) { any<Prov>().provisionDesktop(any(), any(), any(), any(), any(), any()) }
unmockkStatic(::quit)
}
@ -152,7 +157,7 @@ internal class ApplicationKtTest {
"Error: File \"src/test/resources/invalid-desktop-config.yaml\" has an invalid format and or invalid data.No suitable config found."
assertEquals(expectedOutput, outContent.toString().replace("\r", "").replace("\n", ""))
-verify(exactly = 0) { any<Prov>().provisionDesktop(any(), any(), any(), any(), any()) }
+verify(exactly = 0) { any<Prov>().provisionDesktop(any(), any(), any(), any(), any(), any()) }
unmockkStatic(::quit)
}


@ -1,15 +1,12 @@
package org.domaindrivenarchitecture.provs.desktop.domain
-import io.mockk.*
+import org.domaindrivenarchitecture.provs.framework.core.ProgressType
-import org.domaindrivenarchitecture.provs.configuration.domain.TargetCliCommand
+import org.domaindrivenarchitecture.provs.framework.core.Prov
-import org.domaindrivenarchitecture.provs.desktop.infrastructure.installPpaFirefox
-import org.domaindrivenarchitecture.provs.desktop.infrastructure.verifyIdeSetup
-import org.domaindrivenarchitecture.provs.desktop.infrastructure.verifyOfficeSetup
-import org.domaindrivenarchitecture.provs.framework.core.*
import org.domaindrivenarchitecture.provs.framework.core.docker.provideContainer
+import org.domaindrivenarchitecture.provs.framework.core.local
import org.domaindrivenarchitecture.provs.framework.core.processors.ContainerStartMode
import org.domaindrivenarchitecture.provs.framework.core.processors.ContainerUbuntuHostProcessor
-import org.domaindrivenarchitecture.provs.framework.core.processors.DummyProcessor
+import org.domaindrivenarchitecture.provs.framework.core.remote
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.deleteFile
import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest
@ -38,76 +35,15 @@ internal class DesktopServiceKtTest {
// when
Assertions.assertThrows(Exception::class.java) {
-prov.provisionDesktopCommand(
+prov.provisionDesktop(
-DesktopCliCommand(DesktopType.BASIC, TargetCliCommand("testuser@somehost"), null), DesktopConfig() // dummy data
+DesktopType.BASIC,
+gitUserName = "testuser",
+gitEmail = "testuser@test.org",
+onlyModules = null
)
}
}
@Test
fun provisionDesktop_with_onlyModules_firefox_installs_firefox() {
// given
val prov = Prov.newInstance(DummyProcessor())
mockkStatic(Prov::installPpaFirefox)
every { any<Prov>().installPpaFirefox() } returns ProvResult(true, cmd = "mocked")
// when
prov.provisionOnlyModules(DesktopType.IDE, onlyModules = listOf("firefox"))
// then
verify(exactly = 1) { any<Prov>().installPpaFirefox() }
// cleanup
unmockkAll()
}
@Test
fun provisionDesktop_ide_with_onlyModules_verify_performs_verification() {
// given
val prov = Prov.newInstance(DummyProcessor())
mockkStatic(Prov::verifyIdeSetup)
mockkStatic(Prov::verifyOfficeSetup)
mockkStatic(Prov::provisionBasicDesktop)
every { any<Prov>().verifyIdeSetup() } returns ProvResult(true, cmd = "mocked")
every { any<Prov>().verifyOfficeSetup() } returns ProvResult(true, cmd = "mocked")
every { any<Prov>().provisionBasicDesktop(any(), any(), any(), any()) }
// when
prov.provisionOnlyModules(DesktopType.IDE, onlyModules = listOf("verify"))
// then
verify(exactly = 1) { any<Prov>().verifyIdeSetup() }
verify(exactly = 0) { any<Prov>().verifyOfficeSetup() }
verify(exactly = 0) { any<Prov>().provisionBasicDesktop(any(), any(), any(), any()) }
// cleanup
unmockkAll()
}
@Test
fun provisionDesktop_office_with_onlyModules_verify_performs_verification() {
// given
val prov = Prov.newInstance(DummyProcessor())
mockkStatic(Prov::verifyIdeSetup)
mockkStatic(Prov::verifyOfficeSetup)
mockkStatic(Prov::provisionBasicDesktop)
every { any<Prov>().verifyIdeSetup() } returns ProvResult(true, cmd = "mocked")
every { any<Prov>().verifyOfficeSetup() } returns ProvResult(true, cmd = "mocked")
every { any<Prov>().provisionBasicDesktop(any(), any(), any(), any()) }
// when
prov.provisionOnlyModules(DesktopType.OFFICE, onlyModules = listOf("verify"))
// then
verify(exactly = 0) { any<Prov>().verifyIdeSetup() }
verify(exactly = 1) { any<Prov>().verifyOfficeSetup() }
verify(exactly = 0) { any<Prov>().provisionBasicDesktop(any(), any(), any(), any()) }
// cleanup
unmockkAll()
}
@ExtensiveContainerTest
@Disabled("Takes very long, enable if you want to test a desktop setup")
fun provisionDesktop() {
@ -120,6 +56,7 @@ internal class DesktopServiceKtTest {
DesktopType.BASIC,
gitUserName = "testuser",
gitEmail = "testuser@test.org",
+onlyModules = null
)
// then
@ -144,6 +81,7 @@ internal class DesktopServiceKtTest {
DesktopType.IDE,
gitUserName = "testuser",
gitEmail = "testuser@test.org",
+onlyModules = null
)
// then
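The tests removed from the main side above exercised provisionOnlyModules by stubbing Prov extension functions (installPpaFirefox, verifyIdeSetup, verifyOfficeSetup) with mockkStatic and asserting call counts. A condensed sketch of that mocking pattern, using only names visible in the removed code:

// condensed from the removed main-side test: stub an extension function and verify it was hit
mockkStatic(Prov::installPpaFirefox)
every { any<Prov>().installPpaFirefox() } returns ProvResult(true, cmd = "mocked")
prov.provisionOnlyModules(DesktopType.IDE, onlyModules = listOf("firefox"))
verify(exactly = 1) { any<Prov>().installPpaFirefox() }
unmockkAll()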


@ -2,11 +2,11 @@ package org.domaindrivenarchitecture.provs.desktop.domain
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.deleteFile
import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptInstall
-import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.addKnownHost
+import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.KNOWN_HOSTS_FILE
-import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.isKnownHost
import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.domaindrivenarchitecture.provs.test.tags.ContainerTest
-import org.junit.jupiter.api.Assertions.*
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Assertions.assertTrue
import org.junit.jupiter.api.Test
@ -18,7 +18,7 @@ class KnownHostTest {
val prov = defaultTestContainer() val prov = defaultTestContainer()
prov.task { prov.task {
aptInstall("ssh") aptInstall("ssh")
deleteFile("~/.ssh/known_hosts") deleteFile(KNOWN_HOSTS_FILE)
} }
// when // when
@ -30,10 +30,10 @@ class KnownHostTest {
// Subclass of KnownHost for test knownHostSubclass_includes_additional_host // Subclass of KnownHost for test knownHostSubclass_includes_additional_host
class KnownHostsSubclass(hostName: String, port: Int?, hostKeys: List<HostKey>): KnownHost(hostName, port, hostKeys) { class KnownHostsSubclass(hostName: String, hostKeys: List<HostKey>): KnownHost(hostName, hostKeys) {
companion object { companion object {
val ANOTHER_HOST = KnownHostsSubclass("anotherhost.com", 2222, listOf("key1")) val ANOTHER_HOST = KnownHostsSubclass("anotherhost.com", listOf("key1"))
fun values(): List<KnownHost> { fun values(): List<KnownHost> {
return values + ANOTHER_HOST return values + ANOTHER_HOST
@ -50,44 +50,5 @@ class KnownHostTest {
assertTrue(hosts.size > 1) assertTrue(hosts.size > 1)
assertEquals("key1", hosts.last().hostKeys[0]) assertEquals("key1", hosts.last().hostKeys[0])
} }
@ContainerTest
fun knownHost_with_port_verified_successfully() {
// given
val prov = defaultTestContainer()
prov.task {
aptInstall("ssh")
deleteFile("~/.ssh/known_hosts")
}
// when
assertFalse(prov.isKnownHost(KnownHost.GITHUB.hostName))
assertFalse(prov.isKnownHost(KnownHost.GITHUB.hostName, 22))
val res = prov.addKnownHost(KnownHost(KnownHost.GITHUB.hostName, 22, KnownHost.GITHUB.hostKeys), verifyKeys = true)
// then
assertTrue(res.success)
assertFalse(prov.isKnownHost(KnownHost.GITHUB.hostName))
assertTrue(prov.isKnownHost(KnownHost.GITHUB.hostName, 22))
}
@ContainerTest
fun knownHost_with_port_verification_failing() {
// given
val prov = defaultTestContainer()
prov.task {
aptInstall("ssh")
deleteFile("~/.ssh/known_hosts")
}
// when
assertFalse(prov.isKnownHost(KnownHost.GITHUB.hostName, 80))
val res2 = prov.addKnownHost(KnownHost(KnownHost.GITHUB.hostName, 80, KnownHost.GITHUB.hostKeys), verifyKeys = true)
// then
assertFalse(res2.success)
assertFalse(prov.isKnownHost(KnownHost.GITHUB.hostName))
assertFalse(prov.isKnownHost(KnownHost.GITHUB.hostName, 80))
}
} }
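The tests above (including the removed knownHost_with_port_* cases) expect addKnownHost with verifyKeys = true to accept a host key only if the host actually serves it. A standalone sketch of such a check against ssh-keyscan output (assuming ssh-keyscan is available; keyServedByHost is a hypothetical helper, not the provs implementation):

// Hedged sketch: verify a candidate host key against the keys a host actually serves,
// roughly the check exercised by the verifyKeys = true tests above. Not the provs code.
import java.util.concurrent.TimeUnit

fun keyServedByHost(host: String, port: Int = 22, candidateKey: String): Boolean {
    // ssh-keyscan prints lines like: "github.com ssh-ed25519 AAAA..."
    val process = ProcessBuilder("ssh-keyscan", "-p", port.toString(), host).start()
    val served = process.inputStream.bufferedReader().readLines()
    process.waitFor(30, TimeUnit.SECONDS)
    // Compare only "type key", ignoring the leading host field.
    val wanted = candidateKey.trim().split(" ").take(2).joinToString(" ")
    return served.any { line ->
        val fields = line.trim().split(" ")
        fields.size >= 3 && fields.drop(1).take(2).joinToString(" ") == wanted
    }
}

Under this sketch, a genuine ed25519 key scanned on port 22 would verify, while scanning port 80 or passing a corrupted key would not, mirroring the passing and failing verification cases above.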
@ -1,7 +1,10 @@
package org.domaindrivenarchitecture.provs.desktop.infrastructure package org.domaindrivenarchitecture.provs.desktop.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.getResourceAsText import org.domaindrivenarchitecture.provs.framework.core.getResourceAsText
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.* import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.checkFile
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createDir
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createDirs
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.fileContainsText
import org.domaindrivenarchitecture.provs.test.defaultTestContainer import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest
import org.junit.jupiter.api.Assertions.assertTrue import org.junit.jupiter.api.Assertions.assertTrue
@ -45,17 +48,4 @@ internal class DevOpsKtTest {
// then // then
assertTrue(res.success) assertTrue(res.success)
} }
@ExtensiveContainerTest
fun installKubeconform() {
// given
val prov = defaultTestContainer()
// when
val res = prov.installKubeconform()
// then
assertTrue(res.success)
assertTrue(prov.checkFile("/usr/local/bin/kubeconform"))
}
} }
@ -1,21 +1,17 @@
package org.domaindrivenarchitecture.provs.desktop.infrastructure package org.domaindrivenarchitecture.provs.desktop.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.Secret
import org.domaindrivenarchitecture.provs.framework.core.remote import org.domaindrivenarchitecture.provs.framework.core.remote
import org.domaindrivenarchitecture.provs.test.defaultTestContainer import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.domaindrivenarchitecture.provs.test.tags.ContainerTest import org.domaindrivenarchitecture.provs.test.tags.ContainerTest
import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.KeyPair import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.KeyPair
import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.configureGpgKeys import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.configureGpgKeys
import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.gpgFingerprint import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.gpgFingerprint
import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.secretSources.GopassSecretSource
import org.junit.jupiter.api.Assertions.assertTrue import org.junit.jupiter.api.Assertions.assertTrue
import org.junit.jupiter.api.Disabled import org.junit.jupiter.api.Disabled
import org.junit.jupiter.api.Test import org.junit.jupiter.api.Test
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.* import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.*
import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.privateGPGSnakeoilKey
import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.secretSources.PromptSecretSource
import org.domaindrivenarchitecture.provs.framework.ubuntu.user.base.makeCurrentUserSudoerWithoutPasswordRequired
import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest
import org.domaindrivenarchitecture.provs.test_keys.publicGPGSnakeoilKey
import org.junit.jupiter.api.Assertions.assertFalse import org.junit.jupiter.api.Assertions.assertFalse
@ -60,32 +56,24 @@ internal class GopassKtTest {
} }
@Test @Test
@Disabled // This is an integration test, which needs preparation: @Disabled // Integrationtest; change user, host and keys, then remove this line to run this test
// Pls change user, host and remote connection (choose connection either by password or by ssh key)
// then remove tag @Disabled to be able to run this test.
// PREREQUISITE: remote machine needs openssh-server installed
fun test_install_and_configure_Gopass_and_GopassBridgeJsonApi() { fun test_install_and_configure_Gopass_and_GopassBridgeJsonApi() {
// host and user // settings to change
val host = "192.168.56.154" val host = "192.168.56.135"
val user = "xxx" val user = "xxx"
val pubKey = GopassSecretSource("path-to/pub.key").secret()
val privateKey = GopassSecretSource("path-to/priv.key").secret()
// connection by password // given
val pw = PromptSecretSource("Pw for $user").secret() val prov = remote(host, user)
val prov = remote(host, user, pw)
prov.makeCurrentUserSudoerWithoutPasswordRequired(pw) // may be commented out if user can already sudo without password
// or alternatively use connection by ssh key if the public key is already available remotely
// val prov = remote(host, user)
val pubKey = Secret(publicGPGSnakeoilKey())
val privateKey = Secret(privateGPGSnakeoilKey())
// when // when
val res = prov.task { val res = prov.task {
configureGpgKeys( configureGpgKeys(
KeyPair(pubKey, privateKey), KeyPair(
pubKey,
privateKey
),
trust = true, trust = true,
skipIfExistin = true skipIfExistin = true
) )
@ -1,27 +0,0 @@
package org.domaindrivenarchitecture.provs.desktop.infrastructure
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.checkFile
import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest
import org.junit.jupiter.api.Assertions.*
class GraalVMKtTest {
@ExtensiveContainerTest
fun installGraalVM() {
// given
val prov = defaultTestContainer()
// when
val res = prov.task {
installGraalVM()
installGraalVM() // test repeatability
}
// then
assertTrue(res.success)
assertTrue(GRAAL_VM_VERSION == prov.graalVMVersion())
assertTrue(prov.checkFile("/usr/local/bin/native-image"))
}
}
@ -1,42 +0,0 @@
package org.domaindrivenarchitecture.provs.desktop.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.docker.exitAndRmContainer
import org.domaindrivenarchitecture.provs.framework.core.local
import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest
import org.junit.jupiter.api.Assertions.assertFalse
import org.junit.jupiter.api.Assertions.assertTrue
import org.junit.jupiter.api.Test
internal class HugoTest {
@ExtensiveContainerTest
fun test_installHugoByDeb() {
// given
local().exitAndRmContainer("provs_test")
val prov = defaultTestContainer()
// when
val res = prov.installHugoByDeb()
// then
assertTrue(res.success)
}
@Test
fun test_needsHugoInstall() {
// given
val hugoNull = null
val hugoLow = "hugo v0.0.0-abc+extended linux/amd64 BuildDate=0000-00-00 VendorInfo=snap:0.0.0"
val hugoMajHigh = "hugo v1.0.0-abc+extended linux/amd64 BuildDate=0000-00-00 VendorInfo=snap:1.0.0"
val hugoMinHigh = "hugo v0.1.0-abc+extended linux/amd64 BuildDate=0000-00-00 VendorInfo=snap:0.1.0"
val hugoPatHigh = "hugo v0.0.1-abc+extended linux/amd64 BuildDate=0000-00-00 VendorInfo=snap:0.0.1"
assertTrue(needsHugoInstall(hugoNull, hugoPatHigh))
assertTrue(needsHugoInstall(hugoLow, hugoPatHigh))
assertTrue(needsHugoInstall(hugoLow, hugoMinHigh))
assertTrue(needsHugoInstall(hugoLow, hugoMajHigh))
assertFalse(needsHugoInstall(hugoMajHigh, hugoLow))
assertFalse(needsHugoInstall(hugoMinHigh, hugoLow))
assertFalse(needsHugoInstall(hugoPatHigh, hugoLow))
}
}
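test_needsHugoInstall above encodes the expected ordering of hugo version strings: install when nothing is installed or when the desired major/minor/patch is higher, otherwise skip. A sketch of that comparison, assuming the "hugo vX.Y.Z-..." output format shown in the test data (parseHugoVersion and needsInstall are hypothetical helpers, not the removed provs code):

// Hedged sketch of a version check like the one the removed HugoTest exercises.
// Assumes "hugo v0.1.2-..." style strings as in the test data above.
private val hugoVersionRegex = Regex("""hugo v(\d+)\.(\d+)\.(\d+)""")

fun parseHugoVersion(versionOutput: String?): List<Int>? =
    versionOutput
        ?.let { hugoVersionRegex.find(it) }
        ?.groupValues
        ?.drop(1)
        ?.map { it.toInt() }

fun needsInstall(installedOutput: String?, desiredOutput: String): Boolean {
    val installed = parseHugoVersion(installedOutput) ?: return true   // nothing installed or unparsable
    val desired = parseHugoVersion(desiredOutput) ?: return false
    // compare [major, minor, patch] at the first position where they differ
    return installed.zip(desired).firstOrNull { (a, b) -> a != b }?.let { (a, b) -> a < b } ?: false
}

With this sketch, needsInstall(null, hugoPatHigh) and needsInstall(hugoLow, hugoMajHigh) come out true while needsInstall(hugoMajHigh, hugoLow) is false, matching the assertions above.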
@ -1,7 +1,9 @@
package org.domaindrivenarchitecture.provs.desktop.infrastructure package org.domaindrivenarchitecture.provs.desktop.infrastructure
import com.charleskorn.kaml.InvalidPropertyValueException import com.charleskorn.kaml.InvalidPropertyValueException
import org.domaindrivenarchitecture.provs.configuration.domain.ConfigFileName
import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSourceType import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSourceType
import org.domaindrivenarchitecture.provs.server.infrastructure.getK3sConfig
import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.Test import org.junit.jupiter.api.Test
import org.junit.jupiter.api.assertThrows import org.junit.jupiter.api.assertThrows
@ -32,7 +34,7 @@ internal class K3SDesktopConfigRepositoryKtTest {
val exception = assertThrows<InvalidPropertyValueException> { val exception = assertThrows<InvalidPropertyValueException> {
getConfig("src/test/resources/invalid-desktop-config.yaml") getConfig("src/test/resources/invalid-desktop-config.yaml")
} }
assertEquals("Value for 'sourceType' is invalid: Value 'xxx' is not a valid option, permitted choices are: ENV, FILE, GOPASS, PASS, PLAIN, PROMPT", exception.message) assertEquals("Value for 'sourceType' is invalid: Value 'xxx' is not a valid option, permitted choices are: FILE, GOPASS, PASS, PLAIN, PROMPT", exception.message)
} }
@Test @Test
@ -3,10 +3,8 @@ package org.domaindrivenarchitecture.provs.desktop.infrastructure
import org.domaindrivenarchitecture.provs.test.defaultTestContainer import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest
import org.junit.jupiter.api.Assertions.assertTrue import org.junit.jupiter.api.Assertions.assertTrue
import org.junit.jupiter.api.Disabled
internal class NextcloudClientTest { internal class NextcloudClientTest {
@Disabled // test is hanging sometimes
@ExtensiveContainerTest @ExtensiveContainerTest
fun test_installNextcloudClient() { fun test_installNextcloudClient() {
// when // when
@ -9,9 +9,9 @@ internal class PythonKtTest {
@ExtensiveContainerTest @ExtensiveContainerTest
fun test_provisionPython() { fun test_provisionPython() {
// when // when
val result = defaultTestContainer().provisionPython() val res = defaultTestContainer().provisionPython()
// then // then
assertTrue(result.success) assertTrue(res.success)
} }
} }
@ -1,8 +1,7 @@
package org.domaindrivenarchitecture.provs.desktop.infrastructure package org.domaindrivenarchitecture.provs.desktop.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.remote
import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptInstall import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptInstall
import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.secretSources.PromptSecretSource import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.junit.jupiter.api.Assertions.assertTrue import org.junit.jupiter.api.Assertions.assertTrue
import org.junit.jupiter.api.Disabled import org.junit.jupiter.api.Disabled
import org.junit.jupiter.api.Test import org.junit.jupiter.api.Test
@ -10,19 +9,14 @@ import org.junit.jupiter.api.Test
internal class VSCodeKtTest { internal class VSCodeKtTest {
@Test @Test
@Disabled("Remote testing by updating connection details below, then enable test and run it manually.") @Disabled("Test currently not working, needs fix. VSC is installed by snapd which is not currently supported to run inside docker")
// Remark: VSCode installs with snap, which does not run in container and cannot be tested by container test. fun installVSC() {
fun installVSCode() {
// given // given
val prov = remote("192.168.56.153", "xx", PromptSecretSource("Remote password").secret()) // machine needs openssh-server installed and sudo possible without pw val a = defaultTestContainer()
prov.aptInstall("xvfb libgbm-dev libasound2") a.aptInstall("xvfb libgbm-dev libasound2")
// when // when
val res = prov.task { val res = a.installVSC("python", "clojure")
installVSCode("python", "clojure")
cmd("code -v")
cmd("codium -v")
}
// then // then
assertTrue(res.success) assertTrue(res.success)
@ -33,34 +33,33 @@ internal class ProvTest {
assertTrue(res) assertTrue(res)
} }
@Test @ContainerTest
fun sh() { fun sh() {
// given // given
val script = """ val script = """
# test some script commands # test some script commands
echo something1 ping -c1 hetzner.com
pwd echo something
echo something3
""" """
// when // when
val res = Prov.newInstance(name = "provs_test").sh(script).success val res = Prov.newInstance(name = "testing").sh(script).success
// then // then
assertTrue(res) assertTrue(res)
} }
@ContainerTest @ContainerTest
@NonCi // sudo might not be available @NonCi
fun sh_with_dir_and_sudo() { fun sh_with_dir_and_sudo() {
// given // given
val script = """ val script = """
# test some script commands # test some script commands
pwd ping -c1 hetzner.com
echo something1 echo something
echo something2 # with comment behind command echo 1 # comment behind command
""" """
// when // when
@ -103,8 +103,7 @@ internal class UbuntuProvTest {
// then // then
assertFalse(result.success) assertFalse(result.success)
val expectedMsg = "a password is required" assertEquals("sudo: no tty present and no askpass program specified\n", result.err)
assertTrue(result.err?.contains(expectedMsg) ?: false, "Error: [$expectedMsg] is not found in [${result.err}]")
} }
} }
@ -120,12 +119,12 @@ class UbuntuUserNeedsPasswordForSudo(private val userName: String = "testuser")
override fun imageText(): String { override fun imageText(): String {
return """ return """
FROM ubuntu:22.04 FROM ubuntu:18.04
ARG DEBIAN_FRONTEND=noninteractive ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get -y install sudo RUN apt-get update && apt-get -y install sudo
RUN useradd -m $userName && echo "$userName:$userName" | chpasswd && usermod -aG sudo $userName RUN useradd -m $userName && echo "$userName:$userName" | chpasswd && adduser $userName sudo
USER $userName USER $userName
CMD /bin/bash CMD /bin/bash
@ -34,7 +34,7 @@ class ContainerUbuntuHostProcessorTest {
// given // given
val containerName = "prov-test-ssh-with-container" val containerName = "prov-test-ssh-with-container"
val password = Secret("testuser") val password = Secret("testuserpw")
val prov = Prov.newInstance( val prov = Prov.newInstance(
ContainerUbuntuHostProcessor( ContainerUbuntuHostProcessor(
@ -57,8 +57,8 @@ class ContainerUbuntuHostProcessorTest {
val remoteProvBySsh = remote(ipOfContainer, "testuser", password) val remoteProvBySsh = remote(ipOfContainer, "testuser", password)
// when // when
val firstSessionResult = remoteProvBySsh.cmd("echo 1") // connect (to container) by ssh via ip val firstSessionResult = remoteProvBySsh.cmd("echo 1")
val secondSessionResult = remoteProvBySsh.cmd("echo 2") // second connect after first connection has been closed val secondSessionResult = remoteProvBySsh.cmd("echo 1")
// then // then
assertTrue(firstSessionResult.success) assertTrue(firstSessionResult.success)
@ -6,7 +6,6 @@ import org.domaindrivenarchitecture.provs.framework.core.escapeProcentForPrintf
import org.domaindrivenarchitecture.provs.framework.core.escapeSingleQuoteForShell import org.domaindrivenarchitecture.provs.framework.core.escapeSingleQuoteForShell
import org.junit.jupiter.api.Assertions.* import org.junit.jupiter.api.Assertions.*
import org.junit.jupiter.api.Test import org.junit.jupiter.api.Test
import java.nio.file.Paths
internal class LocalProcessorTest { internal class LocalProcessorTest {
@ -25,18 +24,6 @@ internal class LocalProcessorTest {
assertTrue(res.out == text) assertTrue(res.out == text)
} }
@Test
fun cmd_in_folder_where_program_was_started() {
// given
val prov = Prov.newInstance(LocalProcessor(false))
// when
val pwd = prov.cmd("pwd").outTrimmed
// then
assertEquals(Paths.get("").toAbsolutePath().toString(), pwd)
}
@Test @Test
fun cmd_with_nested_shell_and_printf() { fun cmd_with_nested_shell_and_printf() {
@ -72,7 +59,7 @@ internal class LocalProcessorTest {
@Test @Test
fun cmd_forUnknownCommand_resultWithError() { fun cmd_forUnkownCommand_resultWithError() {
// given // given
val prov = Prov.newInstance() val prov = Prov.newInstance()
@ -1,95 +0,0 @@
package org.domaindrivenarchitecture.provs.framework.ubuntu.cron.infrastructure
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.checkFile
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createDirs
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.createFile
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.fileContent
import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptInstall
import org.domaindrivenarchitecture.provs.framework.ubuntu.user.base.whoami
import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.junit.jupiter.api.Assertions.*
import org.junit.jupiter.api.Disabled
import org.junit.jupiter.api.Test
class CronJobsKtTest {
@Test
fun createCronJob_creates_cron_file() {
// given
val prov = defaultTestContainer()
val cronFilename = "50_test_cron"
// when
val result = prov.createCronJob(cronFilename, "0 * * * *", "echo hello > /dev/null 2>&1")
// then
assertTrue(result.success)
val fqFilename = "/etc/cron.d/$cronFilename"
assertTrue(prov.checkFile(fqFilename), "")
val actualFileContent = prov.fileContent(fqFilename, sudo = true)
val expectedUser = prov.whoami()
assertEquals("0 * * * * $expectedUser echo hello > /dev/null 2>&1\n", actualFileContent)
}
@Test
@Disabled // only for manual execution and manual check for the created files
// Test if cron-job is actually running, but needs manual checks
fun createCronJob_which_creates_files_with_timestamp() {
// given
val prov = defaultTestContainer()
val cronFilename = "90_time_cron"
// for cron in docker see e.g. https://serverfault.com/questions/924779/docker-cron-not-working
prov.task {
aptInstall("cron")
cmd("sudo touch /var/log/cron.log") // Create the log file
optional { // may already be running
cmd("sudo cron") // Run cron
}
cmd("pgrep cron") // Ensure cron is running
}
prov.createDirs("tmp")
val user = prov.whoami()
// when
val result = prov.createCronJob(
cronFilename,
"*/1 * * * *",
"echo \"xxx\" > /home/$user/tmp/\$(/usr/bin/date +\\%Y_\\%m_\\%d-\\%H_\\%M)"
)
// then
assertTrue(result.success)
val fqFilename = "/etc/cron.d/$cronFilename"
assertTrue(prov.checkFile(fqFilename), "File does not exist: $fqFilename")
// after a minute check manually if files exist, e.g. with: sudo docker exec provs_test /bin/bash -c "ls -l tmp"
// each minute a new file should be created with the timestamp
}
@Test
fun scheduleMonthlyReboot() {
// given
val prov = defaultTestContainer()
// create dummy shutdown in test container if missing (containers do usually not have shutdown installed)
prov.createFile(
"/sbin/shutdown",
"dummy file for test of scheduleMonthlyReboot",
sudo = true,
overwriteIfExisting = false
)
// when
val result = prov.scheduleMonthlyReboot()
// then
assertTrue(result.success)
val fqFilename = "/etc/cron.d/50_monthly_reboot"
assertTrue(prov.checkFile(fqFilename), "")
val actualFileContent = prov.fileContent(fqFilename, sudo = true)
assertEquals("0 3 1-7 * 2 root shutdown -r now\n", actualFileContent)
}
}
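createCronJob_creates_cron_file and scheduleMonthlyReboot above assert on files under /etc/cron.d whose single line has the form "schedule user command" plus a trailing newline. A sketch of composing such an entry (cronDEntry is a hypothetical helper, not provs' createCronJob):

// Hedged sketch: compose the content of an /etc/cron.d entry in the
// "schedule user command" form the assertions above expect. Not the provs implementation.
fun cronDEntry(schedule: String, user: String, command: String): String {
    require(schedule.trim().split(Regex("\\s+")).size == 5) { "expected 5 cron fields, got: $schedule" }
    return "$schedule $user $command\n"   // /etc/cron.d needs the user field and a trailing newline
}

fun main() {
    // Analogous to the expectation in createCronJob_creates_cron_file, here with user "testuser".
    println(cronDEntry("0 * * * *", "testuser", "echo hello > /dev/null 2>&1"))
}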
@ -184,7 +184,6 @@ internal class FilesystemKtTest {
val res7 = prov.createDirs("test/testdir") val res7 = prov.createDirs("test/testdir")
val res8 = prov.checkDir("testdir", "~/test") val res8 = prov.checkDir("testdir", "~/test")
prov.deleteDir("testdir", "~/test/") prov.deleteDir("testdir", "~/test/")
val res9 = prov.deleteDir("notexistingdirdir", "~/")
// then // then
assertFalse(res1) assertFalse(res1)
@ -195,7 +194,6 @@ internal class FilesystemKtTest {
assertFalse(res6) assertFalse(res6)
assertTrue(res7.success) assertTrue(res7.success)
assertTrue(res8) assertTrue(res8)
assertTrue(res9.success)
} }
@ -3,7 +3,7 @@ package org.domaindrivenarchitecture.provs.framework.ubuntu.git.base
import org.domaindrivenarchitecture.provs.desktop.domain.addKnownHosts import org.domaindrivenarchitecture.provs.desktop.domain.addKnownHosts
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.checkDir import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.checkDir
import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptInstall import org.domaindrivenarchitecture.provs.framework.ubuntu.install.base.aptInstall
import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.isKnownHost import org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base.isHostKnown
import org.domaindrivenarchitecture.provs.test.defaultTestContainer import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.domaindrivenarchitecture.provs.test.tags.ContainerTest import org.domaindrivenarchitecture.provs.test.tags.ContainerTest
import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest import org.domaindrivenarchitecture.provs.test.tags.ExtensiveContainerTest
@ -24,8 +24,8 @@ internal class GitKtTest {
// then // then
assertTrue(res.success) assertTrue(res.success)
assertTrue(a.isKnownHost("github.com"), "github.com does not seem to be a known host") assertTrue(a.isHostKnown("github.com"), "github.com does not seem to be a known host")
assertTrue(a.isKnownHost("gitlab.com"), "gitlab.com does not seem to be a known host") assertTrue(a.isHostKnown("gitlab.com"), "gitlab.com does not seem to be a known host")
} }
@ExtensiveContainerTest @ExtensiveContainerTest
@ -1,6 +1,5 @@
package org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base package org.domaindrivenarchitecture.provs.framework.ubuntu.keys.base
import org.domaindrivenarchitecture.provs.desktop.domain.KnownHost
import org.domaindrivenarchitecture.provs.framework.core.Secret import org.domaindrivenarchitecture.provs.framework.core.Secret
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.deleteFile import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.deleteFile
import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.fileContainsText import org.domaindrivenarchitecture.provs.framework.ubuntu.filesystem.base.fileContainsText
@ -11,8 +10,6 @@ import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.domaindrivenarchitecture.provs.test.tags.ContainerTest import org.domaindrivenarchitecture.provs.test.tags.ContainerTest
import org.junit.jupiter.api.Assertions.* import org.junit.jupiter.api.Assertions.*
const val KNOWN_HOSTS_FILE = "~/.ssh/known_hosts"
internal class SshKtTest { internal class SshKtTest {
@ContainerTest @ContainerTest
fun configureSshKeys_for_ssh_type_rsa() { fun configureSshKeys_for_ssh_type_rsa() {
@ -51,7 +48,7 @@ internal class SshKtTest {
} }
@ContainerTest @ContainerTest
fun addKnownHost_without_verification() { fun addKnownHost() {
// given // given
val prov = defaultTestContainer() val prov = defaultTestContainer()
prov.task { prov.task {
@ -60,8 +57,10 @@ internal class SshKtTest {
} }
// when // when
val res = prov.addKnownHost(KnownHost("github.com", listOf("dummyProtocol dummyKey", "dummyProtocol2 dummyKey2", ))) val res = prov.addKnownHost("github.com", listOf("dummyProtocol dummyKey", "dummyProtocol2 dummyKey2", ))
val res2 = prov.addKnownHost(KnownHost("github.com", listOf("dummyProtocol dummyKey", "dummyProtocol2 dummyKey2", ))) val res2 = prov.addKnownHost("github.com", listOf("dummyProtocol dummyKey", "dummyProtocol2 dummyKey2", ))
val res3 = prov.addKnownHost("github.com", listOf("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl", ), verifyKeys = true)
val res4 = prov.addKnownHost("github.com", listOf("ssh-ed25519 AAAAC3Nzalwrongkey!!!", ), verifyKeys = true)
// then // then
assertTrue(res.success) assertTrue(res.success)
@ -71,32 +70,8 @@ internal class SshKtTest {
assertTrue(res2.success) assertTrue(res2.success)
val keyCount = prov.cmd("grep -o -i dummyKey2 $KNOWN_HOSTS_FILE | wc -l").out?.trim() val keyCount = prov.cmd("grep -o -i dummyKey2 $KNOWN_HOSTS_FILE | wc -l").out?.trim()
assertEquals("1", keyCount) assertEquals("1", keyCount)
}
@ContainerTest assertTrue(res3.success)
fun addKnownHost_with_verifications() { assertFalse(res4.success)
// given
val prov = defaultTestContainer()
prov.task {
aptInstall("ssh")
deleteFile(KNOWN_HOSTS_FILE)
}
// when
val res1 = prov.addKnownHost(KnownHost.GITHUB, verifyKeys = true)
val res2 = prov.addKnownHost(KnownHost.GITHUB, verifyKeys = true)
val invalidKey = "ssh-ed25519 AAAAC3Nzalwrongkey!!!"
val res3 = prov.addKnownHost(KnownHost("github.com", listOf(invalidKey )), verifyKeys = true)
// then
assertTrue(res1.success)
assertTrue(prov.fileContainsText(KNOWN_HOSTS_FILE, KnownHost.GITHUB.hostKeys[0]))
assertTrue(res2.success)
assertTrue(prov.fileContainsText(KNOWN_HOSTS_FILE, KnownHost.GITHUB.hostKeys[0]))
assertFalse(res3.success)
assertFalse(prov.fileContainsText(KNOWN_HOSTS_FILE, invalidKey))
} }
} }
@ -1,14 +0,0 @@
package org.domaindrivenarchitecture.provs.framework.ubuntu.secret.secretSources
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.Disabled
import org.junit.jupiter.api.Test
internal class EnvSecretSourceTest {
@Test
@Disabled // set env variable "envtest=envtestval" externally e.g. in IDE and run manually
fun secret() {
assertEquals("envtestval", EnvSecretSource("envtest").secret().plain())
}

@ -1,35 +0,0 @@
package org.domaindrivenarchitecture.provs.server.infrastructure
import org.domaindrivenarchitecture.provs.configuration.domain.ConfigFileName
import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSourceType
import org.domaindrivenarchitecture.provs.framework.ubuntu.secret.SecretSupplier
import org.domaindrivenarchitecture.provs.server.domain.hetzner_csi.HetznerCSIConfig
import org.domaindrivenarchitecture.provs.server.domain.k8s_grafana_agent.GrafanaAgentConfig
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.Test
internal class HetznerCSIRepositoryKtTest {
@Test
fun findHetznerCSIConfig_returns_config() {
// when
val config = findHetznerCSIConfig(ConfigFileName("src/test/resources/k3s-server-config-with-hetzner.yaml"))
// then
assertEquals(
HetznerCSIConfig(
hcloudApiToken = SecretSupplier(SecretSourceType.GOPASS, "path/to/apitoken"),
encryptionPassphrase = SecretSupplier(SecretSourceType.GOPASS, "path/to/encryption"),
), config
)
}
@Test
fun findHetznerCSIConfig_returns_null_if_no_hetzner_data_available() {
// when
val config = findHetznerCSIConfig(ConfigFileName("src/test/resources/k3s-server-config.yaml"))
// then
assertEquals(null, config)
}

@ -0,0 +1,126 @@
package org.domaindrivenarchitecture.provs.server.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.*
import org.domaindrivenarchitecture.provs.framework.core.docker.containerExec
import org.domaindrivenarchitecture.provs.framework.core.docker.provideContainer
import org.domaindrivenarchitecture.provs.framework.core.processors.ContainerStartMode
import org.domaindrivenarchitecture.provs.server.domain.applyK8sConfig
import org.domaindrivenarchitecture.provs.server.domain.installK3sAsContainers
import org.domaindrivenarchitecture.provs.test.tags.ContainerTest
import org.domaindrivenarchitecture.provs.test.tags.NonCi
import org.junit.jupiter.api.Assertions.assertTrue
import org.junit.jupiter.api.Disabled
internal class K3dKtTest {
@Disabled // remove line and execute manually as this test may take several minutes
@ContainerTest
@NonCi
fun installK3sAsContainers() {
// given
val containerName = "alpine-docker-dind"
local().task {
provideContainer(
containerName,
"yobasystems/alpine-docker:dind-amd64",
ContainerStartMode.CREATE_NEW_KILL_EXISTING, // for re-create a potentially existing container
sudo = false,
options = "--privileged"
)
// alpine does not have bash pre-installed - but bash is currently required for provs
containerExec(containerName, "sh -c \"apk add bash\"", sudo = false)
}
val result = docker(containerName, sudo = false).task {
cmd("apk update")
cmd("apk add sudo curl")
task(
"Install kubectl"
) {
sh("""
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl
kubectl version --client
""".trimIndent())
}
// when
installK3sAsContainers()
applyK8sConfig(appleConfig())
cmd("kubectl wait --for=condition=ready --timeout=600s pod apple-app")
checkAppleService()
}
// then
assertTrue(result.success)
}
}
/**
* Checks if URL "$host/apple" is available and return text "apple"
*/
private fun Prov.checkAppleService(host: String = "127.0.0.1") = requireLast {
// repeat required as curl may return with "empty reply from server" or with "Recv failure: Connection reset by peer"
val res = repeatTaskUntilSuccess(12, 10) {
cmd("curl -m 30 $host/apple")
}.out?.trim()
if ("apple" == res) {
ProvResult(true, out = res)
} else {
ProvResult(false, err = "Url $host/apple did not return text \"apple\" but returned: $res")
}
}
fun appleConfig() =
"""
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
name: apple-ingress
annotations:
kubernetes.io/ingress.class: "traefik"
spec:
rules:
- http:
paths:
- path: /apple
pathType: Prefix
backend:
service:
name: apple-service
port:
number: 5678
---
kind: Pod
apiVersion: v1
metadata:
name: apple-app
labels:
app: apple
spec:
containers:
- name: apple-app
image: hashicorp/http-echo
args:
- "-text=apple"
---
kind: Service
apiVersion: v1
metadata:
name: apple-service
spec:
selector:
app: apple
ports:
- port: 5678 # Default port for image
"""
@ -1,45 +0,0 @@
package org.domaindrivenarchitecture.provs.server.infrastructure
import org.domaindrivenarchitecture.provs.framework.core.local
import org.domaindrivenarchitecture.provs.framework.core.remote
import org.domaindrivenarchitecture.provs.server.domain.k3s.K3sConfig
import org.domaindrivenarchitecture.provs.server.domain.k3s.Node
import org.domaindrivenarchitecture.provs.server.domain.k3s.provisionK3s
import org.junit.jupiter.api.Assertions.assertTrue
import org.junit.jupiter.api.Disabled
import org.junit.jupiter.api.Test
import java.lang.Thread.sleep
class K3sKtTest {
@Test // Extensive test, takes several minutes
@Disabled("1. update remoteIp and user, 2. enable remote ssh connection and 3. enable this test and then 4. run manually")
fun installK3s() {
// given
val remoteIp = "192.168.56.146"
val user = "xxx"
// enable remote ssh connection either manually or by the commented-out code below to copy local authorized_keys to remote
// remote(remoteIp, user, PromptSecretSource("PW for $user on $remoteIp").secret()).task {
// val authorizedKeysFilename = ".ssh/authorized_keys"
// val publicSshKey = local().getSecret("cat $authorizedKeysFilename") ?: Secret("") // or set directly by: val publicSshKey = Secret("public ssh key")
// createDir(".ssh")
// createSecretFile(authorizedKeysFilename, publicSshKey, posixFilePermission = "0644")
// }
// when
val res = remote(remoteIp, user).task { // connect by ssh
provisionK3s(K3sConfig(remoteIp, Node(remoteIp), echo = true, reprovision = false))
}
// then
assertTrue(res.success)
// check response echo pod
sleep(10000) // if time too short, increase or check curl manually
val echoResponse = local().cmd("curl http://$remoteIp/echo/").out
assertTrue(echoResponse?.contains("Hostname: echo-app") ?: false)
assertTrue(echoResponse?.contains("Host: $remoteIp") ?: false)
}
}
@ -1,23 +0,0 @@
package org.domaindrivenarchitecture.provs.server.infrastructure
import org.domaindrivenarchitecture.provs.test.defaultTestContainer
import org.domaindrivenarchitecture.provs.test.tags.ContainerTest
import org.junit.jupiter.api.Assertions.assertTrue
class K9sKtTest {
@ContainerTest
fun installK9s() {
// given
val prov = defaultTestContainer()
// when
val res = prov.task {
installK9s()
installK9s() // test repeatability
}
// then
assertTrue(res.success)
}
}
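Both the removed GraalVM test and installK9s above call the installer twice to check repeatability. A sketch of the guard that typically makes such an install idempotent (installOnce is a hypothetical helper, not the provs implementation):

// Hedged sketch of an idempotence guard like the one the "test repeatability" calls above rely on.
import java.io.File

fun installOnce(binary: String, targetDir: String = "/usr/local/bin", install: () -> Unit): Boolean {
    val target = File(targetDir, binary)
    if (target.exists() && target.canExecute()) {
        return true   // already installed: a second run becomes a no-op
    }
    install()
    return target.exists()
}

Wrapping the download and install steps in installOnce("k9s") { ... } would make the second call a no-op, which is the behaviour the repeatability calls are meant to exercise.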
@ -1,18 +0,0 @@
fqdn: statistics.test.meissa-gmbh.de
node:
ipv4: 162.55.164.138
ipv6: 2a01:4f8:c010:672f::1
certmanager:
email: admin@meissa-gmbh.de
letsencryptEndpoint: prod
echo: true
reprovision: true
hetzner:
hcloudApiToken:
source: "GOPASS" # PLAIN, GOPASS or PROMPT
parameter: "path/to/apitoken" # the api key for the hetzner cloud
encryptionPassphrase:
source: "GOPASS" # PLAIN, GOPASS or PROMPT
parameter: "path/to/encryption" # the encryption passphrase for created volumes
Some files were not shown because too many files have changed in this diff.