{"id":1060,"date":"2022-04-20T13:50:25","date_gmt":"2022-04-20T10:50:25","guid":{"rendered":"https:\/\/www.bandidor.info\/wp\/?p=1060"},"modified":"2023-04-22T16:46:17","modified_gmt":"2023-04-22T13:46:17","slug":"docker-kubernetes-and-co","status":"publish","type":"post","link":"https:\/\/www.bandidor.info\/wp\/?p=1060","title":{"rendered":"Docker, Kubernetes, and Co."},"content":{"rendered":"\n<h1 class=\"wp-block-heading\">Prepare hosts<\/h1>\n\n\n\n<h2 class=\"wp-block-heading\">DNS Setup<\/h2>\n\n\n\n<pre class=\"wp-block-code\"><code>root@master-node:~# cat \/etc\/hosts\n127.0.0.1 localhost\n192.168.2.201 master-node\n192.168.2.202 slave-node1\n192.168.2.203 slave-node2\n192.168.2.204 slave-node3\n\n# The following lines are desirable for IPv6 capable hosts\n::1 localhost ip6-localhost ip6-loopback\nff02::1 ip6-allnodes\nff02::2 ip6-allrouters<\/code><\/pre>\n\n\n\n<h2 class=\"wp-block-heading\">Add kernel modules<\/h2>\n\n\n\n<pre class=\"wp-block-code\"><code>root@master-node:~# cat \/etc\/modules\n# \/etc\/modules: kernel modules to load at boot time.\n#\noverlay\nbr_netfilter\nnf_conntrack\n<\/code><\/pre>\n\n\n\n<h2>Install packages for VirtualBox additions<\/h2>\n<p><code>apt-get install linux-headers-$(uname -r)<\/code><\/p>\n<p><code>apt-get install build-essential<\/code><\/p>\n<p>Install VirtualBox additions after that.<\/p>\n<h2>Install NFS packages<\/h2>\n<p>Server (master-node)<\/p>\n<p><code>apt install nfs-kernel-server<\/code><\/p>\n<p>Worker nodes<\/p>\n<p><code>apt install nfs-common<\/code><\/p>\n<h2>Configure NFS shares<\/h2>\n\n\n\n<p>Create volumes\/directories to be shared<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>root@master-node:~# mkdir -p \/var\/nfs\/vol\nroot@master-node:~# mkdir -p \/var\/nfs\/vol1\nroot@master-node:~# mkdir -p \/var\/nfs\/vol2\nroot@master-node:~# mkdir -p \/var\/nfs\/vol3<\/code><\/pre>\n\n\n\n<p>Configure shares<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>root@master-node:~# cat 
\/etc\/exports\n\/var\/nfs\/vol    192.168.2.0\/24(rw,sync,no_root_squash,no_subtree_check)\n\/var\/nfs\/vol1   192.168.2.0\/24(rw,sync,no_root_squash,no_subtree_check)\n\/var\/nfs\/vol2   192.168.2.0\/24(rw,sync,no_root_squash,no_subtree_check)\n\/var\/nfs\/vol3   192.168.2.0\/24(rw,sync,no_root_squash,no_subtree_check)<\/code><\/pre>\n\n\n\n<p>Configure service<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>root@master-node:~# systemctl is-enabled nfs-server\nenabled\nroot@master-node:~# systemctl restart nfs-server<\/code><\/pre>\n\n\n\n<p>Verify access from the server and from the worker nodes<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>root@master-node:~# showmount --exports 192.168.2.201\nExport list for 192.168.2.201:\n\/var\/nfs\/vol3 192.168.2.0\/24\n\/var\/nfs\/vol2 192.168.2.0\/24\n\/var\/nfs\/vol1 192.168.2.0\/24\n\/var\/nfs\/vol  192.168.2.0\/24<\/code><\/pre>\n\n\n\n<pre class=\"wp-block-code\"><code>root@slave-node1:~# showmount --exports 192.168.2.201\nExport list for 192.168.2.201:\n\/var\/nfs\/vol3 192.168.2.0\/24\n\/var\/nfs\/vol2 192.168.2.0\/24\n\/var\/nfs\/vol1 192.168.2.0\/24\n\/var\/nfs\/vol  192.168.2.0\/24<\/code><\/pre>\n\n\n\n<h2 class=\"wp-block-heading\">Disable IPv6<\/h2>\n\n\n\n\n<pre class=\"terminal\"><code><br>vi \/etc\/sysctl.conf<br>&#8230;<br>net.ipv6.conf.all.disable_ipv6=1<br>net.ipv6.conf.default.disable_ipv6=1<br>net.ipv6.conf.lo.disable_ipv6=1<br><\/code><\/pre>\n\n\n\n\n<h1 class=\"wp-block-heading\">Start installing Kubernetes cluster<\/h1>\n\n\n\n<h2 class=\"wp-block-heading\">k0s<\/h2>\n\n\n\n<p><a href=\"https:\/\/techviewleo.com\/how-to-backup-and-restore-k0s-kubernetes-cluster\/\">https:\/\/techviewleo.com\/how-to-backup-and-restore-k0s-kubernetes-cluster\/<\/a><\/p>\n\n\n\n<p><a href=\"https:\/\/docs.k0sproject.io\/v1.23.6+k0s.1\/k0sctl-install\/\">https:\/\/docs.k0sproject.io\/v1.23.6+k0s.1\/k0sctl-install\/<\/a><\/p>\n\n\n\n<p><a 
href=\"https:\/\/techviewleo.com\/deploy-kubernetes-cluster-on-debian-using-k0s\/\">https:\/\/techviewleo.com\/deploy-kubernetes-cluster-on-debian-using-k0s\/<\/a><\/p>\n\n\n\n<h2 class=\"wp-block-heading\">MetalLB (Updated)<\/h2>\n\n\n\n<p>Follow the instruction here: <a href=\"https:\/\/computingforgeeks.com\/deploy-metallb-load-balancer-on-kubernetes\/\" title=\"\">https:\/\/computingforgeeks.com\/deploy-metallb-load-balancer-on-kubernetes\/<\/a><\/p>\n\n\n\n<p>To install MetalLB, apply the manifest:<\/p>\n\n\n\n\n<pre class=\"terminal\"><code><\/p>\n<p><code>kubectl apply -f https:\/\/raw.githubusercontent.com\/metallb\/metallb\/v0.13.9\/config\/manifests\/metallb-native.yaml<\/code><\/p>\n<p><\/code><\/pre>\n\n\n\n\n<p>Instead of adding a configmap, we now add a resource definition for IP address pool:<\/p>\n\n\n<div class=\"wp-block-syntaxhighlighter-code \"><pre class=\"brush: yaml; gutter: false; title: ; notranslate\" title=\"\">\napiVersion: metallb.io\/v1beta1\nkind: L2Advertisement\nmetadata:\n  name: l2-ip\n  namespace: metallb-system\nspec:\n  ipAddressPools:\n  - default-pool\n---\napiVersion: metallb.io\/v1beta1\nkind: IPAddressPool\nmetadata:\n  name: default-pool\n  namespace: metallb-system\nspec:\n  addresses:\n  - 192.168.2.240\/28\n<\/pre><\/div>\n\n\n<p><\/p>\n\n\n\n<p><\/p>\n\n\n\n<h2 class=\"wp-block-heading\">Install Helm<\/h2>\n\n\n\n<p><a href=\"https:\/\/helm.sh\/docs\/intro\/install\/#from-apt-debianubuntu\">https:\/\/helm.sh\/docs\/intro\/install\/#from-apt-debianubuntu<\/a><\/p>\n\n\n\n<h2 class=\"wp-block-heading\">Kubernetes NFS Subdir External Provisioner<\/h2>\n\n\n\n<p><a href=\"https:\/\/github.com\/kubernetes-sigs\/nfs-subdir-external-provisioner\">https:\/\/github.com\/kubernetes-sigs\/nfs-subdir-external-provisioner<\/a><\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>root@master-node:~# helm repo add nfs-subdir-external-provisioner 
https:\/\/kubernetes-sigs.github.io\/nfs-subdir-external-provisioner\/\n\"nfs-subdir-external-provisioner\" has been added to your repositories\n\nroot@master-node:~# helm install nfs-subdir-external-provisioner nfs-subdir-external-provisioner\/nfs-subdir-external-provisioner --set nfs.server=192.168.2.201 --set nfs.path=\/var\/nfs\/vol1\nNAME: nfs-subdir-external-provisioner\nLAST DEPLOYED: Thu Apr 28 16:17:19 2022\nNAMESPACE: default\nSTATUS: deployed\nREVISION: 1\nTEST SUITE: None<\/code><\/pre>\n\n\n\n<h2 class=\"wp-block-heading\">Make NFS storage class default<\/h2>\n\n\n\n\n<pre class=\"terminal\"><code><br>kubectl patch storageclass nfs-client -p &#039;{&quot;metadata&quot;: {&quot;annotations&quot;:{&quot;storageclass.kubernetes.io\/is-default-class&quot;:&quot;true&quot;}}}&#039;<\/p>\n\n\n\n<p>olegme@master-node:~\/k0s$ kubectl get storageclass<br>NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE<br>nfs-client (default) cluster.local\/nfs-release-nfs-subdir-external-provisioner Delete Immediate true 8m12s<\/p>\n\n\n\n<p><\/code><\/pre>\n\n\n\n\n<h2 class=\"wp-block-heading\">Configure example POD to use the external NFS Provisioner<\/h2>\n\n\n\n<p>Vaguely based on <a href=\"https:\/\/kubernetes.io\/docs\/tasks\/configure-pod-container\/configure-persistent-volume-storage\/\">https:\/\/kubernetes.io\/docs\/tasks\/configure-pod-container\/configure-persistent-volume-storage\/<\/a><\/p>\n\n\n\n<p>Crucial element is in the PV Claim definition:<\/p>\n\n\n<div class=\"wp-block-syntaxhighlighter-code \"><pre class=\"brush: yaml; highlight: [7]; title: ; notranslate\" title=\"\">\nroot@master-node:~\/k0s\/manifests# cat pv-claim.yaml\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: task-pv-claim\nspec:\n  storageClassName: nfs-client\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 1Gi\n<\/pre><\/div>\n\n\n<p>Note <code>storageClassName <\/code>attribute 
<code><strong>nfs-client<\/strong><\/code>, it is the same as the <code>storageClass <\/code>definition we created in the previous step.<\/p>\n\n\n\n<h2 class=\"wp-block-heading\">Alternative to NFS Provisioner &#8211; Local Path Provisioner<\/h2>\n\n\n\n<p>To be found here &#8211; <a href=\"https:\/\/github.com\/rancher\/local-path-provisioner\" title=\"\">https:\/\/github.com\/rancher\/local-path-provisioner<\/a><\/p>\n\n\n\n<p>Lately, I experienced some strange issues with PostgreSQL installation using NFS provisioner and couldn&#8217;t really find the root cause. So I decided to move over to this simplest one. Don&#8217;t forget to change the <strong>default storage class<\/strong>!<\/p>\n\n\n\n<h1 class=\"wp-block-heading\">Install PostgreSQL cluster<\/h1>\n\n\n\n<h2 class=\"wp-block-heading\">PostgreSQL Operator<\/h2>\n\n\n\n<p>Start with the instruction from <a href=\"https:\/\/github.com\/zalando\/postgres-operator\">https:\/\/github.com\/zalando\/postgres-operator<\/a> up to the point where you are ready to deploy the first PostgreSQL cluster via the GUI. 
Then use GUI to create a skeleton of the deployment YAML.<\/p>\n\n\n\n<h2 class=\"wp-block-heading\">Create cluster<\/h2>\n\n\n\n<p>Start with the GUI:<\/p>\n\n\n\n<figure class=\"wp-block-image size-large is-resized\"><a href=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-1.png\"><img loading=\"lazy\" decoding=\"async\" src=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-1-1024x576.png\" alt=\"\" class=\"wp-image-1112\" width=\"840\" height=\"472\" srcset=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-1-1024x576.png 1024w, https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-1-300x169.png 300w, https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-1-768x432.png 768w, https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-1.png 1117w\" sizes=\"auto, (max-width: 840px) 100vw, 840px\" \/><\/a><figcaption class=\"wp-element-caption\">More instances will create more standby pods and probably are not needed. 
Also add user(s) and database(s):<\/figcaption><\/figure>\n\n\n\n<figure class=\"wp-block-image size-large\"><a href=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-2.png\"><img loading=\"lazy\" decoding=\"async\" width=\"1024\" height=\"506\" src=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-2-1024x506.png\" alt=\"\" class=\"wp-image-1113\" srcset=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-2-1024x506.png 1024w, https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-2-300x148.png 300w, https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-2-768x380.png 768w, https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-2.png 1163w\" sizes=\"auto, (max-width: 1024px) 100vw, 1024px\" \/><\/a><\/figure>\n\n\n\n<p>But don&#8217;t hit <strong><span style=\"background-color:#00d084\" class=\"tadv-background-color\"><span style=\"color:#ffffff\" class=\"tadv-color\">Create Cluster<\/span><\/span><\/strong> button, use <span style=\"background-color:#0693e3\" class=\"tadv-background-color\"><span style=\"color:#ffffff\" class=\"tadv-color\">Copy <\/span><\/span>instead and put it into a YAML file: <\/p>\n\n\n<div class=\"wp-block-syntaxhighlighter-code \"><pre class=\"brush: yaml; highlight: [20,27]; title: ; notranslate\" title=\"\">\nroot@master-node:~\/k0s\/manifests# vi pgtest4.yaml\nkind: &quot;postgresql&quot;\napiVersion: &quot;acid.zalan.do\/v1&quot;\n\nmetadata:\n  name: &quot;acid-pgtest4&quot;\n  namespace: &quot;default&quot;\n  labels:\n    team: acid\n\nspec:\n  teamId: &quot;acid&quot;\n  postgresql:\n    version: &quot;14&quot;\n  numberOfInstances: 2\n  enableMasterLoadBalancer: true\n  enableConnectionPooler: false\n  volume:\n    size: &quot;20Gi&quot;\n    storageClass: nfs-client\n  users:\n    camunda: &#x5B;]\n  databases:\n    camunda: camunda\n  allowedSourceRanges:\n    # IP ranges to access your cluster go here\n  - 0.0.0.0\/0\n\n 
 resources:\n    requests:\n      cpu: 100m\n      memory: 100Mi\n    limits:\n      cpu: 500m\n      memory: 500Mi\n\n<\/pre><\/div>\n\n\n<p>Two critical attributes cannot be added via GUI. <code><strong>storageClass: nfs-client<\/strong><\/code> will make sure that the PV Claims created request volumes from the NFS Provisioner and <code><strong>allowedSourceRanges: 0.0.0.0\/0<\/strong><\/code> will allow connection to the database cluster from any IP. If not specified, it will allow connections only from the <code>localhost<\/code>. This can be refined later.<\/p>\n\n\n\n<p>Apply the created YAML file:<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>root@master-node:~\/k0s\/manifests# kubectl apply -f pgtest4.yaml\npostgresql.acid.zalan.do\/acid-pgtest4 created<\/code><\/pre>\n\n\n\n<p>And after a while observe your new PostgreSQL cluster being created:<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>root@master-node:~\/k0s\/manifests# kubectl get svc -w\nNAME                   TYPE           CLUSTER-IP       EXTERNAL-IP     PORT(S)          AGE\nacid-pgtest2           LoadBalancer   10.99.248.241    192.168.2.241   5432:32015\/TCP   16h\nacid-pgtest2-config    ClusterIP      None             &lt;none&gt;          &lt;none&gt;           16h\nacid-pgtest2-pooler    ClusterIP      10.97.131.198    &lt;none&gt;          5432\/TCP         16h\nacid-pgtest2-repl      ClusterIP      10.96.181.13     &lt;none&gt;          5432\/TCP         16h\nacid-pgtest3           LoadBalancer   10.103.54.35     192.168.2.242   5432:32518\/TCP   13h\nacid-pgtest3-config    ClusterIP      None             &lt;none&gt;          &lt;none&gt;           13h\nacid-pgtest3-repl      ClusterIP      10.105.80.150    &lt;none&gt;          5432\/TCP         13h\nacid-pgtest4           LoadBalancer   10.102.124.164   192.168.2.243   5432:32511\/TCP   2m6s\nacid-pgtest4-config    ClusterIP      None             &lt;none&gt;          &lt;none&gt;           2m\nacid-pgtest4-repl      ClusterIP      
10.99.60.140     &lt;none&gt;          5432\/TCP         2m6s\nkubernetes             ClusterIP      10.96.0.1        &lt;none&gt;          443\/TCP          2d17h\nmy-nginx               LoadBalancer   10.111.11.252    192.168.2.240   80:31001\/TCP     25h\nnginx-deployment       NodePort       10.104.72.240    &lt;none&gt;          80:30598\/TCP     2d\npostgres-operator      ClusterIP      10.101.22.126    &lt;none&gt;          8080\/TCP         17h\npostgres-operator-ui   ClusterIP      10.107.252.167   &lt;none&gt;          80\/TCP           17h<\/code><\/pre>\n\n\n\n<p>Our new Service is now up and available on the IP 192.168.2.243. Let&#8217;s check it.<br>We will need a password for our user <code>camunda<\/code>. We can do the following:<\/p>\n\n\n<div class=\"wp-block-syntaxhighlighter-code \"><pre class=\"brush: plain; title: ; notranslate\" title=\"\">\nroot@master-node:~\/k0s\/manifests# export PGPASSWORD=$(kubectl get secret camunda.acid-pgtest4.credentials.postgresql.acid.zalan.do -o &#039;jsonpath={.data.password}&#039; | base64 -d)               \nroot@master-node:~\/k0s\/manifests# echo $PGPASSWORD\nue2qB7ZQaykqAORAhpEvkps4GuvTrOYBzdnvf7XPirCyFtZVWWozJr6P5ggVjnW3\nroot@master-node:~\/k0s\/manifests#\n<\/pre><\/div>\n\n\n<p>In the first line above <code>camunda <\/code>is the user name and <code>acid-pgtest4<\/code> is the name of the service. 
With the above we set an environment variable, which will be used in <code>psql <\/code>command:<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>root@master-node:~\/k0s\/manifests# psql -U camunda -h 192.168.2.243\npsql (13.5 (Debian 13.5-0+deb11u1), server 14.2 (Ubuntu 14.2-1.pgdg18.04+1))\nWARNING: psql major version 13, server major version 14.\n         Some psql features might not work.\nSSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, bits: 256, compression: off)\nType \"help\" for help.\n\ncamunda=&gt; \\l\n                                  List of databases\n   Name    |  Owner   | Encoding |   Collate   |    Ctype    |   Access privileges\n-----------+----------+----------+-------------+-------------+-----------------------\n camunda   | camunda  | UTF8     | en_US.utf-8 | en_US.utf-8 |\n postgres  | postgres | UTF8     | en_US.utf-8 | en_US.utf-8 |\n template0 | postgres | UTF8     | en_US.utf-8 | en_US.utf-8 | =c\/postgres          +\n           |          |          |             |             | postgres=CTc\/postgres\n template1 | postgres | UTF8     | en_US.utf-8 | en_US.utf-8 | =c\/postgres          +\n           |          |          |             |             | postgres=CTc\/postgres\n(4 rows)\n\ncamunda=&gt;<\/code><\/pre>\n\n\n\n<p>The database can be now accessed from the host machine as well:<\/p>\n\n\n<div class=\"wp-block-image is-style-default\">\n<figure class=\"aligncenter size-full\"><a href=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-3.png\"><img loading=\"lazy\" decoding=\"async\" width=\"451\" height=\"326\" src=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-3.png\" alt=\"\" class=\"wp-image-1122\" srcset=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-3.png 451w, https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-3-300x217.png 300w\" sizes=\"auto, (max-width: 451px) 100vw, 451px\" \/><\/a><\/figure>\n<\/div>\n\n\n<h1 
 class=\"wp-block-heading\">Camunda Engine<\/h1>\n\n\n\n<h2 class=\"wp-block-heading\">Install via Helm chart<\/h2>\n\n\n\n<p>Starting point:&nbsp;<a href=\"https:\/\/camunda.com\/blog\/2021\/08\/releasing-camunda-platform-community-helm-chart\/\">https:\/\/camunda.com\/blog\/2021\/08\/releasing-camunda-platform-community-helm-chart\/<\/a><\/p>\n<p>Secret creation differs a bit, with a different user and also the password, which we already set up for PostgreSQL access in the previous part.<\/p>\n\n\n<div class=\"wp-block-syntaxhighlighter-code \"><pre class=\"brush: plain; title: ; notranslate\" title=\"\">\nroot@master-node:~# kubectl create secret generic                   \\\n    camunda-bpm-platform-db-credentials             \\\n    --from-literal=DB_USERNAME=camunda \\\n    --from-literal=DB_PASSWORD=ue2qB7ZQaykqAORAhpEvkps4GuvTrOYBzdnvf7XPirCyFtZVWWozJr6P5ggVjnW3\nsecret\/camunda-bpm-platform-db-credentials created\n<\/pre><\/div>\n\n\n<p>Number of instances can be adjusted in the <code>values.yaml<\/code>. 
The file will look like this:<\/p>\n\n\n<div class=\"wp-block-syntaxhighlighter-code \"><pre class=\"brush: plain; title: ; notranslate\" title=\"\">\nroot@master-node:~# cat k0s\/manifests\/values.yaml\n# Default values for camunda-bpm-platform.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\ngeneral:\n  debug: false\n  replicaCount: 3\n  nameOverride: &quot;&quot;\n  fullnameOverride: &quot;camunda-bpm-platform&quot;\n\nimage:\n  repository: camunda\/camunda-bpm-platform\n  tag: latest\n  pullPolicy: IfNotPresent\n  pullSecrets: &#x5B;]\n  command: &#x5B;]\n  args: &#x5B;]\n\n# By default H2 database is used, which is handy for demos and tests,\n# however, H2 is not supported in a clustered scenario.\n# So for real-world workloads, an external database like PostgreSQL should be used.\ndatabase:\n  driver: org.postgresql.Driver\n  url: jdbc:postgresql:\/\/acid-pgtest4:5432\/camunda\n  credentialsSecretName: camunda-bpm-platform-db-credentials\n\nservice:\n  type: LoadBalancer\n  port: 8080\n  portName: http\n\nmetrics:\n  enabled: false\n  service:\n    type: ClusterIP\n    port: 9404\n    portName: metrics\n    annotations:\n      prometheus.io\/scrape: &quot;true&quot;\n      prometheus.io\/path: &quot;\/&quot;\n      prometheus.io\/port: &quot;9404&quot;\n\n...\n\n<\/pre><\/div>\n\n\n<p>Note that the service type has to be set to LoadBalancer if we deploy more than one instance. This has one caveat, though. With the current Helm chart, it is impossible to specify the sessionAffinity attribute. The web interface will behave somewhat weirdly if we don&#8217;t do that. For example, you won&#8217;t be able to disable the Telemetry consent pop-up or won&#8217;t be able to log in at all. Obviously, the Helm chart has to be adjusted, but the short-term solution would be to patch the service configuration post-install. 
In my case, I was able to accomplish this directly from Lens.<\/p>\n\n\n\n<p>Here is an excerpt from its manifest after the patching:<\/p>\n\n\n<div class=\"wp-block-image\">\n<figure class=\"aligncenter size-full\"><a href=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-5.png\"><img loading=\"lazy\" decoding=\"async\" width=\"772\" height=\"470\" src=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-5.png\" alt=\"\" class=\"wp-image-1134\" srcset=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-5.png 772w, https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-5-300x183.png 300w, https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-5-768x468.png 768w\" sizes=\"auto, (max-width: 772px) 100vw, 772px\" \/><\/a><\/figure>\n<\/div>\n\n\n<p><strong>Update.<\/strong> Here is also a command line version of the patching process:<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>kubectl patch svc camunda-bpm-platform -p '{\"spec\": {\"sessionAffinity\": \"ClientIP\"}}'<\/code><\/pre>\n\n\n\n<h2 class=\"wp-block-heading\">Confirm we can access the engine<\/h2>\n\n\n<div class=\"wp-block-image\">\n<figure class=\"aligncenter size-large\"><a href=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-6.png\"><img loading=\"lazy\" decoding=\"async\" width=\"1024\" height=\"720\" src=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-6-1024x720.png\" alt=\"\" class=\"wp-image-1137\" srcset=\"https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-6-1024x720.png 1024w, https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-6-300x211.png 300w, https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-6-768x540.png 768w, https:\/\/www.bandidor.info\/wp\/wp-content\/uploads\/2022\/04\/image-6.png 1107w\" sizes=\"auto, (max-width: 1024px) 100vw, 1024px\" \/><\/a><\/figure>\n<\/div>\n\n\n<p><\/p>\n\n\n\n<h1 
class=\"wp-block-heading\">To be continued&#8230;<\/h1>\n\n\n\n<p>What we have achieved so far. We have installed a bare metal Kubernetes cluster with <code>k0s<\/code>, we have a working PostgreSQL cluster and a working Camunda 7.17 installation.<br>Let&#8217;s fill it with life and continue to the next article <a href=\"https:\/\/www.bandidor.info\/wp\/?p=1144\" title=\"Docker, Kubernetes, and Co. \u2013 part II\">Docker, Kubernetes, and Co. \u2013 part II<\/a><\/p>\n\n\n\n<p>You may also want to take a look at one of the previously published articles as we will be doing something similar: <a href=\"https:\/\/www.bandidor.info\/wp\/?p=857\" title=\"Camunda on Oracle Cloud \u2013 will it work?\">Camunda on Oracle Cloud \u2013 will it work?<\/a><\/p>\n\n\n\n<p> <\/p>\n\n\n","protected":false},"excerpt":{"rendered":"<p>Prepare hosts DNS Setup Add kernel modules Install packages for VirtualBox additions apt-get install linux-headers-$(uname -r) apt-get install build-essential Install VirtualBox additions after that. 
Install NFS packages Server (master-node) apt install nfs-kernel-server Worker nodes apt install nfs-common Configure NFS shares Create volumes\/directories to be shared Configure shares Configure service Verify access from the server and&#8230;<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"ngg_post_thumbnail":0,"jetpack_post_was_ever_published":false,"_jetpack_newsletter_access":"","_jetpack_dont_email_post_to_subs":false,"_jetpack_newsletter_tier_id":0,"_jetpack_memberships_contains_paywalled_content":false,"_jetpack_memberships_contains_paid_content":false,"footnotes":"","jetpack_publicize_message":"","jetpack_publicize_feature_enabled":true,"jetpack_social_post_already_shared":true,"jetpack_social_options":{"image_generator_settings":{"template":"highway","default_image_id":0,"font":"","enabled":false},"version":2}},"categories":[43,34,39],"tags":[42,35],"class_list":["post-1060","post","type-post","status-publish","format-standard","hentry","category-bpmn","category-camunda","category-kubernetes","tag-bpmn","tag-camunda"],"jetpack_publicize_connections":[],"jetpack_featured_media_url":"","jetpack_sharing_enabled":true,"jetpack_shortlink":"https:\/\/wp.me\/p2EszU-h6","_links":{"self":[{"href":"https:\/\/www.bandidor.info\/wp\/index.php?rest_route=\/wp\/v2\/posts\/1060","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/www.bandidor.info\/wp\/index.php?rest_route=\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/www.bandidor.info\/wp\/index.php?rest_route=\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/www.bandidor.info\/wp\/index.php?rest_route=\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/www.bandidor.info\/wp\/index.php?rest_route=%2Fwp%2Fv2%2Fcomments&post=1060"}],"version-history":[{"count":62,"href":"https:\/\/www.bandidor.info\/wp\/index.php?rest_route=\/wp\/v2\/posts\/1060\/revisions
"}],"predecessor-version":[{"id":1354,"href":"https:\/\/www.bandidor.info\/wp\/index.php?rest_route=\/wp\/v2\/posts\/1060\/revisions\/1354"}],"wp:attachment":[{"href":"https:\/\/www.bandidor.info\/wp\/index.php?rest_route=%2Fwp%2Fv2%2Fmedia&parent=1060"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/www.bandidor.info\/wp\/index.php?rest_route=%2Fwp%2Fv2%2Fcategories&post=1060"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/www.bandidor.info\/wp\/index.php?rest_route=%2Fwp%2Fv2%2Ftags&post=1060"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}