
Kubernetes

Documentation covering Kubernetes (k8s) technologies.

In this section

  • 🎡 Helm

    Administration

    • See what is currently installed
    helm list -A
    NAME    NAMESPACE       REVISION        UPDATED                                 STATUS          CHART           APP VERSION
    nesux3  default         1               2022-08-12 20:01:16.0982324 +0200 CEST  deployed        nexus3-1.0.6    3.37.3
    
    • Install/Uninstall
    helm status nesux3
    helm uninstall nesux3
    helm install nexus3 <chart>
    helm history nexus3

    # works even if the release is already installed
    helm upgrade --install ingress-nginx ${DIR}/helm/ingress-nginx \
      --namespace=ingress-nginx \
      --create-namespace \
      -f ${DIR}/helm/ingress-values.yml

    # Make Helm forget an app (it does not delete the app's resources)
    kubectl delete secret -l owner=helm,name=argo-cd
    
    • Handle Helm repos and charts
    # Handle repos
    helm repo list
    helm repo add gitlab https://charts.gitlab.io/
    helm repo update

    # Pretty useful for configuration
    helm show values elastic/eck-operator
    helm show values grafana/grafana --version 8.5.1

    # See the different versions available
    helm search repo hashicorp/vault
    helm search repo hashicorp/vault -l

    # download a chart
    helm fetch ingress/ingress-nginx --untar
    

    Tips

    • List all images needed in Helm charts (but not the ones without a tag)
    helm template -g longhorn-1.4.1.tgz | yq -N '.. | .image? | select(. == "*" and . != null)' | sort | uniq | grep ":" | egrep -v '*:[[:blank:]]' || echo ""
    
  • 🎲 Kubectl

    Connecting to a k8s cluster

    Kubeconfig

    • Define KUBECONFIG in your profile
    # Default one
    KUBECONFIG=~/.kube/config

    # Several contexts - kept split across files
    KUBECONFIG=~/.kube/k3sup-lab:~/.kube/k3s-dev

    # Or it can be specified on the command line
    kubectl get pods --kubeconfig=admin-kube-config
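
    Several kubeconfigs can also be merged into one flattened file; a minimal sketch (the target path ~/.kube/merged is just an example):
    # Merge the listed kubeconfigs into a single self-contained file
    KUBECONFIG=~/.kube/k3sup-lab:~/.kube/k3s-dev kubectl config view --flatten > ~/.kube/merged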
    
    • View and Set
    kubectl config view
    kubectl config current-context

    kubectl config set-context \
      dev-context \
      --namespace=dev-namespace \
      --cluster=docker-desktop \
      --user=dev-user

    kubectl config use-context lab
    
    • Switch context
    # Set the namespace of the current context
    kubectl config set-context --current --namespace=nexus3
    kubectl config get-contexts
    

    Kubecm

    The problem with kubeconfig files is that contexts end up nested inside a single kubeconfig, which becomes hard to manage in the long term. kubecm helps merge, list, and switch them cleanly. The easiest way to install it is with Arkade: arkade get kubecm - see arkade.
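
    Typical usage might look like this (a sketch; check kubecm --help, since subcommands vary between versions):
    # Add a kubeconfig file into the main ~/.kube/config
    kubecm add -f ~/.kube/k3s-dev
    # List contexts and switch interactively
    kubecm list
    kubecm switch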

  • ๐Ÿ™ ArgoCD
  • ๐Ÿ™ Network troubleshooting

    Troubleshoot DNS

    • vi dns.yml
    apiVersion: v1
    kind: Pod
    metadata:
      name: dnsutils
      namespace: default
    spec:
      containers:
      - name: dnsutils
        image: registry.k8s.io/e2e-test-images/jessie-dnsutils:1.3
        command:
          - sleep
          - "infinity"
        imagePullPolicy: IfNotPresent
      restartPolicy: Always
    
    • deploy dnsutils
    kubectl apply -f dns.yml
    pod/dnsutils created

    kubectl get pods dnsutils
    NAME       READY   STATUS    RESTARTS   AGE
    dnsutils   1/1     Running   0          36s
    
    • Troubleshoot with dnsutils
    kubectl exec -i -t dnsutils -- nslookup kubernetes.default
    ;; connection timed out; no servers could be reached
    command terminated with exit code 1

    kubectl exec -ti dnsutils -- cat /etc/resolv.conf
    search default.svc.cluster.local svc.cluster.local cluster.local psflab.local
    nameserver 10.43.0.10
    options ndots:5

    kubectl get endpoints kube-dns --namespace=kube-system
    NAME       ENDPOINTS                                  AGE
    kube-dns   10.42.0.6:53,10.42.0.6:53,10.42.0.6:9153   5d1h

    kubectl get svc kube-dns --namespace=kube-system
    NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
    kube-dns   ClusterIP   10.43.0.10   <none>        53/UDP,53/TCP,9153/TCP   5d1h
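
    If lookups still time out while the endpoints look healthy, a common next step is to restart CoreDNS and watch its logs (a sketch, assuming CoreDNS runs as the usual kube-system deployment):
    kubectl -n kube-system rollout restart deployment coredns
    kubectl -n kube-system logs -l k8s-app=kube-dns -f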
    

    CURL

    cat << EOF > curl.yml
    apiVersion: v1
    kind: Pod
    metadata:
      name: curl
      namespace: default
    spec:
      containers:
      - name: curl
        image: curlimages/curl
        command:
          - sleep
          - "infinity"
        imagePullPolicy: IfNotPresent
      restartPolicy: Always
    EOF

    kubectl apply -f curl.yml

    # Test the DNS
    kubectl exec -i -t curl -- curl -v telnet://10.43.0.10:53
    kubectl exec -i -t curl -- curl -v telnet://kube-dns.kube-system.svc.cluster.local:53
    kubectl exec -i -t curl -- nslookup kube-dns.kube-system.svc.cluster.local

    # Test a host without touching DNS (--resolve takes host:port:address)
    curl -k -I --resolve subdomain.domain.com:443:52.165.230.62 https://subdomain.domain.com/
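
    As a quicker alternative to the manifest, a throwaway curl pod can be run in one line (it is removed on exit):
    kubectl run curl --rm -it --image=curlimages/curl --restart=Never -- sh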
    
  • 🐠 OKD

    Install

    # Get latest version
    OKD_VERSION=$(curl -s https://api.github.com/repos/okd-project/okd/releases/latest | jq -r .tag_name)

    # Download
    curl -L https://github.com/okd-project/okd/releases/download/${OKD_VERSION}/openshift-install-linux-${OKD_VERSION}.tar.gz -O
    curl -L https://github.com/okd-project/okd/releases/download/${OKD_VERSION}/openshift-client-linux-${OKD_VERSION}.tar.gz -O

    # Download the FCOS ISO
    ./openshift-install coreos print-stream-json | grep '\.iso[^.]'
    ./openshift-install coreos print-stream-json | jq .architectures.x86_64.artifacts.metal.formats.iso.disk.location
    ./openshift-install coreos print-stream-json | jq .architectures.x86_64.artifacts.vmware.formats.ova.disk.location
    ./openshift-install coreos print-stream-json | jq '.architectures.x86_64.artifacts.digitalocean.formats["qcow2.gz"].disk.location'
    ./openshift-install coreos print-stream-json | jq '.architectures.x86_64.artifacts.qemu.formats["qcow2.gz"].disk.location'
    ./openshift-install coreos print-stream-json | jq '.architectures.x86_64.artifacts.metal.formats.pxe | .. | .location? // empty'
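
    The tarballs can then be unpacked onto the PATH; a minimal sketch (the member names assume the usual layout of the OKD release archives):
    tar -xzf openshift-install-linux-${OKD_VERSION}.tar.gz -C /usr/local/bin openshift-install
    tar -xzf openshift-client-linux-${OKD_VERSION}.tar.gz -C /usr/local/bin oc kubectl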
    

    Install bare-metal

    Official doc

  • ๐Ÿ  OpenShift

    OC Mirror

    • The ImageSetConfiguration needs at least one operator:
    kind: ImageSetConfiguration
    apiVersion: mirror.openshift.io/v1alpha2
    archiveSize: 4
    storageConfig:
      registry:
        imageURL: quay.example.com:8443/mirror/oc-mirror-metadata
        skipTLS: false
    mirror:
      platform:
        architectures:
          - "amd64"
        channels:
        - name: stable-4.14
          type: ocp
          shortestPath: true
        graph: true
      operators:
        - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.14
          packages:
            - name: kubevirt-hyperconverged
              channels:
                - name: 'stable'
            - name: serverless-operator
              channels:
                - name: 'stable'
      additionalImages:
      - name: registry.redhat.io/ubi9/ubi:latest
      helm: {}
    
    # install oc-mirror:
    curl https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/latest/oc-mirror.rhel9.tar.gz -O

    # Get an example imageset
    oc-mirror init --registry quay.example.com:8443/mirror/oc-mirror-metadata

    # Find operators in the list of operators, channels, packages
    oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14
    oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14 --package=kubevirt-hyperconverged
    oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14 --package=kubevirt-hyperconverged --channel=stable

    # mirror with a jumphost that has online access
    oc-mirror --config=imageset-config.yaml docker://quay.example.com:8443

    # mirror for airgap
    oc-mirror --config=imageset-config.yaml file://tmp/download
    oc-mirror --from=/tmp/upload/ docker://quay.example.com/ocp/operators

    # Refresh OperatorHub
    oc get pod -n openshift-marketplace

    # Get the index pod and delete it to refresh
    oc delete pod cs-redhat-operator-index-m2k2n -n openshift-marketplace
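
    After a mirror run, oc-mirror generates the cluster-side manifests (ImageContentSourcePolicy and CatalogSource) under a results directory; roughly:
    # Apply the generated manifests (the exact path depends on the run timestamp)
    oc apply -f ./oc-mirror-workspace/results-*/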
    

    Install

    ## Get the CoreOS image that is going to be installed
    openshift-install coreos print-stream-json | grep '\.iso[^.]'

    openshift-install create install-config

    openshift-install create manifests

    openshift-install create ignition-configs

    openshift-install create cluster --dir . --log-level=info
    openshift-install destroy cluster --log-level=info
    
    • for bare metal, write the ISO to a bootable USB stick
    dd if=$HOME/ocp-latest/rhcos-live.iso of=/dev/sdb bs=1024k status=progress
    

    Add node

    export OPENSHIFT_CLUSTER_ID=$(oc get clusterversion -o jsonpath='{.items[].spec.clusterID}')
    export CLUSTER_REQUEST=$(jq --null-input --arg openshift_cluster_id "$OPENSHIFT_CLUSTER_ID" '{
      "api_vip_dnsname": "<api_vip>",
      "openshift_cluster_id": $openshift_cluster_id,
      "name": "<openshift_cluster_name>"
    }')
    

    Platform in install-config

    • Get all the info on how to configure it
    openshift-install explain installconfig.platform.libvirt
    
    ## none
    platform:
      none: {}

    ## baremetal - use IPMI to provision bare metal
    platform:
      baremetal:
        apiVIP: 192.168.111.5
        ingressVIP: 192.168.111.7
        provisioningNetwork: "Managed"
        provisioningNetworkCIDR: 172.22.0.0/24
        provisioningNetworkInterface: eno1
        clusterProvisioningIP: 172.22.0.2
        bootstrapProvisioningIP: 172.22.0.3
        hosts:
          - name: master-0
            role: master
            bmc:
              address: ipmi://192.168.111.1
              username: admin
              password: password
            bootMACAddress: 52:54:00:a1:9c:ae
            hardwareProfile: default
          - name: master-1
            role: master
            bmc:
              address: ipmi://192.168.111.2
              username: admin
              password: password
            bootMACAddress: 52:54:00:a1:9c:af
            hardwareProfile: default
          - name: master-2
            role: master
            bmc:
              address: ipmi://192.168.111.3
              username: admin
              password: password
            bootMACAddress: 52:54:00:a1:9c:b0
            hardwareProfile: default

    ## vsphere - old syntax, deprecated form (the new one in 4.15 uses "failure domains")
    vsphere:
        vcenter:
        username:
        password:
        datacenter:
        defaultDatastore:
        apiVIPs:
        - x.x.x.x
        ingressVIPs:
        - x.x.x.x

    ## new syntax
    platform:
      vsphere:
        apiVIPs:
        - x.x.x.x
        datacenter: xxxxxxxxxxxx_datacenter
        defaultDatastore: /xxxxxxxxxxxx_datacenter/datastore/Shared Storages/ssd-001602
        failureDomains:
        - name: CNV4
          region: fr
          server: xxxxxxxxxxxx.ovh.com
          topology:
            computeCluster: /xxxxxxxxxxxx_datacenter/host/Management Zone Cluster
            datacenter: xxxxxxxxxxxx_datacenter
            datastore: /xxxxxxxxxxxx_datacenter/datastore/Shared Storages/ssd-001602
            networks:
            - vds_mgmt
          zone: dc
        ingressVIPs:
        - x.x.x.x
        password: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
        username: admin
        vCenter: xxxxxxxxxxx.ovh.com
        vcenters:
        - datacenters:
          - xxxxxxxxxx_datacenter
          password: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
          port: 443
          server: xxxxxxx.ovh.com
          user: admin
    

    Utils

    # Get Cluster ID
    oc get clusterversion -o jsonpath='{.items[].spec.clusterID}'

    # Get nodes which are Ready
    oc get nodes --output jsonpath='{range .items[?(@.status.conditions[-1].type=="Ready")]}{.metadata.name} {.status.conditions[-1].type}{"\n"}{end}'

    # get images from all pods in a namespace
    oc get pods -n <namespace> --output jsonpath='{range .items[*]}{.spec.containers[*].image}{"\n"}{end}'
    

    Set OperatorHub

    • in an airgapped environment
    oc get catalogsources -n openshift-marketplace
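
    In a fully disconnected cluster, the default online catalog sources are usually disabled so that only the mirrored CatalogSource remains:
    oc patch OperatorHub cluster --type json \
      -p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'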
    
  • ๐Ÿฃ Bash Functions for k8s

    A list of nice findings for Kubernetes

    • List all images in a Helm chart
    images=$(helm template -g $helm | yq -N '.. | .image? | select(. == "*" and . != null)' | sort | uniq | grep ":" | egrep -v '*:[[:blank:]]' || echo "")
    
    • pull and save the images listed in a Helm chart (e.g. for airgap transfer)
    load_helm_images(){
      # look in helm charts
      for helm in $(ls ../../roles/*/files/helm/*.tgz); do
        printf "\e[1;34m[INFO]\e[m Look for images in ${helm}...\n"

        images=$(helm template -g $helm | yq -N '.. | .image? | select(. == "*" and . != null)' | sort | uniq | grep ":" | egrep -v '*:[[:blank:]]' || echo "")

        dir=$( dirname $helm | xargs dirname )

        echo "####"

        if [ "$images" != "" ]; then
          printf "\e[1;34m[INFO]\e[m Images found in the helm chart: ${images}\n"
          printf "\e[1;34m[INFO]\e[m Create ${dir}/images directory...\n"

          mkdir -p ${dir}/images

          while IFS= read -r image_name; do
            archive_name=$(basename -a $(awk -F : '{print $1}' <<<${image_name}));
            printf "\e[1;34m[INFO]\e[m Pull image ${image_name}...\n"
            podman pull ${image_name};
            printf "\e[1;34m[INFO]\e[m Save ${image_name} in ${dir}/images/${archive_name}\n"
            podman save ${image_name} --format oci-archive -o ${dir}/images/${archive_name};
          done <<< ${images}
        else
          printf "\e[1;34m[INFO]\e[m No images found in the helm chart: $helm\n"
        fi
      done
    }
    
    • Check required components are installed
    function checkComponentsInstall() {
        componentsArray=("kubectl" "helm")
        for i in "${componentsArray[@]}"; do
          command -v "${i}" >/dev/null 2>&1 ||
            { echo "[ERROR] ${i} is required, but it's not installed. Aborting." >&2; exit 1; }
        done
    }
    
    • Version comparator
    function checkK8sVersion() {
        # strip the leading "v" from e.g. "Server Version: v1.22.4"
        currentK8sVersion=$(kubectl version --short | grep "Server Version" | awk '{gsub(/v/,""); print $3}')
        testVersionComparator 1.20 "$currentK8sVersion" '<'
        if [[ $k8sVersion == "ok" ]]; then
          echo "current kubernetes version is ok"
        else
          minikube start --kubernetes-version=v1.22.4;
        fi
    }


    # the comparator based on https://stackoverflow.com/a/4025065
    versionComparator () {
        if [[ $1 == $2 ]]
        then
            return 0
        fi
        local IFS=.
        local i ver1=($1) ver2=($2)
        # fill empty fields in ver1 with zeros
        for ((i=${#ver1[@]}; i<${#ver2[@]}; i++))
        do
            ver1[i]=0
        done
        for ((i=0; i<${#ver1[@]}; i++))
        do
            if [[ -z ${ver2[i]} ]]
            then
                # fill empty fields in ver2 with zeros
                ver2[i]=0
            fi
            if ((10#${ver1[i]} > 10#${ver2[i]}))
            then
                return 1
            fi
            if ((10#${ver1[i]} < 10#${ver2[i]}))
            then
                return 2
            fi
        done
        return 0
    }

    testVersionComparator () {
        versionComparator $1 $2
        case $? in
            0) op='=';;
            1) op='>';;
            2) op='<';;
        esac
        if [[ $op != "$3" ]]
        then
            echo "Kubernetes test fail: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'"
            k8sVersion="not ok"
        else
            echo "Kubernetes test pass: '$1 $op $2'"
            k8sVersion="ok"
        fi
    }
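
    For example, calling the test helper directly:
    testVersionComparator 1.20 1.22.4 '<'
    # prints: Kubernetes test pass: '1.20 < 1.22.4' (and sets k8sVersion="ok")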
    
  • 📜 CertManager
  • 🔒 Vault on k8s

    Some time ago, I wrote a small shell script to handle Vault on a Kubernetes cluster. Kept here for documentation purposes.

    Install Vault with helm

    #!/bin/bash

    ## Variables
    DIRNAME=$(dirname $0)
    DEFAULT_VALUE="vault/values-override.yaml"
    NewAdminPasswd="PASSWORD"
    PRIVATE_REGISTRY_USER="registry-admin"
    PRIVATE_REGISTRY_PASSWORD="PASSWORD"
    PRIVATE_REGISTRY_ADDRESS="registry.example.com"
    DOMAIN="example.com"
    INGRESS="vault.${DOMAIN}"

    if [ -z ${CM_NS+x} ]; then
      CM_NS='your-namespace'
    fi

    if [ -z ${1+x} ]; then
      VALUES_FILE="${DIRNAME}/${DEFAULT_VALUE}"
      echo -e "\n[INFO] Using default values file '${DEFAULT_VALUE}'"
    else
      if [ -f $1 ]; then
        echo -e "\n[INFO] Using values file $1"
        VALUES_FILE=$1
      else
        echo -e "\n[ERROR] File $1 does not exist"
        exit 1
      fi
    fi

    ## Functions
    function checkComponentsInstall() {
        componentsArray=("kubectl" "helm")
        for i in "${componentsArray[@]}"; do
          command -v "${i}" >/dev/null 2>&1 ||
            { echo "${i} is required, but it's not installed. Aborting." >&2; exit 1; }
        done
    }

    function createSecret() {
    kubectl get secret -n ${CM_NS} registry-pull-secret --no-headers 2> /dev/null \
    || \
    kubectl create secret docker-registry -n ${CM_NS} registry-pull-secret \
      --docker-server=${PRIVATE_REGISTRY_ADDRESS} \
      --docker-username=${PRIVATE_REGISTRY_USER} \
      --docker-password=${PRIVATE_REGISTRY_PASSWORD}
    }

    function installWithHelm() {
    helm dep update ${DIRNAME}/helm

    helm upgrade --install vault ${DIRNAME}/helm \
    --namespace=${CM_NS} --create-namespace \
    --set global.imagePullSecrets[0]=registry-pull-secret \
    --set global.image.repository=${PRIVATE_REGISTRY_ADDRESS}/hashicorp/vault-k8s \
    --set global.agentImage.repository=${PRIVATE_REGISTRY_ADDRESS}/hashicorp/vault \
    --set ingress.hosts[0]=${INGRESS} \
    --set ingress.enabled=true \
    --set global.leaderElection.namespace=${CM_NS}

    echo -e "\n[INFO] sleep 30s" && sleep 30
    }

    checkComponentsInstall
    createSecret
    installWithHelm
    

    Init Vault on Kubernetes

    Allow the local Kubernetes cluster to create and read secrets in Vault.
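
    A minimal init sketch, assuming the chart defaults (pod vault-0 in ${CM_NS}); the unseal key and root token must be stored safely:
    # Initialize with a single unseal key (fine for a lab, not for production)
    kubectl exec -n ${CM_NS} vault-0 -- vault operator init -key-shares=1 -key-threshold=1
    kubectl exec -n ${CM_NS} vault-0 -- vault operator unseal <unseal_key>

    # Enable the kubernetes auth method (log in with the root token first: vault login)
    kubectl exec -n ${CM_NS} vault-0 -- vault auth enable kubernetes
    kubectl exec -n ${CM_NS} vault-0 -- sh -c \
      'vault write auth/kubernetes/config kubernetes_host="https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}"'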

  • 🔱 K3S
    • Specific to RHEL
    # Add the pod and service networks to the trusted zone
    sudo firewall-cmd --permanent --zone=trusted --add-source=10.42.0.0/16 #pods
    sudo firewall-cmd --permanent --zone=trusted --add-source=10.43.0.0/16 #services
    sudo firewall-cmd --reload
    sudo firewall-cmd --list-all-zones

    # on the master
    sudo rm -f /var/lib/cni/networks/cbr0/lock
    sudo /usr/local/bin/k3s-killall.sh
    sudo systemctl restart k3s
    sudo systemctl status k3s

    # on a worker
    sudo rm -f /var/lib/cni/networks/cbr0/lock
    sudo /usr/local/bin/k3s-killall.sh
    sudo systemctl restart k3s-agent
    sudo systemctl status k3s-agent
    

    Check Certificates

    # Get the CA from the K3s master
    openssl s_client -connect localhost:6443 -showcerts < /dev/null 2>&1 | openssl x509 -noout -enddate
    openssl s_client -showcerts -connect 193.168.51.103:6443 < /dev/null 2>/dev/null | openssl x509 -outform PEM
    openssl s_client -showcerts -connect 193.168.51.103:6443 < /dev/null 2>/dev/null | openssl x509 -outform PEM | base64 | tr -d '\n'

    # Check the end date:
    for i in `ls /var/lib/rancher/k3s/server/tls/*.crt`; do echo $i; openssl x509 -enddate -noout -in $i; done

    # More efficient:
    cd /var/lib/rancher/k3s/server/tls/
    for crt in *.crt; do printf '%s: %s\n' "$(date --date="$(openssl x509 -enddate -noout -in "$crt"|cut -d= -f 2)" --iso-8601)" "$crt"; done | sort

    # Check the CA issuer
    for i in $(find . -maxdepth 1 -type f -name "*.crt"); do openssl x509 -in ${i} -noout -issuer; done
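
    On recent K3s versions, the certificates can also be rotated without reinstalling (a sketch; check k3s certificate --help on your version):
    sudo systemctl stop k3s
    sudo k3s certificate rotate
    sudo systemctl start k3s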
    

    General Checks RKE2/K3S

    A nice gist to troubleshoot etcd: link
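
    For a quick health check of the embedded etcd on a K3s server node, something like this works (paths assume a default K3s install; RKE2 keeps them under /var/lib/rancher/rke2 instead):
    curl -s https://127.0.0.1:2379/health \
      --cacert /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt \
      --cert /var/lib/rancher/k3s/server/tls/etcd/server-client.crt \
      --key /var/lib/rancher/k3s/server/tls/etcd/server-client.key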

  • 🚀 Operator SDK

    Operators come in three kinds: Go, Ansible, and Helm.

    ## Init an Ansible project
    operator-sdk init --plugins=ansible --domain example.org --owner "Your name"

    ## The command above creates a structure like:
    netbox-operator
    ├── Dockerfile
    ├── Makefile
    ├── PROJECT
    ├── config
    │   ├── crd
    │   ├── default
    │   ├── manager
    │   ├── manifests
    │   ├── prometheus
    │   ├── rbac
    │   ├── samples
    │   ├── scorecard
    │   └── testing
    ├── molecule
    │   ├── default
    │   └── kind
    ├── playbooks
    │   └── install.yml
    ├── requirements.yml
    ├── roles
    │   └── deployment
    └── watches.yaml
    
    ## Create the first API (with a generated Ansible role)
    operator-sdk create api --group app --version v1alpha1 --kind Deployment --generate-role
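
    From there, the scaffolded Makefile drives the usual build-and-deploy loop; a sketch (the image name is an example):
    # Build and push the operator image, then install the CRDs and deploy it
    make docker-build docker-push IMG=registry.example.com/netbox-operator:v0.0.1
    make install
    make deploy IMG=registry.example.com/netbox-operator:v0.0.1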
    