Gitlab
Glab CLI https://glab.readthedocs.io/en/latest/intro.html

```bash
# add token
glab auth login --hostname mygitlab.example.com
# view fork of dep installer
glab repo view mygitlab.example.com/copain/project
# clone fork of dep installer
glab repo clone mygitlab.example.com/copain/project
```

Install

```ruby
# Optimization
puma['worker_processes'] = 16
puma['worker_timeout'] = 60
puma['min_threads'] = 1
puma['max_threads'] = 4
puma['per_worker_max_memory_mb'] = 2048
```

Certificates

Generate the CSR config in /data/gitlab/csr/server_cert.cnf:

```ini
[req]
default_bits = 2048
distinguished_name = req_distinguished_name
req_extensions = req_ext
prompt = no

[req_distinguished_name]
C = PL
ST = Poland
L = Warsaw
O = myOrg
OU = DEV
CN = gitlab.example.com

[req_ext]
subjectAltName = @alt_names

[alt_names]
DNS = gitlab.example.com
IP = 192.168.01.01
```

```bash
# Create CSR
openssl req -new -newkey rsa:2048 -nodes -keyout gitlab.example.com.key -config /data/gitlab/csr/server_cert.cnf -out gitlab.example.com.csr

openssl req -noout -text -in gitlab.example.com.csr

# Sign your CSR with your PKI. If your PKI is a Windows one, you should get back a .CER file.

# check info:
openssl x509 -text -in gitlab.example.com.cer -noout
```

```bash
### push it as crt/key into GitLab
cp /tmp/gitlab.example.com.cer cert/gitlab.example.com.crt
cp /tmp/gitlab.example.com.key cert/gitlab.example.com.key
cp /tmp/gitlab.example.com.cer cert/192.168.01.01.crt
cp /tmp/gitlab.example.com.key cert/192.168.01.01.key

### push rootCA into GitLab
cp /etc/pki/ca-trust/source/anchors/domain-issuing.crt /data/gitlab/config/trusted-certs/domain-issuing.crt
cp /etc/pki/ca-trust/source/anchors/domain-rootca.crt /data/gitlab/config/trusted-certs/domain-rootca.crt

### Reconfigure
vi /data/gitlab/config/gitlab.rb
docker exec gitlab bash -c 'update-ca-certificates'
docker exec gitlab bash -c 'gitlab-ctl reconfigure'

### Stop / Start
docker stop gitlab
docker rm gitlab
docker run -d -p 5050:5050 -p 2289:22 -p 443:443 --restart=always \
  -v /data/gitlab/config:/etc/gitlab \
  -v /data/gitlab/logs:/var/log/gitlab \
  -v /data/gitlab/data:/var/opt/gitlab \
  -v /data/gitlab/cert:/etc/gitlab/ssl \
  -v /data/gitlab/config/trusted-certs:/usr/local/share/ca-certificates \
  --name gitlab gitlab/gitlab-ce:15.0.5-ce.0
```

Health-Checks

```bash
docker exec gitlab bash -c 'gitlab-ctl status'
docker exec -it gitlab gitlab-rake gitlab:check SANITIZE=true
docker exec -it gitlab gitlab-rake gitlab:env:info
```

Backup

```bash
docker exec -it gitlab gitlab-rake gitlab:backup:create --trace

# Alternate way to do it
docker exec gitlab bash -c 'gitlab-backup create'
docker exec gitlab bash -c 'gitlab-backup create SKIP=repositories'
docker exec gitlab bash -c 'gitlab-backup create SKIP=registry'
```

Restore from a Backup

```bash
# Restore
gitlab-ctl reconfigure
gitlab-ctl start
gitlab-ctl stop unicorn
gitlab-ctl stop sidekiq
gitlab-ctl status
ls -lart /var/opt/gitlab/backups

docker exec -it gitlab gitlab-rake gitlab:backup:restore --trace
docker exec -it gitlab gitlab-rake gitlab:backup:restore BACKUP=1537738690_2018_09_23_10.8.3 --trace

# Restart
docker exec gitlab bash -c 'gitlab-ctl restart'
```

Update

Pre-checks before update:

```bash
sudo docker exec -it gitlab gitlab-rake gitlab:check
sudo docker exec -it gitlab gitlab-rake gitlab:doctor:secrets
```
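To make the backup recurring, the same gitlab-backup command can be wrapped in a cron entry; a minimal sketch, assuming the container is named gitlab as above (CRON=1 silences the progress output):

```bash
# /etc/cron.d/gitlab-backup -- hypothetical schedule: daily at 02:00, registry skipped
0 2 * * * root docker exec gitlab gitlab-backup create SKIP=registry CRON=1
```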
Github
Get tag_name from latest

```bash
export RKE_VERSION=$(curl -s https://update.rke2.io/v1-release/channels | jq -r '.data[] | select(.id=="stable") | .latest' | awk -F"+" '{print $1}' | sed 's/v//')
export CERT_VERSION=$(curl -s https://api.github.com/repos/cert-manager/cert-manager/releases/latest | jq -r .tag_name)
export RANCHER_VERSION=$(curl -s https://api.github.com/repos/rancher/rancher/releases/latest | jq -r .tag_name)
export LONGHORN_VERSION=$(curl -s https://api.github.com/repos/longhorn/longhorn/releases/latest | jq -r .tag_name)
export NEU_VERSION=$(curl -s https://api.github.com/repos/neuvector/neuvector-helm/releases/latest | jq -r .tag_name)
```

Install gh

```bash
# Ubuntu
type -p curl >/dev/null || (sudo apt update && sudo apt install curl -y)
curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \
&& sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
&& sudo apt update \
&& sudo apt install gh -y

# RedHat
sudo dnf install 'dnf-command(config-manager)'
sudo dnf config-manager --add-repo https://cli.github.com/packages/rpm/gh-cli.repo
sudo dnf install gh
```

Autocompletions

```bash
gh completion zsh > $ZSH/completions/_gh
```

Create an ssh key (ed25519)

Login

```bash
gh auth login -p ssh -h GitHub.com -s read:project,delete:repo,repo,workflow -w

gh auth status
github.com
  ✓ Logged in to github.com as MorzeBaltyk ($HOME/.config/gh/hosts.yml)
  ✓ Git operations for github.com configured to use ssh protocol.
  ✓ Token: gho_************************************
  ✓ Token scopes: delete_repo, gist, read:org, read:project, repo
```

To use your key

One way:
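One way, presumably, is to generate an ed25519 key locally and upload it with gh; a sketch, where the file path and title are placeholders:

```bash
# Generate an ed25519 key pair
ssh-keygen -t ed25519 -C "you@example.com" -f ~/.ssh/id_ed25519

# Upload the public key to your GitHub account
# (the token needs the admin:public_key scope)
gh ssh-key add ~/.ssh/id_ed25519.pub --title "my-laptop"
```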
Gitea
Prerequisites

- Firewalld enabled; important, otherwise routing to the app does not work
- Podman and jq installed

Import image

```bash
podman pull docker.io/gitea/gitea:1-rootless
podman save docker.io/gitea/gitea:1-rootless -o gitea-rootless.tar
podman load < gitea-rootless.tar
```

Install

cat /etc/systemd/system/container-gitea-app.service

```ini
# container-gitea-app.service
[Unit]
Description=Podman container-gitea-app.service

Wants=network.target
After=network-online.target
RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage

[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
TimeoutStopSec=70
PIDFile=%t/container-gitea-app.pid
Type=forking

ExecStartPre=/bin/rm -f %t/container-gitea-app.pid %t/container-gitea-app.ctr-id
ExecStart=/usr/bin/podman container run \
  --conmon-pidfile %t/container-gitea-app.pid \
  --cidfile %t/container-gitea-app.ctr-id \
  --cgroups=no-conmon \
  --replace \
  --detach \
  --tty \
  --env DB_TYPE=sqlite3 \
  --env DB_HOST=gitea-db:3306 \
  --env DB_NAME=gitea \
  --env DB_USER=gitea \
  --env DB_PASSWD=9Oq6P9Tsm6j8J7c18Jxc \
  --volume gitea-data-volume:/var/lib/gitea:Z \
  --volume gitea-config-volume:/etc/gitea:Z \
  --network gitea-net \
  --publish 2222:2222 \
  --publish 3000:3000 \
  --label "io.containers.autoupdate=registry" \
  --name gitea-app \
  docker.io/gitea/gitea:1-rootless

ExecStop=/usr/bin/podman container stop \
  --ignore \
  --cidfile %t/container-gitea-app.ctr-id \
  -t 10

ExecStopPost=/usr/bin/podman container rm \
  --ignore \
  -f \
  --cidfile %t/container-gitea-app.ctr-id

[Install]
WantedBy=multi-user.target default.target
```

Configuration inside /var/lib/containers/storage/volumes/gitea-config-volume/_data/app.ini
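Before touching app.ini, wire the unit above into systemd and start Gitea; the standard systemctl steps apply:

```bash
sudo systemctl daemon-reload
sudo systemctl enable --now container-gitea-app.service
systemctl status container-gitea-app.service
```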
Git
Git is a distributed version control system created by Linus Torvalds, the mastermind behind Linux itself. It was designed to be superior to the version control systems readily available at the time, the two most common being CVS and Subversion (SVN). Whereas CVS and SVN use the client/server model, Git operates a little differently: instead of downloading a project, making changes, and uploading it back to the server, Git makes the local machine act as a server. Tecmint
Collection
List

```bash
ansible-galaxy collection list
```

Install an Ansible Collection

```bash
# From the official Ansible Galaxy repo
ansible-galaxy collection install community.general

# From a local tarball
ansible-galaxy collection install ./community-general-6.0.0.tar.gz

# From a custom repo
ansible-galaxy collection install git+https://git.example.com/projects/namespace.collectionName.git
ansible-galaxy collection install git+https://git.example.com/projects/namespace.collectionName,v1.0.2
ansible-galaxy collection install git+https://git.example.com/namespace/collectionName.git

# From a requirements file
ansible-galaxy collection install -r ./requirement.yaml
```

Requirements file to install Ansible Collections

```yaml
collections:
  - name: kubernetes.core

  - name: https://gitlab.example.com/super-group/collector.git
    type: git
    version: "v1.0.6"

  - name: https://gitlab.ipolicedev.int/another-projects/plates.git
    type: git
```
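For completeness, the tarball installed above can be produced from the collection sources with ansible-galaxy itself; a sketch, assuming you are in the collection root (where galaxy.yml lives), with namespace/collectionName as placeholders:

```bash
cd ansible_collections/namespace/collectionName
ansible-galaxy collection build        # produces namespace-collectionName-<version>.tar.gz
ansible-galaxy collection install ./namespace-collectionName-*.tar.gz
```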
Certificates Authority
Trust a CA on a Linux host

```bash
# [RHEL] The RootCA from the DC needs to be installed on the host:
cp my-domain-issuing.crt /etc/pki/ca-trust/source/anchors/my_domain_issuing.crt
cp my-domain-rootca.crt /etc/pki/ca-trust/source/anchors/my_domain_rootca.crt
update-ca-trust extract

# [Ubuntu]
sudo apt-get install -y ca-certificates
sudo cp local-ca.crt /usr/local/share/ca-certificates
sudo update-ca-certificates
```
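To confirm the CA is really trusted, verify a certificate it signed against the system bundle; the paths below are the RHEL defaults (adjust for Ubuntu), and the hostname is an example:

```bash
# Exit code 0 and "OK" mean the chain validates against the system store
openssl verify -CAfile /etc/pki/tls/certs/ca-bundle.crt server.crt

# Or test a live endpoint; curl fails if the chain is not trusted
curl -sI https://gitlab.example.com >/dev/null && echo "TLS OK"
```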
CEPH
Bash Shortcut
Most useful shortcuts

- Ctrl + r : search the history backwards (reverse-i-search).
- Ctrl + l : clear the screen (instead of the `clear` command).
- Ctrl + p : recall the previous command.
- Ctrl + x, Ctrl + e : edit the current command in an external editor (needs `export EDITOR=vim`).
- Ctrl + Shift + v : paste in a Linux terminal.
- Ctrl + a : move to the beginning of the line.
- Ctrl + e : move to the end of the line.
- Ctrl + xx : move to the opposite end of the line.
- Ctrl + left / Ctrl + right : move one word left / right.
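All of these come from readline, and bash can list every binding itself, which is handy to rediscover forgotten ones:

```bash
bind -P | grep -v "is not bound"   # every readline function with its current key binding
```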
Administration
Hosted-engine Administration

Connect to the hosted-engine VM as root, with the password set up during the install:

```bash
# Generate a backup
engine-backup --scope=all --mode=backup --file=/root/backup --log=/root/backuplog

# Restore from a backup on a fresh install
engine-backup --mode=restore --file=file_name --log=log_file_name --provision-db --restore-permissions
engine-setup

# Restore a backup on an existing install
engine-cleanup
engine-backup --mode=restore --file=file_name --log=log_file_name --restore-permissions
engine-setup
```

Host Administration

Connect by ssh to the host:

```bash
# Put a host in maintenance mode manually
hosted-engine --vm-status
hosted-engine --set-maintenance --mode=global
hosted-engine --vm-status

# Remove maintenance mode
hosted-engine --set-maintenance --mode=none
hosted-engine --vm-status

# Upgrade the hosted-engine
hosted-engine --set-maintenance --mode=global
hosted-engine --vm-status
engine-upgrade-check
dnf update ovirt\*setup\*   # update the setup packages
engine-setup                # launch it to update the engine
```

/!\ Connect individually with KVM: virt-manager does not work. oVirt uses libvirt, but not the way plain KVM does…
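The engine-backup call above fits nicely in a cron entry on the engine VM; a minimal sketch, assuming /root/backups exists (note the % escaping cron requires):

```bash
# /etc/cron.d/engine-backup -- hypothetical daily backup at 03:00
0 3 * * * root engine-backup --scope=all --mode=backup --file=/root/backups/engine-$(date +\%F) --log=/root/backups/engine-$(date +\%F).log
```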
🚦 Gita
Presentation

Gita is an open-source Python project to handle a large number of git repositories. Available: Here

```bash
# Install
pip3 install -U gita

# add a repo in gita
gita add dcc/ssg/toolset
gita add -r dcc/ssg   # recursively add
gita add -a dcc       # recursively add and auto-group based on folder structure

# create a group
gita group add docs -n ccn

# Checks
gita ls
gita ll -g
gita group ls
gita group ll
gita st dcc

# Use
gita pull ccn
gita push ccn

gita freeze
```
🚠 Quay.io
Deploy a Quay.io / mirror-registry in a container

Nothing original here; it is just the Red Hat documentation, but it can be useful to kickstart a registry.

Prerequisites:

- 10G /home
- 15G /var
- 300G /srv or /opt (depending on quayRoot)
- min 2 or more vCPUs
- min 8 GB of RAM

```bash
# packages
sudo yum install -y podman
sudo yum install -y rsync
sudo yum install -y jq

# Get tar
mirror="https://mirror.openshift.com/pub/openshift-v4/clients"
wget ${mirror}/mirror-registry/latest/mirror-registry.tar.gz
tar zxvf mirror-registry.tar.gz

# Get oc-mirror
curl https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/latest/oc-mirror.rhel9.tar.gz -O

# Basic install
sudo ./mirror-registry install \
  --quayHostname quay01.example.local \
  --quayRoot /opt

# More detailed install
sudo ./mirror-registry install \
  --quayHostname quay01.example.local \
  --quayRoot /srv \
  --quayStorage /srv/quay-pg \
  --pgStorage /srv/quay-storage \
  --sslCert tls.crt \
  --sslKey tls.key

podman login -u init \
  -p 7u2Dm68a1s3bQvz9twrh4Nel0i5EMXUB \
  quay01.example.local:8443 \
  --tls-verify=false

# By default the login goes into:
cat $XDG_RUNTIME_DIR/containers/auth.json

# Get IP
sudo podman inspect --format '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' quay-app

# uninstall
sudo ./mirror-registry uninstall -v \
  --quayRoot <example_directory_name>

# Info
curl -u init:password https://quay01.example.local:8443/v2/_catalog | jq
curl -u root:password https://<url>:<port>/v2/ocp4/openshift4/tags/list | jq

# Get an example of imageset
oc-mirror init --registry quay.example.com:8443/mirror/oc-mirror-metadata

# Get list of Operators, channels, packages
oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14
oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14 --package=kubevirt-hyperconverged
oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14 --package=kubevirt-hyperconverged --channel=stable
```

Unlock the init/admin user

```bash
QUAY_POSTGRES=$(podman ps | grep quay-postgres | awk '{print $1}')

podman exec -it $QUAY_POSTGRES psql -d quay -c "UPDATE public.\"user\" SET invalid_login_attempts = 0 WHERE username = 'init';"
```

Source: Mirror-registry
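To sanity-check the fresh registry, push any image into the init user's namespace and list its tags; the hostname and password come from the install above, the alpine image is just an example:

```bash
podman pull docker.io/library/alpine:latest
podman tag docker.io/library/alpine:latest quay01.example.local:8443/init/alpine:latest
podman push quay01.example.local:8443/init/alpine:latest --tls-verify=false
curl -k -u init:password https://quay01.example.local:8443/v2/init/alpine/tags/list | jq
```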
🚀 Operator SDK
Operators come in 3 kinds: Go, Ansible, Helm.

```bash
## Init an Ansible project
operator-sdk init --plugins=ansible --domain example.org --owner "Your name"

## The command above creates a structure like:
netbox-operator
├── Dockerfile
├── Makefile
├── PROJECT
├── config
│   ├── crd
│   ├── default
│   ├── manager
│   ├── manifests
│   ├── prometheus
│   ├── rbac
│   ├── samples
│   ├── scorecard
│   └── testing
├── molecule
│   ├── default
│   └── kind
├── playbooks
│   └── install.yml
├── requirements.yml
├── roles
│   └── deployment
└── watches.yaml
```

```bash
## Create the first role
operator-sdk create api --group app --version v1alpha1 --kind Deployment --generate-role
```
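The scaffolded Makefile ships the usual targets to build and deploy the operator; a sketch, assuming an image name you control:

```bash
make docker-build docker-push IMG=registry.example.com/netbox-operator:v0.0.1
make install   # apply the CRDs to the current cluster
make deploy IMG=registry.example.com/netbox-operator:v0.0.1
```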
🔱 K3S
Specific to RHEL

```bash
# Create a trusted zone for the two internal networks
sudo firewall-cmd --permanent --zone=trusted --add-source=10.42.0.0/16   # pods
sudo firewall-cmd --permanent --zone=trusted --add-source=10.43.0.0/16   # services
sudo firewall-cmd --reload
sudo firewall-cmd --list-all-zones

# on Master
sudo rm -f /var/lib/cni/networks/cbr0/lock
sudo /usr/local/bin/k3s-killall.sh
sudo systemctl restart k3s
sudo systemctl status k3s

# on Worker
sudo rm -f /var/lib/cni/networks/cbr0/lock
sudo /usr/local/bin/k3s-killall.sh
sudo systemctl restart k3s-agent
sudo systemctl status k3s-agent
```

Check Certificates

```bash
# Get CA from the K3s master
openssl s_client -connect localhost:6443 -showcerts < /dev/null 2>&1 | openssl x509 -noout -enddate
openssl s_client -showcerts -connect 193.168.51.103:6443 < /dev/null 2>/dev/null | openssl x509 -outform PEM
openssl s_client -showcerts -connect 193.168.51.103:6443 < /dev/null 2>/dev/null | openssl x509 -outform PEM | base64 | tr -d '\n'

# Check end date:
for i in /var/lib/rancher/k3s/server/tls/*.crt; do echo $i; openssl x509 -enddate -noout -in $i; done

# More efficient:
cd /var/lib/rancher/k3s/server/tls/
for crt in *.crt; do printf '%s: %s\n' "$(date --date="$(openssl x509 -enddate -noout -in "$crt" | cut -d= -f 2)" --iso-8601)" "$crt"; done | sort

# Check CA issuer
for i in $(find . -maxdepth 1 -type f -name "*.crt"); do openssl x509 -in ${i} -noout -issuer; done
```

General Checks RKE2/K3S

Nice gist to troubleshoot etcd: link
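If the certificates above are close to expiry, recent K3s releases can rotate them explicitly (assumption: the certificate subcommand exists on your version; on older ones, restarting the service within 90 days of expiry triggers the same rotation):

```bash
sudo systemctl stop k3s
sudo k3s certificate rotate
sudo systemctl start k3s
```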
🔗 Dependencies
Package with pip3

```bash
pip3 freeze | grep netaddr > requirements.txt
pip3 download -r requirements.txt -d wheel
mv requirements.txt wheel
tar -zcf wheelhouse.tar.gz wheel
tar -zxf wheelhouse.tar.gz
pip3 install -r wheel/requirements.txt --no-index --find-links wheel
```

Package with Poetry

```bash
curl -sSL https://install.python-poetry.org | python3 -
poetry new rp-poetry
poetry add ansible
poetry add poetry
poetry add netaddr
poetry add kubernetes
poetry add jsonpatch
poetry add `cat ~/.ansible/collections/ansible_collections/kubernetes/core/requirements.txt`

poetry build

pip3 install dist/rp_poetry-0.1.0-py3-none-any.whl

poetry export --without-hashes -f requirements.txt -o requirements.txt
```

Push to Nexus

```bash
poetry config repositories.test http://localhost
poetry publish -r test
```

Images Builder

```bash
podman login registry.redhat.io
podman pull registry.redhat.io/ansible-automation-platform-22/ansible-python-base-rhel8:1.0.0-230

pyenv local 3.9.13
python -m pip install poetry
poetry init
poetry add ansible-builder
```
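When the airgapped hosts differ from the build machine, pip can download wheels for a target platform instead of the local one; a sketch with standard pip flags, where the platform and Python version are examples:

```bash
pip3 download -r requirements.txt -d wheel \
  --platform manylinux2014_x86_64 \
  --python-version 3.9 \
  --only-binary=:all:   # cross-platform downloads require wheels only
```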
🔒 Vault on k8s
Some time ago I wrote a small shell script to handle Vault on a Kubernetes cluster; kept here for documentation purposes.

Install Vault with helm

```bash
#!/bin/bash

## Variables
DIRNAME=$(dirname $0)
DEFAULT_VALUE="vault/values-override.yaml"
NewAdminPasswd="PASSWORD"
PRIVATE_REGISTRY_USER="registry-admin"
PRIVATE_REGISTRY_PASSWORD="PASSWORD"
PRIVATE_REGISTRY_ADDRESS="registry.example.com"
DOMAIN="example.com"
INGRESS="vault.${DOMAIN}"

if [ -z ${CM_NS+x} ]; then
  CM_NS='your-namespace'
fi

if [ -z ${1+x} ]; then
  VALUES_FILE="${DIRNAME}/${DEFAULT_VALUE}"
  echo -e "\n[INFO] Using default values file '${DEFAULT_VALUE}'"
else
  if [ -f $1 ]; then
    echo -e "\n[INFO] Using values file $1"
    VALUES_FILE=$1
  else
    echo -e "\n[ERROR] No file exist $1"
    exit 1
  fi
fi

## Functions
function checkComponentsInstall() {
  componentsArray=("kubectl" "helm")
  for i in "${componentsArray[@]}"; do
    command -v "${i}" >/dev/null 2>&1 ||
      { echo "${i} is required, but it's not installed. Aborting." >&2; exit 1; }
  done
}

function createSecret() {
kubectl get secret -n ${CM_NS} registry-pull-secret --no-headers 2> /dev/null \
|| \
kubectl create secret docker-registry -n ${CM_NS} registry-pull-secret \
  --docker-server=${PRIVATE_REGISTRY_ADDRESS} \
  --docker-username=${PRIVATE_REGISTRY_USER} \
  --docker-password=${PRIVATE_REGISTRY_PASSWORD}
}

function installWithHelm() {
helm dep update ${DIRNAME}/helm

helm upgrade --install vault ${DIRNAME}/helm \
  --namespace=${CM_NS} --create-namespace \
  --set global.imagePullSecrets[0]=registry-pull-secret \
  --set global.image.repository=${PRIVATE_REGISTRY_ADDRESS}/hashicorp/vault-k8s \
  --set global.agentImage.repository=${PRIVATE_REGISTRY_ADDRESS}/hashicorp/vault \
  --set ingress.hosts[0]=${INGRESS} \
  --set ingress.enabled=true \
  --set global.leaderElection.namespace=${CM_NS}

echo -e "\n[INFO] sleep 30s" && sleep 30
}

checkComponentsInstall
createSecret
installWithHelm
```

Init Vault on kubernetes

Allow the local kubernetes cluster to create and reach secrets on the Vault.
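The init step itself can be driven with kubectl exec against the first pod; a minimal sketch, assuming a vault-0 pod and a single key share (fine for a lab, not for production):

```bash
kubectl exec -n ${CM_NS} vault-0 -- vault operator init \
  -key-shares=1 -key-threshold=1 -format=json > vault-init.json

VAULT_UNSEAL_KEY=$(jq -r '.unseal_keys_b64[0]' vault-init.json)
kubectl exec -n ${CM_NS} vault-0 -- vault operator unseal "$VAULT_UNSEAL_KEY"
```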
📦 Archive
Tar, the « tape archiver », preserves file permissions and ownership.

The Basics

```bash
# Archive
tar cvf my_archive.tar <file1> <file2> </some/dir/>

## Archive and compress with zstd everything in /target/dir (-C changes directory first)
tar -I zstd -vcf archive.tar.zstd -C /target/dir .

# Extract
tar xvf my_archive.tar

# Extract into a target dir
tar -zxvf new.tar.gz -C /target/dir
```

Other useful options:

- t : list the archive's content.
- T : read the list of files to archive from a file.
- P : preserve absolute paths (useful to back up /etc).
- X : exclude files listed in a file.
- z : gzip compression.
- j : bzip2 compression.
- J : LZMA/xz compression.
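A quick combo of the options above, listing first, then extracting selectively; paths are examples:

```bash
tar tvf my_archive.tar                            # t: list contents without extracting
tar xvf my_archive.tar path/in/archive            # extract a single entry
tar cvzPf etc-backup.tar.gz -X exclude.txt /etc   # P keeps absolute paths, X reads exclusions from a file
```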
๐Ÿ“ Storage
๐Ÿ“ Storage
General concern

If you want to move VMs to another Storage Domain, you need to copy their template to it as well!

Remove a disk:

```bash
# If RHV does not use the disks anymore, they should appear empty in lsblk:
lsblk -a
sdf                                   8:80   0   4T  0 disk
└─36001405893b456536be4d67a7f6716e3 253:38   0   4T  0 mpath
sdg                                   8:96   0   4T  0 disk
└─36001405893b456536be4d67a7f6716e3 253:38   0   4T  0 mpath
sdh                                   8:112  0   4T  0 disk
└─36001405893b456536be4d67a7f6716e3 253:38   0   4T  0 mpath
sdi                                   8:128  0       0 disk
└─360014052ab23b1cee074fe38059d7c94 253:39   0 100G  0 mpath
sdj                                   8:144  0       0 disk
└─360014052ab23b1cee074fe38059d7c94 253:39   0 100G  0 mpath
sdk                                   8:160  0       0 disk
└─360014052ab23b1cee074fe38059d7c94 253:39   0 100G  0 mpath

# find all disks from the LUN ID
LUN_ID="360014054ce7e566a01d44c1a4758b092"
list_disk=$(dmsetup deps -o devname ${LUN_ID} | cut -f 2 | cut -c 3- | tr -d "()" | tr " " "\n")
echo ${list_disk}

# Remove from multipath
multipath -f "${LUN_ID}"

# remove the disks
for i in ${list_disk}; do echo ${i}; blockdev --flushbufs /dev/${i}; echo 1 > /sys/block/${i}/device/delete; done

# You can check which disk maps to which LUN on the CEPH side
ls -l /dev/disk/by-*
```

NFS for OLVM/oVirt

Since oVirt needs shared storage, we can create a local NFS export to work around this point when there is no storage array.
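After pulling LUNs (or presenting new ones), the SCSI bus can be rescanned without a reboot; the helper ships in the sg3_utils package:

```bash
sudo dnf install -y sg3_utils
sudo rescan-scsi-bus.sh -r   # -r also removes devices that disappeared
lsblk -a
```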
👾 Pypi Repository
PyPI repo for an airgap env

Let's take the Python dependencies of Netbox as an example.

```bash
# Tools needed
dnf install -y python3.11
pip install --upgrade pip setuptools python-pypi-mirror twine

# init mirror
python3.11 -m venv mirror
mkdir download

# Get the list of Py packages needed
curl raw.githubusercontent.com/netbox-community/netbox/v3.7.3/requirements.txt -o requirements.txt
echo pip >> requirements.txt
echo setuptools >> requirements.txt
echo uwsgi >> requirements.txt

# Make sure the repository CA is installed
curl http://pki.server/pki/cacerts/ISSUING_CA.pem -o /etc/pki/ca-trust/source/anchors/issuing.crt
curl http://pki.server/pki/cacerts/ROOT_CA.pem -o /etc/pki/ca-trust/source/anchors/root.crt
update-ca-trust

source mirror/bin/activate
pypi-mirror download -b -d download -r requirements.txt
twine upload --repository-url https://nexus3.server/repository/internal-pypi/ download/*.whl --cert /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
twine upload --repository-url https://nexus3.server/repository/internal-pypi/ download/*.tar.gz --cert /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
```

Then on the target host, inside /etc/pip.conf:
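A minimal /etc/pip.conf sketch, assuming the Nexus repository exposes a simple index under internal-pypi (adjust to your setup):

```ini
[global]
index-url = https://nexus3.server/repository/internal-pypi/simple
trusted-host = nexus3.server
```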
👾 Nexus3
Deploy a Nexus3 container on a VM

Load the image

```bash
podman pull sonatype/nexus3:3.59.0
podman save sonatype/nexus3:3.59.0 -o nexus3.tar
podman load < nexus3.tar
```

Create a service inside /etc/systemd/system/container-nexus3.service with the content below:

```ini
[Unit]
Description=Nexus Podman container
Wants=syslog.service

[Service]
User=nexus-system
Group=nexus-system
Restart=always
ExecStart=/usr/bin/podman run \
  --log-level=debug \
  --rm \
  -ti \
  --publish 8081:8081 \
  --name nexus \
  sonatype/nexus3:3.59.0

ExecStop=/usr/bin/podman stop -t 10 nexus

[Install]
WantedBy=multi-user.target
```
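Then enable the unit and fetch the generated admin password from inside the container (Nexus writes it to /nexus-data/admin.password on first boot):

```bash
sudo systemctl daemon-reload
sudo systemctl enable --now container-nexus3.service
podman exec nexus cat /nexus-data/admin.password   # initial admin password
```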
👷 Makefile
Shell variables (escaped with a double dollar inside a recipe)

```make
$$var
$$(python -c 'import sys; print(sys.implementation.name)')
```

Make variables

```make
T ?= foo              # give a default value
T := $(shell whoami)  # execute the shell immediately and store the result in the variable
```

PHONY to execute several makefiles

Example 1

```make
SUBDIRS = foo bar baz

## dir is a shell variable
## SUBDIRS and MAKE are internal make variables
subdirs:
	for dir in $(SUBDIRS); do \
	  $(MAKE) -C $$dir; \
	done
```

Example 2

```make
SUBDIRS = foo bar baz

.PHONY: subdirs $(SUBDIRS)
subdirs: $(SUBDIRS)
$(SUBDIRS):
	$(MAKE) -C $@
foo: baz
```

Idea for a testing tool

```bash
git clone xxx /tmp/xxx && make -C !$/Makefile
make download   # download the container
make build      # build the binary
make install    # put it in /usr/local/bin
make clean
make help
```

Sources: Tutorials
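A common idiom behind the `make help` idea above: self-document targets with `##` comments and grep them out; a sketch, where the target names are examples:

```make
.PHONY: help
help: ## Show this help
	@grep -E '^[a-zA-Z_-]+:.*?## ' $(MAKEFILE_LIST) | \
	  awk 'BEGIN {FS = ":.*?## "}; {printf "%-15s %s\n", $$1, $$2}'

build: ## Build the binary
	go build ./...
```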
👮 Justfile
Interesting example from the justfile documentation: it captures the output of mktemp in a variable, then builds the full path to the tar.gz by concatenation. The "publish" recipe then creates the artifact and pushes it to a server.

```just
tmpdir := `mktemp`   # capture a temporary path
version := "0.2.7"
tardir := tmpdir / "awesomesauce-" + version
tarball := tardir + ".tar.gz"   # reuse the tmp path to name the tarball

publish:
  rm -f {{tarball}}
  mkdir {{tardir}}
  cp README.md *.c {{tardir}}
  tar zcvf {{tarball}} {{tardir}}
  scp {{tarball}} me@server.com:release/
  rm -rf {{tarball}} {{tardir}}
```

This one can be really useful to define a default value which can be redefined with an environment variable:
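A minimal sketch using just's built-in env_var_or_default(), with a hypothetical port variable:

```just
# run as: PORT=9000 just serve   -> overrides the default
port := env_var_or_default("PORT", "8080")

serve:
    @echo "listening on {{port}}"
```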
👮 CUE-lang
CUE stands for Configure, Unify, Execute.

Basics

Installation

```bash
# Install GO
GO_VERSION="1.21.0"
wget https://go.dev/dl/go${GO_VERSION}.linux-amd64.tar.gz
sudo tar -C /usr/local -xzf go${GO_VERSION}.linux-amd64.tar.gz
export PATH=$PATH:/usr/local/go/bin

go install cuelang.org/go/cmd/cue@latest
sudo cp -pr ./go /usr/local/.

# or use a container
printf "\e[1;34m[INFO]\e[m Install CUElang:\n";
podman pull docker.io/cuelang/cue:latest
```

Concepts

top -> schema -> constraint -> data -> bottom

Commands

```bash
# import a file
cue import imageset-config.yaml

# Validate
cue vet imageset-config.cue imageset-config.yaml
```

Some basic examples:

```go
// This is a comment
_greeting: "Welcome" // Hidden fields start with "_"
#project: "CUE" // Definitions start with "#"

message: "\(_greeting) to \(#project)!" // Regular fields are exported

#Person: {
	age: number // Mandatory field, must be a number
	hobbies?: [...string] // Not mandatory, but if present must be a list of strings
}

// Constraint which references #Person and checks the age
#Adult: #Person & {
	age: >=18
}

// =~ matches a regular expression
#Phone: string & =~ "[0-9]+"

// Mapping
instanceType: {
	web: "small"
	app: "medium"
	db: "large"
}

server1: {
	role: "app"
	instance: instanceType[role]
}

// server1.instance: "medium"
```

Scripting

```bash
# executables have the extension "_tool.cue"

# usage
cue cmd prompter
```

```go
package foo

import (
	"tool/cli"
	"tool/exec"
	"tool/file"
)

// moved to the data.cue file to show how we can reference "pure" CUE files
city: "Amsterdam"

// A command named "prompter"
command: prompter: {

	// save transcript to this file
	var: {
		file: *"out.txt" | string @tag(file)
	} // you can use "-t file=filename.txt" to change the output file, see "cue help injection" for more details

	// prompt the user for some input
	ask: cli.Ask & {
		prompt:   "What is your name?"
		response: string
	}

	// run an external command, starts after ask
	echo: exec.Run & {
		// note the reference to ask and city here
		cmd: ["echo", "Hello", ask.response + "!", "Have you been to", city + "?"]
		stdout: string // capture stdout, don't print to the terminal
	}

	// append to a file, starts after echo
	append: file.Append & {
		filename: var.file
		contents: echo.stdout // because we reference the echo task
	}

	// also starts after echo, and concurrently with append
	print: cli.Print & {
		text: echo.stdout // write the output to the terminal since we captured it previously
	}
}
```

Sources

Official Documentation
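Complementing cue import and cue vet above, cue export turns a validated CUE file back into plain data:

```bash
cue export imageset-config.cue --out yaml   # JSON is the default output format
```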
๐Ÿด Sed
๐Ÿด Sed
The Basics

```bash
sed -e '…' -e '…'   # Several expressions in one run
sed -i              # Replace in place
sed -r              # Use extended REGEX

# The most useful
sed -e '/^[ ]*#/d' -e '/^$/d' <file>     # print the file without empty or commented lines
sed 's/ -/\n -/g'                        # put every "-" on a new line
sed 's/my_match.*/ /g'                   # remove everything from the match to the end of the line
sed -i '4048d;3375d' ~/.ssh/known_hosts  # delete lines 4048 and 3375

# Capture groups
sed -E 's/.*@(.*)/\1/'                   # keep what is after @: capture it with ( ) and reuse it with \1
sed -e '/^;/! s/.*-reserv.*/; Reserved: &/' file.txt   # reuse the whole match with &

# Search a line
sed -e '/192.168.130/ s/^/#/g' -i /etc/hosts           # Comment a line
sed -re 's/^;(r|R)eserved:/; Reserved:/g' file.txt     # Search several strings

# Insert - add two lines below a matched pattern
sed -i '/.*\"description\".*/s/$/ \n \"after\" : \"network.target\"\,\n \"requires\" : \"network.target\"\,/g' my_File

# Append
sed '/WORD/ a Add this line after every line with WORD'

# if no occurrence, then add it after "use_authtok"
sed -e '/remember=10/!s/use_authtok/& remember=10/' -i /etc/pam.d/system-auth-permanent
```
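One safety habit worth adding to the -i notes above: the flag accepts a suffix, so sed keeps a backup before editing in place:

```bash
sed -i.bak 's/foo/bar/g' config.txt   # original kept as config.txt.bak
```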
๐Ÿณ Docker
๐Ÿณ Docker
```bash
# see images available on your host
docker image list

# equal to the above
docker images
REPOSITORY    TAG      IMAGE ID       CREATED        SIZE
httpd         latest   6fa26f20557b   45 hours ago   164MB
hello-world   latest   75280d40a50b   4 months ago   1.69kB

# show the full sha
docker images --no-trunc=true

# delete all images (in-use ones will be refused)
docker rmi $(docker images -q)
# delete images without tags
docker rmi $(docker images | grep "^<none>" | awk '{print $3}')
```
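Recent Docker also ships prune subcommands that cover the same cleanups without command substitution:

```bash
docker image prune      # remove dangling (untagged) images
docker image prune -a   # remove every image not used by a container
docker system df        # see what is taking up space
```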
๐Ÿฌ Podman
๐Ÿฌ Podman
Description

- Buildah: used to build Open Container Initiative (OCI) format or Docker format container images, without the need for a daemon.
- Podman: provides the ability to run container images directly, without a daemon. Podman can pull container images from a container registry if they are not available locally.
- Skopeo: offers features for pulling and pushing containers to registries. Moving containers between registries is supported. Container image inspection is also offered, and some introspective capabilities can be performed without first downloading the container itself.
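The registry-to-registry move skopeo is known for is a one-liner, and inspection works without pulling; a sketch, where the destination registry is a placeholder:

```bash
# copy an image between registries without storing it locally
skopeo copy docker://quay.io/podman/hello:latest docker://registry.example.com/podman/hello:latest

# inspect a remote image without downloading it
skopeo inspect docker://quay.io/podman/hello:latest | jq .
```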
๐Ÿฆ Awk
๐Ÿฆ Awk
The Basics

awk treats each line as a table; by default, spaces separate the columns. The general syntax is awk 'search {action}' file_to_parse.

```bash
# Lines where column $4 is higher than 75000 (the default action prints the line)
df | awk '$4 > 75000'

# Print the whole line when column $4 is higher than 75000
df | awk '$4 > 75000 {print $0}'
```

When searching for a string, the search needs to be enclosed in /search/. In the action, $0 represents the whole line, $1 the first column, $2 the second column, and so on.
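An example of the /search/ form described above, combined with a column action:

```bash
# print mount point and available space for lines matching "tmpfs"
df | awk '/tmpfs/ {print $6, $4}'
```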
๐Ÿฃ Bash Functions for k8s
๐Ÿฃ Bash Functions for k8s
A list of nice findings for Kubernetes

List all images in a Helm chart

```bash
images=$(helm template -g $helm | yq -N '..|.image? | select(. == "*" and . != null)' | sort | uniq | grep ":" | egrep -v '*:[[:blank:]]' || echo "")
```

Upload images listed in a Helm chart

```bash
load_helm_images(){
  # look in helm charts
  for helm in $(ls ../../roles/*/files/helm/*.tgz); do
    printf "\e[1;34m[INFO]\e[m Look for images in ${helm}...\n"

    images=$(helm template -g $helm | yq -N '..|.image? | select(. == "*" and . != null)' | sort | uniq | grep ":" | egrep -v '*:[[:blank:]]' || echo "")

    dir=$(dirname $helm | xargs dirname)

    echo "####"

    if [ "$images" != "" ]; then
      printf "\e[1;34m[INFO]\e[m Images found in the helm chart: ${images}\n"
      printf "\e[1;34m[INFO]\e[m Create ${dir}/images ...\n"

      mkdir -p ${dir}/images

      while IFS= read -r image_name; do
        archive_name=$(basename -a $(awk -F : '{print $1}' <<< ${image_name}))
        printf "\e[1;34m[INFO]\e[m Pull image...\n"
        podman pull ${image_name}
        printf "\e[1;34m[INFO]\e[m Push ${image_name} in ${dir}/images/${archive_name}\n"
        podman save ${image_name} --format oci-archive -o ${dir}/images/${archive_name}
      done <<< ${images}
    else
      printf "\e[1;34m[INFO]\e[m No images found in the helm chart: $helm\n"
    fi
  done
}
```

Check components are installed

```bash
function checkComponentsInstall() {
  componentsArray=("kubectl" "helm")
  for i in "${componentsArray[@]}"; do
    command -v "${i}" >/dev/null 2>&1 ||
      { echo "[ERROR] ${i} is required, but it's not installed. Aborting." >&2; exit 1; }
  done
}
```

Version comparator

```bash
function checkK8sVersion() {
  currentK8sVersion=$(kubectl version --short | grep "Server Version" | awk '{gsub(/v/,$5)}1 {print $3}')
  testVersionComparator 1.20 "$currentK8sVersion" '<'
  if [[ $k8sVersion == "ok" ]]; then
    echo "current kubernetes version is ok"
  else
    minikube start --kubernetes-version=v1.22.4
  fi
}

# the comparator, based on https://stackoverflow.com/a/4025065
versionComparator () {
  if [[ $1 == $2 ]]
  then
    return 0
  fi
  local IFS=.
  local i ver1=($1) ver2=($2)
  # fill empty fields in ver1 with zeros
  for ((i=${#ver1[@]}; i<${#ver2[@]}; i++))
  do
    ver1[i]=0
  done
  for ((i=0; i<${#ver1[@]}; i++))
  do
    if [[ -z ${ver2[i]} ]]
    then
      # fill empty fields in ver2 with zeros
      ver2[i]=0
    fi
    if ((10#${ver1[i]} > 10#${ver2[i]}))
    then
      return 1
    fi
    if ((10#${ver1[i]} < 10#${ver2[i]}))
    then
      return 2
    fi
  done
  return 0
}

testVersionComparator () {
  versionComparator $1 $2
  case $? in
    0) op='=';;
    1) op='>';;
    2) op='<';;
  esac
  if [[ $op != "$3" ]]
  then
    echo "Kubernetes test fail: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'"
    k8sVersion="not ok"
  else
    echo "Kubernetes test pass: '$1 $op $2'"
    k8sVersion="ok"
  fi
}
```
๐Ÿ  OpenShift
๐Ÿ  OpenShift
OC Mirror

Needs at least one Operator:

```yaml
kind: ImageSetConfiguration
apiVersion: mirror.openshift.io/v1alpha2
archiveSize: 4
storageConfig:
  registry:
    imageURL: quay.example.com:8443/mirror/oc-mirror-metadata
    skipTLS: false
mirror:
  platform:
    architectures:
      - "amd64"
    channels:
      - name: stable-4.14
        type: ocp
        shortestPath: true
    graph: true
  operators:
    - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.14
      packages:
        - name: kubevirt-hyperconverged
          channels:
            - name: 'stable'
        - name: serverless-operator
          channels:
            - name: 'stable'
  additionalImages:
    - name: registry.redhat.io/ubi9/ubi:latest
  helm: {}
```

```bash
# install oc-mirror:
curl https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/latest/oc-mirror.rhel9.tar.gz -O

# Get an example of imageset
oc-mirror init --registry quay.example.com:8443/mirror/oc-mirror-metadata

# Find operators in the list of Operators, channels, packages
oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14
oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14 --package=kubevirt-hyperconverged
oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14 --package=kubevirt-hyperconverged --channel=stable

# mirror with a jumphost which has online access
oc-mirror --config=imageset-config.yaml docker://quay.example.com:8443

# mirror for airgap
oc-mirror --config=imageSetConfig.yaml file://tmp/download
oc-mirror --from=/tmp/upload/ docker://quay.example.com/ocp/operators

# Refresh OperatorHub
oc get pod -n openshift-marketplace

# Get the index pod and delete it to refresh
oc delete pod cs-redhat-operator-index-m2k2n -n openshift-marketplace
```

Install

```bash
## Get the CoreOS that is going to be installed
openshift-install coreos print-stream-json | grep '\.iso[^.]'

openshift-install create install-config

openshift-install create manifests

openshift-install create ignition-configs

openshift-install create cluster --dir . --log-level=info
openshift-install destroy cluster --log-level=info
```

For baremetal, make a bootable ISO USB:

```bash
dd if=$HOME/ocp-latest/rhcos-live.iso of=/dev/sdb bs=1024k status=progress
```

Add node

```bash
export OPENSHIFT_CLUSTER_ID=$(oc get clusterversion -o jsonpath='{.items[].spec.clusterID}')
export CLUSTER_REQUEST=$(jq --null-input --arg openshift_cluster_id "$OPENSHIFT_CLUSTER_ID" '{
  "api_vip_dnsname": "<api_vip>",
  "openshift_cluster_id": $openshift_cluster_id,
  "name": "<openshift_cluster_name>"
}')
```

Platform in install-config

Get all the info on how to configure it:

```bash
openshift-install explain installconfig.platform.libvirt
```

```yaml
## none
platform:
  none: {}

## baremetal - use ipmi to provision baremetal
platform:
  baremetal:
    apiVIP: 192.168.111.5
    ingressVIP: 192.168.111.7
    provisioningNetwork: "Managed"
    provisioningNetworkCIDR: 172.22.0.0/24
    provisioningNetworkInterface: eno1
    clusterProvisioningIP: 172.22.0.2
    bootstrapProvisioningIP: 172.22.0.3
    hosts:
      - name: master-0
        role: master
        bmc:
          address: ipmi://192.168.111.1
          username: admin
          password: password
        bootMACAddress: 52:54:00:a1:9c:ae
        hardwareProfile: default
      - name: master-1
        role: master
        bmc:
          address: ipmi://192.168.111.2
          username: admin
          password: password
        bootMACAddress: 52:54:00:a1:9c:af
        hardwareProfile: default
      - name: master-2
        role: master
        bmc:
          address: ipmi://192.168.111.3
          username: admin
          password: password
        bootMACAddress: 52:54:00:a1:9c:b0
        hardwareProfile: default

## vsphere - old syntax, deprecated form (new one in 4.15 with "failure domains")
vsphere:
  vcenter:
  username:
  password:
  datacenter:
  defaultDatastore:
  apiVIPs:
    - x.x.x.x
  ingressVIPs:
    - x.x.x.x

## new syntax
platform:
  vsphere:
    apiVIPs:
      - x.x.x.x
    datacenter: xxxxxxxxxxxx_datacenter
    defaultDatastore: /xxxxxxxxxxxx_datacenter/datastore/Shared Storages/ssd-001602
    failureDomains:
      - name: CNV4
        region: fr
        server: xxxxxxxxxxxx.ovh.com
        topology:
          computeCluster: /xxxxxxxxxxxx_datacenter/host/Management Zone Cluster
          datacenter: xxxxxxxxxxxx_datacenter
          datastore: /xxxxxxxxxxxx_datacenter/datastore/Shared Storages/ssd-001602
          networks:
            - vds_mgmt
        zone: dc
    ingressVIPs:
      - x.x.x.x
    password: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    username: admin
    vCenter: xxxxxxxxxxx.ovh.com
    vcenters:
      - datacenters:
          - xxxxxxxxxx_datacenter
        password: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
        port: 443
        server: xxxxxxx.ovh.com
        user: admin
```

Utils

```bash
# Get Cluster ID
oc get clusterversion -o jsonpath='{.items[].spec.clusterID}'

# Get Nodes which are Ready
oc get nodes --output jsonpath='{range .items[?(@.status.conditions[-1].type=="Ready")]}{.metadata.name} {.status.conditions[-1].type}{"\n"}{end}'

# get images from all pods in a namespace
oc get pods -n <namespace> --output jsonpath='{range .items[*]}{.spec.containers[*].image}{"\n"}{end}'
```

Set OperatorHub in airgap

```bash
oc get catalogsources -n openshift-marketplace
```
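When adding a node, its kubelet CSRs must be approved before it joins; the standard loop:

```bash
oc get csr
# approve everything pending (fine in a lab, review each CSR in production)
oc get csr -o name | xargs oc adm certificate approve
oc get nodes
```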