Documentation regarding Red Hat-like systems.
Prerequisites:
hostname -f == hostname

yum install -y ipa-server ipa-server-dns

ipa-server-install \
 --domain=example.com \
 --realm=EXAMPLE.COM \
 --ds-password=password \
 --admin-password=password \
 --hostname=classroom.example.com \
 --ip-address=172.25.0.254 \
 --reverse-zone=0.25.172.in-addr.arpa. \
 --forwarder=208.67.222.222 \
 --allow-zone-overlap \
 --setup-dns \
 --unattended
yum install -y ipa-client

ipa-client-install --mkhomedir --enable-dns-updates --force-ntpd -p admin@EXAMPLE.COM --password='password' --force-join -U

# Test login
echo -n 'password' | kinit admin
1sudo sh -c "cat <<EOF > ~/IdmZoneCheck.sh
2#!/bin/bash
3### IdM zone check ###
4# Check if the zone name is provided as a parameter #
5if [ -z "$1" ];
6then
7 echo -e "Provide the zone name to be checked as a parameter!\n(ex: IdmZoneCheck.sh domain.local)"
8 exit
9fi
10clear
11echo -e "### IDM / TCP ###\n\n"
12echo -e "TCP / kerberos-master (SRV)"
13dig +short _kerberos-master._tcp.$1. SRV
14echo -e "_TCP / kerberos (SRV)"
15dig +short _kerberos._tcp.$1. SRV
16echo -e "_TCP / kpasswd (SRV)"
17dig +short _kpasswd._tcp.$1. SRV
18echo -e "_TCP / ldap (SRV)"
19dig +short _ldap._tcp.$1. SRV
20echo -e "\n### IDM / UDP ###\n\n"
21echo -e "_UDP / kerberos-master (SRV)"
22dig +short _kerberos-master._udp.$1. SRV
23echo -e "_UDP / kerberos (SRV)"
24dig +short _kerberos._udp.$1. SRV
25echo -e "_UCP / kpasswd (SRV)"
26dig +short _kpasswd._udp.$1. SRV
27echo -e "\n### IDM / MSDCS DC TCP ###\n\n"
28echo -e "_MSDCS / TCP / kerberos (SRV)"
29dig +short _kerberos._tcp.dc._msdcs.$1. SRV
30echo -e "_MSDCS / TCP / ldap (SRV)"
31dig +short _ldap._tcp.dc._msdcs.$1. SRV
32echo -e "\n### IDM / MSDCS DC UDP ###\n\n"
33echo -e "_MSDCS / UDP / kerberos (SRV)"
34dig +short _kerberos._udp.dc._msdcs.$1. SRV
35echo -e "\n### IDM / REALM ###\n\n"
36echo -e "REALM (TXT)"
37dig +short _kerberos.$1. TXT
38echo -e "\n### IDM / CA ###\n\n"
39echo -e "A / ipa-ca"
40dig +short ipa-ca.$1. A
41echo -e "\n### IDM / A ###\n\n"
42echo -e "A / $HOSTNAME"
43dig +short $HOSTNAME. A
44EOF
./IdmZoneCheck.sh idm.ad-support.local
sudo realm list
authselect current
sssctl domain-list
sssctl config-check
getent -s files passwd
getent -s sss passwd user
getent passwd
dig -t SRV _ldap._tcp.example.com
sssctl user-checks toto -s sshd -a auth
Prerequisites, for RHEL 8:
dnf -y install realmd adcli sssd oddjob oddjob-mkhomedir samba-common-tools krb5-workstation authselect-compat

realm discover example.com
realm join example.com -U svc-sssd --client-software=sssd --os-name=RedHat --os-version=8

sudo authselect select sssd with-mkhomedir
sudo systemctl enable --now oddjobd.service
/etc/sssd/sssd.conf:
[sssd]
services = nss, pam, ssh, sudo
domains = example.com
config_file_version = 2
default_domain_suffix = example.com

[domain/example.com]
default_shell = /bin/bash
override_shell = /bin/bash

ad_domain = example.com
krb5_realm = EXAMPLE.COM
realmd_tags = manages-system joined-with-adcli
cache_credentials = True
id_provider = ad
krb5_store_password_if_offline = True
ldap_id_mapping = True
ldap_user_objectsid = objectSid
ldap_group_objectsid = objectSid
ldap_user_primary_group = primaryGroupID

use_fully_qualified_names = True
fallback_homedir = /home/%u

access_provider = ad
ldap_access_order = filter,expire
ldap_account_expire_policy = ad
ad_access_filter = (memberOf=CN=INTERNAL Team,OU=team-platform,OU=test-groups,DC=example,DC=com)


[nss]
homedir_substring = /home

[pam]
pam_pwd_expiration_warning = 7
pam_account_expired_message = Account expired, please contact AD administrator.
pam_account_locked_message = Account locked, please contact AD administrator.
pam_verbosity = 3

[ssh]

[sudo]
sss_cache -E; systemctl restart sssd; sss_cache -E
systemctl status sssd
/etc/sudoers.d/admin:
%EXAMPLE.COM\\internal\ team ALL=(ALL) ALL
realm permit -g 'internal team@example.com'
Documentation about how to be productive with a terminal.
Ctrl + r : Reverse search through the history.
Ctrl + l : Clear the screen (instead of using the "clear" command).
Ctrl + p : Recall the previous command.
Ctrl + x, Ctrl + e : Edit the current command in an external editor (needs export EDITOR=vim).
Ctrl + Shift + v : Paste in Linux terminals.
Ctrl + a : Move to the beginning of the line.
Ctrl + e : Move to the end of the line.
Ctrl + xx : Move to the opposite end of the line.
Ctrl + left : Move left one word.
Ctrl + right : Move right one word.
man <cmd> : Open the man page of a command.
space : go forward page by page.
b : go back page by page.
q : quit.
Enter : go line by line.
/<word> : search for a word in the man page.
n : go to the next match.
N : go back to the previous match.
man -k <key word> : search all man pages for your keywords.
man -k <word1>.*<word2> : ".*" allows searching for several words.
whatis <cmd> : give a short explanation of the command.
Useful to keep track of, document, and share what has been done.
script : save all commands and their output in a "typescript" file.
script -a : append to an existing "typescript" file (otherwise the previous one is overwritten).
exit : stop the session.
asciinema : record the terminal session as a video.
For RHEL, something like tlog exists and can be configured and centralized with rsyslog.
/etc/DIR_COLORS.xterm : defines terminal colors.
dircolors : changes colors in the ls output.
git clone https://github.com/tmux-plugins/tmux-logging.git
tmux new -s my_session : Create a new session.
tmux attach : Attach to the last used session.
tmux attach -t X : Attach to the tmux session number X.
tmux ls : List active tmux sessions.
tmux split-window -dh "!!" : Run a command in a separate pane.
tmux source-file ~/.tmux.conf : Reload the config.
C-b w : List sessions/panes.
C-b x : Close a pane or session.
echo $'\xae' = "®"
Vim has a special shorthand for entering characters with diacritical marks. If you need some familiar variant of a Latin alphabet character, you'll be able to input it with the digraph system.
Digraph input is started in insert or command mode (but not normal mode) by pressing Ctrl-k, then two printable characters in succession.
The first is often the "base" form of the letter, and the second denotes the appropriate embellishment.
https://vimvalley.com/ https://vim-adventures.com/ https://www.vimgolf.com/
# HCL
mkdir -p ~/.vim/pack/jvirtanen/start
cd ~/.vim/pack/jvirtanen/start
git clone https://github.com/jvirtanen/vim-hcl.git

# Justfile
mkdir -p ~/.vim/pack/vendor/start
cd ~/.vim/pack/vendor/start
git clone https://github.com/NoahTheDuke/vim-just.git
Trigger a Vim tutorial: vimtutor
The most powerful commands:
. : Repeat the last modification.
* : Memorize the word under the cursor and jump to its next occurrence.
* then . : together, repeat an action on each next occurrence.
Documentation about commands which should work on all Unix-like systems.
mdadm (multiple devices admin) is a software solution to manage RAID.
It allows building RAID arrays from whole disks (/dev/sdb, /dev/sdc) or from partitions (/dev/sdb1, /dev/sdc1). It replaces the older raidtools.

# View real-time information about your md devices
cat /proc/mdstat

# Monitor for failed disks (indicated by "(F)" next to the disk)
watch cat /proc/mdstat
# Display details about the RAID array (replace /dev/md0 with your array)
mdadm --detail /dev/md0

# Examine RAID disks for information (not volume), similar to --detail
mdadm --examine /dev/sd*
The config file /etc/mdadm.conf does not exist by default and needs to be created once you finish your install.
This file is required to auto-assemble the arrays at boot, as shown below.
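A minimal sketch of generating it, assuming the arrays are already assembled:

# Dump the current array definitions into the config file
mdadm --detail --scan >> /etc/mdadm.conf
cat /etc/mdadm.conf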
SSHFS mounts a remote file system onto your own FS through an SSH connection, all with plain user rights. The advantage is that you can manipulate the remote data with any file manager (Nautilus, Konqueror, ROX, or even the command line).
- Prerequisites: administration rights, a network connection, FUSE and the SSHFS package installed.
- sshfs users must belong to the fuse group.
Note: FUSE allows a user to mount a file system himself. Normally, mounting a file system requires being administrator, or a hardcoded entry in /etc/fstab.
First, install samba and samba-client (the client is useful for debugging and testing).
/etc/samba/smb.conf:
[global]
workgroup = WORKGROUP   # the default workgroup on Windows
hosts allow = ...
passdb backend = tdbsam # passwords are stored in the /var/lib/samba/private/passdb.tdb file
[shared]
browseable = yes
path = /shared
valid users = user01, @a_group_of_your_choice
writable = yes
testparm
/usr/bin/testparm -s /etc/samba/smb.conf
smbclient -L //192.168.56.102 -U test : list all Samba shares available
smbclient //192.168.56.102/sharedrepo -U test : connect to the share
pdbedit -L : list Samba users (better than smbclient)
NFS vs iSCSI
Concurrent access to a block device like iSCSI is not possible with standard file systems. You'll need a shared-disk filesystem (like GFS2 or OCFS2) to allow this, but in most cases the easiest solution is to just use a network share (via SMB/CIFS or NFS) if that is sufficient for your application.
ext4: the most widespread file system under GNU/Linux (derived from ext2 and ext3). It is journaled, i.e. it logs write operations to guarantee data integrity if the disk stops abruptly. Moreover, it can handle volumes up to 1,024 pebibytes and allows pre-allocating a contiguous area for a file in order to minimize fragmentation.
tar preserves file permissions and ownership.
The basics:

# Archive
tar cvf my_archive.tar <file1> <file2> </path/to/dir/>

# Archive and compress with zstd everything in /target/dir
tar -I zstd -vcf archive.tar.zstd -C /target/dir .

# Extract
tar xvf my_archive.tar

# Extract into a target dir
tar -zxvf new.tar.gz -C /target/dir
Other useful options:
- t : list the archive's content.
- T : read the list of files to archive from a file.
- P : preserve absolute paths (useful to back up /etc).
- X : exclude files listed in a file.
- z : gzip compression.
- j : bzip2 compression.
- J : LZMA/xz compression.
# Check partitions
parted -l /dev/sda
fdisk -l

# Check partitions - visible before the mkfs
ls /sys/block/sda/sda*
ls /dev/sd*

# Give partitions after the mkfs or pvcreate
blkid
blkid -o list

# Summary of the disks, partitions, FS and LVM
lsblk
lsblk -f
in script mode
# With fdisk
printf "n\np\n1\n\n\nt\n8e\nw\n" | sudo fdisk "/dev/sdb"

# With parted
sudo parted /dev/sdb mklabel gpt mkpart primary 1 100% set 1 lvm on
Gparted: graphical interface (based on parted, a GNU utility - handles GPT tables).
List of components:
LVM2 uses a new driver, the device-mapper, which allows using disk sectors in different targets:
- linear (most used in LVM)
- striped (striped across several disks)
- error (all I/O is treated as an error)
- snapshot (allows async snapshots)
lvs --all --segments -o +devices
server_xplore_col1 vgdata -wi-ao---- 21 striped 1.07t /dev/md2(40229),/dev/md3(40229),/dev/md4(40229),/dev/md5(40229),...
server_xplore_col2 vgdata -wi-ao---- 1 linear 219.87g /dev/md48(0)
# Summary
pvs
vgs
lvs

# Scan
pvscan
vgscan
lvscan

# Detailed info
pvdisplay [sda]
pvdisplay -m /dev/emcpowerd1
vgdisplay [vg_root]
lvdisplay [/dev/vg_root/lv_usr]

# Summary details
lvmdiskscan
 /dev/sda1 [ 600.00 MiB]
 /dev/sda2 [ 1.00 GiB]
 /dev/sda3 [ 38.30 GiB] LVM physical volume
 /dev/sdb1 [ <100.00 GiB] LVM physical volume
 /dev/sdc1 [ <50.00 GiB] LVM physical volume
 /dev/sdj [ 20.00 GiB]
 1 disk
 2 partitions
 0 LVM physical volume whole disks
 3 LVM physical volumes
parted /dev/sda resizepart 3 100%
udevadm settle
pvresize /dev/sda3

# Extend an XFS to a fixed size
lvextend -L 30G /dev/vg00/var
xfs_growfs /dev/vg00/var

# Add some space to an ext4 FS
lvextend -L +10G /dev/vg00/var
resize2fs /dev/vg00/var

# Extend to a percentage and resize automatically, whatever the FS type
lvextend -l +100%FREE /dev/vg00/var -r
parted /dev/sdb mklabel gpt mkpart primary 1 100% set 1 lvm on
udevadm settle
pvcreate /dev/sdb1
vgcreate vg01 /dev/sdb1
lvcreate -n lv_data -l 100%FREE vg01

# Create an XFS
mkfs.xfs /dev/vg01/lv_data
mkdir /data
echo "/dev/mapper/vg01-lv_data /data xfs defaults 0 0" >> /etc/fstab
mount -a

# Create an ext4
mkfs.ext4 /dev/vg01/lv_data
mkdir /data
echo "/dev/mapper/vg01-lv_data /data ext4 defaults 0 0" >> /etc/fstab
mount -a
swapoff -v /dev/dm-1
lvremove /dev/vg00/swap
vi /etc/fstab          # remove the swap entry
vi /etc/default/grub   # remove rd.lvm.lv=vg00/swap from the kernel cmdline
grub2-mkconfig -o /boot/efi/EFI/redhat/grub.cfg
grubby --remove-args "rd.lvm.lv=vg00/swap" --update-kernel /boot/vmlinuz-3.10.0-1160.71.1.el7.x86_64
grubby --remove-args "rd.lvm.lv=vg00/swap" --update-kernel /boot/vmlinuz-3.10.0-1160.el7.x86_64
grubby --remove-args "rd.lvm.lv=vg00/swap" --update-kernel /boot/vmlinuz-0-rescue-cd2525c8417d4f798a7e6c371121ef34
echo "vm.swappiness = 0" >> /etc/sysctl.conf
sysctl -p
# In case of crash, just relaunch pvmove without arguments
pvmove /dev/emcpowerd1 /dev/emcpowerc1

# Remove a PV from a VG
vgreduce vg01 /dev/emcpowerd1

# Remove all unused PVs from vg01
vgreduce -a vg01

# Remove the PV label
pvremove /dev/emcpowerd1
Activate /var even if it doesn't want to:
lvchange -ay --ignorelockingfailure --sysinit vgroot/var
# VG rename
vgrename

# LV rename
lvrename

# A PV does not need to be renamed
Even though in the past I used an MS-DOS or GPT disklabel partition for PVs, I now prefer to put LVM directly on the main block device, as sketched below. There is no reason to use two disklabels, unless you have a very specific use case (like a disk with a boot sector and boot partition).
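A minimal sketch of using a whole disk as a PV (the device and VG names are examples):

# No partition table: the whole block device becomes the PV
pvcreate /dev/sdc
vgcreate vg02 /dev/sdc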
S.M.A.R.T. is a technology that allows you to monitor and analyze the health and performance of your hard drives. It provides valuable information about the status of your storage devices. Here are some useful commands and tips for using S.M.A.R.T. with smartctl:
To display S.M.A.R.T. information for a specific drive, you can use the following command:
smartctl -a /dev/sda
This command will show all available S.M.A.R.T. data for the /dev/sda drive.
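Beyond reading attributes, smartctl can also trigger self-tests; a short sketch (the device name is an example):

# Quick health verdict
smartctl -H /dev/sda

# Run a short self-test, then read the results
smartctl -t short /dev/sda
smartctl -l selftest /dev/sda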
yum install iscsi-initiator-utils

# Checks
iscsiadm -m session -P 0 # get the target name
iscsiadm -m session -P 3 | grep "Target: iqn\|Attached scsi disk\|Current Portal"

# Discover iSCSI targets
iscsiadm -m discovery -t st -p 192.168.40.112
iscsiadm --mode discovery --type sendtargets --portal 192.168.40.112

# Login
iscsiadm -m node -T iqn.1992-04.com.emc:cx.ckm00192201413.b0 -l
iscsiadm -m node -T iqn.1992-04.com.emc:cx.ckm00192201413.b1 -l
iscsiadm -m node -T iqn.1992-04.com.emc:cx.ckm00192201413.a1 -l
iscsiadm -m node -T iqn.1992-04.com.emc:cx.ckm00192201413.a0 -l

# Enable/Start the services
systemctl enable iscsid iscsi && systemctl stop iscsid iscsi && systemctl start iscsid iscsi
for BUS in /sys/class/scsi_host/host*/scan; do echo "- - -" > ${BUS}; done

sudo sh -c 'for BUS in /sys/class/scsi_host/host*/scan; do echo "- - -" > ${BUS}; done'
Then partition your disk.
yum install device-mapper-multipath

vim /etc/multipath.conf:
defaults {
 user_friendly_names yes
 path_grouping_policy multibus
}
multipaths {
 multipath {
 wwid "36000d310004142000000000000000f23"
 alias oralog1
 }
}
devices {
 device {
 vendor "DGC"
 product ".*"
 product_blacklist "LUNZ"
 :
 path_checker emc_clariion ### Rev 47 alua
 hardware_handler "1 alua" ### modified for alua
 prio alua ### modified for alua
 :
 }
}
Check the config with: multipathd show config | more
# In crontab or a tmux session - take a snapshot of memory usage every hour
for i in {1..24}; do echo -n "===================== "; date; free -m; top -b -n1 | head -n 15; sleep 3600; done >> /var/log/SYSADM/memory.log &
diff -W200 -y <(unzip -vqq file1.jar | awk '{ if ($1 > 0) {printf("%s\t%s\n", $1, $8)}}' | sort -k2) <(unzip -vqq file2.jar | awk '{ if ($1 > 0) {printf("%s\t%s\n", $1, $8)}}' | sort -k2)
fuser:
fuser -m </dir or /files> # Find processes blocking/using this directory or these files
fuser -cu </dir or /files> # Same as above but adds the user
fuser -kcu </dir or /files> # Kill the processes
fuser -v -k -HUP -i ./ # Send a HUP signal to the processes

# The output gives <PID + letter>; here is the meaning:
# c current directory.
# e executable being run.
# f open file (omitted in default display mode).
# F open file for writing (omitted in default display mode).
# r root directory.
# m mmap'ed file or shared library.
lsof (= list open files):
lsof +D /var/log # Find all open files with the process and user
lsof -a +L1 <mountpoint> # Processes blocking a FS
lsof -c ssh -c init # Find files opened by those processes
lsof -p 1753 # Find files opened by this PID
lsof -u root # Find files opened by a user
lsof -u ^user # Find files opened by every user except this one
kill -9 `lsof -t -u toto` # Kill a user's processes (option -t outputs only PIDs)
# When you have no fuser or lsof:
find /proc/*/fd -type f -links 0 -exec ls -lrt {} \;
# Get the state
firewall-cmd --state
systemctl status firewalld

# Get infos
firewall-cmd --get-default-zone
firewall-cmd --get-active-zones
firewall-cmd --get-zones
firewall-cmd --set-default-zone=home

firewall-cmd --permanent --zone=FedoraWorkstation --add-source=00:FF:B0:CB:30:0A
firewall-cmd --permanent --zone=FedoraWorkstation --add-service=ssh

firewall-cmd --get-log-denied
firewall-cmd --set-log-denied=<all, unicast, broadcast, multicast, or off>
# Add/Remove a service
firewall-cmd --zone=public --add-service=ftp --permanent
firewall-cmd --zone=public --remove-service=ftp --permanent
firewall-cmd --zone=public --remove-port=53/tcp --permanent
firewall-cmd --zone=public --list-services

# Create a custom service
firewall-cmd --zone=public --new-service=portal --permanent
firewall-cmd --zone=public --service=portal --add-port=8080/tcp --permanent
firewall-cmd --zone=public --service=portal --add-port=8443/tcp --permanent
firewall-cmd --zone=public --add-service=portal --permanent
firewall-cmd --reload

firewall-cmd --zone=public --new-service=k3s-server --permanent
firewall-cmd --zone=public --service=k3s-server --add-port=443/tcp --permanent
firewall-cmd --zone=public --service=k3s-server --add-port=6443/tcp --permanent
firewall-cmd --zone=public --service=k3s-server --add-port=8472/udp --permanent
firewall-cmd --zone=public --service=k3s-server --add-port=10250/tcp --permanent
firewall-cmd --zone=public --add-service=k3s-server --permanent
firewall-cmd --reload

firewall-cmd --zone=public --new-service=quay --permanent
firewall-cmd --zone=public --service=quay --add-port=8443/tcp --permanent
firewall-cmd --zone=public --add-service=quay --permanent
firewall-cmd --reload

firewall-cmd --get-services # It's also possible to add a service from the list
firewall-cmd --runtime-to-permanent
for s in `firewall-cmd --list-services`; do echo $s; firewall-cmd --permanent --service "$s" --get-ports; done;

sudo sh -c 'for s in `firewall-cmd --list-services`; do echo $s; firewall-cmd --permanent --service "$s" --get-ports; done;'
ssh
22/tcp
dhcpv6-client
546/udp
firewall-cmd --info-service cfrm-IC
cfrm-IC
 ports: 7780/tcp 8440/tcp 8443/tcp
 protocols:
 source-ports:
 modules:
 destination:
firewall-cmd --list-all
public (active)
 target: default
 icmp-block-inversion: no
 interfaces: ens192
 sources:
 services: ssh dhcpv6-client https Oracle nimsoft
 ports: 10050/tcp 1521/tcp
 protocols:
 masquerade: no
 forward-ports:
 source-ports:
 icmp-blocks:
 rich rules:
firewall-cmd --zone=backup --list-all
firewall-cmd --get-active-zones
backup
 interfaces: ens224
public
 interfaces: ens192
ls /etc/firewalld/
firewalld.conf helpers/ icmptypes/ ipsets/ lockdown-whitelist.xml services/ zones/
firewall-cmd --get-ipset-types
firewall-cmd --permanent --get-ipsets
firewall-cmd --permanent --info-ipset=integration
firewall-cmd --ipset=integration --get-entries

firewall-cmd --permanent --new-ipset=test --type=hash:net
firewall-cmd --ipset=local-blocklist --add-entry=103.133.104.0/23
nmcli con show
NAME UUID TYPE DEVICE
ens192 4d0087a0-740a-4356-8d9e-f58b63fd180c ethernet ens192
ens224 3dcb022b-62a2-4632-8b69-ab68e1901e3b ethernet ens224

nmcli dev status
DEVICE TYPE STATE CONNECTION
ens192 ethernet connected ens192
ens224 ethernet connected ens224
ens256 ethernet connected ens256
lo loopback unmanaged --

# Get interface details:
nmcli connection show ens192
nmcli -p con show ens192

# Get the DNS settings of an interface
UUID=$(nmcli --get-values connection.uuid c show "cloud-init eth0")
nmcli --get-values ipv4.dns c show $UUID
nmcli connection add type ethernet mac "00:50:56:80:11:ff" ifname "ens224"
nmcli connection add type ethernet mac "00:50:56:80:8a:0b" ifname "ens256"
nmcli con load /etc/sysconfig/network-scripts/ifcfg-ens224
nmcli con up ens192
nmcli con mod enp1s0 +ipv4.addresses "192.168.122.11/24"
ip addr del 10.163.148.36/24 dev ens160

nmcli con reload # before reapplying
nmcli device reapply ens224
systemctl status network.service
systemctl restart network.service
UUID=$(nmcli --get-values connection.uuid c show "cloud-init eth0")
DNS_LIST=$(nmcli --get-values ipv4.dns c show $UUID)
nmcli conn modify "$UUID" ipv4.dns "${DNS_LIST} ${DNS_IP}"

# /etc/resolv.conf is managed by systemd-resolved
sudo systemctl restart systemd-resolved
- The BIOS starts automatically and detects the peripherals.
- It loads the startup routine from the MBR (Master Boot Record) - this is on the boot disk, located on the first sector of the hard drive.
- The MBR contains a loader which loads the "second stage loader": the "boot loader", specific to the system being loaded.
-> Linux has LILO (Linux Loader) or GRUB (Grand Unified Bootloader).
- LILO loads the kernel into memory, decompresses it and passes it its parameters.
- The kernel mounts the / FS (from then on, the commands in /sbin and /bin are available).
- The kernel executes the first process, "init".
LILO can offer several kernels as a choice. The default choice: "Linux".
/etc/lilo.conf : config of the kernel parameters.
/sbin/lilo : must be run so that the new params are saved.
-> creates the /boot/map file, which contains the physical blocks where the startup program is located.
# [RHEL] The RootCA from the DC needs to be installed on the host:
cp my-domain-issuing.crt /etc/pki/ca-trust/source/anchors/my_domain_issuing.crt
cp my-domain-rootca.crt /etc/pki/ca-trust/source/anchors/my_domain_rootca.crt
update-ca-trust extract

# [Ubuntu]
sudo apt-get install -y ca-certificates
sudo cp local-ca.crt /usr/local/share/ca-certificates
sudo update-ca-certificates
The grid is the component responsible for clustering in Oracle.
Grid (clusterware layer) -> ASM -> Disk Group
- Oracle Restart = single instance = 1 Grid (with or without ASM)
- Oracle RAC One Node = 2 Oracle instances in Active/Passive with shared storage
- Oracle RAC (Active/Active)
# As oracle user:
srvctl config scan

SCAN name: host-env-datad1-scan.domain, Network: 1
Subnet IPv4: 172.16.228.0/255.255.255.0/ens192, static
Subnet IPv6:
SCAN 1 IPv4 VIP: 172.16.228.33
SCAN VIP is enabled.
SCAN VIP is individually enabled on nodes:
SCAN VIP is individually disabled on nodes:
SCAN 2 IPv4 VIP: 172.16.228.35
SCAN VIP is enabled.
SCAN VIP is individually enabled on nodes:
SCAN VIP is individually disabled on nodes:
SCAN 3 IPv4 VIP: 172.16.228.34
SCAN VIP is enabled.
SCAN VIP is individually enabled on nodes:
SCAN VIP is individually disabled on nodes:
# As oracle user
srvctl config database
srvctl config database -d <SID>
srvctl status database -d <SID>
srvctl status nodeapps -n host-env-datad1n1
srvctl config nodeapps -n host-env-datad1n1
# ============
srvctl stop database -d DB_NAME
srvctl stop database -d DB_NAME -o normal
srvctl stop database -d DB_NAME -o immediate
srvctl stop database -d DB_NAME -o transactional
srvctl stop database -d DB_NAME -o abort
srvctl stop instance -d DB_NAME -i INSTANCE_NAME
# =============
srvctl start database -d DB_NAME -n host-env-datad1n1
srvctl start database -d DB_NAME -o nomount
srvctl start database -d DB_NAME -o mount
srvctl start database -d DB_NAME -o open
# ============
srvctl relocate database -db DB_NAME -node host-env-datad1n1
srvctl modify database -d DB_NAME -instance DB_NAME
srvctl restart database -d DB_NAME
# === Do not do this
srvctl modify instance -db DB_NAME -instance DB_NAME_2 -node host-env-datad1n2
srvctl modify database -d DB_NAME -instance DB_NAME
srvctl modify database -d oraclath -instance oraclath
crs_stat
crsctl status res
crsctl status res -t
crsctl check cluster -all

# Example output (here node 1 has issues):
/opt/oracle/grid/12.2.0.1/bin/crsctl check cluster -all
**************************************************************
host-env-datad1n1:
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4529: Cluster Synchronization Services is online
CRS-4534: Cannot communicate with Event Manager
**************************************************************
host-env-datad1n2:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
show parameter cluster

NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
cdb_cluster boolean FALSE
cdb_cluster_name string DB_NAME
cluster_database boolean TRUE
cluster_database_instances integer 2
cluster_interconnects string
-- Prevent the database from switching over (init parameter, takes effect after a restart)
ALTER SYSTEM SET cluster_database=FALSE SCOPE=SPFILE;
# As root
/u01/oracle/base/product/19.0.0/grid/bin/crsctl stop crs -f
/u01/oracle/base/product/19.0.0/grid/bin/crsctl disable crs

# Shutdown/startup the VM or other actions

# As root
/u01/oracle/base/product/19.0.0/grid/bin/crsctl enable crs
/u01/oracle/base/product/19.0.0/grid/bin/crsctl start crs
# As oracle user
srvctl stop database -d oraclath

# As root user, on both nodes:
/opt/oracle/grid/12.2.0.1/bin/crsctl stop crs -f
/opt/oracle/grid/12.2.0.1/bin/crsctl disable crs

# As root user, on both nodes:
/opt/oracle/grid/12.2.0.1/bin/crsctl enable crs
/opt/oracle/grid/12.2.0.1/bin/crsctl start crs

# Checks after restart
ps -ef | grep asm_pmon | grep -v "grep"

# If ASM is up and running
srvctl start database -d oraclath -node host1-env-data1n1.domain
# As oracle user
srvctl status scan_listener

PRCR-1068 : Failed to query resources
CRS-0184 : Cannot communicate with the CRS daemon.
The solution:
. oraenv # set SID = +ASM1 (on the second node, +ASM2)
sqlplus / as sysasm
startup
srvctl start asm -n ora-node1-hostname
srvctl status asm
asmcmd lsdsk
asmcmd lsdsk -G DATA
srvctl status diskgroup -g DATA
# List clients
asmcmd lsct

DB_Name Status Software_Version Compatible_version Instance_Name Disk_Group
+ASM CONNECTED 19.0.0.0.0 19.0.0.0.0 +ASM DATA
+ASM CONNECTED 19.0.0.0.0 19.0.0.0.0 +ASM FRA
MANA CONNECTED 12.2.0.1.0 12.2.0.0.0 MANA DATA
MANA CONNECTED 12.2.0.1.0 12.2.0.0.0 MANA FRA
MREPORT CONNECTED 12.2.0.1.0 12.2.0.0.0 MREPORT DATA
MREPORT CONNECTED 12.2.0.1.0 12.2.0.0.0 MREPORT FRA

# Files open
asmcmd lsof

DB_Name Instance_Name Path
MANA MANA +DATA/MANA/DATAFILE/blob.268.1045299983
MANA MANA +DATA/MANA/DATAFILE/data.270.1045299981
MANA MANA +DATA/MANA/DATAFILE/indx.269.1045299983
MANA MANA +DATA/MANA/control01.ctl
MANA MANA +DATA/MANA/redo01a.log
MANA MANA +DATA/MANA/redo02a.log
MANA MANA +DATA/MANA/redo03a.log
MANA MANA +DATA/MANA/redo04a.log
MANA MANA +DATA/MANA/sysaux01.dbf
[...]
. oraenv # set SID = +ASM
asmcmd
# List
oracleasm listdisks
DATA2
FRA1

# Check
oracleasm status
Checking if ASM is loaded: yes
Checking if /dev/oracleasm is mounted: yes

# Check one ASM volume
oracleasm querydisk -d DATA2
Disk "DATA2" is a valid ASM disk on device [8,49]

# Scan
oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
Instantiating disk "DATA3"

# Create, delete, rename
oracleasm createdisk DATA3 /dev/sdf1
oracleasm deletedisk
oracleasm renamedisk
cat asmliblist.sh
#!/bin/bash
for asmlibdisk in `ls /dev/oracleasm/disks/*`
 do
 echo "ASMLIB disk name: $asmlibdisk"
 asmdisk=`kfed read $asmlibdisk | grep dskname | tr -s ' ' | cut -f2 -d' '`
 echo "ASM disk name: $asmdisk"
 majorminor=`ls -l $asmlibdisk | tr -s ' ' | cut -f5,6 -d' '`
 device=`ls -l /dev | tr -s ' ' | grep -w "$majorminor" | cut -f10 -d' '`
 echo "Device path: /dev/$device"
 done
Disk Group: all disks in the same DG should have the same size. There are different types of DG redundancy; "external" means LUN replication is handled on the storage side. When a disk is added to a DG, wait for the rebalancing to finish before continuing operations (see the sketch below).
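A minimal sketch to watch an ongoing rebalance (run as the grid/oracle user; gv$asm_operation returns no rows when no rebalance is running):

. oraenv # set SID = +ASM1
echo "select inst_id, operation, state, est_minutes from gv\$asm_operation;" | sqlplus -S / as sysasm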
---
config:
theme: forest
layout: elk
---
flowchart TD
subgraph s1["Instance DB"]
style s1 fill:#E8F5E9,stroke:#388E3C,stroke-width:2px
subgraph s1a["Background Processes"]
style s1a fill:#FFF9C4,stroke:#FBC02D,stroke-width:1px
n5["PMON (Process Monitor)"]
n6["SMON (System Monitor)"]
n10["RECO (Recoverer Process)"]
end
subgraph s1b["PGA (Process Global Area)"]
style s1b fill:#E3F2FD,stroke:#1976D2,stroke-width:1px
n1["Processes"]
end
subgraph s1c["SGA (System Global Area)"]
style s1c fill:#FFEBEE,stroke:#D32F2F,stroke-width:1px
subgraph n7["Shared Pool (SP)"]
style n7 fill:#F3E5F5,stroke:#7B1FA2,stroke-width:1px
n7a["DC (Dictionary Cache)"]
n7b["LC (Library Cache)"]
n7c["RC (Result Cache)"]
end
n8["DB Cache (DBC)"]
n9["Redo Buffer"]
n3["DBWR (DB Writer)"]
n4["LGWR (Log Writer)"]
n5["PMON (Process Monitor)"]
n6["SMON (System Monitor)"]
n10["RECO (Recoverer Process)"]
end
end
subgraph s2["Database: Physical Files"]
style s2 fill:#FFF3E0,stroke:#F57C00,stroke-width:2px
n11["TBS (Tablespaces, files in .DBF)"]
n12["Redo Log Files"]
n13["Control Files"]
n14["SPFILE (Binary Authentication File)"]
n15["ArchiveLog files"]
end
subgraph s3["Operating System"]
style s3 fill:#E0F7FA,stroke:#00796B,stroke-width:2px
n16["Listener (Port 1521)"]
end
n3 --> n11
n3 --> n7c
n4 --> n12
n6 --> n7a
s3 --> s1
s1c <--> n12
s1c <--> n13
s1c <--> n14
n7b <--> n7c
classDef Aqua stroke-width:1px, stroke-dasharray:none, stroke:#0288D1, fill:#B3E5FC, color:#01579B
classDef Yellow stroke-width:1px, stroke-dasharray:none, stroke:#FBC02D, fill:#FFF9C4, color:#F57F17
classDef Green stroke-width:1px, stroke-dasharray:none, stroke:#388E3C, fill:#C8E6C9, color:#1B5E20
classDef Red stroke-width:1px, stroke-dasharray:none, stroke:#D32F2F, fill:#FFCDD2, color:#B71C1C
class n11,n12,n13,n14,n15 Aqua
class n5,n6,n10 Yellow
class n1 Green
class n7,n8,n9,n3,n4 Red
An Oracle server includes an Oracle Instance and an Oracle Database.
# Check if the listener is present
ps -edf | grep lsn

# Prompt the listener
lsnrctl
LSNRCTL> help
The following operations are available
An asterisk (*) denotes a modifier or extended command:

start stop status services
version reload save_config trace
spawn quit exit set*
show*

lsnrctl status
lsnrctl start

# Config
less /opt/oracle/product/12c/db/network/admin/listener.ora
# In the Oracle prompt
show parameter listener;
NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
listener_networks string
local_listener string LISTENER_TOTO
remote_listener string
The LISTENER_TOTO alias must be resolvable in tnsnames.ora.
# In the Oracle prompt
alter system set local_listener='LISTENER_TOTO' scope=both;
alter system register;
lsnrctl status

LSNRCTL for Linux: Version 12.2.0.1.0 - Production on 29-APR-2021 18:58:48
Copyright (c) 1991, 2016, Oracle. All rights reserved.
Connecting to (ADDRESS=(PROTOCOL=tcp)(HOST=)(PORT=1521))
STATUS of the LISTENER
------------------------
Alias LISTENER
Version TNSLSNR for Linux: Version 12.2.0.1.0 - Production
Start Date 29-APR-2021 18:11:13
Uptime 0 days 0 hr. 47 min. 34 sec
Trace Level off
Security ON: Local OS Authentication
SNMP OFF
Listener Log File /u01/oracle/base/diag/tnslsnr/myhost/listener/alert/log.xml
Listening Endpoints Summary...
 (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=myhost.example.com)(PORT=1521)))
Services Summary...
Service "+ASM" has 1 instance(s).
 Instance "+ASM", status READY, has 1 handler(s) for this service...
Service "+ASM_DATA" has 1 instance(s).
 Instance "+ASM", status READY, has 1 handler(s) for this service...
Service "+ASM_FRA" has 1 instance(s).
 Instance "+ASM", status READY, has 1 handler(s) for this service...
Service "IANA" has 1 instance(s).
 Instance "IANA", status READY, has 1 handler(s) for this service...
Service "IANAXDB" has 1 instance(s).
 Instance "IANA", status READY, has 1 handler(s) for this service...
The command completed successfully
Services have to be listed in the tnsnames.ora of the client hosts, e.g. as sketched below.
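A sketch of a tnsnames.ora entry for the "IANA" service shown above (host and port taken from the listener output):

IANA =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = myhost.example.com)(PORT = 1521))
    (CONNECT_DATA =
      (SERVICE_NAME = IANA)
    )
  )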
SELECT *
 FROM USER_OBJECTS
 WHERE object_type = 'PROCEDURE'
 AND object_name = 'grant_RW'
Grant SELECT rights on one schema to a role:

CREATE OR REPLACE PROCEDURE grant_RO_to_schema(
 username VARCHAR2,
 grantee VARCHAR2)
AS
BEGIN
 FOR r IN (
 SELECT owner, table_name
 FROM all_tables
 WHERE owner = username
 )
 LOOP
 EXECUTE IMMEDIATE
 'GRANT SELECT ON '||r.owner||'.'||r.table_name||' to ' || grantee;
 END LOOP;
END;
/

-- See if the procedure is ok --
SHOW ERRORS

CREATE ROLE '${ROLE_NAME}' NOT IDENTIFIED;
GRANT CONNECT TO '${ROLE_NAME}';
GRANT SELECT ANY SEQUENCE TO '${ROLE_NAME}';
GRANT CREATE ANY TABLE TO '${ROLE_NAME}';

-- Run the procedure --
EXEC grant_RO_to_schema('${SCHEMA}','${ROLE_NAME}')
su - oracle -c '
export SQLPLUS="sqlplus -S / as sysdba"
export ORAENV_ASK=NO;
export ORACLE_SID='${SID}';
. oraenv | grep -v "remains";

${SQLPLUS} <<EOF2
set lines 200 pages 2000;
CREATE OR REPLACE PROCEDURE grant_RW_to_schema(
 username VARCHAR2,
 grantee VARCHAR2)
AS
BEGIN
 FOR r IN (
 SELECT owner, table_name
 FROM all_tables
 WHERE owner = username
 )
 LOOP
 EXECUTE IMMEDIATE
 '\''GRANT SELECT,DELETE,UPDATE,INSERT,ALTER ON '\''||r.owner||'\''.'\''||r.table_name||'\'' to '\'' || grantee;
 END LOOP;
END;
/
CREATE ROLE '${ROLE_NAME}' NOT IDENTIFIED;
GRANT CONNECT TO '${ROLE_NAME}';
GRANT SELECT ANY SEQUENCE TO '${ROLE_NAME}';
GRANT CREATE ANY TABLE TO '${ROLE_NAME}';
GRANT CREATE ANY INDEX TO '${ROLE_NAME}';
EXEC grant_RW_to_schema('\'''${SCHEMA}''\'','\'''${ROLE_NAME}''\'')
exit;
EOF2
unset ORAENV_ASK;
'
-- This one works better:
CREATE OR REPLACE PROCEDURE grant_RW_to_schema(
myschema VARCHAR2,
myrole VARCHAR2)
AS
BEGIN
for t in (select owner,object_name,object_type from all_objects where owner=myschema and object_type in ('TABLE','VIEW','PROCEDURE','FUNCTION','PACKAGE')) loop
if t.object_type in ('TABLE','VIEW') then
EXECUTE immediate 'GRANT SELECT, UPDATE, INSERT, DELETE ON '||t.owner||'.'||t.object_name||' TO '|| myrole;
elsif t.object_type in ('PROCEDURE','FUNCTION','PACKAGE') then
EXECUTE immediate 'GRANT EXECUTE ON '||t.owner||'.'||t.object_name||' TO '|| myrole;
end if;
end loop;
end;
/
# Set the SID
ORAENV_ASK=NO
export ORACLE_SID=HANA
. oraenv

# Trigger a one-line command
echo -e "select inst_id, instance_name, host_name, database_status from gv\$instance;" | sqlplus -S / as sysdba
su - oracle -c '
export SQLPLUS="sqlplus -S / as sysdba"
export ORAENV_ASK=NO;
export ORACLE_SID='${SID}';
. oraenv | grep -v "remains";

${SQLPLUS} <<EOF2
set lines 200 pages 2000;
select inst_id, instance_name, host_name, database_status from gv\$instance;
exit;
EOF2

unset ORAENV_ASK;
'
-- With an absolute path
@C:\Users\Matthieu\test.sql

-- or run from the directory in which sqlplus was launched
@test.sql

-- START syntax possible as well
START test.sql
-- User variable (if not defined, Oracle will prompt)
SELECT * FROM &my_table;

-- Prompt the user to set a variable
ACCEPT my_table PROMPT "Which table would you like to interrogate? "
SELECT * FROM &my_table;
sqlplus command:
export ORACLE_SID=SQM2DWH3

echo "connect ODS/ODS
BEGIN
ODS.PURGE_ODS.PURGE_LOG();
ODS.PURGE_ODS.PURGE_DATA();
END;
/" | sqlplus /nolog

echo "connect DSA/DSA
BEGIN
DSA.PURGE_DSA.PURGE_LOG();
DSA.PURGE_DSA.PURGE_DATA();
END;
/" | sqlplus /nolog
tablespaces.sh
#!/bin/ksh

sqlplus -s system/manager <<!
SET HEADING off;
SET PAGESIZE 0;
SET TERMOUT OFF;
SET FEEDBACK OFF;
SELECT df.tablespace_name||','||
 df.bytes / (1024 * 1024)||','||
 SUM(fs.bytes) / (1024 * 1024)||','||
 Nvl(Round(SUM(fs.bytes) * 100 / df.bytes),1)||','||
 Round((df.bytes - SUM(fs.bytes)) * 100 / df.bytes)
 FROM dba_free_space fs,
 (SELECT tablespace_name,SUM(bytes) bytes FROM dba_data_files GROUP BY tablespace_name) df
 WHERE fs.tablespace_name (+) = df.tablespace_name
 GROUP BY df.tablespace_name,df.bytes
 ORDER BY 1 ASC;
quit
!

exit 0
#!/bin/ksh

sqlplus -s system/manager <<!

set pagesize 60 linesize 132 verify off
break on file_id skip 1

column file_id heading "File|Id"
column tablespace_name for a15
column object for a15
column owner for a15
column MBytes for 999,999

select tablespace_name,
'free space' owner, /*"owner" of free space */
' ' object, /*blank object name */
file_id, /*file id for the extent header*/
block_id, /*block id for the extent header*/
CEIL(blocks*4/1024) MBytes /*length of the extent, in Mega Bytes*/
from dba_free_space
where tablespace_name like '%TEMP%'
union
select tablespace_name,
substr(owner, 1, 20), /*owner name (first 20 chars)*/
substr(segment_name, 1, 32), /*segment name */
file_id, /*file id for extent header */
block_id, /*block id for extent header */
CEIL(blocks*4/1024) MBytes /*length of the extent, in Mega Bytes*/
from dba_extents
where tablespace_name like '%TEMP%'
order by 1, 4, 5
/

quit
!

exit 0
sqlplus:
SQL> SET TRIMSPOOL on
SQL> SET LINESIZE 1000
SQL> SPOOL /root/output.txt
SQL> select RULEID as RuleID, RULENAME as ruleName, to_char(DBMS_LOB.SUBSTR(EPLRULESTATEMENT,4000,1)) as ruleStmt from gep_rules;
SQL> SPOOL OFF
script.sql:
SET TRIMSPOOL on
SET LINESIZE 10000
SPOOL resultat.txt
ACCEPT var PROMPT "Which table do you want to get? "
SELECT * FROM &var;
SPOOL OFF
SQL> Create table emp as select * from employees;
SQL> UPDATE emp SET LAST_NAME='ABC';
SQL> commit;
S3cmd is a tool to handle S3-type object storage.
# Ubuntu install
sudo apt-get install s3cmd

# Redhat install
sudo dnf install s3cmd

# Or from sources
wget https://sourceforge.net/projects/s3tools/files/s3cmd/2.2.0/s3cmd-2.2.0.tar.gz
tar xzf s3cmd-2.2.0.tar.gz
cd s3cmd-2.2.0
sudo python3 setup.py install
Log in to the DigitalOcean Control Panel.
Navigate to API > Spaces Access Keys and generate a new key pair.
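With the key pair in hand, a minimal configuration and usage sketch (the fra1 endpoint and bucket name are examples):

# Interactive setup: enter the access key, the secret key,
# and the Spaces endpoint (e.g. fra1.digitaloceanspaces.com)
s3cmd --configure

# Basic usage
s3cmd mb s3://my-space
s3cmd put backup.tar.gz s3://my-space/
s3cmd ls s3://my-space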
# On your Azure CLI
az --version # Version expected: 2.1.0 or higher

az group delete --name kubernetes -y

az group create -n kubernetes -l westeurope

az network vnet create -g kubernetes \
 -n kubernetes-vnet \
 --address-prefix 10.240.0.0/24 \
 --subnet-name kubernetes-subnet

az network nsg create -g kubernetes -n kubernetes-nsg

az network vnet subnet update -g kubernetes \
 -n kubernetes-subnet \
 --vnet-name kubernetes-vnet \
 --network-security-group kubernetes-nsg

az network nsg rule create -g kubernetes \
 -n kubernetes-allow-ssh \
 --access allow \
 --destination-address-prefix '*' \
 --destination-port-range 22 \
 --direction inbound \
 --nsg-name kubernetes-nsg \
 --protocol tcp \
 --source-address-prefix '*' \
 --source-port-range '*' \
 --priority 1000

az network nsg rule create -g kubernetes \
 -n kubernetes-allow-api-server \
 --access allow \
 --destination-address-prefix '*' \
 --destination-port-range 6443 \
 --direction inbound \
 --nsg-name kubernetes-nsg \
 --protocol tcp \
 --source-address-prefix '*' \
 --source-port-range '*' \
 --priority 1001

az network nsg rule list -g kubernetes --nsg-name kubernetes-nsg --query "[].{Name:name, Direction:direction, Priority:priority, Port:destinationPortRange}" -o table

az network lb create -g kubernetes --sku Standard \
 -n kubernetes-lb \
 --backend-pool-name kubernetes-lb-pool \
 --public-ip-address kubernetes-pip \
 --public-ip-address-allocation static

az network public-ip list --query="[?name=='kubernetes-pip'].{ResourceGroup:resourceGroup, Region:location,Allocation:publicIpAllocationMethod,IP:ipAddress}" -o table
# For Ubuntu
# az vm image list --location westeurope --publisher Canonical --offer UbuntuServer --sku 18.04-LTS --all -o table
# For Redhat
# az vm image list --location westeurope --publisher RedHat --offer RHEL --sku 8 --all -o table
# => chosen one: 8-lvm-gen2
WHICHOS="RedHat:RHEL:8-lvm-gen2:8.5.2022032206"

# K8s Controller
az vm availability-set create -g kubernetes -n controller-as

for i in 0 1 2; do
 echo "[Controller ${i}] Creating public IP..."
 az network public-ip create -n controller-${i}-pip -g kubernetes --sku Standard > /dev/null
 echo "[Controller ${i}] Creating NIC..."
 az network nic create -g kubernetes \
  -n controller-${i}-nic \
  --private-ip-address 10.240.0.1${i} \
  --public-ip-address controller-${i}-pip \
  --vnet kubernetes-vnet \
  --subnet kubernetes-subnet \
  --ip-forwarding \
  --lb-name kubernetes-lb \
  --lb-address-pools kubernetes-lb-pool >/dev/null

 echo "[Controller ${i}] Creating VM..."
 az vm create -g kubernetes \
  -n controller-${i} \
  --image ${WHICHOS} \
  --nics controller-${i}-nic \
  --availability-set controller-as \
  --nsg '' \
  --admin-username 'kuberoot' \
  --admin-password 'Changeme!' \
  --size Standard_B2s \
  --storage-sku StandardSSD_LRS
 #--generate-ssh-keys > /dev/null
done

# K8s Worker
az vm availability-set create -g kubernetes -n worker-as
for i in 0 1; do
 echo "[Worker ${i}] Creating public IP..."
 az network public-ip create -n worker-${i}-pip -g kubernetes --sku Standard > /dev/null
 echo "[Worker ${i}] Creating NIC..."
 az network nic create -g kubernetes \
  -n worker-${i}-nic \
  --private-ip-address 10.240.0.2${i} \
  --public-ip-address worker-${i}-pip \
  --vnet kubernetes-vnet \
  --subnet kubernetes-subnet \
  --ip-forwarding > /dev/null
 echo "[Worker ${i}] Creating VM..."
 az vm create -g kubernetes \
  -n worker-${i} \
  --image ${WHICHOS} \
  --nics worker-${i}-nic \
  --tags pod-cidr=10.200.${i}.0/24 \
  --availability-set worker-as \
  --nsg '' \
  --generate-ssh-keys \
  --size Standard_B2s \
  --storage-sku StandardSSD_LRS \
  --admin-username 'kuberoot' \
  --admin-password 'Changeme!' > /dev/null
done

# Summarize
az vm list -d -g kubernetes -o table
# Simplest
arkade get doctl

# Normal way
curl -OL https://github.com/digitalocean/doctl/releases/download/v1.104.0/doctl-1.104.0-linux-amd64.tar.gz
tar xf doctl-1.104.0-linux-amd64.tar.gz
mv doctl /usr/local/bin

# Auto-completion for ZSH
doctl completion zsh > $ZSH/completions/_doctl
doctl compute region list
doctl compute size list
doctl compute image list-distribution
doctl compute image list --public
doctl auth init --context test
doctl auth list
doctl auth switch --context test2
doctl projects create --name rkub --environment staging --purpose "stage rkub with github workflows"
doctl compute ssh-key list
doctl compute droplet create test --region fra1 --image rockylinux-9-x64 --size s-1vcpu-1gb --ssh-keys <fingerprint>
doctl compute droplet delete test -f
export DO_PAT="dop_v1_xxxxxxxxxxxxxxxx"
doctl auth init --context rkub

# Inside a dir with a tf file
terraform init
terraform validate
terraform plan -var "do_token=${DO_PAT}"
terraform apply -var "do_token=${DO_PAT}" -auto-approve

# Clean apply
terraform plan -out=infra.tfplan -var "do_token=${DO_PAT}"
terraform apply infra.tfplan

# Control
terraform show terraform.tfstate

# Destroy
terraform plan -destroy -out=terraform.tfplan -var "do_token=${DO_PAT}"
terraform apply terraform.tfplan
Connect to the Droplet with the private ssh key: ssh root@$(terraform output -json ip_address_workers | jq -r '.[0]') -i .key
# Pre-check hardware for Intel CPUs
grep -e 'vmx' /proc/cpuinfo
lscpu | grep Virtualization
lsmod | grep kvm

# On a RHEL9 Workstation
sudo dnf install virt-install virt-viewer -y
sudo dnf install -y libvirt
sudo dnf install virt-manager -y
sudo dnf install -y virt-top libguestfs-tools guestfs-tools
sudo gpasswd -a $USER libvirt

# Helper
sudo dnf -y install bridge-utils

# Start libvirt
sudo systemctl start libvirtd
sudo systemctl enable libvirtd
sudo systemctl status libvirtd
virsh nodeinfo
Important note: networks are created with the root user, but VMs with the current user (see the sketch below).
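A sketch of the two libvirt connections involved (the URIs are the standard libvirt ones):

# System connection (root): where the networks live
sudo virsh --connect qemu:///system net-list --all

# Session connection (current user): where the VMs live
virsh --connect qemu:///session list --all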
If you want to move VMs to another Storage Domain, you need to copy the template to it as well!
Remove a disk:
# If RHV no longer uses the disks, they should appear empty in lsblk:
lsblk -a
sdf 8:80 0 4T 0 disk
└─36001405893b456536be4d67a7f6716e3 253:38 0 4T 0 mpath
sdg 8:96 0 4T 0 disk
└─36001405893b456536be4d67a7f6716e3 253:38 0 4T 0 mpath
sdh 8:112 0 4T 0 disk
└─36001405893b456536be4d67a7f6716e3 253:38 0 4T 0 mpath
sdi 8:128 0 0 disk
└─360014052ab23b1cee074fe38059d7c94 253:39 0 100G 0 mpath
sdj 8:144 0 0 disk
└─360014052ab23b1cee074fe38059d7c94 253:39 0 100G 0 mpath
sdk 8:160 0 0 disk
└─360014052ab23b1cee074fe38059d7c94 253:39 0 100G 0 mpath

# Find all disks from the LUN ID
LUN_ID="360014054ce7e566a01d44c1a4758b092"
list_disk=$(dmsetup deps -o devname ${LUN_ID} | cut -f 2 | cut -c 3- | tr -d "()" | tr " " "\n")
echo ${list_disk}

# Remove from multipath
multipath -f "${LUN_ID}"

# Remove the disks
for i in ${list_disk}; do echo ${i}; blockdev --flushbufs /dev/${i}; echo 1 > /sys/block/${i}/device/delete; done

# You can check which disk links to which LUN on the CEPH side
ls -l /dev/disk/by-*
Since oVirt needs shared storage, we can create a local NFS to work around this point if there is no storage bay, e.g. as sketched below.
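A minimal local-NFS sketch (the export path is an example; oVirt expects the export to be owned by vdsm:kvm, uid/gid 36):

dnf install -y nfs-utils
mkdir -p /exports/data
chown 36:36 /exports/data
echo "/exports/data *(rw,anonuid=36,anongid=36)" >> /etc/exports
exportfs -r
systemctl enable --now nfs-server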
# Generate a backup
engine-backup --scope=all --mode=backup --file=/root/backup --log=/root/backuplog

# Restore from a backup on a fresh install
engine-backup --mode=restore --file=file_name --log=log_file_name --provision-db --restore-permissions
engine-setup

# Restore a backup on an existing install
engine-cleanup
engine-backup --mode=restore --file=file_name --log=log_file_name --restore-permissions
engine-setup
# Put a host in maintenance mode manually
hosted-engine --vm-status
hosted-engine --set-maintenance --mode=global
hosted-engine --vm-status

# Remove maintenance mode
hosted-engine --set-maintenance --mode=none
hosted-engine --vm-status

# Upgrade the hosted-engine
hosted-engine --set-maintenance --mode=global
hosted-engine --vm-status
engine-upgrade-check
dnf update ovirt\*setup\* # update the setup packages
engine-setup # launch it to update the engine
/!\ Connecting individually with KVM virt-manager does not work: oVirt uses libvirt, but not the way plain KVM does…
Check hardware compatibility: Oracle Linux Hardware Certification List (HCL).
A minimum of two (2) KVM hosts and no more than seven (7).
A fully-qualified domain name for your engine and host, with forward and reverse lookup records set in the DNS.
/var/tmp: at least 10 GB of space.
Prepare shared storage (NFS or iSCSI) of at least 74 GB to be used as a data storage domain dedicated to the engine virtual machine. iSCSI needs to be discovered before the oVirt install.
ansible-galaxy collection list
# From the official Ansible Galaxy repo
ansible-galaxy collection install community.general

# From a local tarball
ansible-galaxy collection install ./community-general-6.0.0.tar.gz

# From a custom repo
ansible-galaxy collection install git+https://git.example.com/projects/namespace.collectionName.git
ansible-galaxy collection install git+https://git.example.com/projects/namespace.collectionName,v1.0.2
ansible-galaxy collection install git+https://git.example.com/namespace/collectionName.git

# From a requirements file
ansible-galaxy collection install -r ./requirement.yaml
collections:
- name: kubernetes.core

- source: https://gitlab.example.com/super-group/collector.git
  type: git
  version: "v1.0.6"

- source: https://gitlab.ipolicedev.int/another-projects/plates.git
  type: git
ansible-inventory --list | jq -r 'map_values(select(.hosts != null and (.hosts | contains(["myhost"])))) | keys[]'
kafka_host: "[{{ groups['KAFKA'] | map('extract', hostvars, 'inventory_hostname') | map('regex_replace', '^', '\"') | map('regex_replace', '\\\"', '\"') | map('regex_replace', '$', ':'+ kafka_port +'\"') | join(', ') }}]"

elasticsearch_host: "{{ groups['ELASTICSEARCH'] | map('extract', hostvars, 'inventory_hostname') | map('regex_replace', '^', '\"') | map('regex_replace', '\\\"', '\"') | map('regex_replace', '$', ':'+ elasticsearch_port +'\"') | join(', ') }}"
ansible-pull -U https://github.com/MozeBaltyk/Okub.git ./playbooks/tasks/provision.yml
#cloud-config
timezone: ${timezone}

packages:
  - qemu-guest-agent
  - git

package_update: true
package_upgrade: true


## Test 1
ansible:
  install_method: pip
  package_name: ansible-core
  run_user: ansible
  galaxy:
    actions:
      - ["ansible-galaxy", "collection", "install", "community.general"]
      - ["ansible-galaxy", "collection", "install", "ansible.posix"]
      - ["ansible-galaxy", "collection", "install", "ansible.utils"]
  pull:
    playbook_name: ./playbooks/tasks/provision.yml
    url: "https://github.com/MozeBaltyk/Okub.git"

## Test 2
ansible:
  install_method: pip
  package_name: ansible
  # run_user only with install_method: pip
  run_user: ansible
  setup_controller:
    repositories:
      - path: /home/ansible/Okub
        source: https://github.com/MozeBaltyk/Okub.git
    run_ansible:
      - playbook_dir: /home/ansible/Okub
        playbook_name: ./playbooks/tasks/provision.yml
########
systemctl --failed
systemctl list-jobs --after
journalctl -e
Check the user-data and config:
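A sketch of the usual cloud-init checks (paths are the standard cloud-init locations):

cloud-init status --long

# Rendered user-data and logs
cat /var/lib/cloud/instance/user-data.txt
less /var/log/cloud-init.log /var/log/cloud-init-output.log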
Buildah: is used to build Open Container Initiative (OCI) format or Docker format container images without the need for a daemon.
Podman: provides the ability to directly run container images without a daemon. Podman can pull container images from a container registry, if they are not available locally.
Skopeo: offers features for pulling and pushing containers to registries. Moving containers between registries is supported. Container image inspection is also offered and some introspective capabilities can be performed, without first downloading the container itself.
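A small sketch of typical skopeo calls (the image references are examples):

# Inspect a remote image without pulling it
skopeo inspect docker://docker.io/library/alpine:latest

# Copy an image between registries
skopeo copy docker://docker.io/library/alpine:latest docker://registry.example.com/mirror/alpine:latest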
# See the images available on your host
docker image list

# Equal to the above
docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
httpd latest 6fa26f20557b 45 hours ago 164MB
hello-world latest 75280d40a50b 4 months ago 1.69kB

# Give the sha
docker images --no-trunc=true

# Delete unused images
docker rmi $(docker images -q)
# Delete images without tags
docker rmi $(docker images | grep "^<none>" | awk '{print $3}')
dirs -c
for DIR in $(find ./examples -type d); do
 pushd $DIR
 terraform init
 terraform fmt -check
 terraform validate
 popd
done
Two possibilities:
See also documentation about Podman and Docker
# List the index catalog
curl https://registry.k3s.example.com/v2/_catalog | jq

# List the tags available for an image
curl https://registry.k3s.example.com/v2/myhaproxy/tags/list

# List the index catalog - with user/password
curl https://registry-admin:<PWD>@registry.k3s.example.com/v2/_catalog | jq

# List the index catalog - when you need to specify the CA
curl -u user:password https://<url>:<port>/v2/_catalog --cacert ca.crt | jq

# List the index catalog - for OCP
curl -u user:password https://<url>:<port>/v2/ocp4/openshift4/tags/list | jq

# Login to the registry with podman
podman login -u registry-admin -p <PWD> registry.k3s.example.com

# Push images into the registry
skopeo copy "--dest-creds=registry-admin:<PWD>" docker://docker.io/goharbor/harbor-core:v2.6.1 docker://registry.k3s.example.com/goharbor/harbor-core:v2.6.1
ip a
sudo vi /etc/docker/daemon.json

{
"insecure-registries": ["192.168.1.11:5000"]
}

sudo systemctl restart docker
docker info
Check docker config
Load the image
podman pull sonatype/nexus3:3.59.0
podman save sonatype/nexus3:3.59.0 -o nexus3.tar
podman load < nexus3.tar
Create a service inside /etc/systemd/system/container-nexus3.service with content below:
[Unit]
Description=Nexus Podman container
Wants=syslog.service

[Service]
User=nexus-system
Group=nexus-system
Restart=always
ExecStart=/usr/bin/podman run \
 --log-level=debug \
 --rm \
 -ti \
 --publish 8081:8081 \
 --name nexus \
 sonatype/nexus3:3.59.0

ExecStop=/usr/bin/podman stop -t 10 nexus

[Install]
WantedBy=multi-user.target
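Then, presumably, reload systemd and enable the unit:

sudo systemctl daemon-reload
sudo systemctl enable --now container-nexus3.service
sudo systemctl status container-nexus3.service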
Nothing original, it's just the Red Hat documentation, but it can be useful to kickstart a registry.
Prerequisites:
# Packages
sudo yum install -y podman
sudo yum install -y rsync
sudo yum install -y jq

# Get the tar
mirror="https://mirror.openshift.com/pub/openshift-v4/clients"
wget ${mirror}/mirror-registry/latest/mirror-registry.tar.gz
tar zxvf mirror-registry.tar.gz

# Get oc-mirror
curl https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/latest/oc-mirror.rhel9.tar.gz -O

# Basic install
sudo ./mirror-registry install \
 --quayHostname quay01.example.local \
 --quayRoot /opt

# More detailed install
sudo ./mirror-registry install \
 --quayHostname quay01.example.local \
 --quayRoot /srv \
 --quayStorage /srv/quay-storage \
 --pgStorage /srv/quay-pg \
 --sslCert tls.crt \
 --sslKey tls.key

podman login -u init \
 -p 7u2Dm68a1s3bQvz9twrh4Nel0i5EMXUB \
 quay01.example.local:8443 \
 --tls-verify=false

# By default the login goes into:
cat $XDG_RUNTIME_DIR/containers/auth.json

# Get the IP
sudo podman inspect --format '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' quay-app

# Uninstall
sudo ./mirror-registry uninstall -v \
 --quayRoot <example_directory_name>

# Info
curl -u init:password https://quay01.example.local:8443/v2/_catalog | jq
curl -u root:password https://<url>:<port>/v2/ocp4/openshift4/tags/list | jq

# Get an example of imageset
oc-mirror init --registry quay.example.com:8443/mirror/oc-mirror-metadata

# Get the list of Operators, channels, packages
oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14
oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14 --package=kubevirt-hyperconverged
oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14 --package=kubevirt-hyperconverged --channel=stable
QUAY_POSTGRES=`podman ps | grep quay-postgres | awk '{print $1}'`

podman exec -it $QUAY_POSTGRES psql -d quay -c "UPDATE public.\"user\" SET invalid_login_attempts = 0 WHERE username = 'init'"
- Firewalld activated; important, otherwise the routing to the app does not work (see the sketch after this list)
- Podman and jq installed
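A sketch of the firewalld openings matching the ports published by the unit below (3000 and 2222 are the ports Gitea exposes here):

sudo firewall-cmd --permanent --add-port=3000/tcp
sudo firewall-cmd --permanent --add-port=2222/tcp
sudo firewall-cmd --reload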
podman pull docker.io/gitea/gitea:1-rootless
podman save docker.io/gitea/gitea:1-rootless -o gitea-rootless.tar
podman load < gitea-rootless.tar
cat /etc/systemd/system/container-gitea-app.service
1# container-gitea-app.service
2[Unit]
3Description=Podman container-gitea-app.service
4
5Wants=network.target
6After=network-online.target
7RequiresMountsFor=/var/lib/containers/storage /var/run/containers/storage
8
9[Service]
10Environment=PODMAN_SYSTEMD_UNIT=%n
11Restart=on-failure
12TimeoutStopSec=70
13PIDFile=%t/container-gitea-app.pid
14Type=forking
15
16ExecStartPre=/bin/rm -f %t/container-gitea-app.pid %t/container-gitea-app.ctr-id
17ExecStart=/usr/bin/podman container run \
18 --conmon-pidfile %t/container-gitea-app.pid \
19 --cidfile %t/container-gitea-app.ctr-id \
20 --cgroups=no-conmon \
21 --replace \
22 --detach \
23 --tty \
24 --env DB_TYPE=sqlite3 \
25 --env DB_HOST=gitea-db:3306 \
26 --env DB_NAME=gitea \
27 --env DB_USER=gitea \
28 --env DB_PASSWD=9Oq6P9Tsm6j8J7c18Jxc \
29 --volume gitea-data-volume:/var/lib/gitea:Z \
30 --volume gitea-config-volume:/etc/gitea:Z \
31 --network gitea-net \
32 --publish 2222:2222 \
33 --publish 3000:3000 \
34 --label "io.containers.autoupdate=registry" \
35 --name gitea-app \
36 docker.io/gitea/gitea:1-rootless
37
38ExecStop=/usr/bin/podman container stop \
39 --ignore \
40 --cidfile %t/container-gitea-app.ctr-id \
41 -t 10
42
43ExecStopPost=/usr/bin/podman container rm \
44 --ignore \
45 -f \
46 --cidfile %t/container-gitea-app.ctr-id
47
48[Install]
49WantedBy=multi-user.target default.target
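The unit above references a named network and two named volumes; a sketch to create them and start the service (names taken from the unit):

```bash
sudo podman network create gitea-net
sudo podman volume create gitea-data-volume
sudo podman volume create gitea-config-volume
sudo systemctl daemon-reload
sudo systemctl enable --now container-gitea-app.service
```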
Configuration inside /var/lib/containers/storage/volumes/gitea-config-volume/_data/app.ini
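A minimal sketch of such an app.ini for the sqlite3 setup above; the ports match the unit, and the database path is an assumption based on the rootless image defaults:

```bash
# Hypothetical minimal app.ini (adjust paths/ports to your setup)
sudo tee /var/lib/containers/storage/volumes/gitea-config-volume/_data/app.ini <<'EOF' > /dev/null
[server]
HTTP_PORT = 3000
SSH_PORT  = 2222

[database]
DB_TYPE = sqlite3
PATH    = /var/lib/gitea/data/gitea.db
EOF
```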
1export RKE_VERSION=$(curl -s https://update.rke2.io/v1-release/channels | jq -r '.data[] | select(.id=="stable") | .latest' | awk -F"+" '{print $1}'| sed 's/v//')
2export CERT_VERSION=$(curl -s https://api.github.com/repos/cert-manager/cert-manager/releases/latest | jq -r .tag_name)
3export RANCHER_VERSION=$(curl -s https://api.github.com/repos/rancher/rancher/releases/latest | jq -r .tag_name)
4export LONGHORN_VERSION=$(curl -s https://api.github.com/repos/longhorn/longhorn/releases/latest | jq -r .tag_name)
5export NEU_VERSION=$(curl -s https://api.github.com/repos/neuvector/neuvector-helm/releases/latest | jq -r .tag_name)
1# ubuntu
2type -p curl >/dev/null || (sudo apt update && sudo apt install curl -y)
3curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \
4&& sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \
5&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
6&& sudo apt update \
7&& sudo apt install gh -y
8
9# Redhat
10sudo dnf install 'dnf-command(config-manager)'
11sudo dnf config-manager --add-repo https://cli.github.com/packages/rpm/gh-cli.repo
12sudo dnf install gh
1gh completion -s zsh > $ZSH/completions/_gh
1gh auth login -p ssh -h GitHub.com -s read:project,delete:repo,repo,workflow -w
2
3gh auth status
4github.com
5 ✓ Logged in to github.com as MorzeBaltyk ($HOME/.config/gh/hosts.yml)
6 ✓ Git operations for github.com configured to use ssh protocol.
7 ✓ Token: gho_************************************
8 ✓ Token scopes: delete_repo, gist, read:org, read:project, repo
One way:
https://glab.readthedocs.io/en/latest/intro.html
1# add token
2glab auth login --hostname mygitlab.example.com
3# view fork of dep installer
4glab repo view mygitlab.example.com/copain/project
5# clone fork of dep installer
6glab repo clone mygitlab.example.com/copain/project
1# Optimization
2puma['worker_processes'] = 16
3puma['worker_timeout'] = 60
4puma['min_threads'] = 1
5puma['max_threads'] = 4
6puma['per_worker_max_memory_mb'] = 2048
Generate CSR in /data/gitlab/csr/server_cert.cnf
1[req]
2default_bits = 2048
3distinguished_name = req_distinguished_name
4req_extensions = req_ext
5prompt = no
6
7[req_distinguished_name]
8C = PL
9ST = Poland
10L = Warsaw
11O = myOrg
12OU = DEV
13CN = gitlab.example.com
14
15[req_ext]
16subjectAltName = @alt_names
17
18[alt_names]
19DNS = gitlab.example.com
20IP = 192.168.01.01
1# Create CSR
2openssl req -new -newkey rsa:2048 -nodes -keyout gitlab.example.com.key -config /data/gitlab/csr/server_cert.cnf -out gitlab.example.com.csr
3
4openssl req -noout -text -in gitlab.example.com.csr
5
6# Sign your CSR with your PKI. If your PKI is a Windows one, you should get back a .CER file.
7
8# check info:
9openssl x509 -text -in gitlab.example.com.cer -noout
1### Push the cert/key into GitLab
2cp /tmp/gitlab.example.com.cer cert/gitlab.example.com.crt
3cp /tmp/gitlab.example.com.key cert/gitlab.example.com.key
4cp /tmp/gitlab.example.com.cer cert/192.168.01.01.crt
5cp /tmp/gitlab.example.com.key cert/192.168.01.01.key
6
7### Push the rootCA into GitLab
8cp /etc/pki/ca-trust/source/anchors/domain-issuing.crt /data/gitlab/config/trusted-certs/domain-issuing.crt
9cp /etc/pki/ca-trust/source/anchors/domain-rootca.crt /data/gitlab/config/trusted-certs/domain-rootca.crt
10
11### Reconfigure
12vi /data/gitlab/config/gitlab.rb
13docker exec gitlab bash -c 'update-ca-certificates'
14docker exec gitlab bash -c 'gitlab-ctl reconfigure'
15
16### Stop / Start
17docker stop gitlab
18docker rm gitlab
19docker run -d -p 5050:5050 -p 2289:22 -p 443:443 --restart=always \
20-v /data/gitlab/config:/etc/gitlab \
21-v /data/gitlab/logs:/var/log/gitlab \
22-v /data/gitlab/data:/var/opt/gitlab \
23-v /data/gitlab/cert:/etc/gitlab/ssl \
24-v /data/gitlab/config/trusted-certs:/usr/local/share/ca-certificates \
25--name gitlab gitlab/gitlab-ce:15.0.5-ce.0
1docker exec gitlab bash -c 'gitlab-ctl status'
2docker exec -it gitlab gitlab-rake gitlab:check SANITIZE=true
3docker exec -it gitlab gitlab-rake gitlab:env:info
1docker exec -it gitlab gitlab-rake gitlab:backup:create --trace
2
3#Alternate way to do it
4docker exec gitlab bash -c 'gitlab-backup create'
5docker exec gitlab bash -c 'gitlab-backup create SKIP=repositories'
6docker exec gitlab bash -c 'gitlab-backup create SKIP=registry'
1# Restore
2gitlab-ctl reconfigure
3gitlab-ctl start
4gitlab-ctl stop unicorn
5gitlab-ctl stop sidekiq
6gitlab-ctl status
7ls -lart /var/opt/gitlab/backups
8
9docker exec -it gitlab gitlab-rake gitlab:backup:restore --trace
10docker exec -it gitlab gitlab-rake gitlab:backup:restore BACKUP=1537738690_2018_09_23_10.8.3 --trace
11
12# Restart
13docker exec gitlab bash -c 'gitlab-ctl restart'
sudo docker exec -it gitlab gitlab-rake gitlab:check
sudo docker exec -it gitlab gitlab-rake gitlab:doctor:secrets
Gita is an open-source project in Python to handle a large number of repositories, available: Here
1# Install
2pip3 install -U gita
3
4# add repo in gita
5gita add dcc/ssg/toolset
6gita add -r dcc/ssg # recursively add
7gita add -a dcc # recursively add and auto-group based on folder structure
8
9# create a group
10gita group add docs -n ccn
11
12# Checks
13gita ls
14gita ll -g
15gita group ls
16gita group ll
17gita st dcc
18
19# Use
20gita pull ccn
21gita push ccn
22
23gita freeze
GIT is a distributed version control system that was created by Linus Torvalds, the mastermind of Linux itself. It was designed to be a superior version control system to those that were readily available, the two most common of these being CVS and Subversion (SVN). Whereas CVS and SVN use the Client/Server model for their systems, GIT operates a little differently. Instead of downloading a project, making changes, and uploading it back to the server, GIT makes the local machine act as a server. Tecmint
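A quick illustration of that model: every clone is a full repository, so a local clone can itself serve as the remote (URL and names are hypothetical):

```bash
git clone https://github.com/example/project.git   # hypothetical URL
git clone project project-copy    # clone from the local clone: it acts as the "server"
cd project-copy && git log --oneline   # the full history is available offline
```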
1helm list -A
2NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
3nesux3 default 1 2022-08-12 20:01:16.0982324 +0200 CEST deployed nexus3-1.0.6 3.37.3
1helm status nesux3
2helm uninstall nesux3
3helm install nexus3 <chart>
4helm history nexus3
5
6# works even if already installed
7helm upgrade --install ingress-nginx ${DIR}/helm/ingress-nginx \
8 --namespace=ingress-nginx \
9 --create-namespace \
10 -f ${DIR}/helm/ingress-values.yml
11
12# Make helm forget an app (it does not delete the app)
13kubectl delete secret -l owner=helm,name=argo-cd
1#Handle repo
2helm repo list
3helm repo add gitlab https://charts.gitlab.io/
4helm repo update
5
6# Pretty useful to see what can be configured
7helm show values elastic/eck-operator
8helm show values grafana/grafana --version 8.5.1
9
10#See different version available
11helm search repo hashicorp/vault
12helm search repo hashicorp/vault -l
13
14# download a chart
15helm fetch ingress/ingress-nginx --untar
1helm template -g longhorn-1.4.1.tgz |yq -N '..|.image? | select(. == "*" and . != null)'|sort|uniq|grep ":"|egrep -v '*:[[:blank:]]' || echo ""
1# Default one
2KUBECONFIG=~/.kube/config
3
4# Several contexts - to keep them split
5KUBECONFIG=~/.kube/k3sup-lab:~/.kube/k3s-dev
6
7# Or can be specified in command
8kubectl get pods --kubeconfig=admin-kube-config
1kubectl config view
2kubectl config current-context
3
4kubectl config set-context \
5dev-context \
6--namespace=dev-namespace \
7--cluster=docker-desktop \
8--user=dev-user
9
10kubectl config use-context lab
1# Set the namespace
2kubectl config set-context --current --namespace=nexus3
3kubectl config get-contexts
The problem with kubeconfigs is that contexts get nested into one kubeconfig file, which is difficult to manage in the long term.
A good tool to manage them is kubecm; the best way to install it is with arkade: arkade get kubecm - see arkade. A typical workflow is sketched below.
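A sketch of typical kubecm usage for merging and switching contexts (subcommand names are an assumption; verify with kubecm --help):

```bash
arkade get kubecm
kubecm add -f ~/.kube/k3s-dev   # merge a kubeconfig into ~/.kube/config
kubecm ls                       # list merged contexts
kubecm switch                   # interactively switch context
```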
vi dns.yml
1apiVersion: v1
2kind: Pod
3metadata:
4 name: dnsutils
5 namespace: default
6spec:
7 containers:
8 - name: dnsutils
9 image: registry.k8s.io/e2e-test-images/jessie-dnsutils:1.3
10 command:
11 - sleep
12 - "infinity"
13 imagePullPolicy: IfNotPresent
14 restartPolicy: Always
1k apply -f dns.yml
2pod/dnsutils created
3
4kubectl get pods dnsutils
5NAME READY STATUS RESTARTS AGE
6dnsutils 1/1 Running 0 36s
1kubectl exec -i -t dnsutils -- nslookup kubernetes.default
2;; connection timed out; no servers could be reached
3command terminated with exit code 1
4
5kubectl exec -ti dnsutils -- cat /etc/resolv.conf
6search default.svc.cluster.local svc.cluster.local cluster.local psflab.local
7nameserver 10.43.0.10
8options ndots:5
9
10kubectl get endpoints kube-dns --namespace=kube-system
11NAME ENDPOINTS AGE
12kube-dns 10.42.0.6:53,10.42.0.6:53,10.42.0.6:9153 5d1h
13
14kubectl get svc kube-dns --namespace=kube-system
15NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
16kube-dns ClusterIP 10.43.0.10 <none> 53/UDP,53/TCP,9153/TCP 5d1h
1cat << EOF > curl.yml
2apiVersion: v1
3kind: Pod
4metadata:
5 name: curl
6 namespace: default
7spec:
8 containers:
9 - name: curl
10 image: curlimages/curl
11 command:
12 - sleep
13 - "infinity"
14 imagePullPolicy: IfNotPresent
15 restartPolicy: Always
16EOF
17
18k apply -f curl.yml
19
20# Test the DNS
21kubectl exec -i -t curl -- curl -v telnet://10.43.0.10:53
22kubectl exec -i -t curl -- curl -v telnet://kube-dns.kube-system.svc.cluster.local:53
23kubectl exec -i -t curl -- nslookup kube-dns.kube-system.svc.cluster.local
24
25curl -k -I --resolve subdomain.domain.com:443:52.165.230.62 https://subdomain.domain.com/
1# Get latest version
2OKD_VERSION=$(curl -s https://api.github.com/repos/okd-project/okd/releases/latest | jq -r .tag_name)
3
4# Download
5curl -L https://github.com/okd-project/okd/releases/download/${OKD_VERSION}/openshift-install-linux-${OKD_VERSION}.tar.gz -O
6curl -L https://github.com/okd-project/okd/releases/download/${OKD_VERSION}/openshift-client-linux-${OKD_VERSION}.tar.gz -O
7
8# Download FCOS iso
9./openshift-install coreos print-stream-json | grep '\.iso[^.]'
10./openshift-install coreos print-stream-json | jq .architectures.x86_64.artifacts.metal.formats.iso.disk.location
11./openshift-install coreos print-stream-json | jq .architectures.x86_64.artifacts.vmware.formats.ova.disk.location
12./openshift-install coreos print-stream-json | jq '.architectures.x86_64.artifacts.digitalocean.formats["qcow2.gz"].disk.location'
13./openshift-install coreos print-stream-json | jq '.architectures.x86_64.artifacts.qemu.formats["qcow2.gz"].disk.location'
14./openshift-install coreos print-stream-json | jq '.architectures.x86_64.artifacts.metal.formats.pxe | .. | .location? // empty'
1kind: ImageSetConfiguration
2apiVersion: mirror.openshift.io/v1alpha2
3archiveSize: 4
4storageConfig:
5 registry:
6 imageURL: quay.example.com:8443/mirror/oc-mirror-metadata
7 skipTLS: false
8mirror:
9 platform:
10 architectures:
11 - "amd64"
12 channels:
13 - name: stable-4.14
14 type: ocp
15 shortestPath: true
16 graph: true
17 operators:
18 - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.14
19 packages:
20 - name: kubevirt-hyperconverged
21 channels:
22 - name: 'stable'
23 - name: serverless-operator
24 channels:
25 - name: 'stable'
26 additionalImages:
27 - name: registry.redhat.io/ubi9/ubi:latest
28 helm: {}
1# install oc-mirror:
2curl https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/latest/oc-mirror.rhel9.tar.gz -O
3
4# Get an example of imageset
5oc-mirror init --registry quay.example.com:8443/mirror/oc-mirror-metadata
6
7# Find operators in the list of Operators, channels, packages
8oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14
9oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14 --package=kubevirt-hyperconverged
10oc-mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.14 --package=kubevirt-hyperconverged --channel=stable
11
12# mirror with a jumphost which has online access
13oc-mirror --config=imageset-config.yaml docker://quay.example.com:8443
14
15# mirror for airgap
16oc-mirror --config=imageSetConfig.yaml file://tmp/download
17oc-mirror --from=/tmp/upload/ docker://quay.example.com/ocp/operators
18
19# Refresh OperatorHub
20oc get pod -n openshift-marketplace
21
22# Get the index pod and delete it to refresh
23oc delete pod cs-redhat-operator-index-m2k2n -n openshift-marketplace
1## Get the CoreOS image which is going to be installed
2openshift-install coreos print-stream-json | grep '\.iso[^.]'
3
4openshift-install create install-config
5
6openshift-install create manifests
7
8openshift-install create ignition-configs
9
10openshift-install create cluster --dir . --log-level=info
11openshift-install destroy cluster --log-level=info
1dd if=$HOME/ocp-latest/rhcos-live.iso of=/dev/sdb bs=1024k status=progress
1export OPENSHIFT_CLUSTER_ID=$(oc get clusterversion -o jsonpath='{.items[].spec.clusterID}')
2export CLUSTER_REQUEST=$(jq --null-input --arg openshift_cluster_id "$OPENSHIFT_CLUSTER_ID" '{
3 "api_vip_dnsname": "<api_vip>",
4 "openshift_cluster_id": $openshift_cluster_id,
5 "name": "<openshift_cluster_name>"
6}')
1openshift-install explain installconfig.platform.libvirt
1## none
2platform:
3 none: {}
4
5## baremetal - use ipmi to provision baremetal
6platform:
7 baremetal:
8 apiVIP: 192.168.111.5
9 ingressVIP: 192.168.111.7
10 provisioningNetwork: "Managed"
11 provisioningNetworkCIDR: 172.22.0.0/24
12 provisioningNetworkInterface: eno1
13 clusterProvisioningIP: 172.22.0.2
14 bootstrapProvisioningIP: 172.22.0.3
15 hosts:
16 - name: master-0
17 role: master
18 bmc:
19 address: ipmi://192.168.111.1
20 username: admin
21 password: password
22 bootMACAddress: 52:54:00:a1:9c:ae
23 hardwareProfile: default
24 - name: master-1
25 role: master
26 bmc:
27 address: ipmi://192.168.111.2
28 username: admin
29 password: password
30 bootMACAddress: 52:54:00:a1:9c:af
31 hardwareProfile: default
32 - name: master-2
33 role: master
34 bmc:
35 address: ipmi://192.168.111.3
36 username: admin
37 password: password
38 bootMACAddress: 52:54:00:a1:9c:b0
39 hardwareProfile: default
40
41## vsphere - old syntax and deprecated form (new one in 4.15 with "failure domains")
42vsphere:
43 vcenter:
44 username:
45 password:
46 datacenter:
47 defaultDatastore:
48 apiVIPs:
49 - x.x.x.x
50 ingressVIPs:
51 - x.x.x.x
52
53## new syntax
54platform:
55 vsphere:
56 apiVIPs:
57 - x.x.x.x
58 datacenter: xxxxxxxxxxxx_datacenter
59 defaultDatastore: /xxxxxxxxxxxx_datacenter/datastore/Shared Storages/ssd-001602
60 failureDomains:
61 - name: CNV4
62 region: fr
63 server: xxxxxxxxxxxx.ovh.com
64 topology:
65 computeCluster: /xxxxxxxxxxxx_datacenter/host/Management Zone Cluster
66 datacenter: xxxxxxxxxxxx_datacenter
67 datastore: /xxxxxxxxxxxx_datacenter/datastore/Shared Storages/ssd-001602
68 networks:
69 - vds_mgmt
70 zone: dc
71 ingressVIPs:
72 - x.x.x.x
73 password: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
74 username: admin
75 vCenter: xxxxxxxxxxx.ovh.com
76 vcenters:
77 - datacenters:
78 - xxxxxxxxxx_datacenter
79 password: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
80 port: 443
81 server: xxxxxxx.ovh.com
82 user: admin
1# Get Cluster ID
2oc get clusterversion -o jsonpath='{.items[].spec.clusterID}'
3
4# Get Nodes which are Ready
5oc get nodes --output jsonpath='{range .items[?(@.status.conditions[-1].type=="Ready")]}{.metadata.name} {.status.conditions[-1].type}{"\n"}{end}'
6
7# get images from all pods in a namespace
8oc get pods -n <namespace> --output jsonpath='{range .items[*]}{.spec.containers[*].image}{"\n"}{end}'
1oc get catalogsources -n openshift-marketplace
1images=$(helm template -g $helm |yq -N '..|.image? | select(. == "*" and . != null)'|sort|uniq|grep ":"|egrep -v '*:[[:blank:]]' || echo "")
1load_helm_images(){
2 # look in helm charts
3 for helm in $(ls ../../roles/*/files/helm/*.tgz); do
4 printf "\e[1;34m[INFO]\e[m Look for images in ${helm}...\n"
5
6 images=$(helm template -g $helm |yq -N '..|.image? | select(. == "*" and . != null)'|sort|uniq|grep ":"|egrep -v '*:[[:blank:]]' || echo "")
7
8 dir=$( dirname $helm | xargs dirname )
9
10 echo "####"
11
12 if [ "$images" != "" ]; then
13 printf "\e[1;34m[INFO]\e[m Images found in the helm charts: ${images}\n"
14 printf "\e[1;34m[INFO]\e[m Create ${dir}/images images...\n"
15
16 mkdir -p ${dir}/images
17
18      while IFS= read -r image_name; do
19 archive_name=$(basename -a $(awk -F : '{print $1}'<<<${image_name}));
20 printf "\e[1;34m[INFO]\e[m Pull images...\n"
21 podman pull ${image_name};
22 printf "\e[1;34m[INFO]\e[m Push ${image_name} in ${dir}/images/${archive_name}\n"
23 podman save ${image_name} --format oci-archive -o ${dir}/images/${archive_name};
24 done <<< ${images}
25 else
26 printf "\e[1;34m[INFO]\e[m No Images found in the helm charts: $helm\n"
27 fi
28 done
29}
1function checkComponentsInstall() {
2 componentsArray=("kubectl" "helm")
3 for i in "${componentsArray[@]}"; do
4 command -v "${i}" >/dev/null 2>&1 ||
5 { echo "[ERROR] ${i} is required, but it's not installed. Aborting." >&2; exit 1; }
6 done
7}
1function checkK8sVersion() {
2  currentK8sVersion=$(kubectl version --short | awk '/Server Version/ {gsub(/^v/,"",$3); print $3}')
3 testVersionComparator 1.20 "$currentK8sVersion" '<'
4 if [[ $k8sVersion == "ok" ]]; then
5 echo "current kubernetes version is ok"
6 else
7 minikube start --kubernetes-version=v1.22.4;
8 fi
9}
10
11
12# the comparator based on https://stackoverflow.com/a/4025065
13versionComparator () {
14 if [[ $1 == $2 ]]
15 then
16 return 0
17 fi
18 local IFS=.
19 local i ver1=($1) ver2=($2)
20 # fill empty fields in ver1 with zeros
21 for ((i=${#ver1[@]}; i<${#ver2[@]}; i++))
22 do
23 ver1[i]=0
24 done
25 for ((i=0; i<${#ver1[@]}; i++))
26 do
27 if [[ -z ${ver2[i]} ]]
28 then
29 # fill empty fields in ver2 with zeros
30 ver2[i]=0
31 fi
32 if ((10#${ver1[i]} > 10#${ver2[i]}))
33 then
34 return 1
35 fi
36 if ((10#${ver1[i]} < 10#${ver2[i]}))
37 then
38 return 2
39 fi
40 done
41 return 0
42}
43
44testVersionComparator () {
45 versionComparator $1 $2
46 case $? in
47 0) op='=';;
48 1) op='>';;
49 2) op='<';;
50 esac
51 if [[ $op != "$3" ]]
52 then
53 echo "Kubernetes test fail: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'"
54 k8sVersion="not ok"
55 else
56 echo "Kubernetes test pass: '$1 $op $2'"
57 k8sVersion="ok"
58 fi
59}
Some time ago, I made a small shell script to handle Vault on a Kubernetes cluster. For documentation purposes.
1#!/bin/bash
2
3## Variables
4DIRNAME=$(dirname $0)
5DEFAULT_VALUE="vault/values-override.yaml"
6NewAdminPasswd="PASSWORD"
7PRIVATE_REGISTRY_USER="registry-admin"
8PRIVATE_REGISTRY_PASSWORD="PASSWORD"
9PRIVATE_REGISTRY_ADDRESS="registry.example.com"
10DOMAIN="example.com"
11INGRESS="vault.${DOMAIN}"
12
13if [ -z ${CM_NS+x} ];then
14 CM_NS='your-namespace'
15fi
16
17if [ -z ${1+x} ]; then
18 VALUES_FILE="${DIRNAME}/${DEFAULT_VALUE}"
19 echo -e "\n[INFO] Using default values file '${DEFAULT_VALUE}'"
20else
21 if [ -f $1 ]; then
22 echo -e "\n[INFO] Using values file $1"
23 VALUES_FILE=$1
24 else
25    echo -e "\n[ERROR] File does not exist: $1"
26 exit 1
27 fi
28fi
29
30## Functions
31function checkComponentsInstall() {
32 componentsArray=("kubectl" "helm")
33 for i in "${componentsArray[@]}"; do
34 command -v "${i}" >/dev/null 2>&1 ||
35 { echo "${i} is required, but it's not installed. Aborting." >&2; exit 1; }
36 done
37}
38
39function createSecret() {
40kubectl get secret -n ${CM_NS} registry-pull-secret --no-headers 2> /dev/null \
41|| \
42kubectl create secret docker-registry -n ${CM_NS} registry-pull-secret \
43 --docker-server=${PRIVATE_REGISTRY_ADDRESS} \
44 --docker-username=${PRIVATE_REGISTRY_USER} \
45 --docker-password=${PRIVATE_REGISTRY_PASSWORD}
46}
47
48function installWithHelm() {
49helm dep update ${DIRNAME}/helm
50
51helm upgrade --install vault ${DIRNAME}/helm \
52--namespace=${CM_NS} --create-namespace \
53--set global.imagePullSecrets.[0]=registry-pull-secret \
54--set global.image.repository=${PRIVATE_REGISTRY_ADDRESS}/hashicorp/vault-k8s \
55--set global.agentImage.repository=${PRIVATE_REGISTRY_ADDRESS}/hashicorp/vault \
56--set ingress.hosts.[0]=${INGRESS} \
57--set ingress.enabled=true \
58--set global.leaderElection.namespace=${CM_NS}
59
60echo -e "\n[INFO] sleep 30s" && sleep 30
61}
62
63checkComponentsInstall
64createSecret
65installWithHelm
Allow the local Kubernetes cluster to create and reach secrets on the Vault:
1# Add the pod and service networks to the trusted zone so the two can interconnect
2sudo firewall-cmd --permanent --zone=trusted --add-source=10.42.0.0/16 #pods
3sudo firewall-cmd --permanent --zone=trusted --add-source=10.43.0.0/16 #services
4sudo firewall-cmd --reload
5sudo firewall-cmd --list-all-zones
6
7# on Master
8sudo rm -f /var/lib/cni/networks/cbr0/lock
9sudo /usr/local/bin/k3s-killall.sh
10sudo systemctl restart k3s
11sudo systemctl status k3s
12
13# on Worker
14sudo rm -f /var/lib/cni/networks/cbr0/lock
15sudo /usr/local/bin/k3s-killall.sh
16sudo systemctl restart k3s-agent
17sudo systemctl status k3s-agent
1# Get CA from K3s master
2openssl s_client -connect localhost:6443 -showcerts < /dev/null 2>&1 | openssl x509 -noout -enddate
3openssl s_client -showcerts -connect 193.168.51.103:6443 < /dev/null 2>/dev/null|openssl x509 -outform PEM
4openssl s_client -showcerts -connect 193.168.51.103:6443 < /dev/null 2>/dev/null|openssl x509 -outform PEM | base64 | tr -d '\n'
5
6# Check end date:
7for i in `ls /var/lib/rancher/k3s/server/tls/*.crt`; do echo $i; openssl x509 -enddate -noout -in $i; done
8
9# More efficient:
10cd /var/lib/rancher/k3s/server/tls/
11for crt in *.crt; do printf '%s: %s\n' "$(date --date="$(openssl x509 -enddate -noout -in "$crt"|cut -d= -f 2)" --iso-8601)" "$crt"; done | sort
12
13# Check CA issuer
14for i in $(find . -maxdepth 1 -type f -name "*.crt"); do openssl x509 -in ${i} -noout -issuer; done
Nice gist to troubleshoot etcd link
Operators come in 3 kinds: Go, Ansible, and Helm.
1## Init an Ansible project
2operator-sdk init --plugins=ansible --domain example.org --owner "Your name"
3
4## Command above will create a structure like:
5netbox-operator
6├── Dockerfile
7├── Makefile
8├── PROJECT
9├── config
10│   ├── crd
11│   ├── default
12│   ├── manager
13│   ├── manifests
14│   ├── prometheus
15│   ├── rbac
16│   ├── samples
17│   ├── scorecard
18│   └── testing
19├── molecule
20│   ├── default
21│   └── kind
22├── playbooks
23│   └── install.yml
24├── requirements.yml
25├── roles
26│   └── deployment
27└── watches.yaml
1## Create first role
2operator-sdk create api --group app --version v1alpha1 --kind Deployment --generate-role
Interesting example from the justfile documentation:
it creates a temp directory with mktemp and sets it in a variable; then, by concatenation, you get a full path to the tar.gz.
The recipe "publish" then creates the artifact and pushes it to a server.
1tmpdir := `mktemp -d` # Create a tmp directory
2version := "0.2.7"
3tardir := tmpdir / "awesomesauce-" + version
4tarball := tardir + ".tar.gz" # use tmpfile path to create a tarball
5
6publish:
7 rm -f {{tarball}}
8 mkdir {{tardir}}
9 cp README.md *.c {{tardir}}
10 tar zcvf {{tarball}} {{tardir}}
11 scp {{tarball}} me@server.com:release/
12 rm -rf {{tarball}} {{tardir}}
This one can be really useful to define a default value which can be redefined with an env variable (see the sketch below). Note that in recipes, $$ escapes the dollar sign for the shell, e.g. $$var or $$( python -c 'import sys; print(sys.implementation.name)' ).
T ?= foo # give a default value
T := $(shell whoami) # execute the shell immediately to put the result in the var
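A small sketch of the difference, assuming GNU make (the recipe line must start with a tab):

```bash
cat <<'EOF' > Makefile
T ?= foo                # default, can be overridden by the environment
WHO := $(shell whoami)  # evaluated immediately, once, at parse time

show:
	@echo "T=$(T) WHO=$(WHO)"
EOF

make show          # -> T=foo
T=bar make show    # -> T=bar (environment overrides ?=)
make show T=baz    # -> T=baz (command line overrides everything)
```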
Example 1
1SUBDIRS = foo bar baz
2
3## dir is a Shell variables
4## SUBDIR and MAKE are Internal make variables
5subdirs:
6 for dir in $(SUBDIRS); do \
7 $(MAKE) -C $$dir; \
8 done
Example 2
1SUBDIRS = foo bar baz
2
3.PHONY: subdirs $(SUBDIRS)
4subdirs: $(SUBDIRS)
5$(SUBDIRS):
6 $(MAKE) -C $@
7foo: baz
1git clone xxx /tmp/xxx && make -C /tmp/xxx
2make download # downloads the container
3make build # builds the binary
4make # puts it into /usr/local/bin
5make clean
6make help
1# Install GO
2GO_VERSION="1.21.0"
3wget https://go.dev/dl/go${GO_VERSION}.linux-amd64.tar.gz
4sudo tar -C /usr/local -xzf go${GO_VERSION}.linux-amd64.tar.gz
5export PATH=$PATH:/usr/local/go/bin
6
7# Install Cobra - CLI builder
8go install github.com/spf13/cobra-cli@latest
9sudo cp -pr ./go /usr/local/.
1mkdir -p ${project} && cd ${project}
2go mod init ${project}
3cobra-cli init
4go build
5go install
6cobra-cli add timezone
CUE stands for Configure, Unify, Execute
1# Install GO
2GO_VERSION="1.21.0"
3wget https://go.dev/dl/go${GO_VERSION}.linux-amd64.tar.gz
4sudo tar -C /usr/local -xzf go${GO_VERSION}.linux-amd64.tar.gz
5export PATH=$PATH:/usr/local/go/bin
6
7go install cuelang.org/go/cmd/cue@latest
8sudo cp -pr ./go /usr/local/.
9
10# or use Container
11printf "\e[1;34m[INFO]\e[m Install CUElang:\n";
12podman pull docker.io/cuelang/cue:latest
top -> schema -> constraint -> data -> bottom
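A tiny sketch of that ordering, each level unifying (&) the previous one into something more specific:

```bash
cat <<'EOF' > lattice.cue
schema: { port: int }                 // schema: types only
constraint: schema & { port: >1024 }  // constraint: narrowed values
data: constraint & { port: 8080 }     // data: fully concrete
EOF

cue eval lattice.cue   # evaluates and prints the unified result
```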
1# import a file
2cue import imageset-config.yaml
3
4# Validate
5cue vet imageset-config.cue imageset-config.yaml
Some basic examples:
1// This is a comment
2_greeting: "Welcome" // Hidden fields start with "_"
3#project: "CUE" // Definitions start with "#"
4
5message: "\(_greeting) to \(#project)!" // Regular fields are exported
6
7#Person: {
8 age: number // Mandatory field, must be a number
9 hobbies?: [...string] // Not mandatory, but if present must be a list of strings
10}
11
12// Constraint which references #Person and checks the age
13#Adult: #Person & {
14 age: >=18
15}
16
17// =~ matches a regular expression
18#Phone: string & =~ "[0-9]+"
19
20// Mapping
21instanceType: {
22 web: "small"
23 app: "medium"
24 db: "large"
25}
26
27server1: {
28 role: "app"
29 instance: instanceType[role]
30}
31
32// server1.instance: "medium"
1# executable CUE files have the extension "_tool.cue"
2
3# usage
4cue cmd prompter
1package foo
2
3import (
4 "tool/cli"
5 "tool/exec"
6 "tool/file"
7)
8
9// moved to the data.cue file to show how we can reference "pure" Cue files
10city: "Amsterdam"
11
12// A command named "prompter"
13command: prompter: {
14
15 // save transcript to this file
16 var: {
17 file: *"out.txt" | string @tag(file)
18 } // you can use "-t flag=filename.txt" to change the output file, see "cue help injection" for more details
19
20 // prompt the user for some input
21 ask: cli.Ask & {
22 prompt: "What is your name?"
23 response: string
24 }
25
26 // run an external command, starts after ask
27 echo: exec.Run & {
28 // note the reference to ask and city here
29 cmd: ["echo", "Hello", ask.response + "!", "Have you been to", city + "?"]
30 stdout: string // capture stdout, don't print to the terminal
31 }
32
33 // append to a file, starts after echo
34 append: file.Append & {
35 filename: var.file
36 contents: echo.stdout // because we reference the echo task
37 }
38
39 // also starts after echo, and concurrently with append
40 print: cli.Print & {
41 text: echo.stdout // write the output to the terminal since we captured it previously
42 }
43}
1# Import values with connection details
2. .\values.ps1
3
4$scriptFilePath ="$MyPath\Install\MysqlBase\Script.sql"
5
6# Load the required DLL file (depend on your connector)
7[void][System.Reflection.Assembly]::LoadFrom("C:\Program Files (x86)\MySQL\MySQL Connector Net 8.0.23\Assemblies\v4.5.2\MySql.Data.dll")
8
9# Load in var the SQL script file
10$scriptContent = Get-Content -Path $scriptFilePath -Raw
11
12# Execute the modified SQL script
13$Connection = [MySql.Data.MySqlClient.MySqlConnection]@{
14 ConnectionString = "server=$MysqlIP;uid=$MysqlUser;Port=3306;user id=$MysqlUser;pwd=$MysqlPassword;database=$MysqlDatabase;pooling=false;CharSet=utf8;SslMode=none"
15 }
16 $sql = New-Object MySql.Data.MySqlClient.MySqlCommand
17 $sql.Connection = $Connection
18 $sql.CommandText = $scriptContent
19 write-host $sql.CommandText
20 $Connection.Open()
21 $sql.ExecuteNonQuery()
22 $Connection.Close()
1# Convert your json in object and put it in variable
2$a = Get-Content 'D:\temp\mytest.json' -raw | ConvertFrom-Json
3$a.update | % {if($_.name -eq 'test1'){$_.version=3.0}}
4
5$a | ConvertTo-Json -depth 32| set-content 'D:\temp\mytestBis.json'
1#The file we want to change
2$xmlFilePath = "$MyPath\EXAMPLE\some.config"
3
4 # Read the XML file content
5 $xml = [xml](Get-Content $xmlFilePath)
6
7 $node = $xml.connectionStrings.add | where {$_.name -eq 'MetaData' -And $_.providerName -eq 'MySql.Data.MySqlClient'}
8 $node.connectionString = $AuditDB_Value
9
10 $node1 = $xml.connectionStrings.add | where {$_.name -eq 'Account'}
11 $node1.connectionString = $Account_Value
12
13 # Save the updated XML back to the file
14 $xml.Save($xmlFilePath)
15
16 Write-Host "$xmlFilePath Updated"
1# Read the JSON file and convert to a PowerShell object
2$jsonContent = Get-Content -Raw -Path ".\example.json" | ConvertFrom-Json
3
4# Read CSV and set a Header to determine the column
5$csvState = Import-CSV -Path .\referentials\states.csv -Header "ID", "VALUE" -Delimiter "`t"
6# Convert into a lookup hashtable
7$TableState = @{}; $csvState | ForEach-Object { $TableState[$_.ID] = $_.VALUE }
8
9# Loop through the Entities array and look for the state
10foreach ($item in $jsonContent.Entities) {
11 $stateValue = $item.State
12
13 # Compare the ID and stateValue then get the Value
14 $status = ($csvState | Where-Object { $_.'ID' -eq $stateValue }).VALUE
15
16 Write-Host "Status: $status"
17}
https://devblogs.microsoft.com/powershell-community/update-xml-files-using-powershell/
1# curl method
2curl -LsSf https://astral.sh/uv/install.sh | sh
3
4# Pip method
5pip install uv
1pyenv install 3.12
2pyenv local 3.12
3python -m venv .venv
4source .venv/bin/activate
5pip install pandas
6python
7
8# equivalent in uv
9uv run --python 3.12 --with pandas python
1uv python list --only-installed
2uv python install 3.12
3uv venv /path/to/environment --python 3.12
4uv pip install django
5uv pip compile requirements.in -o requirements.txt
6
7uv init myproject
8uv sync
9uv run manage.py runserver
Dependencies are declared in a comment block at the top of the script, before the import statements:
1#!/usr/bin/env -S uv run --script
2# /// script
3# requires-python = ">=3.12"
4# dependencies = [
5# "ffmpeg-normalize",
6# ]
7# ///
Then it can be run with uv run sync-flickr-dates.py; uv will create a Python 3.12 venv for us.
For me this is in ~/.cache/uv (which you can find via uv cache dir).
Let's take the Python dependencies for Netbox as an example.
1# Tools needed
2dnf install -y python3.11
3pip install --upgrade pip setuptools python-pypi-mirror twine
4
5# init mirror
6python3.11 -m venv mirror
7mkdir download
8
9# Get list of Py packages needed
10curl raw.githubusercontent.com/netbox-community/netbox/v3.7.3/requirements.txt -o requirements.txt
11echo pip >> requirements.txt
12echo setuptools >> requirements.txt
13echo uwsgi >> requirements.txt
14
15# Make sure repository CA is installed
16curl http://pki.server/pki/cacerts/ISSUING_CA.pem -o /etc/pki/ca-trust/source/anchors/issuing.crt
17curl http://pki.server/pki/cacerts/ROOT_CA.pem -o /etc/pki/ca-trust/source/anchors/root.crt
18update-ca-trust
19
20
21source mirror/bin/activate
22pypi-mirror download -b -d download -r requirements.txt
23twine upload --repository-url https://nexus3.server/repository/internal-pypi/ download/*.whl --cert /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
24twine upload --repository-url https://nexus3.server/repository/internal-pypi/ download/*.tar.gz --cert /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
Then, on the target host, configure /etc/pip.conf, for example:
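A sketch of what that pip.conf could look like, reusing the Nexus repository from above (the /simple suffix is pip's index layout on Nexus; adjust to your repository URL):

```bash
cat <<'EOF' > /etc/pip.conf
[global]
index-url = https://nexus3.server/repository/internal-pypi/simple
cert = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
EOF
```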
1pip3 freeze netaddr > requirements.txt
2pip3 download -r requirements.txt -d wheel
3mv requirements.txt wheel
4tar -zcf wheelhouse.tar.gz wheel
5tar -zxf wheelhouse.tar.gz
6pip3 install -r wheel/requirements.txt --no-index --find-links wheel
1curl -sSL https://install.python-poetry.org | python3 -
2poetry new rp-poetry
3poetry add ansible
4poetry add poetry
5poetry add netaddr
6poetry add kubernetes
7poetry add jsonpatch
8poetry add `cat ~/.ansible/collections/ansible_collections/kubernetes/core/requirements.txt`
9
10poetry build
11
12pip3 install dist/rp_poetry-0.1.0-py3-none-any.whl
13
14poetry export --without-hashes -f requirements.txt -o requirements.txt
1poetry config repositories.test http://localhost
2poetry publish -r test
1podman login registry.redhat.io
2podman pull registry.redhat.io/ansible-automation-platform-22/ansible-python-base-rhel8:1.0.0-230
3
4pyenv local 3.9.13
5python -m pip install poetry
6poetry init
7poetry add ansible-builder
awk treats each line as a table; by default, spaces are the separators of columns.
The general syntax is awk 'search {action}' file_to_parse.
1# Give the value higher than 75000 in column $4
2df | awk '$4 > 75000'
3
4# Print the all line when column $4 is higher than 75000
5df | awk '$4 > 75000 {print $0}'
But if you look for a string, the search pattern needs to be enclosed in slashes: /search/ (see the example below).
When you print, $0 represents the whole line, $1 the first column, $2 the second column, etc.
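For example, to act only on lines containing a string:

```bash
# Print filesystem (column 1) and available space (column 4) for lines matching "tmpfs"
df | awk '/tmpfs/ {print $1, $4}'
```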
1sed -e '…' -e '…' # Several expressions in one execution
2sed -i # Replace in place
3sed -r # Play with REGEX
4
5# The most usefull
6sed -e '/^[ ]*#/d' -e '/^$/d' <file> # print the file without empty or commented lines
7sed 's/ -/\n -/g' # insert a new line before each " -"
8sed 's/my_match.*/ /g' # remove from the match till end of line
9sed -i '4048d;3375d' ~/.ssh/known_hosts # delete lines Number
10
11# Buffer
12sed -r 's/.*@(.*)/\1/' # keep what is after @, put it in a capture group ( ) and reuse it with \1
13sed -e '/^;/! s/.*-reserv.*/; Reserved: &/' file.txt # reuse the match with &
14
15# Search a line
16sed -e '/192.168.130/ s/^/#/g' -i /etc/hosts # Comment a line
17sed -re 's/^;(r|R)eserved:/; Reserved:/g' file.txt # Search several string
18
19# Insert - add two lines below a match pattern
20sed -i '/.*\"description\".*/s/$/ \n \"after\" : \"network.target\"\,\n \"requires\" : \"network.target\"\,/g' my_File
21
22# Append
23sed '/WORD/ a Add this line after every line with WORD'
24
25# if no occurrence, then add it after "use_authtok"
26sed -e '/remember=10/!s/use_authtok/& remember=10/' -i /etc/pam.d/system-auth-permanent