General concerns

  • If you want to move VMs to another Storage Domain, you also need to copy their template to that domain!

  • Removing a disk:

# If RHV no longer uses a disk, it should show up with an empty size in lsblk:
lsblk -a
sdf                                     8:80    0     4T  0 disk
└─36001405893b456536be4d67a7f6716e3   253:38    0     4T  0 mpath
sdg                                     8:96    0     4T  0 disk
└─36001405893b456536be4d67a7f6716e3   253:38    0     4T  0 mpath
sdh                                     8:112   0     4T  0 disk
└─36001405893b456536be4d67a7f6716e3   253:38    0     4T  0 mpath
sdi                                     8:128   0            0 disk
└─360014052ab23b1cee074fe38059d7c94   253:39    0   100G  0 mpath
sdj                                     8:144   0            0 disk
└─360014052ab23b1cee074fe38059d7c94   253:39    0   100G  0 mpath
sdk                                     8:160   0            0 disk
└─360014052ab23b1cee074fe38059d7c94   253:39    0   100G  0 mpath

# Find all the SCSI disks backing a LUN ID
LUN_ID="360014054ce7e566a01d44c1a4758b092"
list_disk=$(dmsetup deps -o devname ${LUN_ID} | cut -f 2 | cut -c 3- | tr -d "()" | tr " " "\n")
echo ${list_disk}

# Remove the multipath map
multipath -f "${LUN_ID}"

# Remove each underlying SCSI device
for i in ${list_disk}; do echo ${i}; blockdev --flushbufs /dev/${i}; echo 1 > /sys/block/${i}/device/delete; done

# You can check which disk is linked to which LUN on the Ceph side
ls -l /dev/disk/by-*
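
A quick sanity check after the removal, reusing the LUN_ID and list_disk variables from above (a minimal sketch; adjust to your own LUN):

# The multipath map, the SCSI devices and the by-id links should all be gone
multipath -ll | grep "${LUN_ID}" || echo "multipath map removed"
for i in ${list_disk}; do ls /dev/${i} 2>/dev/null || echo "${i} removed"; done
ls -l /dev/disk/by-id/ | grep "${LUN_ID}" || echo "no stale by-id links"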

NFS for OLVM/oVirt

Since oVirt needs shared storage, we can create a local NFS export to work around this requirement when no storage array is available.

# Create a partition for the NFS data (interactive)
parted /dev/sda
# Build an LVM volume and an ext4 filesystem on the new partition
pvcreate /dev/sda2
vgcreate rhvh /dev/sda2
lvcreate -L 100G rhvh -n data
mkfs.ext4 /dev/mapper/rhvh-data
echo "/dev/mapper/rhvh-data /data ext4 defaults,discard 1 2" >> /etc/fstab
mkdir -p /data
mount /data

# The export must be owned by vdsm:kvm (36:36)
chown 36:36 /data
chmod 0755 /data
dnf install nfs-utils -y
systemctl enable --now nfs-server
systemctl enable --now rpcbind
echo "/data *(rw)" >> /etc/exports
firewall-cmd --add-service=nfs --permanent
firewall-cmd --reload

exportfs -rav
exportfs
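
Before adding the export in the Administration Portal, it is worth checking that vdsm (uid/gid 36) can write to it. A minimal sketch, assuming the vdsm user exists on this host and /mnt/nfs-test is a throwaway mount point:

showmount -e localhost
mkdir -p /mnt/nfs-test
mount -t nfs localhost:/data /mnt/nfs-test
# 36:36 (vdsm:kvm) must be able to create files on the export
sudo -u vdsm touch /mnt/nfs-test/write-test && rm /mnt/nfs-test/write-test
umount /mnt/nfs-test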

iSCSI

  • iSCSI targets need to be discovered before the install (refer to the iSCSI doc)

  • Adding a disk:

https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/administration_guide/index#Adding_iSCSI_Storage_storage_admin

  • Config for a Ceph iSCSI volume with multipathing:
# Look at the current multipath config for the LIO devices
multipathd show config | more

# Replace the "TCMU device" product string with "RBD" in /etc/multipath.conf
sed -i 's/TCMU device/RBD/g' /etc/multipath.conf

# Check the config
cat /etc/multipath.conf | grep -v \# | sed '/^$/d'
defaults {
    polling_interval 5
    no_path_retry 16
    user_friendly_names no
    flush_on_last_del yes
    fast_io_fail_tmo 5
    dev_loss_tmo 30
    max_fds 4096
}
blacklist {
    protocol "(scsi:adt|scsi:sbp)"
}
overrides {
    no_path_retry 16
}
devices {
    device {
        vendor "LIO-ORG"
        product "RBD"
        hardware_handler "1 alua"
        path_grouping_policy "failover"
        path_selector "queue-length 0"
        failback 60
        path_checker tur
        prio alua
        prio_args exclusive_pref_bit
        fast_io_fail_tmo 25
        no_path_retry queue
    }
}

# Restart the services, then discover and log in to the Ceph iSCSI gateway
systemctl restart iscsi
systemctl restart multipathd
systemctl status multipathd -l
systemctl status iscsi -l
iscsiadm -m discovery -t st -p 172.16.12.50:3260
iscsiadm -m node -T iqn.2003-01.com.redhat.iscsi-gw:ceph-igw -l
iscsiadm -m session
iscsiadm -m session -P 3
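
Once the sessions are up, check that every Ceph LUN comes back as a single multipath map with one path per gateway. A minimal sketch; the output depends on your LUNs and portals:

# One map per LUN, one active/enabled path per iSCSI gateway
multipath -ll
multipathd show paths
# Cross-check the SCSI devices against the iSCSI portals
ls -l /dev/disk/by-path/ | grep iscsi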