Output from the node that is reporting the error:
[root@thor ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 931.5G 0 disk
sdb 8:16 0 931.5G 0 disk
sdc 8:32 0 477G 0 disk
sdd 8:48 1 57.3G 0 disk
├─sdd1 8:49 1 1G 0 part /boot
└─sdd2 8:50 1 56.3G 0 part
├─cl-root 253:0 0 35.2G 0 lvm /
├─cl-swap 253:1 0 4G 0 lvm
└─cl-home 253:2 0 17.2G 0 lvm /home
[root@thor ~]# wipefs -a /dev/sdc
Also tried other means to ensure drive is wiped.
[root@thor ~]# dd if=/dev/zero of=/dev/sdc bs=512 count=10000
10000+0 records in
10000+0 records out
5120000 bytes (5.1 MB, 4.9 MiB) copied, 0.0654142 s, 78.3 MB/s
I noted that multipathd had hooks on the drive... thought maybe that was something to do with it:
# Collect the IDs of the local disks that will be used for replication
# Ex: thor
[root@thor ~]# multipath -F
create: WDC_WDS100T2B0B-00YS70_19106A802926 undef ATA,WDC WDS100T2B0B
size=932G features='1 queue_if_no_path' hwhandler='0' wp=undef
`-+- policy='service-time 0' prio=1 status=undef
`- 1:0:0:0 sda 8:0 undef ready running
create: WDC_WDS100T2B0B-00YS70_192490801828 undef ATA,WDC WDS100T2B0B
size=932G features='1 queue_if_no_path' hwhandler='0' wp=undef
`-+- policy='service-time 0' prio=1 status=undef
`- 2:0:0:0 sdb 8:16 undef ready running
create: Samsung_SSD_850_PRO_512GB_S250NXAGA15787L undef ATA,Samsung SSD 850
size=477G features='1 queue_if_no_path' hwhandler='0' wp=undef
`-+- policy='service-time 0' prio=1 status=undef
`- 4:0:0:0 sdc 8:32 undef ready running
#### May not be helpful .. trying to avoid the error: "err": "vdo: ERROR - Device /dev/sdc excluded by a filter.\n"
# Blacklist local disk from multipath
vi /etc/multipath.conf
blacklist {
wwid WDC_WDS100T2B0B-00YS70_19106A802926
wwid WDC_WDS100T2B0B-00YS70_192490801828
wwid Samsung_SSD_850_PRO_512GB_S250NXAGA15787L
protocol "(scsi:adt|scsi:sbp)"
}
systemctl restart multipathd.service
multipath -F
multipath -v2
<reboot>
No change.
# Snip of where it fails below. Full log attached
TASK [gluster.infra/roles/backend_setup : Enable and start vdo service] ********
ok: [thorst.penguinpages.local]
TASK [gluster.infra/roles/backend_setup : Create VDO with specified size] ******
failed: [thorst.penguinpages.local] (item={'name': 'vdo_sdc', 'device': '/dev/sdc', 'slabsize': '32G', 'logicalsize': '11000G', 'blockmapcachesize': '128M', 'emulate512': 'off', 'writepolicy': 'auto', 'maxDiscardSize': '16M'}) => {"ansible_index_var": "index", "ansible_loop_var": "item", "changed": false, "err": "vdo: ERROR - Device /dev/sdc excluded by a filter.\n", "index": 0, "item": {"blockmapcachesize": "128M", "device": "/dev/sdc", "emulate512": "off", "logicalsize": "11000G", "maxDiscardSize": "16M", "name": "vdo_sdc", "slabsize": "32G", "writepolicy": "auto"}, "msg": "Creating VDO vdo_sdc failed.", "rc": 1}
### HCI Single node Deploy Ansible File
Inventory (YAML):
# oVirt HCI single-node deployment inventory for gluster-ansible-infra.
# NOTE: the original paste had all indentation stripped, which makes the
# YAML invalid; nesting is restored here per the backend_setup role layout.
hc_nodes:
  hosts:
    thorst.penguinpages.local:
      # VG is built on top of the VDO device, not the raw disk.
      gluster_infra_volume_groups:
        - vgname: gluster_vg_sdc
          pvname: /dev/mapper/vdo_sdc
      gluster_infra_mount_devices:
        - path: /gluster_bricks/engine
          lvname: gluster_lv_engine
          vgname: gluster_vg_sdc
        - path: /gluster_bricks/data
          lvname: gluster_lv_data
          vgname: gluster_vg_sdc
        - path: /gluster_bricks/vmstore
          lvname: gluster_lv_vmstore
          vgname: gluster_vg_sdc
      # Deduplicated/compressed VDO layer on the raw 477G SSD; logicalsize
      # over-provisions the physical capacity (expected with VDO).
      gluster_infra_vdo:
        - name: vdo_sdc
          device: /dev/sdc
          slabsize: 32G
          logicalsize: 11000G
          blockmapcachesize: 128M
          emulate512: 'off'
          writepolicy: auto
          maxDiscardSize: 16M
      # Keep multipathd from claiming the brick disk (cause of the
      # "excluded by a filter" symptom when dm-multipath holds the device).
      blacklist_mpath_devices:
        - sdc
      # Engine brick is a thick LV; data/vmstore are thin LVs below.
      gluster_infra_thick_lvs:
        - vgname: gluster_vg_sdc
          lvname: gluster_lv_engine
          size: 1000G
      gluster_infra_thinpools:
        - vgname: gluster_vg_sdc
          thinpoolname: gluster_thinpool_gluster_vg_sdc
          poolmetadatasize: 3G
      gluster_infra_lv_logicalvols:
        - vgname: gluster_vg_sdc
          thinpool: gluster_thinpool_gluster_vg_sdc
          lvname: gluster_lv_data
          lvsize: 5000G
        - vgname: gluster_vg_sdc
          thinpool: gluster_thinpool_gluster_vg_sdc
          lvname: gluster_lv_vmstore
          lvsize: 5000G
  vars:
    gluster_infra_disktype: JBOD
    gluster_set_selinux_labels: true
    gluster_infra_fw_ports:
      - 2049/tcp
      - 54321/tcp
      - 5900/tcp
      - 5900-6923/tcp
      - 5666/tcp
      - 16514/tcp
    gluster_infra_fw_permanent: true
    gluster_infra_fw_state: enabled
    gluster_infra_fw_zone: public
    gluster_infra_fw_services:
      - glusterfs
    gluster_features_force_varlogsizecheck: false
    cluster_nodes:
      - thorst.penguinpages.local
    gluster_features_hci_cluster: '{{ cluster_nodes }}'
    # arbiter: 0 -> plain (non-arbitrated) volumes; fine for a single node.
    gluster_features_hci_volumes:
      - volname: engine
        brick: /gluster_bricks/engine/engine
        arbiter: 0
      - volname: data
        brick: /gluster_bricks/data/data
        arbiter: 0
      - volname: vmstore
        brick: /gluster_bricks/vmstore/vmstore
        arbiter: 0
    # Standard oVirt-recommended volume options (vdsm/kvm uid/gid 36).
    gluster_features_hci_volume_options:
      storage.owner-uid: '36'
      storage.owner-gid: '36'
      features.shard: 'on'
      performance.low-prio-threads: '32'
      performance.strict-o-direct: 'on'
      network.remote-dio: 'off'
      network.ping-timeout: '30'
      user.cifs: 'off'
      nfs.disable: 'on'
      performance.quick-read: 'off'
      performance.read-ahead: 'off'
      performance.io-cache: 'off'
      cluster.eager-lock: enable