crmsh scst cannot start
wow123yf commented
Hello,
I am trying to configure ALUA in my cluster, following this guide: http://marcitland.blogspot.com/2013/04/building-using-highly-available-esos.html
But I ran into the following problem.
My cluster is ESOS - Enterprise Storage OS 1.3.10.
[root@storage1 ~]# cat /etc/scst.conf
# Automatically generated by SCST Configurator v3.3.0-pre1.
# Non-key attributes
force_global_sgv_pool 0
max_tasklet_cmd 10
poll_us 0
setup_id 0x0
suspend 0
threads 24
TARGET_DRIVER copy_manager {
    # Non-key attributes
    allow_not_connected_copy 0
    TARGET copy_manager_tgt {
        # Non-key attributes
        addr_method PERIPHERAL
        black_hole 0
        cpu_mask ffffff
        forwarding 0
        io_grouping_type auto
        rel_tgt_id 0
    }
}
TARGET_DRIVER iscsi {
    enabled 0
    TARGET iqn.2019-10.esos.storage1:389cd {
        enabled 0
        rel_tgt_id 1
        # Non-key attributes
        DataDigest None
        FirstBurstLength 65536
        HeaderDigest None
        ImmediateData Yes
        InitialR2T No
        MaxBurstLength 1048576
        MaxOutstandingR2T 32
        MaxRecvDataSegmentLength 1048576
        MaxSessions 0
        MaxXmitDataSegmentLength 1048576
        NopInInterval 30
        NopInTimeout 30
        QueuedCommands 32
        RDMAExtensions Yes
        RspTimeout 90
        addr_method PERIPHERAL
        black_hole 0
        cpu_mask ffffff
        forwarding 0
        io_grouping_type auto
        per_portal_acl 0
        GROUP windows {
            INITIATOR iqn.1991-05.com.microsoft:desktop-yf
            # Non-key attributes
            addr_method PERIPHERAL
            black_hole 0
            cpu_mask ffffff
            io_grouping_type auto
            per_sess_dedicated_tgt_threads 0
        }
    }
}
TARGET_DRIVER qla2x00t {
    TARGET 20:01:00:0e:d5:28:8a:7f {
        HW_TARGET
        enabled 0
        # Non-key attributes
        addr_method PERIPHERAL
        black_hole 0
        cpu_mask ffffff
        explicit_confirmation 0
        forwarding 0
        io_grouping_type auto
        node_name 20:00:00:0e:d5:28:8a:7f
        port_name 20:01:00:0e:d5:28:8a:7f
        rel_tgt_id 0
    }
    TARGET 20:01:00:0e:d5:28:8a:80 {
        HW_TARGET
        enabled 0
        # Non-key attributes
        addr_method PERIPHERAL
        black_hole 0
        cpu_mask ffffff
        explicit_confirmation 0
        forwarding 0
        io_grouping_type auto
        node_name 20:00:00:0e:d5:28:8a:80
        port_name 20:01:00:0e:d5:28:8a:80
        rel_tgt_id 0
    }
}
DEVICE_GROUP esos {
    TARGET_GROUP local {
        group_id 1
        state offline
        # Non-key attributes
        preferred 0
        TARGET iqn.2019-10.esos.storage1:389cd
    }
    TARGET_GROUP remote {
        group_id 2
        state active
        # Non-key attributes
        preferred 0
        TARGET iqn.2019-10.esos.storage2:b442b {
            rel_tgt_id 2
        }
    }
}
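To confirm that the ALUA DEVICE_GROUP/TARGET_GROUP section above was actually applied in the kernel, the SCST sysfs tree can be inspected (a sketch; it assumes the usual SCST sysfs location /sys/kernel/scst_tgt and that the group and attribute files mirror scst.conf):

# list the target groups created under the "esos" device group
ls /sys/kernel/scst_tgt/device_groups/esos/target_groups/
# each target group should expose its group_id and ALUA state
cat /sys/kernel/scst_tgt/device_groups/esos/target_groups/local/group_id
cat /sys/kernel/scst_tgt/device_groups/esos/target_groups/remote/state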
crm(live)# configure show
node 1: storage1.xtc.com
node 2: storage2.xtc.com
primitive p_drbd_r0 ocf:linbit:drbd \
    params drbd_resource=r0 \
    op monitor interval=10 role=Master \
    op monitor interval=20 role=Slave \
    op start interval=0 timeout=240 \
    op stop interval=0 timeout=100
primitive p_lvm_r0 LVM \
    params volgrpname=r0 \
    op start interval=0 timeout=30 \
    op stop interval=0 timeout=30 \
    meta target-role=Started
primitive p_scst ocf:esos:scst \
    params alua=true device_group=esos local_tgt_grp=local remote_tgt_grp=remote \
    op monitor interval=10 role=Master \
    op monitor interval=20 role=Slave \
    op start interval=0 timeout=120 \
    op stop interval=0 timeout=60
group g_drbd p_drbd_r0
group g_lvm p_lvm_r0
ms ms_drbd g_drbd \
    meta master-max=2 master-node-max=1 clone-max=2 clone-node-max=1 notify=true interleave=true
ms ms_scst p_scst \
    meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true interleave=true
clone clone_lvm g_lvm \
    meta interleave=true target-role=Started
colocation c_r0_r1 inf: ms_scst:Started clone_lvm:Started ms_drbd:Master
order o_r0_r1 inf: ms_drbd:promote clone_lvm:start ms_scst:start
property cib-bootstrap-options: \
    have-watchdog=false \
    dc-version=1.1.16-94ff4df \
    cluster-infrastructure=corosync \
    cluster-name=esos \
    stonith-enabled=false \
    last-lrm-refresh=1570628241
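For reference, the parameters and defaults that the scst agent actually accepts can be listed with crmsh (assuming the esos OCF provider is installed on the node):

crm ra info ocf:esos:scst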
crm(live)# status
Stack: corosync
Current DC: storage1.xtc.com (version 1.1.16-94ff4df) - partition with quorum
Last updated: Wed Oct 9 14:37:17 2019
Last change: Wed Oct 9 14:23:55 2019 by root via crm_shadow on storage1.xtc.com
2 nodes configured
6 resources configured
Online: [ storage1.xtc.com storage2.xtc.com ]
Full list of resources:
 Master/Slave Set: ms_drbd [g_drbd]
     Masters: [ storage1.xtc.com storage2.xtc.com ]
 Clone Set: clone_lvm [g_lvm]
     Started: [ storage1.xtc.com storage2.xtc.com ]
 Master/Slave Set: ms_scst [p_scst]
     p_scst (ocf::esos:scst): FAILED storage1.xtc.com (blocked)
     p_scst (ocf::esos:scst): FAILED storage2.xtc.com (blocked)
Failed Actions:
* p_scst_stop_0 on storage1.xtc.com 'not configured' (6): call=19, status=complete, exitreason='none',
    last-rc-change='Wed Oct 9 14:28:45 2019', queued=0ms, exec=32ms
* p_scst_stop_0 on storage2.xtc.com 'not configured' (6): call=19, status=complete, exitreason='none',
    last-rc-change='Wed Oct 9 14:32:17 2019', queued=0ms, exec=32ms
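The 'not configured' status is OCF return code 6 (OCF_ERR_CONFIGURED), which the agent returns from its own parameter checks before it touches SCST at all. To see which check fails, the agent can be run by hand with the same parameters Pacemaker passes (a sketch; the agent path below is an assumption and may differ on ESOS, while the parameter names are taken from the p_scst primitive above):

# OCF agents read their parameters from OCF_RESKEY_* environment variables
export OCF_ROOT=/usr/lib/ocf
export OCF_RESKEY_alua=true
export OCF_RESKEY_device_group=esos
export OCF_RESKEY_local_tgt_grp=local
export OCF_RESKEY_remote_tgt_grp=remote
# re-run the failed "stop" action with shell tracing to see which test returns rc 6
bash -x /usr/lib/ocf/resource.d/esos/scst stop; echo "rc=$?"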
If I use the command "lvdisplay", it does not display any results:
[root@storage2 ~]# lvdisplay
WARNING: Not using lvmetad because locking_type is 3 (clustered).
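That warning comes from LVM's clustered locking: locking_type 3 in /etc/lvm/lvm.conf selects the built-in cluster-wide locking, which needs the cluster LVM daemon (clvmd) running on the node before commands such as lvdisplay can report any volumes. A quick check (a sketch; it assumes these tools are available on ESOS):

# confirm the locking mode LVM was told to use
grep -E '^[[:space:]]*locking_type' /etc/lvm/lvm.conf
# clustered locking (type 3) needs clvmd running on the node
pgrep -l clvmd || echo "clvmd is not running"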