#!/usr/bin/openrc-run
# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
# Locate the lvm binary; print its path, or print nothing if it is not installed.
_get_lvm_path() {
    local lvm_path=
    for lvm_path in /bin/lvm /usr/bin/lvm ; do
        if [ -x "${lvm_path}" ] ; then
            echo "${lvm_path}"
            return 0
        fi
    done
    return 1
}
# Return success if lvm.conf enables the lvmetad metadata caching daemon.
_use_lvmetad() {
    local lvm_path="$(_get_lvm_path)"
    [ -x "${lvm_path}" ] || return 1
    ${lvm_path} dumpconfig global 2>/dev/null | grep -q 'use_lvmetad=1'
}
# Return success if lvm.conf enables lvmlockd for shared (lockd) VGs.
_use_lvmlockd() {
    local lvm_path="$(_get_lvm_path)"
    [ -x "${lvm_path}" ] || return 1
    ${lvm_path} dumpconfig global 2>/dev/null | grep -q 'use_lvmlockd=1'
}
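# Both helpers above grep the compact key=value form printed by
# "lvm dumpconfig global", e.g. (illustrative output, trimmed):
#   global {
#       use_lvmetad=1
#       use_lvmlockd=0
#   }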
depend() {
    before checkfs fsck
    after modules device-mapper
    # We may want lvmetad based on the configuration. If lvmetad support was
    # added while lvm2 is already running, then we are not dependent on it.
    # For the more common case, if it's disabled in the config, we are not
    # dependent on it either.
    config /etc/lvm/lvm.conf
    local _want=
    if service_started ; then
        _want=$(service_get_value want)
    else
        if _use_lvmetad ; then
            _want="${_want} lvmetad"
        fi
        if _use_lvmlockd ; then
            _want="${_want} lvmlockd"
        fi
    fi
    # Make sure you review /etc/conf.d/lvm as well!
    # Depending on your system, it might also need to pull in udev & mdraid;
    # see the example after this function.
    need sysfs
    if [ -n "${_want}" ] ; then
        want ${_want}
    fi
}
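# As the comment in depend() notes, extra dependencies belong in /etc/conf.d/lvm.
# OpenRC reads standard rc_* variables from there; illustrative contents only,
# adjust to your system:
#
#   # /etc/conf.d/lvm
#   rc_need="udev"       # wait for udev-created device nodes
#   rc_use="mdraid"      # start after RAID arrays are assembled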
# Pass a minimal inline config so that LVM lock files are created under /run,
# which is writable this early in boot even when other filesystems are not
# yet mounted.
config='global { locking_dir = "/run/lock/lvm" }'
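# The override can be inspected with dumpconfig, e.g. (hypothetical invocation):
#   lvm dumpconfig --config 'global { locking_dir = "/run/lock/lvm" }' global/locking_dir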
# Return success only if 'device-mapper' is listed in both /proc/devices and
# /proc/misc, i.e. the kernel driver is available.
dm_in_proc() {
    local retval=0
    for x in devices misc ; do
        grep -qs 'device-mapper' /proc/${x}
        retval=$((${retval} + $?))
    done
    return ${retval}
}
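# What dm_in_proc greps for, illustratively (the numbers are dynamically
# assigned and vary between systems):
#   $ grep device-mapper /proc/devices /proc/misc
#   /proc/devices:253 device-mapper
#   /proc/misc: 60 device-mapper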
start() {
    # LVM support for /usr, /home, /opt ....
    # This should be done *before* checking local
    # volumes, or they never get checked.
    # NOTE: Add needed modules for LVM or RAID, etc.
    # to /etc/modules.autoload if needed (see the illustration after this function).
    lvm_path="$(_get_lvm_path)"
    if [ -z "${lvm_path}" ] ; then
        eerror "Failed to find lvm binary in /bin or /usr/bin!"
        return 1
    fi
    if [ -z "${CDBOOT}" ] ; then
        if [ -e /proc/modules ] && ! dm_in_proc ; then
            ebegin "Trying to load dm-mod module"
            modprobe dm-mod 2>/dev/null
            eend $?
        fi
        if [ -d /proc/lvm ] || dm_in_proc ; then
            local has_errors=0 verbose_command
            yesno "${rc_verbose}" && verbose_command=" -v"
            ebegin "Starting the Logical Volume Manager"
            if _use_lvmetad ; then
                # Extra PV scan pass, because some devices might not have been
                # available until very recently.
                ${lvm_path} pvscan${verbose_command} --config "${config}" --cache
                [ $? -ne 0 ] && has_errors=1
            fi
            # Now make the device nodes.
            ${lvm_path} vgscan${verbose_command} --config "${config}" --mknodes
            [ $? -ne 0 ] && has_errors=1
            # Enable all VGs.
            ${lvm_path} vgchange${verbose_command} --config "${config}" --sysinit --activate y
            [ $? -ne 0 ] && has_errors=1
            if _use_lvmlockd ; then
                # Start lockd VGs as required.
                ${lvm_path} vgchange${verbose_command} --config "${config}" --lock-start --lock-opt auto
                [ $? -ne 0 ] && has_errors=1
            fi
            eend ${has_errors} "Failed to start the Logical Volume Manager"
        fi
    fi
}
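# Illustration of the module note above: on OpenRC systems the "modules"
# service (ordered before us via "after modules") reads /etc/conf.d/modules,
# so dm-mod can be loaded at boot with something like (illustrative only):
#
#   # /etc/conf.d/modules
#   modules="dm-mod"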
# Record which daemons we ended up wanting, so depend() can reuse the value
# on later runs via service_get_value.
start_post() {
    local _want=
    if _use_lvmetad ; then
        _want="${_want} lvmetad"
    fi
    if _use_lvmlockd ; then
        _want="${_want} lvmlockd"
    fi
    service_set_value want "${_want}"
}
stop() {
    lvm_path="$(_get_lvm_path)"
    if [ -z "${lvm_path}" ] ; then
        eerror "Failed to find lvm binary in /bin or /usr/bin!"
        return 1
    fi
    # Stop LVM2
    if [ -f /etc/lvmtab -o -d /etc/lvm ] \
        && [ -d /proc/lvm -o "$(grep device-mapper /proc/misc 2>/dev/null)" ]
    then
        local VGS="$(${lvm_path} vgs --config "${config}" -o vg_name --noheadings --nosuffix --rows 2>/dev/null)"
        if [ -z "${VGS}" ] ; then
            # No volume groups found; nothing to do for us.
            return 0
        fi
        local has_errors=0 verbose_command eend_cmd="eend"
        yesno "${rc_verbose}" && verbose_command=" -v"
        local msg="Failed to stop the Logical Volume Manager"
        if [ "${RC_RUNLEVEL}" = shutdown ] ; then
            # Failures on shutdown are non-fatal.
            eend_cmd="ewend"
            msg="${msg} (possibly some LVs are still needed for /usr or root)"
        fi
        ebegin "Stopping the Logical Volume Manager"
        ${lvm_path} vgchange${verbose_command} --config "${config}" --sysinit --activate n
        [ $? -ne 0 ] && has_errors=1
        ${eend_cmd} ${has_errors} "${msg}"
    fi
    # At this point, always exit without indicating an error.
    return 0
}
# vim:ts=4