aboutsummaryrefslogtreecommitdiff
path: root/sys-fs
diff options
context:
space:
mode:
authorPetr Vaněk <arkamar@atlas.cz>2020-04-26 12:05:00 +0200
committerAnthony G. Basile <blueness@gentoo.org>2020-04-27 08:44:51 -0400
commitd8d90c46de2431c9c848d58f6d3ef001fd3d17a3 (patch)
treefb1d92df8453d64446be571097c78107ce593f0a /sys-fs
parentnet-libs/libqmi: Fixed upstream in commit 6097ad5, release 1.18.2 (diff)
downloadmusl-d8d90c46de2431c9c848d58f6d3ef001fd3d17a3.tar.gz
musl-d8d90c46de2431c9c848d58f6d3ef001fd3d17a3.tar.bz2
musl-d8d90c46de2431c9c848d58f6d3ef001fd3d17a3.zip
sys-fs/lvm2: add missing rc script
Package-Manager: Portage-2.3.89, Repoman-2.3.20 Signed-off-by: Anthony G. Basile <blueness@gentoo.org>
Diffstat (limited to 'sys-fs')
-rw-r--r--sys-fs/lvm2/files/lvm.rc-2.02.187173
1 file changed, 173 insertions, 0 deletions
diff --git a/sys-fs/lvm2/files/lvm.rc-2.02.187 b/sys-fs/lvm2/files/lvm.rc-2.02.187
new file mode 100644
index 0000000..3468adc
--- /dev/null
+++ b/sys-fs/lvm2/files/lvm.rc-2.02.187
@@ -0,0 +1,173 @@
+#!/sbin/openrc-run
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+_get_lvm_path() {
+	# Print the first executable lvm binary out of /bin/lvm and
+	# /sbin/lvm. Prints nothing and returns 1 when neither exists,
+	# so callers can detect the failure with [ -z ... ].
+	local lvm_path=
+	for lvm_path in /bin/lvm /sbin/lvm ; do
+		if [ -x "${lvm_path}" ] ; then
+			echo "${lvm_path}"
+			return 0
+		fi
+	done
+	# Previously the last candidate was echoed even when it was not
+	# executable, which made the [ -z "${lvm_path}" ] error branches
+	# in start() and stop() unreachable.
+	return 1
+}
+
+_use_lvmetad() {
+ local lvm_path="$(_get_lvm_path)"
+ [ ! -x "${lvm_path}" ] && return 1
+ ${lvm_path} dumpconfig global 2>/dev/null | grep -q 'use_lvmetad=1'
+}
+
+_use_lvmlockd() {
+ local lvm_path="$(_get_lvm_path)"
+ [ ! -x "${lvm_path}" ] && return 1
+ ${lvm_path} dumpconfig global 2>/dev/null | grep -q 'use_lvmlockd=1'
+}
+
+depend() {
+ # Volumes must be available before any filesystem checks run, and we
+ # need kernel modules and device-mapper set up first.
+ before checkfs fsck
+ after modules device-mapper
+ # We may want lvmetad based on the configuration. If we added lvmetad
+ # support while lvm2 is running then we aren't dependent on it. For the
+ # more common case, if its disabled in the config we aren't dependent
+ # on it.
+ config /etc/lvm/lvm.conf
+ local _want=
+
+ if service_started ; then
+ # Already running: reuse the daemon list recorded by start_post()
+ # instead of re-reading lvm.conf.
+ _want=$(service_get_value want)
+ else
+ # Not started yet: derive the wanted daemons from lvm.conf directly.
+ if _use_lvmetad ; then
+ _want="${_want} lvmetad"
+ fi
+
+ if _use_lvmlockd ; then
+ _want="${_want} lvmlockd"
+ fi
+ fi
+
+ # Make sure you review /etc/conf.d/lvm as well!
+ # Depending on your system, it might also introduce udev & mdraid
+ need sysfs
+
+ # 'want' is a soft dependency: start these daemons if available, but
+ # do not fail when they are missing.
+ if [ -n "${_want}" ] ; then
+ want ${_want}
+ fi
+}
+
+config='global { locking_dir = "/run/lock/lvm" }'
+
+dm_in_proc() {
+	# Succeeds (returns 0) only when 'device-mapper' appears in BOTH
+	# /proc/devices and /proc/misc; every miss or unreadable file adds
+	# grep's non-zero status to the return value.
+	local retval=0 node=
+	for node in devices misc ; do
+		grep -qs 'device-mapper' "/proc/${node}"
+		retval=$(( retval + $? ))
+	done
+	return ${retval}
+}
+
+start() {
+ # LVM support for /usr, /home, /opt ....
+ # This should be done *before* checking local
+ # volumes, or they never get checked.
+
+ # NOTE: Add needed modules for LVM or RAID, etc
+ # to /etc/modules.autoload if needed
+
+ lvm_path="$(_get_lvm_path)"
+ if [ -z "${lvm_path}" ] ; then
+ eerror "Failed to find lvm binary in /bin or /sbin!"
+ return 1
+ fi
+
+ # NOTE(review): CDBOOT appears to mark LiveCD boots, where volume
+ # activation is skipped entirely — confirm against the boot scripts.
+ if [ -z "${CDBOOT}" ] ; then
+ # Load dm-mod only if the kernel supports modules and device-mapper
+ # is not already registered in /proc.
+ if [ -e /proc/modules ] && ! dm_in_proc ; then
+ ebegin "Trying to load dm-mod module"
+ modprobe dm-mod 2>/dev/null
+ eend $?
+ fi
+
+ # Proceed only when the kernel has LVM or device-mapper support.
+ if [ -d /proc/lvm ] || dm_in_proc ; then
+ local has_errors=0 verbose_command
+
+ yesno "${rc_verbose}" && verbose_command=" -v"
+
+ ebegin "Starting the Logical Volume Manager"
+
+ if _use_lvmetad ; then
+ # Extra PV find pass because some devices might not have been available until very recently
+ ${lvm_path} pvscan${verbose_command} --config "${config}" --cache
+ [ $? -ne 0 ] && has_errors=1
+ fi
+
+ # Now make the nodes
+ ${lvm_path} vgscan${verbose_command} --config "${config}" --mknodes
+ [ $? -ne 0 ] && has_errors=1
+
+ # Enable all VGs; --sysinit suppresses interaction/udev waiting that
+ # would be inappropriate this early in boot (see vgchange(8)).
+ ${lvm_path} vgchange${verbose_command} --config "${config}" --sysinit --activate y
+ [ $? -ne 0 ] && has_errors=1
+
+ if _use_lvmlockd ; then
+ # Start lockd VGs as required
+ ${lvm_path} vgchange${verbose_command} --config "${config}" --lock-start --lock-opt auto
+ [ $? -ne 0 ] && has_errors=1
+ fi
+
+ # Any single failure above marks the whole start as failed.
+ eend ${has_errors} "Failed to start the Logical Volume Manager"
+ fi
+ fi
+}
+
+start_post() {
+ local _want=
+ if _use_lvmetad ; then
+ _want="${_want} lvmetad"
+ fi
+
+ if _use_lvmlockd ; then
+ _want="${_want} lvmlockd"
+ fi
+
+ service_set_value want "${_want}"
+}
+
+stop() {
+ lvm_path="$(_get_lvm_path)"
+ if [ -z "${lvm_path}" ] ; then
+ eerror "Failed to find lvm binary in /bin or /sbin!"
+ return 1
+ fi
+
+ # Stop LVM2
+ # Act only when LVM metadata exists on disk AND the kernel has LVM or
+ # device-mapper support ('-o' is the legacy test(1) OR operator).
+ if [ -f /etc/lvmtab -o -d /etc/lvm ] \
+ && [ -d /proc/lvm -o "$(grep device-mapper /proc/misc 2>/dev/null)" ]
+ then
+ # All VG names as one headerless row; empty means nothing to deactivate.
+ local VGS=$($lvm_path vgs --config "${config}" -o vg_name --noheadings --nosuffix --rows 2> /dev/null)
+ if [ -z "${VGS}" ] ; then
+ # nothing to do for us
+ return 0
+ fi
+
+ local has_errors=0 verbose_command eend_cmd="eend"
+
+ yesno "${rc_verbose}" && verbose_command=" -v"
+
+ local msg="Failed to stop Logical Volume Manager"
+ if [ "${RC_RUNLEVEL}" = shutdown ] ; then
+ # failures on shutdown are non-fatal
+ # (ewend warns instead of marking the service failed)
+ eend_cmd="ewend"
+ msg="${msg} (possibly some LVs still needed for /usr or root)"
+ fi
+
+ ebegin "Stopping the Logical Volume Manager"
+
+ # Deactivate every VG; --sysinit avoids interaction during shutdown.
+ ${lvm_path} vgchange${verbose_command} --config "${config}" --sysinit --activate n
+ [ $? -ne 0 ] && has_errors=1
+
+ ${eend_cmd} ${has_errors} "${msg}"
+ fi
+
+ # at this point make sure we always exit without indicating an error
+ return 0
+}
+
+# vim:ts=4