Add mirror 'zfs-utils-git' at 'fa449d7'

Nathan 2022-10-05 13:18:34 -05:00
parent bcea64b89d
commit d6ff24bb8d
7 changed files with 490 additions and 0 deletions


@ -21,6 +21,11 @@
"branch": "master",
"revision": "bc1ae3304485ac89aeab2804c7e3c6f2bd3459b6"
},
"zfs-utils-git": {
"url": "https://aur.archlinux.org/zfs-utils-git.git",
"branch": "master",
"revision": "fa449d7d94fb39f79f2846733023fb31286851b3"
},
"zfsbootmenu": {
"url": "https://aur.archlinux.org/zfsbootmenu.git",
"branch": "master",

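Each entry in this manifest pins an AUR mirror to an exact upstream revision. A minimal sketch of how one pinned entry could be checked out locally, assuming the manifest is stored as JSON (the file name mirrors.json and the use of jq are assumptions, not part of this repository):

name=zfs-utils-git
url=$(jq -r ".\"$name\".url" mirrors.json)          # hypothetical manifest file name
branch=$(jq -r ".\"$name\".branch" mirrors.json)
revision=$(jq -r ".\"$name\".revision" mirrors.json)
git clone --branch "$branch" "$url" "$name"
git -C "$name" checkout --detach "$revision"        # pin the working tree to the recorded commit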
27
zfs-utils-git/.SRCINFO Normal file

@ -0,0 +1,27 @@
pkgbase = zfs-utils-git
pkgdesc = Userspace utilities for the Zettabyte File System.
pkgver = 2.1.99.r1389.g48cf170d5a
pkgrel = 1
epoch = 2
url = https://zfsonlinux.org/
arch = x86_64
groups = zfs-git
license = CDDL
makedepends = python
makedepends = python-setuptools
makedepends = python-cffi
makedepends = git
optdepends = python: pyzfs and extra utilities
optdepends = python-cffi: pyzfs
provides = zfs-utils=2.1.99.r1389.g48cf170d5a
conflicts = zfs-utils
source = git+https://github.com/openzfs/zfs.git
source = zfs.initcpio.install
source = zfs.initcpio.hook
source = zfs.initcpio.zfsencryptssh.install
b2sums = SKIP
b2sums = f7c78e5a0ce887e89e5cdc52515381d647a51586cb05c52a900e1307520f6f0fa828f8f5fd5a30823b233dcd79f0496375b21d044103e1d765e20f728c2d0fee
b2sums = 04892c161e9197d3d4d4cf5ae97d03e91b980426c033973cdcf76ff957e4ef0b2d541a7d184102ff267a29e5db772ac06616cacb3c4252f2fe5a18f0d81891f2
b2sums = 04e2af875e194df393d6cff983efc3fdf02a03a745d1b0b1e4a745f873d910b4dd0a45db956c1b5b2d97e9d5bf724ef12e23f7a2be3d5c12be027eaccf42349a
pkgname = zfs-utils-git

5
zfs-utils-git/.gitignore vendored Normal file

@ -0,0 +1,5 @@
zfs
src
pkg
*.pkg.*
*.log

75
zfs-utils-git/PKGBUILD Normal file

@ -0,0 +1,75 @@
# Maintainer: Yurii Kolesnykov <root@yurikoles.com>
# Contributor: Eli Schwartz <eschwartz@archlinux.org>
# Contributor: Iacopo Isimbaldi <isiachi@rhye.it>
# PRs are welcome: https://github.com/yurikoles-aur/zfs-utils-git
pkgname=zfs-utils-git
pkgver=2.1.99.r1389.g48cf170d5a
pkgrel=1
epoch=2
pkgdesc="Userspace utilities for the Zettabyte File System."
arch=('x86_64')
url='https://zfsonlinux.org/'
license=('CDDL')
groups=('zfs-git')
makedepends=('python' 'python-setuptools' 'python-cffi' 'git')
optdepends=('python: pyzfs and extra utilities' 'python-cffi: pyzfs')
provides=("${pkgname%-git}=${pkgver}")
conflicts=("${pkgname%-git}")
source=('git+https://github.com/openzfs/zfs.git'
'zfs.initcpio.install'
'zfs.initcpio.hook'
'zfs.initcpio.zfsencryptssh.install')
b2sums=('SKIP'
'f7c78e5a0ce887e89e5cdc52515381d647a51586cb05c52a900e1307520f6f0fa828f8f5fd5a30823b233dcd79f0496375b21d044103e1d765e20f728c2d0fee'
'04892c161e9197d3d4d4cf5ae97d03e91b980426c033973cdcf76ff957e4ef0b2d541a7d184102ff267a29e5db772ac06616cacb3c4252f2fe5a18f0d81891f2'
'04e2af875e194df393d6cff983efc3fdf02a03a745d1b0b1e4a745f873d910b4dd0a45db956c1b5b2d97e9d5bf724ef12e23f7a2be3d5c12be027eaccf42349a')
pkgver() {
    cd zfs
    git describe --long | sed 's/^zfs-//;s/-rc/rc/;s/\([^-]*-g\)/r\1/;s/-/./g'
}

prepare() {
    cd zfs
    autoreconf -fi
}

build() {
    cd zfs
    ./configure \
        --prefix=/usr \
        --sysconfdir=/etc \
        --sbindir=/usr/bin \
        --with-mounthelperdir=/usr/bin \
        --with-udevdir=/usr/lib/udev \
        --libexecdir=/usr/lib/zfs \
        --enable-pyzfs \
        --enable-systemd \
        --with-config=user \
        --with-zfsexecdir=/usr/lib/zfs
    make
}
package() {
    cd zfs
    make DESTDIR="${pkgdir}" install

    # Remove unneeded files
    rm -r "${pkgdir}"/etc/init.d
    rm -r "${pkgdir}"/etc/sudoers.d #???
    # We're experimenting with dracut in [extra], so start installing this.
    #rm -r "${pkgdir}"/usr/lib/dracut
    rm -r "${pkgdir}"/usr/lib/modules-load.d
    rm -r "${pkgdir}"/usr/share/initramfs-tools

    # Install the support files
    install -D -m644 "${srcdir}"/zfs.initcpio.hook "${pkgdir}"/usr/lib/initcpio/hooks/zfs
    install -D -m644 "${srcdir}"/zfs.initcpio.install "${pkgdir}"/usr/lib/initcpio/install/zfs
    install -D -m644 "${srcdir}"/zfs.initcpio.zfsencryptssh.install "${pkgdir}"/usr/lib/initcpio/install/zfsencryptssh
    install -D -m644 contrib/bash_completion.d/zfs "${pkgdir}"/usr/share/bash-completion/completions/zfs
}
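For reference, two small checks of what the PKGBUILD above computes: the pkgver() sed pipeline rewrites a `git describe --long` string into the version recorded in .SRCINFO, and the `${pkgname%-git}` expansion is what makes the package provide and conflict with plain zfs-utils (the describe string below is illustrative):

echo 'zfs-2.1.99-1389-g48cf170d5a' \
    | sed 's/^zfs-//;s/-rc/rc/;s/\([^-]*-g\)/r\1/;s/-/./g'
# -> 2.1.99.r1389.g48cf170d5a

pkgname=zfs-utils-git
pkgver=2.1.99.r1389.g48cf170d5a
echo "${pkgname%-git}=${pkgver}"
# -> zfs-utils=2.1.99.r1389.g48cf170d5a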

236
zfs-utils-git/zfs.initcpio.hook Normal file

@ -0,0 +1,236 @@
#
# WARNING: This script is parsed by ash in busybox at boot time, not bash!
# http://linux.die.net/man/1/ash
# https://wiki.ubuntu.com/DashAsBinSh
# http://www.jpsdomain.org/public/2008-JP_bash_vs_dash.pdf
#
ZPOOL_FORCE=""
ZPOOL_IMPORT_FLAGS=""
ZFS_BOOT_ONLY=""
zfs_get_bootfs () {
    for zfs_dataset in $(zpool list -H -o bootfs); do
        case ${zfs_dataset} in
            "" | "-")
                # skip this line/dataset
                ;;
            "no pools available")
                return 1
                ;;
            *)
                ZFS_DATASET=${zfs_dataset}
                return 0
                ;;
        esac
    done
    return 1
}
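# For reference, `zpool list -H -o bootfs` prints one line per imported pool,
# containing that pool's bootfs property (e.g. zroot/ROOT/default, an
# illustrative name) or "-" when the property is unset; with nothing imported
# it reports "no pools available". The case statement above skips empty/"-"
# values and treats the latter as a failure.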
zfs_decrypt_fs() {
    dataset=$1

    # Make sure dataset is encrypted; get fails if ZFS does not support encryption
    encryption="$(zfs get -H -o value encryption "${dataset}" 2>/dev/null)" || return 0
    [ "${encryption}" != "off" ] || return 0

    # Make sure the dataset is locked
    keystatus="$(zfs get -H -o value keystatus "${dataset}")" || return 0
    [ "${keystatus}" != "available" ] || return 0

    # Make sure the encryptionroot is sensible
    encryptionroot="$(zfs get -H -o value encryptionroot "${dataset}")" || return 0
    [ "${encryptionroot}" != "-" ] || return 0

    # Export encryption root to be used by other hooks (SSH)
    echo "${encryptionroot}" > /.encryptionroot

    prompt_override=""
    if keylocation="$(zfs get -H -o value keylocation "${encryptionroot}")"; then
        # If the key location is a file, determine whether it can be overridden by a prompt
        if [ "${keylocation}" != "prompt" ]; then
            if keyformat="$(zfs get -H -o value keyformat "${encryptionroot}")"; then
                [ "${keyformat}" = "passphrase" ] && prompt_override="yes"
            fi
        fi

        # If the key location is a local file, check whether the file exists
        if [ "${keylocation%%://*}" = "file" ]; then
            keyfile="${keylocation#file://}"

            # If the file does not exist yet, wait for udev to create device nodes
            if [ ! -r "${keyfile}" ]; then
                udevadm settle

                # Wait for udev up to 10 seconds
                if [ ! -r "${keyfile}" ]; then
                    echo "Waiting for key ${keyfile} for ${encryptionroot}..."
                    for _ in $(seq 1 20); do
                        sleep 0.5s
                        [ -r "${keyfile}" ] && break
                    done
                fi

                if [ ! -r "${keyfile}" ]; then
                    echo "Key ${keyfile} for ${encryptionroot} hasn't appeared. Trying anyway."
                fi
            fi
        fi
    fi

    # Loop until the key is loaded here or by another vector (SSH, for instance)
    while [ "$(zfs get -H -o value keystatus "${encryptionroot}")" != "available" ]; do
        # Try the default loading mechanism
        zfs load-key "${encryptionroot}" && break

        # Load failed; try a prompt if the key location was not already a prompt
        if [ -n "${prompt_override}" ]; then
            echo "Unable to load key ${keylocation}; please type the passphrase"
            echo "To retry the file, interrupt now or repeatedly input a wrong passphrase"
            zfs load-key -L prompt "${encryptionroot}" && break
        fi

        # Throttle retry attempts
        sleep 2
    done

    if [ -f /.encryptionroot ]; then
        rm /.encryptionroot
    fi
}
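# For reference, the properties consulted by zfs_decrypt_fs look roughly like
# this on an encrypted dataset (dataset names and values are illustrative):
#   zfs get -H -o value encryption     zroot/ROOT/default  -> aes-256-gcm (or "off")
#   zfs get -H -o value keystatus      zroot/ROOT/default  -> available | unavailable
#   zfs get -H -o value encryptionroot zroot/ROOT/default  -> zroot ("-" if unencrypted)
#   zfs get -H -o value keylocation    zroot               -> prompt | file:///path/to/key
#   zfs get -H -o value keyformat      zroot               -> passphrase | raw | hex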
zfs_mount_handler () {
    if [ "${ZFS_DATASET}" = "bootfs" ] ; then
        if ! zfs_get_bootfs ; then
            # Let's import everything and try again
            zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
            if ! zfs_get_bootfs ; then
                err "ZFS: Cannot find bootfs."
                exit 1
            fi
        fi
    fi

    local pool="${ZFS_DATASET%%/*}"
    local rwopt_exp="${rwopt:-ro}"

    if ! zpool list -H "${pool}" > /dev/null 2>&1; then
        if [ ! "${rwopt_exp}" = "rw" ]; then
            msg "ZFS: Importing pool ${pool} readonly."
            ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -o readonly=on"
        else
            msg "ZFS: Importing pool ${pool}."
        fi

        if ! zpool import ${ZPOOL_IMPORT_FLAGS} -N "${pool}" ${ZPOOL_FORCE} ; then
            err "ZFS: Unable to import pool ${pool}."
            exit 1
        fi
    fi

    local node="$1"
    local rootmnt=$(zfs get -H -o value mountpoint "${ZFS_DATASET}")
    local tab_file="/etc/fstab"
    local zfs_datasets="$(zfs list -H -o name -t filesystem -r ${ZFS_DATASET})"

    # Mount the root, and any child datasets
    for dataset in ${zfs_datasets}; do
        mountpoint=$(zfs get -H -o value mountpoint "${dataset}")
        canmount=$(zfs get -H -o value canmount "${dataset}")
        # skip datasets that should not be mounted here
        [ ${dataset} != "${ZFS_DATASET}" -a \( ${canmount} = "off" -o ${canmount} = "noauto" -o ${mountpoint} = "none" \) ] && continue
        if [ ${mountpoint} = "legacy" ]; then
            if [ -f "${tab_file}" ]; then
                if findmnt -snero source -F "${tab_file}" -S "${dataset}" > /dev/null 2>&1; then
                    opt=$(findmnt -snero options -F "${tab_file}" -S "${dataset}")
                    mnt=$(findmnt -snero target -F "${tab_file}" -S "${dataset}")
                    zfs_decrypt_fs "${dataset}"
                    mount -t zfs -o "${opt}" "${dataset}" "${node}${mnt}"
                fi
            fi
        else
            zfs_decrypt_fs "${dataset}"
            mount -t zfs -o "zfsutil,${rwopt_exp}" "${dataset}" "${node}/${mountpoint##${rootmnt}}"
        fi
    done
}
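# For reference, a typical pool layout as handled by zfs_mount_handler
# (dataset names are illustrative; ${node} is the new root passed in by mkinitcpio):
#   zroot/ROOT/default  mountpoint=/        -> mounted at ${node} with -o zfsutil
#   zroot/home          mountpoint=/home    -> mounted at ${node}/home with -o zfsutil
#   zroot/var/log       mountpoint=legacy   -> mounted only via a matching /etc/fstab entry
#   any dataset with canmount=off|noauto or mountpoint=none is skipped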
set_flags() {
    # Force import the pools, useful if the pool has not properly been exported using 'zpool export <pool>'
    [ ! "${zfs_force}" = "" ] && ZPOOL_FORCE="-f"

    # Disable late hook, useful if we want to use zfs-import-cache.service instead
    [ ! "${zfs_boot_only}" = "" ] && ZFS_BOOT_ONLY="1"

    # Add import directory to import command flags
    [ ! "${zfs_import_dir}" = "" ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -d ${zfs_import_dir}"
    [ "${zfs_import_dir}" = "" ] && [ -f /etc/zfs/zpool.cache.org ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -c /etc/zfs/zpool.cache.org"
}
run_hook() {
    set_flags

    # Wait 15 seconds for ZFS devices to show up
    [ "${zfs_wait}" = "" ] && ZFS_WAIT="15" || ZFS_WAIT="${zfs_wait}"

    case ${root} in
        # root=zfs
        "zfs")
            mount_handler="zfs_mount_handler"
            ;;
        # root=ZFS=... syntax (grub)
        "ZFS="*)
            mount_handler="zfs_mount_handler"
            ZFS_DATASET="${root#*[=]}"
            ;;
    esac

    case ${zfs} in
        "")
            # skip this line/dataset
            ;;
        auto|bootfs)
            ZFS_DATASET="bootfs"
            mount_handler="zfs_mount_handler"
            local pool="[a-zA-Z][^ ]*"
            ;;
        *)
            ZFS_DATASET="${zfs}"
            mount_handler="zfs_mount_handler"
            local pool="${ZFS_DATASET%%/*}"
            ;;
    esac

    # Allow at least n seconds for zfs device to show up. Especially
    # when using zfs_import_dir instead of zpool.cache, the listing of
    # available pools can be slow, so this loop must be top-tested to
    # ensure we do one 'zpool import' pass after the timer has expired.
    sleep ${ZFS_WAIT} & pid=$!

    local break_after=0
    while :; do
        kill -0 $pid > /dev/null 2>&1 || break_after=1
        if [ -c "/dev/zfs" ]; then
            zpool import ${ZPOOL_IMPORT_FLAGS} | awk "
                BEGIN { pool_found=0; online=0; unavail=0 }
                /^ ${pool} .*/ { pool_found=1 }
                /^\$/ { pool_found=0 }
                /UNAVAIL/ { if (pool_found == 1) { unavail=1 } }
                /ONLINE/ { if (pool_found == 1) { online=1 } }
                END { if (online == 1 && unavail != 1)
                          { exit 0 }
                      else
                          { exit 1 }
                    }" && break
        fi
        [ $break_after == 1 ] && break
        sleep 1
    done
    kill $pid > /dev/null 2>&1
}
run_latehook () {
    set_flags

    # Only run zpool import if flags were set (cache file found / zfs_import_dir specified) and zfs_boot_only is not set
    [ ! "${ZPOOL_IMPORT_FLAGS}" = "" ] && [ "${ZFS_BOOT_ONLY}" = "" ] && zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
}
# vim:set ts=4 sw=4 ft=sh et:

103
zfs-utils-git/zfs.initcpio.install Normal file

@ -0,0 +1,103 @@
#!/bin/bash
build() {
    map add_module \
        zavl \
        znvpair \
        zunicode \
        zcommon \
        zfs \
        spl

    map add_binary \
        fsck.zfs \
        mount.zfs \
        seq \
        zdb \
        zed \
        zfs \
        zhack \
        zinject \
        zpool \
        zstreamdump \
        /lib/udev/vdev_id \
        /lib/udev/zvol_id \
        findmnt \
        udevadm

    map add_file \
        /lib/udev/rules.d/60-zvol.rules \
        /lib/udev/rules.d/69-vdev.rules \
        /lib/udev/rules.d/90-zfs.rules \
        /lib/libgcc_s.so.1

    map add_dir \
        /etc/zfs/zed.d

    add_runscript

    # allow mount(8) to "autodetect" ZFS
    echo 'zfs' >>"${BUILDROOT}/etc/filesystems"

    [[ -f /etc/zfs/zpool.cache ]] && cp "/etc/zfs/zpool.cache" "${BUILDROOT}/etc/zfs/zpool.cache.org"
    [[ -f /etc/modprobe.d/zfs.conf ]] && add_file "/etc/modprobe.d/zfs.conf"
    [[ -f /etc/fstab ]] && add_file "/etc/fstab"
}
help() {
cat<<HELPEOF
This hook allows you to use ZFS as your root filesystem.
Command Line Setup:
You can append the following arguments to your kernel parameters list. See
https://wiki.archlinux.org/index.php/Kernel_parameters for more information.
To use ZFS as your boot filesystem:
zfs=bootfs or zfs=auto or root=zfs
To use a pool or dataset:
zfs=<pool/dataset>
To force importing of a ZFS pool:
zfs_force=1
If set to 1, this will use "zpool import -f" when attempting to import
pools.
To change the seconds of time to wait for ZFS devices to show up at boot:
zfs_wait=30
To search for devices in a directory other than "/dev":
zfs_import_dir=/dev/disk/by-uuid
or
zfs_import_dir=/dev/disk/by-partuuid
or
zfs_import_dir=/dev/disk/by-path
etc.
Following initcpio convention, the 'rw' option must be specified to load the
pool as read/write. Pools are loaded as read only by default.
Examples:
To use bootfs on your pool, use
zfs=bootfs rw
This will set up your root filesystem on the tank/root dataset:
zfs=tank/root rw
If you want to set options for the zfs-on-linux kernel module, add them to
/etc/modprobe.d/zfs.conf and then rebuild the initcpio image.
HELPEOF
}
# vim: set ts=4 sw=4 ft=sh et:
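Putting the parameters documented in help() together, two illustrative kernel command lines (pool, dataset and device paths are examples, not defaults):

# Root taken from the pool's bootfs property, imported read/write:
zfs=bootfs rw

# Explicit root dataset, forced import, device scan in /dev/disk/by-id, 30 s device wait:
zfs=zroot/ROOT/default rw zfs_force=1 zfs_import_dir=/dev/disk/by-id zfs_wait=30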

39
zfs-utils-git/zfs.initcpio.zfsencryptssh.install Normal file

@ -0,0 +1,39 @@
#!/bin/bash
make_etc_passwd() {
    echo 'root:x:0:0:root:/root:/bin/zfsdecrypt_shell' >> "${BUILDROOT}"/etc/passwd
    echo '/bin/zfsdecrypt_shell' > "${BUILDROOT}"/etc/shells
}

make_zfsdecrypt_shell() {
    decrypt_shell='#!/bin/sh
if [ -f "/.encryptionroot" ]; then
    # source zfs hook functions
    . /hooks/zfs
    # decrypt bootfs
    zfs_decrypt_fs "$(cat /.encryptionroot)"
    # kill pending decryption attempt to allow the boot process to continue
    killall zfs
else
    echo "ZFS is not ready yet. Please wait!"
fi'
    printf '%s' "$decrypt_shell" > "${BUILDROOT}"/bin/zfsdecrypt_shell
    chmod a+x "${BUILDROOT}"/bin/zfsdecrypt_shell
}

build ()
{
    make_etc_passwd
    make_zfsdecrypt_shell
}
help ()
{
cat<<HELPEOF
This hook is meant to be used in conjunction with mkinitcpio-dropbear,
mkinitcpio-netconf and/or mkinitcpio-ppp. This will provide a way to unlock
your encrypted ZFS root filesystem remotely.
HELPEOF
}
# vim: set ts=4 sw=4 ft=sh et:
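As a rough usage sketch, once the early-userspace network and the dropbear server from mkinitcpio-dropbear are up, logging in as root lands in the zfsdecrypt_shell installed above (the address and key path depend entirely on your mkinitcpio-netconf/mkinitcpio-dropbear configuration and are illustrative here):

ssh -i ~/.ssh/initramfs_key root@192.168.1.50
# zfsdecrypt_shell sources /hooks/zfs, runs zfs_decrypt_fs on the dataset named
# in /.encryptionroot, then kills the pending local prompt so boot continues.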