diff --git a/Proxmox/reduce-ssd-writes.sh b/Proxmox/reduce-ssd-writes.sh
new file mode 100755
index 0000000..794c910
--- /dev/null
+++ b/Proxmox/reduce-ssd-writes.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+
+# Changes recommended to reduce writes on SSDs; these are generally all bad for hard drives: https://serverfault.com/questions/950794/how-to-limit-zfs-writes-on-nvme-ssd-in-raid1-to-avoid-rapid-disk-wear/950896#950896
+
+# These changes may cause data loss and increase other resource usage (CPU, etc.).
+
+# This script assumes your ZFS pool is named rpool.
+
+echo "Enabling a variety of settings to reduce writes on SSDs. This may cause data loss and increase the usage of other resources like CPU. DO NOT run this on a hard drive. Hit Ctrl+C to cancel."
+
+sleep 10
+
+echo "options zfs zfs_txg_timeout=30" > /etc/modprobe.d/zfs.conf # Set txg_timeout to 30 seconds (default is 5). This introduces a higher risk of data loss.
+zfs set atime=off rpool # Turn off atime.
+zfs set logbias=throughput rpool # Change logbias to throughput.
+zfs set compression=lz4 rpool # Set compression to lz4 instead of the older LZJB.
+zfs set recordsize=16K rpool
+# The OpenZFS tuning docs recommend smaller recordsizes for VMs on SSDs, depending on the workload:
+# https://openzfs.github.io/openzfs-docs/Performance%20and%20Tuning/Workload%20Tuning.html#virtual-machines
+# https://openzfs.github.io/openzfs-docs/Performance%20and%20Tuning/Workload%20Tuning.html#zvol-volblocksize
+# 16K: https://serverfault.com/a/1120640
+
+echo "This is the ashift; it should be 12:"
+zpool get all | grep ashift
+
+echo "And trim should be enabled:"
+systemctl status fstrim.timer
+
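+# Note (hedged editor's addition, not in the original recommendations): fstrim
+# generally does not cover ZFS datasets, since ZFS handles trim itself. To trim
+# the pool continuously, consider enabling the autotrim pool property:
+# zpool set autotrim=on rpool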
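+
+# Hedged sketch (also an editor's addition): the modprobe.d setting above only
+# takes effect once the zfs module is reloaded, typically at the next boot.
+# The running value can be changed immediately via the sysfs module parameter:
+echo 30 > /sys/module/zfs/parameters/zfs_txg_timeout
+
+echo "Verifying the new dataset properties:"
+zfs get atime,logbias,compression,recordsize rpool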