Hello I have an issue with a backup script for ZFS snapshots:
basically the break down of the script is this:
### START OF SCRIPT
# These variables are named first because they are nested in other variables.
snap_prefix=snap
retention=10
# Full paths to these utilities are needed when running the script from cron.
#date=/usr/bin/date
GDATE="/opt/csw/bin/gdate"
grep=/usr/bin/grep
#mbuffer=/usr/local/bin/mbuffer
sed=/usr/bin/sed
sort=/usr/bin/sort
xargs=/usr/bin/xargs
zfs=/sbin/zfs
src_0="ServerStoreR10SSD"
dst_0="zpoolRZ5SATA3/backuppool4/ServerStoreR10SSD"
host="root@hostbk"
today="$snap_prefix-`date +%Y%m%d`"
#yesterday="$snap_prefix-`date -v -1d +%Y%m%d`"
yesterday=$snap_prefix-`$GDATE -d "-1 day" +"%Y%m%d"`
snap_today="$src_0@$today"
snap_yesterday="$src_0@$yesterday"
snap_old=`$zfs list -t snapshot -o name | $grep "$src_0@$snap_prefix*" | $sort -r | $sed 1,${retention}d | $sort | $xargs -n 1`
log=/root/bin/zfsreplication/cronlog/ServerStoreR10SSD.txt
# Create a blank line between the previous log entry and this one.
echo >> $log
# Print the name of the script.
echo "zfsrep_ServerStoreR10SSD.sh" >> $log
# Print the current date/time.
$date >> $log
echo >> $log
# Look for today's snapshot and, if not found, create it.
if $zfs list -H -o name -t snapshot | $sort | $grep "$snap_today$" > /dev/null
then
echo "Today's snapshot '$snap_today' already exists." >> $log
# Uncomment if you want the script to exit when it does not create today's snapshot:
#exit 1
else
echo "Taking today's snapshot: $snap_today" >> $log
$zfs snapshot -r $snap_today >> $log 2>&1
fi
echo >> $log
# Look for yesterday snapshot and, if found, perform incremental replication, else print error message.
if $zfs list -H -o name -t snapshot | $sort | $grep "$snap_yesterday$" > /dev/null
then
echo "Yesterday's snapshot '$snap_yesterday' exists. Proceeding with replication..." >> $log
$zfs send -R -i $snap_yesterday $snap_today | ssh $host $zfs receive -vudF $dst_0 >> $log 2>&1
#For use in local snapshots
#$zfs send -R -i $snap_yesterday $snap_today | $zfs receive -vudF $dst_0 >> $log 2>&1
echo >> $log
echo "Replication complete." >> $log
else
echo "Error: Replication not completed. Missing yesterday's snapshot." >> $log
fi
echo >> $log
# Remove snapshot(s) older than the value assigned to $retention.
echo "Attempting to destroy old snapshots..." >> $log
if [ -n "$snap_old" ]
then
echo "Destroying the following old snapshots:" >> $log
echo "$snap_old" >> $log
$zfs list -t snapshot -o name | $grep "$src_0@$snap_prefix*" | $sort -r
| $sed 1,${retention}d | $sort | $xargs -n 1 $zfs destroy -r >> $log 2>&1
else
echo "Could not find any snapshots to destroy." >> $log
fi
# Mark the end of the script with a delimiter.
echo "**********" >> $log
# END OF SCRIPT
~
the log shows the following
Yesterday's snapshot 'ServerStoreR10SSD@snap-20170419' exists. Proceeding with replication... cannot receive: specified fs (zpoolRZ5SATA3/backuppool4/ServerStoreR10SSD) does not exist attempting destroy zpoolRZ5SATA3/backuppool4/ServerStoreR10SSD failed - trying rename zpoolRZ5SATA3/backuppool4/ServerStoreR10SSD to zpoolRZ5SATA3/backuppool4/ServerStoreR10SSDrecv-5424-1 cannot open 'zpoolRZ5SATA3/backuppool4/ServerStoreR10SSD': dataset does not exist
The script was successfully up until one point when i had a power outage. The main issue is that every time it runs the incremental portion the receiving zfs pool gets renamed to something weird like "..recv-5424-1" hence it cannot open the destination pool and the backup fails...
any suggestions please?