
Initial commit

2025-12-04 23:23:42 -05:00
commit 765f598313
58 changed files with 2736 additions and 0 deletions

52
scripts/btrfs-convert Executable file

@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Convert directories to btrfs subvolumes
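# Example (hypothetical paths; the directories must already live on a btrfs filesystem):
#   sudo ./btrfs-convert /srv/containers /srv/music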
usage() {
echo "Usage: $0 <directory1> [directory2 ...]"
echo "Converts each directory to a btrfs subvolume."
}
btrfs_convert() {
local d dir tmp_dir
# Include hidden files in the glob below so dotfiles are moved before the original directory is removed
shopt -s dotglob
for d in "$@"; do
dir="$d"
tmp_dir="$dir.tmp"
if [[ ! -d "$dir" ]]; then
echo "[ERROR] Directory '$dir' does not exist. Skipping."
continue
fi
if [[ -d "$tmp_dir" ]]; then
echo "[ERROR] Temporary directory '$tmp_dir' already exists. Skipping '$dir'."
continue
fi
echo "[INFO] Creating btrfs subvolume: '$tmp_dir'..."
if ! btrfs subvolume create "$tmp_dir" &>/dev/null; then
echo "[ERROR] Failed to create btrfs subvolume '$tmp_dir'. Skipping '$dir'."
continue
fi
echo "[INFO] Moving contents from '$dir' to '$tmp_dir'..."
if ! mv "$dir"/* "$tmp_dir"/ 2>/dev/null; then
echo "[ERROR] Failed to move contents from '$dir' to '$tmp_dir'. Cleaning up."
rm -rf "$tmp_dir"
continue
fi
echo "[INFO] Removing original directory '$dir'..."
if ! rm -rf "$dir"; then
echo "[ERROR] Failed to remove '$dir'. Manual cleanup may be required."
continue
fi
echo "[INFO] Renaming '$tmp_dir' to '$dir'..."
if ! mv "$tmp_dir" "$dir"; then
echo "[ERROR] Failed to rename '$tmp_dir' to '$dir'. Manual cleanup may be required."
continue
fi
echo "[SUCCESS] Converted '$dir' to a btrfs subvolume."
done
}
if [[ $# -lt 1 ]]; then
usage
exit 1
fi
btrfs_convert "$@"

44
scripts/chroot-rescue Executable file

@@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Mount and chroot a linux system
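# Assumes a UEFI install (the boot partition is mounted at /boot/efi inside the chroot).
# Defaults can be overridden via the ROOT_PART, BOOT_PART, and MOUNT_DIR environment variables.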
set -euo pipefail
lsblk
# Set defaults
RP="${ROOT_PART:-/dev/nvme0n0p3}"
BP="${BOOT_PART:-/dev/nvme0n0p1}"
read -r -p "Root partition [$RP]: " input_rp
RP="${input_rp:-$RP}"
read -r -p "Boot partition [$BP]: " input_bp
BP="${input_bp:-$BP}"
MD="${MOUNT_DIR:-/mnt/${RP##*/}}"
[[ -d "$MD" ]] && MD="$MD-$RANDOM"
read -r -p "Mount directory [$MD]: " input_md
MD="${input_md:-$MD}"
if [[ ! -e "$RP" || ! -e "$BP" ]]; then
echo "[ERROR] Root or boot partition does not exist."
exit 1
fi
# Mount and enter the chroot
echo "[INFO] Mounting and entering chroot..."
sudo mkdir -p "$MD"
sudo mount "$RP" "$MD"
for i in proc sys dev; do
sudo mount --bind "/$i" "$MD/$i"
done
sudo mount "$BP" "$MD/boot/efi"
sudo chroot "$MD"
# After chroot
echo "[INFO] Exiting and unmounting chroot..."
sudo umount "$MD/boot/efi"
for i in proc sys dev; do
sudo umount "$MD/$i"
done
sudo umount "$MD"
exit 0


@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# One-liner to deploy containerized home-assistant
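# Assumes ~/.config/systemd/user already exists; run `loginctl enable-linger` if the
# service should start at boot without an active user session.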
podman run -d --name="home-assistant" -v ~/.config/home-assistant:/config -v /etc/localtime:/etc/localtime:ro --net=host docker.io/homeassistant/home-assistant:stable &&
podman generate systemd --name "home-assistant" --container-prefix "" --separator "" > ~/.config/systemd/user/home-assistant.service &&
systemctl --user daemon-reload &&
systemctl --user enable --now home-assistant

193
scripts/drive-info Executable file

@@ -0,0 +1,193 @@
#!/usr/bin/env bash
# Gathers disk info including:
# Hardware info
# Filesystem data
# Btrfs array membership
# LUKS encryption
# SMART status
#
# Usage: sudo ./drive-info
#
# Requires root privileges for complete information access
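# Uses: lsblk, smartctl (smartmontools), blkid, findmnt, cryptsetup, lspci (pciutils), btrfs-progs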
# Check for root privileges
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root for complete information"
exit 1
fi
# Get list of block devices (excluding loop devices, partitions, and virtual devices)
devices=$(lsblk -dn -o NAME,TYPE | grep disk | awk '{print $1}')
for device in $devices; do
dev_path="/dev/$device"
# Get basic size info
size=$(lsblk -dno SIZE "$dev_path" 2>/dev/null)
# Get comprehensive SMART information
smart_info=$(smartctl -i "$dev_path" 2>/dev/null)
smart_health=$(smartctl -H "$dev_path" 2>/dev/null | grep "SMART overall-health" | awk '{print $NF}')
smart_all=$(smartctl -A "$dev_path" 2>/dev/null)
# Extract model
model=$(echo "$smart_info" | grep "Device Model:" | cut -d: -f2- | xargs)
[[ -z "$model" ]] && model=$(echo "$smart_info" | grep "Model Number:" | cut -d: -f2- | xargs)
[[ -z "$model" ]] && model=$(cat "/sys/block/$device/device/model" 2>/dev/null | xargs)
# Extract serial number
serial=$(echo "$smart_info" | grep "Serial Number:" | cut -d: -f2- | xargs)
[[ -z "$serial" ]] && serial=$(lsblk -dno SERIAL "$dev_path" 2>/dev/null)
# Extract WWN
wwn=$(echo "$smart_info" | grep "LU WWN Device Id:" | cut -d: -f2- | xargs | sed 's/ //g')
[[ -z "$wwn" ]] && wwn=$(lsblk -dno WWN "$dev_path" 2>/dev/null)
# Extract rotation rate
rpm=$(echo "$smart_info" | grep "Rotation Rate:" | cut -d: -f2- | xargs)
if [[ -z "$rpm" || "$rpm" == "Solid State Device" ]]; then
rot=$(cat "/sys/block/$device/queue/rotational" 2>/dev/null)
[[ "$rot" == "0" ]] && rpm="SSD"
fi
# Extract form factor
form_factor=$(echo "$smart_info" | grep "Form Factor:" | cut -d: -f2- | xargs)
# Extract interface and link speed
interface=$(echo "$smart_info" | grep "SATA Version" | cut -d: -f2- | xargs)
[[ -z "$interface" ]] && interface=$(echo "$smart_info" | grep "Transport protocol:" | cut -d: -f2- | xargs)
[[ -z "$interface" ]] && interface=$(echo "$smart_info" | grep "NVMe Version:" | cut -d: -f2- | xargs | awk '{print "NVMe " $1}')
# Extract temperature
temp=$(echo "$smart_all" | grep -i "Temperature" | head -1 | awk '{print $10}')
[[ -n "$temp" ]] && temp="${temp}°C"
# Extract power-on hours
power_hours=$(echo "$smart_all" | grep "Power_On_Hours" | awk '{print $10}')
[[ -z "$power_hours" ]] && power_hours=$(echo "$smart_all" | grep "Power On Hours" | awk '{print $3}')
# Get disk ID
disk_id=""
for f in /dev/disk/by-id/*; do
[[ -e "$f" ]] || continue
# resolve symlink target and compare basename to device, skip entries that reference partitions
target=$(readlink -f "$f" 2>/dev/null) || continue
if [[ "$(basename "$target")" == "$device" && "$f" != *part* ]]; then
disk_id=$(basename "$f")
break
fi
done
# Get filesystem UUID
uuid=$(lsblk -no UUID "$dev_path" 2>/dev/null | head -1)
[[ -z "$uuid" ]] && uuid=$(lsblk -no UUID "${dev_path}1" 2>/dev/null)
# Check for LUKS encryption
luks_uuid=""
luks_mapper=""
if cryptsetup isLuks "$dev_path" 2>/dev/null; then
luks_uuid=$(cryptsetup luksUUID "$dev_path" 2>/dev/null)
for mapper in /dev/mapper/luks-*; do
[[ -e "$mapper" ]] || continue
mapper_name=$(basename "$mapper")
[[ "$mapper_name" == "luks-$luks_uuid" ]] && luks_mapper="$mapper_name" && break
done
fi
# Get partition table type
ptable=$(blkid -p -s PTTYPE "$dev_path" 2>/dev/null | grep -oP 'PTTYPE="\K[^"]+')
[[ -z "$ptable" ]] && ptable="none"
# Get initial mount point
mount_point=$(findmnt -no TARGET "$dev_path" 2>/dev/null | head -1)
if [[ -z "$mount_point" && -n "$luks_mapper" ]]; then
mount_point=$(findmnt -no TARGET "/dev/mapper/$luks_mapper" 2>/dev/null | head -1)
fi
# Get HBA information
hba_info=""
if [[ -L "/sys/block/$device" ]]; then
dev_path_sys=$(readlink -f "/sys/block/$device")
# Exclude USB, virtual, and NVMe devices from HBA detection
if [[ ! "$dev_path_sys" =~ (usb|virtual|nvme) ]]; then
phy=$(echo "$dev_path_sys" | grep -oP 'phy-\K[0-9]+' | head -1)
port=$(echo "$dev_path_sys" | grep -oP 'port-\K[0-9]+' | head -1)
target=$(echo "$dev_path_sys" | grep -oP 'target\K[0-9:]+' | head -1)
# Find the actual storage controller in the PCI chain
pci_addr=""
pci_desc=""
mapfile -t pci_addrs < <(echo "$dev_path_sys" | grep -oP '\d+:\d+:\d+\.\d+')
for addr in "${pci_addrs[@]}"; do
desc=$(lspci -s "$addr" 2>/dev/null | cut -d: -f3-)
if [[ "$desc" =~ (SAS|SATA|RAID|HBA|LSI|Adaptec|AHCI) ]]; then
pci_addr="$addr"
pci_desc="$desc"
break
fi
done
# Build HBA info string
if [[ -n "$pci_addr" ]]; then
hba_info="PCI: $pci_addr ($pci_desc)"
[[ -n "$phy" ]] && hba_info="$hba_info | PHY: $phy"
[[ -n "$port" ]] && hba_info="$hba_info | Port: $port"
[[ -n "$target" ]] && hba_info="$hba_info | Target: $target"
fi
fi
fi
# Get Btrfs information
btrfs_label=""
btrfs_uuid=""
btrfs_devid=""
# Check device or its LUKS mapper for btrfs
check_dev="$dev_path"
if [[ -n "$luks_mapper" ]]; then
check_dev="/dev/mapper/$luks_mapper"
fi
btrfs_show=$(btrfs filesystem show "$check_dev" 2>/dev/null)
if btrfs filesystem show "$check_dev" &>/dev/null; then
btrfs_label=$(echo "$btrfs_show" | head -1 | grep -oP "Label: '\K[^']+")
btrfs_uuid=$(echo "$btrfs_show" | head -1 | grep -oP "uuid: \K[a-f0-9-]+")
btrfs_devid=$(echo "$btrfs_show" | grep -E "(${check_dev}|${dev_path})" | grep -oP "devid\s+\K[0-9]+" | head -1)
# If not mounted, check if any other device in the btrfs array is mounted
if [[ -z "$mount_point" && -n "$btrfs_uuid" ]]; then
all_devs=$(echo "$btrfs_show" | grep "path" | grep -oP "path \K[^ ]+")
for btrfs_dev in $all_devs; do
mount_point=$(findmnt -no TARGET "$btrfs_dev" 2>/dev/null | head -1)
[[ -n "$mount_point" ]] && break
done
fi
fi
# Default mount point if still empty
[[ -z "$mount_point" ]] && mount_point="Not mounted"
# Text output
echo "╔════════════════════════════════════════╗"
echo "║ Device: $dev_path"
echo "╚════════════════════════════════════════╝"
echo "Model: $model"
echo "Serial: $serial"
echo "WWN: $wwn"
echo "Size: $size"
echo "Rotation: $rpm"
echo "Form Factor: $form_factor"
echo "Interface: $interface"
echo "Disk ID: $disk_id"
echo "Filesystem UUID: $uuid"
echo "LUKS UUID: $luks_uuid"
echo "Partition Table: $ptable"
echo "Mount: $mount_point"
echo "HBA Info: $hba_info"
echo "SMART Health: $smart_health"
echo "Temperature: $temp"
echo "Power On Hours: $power_hours"
echo "Btrfs Label: $btrfs_label"
echo "Btrfs UUID: $btrfs_uuid"
echo "Btrfs devid: $btrfs_devid"
echo
done

21
scripts/estimate-musicdir Executable file

@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Estimate the size of a music directory if FLACs are lossy compressed
MUSIC_DIR="${1:-/home/bryan/media/music}"
# Sum existing MP3s
MP3_BYTES=$(find "$MUSIC_DIR" -type f -iname "*.mp3" -exec du -b {} + | awk '{sum+=$1} END{print sum}')
# Sum FLACs
FLAC_BYTES=$(find "$MUSIC_DIR" -type f -iname "*.flac" -exec du -b {} + | awk '{sum+=$1} END{print sum}')
# Estimate FLACs as 160k Ogg (roughly 1/8 size of FLAC)
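# Illustration: the estimate simply divides the FLAC total by 8, so 400 MiB of FLACs
# would contribute 400/8 = 50 MiB to the projected total.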
EST_FLAC_OGG=$(( FLAC_BYTES / 8 ))
# Total estimated size
TOTAL_EST=$(( MP3_BYTES + EST_FLAC_OGG ))
# Human-readable
EST_HR=$(numfmt --to=iec-i --suffix=B "$TOTAL_EST")
echo "Estimated total size (MP3 + FLAC → 160k Ogg): $EST_HR"

29
scripts/extract Executable file

@@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Automatically decompresses most filetypes
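# Example: extract release.tar.gz photos.zip
# The file may also be sourced to make the extract() function available in the current shell.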
extract() {
local a
[[ $# -eq 0 ]] && { echo "usage: extract <archive...>" >&2; return 1; }
for a in "$@"; do
[[ ! -f $a ]] && { echo "$a: not a file" >&2; continue; }
case $a in
*.tar.*|*.tgz|*.tbz2) tar xvf "$a" --auto-compress ;;
*.tar) tar xvf "$a" ;;
*.gz) gunzip "$a" ;;
*.bz2) bunzip2 "$a" ;;
*.xz) unxz "$a" ;;
*.zst) unzstd "$a" ;;
*.zip) unzip "$a" ;;
*.rar) unrar x "$a" ;;
*.7z) 7z x "$a" ;;
*.Z) uncompress "$a" ;;
*) echo "$a: cannot extract" ;;
esac
done
}
# Allow script to be safely sourced
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
extract "$@"
exit
fi

7
scripts/history-clean Executable file

@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Cleans the history file of PGP messages and keys
histfile="${1:-$HISTFILE:-$HOME/.histfile}"
cp -a "$histfile" "/tmp/$histfile.bak"
sed --in-place '/gpg/d' "$histfile"
sed --in-place '/-----BEGIN PGP MESSAGE-----/,/-----END PGP MESSAGE-----/d' "$histfile"

60
scripts/iso-to-mkv Executable file

@@ -0,0 +1,60 @@
#!/usr/bin/env bash
# Convert ISO files to MKV format with automatic season/episode naming
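# Expects each ISO to sit under a directory whose name contains the season number, e.g. (hypothetical):
#   ShowName/S01/disc1.iso  ->  OUT_DIR/Season 01/S01E01.mkv, S01E02.mkv, ...
# Usage: iso-to-mkv [SEARCH_DIR] [OUT_DIR]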
set -euo pipefail
SEARCH_DIR="${1:-$(pwd)}"
OUT_DIR="${2:-$SEARCH_DIR/out}"
if [[ ! -d "$SEARCH_DIR" ]]; then
echo "[ERROR] Search directory '$SEARCH_DIR' does not exist."
exit 1
fi
mkdir -p "$OUT_DIR"
prev_season=""
ep=1
echo "[INFO] Searching for ISO files in '$SEARCH_DIR'..."
find "$SEARCH_DIR" -type f -iname '*.iso' | sort | while read -r iso; do
echo "[INFO] Processing: $iso"
parent=$(basename "$(dirname "$iso")")
if [[ ! $parent =~ S([0-9]+) ]]; then
echo "[WARN] Skipping '$iso' - parent directory doesn't match season pattern."
continue
fi
season=$(printf "%02d" "${BASH_REMATCH[1]}")
if [[ "$season" != "$prev_season" ]]; then
ep=1
prev_season="$season"
fi
ripdir="$OUT_DIR/temp/$parent"
mkdir -p "$ripdir" "$OUT_DIR/Season $season"
echo "[INFO] Ripping ISO with MakeMKV..."
if ! snap run makemkv.makemkvcon -r mkv --minlength=1800 iso:"$iso" all "$ripdir"; then
echo "[ERROR] Failed to rip '$iso'. Skipping."
continue
fi
for mkv in "$ripdir"/*.mkv; do
[[ -e "$mkv" ]] || continue
out="$OUT_DIR/Season $season/S${season}E$(printf "%02d" "$ep").mkv"
echo "[INFO] Converting to: $out"
if ffmpeg -nostdin -hide_banner -loglevel error -i "$mkv" \
-map 0:v -map 0:a:m:language:eng -map 0:s:m:language:eng \
-c copy "$out"; then
rm "$mkv"
((ep++))
else
echo "[ERROR] FFmpeg conversion failed for '$mkv'."
fi
done
done
echo "[INFO] Conversion complete. Output in '$OUT_DIR'."


@@ -0,0 +1,63 @@
<#
.SYNOPSIS
Adds JRiver Media Center folders & processes to Windows Defender exclusions
.DESCRIPTION
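Run from an elevated PowerShell prompt (Add-MpPreference requires administrator rights), e.g.: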
powershell -ExecutionPolicy Bypass -File .\jriver-exclusions.ps1
#>
function Add-ItemExclusion {
param(
[string]$Item,
[ValidateSet('Path','Process')]$Type
)
try {
if ($Type -eq 'Path') {
Add-MpPreference -ExclusionPath $Item -ErrorAction Stop
} else {
Add-MpPreference -ExclusionProcess $Item -ErrorAction Stop
}
Write-Host "Added ${Type}: ${Item}"
}
catch {
Write-Warning "Skipped/failed ${Type}: ${Item} - $_"
}
}
Write-Host "Configuring JRiver Media Center (folders via wildcards, processes version 3050)"
# Folder exclusions (wildcards cover all files inside)
$folders = @(
'C:\Program Files\J River',
'C:\Program Files\J River\Media Center *',
"$Env:APPDATA\J River",
"$Env:APPDATA\J River\Media Center *"
)
# Process exclusions (explicit versions 30-50)
$processes = @()
for ($v = 30; $v -le 50; $v++) {
$processes += "MC$v.exe"
$processes += "Media Center $v.exe"
}
# Add static processes that are version-independent
$processes += @('JRService.exe','JRWorker.exe','JRWeb.exe')
# Add exclusions
Write-Host "=== Adding folder exclusions ==="
$folders | ForEach-Object { Add-ItemExclusion -Item $_ -Type Path }
Write-Host "=== Adding process exclusions ==="
$processes | Sort-Object -Unique | ForEach-Object { Add-ItemExclusion -Item $_ -Type Process }
# Validation step
$pref = Get-MpPreference
Write-Host ''
Write-Host "=== Current Defender exclusions ==="
Write-Host "Paths:"
$pref.ExclusionPath | ForEach-Object { Write-Host " $_" }
Write-Host ''
Write-Host "Processes:"
$pref.ExclusionProcess | ForEach-Object { Write-Host " $_" }
Write-Host ''
Write-Host "=== Validation complete ==="


@@ -0,0 +1,25 @@
IfElse(
IsEqual([Media Type], Audio),
If(IsEqual([Media Sub Type], Podcast),
podcasts/Clean([Album],3),
music/Clean([Album Artist (auto)],3)/[[Year]] Clean([Album],3)),
IsEqual([Media Sub Type], Movie),
movies/Clean([Name], 3),
IsEqual([Media Sub Type], TV Show),
tv/Clean([Series],3)/Season PadNumber([Season], 2)
)
IfElse(
IsEqual([Media Type], Audio),
If(IsEmpty([Disc #],1),
1[Track #],
[Disc #][Track #]) - Clean([Artist] - [Name],3),
IsEqual([Media Sub Type], Movie),
Clean([Name],3) [[Year]],
IsEqual([Media Sub Type], TV Show),
Clean([Series] - S[Season]E[Episode] - [Name],3)
)
IfElse(IsEqual([Media Type], Audio), If(IsEqual([Media Sub Type], Podcast), podcasts/Clean([Album],3), music/RemoveCharacters(Clean([Album Artist (auto)],3),.,2)/[[Year]] RemoveCharacters(Clean([Album],3),.,3)), IsEqual([Media Sub Type], Movie), movies/Clean(RemoveCharacters([Name],:), 3) [[Year]], IsEqual([Media Sub Type], TV Show), tv/Clean([Series],3)/Season PadNumber([Season], 2))
IfElse(IsEqual([Media Type], Audio), If(IsEmpty([Disc #],1), 1[Track #], [Disc #][Track #]) - RemoveCharacters(Clean([Artist] - [Name],3),.,3), IsEqual([Media Sub Type], Movie), Clean(RemoveCharacters([Name],:),3) [[Year]], IsEqual([Media Sub Type], TV Show), Clean([Series] - S[Season]E[Episode] - [Name],3))


@@ -0,0 +1,66 @@
#!/usr/bin/env python3
"""
Fix JRiver date imported fields to use the earliest date for each album.
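
The script expects field lines of this form (illustrative values):
    <Field Name="Filename">C:\Music\Artist\Album\01 Track.flac</Field>
    <Field Name="Date Imported">1672531200</Field>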
"""
import sys
from pathlib import Path, PureWindowsPath
def get_album_path(line: str) -> str:
    """Extract and return the album directory path from a filename field."""
    filename = line.strip().removeprefix('<Field Name="Filename">').removesuffix('</Field>')
    path = PureWindowsPath(filename)
    return str(path.parent)


def get_date(line: str) -> int:
    """Extract and return the date imported value from a date field."""
    date = line.strip().removeprefix('<Field Name="Date Imported">').removesuffix('</Field>')
    return int(date)


def main() -> None:
    """Main function to process JRiver library file."""
    if len(sys.argv) != 3:
        print("Usage: jriver-fix-date-imported <input_file> <output_file>", file=sys.stderr)
        sys.exit(1)
    in_file = Path(sys.argv[1])
    out_file = Path(sys.argv[2])
    if not in_file.exists():
        print(f"[ERROR] Input file '{in_file}' does not exist.", file=sys.stderr)
        sys.exit(1)
    # Read input file
    with open(in_file, "r", encoding="utf-8") as f:
        lines = f.readlines()
    # Build album to tracks mapping
    albums: dict[str, list[tuple[int, int]]] = {}
    current_album: str | None = None
    for lnum, line in enumerate(lines):
        if '<Field Name="Filename">' in line:
            current_album = get_album_path(line)
        elif '<Field Name="Date Imported">' in line:
            date = get_date(line)
            if current_album:
                albums.setdefault(current_album, []).append((lnum, date))
    # Update lines with earliest date for each album
    for _, tracks in albums.items():
        earliest_date: int = min(tracks, key=lambda t: t[1])[1]
        for lnum, _ in tracks:
            lines[lnum] = f'<Field Name="Date Imported">{earliest_date}</Field>\n'
    # Write output file
    with open(out_file, 'w', encoding="utf-8") as f:
        f.writelines(lines)
    print(f"[SUCCESS] Processed {len(albums)} albums. Output written to '{out_file}'.")


if __name__ == "__main__":
    main()


@@ -0,0 +1,63 @@
#!/usr/bin/env python3
"""
Replace JRiver date imported fields with date modified when the latter is earlier.
"""
import sys
from pathlib import Path
def get_import_date(line: str) -> int:
    """Extract and return the date imported value from a date imported field."""
    date = line.strip().removeprefix('<Field Name="Date Imported">').removesuffix('</Field>')
    return int(date)


def get_modified_date(line: str) -> int:
    """Extract and return the date modified value from a date modified field."""
    date = line.strip().removeprefix('<Field Name="Date Modified">').removesuffix('</Field>')
    return int(date)


def main() -> None:
    """Main function to process JRiver library file."""
    if len(sys.argv) != 3:
        print("Usage: jriver-replace-date-imported <input_file> <output_file>", file=sys.stderr)
        sys.exit(1)
    in_file = Path(sys.argv[1])
    out_file = Path(sys.argv[2])
    if not in_file.exists():
        print(f"[ERROR] Input file '{in_file}' does not exist.", file=sys.stderr)
        sys.exit(1)
    # Read input file
    with open(in_file, "r", encoding="utf-8") as f:
        lines = f.readlines()
    # Process lines and replace dates where appropriate
    import_date: int = 0
    date_imported_line: int = 0
    replacements = 0
    for lnum, line in enumerate(lines):
        if '<Field Name="Date Imported">' in line:
            import_date = get_import_date(line)
            date_imported_line = lnum
        elif '<Field Name="Date Modified">' in line:
            modified_date = get_modified_date(line)
            if modified_date < import_date:
                print(f"[INFO] Replacing {import_date} with {modified_date} at line {date_imported_line}")
                lines[date_imported_line] = f'<Field Name="Date Imported">{modified_date}</Field>\n'
                replacements += 1
    # Write output file
    with open(out_file, 'w', encoding="utf-8") as f:
        f.writelines(lines)
    print(f"[SUCCESS] Made {replacements} replacements. Output written to '{out_file}'.")


if __name__ == "__main__":
    main()

104
scripts/prune-files Executable file

@@ -0,0 +1,104 @@
#!/usr/bin/env bash
# Remove all but the latest N versions of files matching given prefixes
# Usage: prune-files -k 3 thisfileprefix [thatfileprefix]
set -euo pipefail
prune-files() {
local -a PREFIXES
local KEEP_INT=1 # default number of files to keep
local DRY_RUN=false
printHelpAndExit() {
cat <<-'EOF'
USAGE:
prune-files -k 3 thisfileprefix [thatfileprefix]
OPTIONS:
-k, --keep NUMBER
Keep NUMBER of the latest files that match each file prefix (Default: 1)
-n, --dry-run
Show what would be removed without actually deleting files
-h, --help
Print this help dialog and exit
EOF
[[ -z "$1" ]] && exit 0 || exit "$1"
}
parseInput() {
local _input
if _input=$(getopt -o hk:n -l help,keep:,dry-run -- "$@"); then
eval set -- "$_input"
while true; do
case "$1" in
-k|--keep) shift; KEEP_INT="$1" ;;
-n|--dry-run) DRY_RUN=true ;;
-h|--help) printHelpAndExit 0 ;;
--) shift; break ;;
esac
shift
done
else
echo "[ERROR] Incorrect option(s) provided" >&2
printHelpAndExit 1
fi
if [[ $# -eq 0 ]]; then
echo "[ERROR] At least one file prefix must be provided" >&2
printHelpAndExit 1
fi
if ! [[ "$KEEP_INT" =~ ^[0-9]+$ ]] || [[ "$KEEP_INT" -lt 1 ]]; then
echo "[ERROR] --keep must be a positive integer" >&2
exit 1
fi
PREFIXES=("$@")
}
findAndRemove() {
local prefix file count
for prefix in "${PREFIXES[@]}"; do
count=0
echo "[INFO] Processing files with prefix: $prefix"
# List files matching the prefix sorted by modification time (latest first),
# then remove all except the first KEEP_INT files.
while IFS= read -r file; do
if [[ "$DRY_RUN" == true ]]; then
echo "[DRY-RUN] Would remove: $file"
else
echo "[INFO] Removing: $file"
if ! rm -- "$file"; then
echo "[ERROR] Failed to remove: $file" >&2
fi
fi
count=$((count + 1))
done < <(
find . -maxdepth 1 -type f -name "${prefix}*" -printf '%T@ %p\n' 2>/dev/null | \
sort -rn | \
awk -v keep="$KEEP_INT" 'NR > keep { sub(/^[^ ]* /, ""); print }'
)
if [[ $count -eq 0 ]]; then
echo "[INFO] No files to remove for prefix: $prefix"
else
echo "[INFO] Processed $count file(s) for prefix: $prefix"
fi
done
}
main() {
parseInput "$@"
findAndRemove
}
main "$@"
}
# Allow script to be safely sourced
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
prune-files "$@"
exit $?
fi

39
scripts/random-words Executable file

@@ -0,0 +1,39 @@
#!/usr/bin/env bash
# Creates random words joined by underscores, e.g. turtle_ladder
# Accepts one optional integer argument: the number of words to return (default: 2)
set -euo pipefail
random_words() {
local num="${1:-2}"
local -a arr
local word
# Validate input
if ! [[ "$num" =~ ^[0-9]+$ ]] || [[ "$num" -lt 1 ]]; then
echo "[ERROR] Argument must be a positive integer" >&2
return 1
fi
# Check if dictionary file exists
if [[ ! -f /usr/share/dict/words ]]; then
echo "[ERROR] Dictionary file /usr/share/dict/words not found" >&2
return 1
fi
for ((i=0; i<num; i++)); do
# Get random word and sanitize in one pass
word=$(shuf -n1 /usr/share/dict/words | tr -d '-_' | tr '[:upper:]' '[:lower:]')
arr+=("$word")
done
# Join array with underscores
local IFS="_"
echo "${arr[*]}"
}
# Allow this file to be executed directly if not being sourced
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
random_words "$@"
exit $?
fi

107
scripts/remove-small-dirs Executable file

@@ -0,0 +1,107 @@
#!/usr/bin/env bash
# Remove directories below a specified size (in KB)
# Usage: remove-small-dirs DIRECTORY [SIZE_THRESHOLD]
# Default SIZE_THRESHOLD is 1000 KB
set -euo pipefail
usage() {
cat <<-EOF
Usage: remove-small-dirs [OPTIONS] DIRECTORY [SIZE_THRESHOLD]
Remove directories below a specified size (default: 1000 KB).
OPTIONS:
-n, --dry-run Show what would be removed without deleting
-h, --help Display this help message
ARGUMENTS:
DIRECTORY Directory to search for small directories
SIZE_THRESHOLD Maximum size in KB (default: 1000)
EOF
}
# Parse options
DRY_RUN=false
while [[ $# -gt 0 ]]; do
case "$1" in
-n|--dry-run)
DRY_RUN=true
shift
;;
-h|--help)
usage
exit 0
;;
-*)
echo "[ERROR] Unknown option: $1" >&2
usage
exit 1
;;
*)
break
;;
esac
done
if [[ $# -lt 1 ]]; then
echo "[ERROR] You must provide a directory." >&2
usage
exit 1
fi
dir="$1"
SIZE="${2:-1000}"
if [[ ! -d "$dir" ]]; then
echo "[ERROR] Directory does not exist: $dir" >&2
exit 1
fi
if ! [[ "$SIZE" =~ ^[0-9]+$ ]] || [[ "$SIZE" -lt 1 ]]; then
echo "[ERROR] SIZE_THRESHOLD must be a positive integer" >&2
exit 1
fi
echo "[INFO] Searching for directories <= $SIZE KB in '$dir'..."
# Find directories with size less or equal to SIZE
# Sort by depth (deepest first) to avoid removing parent before child
small_dirs=$(find "$dir" -mindepth 1 -type d -exec du -ks {} + | \
awk -v size="$SIZE" '$1 <= size { sub(/^[^\t]*\t/, ""); print }' | \
awk '{ print length, $0 }' | sort -rn | cut -d' ' -f2-)
if [[ -z "$small_dirs" ]]; then
echo "[INFO] No directories with size <= $SIZE KB found in '$dir'."
exit 0
fi
echo "[INFO] Found $(echo "$small_dirs" | wc -l) directories to remove:"
echo "$small_dirs"
if [[ "$DRY_RUN" == true ]]; then
echo "[DRY-RUN] Would remove the above directories."
exit 0
fi
read -r -p "Remove these directories? [y/N] " response
response="${response,,}" # Convert to lowercase
if [[ ! "$response" =~ ^(yes|y)$ ]]; then
echo "[INFO] Exiting, no changes were made."
exit 0
fi
count=0
while IFS= read -r small_dir; do
if [[ -d "$small_dir" ]]; then
echo "[INFO] Removing: $small_dir"
if rm -rf "$small_dir"; then
count=$((count + 1))
else
echo "[ERROR] Failed to remove: $small_dir" >&2
fi
fi
done <<< "$small_dirs"
echo "[SUCCESS] Removed $count directories."

86
scripts/speedtest-compare Executable file

@@ -0,0 +1,86 @@
#!/usr/bin/env bash
# This script performs speedtests over Wireguard and native connections and prints their output
set -euo pipefail
usage() {
cat <<-EOF
Usage: speedtest-compare [OPTIONS]
Compare network speed between Wireguard and native connections.
OPTIONS:
-s, --server ID Specify server ID for native test (default: 17170)
-u, --upload Include upload speed test
-h, --help Display this help message
EOF
}
# Parse options
UPLOAD_FLAG="--no-upload"
SERVER_ID="17170"
while [[ $# -gt 0 ]]; do
case "$1" in
-s|--server) shift; SERVER_ID="$1"; shift ;;
-u|--upload) UPLOAD_FLAG=""; shift ;;
-h|--help) usage; exit 0 ;;
*)
echo "[ERROR] Unknown option: $1" >&2
usage
exit 1
;;
esac
done
# Check if speedtest-cli is installed
if ! command -v speedtest-cli &>/dev/null; then
echo "[ERROR] speedtest-cli is not installed. Please install it first." >&2
exit 1
fi
run_test() {
local output pingBps pingPart bpsPart pingInt bpsInt mbpsInt
# Run speedtest-cli and extract the 7th and 8th CSV fields
if ! output=$(speedtest-cli $UPLOAD_FLAG --csv "$@" 2>/dev/null); then
echo "[ERROR] Speedtest failed" >&2
return 1
fi
pingBps=$(echo "$output" | cut -d"," -f7-8)
# Extract ping value (as an integer) and bps (and convert to Mbps)
pingPart="${pingBps%,*}"
bpsPart="${pingBps#*,}"
pingInt="${pingPart%.*}"
bpsInt="${bpsPart%.*}"
mbpsInt=$(( bpsInt / 1000000 ))
echo "$pingInt $mbpsInt"
}
echo "[INFO] Running speedtest comparison..."
echo ""
# Test Wireguard using automatic server selection
echo "Testing Wireguard connection..."
if output=$(run_test); then
read -r pingInt mbpsInt <<< "$output"
echo " Ping: ${pingInt}ms"
echo " Speed: ${mbpsInt}Mbps"
else
echo " [ERROR] Wireguard test failed"
fi
echo ""
# Test native connection to ISP
echo "Testing native connection (server: $SERVER_ID)..."
if output=$(run_test --server "$SERVER_ID"); then
read -r pingInt mbpsInt <<< "$output"
echo " Ping: ${pingInt}ms"
echo " Speed: ${mbpsInt}Mbps"
else
echo " [ERROR] Native test failed"
fi

71
scripts/ssh-wrap Executable file

@@ -0,0 +1,71 @@
#!/usr/bin/env bash
# Usage: ssh-wrap user@host [ssh-options]
# Wrapper to handle SSH host key changes automatically
set -uo pipefail
if [[ $# -eq 0 ]]; then
echo "Usage: ssh-wrap user@host [ssh-options]" >&2
exit 1
fi
# Capture SSH output
output=$(ssh "$@" 2>&1)
exit_code=$?
# Print the SSH output so user sees what happened
echo "$output"
# If SSH succeeded, we're done
if [[ $exit_code -eq 0 ]]; then
exit 0
fi
# Check if the known_hosts warning appears
if echo "$output" | grep -q "REMOTE HOST IDENTIFICATION HAS CHANGED"; then
echo ""
echo "[WARNING] Host key has changed - possible man-in-the-middle attack or host reinstall."
# Extract the known_hosts file and line number from the "Offending RSA key in ..." line
# The line format typically is: "Offending RSA key in /path/to/known_hosts:line"
if offending_info=$(echo "$output" | grep "Offending.*key in"); then
KNOWN_HOSTS_FILE=$(echo "$offending_info" | awk '{print $5}' | cut -d: -f1)
LINE_NUMBER=$(echo "$offending_info" | awk -F: '{print $NF}')
if [[ -z "$KNOWN_HOSTS_FILE" || -z "$LINE_NUMBER" || ! -f "$KNOWN_HOSTS_FILE" ]]; then
echo "[ERROR] Could not extract offending key information or file doesn't exist." >&2
exit 1
fi
echo "[INFO] Offending key detected in: $KNOWN_HOSTS_FILE on line: $LINE_NUMBER"
read -rp "Remove offending key and retry SSH connection? [y/N]: " RESPONSE
if [[ "$RESPONSE" =~ ^[Yy]$ ]]; then
# Backup known_hosts
if cp "$KNOWN_HOSTS_FILE" "$KNOWN_HOSTS_FILE.bak"; then
echo "[INFO] Backup created: $KNOWN_HOSTS_FILE.bak"
else
echo "[ERROR] Failed to create backup." >&2
exit 1
fi
# Remove offending line
if sed -i "${LINE_NUMBER}d" "$KNOWN_HOSTS_FILE"; then
echo "[INFO] Offending key removed. Retrying SSH connection..."
ssh "$@"
else
echo "[ERROR] Failed to remove offending key." >&2
exit 1
fi
else
echo "[INFO] Key was not removed. Exiting."
exit 1
fi
else
echo "[ERROR] Could not extract offending key information. Remove it manually if needed." >&2
exit 1
fi
else
# SSH failed for another reason
exit $exit_code
fi

4
scripts/strip-exif Executable file

@@ -0,0 +1,4 @@
#!/usr/bin/env bash
# Strips all EXIF data from images provided as arguments
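# Example: strip-exif photo1.jpg photo2.jpg
# Note: exiftool keeps a backup of each file as "<name>_original" unless -overwrite_original is added.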
exiftool -all= "$@"

68
scripts/sync-music Executable file

@@ -0,0 +1,68 @@
#!/usr/bin/env bash
# Sync and transcode music files to a destination directory
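# Usage: sync-music SRC DST [JOBS], e.g. (hypothetical paths):
#   sync-music /srv/music /mnt/dap/music 8
# FLACs are transcoded to 160 kbps Opus, MP3s are copied as-is, and stale files in DST are pruned.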
set -e
SRC="${1:?Source directory required}"
DST="${2:?Destination directory required}"
JOBS="${3:-12}"
command -v opusenc >/dev/null || { echo "ERROR: opusenc not found" >&2; exit 1; }
mkdir -p "$DST"
echo "Syncing music from $SRC to $DST (using $JOBS parallel jobs)"
# Process source files in parallel
process_file() {
local src="$1"
local rel="${src#"$SRC/"}"
local dst="$DST/$rel"
case "${src,,}" in
*.flac)
dst="${dst%.*}.opus"
if [[ ! -f "$dst" || "$src" -nt "$dst" ]]; then
echo "Converting: $rel"
mkdir -p "$(dirname "$dst")"
opusenc --quiet --bitrate 160 --vbr "$src" "$dst"
fi
;;
*.mp3)
if [[ ! -f "$dst" || "$src" -nt "$dst" ]]; then
echo "Copying: $rel"
mkdir -p "$(dirname "$dst")"
cp -p "$src" "$dst"
fi
;;
esac
}
export -f process_file
export SRC DST
find -L "$SRC" -type f \( -iname "*.flac" -o -iname "*.mp3" \) -print0 | \
xargs -0 -P "$JOBS" -I {} bash -c 'process_file "$@"' _ {}
# Remove stray files
while IFS= read -r -d '' dst; do
rel="${dst#"$DST/"}"
base="${rel%.*}"
case "${dst,,}" in
*.opus)
[[ -f "$SRC/$base.flac" || -f "$SRC/$base.FLAC" ]] && continue
echo "Removing: $rel"
rm -f "$dst"
;;
*.mp3)
[[ -f "$SRC/$rel" ]] && continue
echo "Removing: $rel"
rm -f "$dst"
;;
esac
done < <(find -L "$DST" -type f \( -iname "*.opus" -o -iname "*.mp3" \) -print0)
# Clean empty directories
find "$DST" -type d -empty -delete 2>/dev/null || true
echo "Done"

78
scripts/tmux-management Executable file

@@ -0,0 +1,78 @@
#!/usr/bin/env bash
# Open a tiled tmux window with one pane per host, each attached to its own tmux session.
# The local session is always the last (active) pane.
set -euo pipefail
# Configuration (override with env vars if desired)
HOSTS=(workstation laptop) # hosts in pane order
REMOTE_SESSION=${REMOTE_SESSION:-main} # tmux session on remotes
SYNCHRONIZE=${SYNCHRONIZE:-1} # 1 = broadcast keystrokes
INCLUDE_LOCAL=${INCLUDE_LOCAL:-1} # 0 = skip local host
LOCAL_SHELL_ONLY=${LOCAL_SHELL_ONLY:-0} # 1 = plain shell locally
DEBUG=${DEBUG:-0}
debug() { if (( DEBUG )); then echo "Debug: $*"; fi; }
# Returns 0 if $2 is found in nameref array $1
array_contains() {
local -n arr=$1
local needle=$2
for element in "${arr[@]}"; do
[[ "$element" == "$needle" ]] && return 0
done
return 1
}
LOCAL=$(hostname -s)
# Build TARGETS list so that LOCAL is always last
TARGETS=()
for h in "${HOSTS[@]}"; do
[[ $h != "$LOCAL" ]] && TARGETS+=("$h")
done
if (( INCLUDE_LOCAL )); then
TARGETS+=("$LOCAL")
fi
(( ${#TARGETS[@]} )) || { echo "No hosts to connect to."; exit 1; }
SESSION=$(IFS=-; echo "${TARGETS[*]}")
debug "Session : $SESSION"
debug "Targets : ${TARGETS[*]}"
# Reattach if session already exists
if tmux has-session -t "$SESSION" 2>/dev/null; then
exec tmux attach -t "$SESSION"
fi
# Builds the command that will run inside a pane
open_cmd() {
local tgt=$1
if [[ $tgt == "$LOCAL" ]]; then
if (( LOCAL_SHELL_ONLY )); then
printf '%q -l' "${SHELL:-bash}"
else
printf 'tmux -L %q new -A -s %q' "${SESSION}_local" "$REMOTE_SESSION"
fi
else
printf 'ssh -t %q tmux new -A -s %q' "$tgt" "$REMOTE_SESSION"
fi
}
# Create the first pane
tmux new-session -d -s "$SESSION" -n "$SESSION" "$(open_cmd "${TARGETS[0]}")"
# Create remaining panes
for tgt in "${TARGETS[@]:1}"; do
tmux split-window -t "$SESSION:0" -h "$(open_cmd "$tgt")"
done
tmux select-layout -t "$SESSION:0" tiled
((SYNCHRONIZE)) && tmux setw -t "$SESSION:0" synchronize-panes on
# Activate the last pane (local host)
local_index=$(( ${#TARGETS[@]} - 1 ))
tmux select-pane -t "$SESSION:0.$local_index"
exec tmux attach -t "$SESSION"

6
scripts/tree-to-markdown Executable file

@@ -0,0 +1,6 @@
#!/usr/bin/env bash
# Make a nice markdown file from a directory tree
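# Example: tree-to-markdown ./src > STRUCTURE.md (hypothetical path)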
tree=$(tree -f --noreport --charset ascii "$1" |
sed -e 's/| \+/ /g' -e 's/[|`]-\+/ */g' -e 's:\(* \)\(\(.*/\)\([^/]\+\)\):\1[\4](\2):g')
printf "# Code/Directory Structure:\n\n%s" "$tree"

16
scripts/update-git-hooks Executable file

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
# Update the post-receive hooks of multiple bare git repos
for i in /var/lib/git/gogs-repositories/bryan/*/hooks/post-receive; do
# Get repo name
rn="${i%/hooks/post-receive}"
rn="${rn##*/}"
line="git push --mirror git@github.com:cryobry/${rn}"
# Don't duplicate the line if it already exists
grep -qxF "$line" "$i" && continue
# Append the line
echo "$line" >> "$i"
done