Initial commit
2 .gitignore vendored Normal file
@@ -0,0 +1,2 @@
.ansible
testing/

7 bootstrap Executable file
@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Bootstrap Ansible environment
# Usage: ./bootstrap [--force]

pip install --upgrade pip "$@"
pip install --upgrade --requirement pip-requirements "$@"
ansible-galaxy install --role-file galaxy-requirements.yml "$@"
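
Note: a minimal first-run sketch, assuming the playbook's dedicated virtualenv (the one referenced by deploy.code-workspace); the mkvirtualenv/workon commands come from virtualenvwrapper and are an assumption, not part of this commit:

    # create and enter the "deploy" virtualenv, then install the pip and Galaxy requirements
    mkvirtualenv deploy   # or: python3 -m venv ~/.virtualenvs/deploy && source ~/.virtualenvs/deploy/bin/activate
    workon deploy
    ./bootstrap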

5 deploy-vm Executable file
@@ -0,0 +1,5 @@
#!/usr/bin/env bash

# ansible-playbook -i inventories/remote-hosts.yml playbooks/deploy.yml -l workstation --ask-become-pass

ansible-playbook -i inventories/remotehosts playbook.yml -l testing --ask-vault-pass --ask-become-pass --diff "$@"

5 deploy-workstation Executable file
@@ -0,0 +1,5 @@
#!/usr/bin/env bash

# ansible-playbook -i inventories/localhosts playbook.yml -l workstation --ask-vault-pass --ask-become-pass --check --diff

ansible-playbook -i inventories/localhosts playbook.yml -l workstation --ask-vault-pass --ask-become-pass --diff "$@"

10 deploy.code-workspace Normal file
@@ -0,0 +1,10 @@
{
    "folders": [
        {
            "path": "."
        }
    ],
    "settings": {
        "ansible.python.interpreterPath": "/home/bryan/.virtualenvs/deploy/bin/python"
    }
}

7 dotfiles/common/.config/aichat/config.yaml.j2 Normal file
@@ -0,0 +1,7 @@
# see https://github.com/sigoden/aichat/blob/main/config.example.yaml

model: claude:claude-haiku-4-5-20251001

clients:
  - type: claude
    api_key: {{ ANTHROPIC_API_KEY }}

2 dotfiles/common/.env.j2 Normal file
@@ -0,0 +1,2 @@
OPENAI_API_KEY="{{ OPENAI_API_KEY }}"
ANTHROPIC_API_KEY="{{ ANTHROPIC_API_KEY }}"

10 dotfiles/common/.gitconfig Normal file
@@ -0,0 +1,10 @@
# This is Git's per-user configuration file.
[user]
    email = bryanroessler@gmail.com
    name = Bryan Roessler
[color]
    ui = auto
[credential]
    helper = cache
[init]
    defaultBranch = main

46 dotfiles/common/.local/share/nautilus/scripts/share-link Executable file
@@ -0,0 +1,46 @@
#!/usr/bin/env bash
# Nautilus script for creating one or more shared links
# Requires wl-clipboard and notify-send

ssh_server="bryanroessler.com"
ssh_files_path="/var/www/repos.bryanroessler.com/files"
www_files_path="https://repos.bryanroessler.com/files"

if [[ "$#" -lt 1 ]]; then
    echo "You must provide at least one argument"
    exit 1
fi

hash wl-copy &>/dev/null || { echo "Please install wl-copy"; exit 1; }
hash rsync &>/dev/null || { echo "Please install rsync"; exit 1; }

if [[ -v NAUTILUS_SCRIPT_SELECTED_URIS ]]; then
    readarray -t files <<< "$NAUTILUS_SCRIPT_SELECTED_URIS"
    for f in "${files[@]}"; do
        f="${f#file://}"
        f="${f//\%20/ }"
        fixed_files+=("$f")
    done
else
    fixed_files=("$@")
fi

links_array=()
for f in "${fixed_files[@]}"; do
    [[ "$f" == "" ]] && continue
    fname="${f##*/}"
    random64=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 64 | head -n 1)
    nohup rsync -a "$f" "${ssh_server}:${ssh_files_path}/${random64}/" &
    links_array+=("$www_files_path/${random64}/${fname// /%20}")
done

if [[ "${#links_array[@]}" == 1 ]]; then
    printf '%s' "${links_array[@]}" | wl-copy
else
    printf '%s\n' "${links_array[@]}" | wl-copy
fi

hash notify-send &>/dev/null &&
    notify-send -t 3000 -i face-smile "share-link" "File(s) uploaded and link copied to clipboard"

exit 0
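
Note: a hypothetical invocation outside Nautilus; the file name is made up, and the resulting link shape follows from the variables above (random 64-character directory, spaces encoded as %20):

    ./share-link "/home/bryan/some report.pdf"
    # the clipboard afterwards holds something like:
    # https://repos.bryanroessler.com/files/<random64>/some%20report.pdf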

3 dotfiles/common/.ssh/authorized_keys.j2 Normal file
@@ -0,0 +1,3 @@
{% for key in SSH_AUTHORIZED_KEYS %}
{{ key }}
{% endfor %}

42 dotfiles/common/.ssh/config Normal file
@@ -0,0 +1,42 @@
HashKnownHosts yes
AddressFamily inet

Host *
    StrictHostKeyChecking no

Host uv.asc.edu
    User uabbcr
    ProxyJump roessler@hartmanlab.genetics.uab.edu

Host router
    Hostname router.lan
    User root
    ServerAliveCountMax 10
    ServerAliveInterval 120

Host home-router
    Hostname home-router
    User root

Host ax6000
    Hostname ax6000.lan
    User root

Host bryanroessler.com
    Hostname 23.94.201.46

Host hartmanlab
    Hostname hartmanlab.genetics.uab.edu
    User roessler

Host workstation
    Hostname workstation

Host laptop
    Hostname laptop

Host vm-fedora42
    Hostname vm-fedora42

Host vm-alma9
    Hostname 192.168.122.235

32 dotfiles/common/.tmux.conf Normal file
@@ -0,0 +1,32 @@
# Synchronize windows shortcut
unbind a
bind a set-window-option synchronize-panes

# Use | and - to split a window vertically and horizontally instead of " and % respectively
unbind '"'
unbind %
bind | split-window -h -c "#{pane_current_path}"
bind - split-window -v -c "#{pane_current_path}"

# Automatically set window title
set-window-option -g automatic-rename on
set-option -g set-titles on

# Alias (Ctrl-b + k) to kill the current session
bind-key k kill-session
unbind r
bind r \
    source-file ~/.tmux.conf \;\
    display 'Reloaded tmux config'

# Set the history limit so we get lots of scrollback.
setw -g history-limit 50000000

# switch panes using Alt-arrow without prefix
bind -n M-Left select-pane -L
bind -n M-Right select-pane -R
bind -n M-Up select-pane -U
bind -n M-Down select-pane -D

# Enable mouse mode
set -g mouse on

169 dotfiles/common/.vimrc Normal file
@@ -0,0 +1,169 @@
" File: .vimrc
" Author: Jake Zimmerman <jake@zimmerman.io>
"
" How I configure Vim :P
"

" Gotta be first
set nocompatible

filetype off

set rtp+=~/.vim/bundle/Vundle.vim
call vundle#begin()

Plugin 'VundleVim/Vundle.vim'

" ----- Making Vim look good ------------------------------------------
Plugin 'altercation/vim-colors-solarized'
Plugin 'tomasr/molokai'
Plugin 'vim-airline/vim-airline'
Plugin 'vim-airline/vim-airline-themes'

" ----- Vim as a programmer's text editor -----------------------------
Plugin 'scrooloose/nerdtree'
Plugin 'jistr/vim-nerdtree-tabs'
Plugin 'vim-syntastic/syntastic'
Plugin 'xolox/vim-misc'
Plugin 'xolox/vim-easytags'
Plugin 'majutsushi/tagbar'
Plugin 'ctrlpvim/ctrlp.vim'
Plugin 'vim-scripts/a.vim'

" ----- Working with Git ----------------------------------------------
Plugin 'airblade/vim-gitgutter'
Plugin 'tpope/vim-fugitive'

" ----- Other text editing features -----------------------------------
Plugin 'Raimondi/delimitMate'

" ----- man pages, tmux -----------------------------------------------
Plugin 'jez/vim-superman'
Plugin 'christoomey/vim-tmux-navigator'

" ----- Syntax plugins ------------------------------------------------
Plugin 'jez/vim-c0'
Plugin 'jez/vim-ispc'
Plugin 'kchmck/vim-coffee-script'

" ---- Extras/Advanced plugins ----------------------------------------
" Highlight and strip trailing whitespace
"Plugin 'ntpeters/vim-better-whitespace'
" Easily surround chunks of text
"Plugin 'tpope/vim-surround'
" Align CSV files at commas, align Markdown tables, and more
"Plugin 'godlygeek/tabular'
" Automatically insert the closing HTML tag
"Plugin 'HTML-AutoCloseTag'
" Make tmux look like vim-airline (read README for extra instructions)
"Plugin 'edkolev/tmuxline.vim'
" All the other syntax plugins I use
"Plugin 'ekalinin/Dockerfile.vim'
"Plugin 'digitaltoad/vim-jade'
"Plugin 'tpope/vim-liquid'
"Plugin 'cakebaker/scss-syntax.vim'

call vundle#end()

filetype plugin indent on

" --- General settings ---
set backspace=indent,eol,start
set ruler
set number
set showcmd
set incsearch
set hlsearch

syntax on

set mouse=a

" We need this for plugins like Syntastic and vim-gitgutter which put symbols
" in the sign column.
hi clear SignColumn

" ----- Plugin-Specific Settings --------------------------------------

" ----- altercation/vim-colors-solarized settings -----
" Toggle this to "light" for light colorscheme
set background=dark

" Uncomment the next line if your terminal is not configured for solarized
let g:solarized_termcolors=256

" Set the colorscheme
colorscheme solarized


" ----- bling/vim-airline settings -----
" Always show statusbar
set laststatus=2

" Fancy arrow symbols, requires a patched font
" To install a patched font, run over to
" https://github.com/abertsch/Menlo-for-Powerline
" download all the .ttf files, double-click on them and click "Install"
" Finally, uncomment the next line
"let g:airline_powerline_fonts = 1

" Show PASTE if in paste mode
let g:airline_detect_paste=1

" Show airline for tabs too
let g:airline#extensions#tabline#enabled = 1

" Use the solarized theme for the Airline status bar
let g:airline_theme='solarized'

" ----- jistr/vim-nerdtree-tabs -----
" Open/close NERDTree Tabs with \t
nmap <silent> <leader>t :NERDTreeTabsToggle<CR>
" To have NERDTree always open on startup
let g:nerdtree_tabs_open_on_console_startup = 1

" ----- scrooloose/syntastic settings -----
let g:syntastic_error_symbol = '✘'
let g:syntastic_warning_symbol = "▲"
augroup mySyntastic
  au!
  au FileType tex let b:syntastic_mode = "passive"
augroup END


" ----- xolox/vim-easytags settings -----
" Where to look for tags files
set tags=./tags;,~/.vimtags
" Sensible defaults
let g:easytags_events = ['BufReadPost', 'BufWritePost']
let g:easytags_async = 1
let g:easytags_dynamic_files = 2
let g:easytags_resolve_links = 1
let g:easytags_suppress_ctags_warning = 1

" ----- majutsushi/tagbar settings -----
" Open/close tagbar with \b
nmap <silent> <leader>b :TagbarToggle<CR>
" Uncomment to open tagbar automatically whenever possible
"autocmd BufEnter * nested :call tagbar#autoopen(0)


" ----- airblade/vim-gitgutter settings -----
" In vim-airline, only display "hunks" if the diff is non-zero
let g:airline#extensions#hunks#non_zero_only = 1


" ----- Raimondi/delimitMate settings -----
let delimitMate_expand_cr = 1
augroup mydelimitMate
  au!
  au FileType markdown let b:delimitMate_nesting_quotes = ["`"]
  au FileType tex let b:delimitMate_quotes = ""
  au FileType tex let b:delimitMate_matchpairs = "(:),[:],{:},`:'"
  au FileType python let b:delimitMate_nesting_quotes = ['"', "'"]
augroup END

" ----- jez/vim-superman settings -----
" better man page support
noremap K :SuperMan <cword><CR>

143 dotfiles/common/.zshrc Normal file
@@ -0,0 +1,143 @@
# Shell options
setopt autocd menucomplete correct globdots extendedglob nomatch notify \
    share_history inc_append_history hist_expire_dups_first hist_reduce_blanks \
    hist_find_no_dups hist_verify extended_history auto_pushd pushd_ignore_dups \
    prompt_subst
unsetopt beep
bindkey -e

# Load secrets
if [[ -f $HOME/.env ]]; then
    set -a # automatically export all variables
    source "$HOME/.env"
    set +a
fi

# Completions
local compdump=${XDG_CACHE_HOME:-$HOME/.cache}/zsh/zcompdump-${HOST}-${ZSH_VERSION}
[[ -d ${compdump:h} ]] || mkdir -p ${compdump:h}
zstyle ':completion:*' menu select
zstyle ':completion:*' gain-privileges 1
zstyle ':completion:*:descriptions' format '%U%B%d%b%u'
zmodload zsh/complist
autoload -Uz compinit && compinit -d "$compdump"

# History
HISTFILE=${XDG_STATE_HOME:-$HOME}/.histfile
[[ -d $HISTFILE:h ]] || mkdir -p $HISTFILE:h
HISTSIZE=1000000
SAVEHIST=1000000
autoload -Uz up-line-or-beginning-search down-line-or-beginning-search
zle -N up-line-or-beginning-search
zle -N down-line-or-beginning-search

# Colors
autoload -Uz colors && colors

# Prompt
if [[ $EUID -eq 0 ]]; then
    user_color=red
else
    user_color=white
fi

# Assign colors based on the hostname
# https://www.ditig.com/256-colors-cheat-sheet
if [[ -v TOOLBOX_PATH ]]; then
    host_color=magenta
elif [[ -v DISTROBOX_ENTER_PATH ]]; then
    host_color=15
else
    case $HOSTNAME in
        laptop) host_color=green ;;
        workstation) host_color=red ;;
        bryan-pc) host_color=cyan ;;
        time4vps) host_color=blue ;;
        racknerd) host_color=yellow ;;
        htpc) host_color=214 ;;
        hartmanlab) host_color=magenta ;;
        router) host_color=blue ;;
        ax6000) host_color=87 ;;
        home-router) host_color=218 ;;
        vm-fedora*) host_color=57 ;;
        vm-alma*) host_color=214 ;;
        *) host_color=white ;;
    esac
fi

_git_prompt() {
    local br
    if br=$(git symbolic-ref --short HEAD 2>/dev/null); then
        print -n " %F{242}($br)%f"
    fi
}

PROMPT='[%F{'$user_color'}%n%f@%F{'$host_color'}%m%f]%~$(_git_prompt)%(!.#.$) '
# RPROMPT='%*' # display clock to right of screen
precmd() { print -Pn "\e]0;%n@%m: ${PWD/#$HOME/~}\a" ; }

# Set hostname on OpenWRT
[[ -z $HOSTNAME ]] && HOSTNAME=$(noglob uci get system.@system[0].hostname 2>/dev/null)

# Paths
typeset -U path PATH
path=(
    $HOME/bin
    $HOME/.local/bin
    $HOME/documents/develop/scripts/shell/.local/bin
    $HOME/.cargo/bin
    $path
)
export PATH
export R_LIBS_USER="$HOME/R/qhtcp-workflow"

# Keybindings
typeset -g -A key
for k v in \
    Home khome End kend Insert kich1 Backspace kbs Delete kdch1 \
    Up kcuu1 Down kcud1 Left kcub1 Right kcuf1 PageUp kpp PageDown knp ShiftTab kcbt; do
    [[ -n ${terminfo[$v]} ]] && key[$k]=${terminfo[$v]}
done
bindkey -- ${key[Home]-} beginning-of-line
bindkey -- ${key[End]-} end-of-line
bindkey -- ${key[Insert]-} overwrite-mode
bindkey -- ${key[Backspace]-} backward-delete-char
bindkey -- ${key[Delete]-} delete-char
bindkey -- ${key[Left]-} backward-char
bindkey -- ${key[Right]-} forward-char
bindkey -- ${key[PageUp]-} beginning-of-buffer-or-history
bindkey -- ${key[PageDown]-} end-of-buffer-or-history
bindkey -- ${key[ShiftTab]-} reverse-menu-complete
bindkey -- ${key[Up]-} up-line-or-beginning-search
bindkey -- ${key[Down]-} down-line-or-beginning-search

if (( ${+terminfo[smkx]} && ${+terminfo[rmkx]} )); then
    autoload -Uz add-zle-hook-widget
    zle_app_start() { echoti smkx; }
    zle_app_finish() { echoti rmkx; }
    add-zle-hook-widget zle-line-init zle_app_start
    add-zle-hook-widget zle-line-finish zle_app_finish
fi

# Aliases and one-liners
alias ll='ls -lh'
alias la='ls -A'
alias vmd='vmd -nt'
alias dnf-list-files='dnf repoquery -l'
alias gedit='gnome-text-editor'
alias xclip='xclip -selection c'
alias pdoman='podman'
alias git-list='git ls-tree -r HEAD --name-only'
alias chatgpt='aichat'
alias chromium='chromium --disable-features=GlobalShortcutsPortal'

podman-pull-all() {
    for image in $(podman images --format "{{.Repository}}:{{.Tag}}"); do
        podman pull "$image"
    done
}

buildah-prune() { buildah rm --all; }

export EDITOR="code --wait"

176 dotfiles/laptop/.config/btrbk/btrbk.conf Normal file
@@ -0,0 +1,176 @@
#
# Example btrbk configuration file
#
#
# Please refer to the btrbk.conf(5) man-page for a complete
# description of all configuration options.
#
# Note that the options can be overridden per volume/subvolume/target
# in the corresponding sections.
#
# Enable transaction log
transaction_log /var/log/btrbk.log

# Enable stream buffer. Adding a buffer between the sending and
# receiving side is generally a good idea.
# NOTE: If enabled, make sure the "mbuffer" package is installed on
# the target host!
#stream_buffer 512m

# Directory in which the btrfs snapshots are created. Relative to
# <volume-directory> of the volume section.
# If not set, the snapshots are created in <volume-directory>.
#
# If you want to set a custom name for the snapshot (and backups),
# use the "snapshot_name" option within the subvolume section.
#
# NOTE: btrbk does not automatically create this directory, and the
# snapshot creation will fail if it is not present.
#
snapshot_dir .snapshots

# Always create snapshots. Set this to "ondemand" to only create
# snapshots if the target volume is reachable. Set this to "no" if
# snapshot creation is done by another instance of btrbk.
snapshot_create ondemand

# Perform incremental backups (set to "strict" if you want to prevent
# creation of non-incremental backups if no parent is found).
#incremental yes

# Specify after what time (in full hours after midnight) backups/
# snapshots are considered as a daily backup/snapshot
#preserve_hour_of_day 0

# Specify on which day of week weekly/monthly backups are to be
# preserved.
#preserve_day_of_week sunday

# Preserve all snapshots for a minimum period of time.
#snapshot_preserve_min 1d

# Retention policy for the source snapshots.
#snapshot_preserve <NN>h <NN>d <NN>w <NN>m <NN>y

# Preserve all backup targets for a minimum period of time.
#target_preserve_min no

# Retention policy for backup targets:
#target_preserve <NN>h <NN>d <NN>w <NN>m <NN>y

# Retention policy for archives ("btrbk archive" command):
#archive_preserve_min no
#archive_preserve <NN>h <NN>d <NN>w <NN>m <NN>y

# Specify SSH private key for "ssh://" volumes / targets:
#ssh_identity /etc/btrbk/ssh/id_ed25519
ssh_identity /root/.ssh/id_ed25519
ssh_user root
ssh_compression no
#ssh_cipher_spec default

compat_remote busybox
send_protocol 2

# Enable compression for remote btrfs send/receive operations:
stream_compress no
#stream_compress_level default
#stream_compress_threads default

# Enable lock file support: Ensures that only one instance of btrbk
# can be run at a time.
lockfile /var/lock/btrbk.lock

# Don't wait for transaction commit on deletion. Set this to "after"
# or "each" to make sure the deletion of subvolumes is committed to
# disk when btrbk terminates.
#btrfs_commit_delete no
#
# Volume section: "volume <volume-directory>"
#
# <volume-directory> Directory of a btrfs volume (or subvolume)
# containing the subvolume to be backed up
# (usually the mount-point of a btrfs filesystem
# mounted with subvolid=5 option)
#
# Subvolume section: "subvolume <subvolume-name>"
#
# <subvolume-name> Subvolume to be backed up, relative to
# <volume-directory> in volume section.
#
# Target section: "target <type> <volume-directory>"
#
# <type> (optional) type, defaults to "send-receive".
# <volume-directory> Directory of a btrfs volume (or subvolume)
# receiving the backups.
#
# NOTE: The parser does not care about indentation, this is only for
# human readability. The options always apply to the last section
# encountered, overriding the corresponding option of the upper
# section. This means that the global options must be set before any
# "volume" section.
#
#
# Example configuration:
#
# Backup to external disk mounted on /mnt/btr_backup
#volume /mnt/btr_pool
# no action if external disk is not attached
# snapshot_create ondemand

# propagates to all subvolume sections:
# target /mnt/btr_backup/_btrbk

# subvolume root_gentoo
# subvolume kvm
# use different retention policy for kvm backups
# target_preserve 7d 4w


# Backup to external disk as well as some remote host
#volume /mnt/btr_data
# subvolume home
# always create snapshot, even if targets are unreachable
# snapshot_create always
# target /mnt/btr_backup/_btrbk
# target ssh://backup.my-remote-host.com/mnt/btr_backup


# Backup from remote host, with different naming
#volume ssh://my-remote-host.com/mnt/btr_pool
# subvolume data_0
# snapshot_dir snapshots/btrbk
# snapshot_name data_main
# target /mnt/btr_backup/_btrbk/my-remote-host.com


# Resume backups from remote host which runs its own btrbk instance
# creating snapshots for "home" in "/mnt/btr_pool/btrbk_snapshots".
#volume ssh://my-remote-host.com/mnt/btr_pool
# snapshot_dir btrbk_snapshots
# snapshot_create no
# snapshot_preserve_min all
# subvolume home
# target /mnt/btr_backup/_btrbk/my-remote-host.com

snapshot_preserve_min 2d
snapshot_preserve 14d

target_preserve_min no
target_preserve 14d 10w *m

archive_preserve_min latest
archive_preserve 12m 10y

subvolume /
  target_preserve 14d 10w 6m
  snapshot_dir /.snapshots
  target ssh://workstation/mnt/backup/laptop/root
  # target ssh://router.lan/mnt/backup/laptop/root

volume /home
  subvolume bryan
    # target /mnt/backup/laptop/home
    target ssh://workstation/mnt/backup/laptop/home
    # target ssh://router.lan/mnt/backup/laptop/home

253 dotfiles/workstation/.config/btrbk/btrbk.conf Normal file
@@ -0,0 +1,253 @@
#
# Example btrbk configuration file
#
#
# Please refer to the btrbk.conf(5) man-page for a complete
# description of all configuration options.
# For more examples, see README.md included with this package.
#
# btrbk.conf(5): <https://digint.ch/btrbk/doc/btrbk.conf.5.html>
# README.md: <https://digint.ch/btrbk/doc/readme.html>
#
# Note that the options can be overridden per volume/subvolume/target
# in the corresponding sections.
#


# Enable transaction log
transaction_log /var/log/btrbk.log

# Specify SSH private key for remote connections
ssh_identity /home/bryan/.config/btrbk/id_ed25519
ssh_user root

# Use sudo if btrbk or lsbtr is run by regular user
backend_local_user btrfs-progs-sudo

# Enable stream buffer. Adding a buffer between the sending and
# receiving side is generally a good idea.
# NOTE: If enabled, make sure to install the "mbuffer" package!
stream_buffer 1g

# Directory in which the btrfs snapshots are created. Relative to
# <volume-directory> of the volume section.
# If not set, the snapshots are created in <volume-directory>.
#
# If you want to set a custom name for the snapshot (and backups),
# use the "snapshot_name" option within the subvolume section.
#
# NOTE: btrbk does not automatically create this directory, and the
# snapshot creation will fail if it is not present.
#
snapshot_dir .snapshots

# Always create snapshots. Set this to "ondemand" to only create
# snapshots if the target volume is reachable. Set this to "no" if
# snapshot creation is done by another instance of btrbk.
snapshot_create onchange

# Perform incremental backups (set to "strict" if you want to prevent
# creation of non-incremental backups if no parent is found).
#incremental yes

# Specify after what time (in full hours after midnight) backups/
# snapshots are considered as a daily backup/snapshot
#preserve_hour_of_day 0

# Specify on which day of week weekly/monthly backups are to be
# preserved.
#preserve_day_of_week sunday

# Preserve all snapshots for a minimum period of time.
#snapshot_preserve_min 1d

# Retention policy for the source snapshots.
#snapshot_preserve <NN>h <NN>d <NN>w <NN>m <NN>y

# Preserve all backup targets for a minimum period of time.
#target_preserve_min no

# Retention policy for backup targets:
#target_preserve <NN>h <NN>d <NN>w <NN>m <NN>y

# Retention policy for archives ("btrbk archive" command):
#archive_preserve_min no
#archive_preserve <NN>h <NN>d <NN>w <NN>m <NN>y

# Enable compression for remote btrfs send/receive operations:
#stream_compress no
#stream_compress_level default
#stream_compress_threads default

# Enable lock file support: Ensures that only one instance of btrbk
# can be run at a time.
lockfile /var/lock/btrbk.lock

# Don't wait for transaction commit on deletion. Enable this to make
# sure the deletion of subvolumes is committed to disk when btrbk
# terminates.
#btrfs_commit_delete no


#
# Volume section (optional): "volume <volume-directory>"
#
# <volume-directory> Base path within a btrfs filesystem
# containing the subvolumes to be backed up
# (usually the mount-point of a btrfs filesystem
# mounted with subvolid=5 option).
#
# Subvolume section: "subvolume <subvolume-name>"
#
# <subvolume-name> Subvolume to be backed up, relative to
# <volume-directory> in volume section.
#
# Target section: "target <type> <volume-directory>"
#
# <type> (optional) type, defaults to "send-receive".
# <volume-directory> Directory within a btrfs filesystem
# receiving the backups.
#
# NOTE: The parser does not care about indentation, this is only for
# human readability. All options apply to the last section
# encountered, overriding the corresponding option of the upper
# section. This means that the global options must be set on top,
# before any "volume", "subvolume" or "target" section.
#

#
# Example retention policy:
#
# snapshot_preserve_min 2d
# snapshot_preserve 14d
# target_preserve_min no
# target_preserve 20d 10w *m

snapshot_preserve_min 2d
snapshot_preserve 14d

target_preserve_min no
target_preserve 14d 10w *m

archive_preserve_min latest
archive_preserve 12m 10y

# Global settings
compat_remote busybox
send_protocol 2

# Root backup workaround, omit the volume section
subvolume /
  target_preserve 14d 10w 6m
  snapshot_dir /.snapshots # Absolute path to snapshots dir
  target /mnt/backup/workstation/root
  # target ssh://router.lan/mnt/backup/workstation/root
  # target /run/media/bryan/backup/workstation/root
  # target ssh://home-router/mnt/backup/workstation/root

volume /home
  subvolume bryan
    target /mnt/backup/workstation/home
    # target ssh://router.lan/mnt/backup/workstation/home
    target_preserve 14d 10w 6m
    # target ssh://home-router/mnt/backup/workstation/home
    # target /run/media/bryan/backup/workstation/home

volume /mnt/downloads
  subvolume downloads
    target /mnt/backup/workstation/downloads
    # target /run/media/bryan/backup/workstation/downloads

volume /
  subvolume /mnt/ebooks
    target /mnt/backup/media
  subvolume /mnt/cover-art
    target /mnt/backup/media
    # target ssh://router.lan/mnt/backup/media
    # target ssh://home-router/mnt/backup/media

volume /mnt/array/media
  target /mnt/backup/media
  # target ssh://router.lan/mnt/backup/media
  # target ssh://home-router/mnt/backup/media
  subvolume pictures
  subvolume music
  target_preserve_min all # for home-router to keep samba share (and safer overall)


# #
# # Simple setup: Backup root and home to external disk
# #
# snapshot_dir /btrbk_snapshots
# target /mnt/btr_backup
# subvolume /
# subvolume /home


# #
# # Complex setup
# #
# # In order to keep things organized, it is recommended to use "volume"
# # sections and mount the top-level subvolume (subvolid=5):
# #
# # $ mount -o subvolid=5 /dev/sda1 /mnt/btr_pool
# #
# # Backup to external disk mounted on /mnt/btr_backup
# volume /mnt/btr_pool
# # Create snapshots in /mnt/btr_pool/btrbk_snapshots
# snapshot_dir btrbk_snapshots

# # Target for all subvolume sections:
# target /mnt/btr_backup

# # Some default btrfs installations (e.g. Ubuntu) use "@" for rootfs
# # (mounted at "/") and "@home" (mounted at "/home"). Note that this
# # is only a naming convention.
# #subvolume @
# subvolume root
# subvolume home
# subvolume kvm
# # Use different retention policy for kvm backups:
# target_preserve 7d 4w


# # Backup data to external disk as well as remote host
# volume /mnt/btr_data
# subvolume data
# # Always create snapshot, even if targets are unreachable
# snapshot_create always
# target /mnt/btr_backup
# target ssh://backup.my-remote-host.com/mnt/btr_backup


# # Backup from remote host, with different naming
# volume ssh://my-remote-host.com/mnt/btr_pool
# subvolume data_0
# snapshot_dir snapshots/btrbk
# snapshot_name data_main
# target /mnt/btr_backup/my-remote-host.com


# # Backup on demand (noauto) to remote host running busybox, login as
# # regular user using ssh-agent with current user name (ssh_user no)
# # and default credentials (ssh_identity no).
# volume /home
# noauto yes
# compat busybox
# backend_remote btrfs-progs-sudo
# ssh_user no
# ssh_identity no

# target ssh://my-user-host.com/mnt/btr_backup/home
# subvolume alice
# subvolume bob


# # Resume backups from remote host which runs its own btrbk instance
# # creating snapshots for "home" in "/mnt/btr_pool/btrbk_snapshots".
# volume ssh://my-remote-host.com/mnt/btr_pool
# snapshot_dir btrbk_snapshots
# snapshot_create no
# snapshot_preserve_min all
# subvolume home
# target /mnt/btr_backup/my-remote-host.com

6 galaxy-requirements.yml Normal file
@@ -0,0 +1,6 @@
---
# roles:
#   - linux-system-roles.systemd

collections:
  - name: ansible.posix
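
Note: a hedged equivalent for installing only the collections listed above directly with ansible-galaxy, independent of the ./bootstrap wrapper; this is standard ansible-galaxy usage and not part of this commit:

    ansible-galaxy collection install -r galaxy-requirements.yml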

4 group_vars/all/filesystems.yml Normal file
@@ -0,0 +1,4 @@
---
directories:
  - path: "{{ ansible_facts['env']['HOME'] }}/.local/bin"
    mode: '0755'

6 group_vars/all/services.yml Normal file
@@ -0,0 +1,6 @@
---
services_system:
  - dnf-automatic.timer

services_user:
  - psd

67 group_vars/all/software.yml Normal file
@@ -0,0 +1,67 @@
---
dnf_add_repositories:
  - name: zsh-completions
    description: zsh-completions from openSUSE
    baseurl: https://download.opensuse.org/repositories/shells:zsh-users:zsh-completions/Fedora_Rawhide/
    gpgkey: https://download.opensuse.org/repositories/shells:zsh-users:zsh-completions/Fedora_Rawhide/repodata/repomd.xml.key
  - name: code
    description: Visual Studio Code
    baseurl: https://packages.microsoft.com/yumrepos/vscode
    gpgkey: https://packages.microsoft.com/keys/microsoft.asc

dnf_remove:
  - abrt
  - rhythmbox
  - gnome-software
  - open-vm-tools-desktop
  - orca
  - anaconda-live
  - gnome-initial-setup

dnf_install:
  - rpmfusion-free-release
  - zsh
  - zsh-completions
  - ShellCheck
  - btrbk
  - btrfsmaintenance
  - vim
  - htop
  - remmina
  - calibre
  - pinta
  - toolbox
  - code
  - gnome-tweaks
  - wl-clipboard
  - syncthing
  - profile-sync-daemon
  - python3-virtualenv
  - python3-virtualenvwrapper
  - nautilus-python
  - gettext
  - setroubleshoot
  - cargo
  - flatpak
  - snapd
  - tailscale
  - dnf5-plugin-automatic

# Cargo packages to install
cargo_packages:
  - aichat

# Git repositories to clone
git_repos:
  - repo: https://git.bryanroessler.com/bryan/installJRMC.git
    dest: "{{ ansible_facts['env']['HOME'] }}/.local/bin/installJRMC"
    version: dev
  - repo: https://git.bryanroessler.com/bryan/openwrtbuilder.git
    dest: "{{ ansible_facts['env']['HOME'] }}/.local/bin/openwrtbuilder"
    version: dev
  # - repo: https://git.bryanroessler.com/bryan/deployer.git
  #   dest: "{{ ansible_facts['env']['HOME'] }}/.local/bin/deployer"
  #   version: dev
  # - repo: https://git.bryanroessler.com/bryan/deploy.git
  #   dest: "{{ ansible_facts['env']['HOME'] }}/.local/bin/deploy"
  #   version: dev

21 group_vars/all/sysconfig.yml Normal file
@@ -0,0 +1,21 @@
---
# GNOME settings via gsettings
sysconfig_gsettings:
  - schema: org.gnome.nautilus.preferences
    key: always-use-location-entry
    value: "true"

# Sysctl configurations
sysconfig_sysctl:
  - name: fs.inotify.max_user_watches
    value: 524288
    file: /etc/sysctl.d/local.conf

# Sudoers configuration - commands that can run without password
sysconfig_sudoers_nopasswd_commands:
  - /usr/bin/psd-overlay-helper
  - /usr/sbin/btrfs
  - /usr/bin/journalctl
  - /usr/bin/dnf
  - /usr/bin/fwupdmgr
  - /usr/bin/dmesg

4 group_vars/all/users.yml Normal file
@@ -0,0 +1,4 @@
---
users_configure:
  - name: bryan
    shell: /usr/bin/zsh

36 group_vars/all/vault.yml Normal file
@@ -0,0 +1,36 @@
$ANSIBLE_VAULT;1.1;AES256
32626332653961623932383137363730363737346333353432653563366161316332343261643463
3735336466356330313662356639323964613264626634310a316134363162383362636365616562
31363662373232323865346138623163363564353066393538643032333738353038333263623537
3630656531313632390a303730626633636530376166663936313632373737636463336265613437
31356332306665386263393865303431386334386365646562343465346663346363636233623262
32656231366565383561663565663739633632363033633930366533643836316436333965643033
66323639623964386263326634336534363865373238306435353331616461333836373331323034
37653433643433346262313166666634303366663466643732323134346265626337376536336162
35656430663233653864326265313637373062386232373733623139646430313765353733633462
31666631663562303832626165376262356632326566353366313566323730366265633333643531
32613166306164313338633438633762396530343636643937646131343837393965363063643161
65633330353162333465656133633765633039613431663838636362623666383332643138363337
66623137653432613634613534373264383139303462303138323232363962383236646138646465
31636262313462643536643730643939313766323737383830396462346665616439336162613837
64666364306234613761376462303362363334386635353639663534666232623165343661393435
66356263323736626266663662636338636535353639306265303965303636313939613564346164
38633964666634303761333432346632333635663931366663306633656138353532323764616465
32383336663436386435343037396131663932353836336266316539623939343563653339353932
38636266636162363339316461616531353031323161313834623131313336633762396661353437
37336537383531356332353132313562333736343261616236316133626530393934623437366130
66353634386538363530613131666465346463366232656364366336323861633834636461313562
35376263353632666438356139303835323030363035373134303937323433303834303030613838
62653835623639623164386130666363393734313662653231613531326431633239346661623434
38396136396131643931643663613538666633323239303837656264396636383531646435363837
34303233616134343332353664653265613135353732336238613232343431623965383939623261
64663330336637303361343032616533393236336532343432306164353436633535356535363530
64313065373939653039343632303632356136333138353336363636343035356530333230663461
38343862336163306632373863306337353466663364313437376165313762376533363039346664
39663039613534393731643335353763383265386630656163396631353761383732666533303239
34383464666364313965313938633435333534636330323939353339666433356131323539306435
30386633316337356534326135356563313939643463626235336237376162616438343534626364
32353062613538333336353935336563613166373361663334643766396162383562323264336362
63656233393961653065663966353036376136616135656637376334613964386535643935336339
35643933666663623463393634623235366465396233346434336238326632653865646665336438
613961303139323263653965366632333231

11 group_vars/laptop/filesystems.yml Normal file
@@ -0,0 +1,11 @@
---
mounts:
  - path: /home
    src: /dev/disk/by-uuid/42f5911d-d634-4f92-9561-c7e20ca66c83
    fstype: btrfs
    opts: subvol=home,compress=zstd:1,defaults
    state: mounted
    owner: root
    group: root
    mode: '0755'
    create_dir: false

131 group_vars/workstation/filesystems.yml Normal file
@@ -0,0 +1,131 @@
---
mounts:
  - path: /home
    src: /dev/disk/by-uuid/42f5911d-d634-4f92-9561-c7e20ca66c83
    fstype: btrfs
    opts: subvol=home,compress=zstd:1,defaults
    state: mounted
    owner: root
    group: root
    mode: '0755'
    create_dir: false

  - path: /mnt/array
    src: /dev/disk/by-uuid/36fe5749-800a-4ab5-a89a-6ad343f5d42f
    fstype: btrfs
    opts: defaults,compress=zstd:1,x-systemd.device-timeout=180s
    state: mounted
    owner: bryan
    group: bryan
    mode: '0755'
    create_dir: false

  - path: /mnt/backup
    src: /dev/disk/by-uuid/64cc836d-e55f-4c34-83db-01c9b43c218a
    fstype: btrfs
    opts: defaults,compress=zstd:1,x-systemd.device-timeout=180s,nofail
    state: mounted
    owner: root
    group: root
    mode: '0755'
    create_dir: false

  - path: /mnt/downloads
    src: /dev/disk/by-uuid/ee6247ed-5bcf-481e-802e-74efbc02eb45
    fstype: btrfs
    opts: defaults,compress=zstd:1
    state: mounted
    owner: bryan
    group: bryan
    mode: '0755'
    create_dir: false

  - path: /home/bryan/downloads
    src: /dev/disk/by-uuid/ee6247ed-5bcf-481e-802e-74efbc02eb45
    fstype: btrfs
    opts: subvol=downloads,defaults,compress=zstd:1,x-systemd.requires=home.mount,x-gvfs-hide
    state: mounted
    owner: bryan
    group: bryan
    mode: '0755'
    create_dir: false

  - path: /home/bryan/media
    src: /dev/disk/by-uuid/36fe5749-800a-4ab5-a89a-6ad343f5d42f
    fstype: btrfs
    opts: subvol=media,defaults,compress=zstd:1,x-systemd.requires=home.mount,x-systemd.device-timeout=180s,x-gvfs-hide
    state: mounted
    owner: bryan
    group: bryan
    mode: '0755'
    create_dir: false

  - path: /mnt/array/media/cover-art
    src: /dev/disk/by-uuid/42f5911d-d634-4f92-9561-c7e20ca66c83
    fstype: btrfs
    opts: subvol=root/mnt/cover-art,defaults,compress=zstd:1,x-systemd.requires=mnt-array.mount,x-gvfs-hide
    state: mounted
    owner: bryan
    group: bryan
    mode: '0755'
    create_dir: false

  - path: /home/bryan/media/cover-art
    src: /dev/disk/by-uuid/42f5911d-d634-4f92-9561-c7e20ca66c83
    fstype: btrfs
    opts: subvol=root/mnt/cover-art,defaults,compress=zstd:1,x-systemd.requires=home-bryan-media.mount,x-gvfs-hide
    state: mounted
    owner: bryan
    group: bryan
    mode: '0755'
    create_dir: false

  - path: /mnt/array/media/ebooks
    src: /dev/disk/by-uuid/42f5911d-d634-4f92-9561-c7e20ca66c83
    fstype: btrfs
    opts: subvol=root/mnt/ebooks,defaults,compress=zstd:1,x-systemd.requires=mnt-array.mount,x-gvfs-hide
    state: mounted
    owner: bryan
    group: bryan
    mode: '0755'
    create_dir: false

  - path: /home/bryan/media/ebooks
    src: /dev/disk/by-uuid/42f5911d-d634-4f92-9561-c7e20ca66c83
    fstype: btrfs
    opts: subvol=root/mnt/ebooks,defaults,compress=zstd:1,x-systemd.requires=home-bryan-media.mount,x-gvfs-hide
    state: mounted
    owner: bryan
    group: bryan
    mode: '0755'
    create_dir: false

  - path: /mnt/array/media/pictures/Screenshots
    src: /dev/disk/by-uuid/42f5911d-d634-4f92-9561-c7e20ca66c83
    fstype: btrfs
    opts: subvol=root/mnt/screenshots,defaults,compress=zstd:1,x-systemd.requires=mnt-array.mount,x-gvfs-hide
    state: mounted
    owner: bryan
    group: bryan
    mode: '0755'
    create_dir: false

  - path: /home/bryan/media/pictures/Screenshots
    src: /dev/disk/by-uuid/42f5911d-d634-4f92-9561-c7e20ca66c83
    fstype: btrfs
    opts: subvol=root/mnt/screenshots,defaults,compress=zstd:1,x-systemd.requires=home-bryan-media.mount,x-gvfs-hide
    state: mounted
    owner: bryan
    group: bryan
    mode: '0755'
    create_dir: false

  - path: /home/bryan/devices/laptop/music
    src: /dev/disk/by-uuid/d0ed963e-aaa0-4dcc-9ece-4ea8fe7fcea2
    fstype: btrfs
    opts: defaults,compress=zstd:1,x-systemd.requires=home.mount,x-gvfs-hide
    state: mounted
    owner: bryan
    group: bryan
    mode: '0755'
    create_dir: false

11 inventories/localhosts Normal file
@@ -0,0 +1,11 @@
[all]
localhost ansible_connection=local ansible_user=bryan

[htpc]
localhost ansible_connection=local ansible_user=bryan

[workstation]
localhost ansible_connection=local ansible_user=bryan

[laptop]
localhost ansible_connection=local ansible_user=bryan

16 inventories/remotehosts Normal file
@@ -0,0 +1,16 @@
[all]

[server]
bryanroessler.com ansible_user=bryan

[testing]
192.168.122.205 ansible_user=bryan

[htpc]
htpc ansible_user=bryan

[workstation]
workstation ansible_user=bryan

[laptop]
laptop ansible_user=bryan

3 pip-requirements Normal file
@@ -0,0 +1,3 @@
ansible
ansible-core
ansible-lint

35 playbook.yml Normal file
@@ -0,0 +1,35 @@
---
- name: Sync filesystems
  hosts: all
  roles:
    - filesystems

- name: Sync scripts
  hosts: all
  roles:
    - scripts

- name: Sync dotfiles
  hosts: all
  roles:
    - dotfiles

- name: Sync software
  hosts: all
  roles:
    - software

- name: Sync services
  hosts: all
  roles:
    - services

- name: Sync users
  hosts: all
  roles:
    - users

- name: Sync sysconfig
  hosts: all
  roles:
    - sysconfig

96 roles/dotfiles/tasks/main.yml Normal file
@@ -0,0 +1,96 @@
---
- name: Find common dotfiles (excluding templates)
  ansible.builtin.find:
    paths: "{{ playbook_dir }}/dotfiles/common"
    recurse: true
    file_type: file
    hidden: true
    excludes: "*.j2"
  delegate_to: localhost
  register: dotfiles_common_files
  run_once: true

- name: Find group dotfiles (excluding templates)
  ansible.builtin.find:
    paths: "{{ playbook_dir }}/dotfiles/{{ item }}"
    recurse: true
    file_type: file
    hidden: true
    excludes: "*.j2"
  loop: "{{ group_names | default([]) }}"
  delegate_to: localhost
  register: dotfiles_group_files
  run_once: true
  ignore_errors: true

- name: Deploy common dotfiles (remote)
  ansible.builtin.copy:
    src: "{{ item.path }}"
    dest: "{{ ansible_facts['env']['HOME'] }}/{{ item.path | replace(playbook_dir + '/dotfiles/common/', '') }}"
    mode: preserve
  loop: "{{ dotfiles_common_files.files }}"
  when: ansible_connection not in ['local', 'localhost']

- name: Deploy group dotfiles (remote)
  ansible.builtin.copy:
    src: "{{ item.1.path }}"
    dest: "{{ ansible_facts['env']['HOME'] }}/{{ item.1.path | replace(playbook_dir + '/dotfiles/' + item.0.item + '/', '') }}"
    mode: preserve
  loop: "{{ dotfiles_group_files.results | subelements('files', skip_missing=True) }}"
  when: ansible_connection not in ['local', 'localhost']

- name: Symlink common dotfiles (local)
  ansible.builtin.file:
    src: "{{ item.path }}"
    dest: "{{ ansible_facts['env']['HOME'] }}/{{ item.path | replace((playbook_dir + '/dotfiles/common/'), '') }}"
    state: link
    force: true
  loop: "{{ dotfiles_common_files.files }}"
  when: ansible_connection in ['local', 'localhost']

- name: Symlink group dotfiles (local)
  ansible.builtin.file:
    src: "{{ item.1.path }}"
    dest: "{{ ansible_facts['env']['HOME'] }}/{{ item.1.path | replace((playbook_dir + '/dotfiles/' + item.0.item + '/'), '') }}"
    state: link
    force: true
  loop: "{{ dotfiles_group_files.results | subelements('files') }}"
  when: ansible_connection in ['local', 'localhost']

- name: Find template files in common dotfiles
  ansible.builtin.find:
    paths: "{{ playbook_dir }}/dotfiles/common"
    recurse: true
    file_type: file
    hidden: true
    patterns: "*.j2"
  delegate_to: localhost
  register: dotfiles_common_templates
  run_once: true

- name: Find template files in group dotfiles
  ansible.builtin.find:
    paths: "{{ playbook_dir }}/dotfiles/{{ item }}"
    recurse: true
    file_type: file
    hidden: true
    patterns: "*.j2"
  loop: "{{ group_names | default([]) }}"
  delegate_to: localhost
  register: dotfiles_group_templates
  run_once: true
  ignore_errors: true

- name: Template common dotfiles
  ansible.builtin.template:
    src: "{{ item.path }}"
    dest: "{{ ansible_facts['env']['HOME'] }}/{{ item.path | replace(playbook_dir + '/dotfiles/common/', '') | replace('.j2', '') }}"
    mode: '0600'
  loop: "{{ dotfiles_common_templates.files }}"

- name: Template group dotfiles
  ansible.builtin.template:
    src: "{{ item.1.path }}"
    dest: "{{ ansible_facts['env']['HOME'] }}/{{ item.1.path | replace(playbook_dir + '/dotfiles/' + item.0.item + '/', '') | replace('.j2', '') }}"
    mode: '0600'
  loop: "{{ dotfiles_group_templates.results | subelements('files', skip_missing=True) }}"
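
Note: an illustrative mapping of what the replace filters above produce, using files from this commit as examples (local hosts get symlinks, remote hosts get copies, *.j2 files are rendered with mode 0600):

    # dotfiles/common/.tmux.conf                     -> $HOME/.tmux.conf
    # dotfiles/common/.config/aichat/config.yaml.j2  -> $HOME/.config/aichat/config.yaml
    # dotfiles/workstation/.config/btrbk/btrbk.conf  -> $HOME/.config/btrbk/btrbk.conf  (workstation group only)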

43 roles/filesystems/tasks/main.yml Normal file
@@ -0,0 +1,43 @@
---
- name: Ensure mount points exist when create_dir is true
  ansible.builtin.file:
    path: "{{ item.path }}"
    state: directory
    owner: "{{ item.owner | default('root') }}"
    group: "{{ item.group | default('root') }}"
    mode: "{{ item.mode | default('0755') }}"
  loop: "{{ mounts | default([]) }}"
  become: true
  when: item.create_dir | default(false) | bool

- name: Verify mount points exist
  ansible.builtin.stat:
    path: "{{ item.path }}"
  register: filesystems_mounts_stat
  changed_when: false
  loop: "{{ mounts | default([]) }}"

- name: Assert mount points exist
  ansible.builtin.assert:
    that:
      - (item.stat.exists | default(false))
    fail_msg: "Mount point {{ item.item.path }} does not exist"
  loop: "{{ filesystems_mounts_stat.results }}"

- name: Manage fstab entries and mount
  ansible.posix.mount:
    path: "{{ item.path }}"
    src: "{{ item.src }}"
    fstype: "{{ item.fstype }}"
    opts: "{{ item.opts | default('defaults') }}"
    state: "{{ item.state | default('mounted') }}"
    backup: true
  loop: "{{ mounts | default([]) }}"
  become: true

- name: Ensure directories exist
  ansible.builtin.file:
    path: "{{ item.path }}"
    state: directory
    mode: "{{ item.mode | default('0755') }}"
  loop: "{{ directories | default([]) }}"

25 roles/scripts/tasks/main.yml Normal file
@@ -0,0 +1,25 @@
---

- name: Copy repo scripts to local bin (for remote hosts)
  ansible.builtin.copy:
    src: "{{ item }}"
    dest: "{{ local_bin_dir | default(ansible_facts['env']['HOME'] ~ '/.local/bin') }}/{{ item | basename }}"
    mode: "0755"
    owner: "{{ local_bin_owner | default(ansible_facts['user_id']) }}"
    group: "{{ local_bin_group | default(ansible_facts['user_gid']) }}"
  with_fileglob:
    - "{{ scripts_src_glob | default(playbook_dir + '/scripts/*') }}"
  when: ansible_connection not in ['local', 'localhost'] and item is file

- name: Symlink repo scripts into local bin (stow-like, for local hosts)
  ansible.builtin.file:
    src: "{{ item }}"
    dest: "{{ local_bin_dir | default(ansible_facts['env']['HOME'] ~ '/.local/bin') }}/{{ item | basename }}"
    state: link
    force: true
    owner: "{{ local_bin_owner | default(ansible_facts['user_id']) }}"
    group: "{{ local_bin_group | default(ansible_facts['user_gid']) }}"
    follow: false
  with_fileglob:
    - "{{ scripts_src_glob | default(playbook_dir + '/scripts/*') }}"
  when: ansible_connection in ['local', 'localhost'] and item is file

19 roles/services/tasks/main.yml Normal file
@@ -0,0 +1,19 @@
---
- name: Enable and start system services
  ansible.builtin.systemd:
    name: "{{ item }}"
    enabled: true
    state: started
    scope: system
  loop: "{{ services_system }}"
  become: true
  when: services_system is defined and services_system | length > 0

- name: Enable and start user services
  ansible.builtin.systemd:
    name: "{{ item }}"
    enabled: true
    state: started
    scope: user
  loop: "{{ services_user }}"
  when: services_user is defined and services_user | length > 0

46 roles/software/tasks/main.yml Normal file
@@ -0,0 +1,46 @@
---

- name: Add DNF repositories
  ansible.builtin.yum_repository:
    name: "{{ item.name }}"
    description: "{{ item.description }}"
    baseurl: "{{ item.baseurl }}"
    enabled: true
    gpgcheck: true
    gpgkey: "{{ item.gpgkey }}"
  loop: "{{ dnf_add_repositories }}"
  become: true
  when: dnf_add_repositories is defined and dnf_add_repositories | length > 0

- name: Remove unwanted packages
  ansible.builtin.dnf:
    name: "{{ dnf_remove }}"
    state: absent
  become: true
  when: dnf_remove is defined and dnf_remove | length > 0
  failed_when: false

- name: Install DNF packages
  ansible.builtin.dnf:
    name: "{{ dnf_install }}"
    state: present
  become: true
  when: dnf_install is defined and dnf_install | length > 0

- name: Install cargo packages
  ansible.builtin.command:
    cmd: "cargo install {{ item }}"
  loop: "{{ cargo_packages }}"
  when: cargo_packages is defined and cargo_packages | length > 0
  register: software_cargo_install_result
  changed_when: "'Installing' in software_cargo_install_result.stderr or 'Compiling' in software_cargo_install_result.stderr"
  failed_when: software_cargo_install_result.rc != 0 and 'already exists' not in software_cargo_install_result.stderr

- name: Clone git repositories
  ansible.builtin.git:
    repo: "{{ item.repo }}"
    dest: "{{ item.dest }}"
    version: "{{ item.version }}"
    update: true
  loop: "{{ git_repos }}"
  when: git_repos is defined and git_repos | length > 0
29
roles/sysconfig/tasks/main.yml
Normal file
29
roles/sysconfig/tasks/main.yml
Normal file
@@ -0,0 +1,29 @@
---

- name: Configure sysctl parameters
  ansible.posix.sysctl:
    name: "{{ item.name }}"
    value: "{{ item.value }}"
    sysctl_file: "{{ item.file }}"
    state: present
    reload: true
  loop: "{{ sysconfig_sysctl }}"
  become: true
  when: sysconfig_sysctl is defined and sysconfig_sysctl | length > 0

- name: Configure GNOME settings
  community.general.dconf:
    key: "/{{ item.schema | replace('.', '/') }}/{{ item.key }}"
    value: "{{ item.value }}"
    state: present
  loop: "{{ sysconfig_gsettings }}"
  when: sysconfig_gsettings is defined and sysconfig_gsettings | length > 0

- name: Configure sudoers for passwordless commands
  ansible.builtin.lineinfile:
    path: /etc/sudoers
    line: "{{ ansible_facts['user_id'] }} ALL=(ALL) NOPASSWD: {{ sysconfig_sudoers_nopasswd_commands | join(', ') }}"
    state: present
    validate: /usr/sbin/visudo -cf %s
  become: true
  when: sysconfig_sudoers_nopasswd_commands is defined and sysconfig_sudoers_nopasswd_commands | length > 0
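For reference, a hypothetical set of values matching the item keys used above (the names, values, and file paths are examples only):

    ansible-playbook -i <inventory> <playbook>.yml -l workstation \
      -e '{"sysconfig_sysctl": [{"name": "vm.swappiness", "value": "10", "file": "/etc/sysctl.d/99-local.conf"}],
           "sysconfig_sudoers_nopasswd_commands": ["/usr/bin/systemctl restart example.service"]}'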
6
roles/users/tasks/main.yml
Normal file
@@ -0,0 +1,6 @@
- name: Set user shell
  ansible.builtin.user:
    name: "{{ item.name }}"
    shell: "{{ item.shell }}"
  loop: "{{ users_configure }}"
  become: true
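A hypothetical users_configure value matching the item keys above (the user and shell are examples only):

    ansible-playbook -i <inventory> <playbook>.yml -l workstation \
      -e '{"users_configure": [{"name": "bryan", "shell": "/usr/bin/zsh"}]}'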
52
scripts/btrfs-convert
Executable file
@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Convert directories to btrfs subvolumes

usage() {
    echo "Usage: $0 <directory1> [directory2 ...]"
    echo "Converts each directory to a btrfs subvolume."
}

btrfs_convert() {
    local d dir tmp_dir
    for d in "$@"; do
        dir="$d"
        tmp_dir="$dir.tmp"
        if [[ ! -d "$dir" ]]; then
            echo "[ERROR] Directory '$dir' does not exist. Skipping."
            continue
        fi
        if [[ -d "$tmp_dir" ]]; then
            echo "[ERROR] Temporary directory '$tmp_dir' already exists. Skipping '$dir'."
            continue
        fi
        echo "[INFO] Creating btrfs subvolume: '$tmp_dir'..."
        if ! btrfs subvolume create "$tmp_dir" &>/dev/null; then
            echo "[ERROR] Failed to create btrfs subvolume '$tmp_dir'. Skipping '$dir'."
            continue
        fi
        echo "[INFO] Moving contents from '$dir' to '$tmp_dir'..."
        if ! mv "$dir"/* "$tmp_dir"/ 2>/dev/null; then
            echo "[ERROR] Failed to move contents from '$dir' to '$tmp_dir'. Cleaning up."
            rm -rf "$tmp_dir"
            continue
        fi
        echo "[INFO] Removing original directory '$dir'..."
        if ! rm -rf "$dir"; then
            echo "[ERROR] Failed to remove '$dir'. Manual cleanup may be required."
            continue
        fi
        echo "[INFO] Renaming '$tmp_dir' to '$dir'..."
        if ! mv "$tmp_dir" "$dir"; then
            echo "[ERROR] Failed to rename '$tmp_dir' to '$dir'. Manual cleanup may be required."
            continue
        fi
        echo "[SUCCESS] Converted '$dir' to a btrfs subvolume."
    done
}

if [[ $# -lt 1 ]]; then
    usage
    exit 1
fi

btrfs_convert "$@"
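A hypothetical invocation (the paths are examples only); note that the mv "$dir"/* step does not match dotfiles, so hidden files at the top level of a converted directory are left behind:

    sudo ./scripts/btrfs-convert /var/lib/machines /var/lib/libvirt/images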
44
scripts/chroot-rescue
Executable file
@@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Mount and chroot a linux system

set -euo pipefail

lsblk

# Set defaults
RP="${ROOT_PART:-/dev/nvme0n1p3}"
BP="${BOOT_PART:-/dev/nvme0n1p1}"

read -r -p "Root partition [$RP]: " input_rp
RP="${input_rp:-$RP}"
read -r -p "Boot partition [$BP]: " input_bp
BP="${input_bp:-$BP}"
MD="${MOUNT_DIR:-/mnt/${RP##*/}}"
[[ -d "$MD" ]] && MD="$MD-$RANDOM"
read -r -p "Mount directory [$MD]: " input_md
MD="${input_md:-$MD}"

if [[ ! -e "$RP" || ! -e "$BP" ]]; then
    echo "[ERROR] Root or boot partition does not exist."
    exit 1
fi

# Mount and enter the chroot
echo "[INFO] Mounting and entering chroot..."
sudo mkdir -p "$MD"
sudo mount "$RP" "$MD"
for i in proc sys dev; do
    sudo mount --bind "/$i" "$MD/$i"
done
sudo mount "$BP" "$MD/boot/efi"
sudo chroot "$MD"

# After chroot
echo "[INFO] Exiting and unmounting chroot..."
sudo umount "$MD/boot/efi"
for i in proc sys dev; do
    sudo umount "$MD/$i"
done
sudo umount "$MD"

exit 0
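The defaults can also be pre-seeded non-interactively through the environment; a sketch with example devices (the partition names are hypothetical):

    ROOT_PART=/dev/sda3 BOOT_PART=/dev/sda1 MOUNT_DIR=/mnt/rescue ./scripts/chroot-rescue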
7
scripts/container-home-assistant
Executable file
@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# One-liner to deploy containerized home-assistant

podman run -d --name="home-assistant" -v ~/.config/home-assistant:/config -v /etc/localtime:/etc/localtime:ro --net=host docker.io/homeassistant/home-assistant:stable &&
    podman generate systemd --name "home-assistant" --container-prefix "" --separator "" > ~/.config/systemd/user/home-assistant.service &&
    systemctl --user daemon-reload &&
    systemctl --user enable --now home-assistant
193
scripts/drive-info
Executable file
@@ -0,0 +1,193 @@
#!/usr/bin/env bash
# Gathers disk info including:
#   Hardware info
#   Filesystem data
#   Btrfs array membership
#   LUKS encryption
#   SMART status
#
# Usage: sudo ./drive-info
#
# Requires root privileges for complete information access

# Check for root privileges
if [[ $EUID -ne 0 ]]; then
    echo "This script must be run as root for complete information"
    exit 1
fi

# Get list of block devices (excluding loop devices, partitions, and virtual devices)
devices=$(lsblk -dn -o NAME,TYPE | grep disk | awk '{print $1}')

for device in $devices; do
    dev_path="/dev/$device"

    # Get basic size info
    size=$(lsblk -dno SIZE "$dev_path" 2>/dev/null)

    # Get comprehensive SMART information
    smart_info=$(smartctl -i "$dev_path" 2>/dev/null)
    smart_health=$(smartctl -H "$dev_path" 2>/dev/null | grep "SMART overall-health" | awk '{print $NF}')
    smart_all=$(smartctl -A "$dev_path" 2>/dev/null)

    # Extract model
    model=$(echo "$smart_info" | grep "Device Model:" | cut -d: -f2- | xargs)
    [[ -z "$model" ]] && model=$(echo "$smart_info" | grep "Model Number:" | cut -d: -f2- | xargs)
    [[ -z "$model" ]] && model=$(cat "/sys/block/$device/device/model" 2>/dev/null | xargs)

    # Extract serial number
    serial=$(echo "$smart_info" | grep "Serial Number:" | cut -d: -f2- | xargs)
    [[ -z "$serial" ]] && serial=$(lsblk -dno SERIAL "$dev_path" 2>/dev/null)

    # Extract WWN
    wwn=$(echo "$smart_info" | grep "LU WWN Device Id:" | cut -d: -f2- | xargs | sed 's/ //g')
    [[ -z "$wwn" ]] && wwn=$(lsblk -dno WWN "$dev_path" 2>/dev/null)

    # Extract rotation rate
    rpm=$(echo "$smart_info" | grep "Rotation Rate:" | cut -d: -f2- | xargs)
    if [[ -z "$rpm" || "$rpm" == "Solid State Device" ]]; then
        rot=$(cat "/sys/block/$device/queue/rotational" 2>/dev/null)
        [[ "$rot" == "0" ]] && rpm="SSD"
    fi

    # Extract form factor
    form_factor=$(echo "$smart_info" | grep "Form Factor:" | cut -d: -f2- | xargs)

    # Extract interface and link speed
    interface=$(echo "$smart_info" | grep "SATA Version" | cut -d: -f2- | xargs)
    [[ -z "$interface" ]] && interface=$(echo "$smart_info" | grep "Transport protocol:" | cut -d: -f2- | xargs)
    [[ -z "$interface" ]] && interface=$(echo "$smart_info" | grep "NVMe Version:" | cut -d: -f2- | xargs | awk '{print "NVMe " $1}')

    # Extract temperature
    temp=$(echo "$smart_all" | grep -i "Temperature" | head -1 | awk '{print $10}')
    [[ -n "$temp" ]] && temp="${temp}°C"

    # Extract power-on hours
    power_hours=$(echo "$smart_all" | grep "Power_On_Hours" | awk '{print $10}')
    [[ -z "$power_hours" ]] && power_hours=$(echo "$smart_all" | grep "Power On Hours" | awk '{print $3}')

    # Get disk ID
    disk_id=""
    for f in /dev/disk/by-id/*; do
        [[ -e "$f" ]] || continue
        # resolve symlink target and compare basename to device, skip entries that reference partitions
        target=$(readlink -f "$f" 2>/dev/null) || continue
        if [[ "$(basename "$target")" == "$device" && "$f" != *part* ]]; then
            disk_id=$(basename "$f")
            break
        fi
    done

    # Get filesystem UUID
    uuid=$(lsblk -no UUID "$dev_path" 2>/dev/null | head -1)
    [[ -z "$uuid" ]] && uuid=$(lsblk -no UUID "${dev_path}1" 2>/dev/null)

    # Check for LUKS encryption
    luks_uuid=""
    luks_mapper=""
    if cryptsetup isLuks "$dev_path" 2>/dev/null; then
        luks_uuid=$(cryptsetup luksUUID "$dev_path" 2>/dev/null)
        for mapper in /dev/mapper/luks-*; do
            [[ -e "$mapper" ]] || continue
            mapper_name=$(basename "$mapper")
            [[ "$mapper_name" == "luks-$luks_uuid" ]] && luks_mapper="$mapper_name" && break
        done
    fi

    # Get partition table type
    ptable=$(blkid -p -s PTTYPE "$dev_path" 2>/dev/null | grep -oP 'PTTYPE="\K[^"]+')
    [[ -z "$ptable" ]] && ptable="none"

    # Get initial mount point
    mount_point=$(findmnt -no TARGET "$dev_path" 2>/dev/null | head -1)
    if [[ -z "$mount_point" && -n "$luks_mapper" ]]; then
        mount_point=$(findmnt -no TARGET "/dev/mapper/$luks_mapper" 2>/dev/null | head -1)
    fi

    # Get HBA information (reset per device so values from the previous disk don't leak)
    hba_info=""
    pci_addr="" pci_desc="" phy="" port="" target=""
    if [[ -L "/sys/block/$device" ]]; then
        dev_path_sys=$(readlink -f "/sys/block/$device")

        # Exclude USB, virtual, and NVMe devices from HBA detection
        if [[ ! "$dev_path_sys" =~ (usb|virtual|nvme) ]]; then
            phy=$(echo "$dev_path_sys" | grep -oP 'phy-\K[0-9]+' | head -1)
            port=$(echo "$dev_path_sys" | grep -oP 'port-\K[0-9]+' | head -1)
            target=$(echo "$dev_path_sys" | grep -oP 'target\K[0-9:]+' | head -1)

            # Find the actual storage controller in the PCI chain
            mapfile -t pci_addrs < <(echo "$dev_path_sys" | grep -oP '\d+:\d+:\d+\.\d+')
            for addr in "${pci_addrs[@]}"; do
                desc=$(lspci -s "$addr" 2>/dev/null | cut -d: -f3-)
                if [[ "$desc" =~ (SAS|SATA|RAID|HBA|LSI|Adaptec|AHCI) ]]; then
                    pci_addr="$addr"
                    pci_desc="$desc"
                    break
                fi
            done

            # Build HBA info string
            if [[ -n "$pci_addr" ]]; then
                hba_info="PCI: $pci_addr ($pci_desc)"
                [[ -n "$phy" ]] && hba_info="$hba_info | PHY: $phy"
                [[ -n "$port" ]] && hba_info="$hba_info | Port: $port"
                [[ -n "$target" ]] && hba_info="$hba_info | Target: $target"
            fi
        fi
    fi

    # Get Btrfs information
    btrfs_label=""
    btrfs_uuid=""
    btrfs_devid=""

    # Check device or its LUKS mapper for btrfs
    check_dev="$dev_path"
    if [[ -n "$luks_mapper" ]]; then
        check_dev="/dev/mapper/$luks_mapper"
    fi

    btrfs_show=$(btrfs filesystem show "$check_dev" 2>/dev/null)
    if btrfs filesystem show "$check_dev" &>/dev/null; then
        btrfs_label=$(echo "$btrfs_show" | head -1 | grep -oP "Label: '\K[^']+")
        btrfs_uuid=$(echo "$btrfs_show" | head -1 | grep -oP "uuid: \K[a-f0-9-]+")
        btrfs_devid=$(echo "$btrfs_show" | grep -E "(${check_dev}|${dev_path})" | grep -oP "devid\s+\K[0-9]+" | head -1)

        # If not mounted, check if any other device in the btrfs array is mounted
        if [[ -z "$mount_point" && -n "$btrfs_uuid" ]]; then
            all_devs=$(echo "$btrfs_show" | grep "path" | grep -oP "path \K[^ ]+")
            for btrfs_dev in $all_devs; do
                mount_point=$(findmnt -no TARGET "$btrfs_dev" 2>/dev/null | head -1)
                [[ -n "$mount_point" ]] && break
            done
        fi
    fi

    # Default mount point if still empty
    [[ -z "$mount_point" ]] && mount_point="Not mounted"

    # Text output
    echo "╔════════════════════════════════════════╗"
    echo "║ Device: $dev_path"
    echo "╚════════════════════════════════════════╝"
    echo "Model: $model"
    echo "Serial: $serial"
    echo "WWN: $wwn"
    echo "Size: $size"
    echo "Rotation: $rpm"
    echo "Form Factor: $form_factor"
    echo "Interface: $interface"
    echo "Disk ID: $disk_id"
    echo "Filesystem UUID: $uuid"
    echo "LUKS UUID: $luks_uuid"
    echo "Partition Table: $ptable"
    echo "Mount: $mount_point"
    echo "HBA Info: $hba_info"
    echo "SMART Health: $smart_health"
    echo "Temperature: $temp"
    echo "Power On Hours: $power_hours"
    echo "Btrfs Label: $btrfs_label"
    echo "Btrfs UUID: $btrfs_uuid"
    echo "Btrfs devid: $btrfs_devid"
    echo
done
21
scripts/estimate-musicdir
Executable file
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Estimate the size of a music directory if FLACs are lossy compressed

MUSIC_DIR="${1:-/home/bryan/media/music}"

# Sum existing MP3s (sum+0 yields 0 when no files match)
MP3_BYTES=$(find "$MUSIC_DIR" -type f -iname "*.mp3" -exec du -b {} + | awk '{sum+=$1} END{print sum+0}')

# Sum FLACs
FLAC_BYTES=$(find "$MUSIC_DIR" -type f -iname "*.flac" -exec du -b {} + | awk '{sum+=$1} END{print sum+0}')

# Estimate FLACs as 160k Ogg (roughly 1/8 size of FLAC)
EST_FLAC_OGG=$(( FLAC_BYTES / 8 ))

# Total estimated size
TOTAL_EST=$(( MP3_BYTES + EST_FLAC_OGG ))

# Human-readable
EST_HR=$(numfmt --to=iec-i --suffix=B "$TOTAL_EST")
echo "Estimated total size (MP3 + FLAC → 160k Ogg): $EST_HR"
29
scripts/extract
Executable file
@@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Automatically decompresses most filetypes

extract() {
    local a
    [[ $# -eq 0 ]] && { echo "usage: extract <archive...>" >&2; return 1; }
    for a in "$@"; do
        [[ ! -f $a ]] && { echo "$a: not a file" >&2; continue; }
        case $a in
            *.tar.*|*.tgz|*.tbz2) tar xvf "$a" --auto-compress ;;
            *.tar)                tar xvf "$a" ;;
            *.gz)                 gunzip "$a" ;;
            *.bz2)                bunzip2 "$a" ;;
            *.xz)                 unxz "$a" ;;
            *.zst)                unzstd "$a" ;;
            *.zip)                unzip "$a" ;;
            *.rar)                unrar x "$a" ;;
            *.7z)                 7z x "$a" ;;
            *.Z)                  uncompress "$a" ;;
            *)                    echo "$a: cannot extract" ;;
        esac
    done
}

# Allow script to be safely sourced
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    extract "$@"
    exit
fi
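Typical use, either run directly or sourced as a shell function (the archive names are examples only):

    ./scripts/extract backup.tar.zst photos.zip
    source ./scripts/extract && extract notes.7z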
7
scripts/history-clean
Executable file
@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Cleans the history file of PGP messages and keys

histfile="${1:-${HISTFILE:-$HOME/.histfile}}"
cp -a "$histfile" "/tmp/$(basename "$histfile").bak"
sed --in-place '/gpg/d' "$histfile"
sed --in-place '/-----BEGIN PGP MESSAGE-----/,/-----END PGP MESSAGE-----/d' "$histfile"
60
scripts/iso-to-mkv
Executable file
@@ -0,0 +1,60 @@
#!/usr/bin/env bash
# Convert ISO files to MKV format with automatic season/episode naming

set -euo pipefail

SEARCH_DIR="${1:-$(pwd)}"
OUT_DIR="${2:-$SEARCH_DIR/out}"

if [[ ! -d "$SEARCH_DIR" ]]; then
    echo "[ERROR] Search directory '$SEARCH_DIR' does not exist."
    exit 1
fi

mkdir -p "$OUT_DIR"
prev_season=""
ep=1

echo "[INFO] Searching for ISO files in '$SEARCH_DIR'..."

find "$SEARCH_DIR" -type f -iname '*.iso' | sort | while read -r iso; do
    echo "[INFO] Processing: $iso"
    parent=$(basename "$(dirname "$iso")")

    if [[ ! $parent =~ S([0-9]+) ]]; then
        echo "[WARN] Skipping '$iso' - parent directory doesn't match season pattern."
        continue
    fi

    # Force base-10 so season numbers like "08" and "09" are not parsed as octal
    season=$(printf "%02d" "$((10#${BASH_REMATCH[1]}))")

    if [[ "$season" != "$prev_season" ]]; then
        ep=1
        prev_season="$season"
    fi

    ripdir="$OUT_DIR/temp/$parent"
    mkdir -p "$ripdir" "$OUT_DIR/Season $season"

    echo "[INFO] Ripping ISO with MakeMKV..."
    if ! snap run makemkv.makemkvcon -r mkv --minlength=1800 iso:"$iso" all "$ripdir"; then
        echo "[ERROR] Failed to rip '$iso'. Skipping."
        continue
    fi

    for mkv in "$ripdir"/*.mkv; do
        [[ -e "$mkv" ]] || continue
        out="$OUT_DIR/Season $season/S${season}E$(printf "%02d" "$ep").mkv"
        echo "[INFO] Converting to: $out"
        if ffmpeg -nostdin -hide_banner -loglevel error -i "$mkv" \
            -map 0:v -map 0:a:m:language:eng -map 0:s:m:language:eng \
            -c copy "$out"; then
            rm "$mkv"
            ((ep++))
        else
            echo "[ERROR] FFmpeg conversion failed for '$mkv'."
        fi
    done
done

echo "[INFO] Conversion complete. Output in '$OUT_DIR'."
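A hypothetical run (the paths are examples; the parent directory of each ISO must contain an S<number> token, e.g. Show_S02_Disc1, for the season to be detected):

    ./scripts/iso-to-mkv /srv/rips/MyShow /srv/media/tv/MyShow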
63
scripts/jriver-exclusions.ps1
Normal file
@@ -0,0 +1,63 @@
<#
.SYNOPSIS
Adds JRiver Media Center folders & processes to Windows Defender exclusions

.DESCRIPTION
powershell -ExecutionPolicy Bypass -File .\jriver-exclusions.ps1
#>

function Add-ItemExclusion {
    param(
        [string]$Item,
        [ValidateSet('Path','Process')]$Type
    )
    try {
        if ($Type -eq 'Path') {
            Add-MpPreference -ExclusionPath $Item -ErrorAction Stop
        } else {
            Add-MpPreference -ExclusionProcess $Item -ErrorAction Stop
        }
        Write-Host "Added ${Type}: ${Item}"
    }
    catch {
        Write-Warning "Skipped/failed ${Type}: ${Item} - $_"
    }
}

Write-Host "Configuring JRiver Media Center (folders via wildcards, processes version 30–50)"

# Folder exclusions (wildcards cover all files inside)
$folders = @(
    'C:\Program Files\J River',
    'C:\Program Files\J River\Media Center *',
    "$Env:APPDATA\J River",
    "$Env:APPDATA\J River\Media Center *"
)

# Process exclusions (explicit versions 30–50)
$processes = @()
for ($v = 30; $v -le 50; $v++) {
    $processes += "MC$v.exe"
    $processes += "Media Center $v.exe"
}
# Add static processes that are version-independent
$processes += @('JRService.exe','JRWorker.exe','JRWeb.exe')

# Add exclusions
Write-Host "=== Adding folder exclusions ==="
$folders | ForEach-Object { Add-ItemExclusion -Item $_ -Type Path }

Write-Host "=== Adding process exclusions ==="
$processes | Sort-Object -Unique | ForEach-Object { Add-ItemExclusion -Item $_ -Type Process }

# Validation step
$pref = Get-MpPreference
Write-Host ''
Write-Host "=== Current Defender exclusions ==="
Write-Host "Paths:"
$pref.ExclusionPath | ForEach-Object { Write-Host "  $_" }
Write-Host ''
Write-Host "Processes:"
$pref.ExclusionProcess | ForEach-Object { Write-Host "  $_" }
Write-Host ''
Write-Host "=== Validation complete ==="
25
scripts/jriver-expressions.txt
Normal file
@@ -0,0 +1,25 @@
IfElse(
    IsEqual([Media Type], Audio),
    If(IsEqual([Media Sub Type], Podcast),
        podcasts/Clean([Album],3),
        music/Clean([Album Artist (auto)],3)/[[Year]] Clean([Album],3)),
    IsEqual([Media Sub Type], Movie),
    movies/Clean([Name], 3),
    IsEqual([Media Sub Type], TV Show),
    tv/Clean([Series],3)/Season PadNumber([Season], 2)
)

IfElse(
    IsEqual([Media Type], Audio),
    If(IsEmpty([Disc #],1),
        1[Track #],
        [Disc #][Track #]) - Clean([Artist] - [Name],3),
    IsEqual([Media Sub Type], Movie),
    Clean([Name],3) [[Year]],
    IsEqual([Media Sub Type], TV Show),
    Clean([Series] - S[Season]E[Episode] - [Name],3)
)

IfElse(IsEqual([Media Type], Audio), If(IsEqual([Media Sub Type], Podcast), podcasts/Clean([Album],3), music/RemoveCharacters(Clean([Album Artist (auto)],3),.,2)/[[Year]] RemoveCharacters(Clean([Album],3),.,3)), IsEqual([Media Sub Type], Movie), movies/Clean(RemoveCharacters([Name],:), 3) [[Year]], IsEqual([Media Sub Type], TV Show), tv/Clean([Series],3)/Season PadNumber([Season], 2))

IfElse(IsEqual([Media Type], Audio), If(IsEmpty([Disc #],1), 1[Track #], [Disc #][Track #]) - RemoveCharacters(Clean([Artist] - [Name],3),.,3), IsEqual([Media Sub Type], Movie), Clean(RemoveCharacters([Name],:),3) [[Year]], IsEqual([Media Sub Type], TV Show), Clean([Series] - S[Season]E[Episode] - [Name],3))
66
scripts/jriver-fix-date-imported
Executable file
@@ -0,0 +1,66 @@
#!/usr/bin/env python3
"""
Fix JRiver date imported fields to use the earliest date for each album.
"""

import sys
from pathlib import Path, PureWindowsPath


def get_album_path(line: str) -> str:
    """Extract and return the album directory path from a filename field."""
    filename = line.strip().removeprefix('<Field Name="Filename">').removesuffix('</Field>')
    path = PureWindowsPath(filename)
    return str(path.parent)


def get_date(line: str) -> int:
    """Extract and return the date imported value from a date field."""
    date = line.strip().removeprefix('<Field Name="Date Imported">').removesuffix('</Field>')
    return int(date)


def main() -> None:
    """Main function to process JRiver library file."""
    if len(sys.argv) != 3:
        print("Usage: jriver-fix-date-imported <input_file> <output_file>", file=sys.stderr)
        sys.exit(1)

    in_file = Path(sys.argv[1])
    out_file = Path(sys.argv[2])

    if not in_file.exists():
        print(f"[ERROR] Input file '{in_file}' does not exist.", file=sys.stderr)
        sys.exit(1)

    # Read input file
    with open(in_file, "r", encoding="utf-8") as f:
        lines = f.readlines()

    # Build album to tracks mapping
    albums: dict[str, list[tuple[int, int]]] = {}
    current_album: str | None = None

    for lnum, line in enumerate(lines):
        if '<Field Name="Filename">' in line:
            current_album = get_album_path(line)
        elif '<Field Name="Date Imported">' in line:
            date = get_date(line)
            if current_album:
                albums.setdefault(current_album, []).append((lnum, date))

    # Update lines with earliest date for each album
    for _, tracks in albums.items():
        earliest_date: int = min(tracks, key=lambda t: t[1])[1]
        for lnum, _ in tracks:
            lines[lnum] = f'<Field Name="Date Imported">{earliest_date}</Field>\n'

    # Write output file
    with open(out_file, 'w', encoding="utf-8") as f:
        f.writelines(lines)

    print(f"[SUCCESS] Processed {len(albums)} albums. Output written to '{out_file}'.")


if __name__ == "__main__":
    main()
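A sketch of a typical run against an exported library file (the file names are examples only):

    ./scripts/jriver-fix-date-imported Library.xml Library-fixed.xml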
63
scripts/jriver-replace-date-imported
Executable file
@@ -0,0 +1,63 @@
#!/usr/bin/env python3
"""
Replace JRiver date imported fields with date modified when the latter is earlier.
"""

import sys
from pathlib import Path


def get_import_date(line: str) -> int:
    """Extract and return the date imported value from a date imported field."""
    date = line.strip().removeprefix('<Field Name="Date Imported">').removesuffix('</Field>')
    return int(date)


def get_create_date(line: str) -> int:
    """Extract and return the date modified value from a date modified field."""
    date = line.strip().removeprefix('<Field Name="Date Modified">').removesuffix('</Field>')
    return int(date)


def main() -> None:
    """Main function to process JRiver library file."""
    if len(sys.argv) != 3:
        print("Usage: jriver-replace-date-imported <input_file> <output_file>", file=sys.stderr)
        sys.exit(1)

    in_file = Path(sys.argv[1])
    out_file = Path(sys.argv[2])

    if not in_file.exists():
        print(f"[ERROR] Input file '{in_file}' does not exist.", file=sys.stderr)
        sys.exit(1)

    # Read input file
    with open(in_file, "r", encoding="utf-8") as f:
        lines = f.readlines()

    # Process lines and replace dates where appropriate
    import_date: int = 0
    date_imported_line: int = 0
    replacements = 0

    for lnum, line in enumerate(lines):
        if '<Field Name="Date Imported">' in line:
            import_date = get_import_date(line)
            date_imported_line = lnum
        elif '<Field Name="Date Modified">' in line:
            create_date = get_create_date(line)
            if create_date < import_date:
                print(f"[INFO] Replacing {import_date} with {create_date} at line {date_imported_line}")
                lines[date_imported_line] = f'<Field Name="Date Imported">{create_date}</Field>\n'
                replacements += 1

    # Write output file
    with open(out_file, 'w', encoding="utf-8") as f:
        f.writelines(lines)

    print(f"[SUCCESS] Made {replacements} replacements. Output written to '{out_file}'.")


if __name__ == "__main__":
    main()
104
scripts/prune-files
Executable file
@@ -0,0 +1,104 @@
#!/usr/bin/env bash
# Remove all but the latest N versions of files matching given prefixes
# Usage: prune-files -k 3 thisfileprefix [thatfileprefix]

set -euo pipefail

prune-files() {
    local -a PREFIXES
    local KEEP_INT=1 # default number of files to keep
    local DRY_RUN=false

    printHelpAndExit() {
        cat <<-'EOF'
USAGE:
  prune-files -k 3 thisfileprefix [thatfileprefix]

OPTIONS:
  -k, --keep NUMBER
      Keep NUMBER of the latest files that match each file prefix (Default: 1)
  -n, --dry-run
      Show what would be removed without actually deleting files
  -h, --help
      Print this help dialog and exit
EOF
        [[ -z "$1" ]] && exit 0 || exit "$1"
    }

    parseInput() {
        local _input
        if _input=$(getopt -o hk:n -l help,keep:,dry-run -- "$@"); then
            eval set -- "$_input"
            while true; do
                case "$1" in
                    -k|--keep) shift; KEEP_INT="$1" ;;
                    -n|--dry-run) DRY_RUN=true ;;
                    -h|--help) printHelpAndExit 0 ;;
                    --) shift; break ;;
                esac
                shift
            done
        else
            echo "[ERROR] Incorrect option(s) provided" >&2
            printHelpAndExit 1
        fi

        if [[ $# -eq 0 ]]; then
            echo "[ERROR] At least one file prefix must be provided" >&2
            printHelpAndExit 1
        fi

        if ! [[ "$KEEP_INT" =~ ^[0-9]+$ ]] || [[ "$KEEP_INT" -lt 1 ]]; then
            echo "[ERROR] --keep must be a positive integer" >&2
            exit 1
        fi

        PREFIXES=("$@")
    }

    findAndRemove() {
        local prefix file count

        for prefix in "${PREFIXES[@]}"; do
            count=0
            echo "[INFO] Processing files with prefix: $prefix"

            # List files matching the prefix sorted by modification time (latest first),
            # then remove all except the first KEEP_INT files.
            while IFS= read -r file; do
                if [[ "$DRY_RUN" == true ]]; then
                    echo "[DRY-RUN] Would remove: $file"
                else
                    echo "[INFO] Removing: $file"
                    if ! rm -- "$file"; then
                        echo "[ERROR] Failed to remove: $file" >&2
                    fi
                fi
                count=$((count + 1)) # plain assignment so set -e is not tripped when count starts at 0
            done < <(
                # strip the timestamp column but keep the full path (names may contain spaces)
                find . -maxdepth 1 -type f -name "${prefix}*" -printf '%T@ %p\n' 2>/dev/null | \
                    sort -rn | \
                    awk -v keep="$KEEP_INT" 'NR > keep {sub(/^[^ ]+ /, ""); print}'
            )

            if [[ $count -eq 0 ]]; then
                echo "[INFO] No files to remove for prefix: $prefix"
            else
                echo "[INFO] Processed $count file(s) for prefix: $prefix"
            fi
        done
    }

    main() {
        parseInput "$@"
        findAndRemove
    }

    main "$@"
}

# Allow script to be safely sourced
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    prune-files "$@"
    exit $?
fi
39
scripts/random-words
Executable file
@@ -0,0 +1,39 @@
#!/usr/bin/env bash
# Create random words joined by an underscore separator, e.g. turtle_ladder
# Accepts one optional argument: the number of words to return (default: 2)

set -euo pipefail

random_words() {
    local num="${1:-2}"
    local -a arr
    local word

    # Validate input
    if ! [[ "$num" =~ ^[0-9]+$ ]] || [[ "$num" -lt 1 ]]; then
        echo "[ERROR] Argument must be a positive integer" >&2
        return 1
    fi

    # Check if dictionary file exists
    if [[ ! -f /usr/share/dict/words ]]; then
        echo "[ERROR] Dictionary file /usr/share/dict/words not found" >&2
        return 1
    fi

    for ((i=0; i<num; i++)); do
        # Get random word and sanitize in one pass
        word=$(shuf -n1 /usr/share/dict/words | tr -d '-_' | tr '[:upper:]' '[:lower:]')
        arr+=("$word")
    done

    # Join array with underscores
    local IFS="_"
    echo "${arr[*]}"
}

# Allow this file to be executed directly if not being sourced
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
    random_words "$@"
    exit $?
fi
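Example invocations when run directly (the outputs shown are illustrative, real output is random):

    ./scripts/random-words      # -> turtle_ladder
    ./scripts/random-words 3    # -> apple_brick_canyon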
107
scripts/remove-small-dirs
Executable file
@@ -0,0 +1,107 @@
#!/usr/bin/env bash
# Remove directories below a specified size (in KB)
# Usage: remove-small-dirs DIRECTORY [SIZE_THRESHOLD]
# Default SIZE_THRESHOLD is 1000 KB

set -euo pipefail

usage() {
    cat <<-EOF
Usage: remove-small-dirs [OPTIONS] DIRECTORY [SIZE_THRESHOLD]

Remove directories below a specified size (default: 1000 KB).

OPTIONS:
  -n, --dry-run    Show what would be removed without deleting
  -h, --help       Display this help message

ARGUMENTS:
  DIRECTORY        Directory to search for small directories
  SIZE_THRESHOLD   Maximum size in KB (default: 1000)
EOF
}

# Parse options
DRY_RUN=false
while [[ $# -gt 0 ]]; do
    case "$1" in
        -n|--dry-run)
            DRY_RUN=true
            shift
            ;;
        -h|--help)
            usage
            exit 0
            ;;
        -*)
            echo "[ERROR] Unknown option: $1" >&2
            usage
            exit 1
            ;;
        *)
            break
            ;;
    esac
done

if [[ $# -lt 1 ]]; then
    echo "[ERROR] You must provide a directory." >&2
    usage
    exit 1
fi

dir="$1"
SIZE="${2:-1000}"

if [[ ! -d "$dir" ]]; then
    echo "[ERROR] Directory does not exist: $dir" >&2
    exit 1
fi

if ! [[ "$SIZE" =~ ^[0-9]+$ ]] || [[ "$SIZE" -lt 1 ]]; then
    echo "[ERROR] SIZE_THRESHOLD must be a positive integer" >&2
    exit 1
fi

echo "[INFO] Searching for directories <= $SIZE KB in '$dir'..."

# Find directories with size less or equal to SIZE (du output is tab-delimited,
# so a tab field separator keeps paths containing spaces intact)
# Sort by depth (deepest first) to avoid removing parent before child
small_dirs=$(find "$dir" -mindepth 1 -type d -exec du -ks {} + | \
    awk -v size="$SIZE" -F'\t' '$1 <= size {print $2}' | \
    awk '{ print length, $0 }' | sort -rn | cut -d' ' -f2-)

if [[ -z "$small_dirs" ]]; then
    echo "[INFO] No directories with size <= $SIZE KB found in '$dir'."
    exit 0
fi

echo "[INFO] Found $(echo "$small_dirs" | wc -l) directories to remove:"
echo "$small_dirs"

if [[ "$DRY_RUN" == true ]]; then
    echo "[DRY-RUN] Would remove the above directories."
    exit 0
fi

read -r -p "Remove these directories? [y/N] " response
response="${response,,}" # Convert to lowercase

if [[ ! "$response" =~ ^(yes|y)$ ]]; then
    echo "[INFO] Exiting, no changes were made."
    exit 0
fi

count=0
while IFS= read -r small_dir; do
    if [[ -d "$small_dir" ]]; then
        echo "[INFO] Removing: $small_dir"
        if rm -rf "$small_dir"; then
            count=$((count + 1)) # plain assignment so set -e is not tripped when count starts at 0
        else
            echo "[ERROR] Failed to remove: $small_dir" >&2
        fi
    fi
done <<< "$small_dirs"

echo "[SUCCESS] Removed $count directories."
86
scripts/speedtest-compare
Executable file
@@ -0,0 +1,86 @@
#!/usr/bin/env bash
# This script performs speedtests over Wireguard and native connections and prints their output

set -euo pipefail

usage() {
    cat <<-EOF
Usage: speedtest-compare [OPTIONS]

Compare network speed between Wireguard and native connections.

OPTIONS:
  -s, --server ID   Specify server ID for native test (default: 17170)
  -u, --upload      Include upload speed test
  -h, --help        Display this help message
EOF
}

# Parse options
UPLOAD_FLAG="--no-upload"
SERVER_ID="17170"

while [[ $# -gt 0 ]]; do
    case "$1" in
        -s|--server) shift; SERVER_ID="$1"; shift ;;
        -u|--upload) UPLOAD_FLAG=""; shift ;;
        -h|--help) usage; exit 0 ;;
        *)
            echo "[ERROR] Unknown option: $1" >&2
            usage
            exit 1
            ;;
    esac
done

# Check if speedtest-cli is installed
if ! command -v speedtest-cli &>/dev/null; then
    echo "[ERROR] speedtest-cli is not installed. Please install it first." >&2
    exit 1
fi

run_test() {
    local output pingBps pingPart bpsPart pingInt bpsInt mbpsInt

    # Run speedtest-cli and extract the 7th and 8th CSV fields
    if ! output=$(speedtest-cli $UPLOAD_FLAG --csv "$@" 2>/dev/null); then
        echo "[ERROR] Speedtest failed" >&2
        return 1
    fi

    pingBps=$(echo "$output" | cut -d"," -f7-8)

    # Extract ping value (as an integer) and bps (and convert to Mbps)
    pingPart="${pingBps%,*}"
    bpsPart="${pingBps#*,}"
    pingInt="${pingPart%.*}"
    bpsInt="${bpsPart%.*}"
    mbpsInt=$(( bpsInt / 1000000 ))

    echo "$pingInt $mbpsInt"
}

echo "[INFO] Running speedtest comparison..."
echo ""

# Test Wireguard using automatic server selection
echo "Testing Wireguard connection..."
if output=$(run_test); then
    read -r pingInt mbpsInt <<< "$output"
    echo "  Ping: ${pingInt}ms"
    echo "  Speed: ${mbpsInt}Mbps"
else
    echo "  [ERROR] Wireguard test failed"
fi

echo ""

# Test native connection to ISP
echo "Testing native connection (server: $SERVER_ID)..."
if output=$(run_test --server "$SERVER_ID"); then
    read -r pingInt mbpsInt <<< "$output"
    echo "  Ping: ${pingInt}ms"
    echo "  Speed: ${mbpsInt}Mbps"
else
    echo "  [ERROR] Native test failed"
fi
71
scripts/ssh-wrap
Executable file
@@ -0,0 +1,71 @@
#!/usr/bin/env bash
# Usage: ssh-wrap user@host [ssh-options]
# Wrapper to handle SSH host key changes automatically

set -uo pipefail

if [[ $# -eq 0 ]]; then
    echo "Usage: ssh-wrap user@host [ssh-options]" >&2
    exit 1
fi

# Capture SSH output
output=$(ssh "$@" 2>&1)
exit_code=$?

# Print the SSH output so user sees what happened
echo "$output"

# If SSH succeeded, we're done
if [[ $exit_code -eq 0 ]]; then
    exit 0
fi

# Check if the known_hosts warning appears
if echo "$output" | grep -q "REMOTE HOST IDENTIFICATION HAS CHANGED"; then
    echo ""
    echo "[WARNING] Host key has changed - possible man-in-the-middle attack or host reinstall."

    # Extract the known_hosts file and line number from the "Offending RSA key in ..." line
    # The line format typically is: "Offending RSA key in /path/to/known_hosts:line"
    if offending_info=$(echo "$output" | grep "Offending.*key in"); then
        KNOWN_HOSTS_FILE=$(echo "$offending_info" | awk '{print $5}' | cut -d: -f1)
        LINE_NUMBER=$(echo "$offending_info" | awk -F: '{print $NF}')

        if [[ -z "$KNOWN_HOSTS_FILE" || -z "$LINE_NUMBER" || ! -f "$KNOWN_HOSTS_FILE" ]]; then
            echo "[ERROR] Could not extract offending key information or file doesn't exist." >&2
            exit 1
        fi

        echo "[INFO] Offending key detected in: $KNOWN_HOSTS_FILE on line: $LINE_NUMBER"
        read -rp "Remove offending key and retry SSH connection? [y/N]: " RESPONSE

        if [[ "$RESPONSE" =~ ^[Yy]$ ]]; then
            # Backup known_hosts
            if cp "$KNOWN_HOSTS_FILE" "$KNOWN_HOSTS_FILE.bak"; then
                echo "[INFO] Backup created: $KNOWN_HOSTS_FILE.bak"
            else
                echo "[ERROR] Failed to create backup." >&2
                exit 1
            fi

            # Remove offending line
            if sed -i "${LINE_NUMBER}d" "$KNOWN_HOSTS_FILE"; then
                echo "[INFO] Offending key removed. Retrying SSH connection..."
                ssh "$@"
            else
                echo "[ERROR] Failed to remove offending key." >&2
                exit 1
            fi
        else
            echo "[INFO] Key was not removed. Exiting."
            exit 1
        fi
    else
        echo "[ERROR] Could not extract offending key information. Remove it manually if needed." >&2
        exit 1
    fi
else
    # SSH failed for another reason
    exit $exit_code
fi
4
scripts/strip-exif
Executable file
@@ -0,0 +1,4 @@
#!/usr/bin/env bash
# Strips all EXIF data from images provided as arguments

exiftool -all= "$@"
68
scripts/sync-music
Executable file
@@ -0,0 +1,68 @@
#!/usr/bin/env bash
# Sync and transcode music files to a destination directory
set -e

SRC="${1:?Source directory required}"
DST="${2:?Destination directory required}"
JOBS="${3:-12}"

command -v opusenc >/dev/null || { echo "ERROR: opusenc not found" >&2; exit 1; }

mkdir -p "$DST"

echo "Syncing music from $SRC to $DST (using $JOBS parallel jobs)"

# Process source files in parallel
process_file() {
    local src="$1"
    local rel="${src#"$SRC/"}"
    local dst="$DST/$rel"

    case "${src,,}" in
        *.flac)
            dst="${dst%.*}.opus"
            if [[ ! -f "$dst" || "$src" -nt "$dst" ]]; then
                echo "Converting: $rel"
                mkdir -p "$(dirname "$dst")"
                opusenc --quiet --bitrate 160 --vbr "$src" "$dst"
            fi
            ;;
        *.mp3)
            if [[ ! -f "$dst" || "$src" -nt "$dst" ]]; then
                echo "Copying: $rel"
                mkdir -p "$(dirname "$dst")"
                cp -p "$src" "$dst"
            fi
            ;;
    esac
}

export -f process_file
export SRC DST

find -L "$SRC" -type f \( -iname "*.flac" -o -iname "*.mp3" \) -print0 | \
    xargs -0 -P "$JOBS" -I {} bash -c 'process_file "$@"' _ {}

# Remove stray files
while IFS= read -r -d '' dst; do
    rel="${dst#"$DST/"}"
    base="${rel%.*}"

    case "${dst,,}" in
        *.opus)
            [[ -f "$SRC/$base.flac" || -f "$SRC/$base.FLAC" ]] && continue
            echo "Removing: $rel"
            rm -f "$dst"
            ;;
        *.mp3)
            [[ -f "$SRC/$rel" ]] && continue
            echo "Removing: $rel"
            rm -f "$dst"
            ;;
    esac
done < <(find -L "$DST" -type f \( -iname "*.opus" -o -iname "*.mp3" \) -print0)

# Clean empty directories
find "$DST" -type d -empty -delete 2>/dev/null || true

echo "Done"
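A hypothetical sync to a portable player, using 8 parallel encode jobs (the paths are examples only):

    ./scripts/sync-music /home/bryan/media/music /run/media/bryan/dap/music 8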
78
scripts/tmux-management
Executable file
@@ -0,0 +1,78 @@
#!/usr/bin/env bash
# Open a tiled tmux window with one pane per host each in its own tmux session.
# The local session is always the last (active) pane.

set -euo pipefail

# Configuration (override with env vars if desired)
HOSTS=(workstation laptop)                  # hosts in pane order
REMOTE_SESSION=${REMOTE_SESSION:-main}      # tmux session on remotes
SYNCHRONIZE=${SYNCHRONIZE:-1}               # 1 = broadcast keystrokes
INCLUDE_LOCAL=${INCLUDE_LOCAL:-1}           # 0 = skip local host
LOCAL_SHELL_ONLY=${LOCAL_SHELL_ONLY:-0}     # 1 = plain shell locally
DEBUG=${DEBUG:-0}

debug() { if (( DEBUG )); then echo "Debug: $*"; fi; }

# Returns 0 if $2 is found in nameref array $1
array_contains() {
    local -n arr=$1
    local needle=$2
    for element in "${arr[@]}"; do
        [[ "$element" == "$needle" ]] && return 0
    done
    return 1
}

LOCAL=$(hostname -s)

# Build TARGETS list so that LOCAL is always last
TARGETS=()
for h in "${HOSTS[@]}"; do
    [[ $h != "$LOCAL" ]] && TARGETS+=("$h")
done
if (( INCLUDE_LOCAL )); then
    TARGETS+=("$LOCAL")
fi

(( ${#TARGETS[@]} )) || { echo "No hosts to connect to."; exit 1; }

SESSION=$(IFS=-; echo "${TARGETS[*]}")
debug "Session : $SESSION"
debug "Targets : ${TARGETS[*]}"

# Re-attach if session already exists
if tmux has-session -t "$SESSION" 2>/dev/null; then
    exec tmux attach -t "$SESSION"
fi

# Builds the command that will run inside a pane
open_cmd() {
    local tgt=$1
    if [[ $tgt == "$LOCAL" ]]; then
        if (( LOCAL_SHELL_ONLY )); then
            printf '%q -l' "${SHELL:-bash}"
        else
            printf 'tmux -L %q new -A -s %q' "${SESSION}_local" "$REMOTE_SESSION"
        fi
    else
        printf 'ssh -t %q tmux new -A -s %q' "$tgt" "$REMOTE_SESSION"
    fi
}

# Create the first pane
tmux new-session -d -s "$SESSION" -n "$SESSION" "$(open_cmd "${TARGETS[0]}")"

# Create remaining panes
for tgt in "${TARGETS[@]:1}"; do
    tmux split-window -t "$SESSION:0" -h "$(open_cmd "$tgt")"
done

tmux select-layout -t "$SESSION:0" tiled
((SYNCHRONIZE)) && tmux setw -t "$SESSION:0" synchronize-panes on

# Activate the last pane (local host)
local_index=$(( ${#TARGETS[@]} - 1 ))
tmux select-pane -t "$SESSION:0.$local_index"

exec tmux attach -t "$SESSION"
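The environment knobs above can be combined per invocation; for example (the session name is hypothetical):

    REMOTE_SESSION=work SYNCHRONIZE=0 LOCAL_SHELL_ONLY=1 ./scripts/tmux-management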
6
scripts/tree-to-markdown
Executable file
@@ -0,0 +1,6 @@
#!/usr/bin/env bash
# Make a nice markdown file from a directory tree

tree=$(tree -f --noreport --charset ascii "$1" |
    sed -e 's/| \+/ /g' -e 's/[|`]-\+/ */g' -e 's:\(* \)\(\(.*/\)\([^/]\+\)\):\1[\4](\2):g')
printf "# Code/Directory Structure:\n\n%s" "$tree"
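A quick example (the output file name is hypothetical):

    ./scripts/tree-to-markdown src > STRUCTURE.md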
16
scripts/update-git-hooks
Executable file
@@ -0,0 +1,16 @@
#!/usr/bin/env bash
# Update the post-receive hooks of multiple bare git repos

for i in /var/lib/git/gogs-repositories/bryan/*/hooks/post-receive; do
    # Get repo name
    rn="${i%/hooks/post-receive}"
    rn="${rn##*/}"

    # Don't duplicate the line if it already exists
    grep -qxF "git push --mirror git@github.com:cryobry/${rn}" "$i" && continue

    # Append the line
    echo "git push --mirror git@github.com:cryobry/${rn}" >> "$i"
done