added manual jellyfin copy script

Brad Stein 2025-08-27 10:51:47 -05:00
parent cb658e3a24
commit adaad87c11
4 changed files with 271 additions and 110 deletions


@@ -1,18 +0,0 @@
#!/usr/bin/env fish
set jump titan-db
set nodes (ssh $jump 'grep -E "^Host titan-" ~/.ssh/config | awk "{print \$2}"')
for n in $nodes
    echo "=== $n ==="
    ssh $jump "ssh $n 'ls /dev/longhorn'" 2>/dev/null
    # look for app.ini or repos inside each volume
    set vols (ssh $jump "ssh $n 'ls /dev/longhorn'" 2>/dev/null)
    for v in $vols
        ssh $jump "ssh $n 'test -e /dev/longhorn/$v && sudo mount -o ro /dev/longhorn/$v /mnt && \
            (ls /mnt/gitea/conf/app.ini 2>/dev/null || true) && \
            (ls /mnt/git/repositories 2>/dev/null || true); sudo umount /mnt'" 2>/dev/null
    end
end

scripts/manual_jellyfin_load.fish (Executable file, 252 additions)

@@ -0,0 +1,252 @@
#!/usr/bin/env fish
# Copy local files/folders into Jellyfin's RWX PVC reliably using rsync.
# Primary path: NodePort -> rsync direct to node (no apiserver).
# Fallback: kubectl port-forward to service (OK for small/medium files).
#
# Usage:
# scripts/manual_jellyfin_load.fish <LOCAL_PATH> [REMOTE_SUBDIR] [JELLYFIN_API_KEY]
# Examples:
# scripts/manual_jellyfin_load.fish "$HOME/Downloads/Avatar - The Last Airbender (2005 - 2008) [1080p]" kids_tv "$JELLYFIN_API_TOKEN"
# scripts/manual_jellyfin_load.fish "$HOME/Movies/." movies # copy contents-only into /media/movies
function usage
    echo "Usage: "(basename (status filename))" <LOCAL_PATH> [REMOTE_SUBDIR] [JELLYFIN_API_KEY]"
    echo "  LOCAL_PATH:    file or directory. Use '/.' to copy contents-only."
    echo "  REMOTE_SUBDIR: subdir under /media on the PVC (optional)."
end
# --- sanity checks ---
set -g KCTL (command -v kubectl)
if test -z "$KCTL"
    echo "ERROR: kubectl not found in PATH."; exit 1
end
if not command -sq rsync
    echo "ERROR: rsync not found. Install rsync and re-run."; exit 1
end
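# Note: kubectl's client-side request timeout can abort long-running exec/wait
# calls; --request-timeout=0 below disables it for the duration of the copy.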
function kc --description 'kubectl with unlimited timeout'
    command "$KCTL" --request-timeout=0 $argv
end
# --- constants ---
set -g NS jellyfin
set -g POD loader
set -g CTN toolbox
set -g YAML services/jellyfin/loader.yaml
set -g RSYNC_SVC loader-rsync
set -g RSYNC_NODEPORT 30873
set -g DEST_BASE /media
# --- args ---
if test (count $argv) -lt 1
    usage; exit 1
end
# expand ~ in first arg even when quoted
set -l LOCAL_RAW $argv[1]
set -l LOCAL (string replace -r '^~(?=/|$)' -- $HOME $LOCAL_RAW)
if not test -e "$LOCAL"
    echo "ERROR: '$LOCAL' does not exist."; exit 1
end
set -l REMOTE_SUBDIR ""
if test (count $argv) -ge 2
    # -a strips both leading and trailing slashes, not just the first match
    set REMOTE_SUBDIR (string replace -ra '^/+|/+$' '' -- $argv[2])
end
set -l API_TOKEN ""
if test (count $argv) -ge 3
    set API_TOKEN $argv[3]
end
# contents-only mode if LOCAL ended with '/.'
set -l contents_only 0
if string match -rq '/\.\s*$' -- "$LOCAL_RAW"
    set contents_only 1
    set LOCAL (string replace -r '/\.\s*$' '' -- "$LOCAL")
end
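# e.g. '~/Movies'   -> creates /media/<SUBDIR>/Movies/... on the PVC
#      '~/Movies/.' -> copies the contents of Movies directly into /media/<SUBDIR>/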
# --- create/refresh loader pod ---
if kc -n $NS get pod $POD -o name >/dev/null 2>&1
    echo "Found existing $NS/$POD; deleting it first..."
    kc -n $NS delete pod $POD --wait >/dev/null
end
echo "Creating $NS/$POD from $YAML ..."
if not kc -n $NS apply -f "$YAML" >/dev/null
    echo "ERROR: failed to apply $YAML"; exit 1
end
echo "Waiting for $NS/$POD to be Ready..."
if not kc -n $NS wait --for=condition=Ready pod/$POD --timeout=600s >/dev/null
    echo "ERROR: $POD did not become Ready."; exit 1
end
# ensure base dir & perms
kc -n $NS exec $POD -c $CTN -- sh -lc "mkdir -p -- \"$DEST_BASE\" && chmod 0777 \"$DEST_BASE\"" >/dev/null
if test -n "$REMOTE_SUBDIR"
kc -n $NS exec $POD -c $CTN -- sh -lc "mkdir -p -- \"$DEST_BASE/$REMOTE_SUBDIR\" && chmod 0777 \"$DEST_BASE/$REMOTE_SUBDIR\"" >/dev/null
end
# label so a Service can select it
kc -n $NS label pod $POD app=loader --overwrite >/dev/null
# --- start rsync daemon inside the loader container ---
echo "Starting rsync daemon inside $NS/$POD ..."
set -l RSYNCD_CFG '
pid file = /var/run/rsyncd.pid
use chroot = no
log file = /dev/stdout
max connections = 4
[media]
path = /media
read only = false
uid = 0
gid = 0
hosts allow = 0.0.0.0/0
'
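# The [media] module maps onto the PVC mount inside the pod; 'read only = false'
# permits writes, and uid/gid 0 match the root user the Alpine toolbox runs as.
# 'hosts allow' is wide open, but the daemon only lives for the copy's duration.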
# Install rsync (if needed), write the config, stop any old rsync, then start the daemon (it detaches by itself).
if not kc -n $NS exec $POD -c $CTN -- sh -lc "
(rsync --version >/dev/null 2>&1) || apk add --no-cache rsync >/dev/null 2>&1
cat > /etc/rsyncd.conf <<'EOF'
$RSYNCD_CFG
EOF
pkill rsync >/dev/null 2>&1 || true
rsync --daemon --config=/etc/rsyncd.conf --port=873
# quick presence check
pgrep rsync >/dev/null
"
    echo "ERROR: failed to start rsyncd in the loader pod."; exit 1
end
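# If the daemon check is flaky, the same probe can be run by hand:
#   kubectl -n jellyfin exec loader -c toolbox -- pgrep -a rsync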
# --- (re)create NodePort service to reach rsyncd ---
kc -n $NS delete svc $RSYNC_SVC --ignore-not-found >/dev/null
printf "%s\n" \
    "apiVersion: v1
kind: Service
metadata:
  name: $RSYNC_SVC
  namespace: $NS
spec:
  type: NodePort
  selector:
    app: loader
  ports:
    - name: rsync
      port: 873
      targetPort: 873
      nodePort: $RSYNC_NODEPORT" | kc apply -f - >/dev/null
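# 30873 sits in Kubernetes' default NodePort range (30000-32767) and is exposed
# on every node's IP; we still target the pod's own node below to keep the
# transfer to a single hop.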
# wait for endpoints to be populated
for i in (seq 1 30)
    set -l ep (kc -n $NS get endpoints $RSYNC_SVC -o jsonpath='{range .subsets[*].addresses[*]}{.ip}{" "}{end}')
    if test -n "$ep"
        break
    end
    sleep 1
end
# Which node is the pod on?
set -l NODE (kc -n $NS get pod $POD -o jsonpath='{.spec.nodeName}')
set -l HOST (kc get node $NODE -o jsonpath='{range .status.addresses[?(@.type=="InternalIP")]}{.address}{end}')
# Try NodePort reachability; if blocked, fall back to port-forward.
set -l DEST_URL ""
set -l VIA "nodeport"
echo "Waiting for rsync on $HOST:$RSYNC_NODEPORT ..."
for i in (seq 1 10)
    if rsync "rsync://$HOST:$RSYNC_NODEPORT/" >/dev/null 2>&1
        set DEST_URL "rsync://$HOST:$RSYNC_NODEPORT/media"
        break
    end
    sleep 1
end
if test -z "$DEST_URL"
set VIA "port-forward"
set -l PF_LOCAL 3873
echo "NodePort not reachable; falling back to kubectl port-forward on 127.0.0.1:$PF_LOCAL ..."
# background port-forward; capture PID
kc -n $NS port-forward svc/$RSYNC_SVC 127.0.0.1:$PF_LOCAL:873 >/dev/null 2>&1 &
set -l PF_PID $last_pid
# wait until local rsync answers
for i in (seq 1 30)
if rsync "rsync://127.0.0.1:$PF_LOCAL/" >/dev/null 2>&1
set DEST_URL "rsync://127.0.0.1:$PF_LOCAL/media"
break
end
sleep 1
end
if test -z "$DEST_URL"
echo "ERROR: rsync daemon not reachable via NodePort or port-forward."
if test -n "$PF_PID"
command kill $PF_PID >/dev/null 2>&1
end
exit 1
end
end
if test -n "$REMOTE_SUBDIR"
set DEST_URL "$DEST_URL/$REMOTE_SUBDIR"
end
# --- rsync flags (robust/resumable/overwrite) ---
set -l RSYNC_FLAGS -a --progress --partial --inplace --append-verify --human-readable --exclude='.nfs*' --chmod=Du=rwx,Dgo=rwx,Fu=rw,Fgo=rw --timeout=60 --contimeout=30
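# --partial/--inplace/--append-verify let an interrupted transfer resume on the
# next run; --chmod leaves files group/world-readable so Jellyfin can serve
# them regardless of the UID it runs under.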
# --- perform copy ---
set -l copy_ok 0
if test -f "$LOCAL"
    set -l base (basename "$LOCAL")
    echo "Copying file '$base' -> $DEST_URL/ ... ($VIA)"
    if rsync $RSYNC_FLAGS "$LOCAL" "$DEST_URL/"
        set copy_ok 1
    end
else if test -d "$LOCAL"
    set -l base (basename "$LOCAL")
    if test $contents_only -eq 1
        echo "Copying contents of '$base/' -> $DEST_URL/ ... ($VIA)"
        if rsync $RSYNC_FLAGS "$LOCAL/." "$DEST_URL/"
            set copy_ok 1
        end
    else
        echo "Copying folder '$base' -> $DEST_URL/ ... ($VIA)"
        if rsync $RSYNC_FLAGS "$LOCAL" "$DEST_URL/"
            set copy_ok 1
        end
    end
else
    echo "ERROR: '$LOCAL' is neither file nor directory."
end
# --- verify & optionally refresh Jellyfin ---
echo "Verifying on the pod (top level of "(test -n "$REMOTE_SUBDIR"; and echo "$DEST_BASE/$REMOTE_SUBDIR"; or echo "$DEST_BASE")") ..."
kc -n $NS exec $POD -c $CTN -- sh -lc "du -sh -- \"$DEST_BASE\"; ls -lah -- \"$DEST_BASE\" | sed -n '1,200p'"
if test $copy_ok -eq 1
if test -n "$API_TOKEN"
if command -sq curl
echo "Triggering Jellyfin library refresh..."
if curl -fsS -X POST -H "X-Emby-Token: $API_TOKEN" "https://stream.bstein.dev/Library/Refresh" >/dev/null
echo "Jellyfin library refresh triggered."
else
echo "WARNING: Jellyfin library refresh HTTP call failed."
end
else
echo "NOTE: 'curl' not found; skipping library refresh."
end
end
echo "Cleaning up $NS/$POD and $RSYNC_SVC ..."
if test "$VIA" = "port-forward" -a -n "$PF_PID"
command kill $PF_PID >/dev/null 2>&1
end
kc -n $NS delete svc/$RSYNC_SVC --wait=false >/dev/null
kc -n $NS delete pod/$POD --wait >/dev/null
echo "Done."
else
echo "Copy encountered errors; leaving $POD and $RSYNC_SVC running for inspection."
echo "Tip: check rsyncd in the pod: kubectl -n $NS exec $POD -c $CTN -- pgrep -a rsync || true"
exit 1
end


@@ -1,92 +0,0 @@
#!/usr/bin/env fish
# Hard reset / "hammer" for a single Kustomization + HelmRelease pair.
# Default target is Vault (KS: flux-system/vault, HR: vault/vault).
set -l KS_NS flux-system
set -l KS_NAME vault
set -l HR_ns vault
set -l HR_name vault
# Timeouts
set -l TIMEOUT "10m"
set -l SRC_TIMEOUT "5m"
function step
    echo (set_color --bold cyan)"$argv"(set_color normal)
end
function ok
    echo (set_color --bold green)"$argv"(set_color normal)
end
function warn
    echo (set_color --bold yellow)"! $argv"(set_color normal)
end
function err
    echo (set_color --bold red)"$argv"(set_color normal)
end
# Preflight
type -q flux; or begin err "flux CLI not found in PATH"; exit 1; end
type -q kubectl; or begin err "kubectl not found in PATH"; exit 1; end
step "Flux: "(flux --version | string trim)
step "Kube context: "(kubectl config current-context)
# 1) Suspend KS and HR (best effort)
step "Suspending Kustomization $KS_NS/$KS_NAME and HelmRelease $HR_ns/$HR_name"
flux suspend kustomization $KS_NAME -n $KS_NS >/dev/null; or warn "KS already suspended?"
flux suspend helmrelease $HR_name -n $HR_ns >/dev/null; or warn "HR already suspended?"
ok "Suspended"
# 2) Ensure latest sources are fetched
step "Reconciling GitRepository flux-system and HelmRepository hashicorp"
flux reconcile source git flux-system -n flux-system --timeout=$SRC_TIMEOUT >/dev/null; or err "GitRepository reconcile failed"
flux reconcile source helm hashicorp -n flux-system --timeout=$SRC_TIMEOUT >/dev/null; or warn "HelmRepository reconcile failed (continuing)"
ok "Sources refreshed"
# 3) Resume KS and push desired state (don't block here)
step "Resuming Kustomization $KS_NS/$KS_NAME and reconciling (don't wait)"
flux resume kustomization $KS_NAME -n $KS_NS >/dev/null; or err "Failed to resume KS"
# With Flux v2.6.x there is no --wait; use kubectl wait later.
flux reconcile kustomization $KS_NAME -n $KS_NS --with-source --timeout=$TIMEOUT >/dev/null; or warn "KS reconcile returned non-zero (continuing)"
# Give controller a moment to create/refresh the HelmRelease CR
sleep 3
# 4) Patch HelmRelease to BYPASS readiness waiting (critical for Vault)
# This prevents helm-controller from rolling back while Vault is sealed.
step "Patching HelmRelease $HR_ns/$HR_name to disable readiness waiting and extend timeouts"
# Wait until the HR object exists (up to ~2 minutes)
for i in (seq 1 60)
    kubectl -n $HR_ns get helmrelease $HR_name >/dev/null 2>&1; and break
    sleep 2
end
kubectl -n $HR_ns patch helmrelease $HR_name --type merge -p '{
  "spec": {
    "install": { "disableWait": true, "timeout": "30m", "remediation": { "retries": 0 } },
    "upgrade": { "disableWait": true, "timeout": "30m", "remediation": { "retries": 0 } }
  }
}' >/dev/null; or warn "Patch failed (does HR exist yet?)"
# 5) Resume HR and reconcile, then WAIT with kubectl
step "Resuming HelmRelease $HR_ns/$HR_name and reconciling"
flux resume helmrelease $HR_name -n $HR_ns >/dev/null; or err "Failed to resume HR"
flux reconcile helmrelease $HR_name -n $HR_ns --with-source --timeout=$TIMEOUT >/dev/null; or warn "HR reconcile returned non-zero (continuing)"
# Wait for HelmRelease Ready condition (works with CRDs; no flux --wait needed)
step "Waiting for HelmRelease Ready (timeout $TIMEOUT)"
kubectl -n $HR_ns wait helmrelease/$HR_name --for=condition=Ready --timeout=$TIMEOUT >/dev/null
if test $status -ne 0
    warn "HelmRelease did not become Ready within $TIMEOUT (showing status)"
    flux get helmreleases -n $HR_ns $HR_name
else
    ok "HelmRelease is Ready"
end
# 6) Show final status
step "Final Flux status (filtered)"
flux get kustomizations -n $KS_NS $KS_NAME
flux get helmreleases -n $HR_ns $HR_name
# Optional: uncomment to wait for Kustomization Ready too
# step "Waiting for Kustomization Ready (timeout $TIMEOUT)"
# kubectl -n $KS_NS wait kustomization/$KS_NAME --for=condition=Ready --timeout=$TIMEOUT >/dev/null; and ok "Kustomization Ready"


@@ -0,0 +1,19 @@
# services/jellyfin/loader.yaml
apiVersion: v1
kind: Pod
metadata:
  name: loader
  namespace: jellyfin
spec:
  restartPolicy: Never
  volumes:
    - name: media
      persistentVolumeClaim:
        claimName: jellyfin-media
  containers:
    - name: toolbox
      image: alpine:3.19
      command: ["/bin/sh", "-c", "sleep infinity"]
      volumeMounts:
        - name: media
          mountPath: /media
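# Note: jellyfin-media must be an RWX (ReadWriteMany) claim; the loader mounts
# it alongside the running Jellyfin pod, which an RWO claim would not allow
# across nodes.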