backup: persist restic size telemetry and retention totals

This commit is contained in:
Brad Stein 2026-04-13 14:21:29 -03:00
parent 4b5e4f9e31
commit 5d550faec9
9 changed files with 406 additions and 56 deletions

View File

@ -273,6 +273,7 @@ Environment variables:
- `SOTERIA_METRICS_REFRESH_SECONDS` default `300` - `SOTERIA_METRICS_REFRESH_SECONDS` default `300`
- `SOTERIA_POLICY_EVAL_SECONDS` default `300` - `SOTERIA_POLICY_EVAL_SECONDS` default `300`
- `SOTERIA_POLICY_SECRET_NAME` default `soteria-policies` - `SOTERIA_POLICY_SECRET_NAME` default `soteria-policies`
- `SOTERIA_USAGE_SECRET_NAME` default `soteria-backup-usage` (stores persisted restic size estimates)
- `SOTERIA_B2_ENABLED` default `false` (auto-enabled if endpoint/secret are set) - `SOTERIA_B2_ENABLED` default `false` (auto-enabled if endpoint/secret are set)
- `SOTERIA_B2_ENDPOINT` optional S3-compatible endpoint (for B2, usually `https://s3.<region>.backblazeb2.com`) - `SOTERIA_B2_ENDPOINT` optional S3-compatible endpoint (for B2, usually `https://s3.<region>.backblazeb2.com`)
- `SOTERIA_B2_REGION` optional region override (auto-inferred for Backblaze endpoint patterns) - `SOTERIA_B2_REGION` optional region override (auto-inferred for Backblaze endpoint patterns)
@ -321,6 +322,6 @@ The example Service is annotated for Prometheus scraping of `/metrics`.
- Longhorn inventory and metrics are based on discovered backup records per PVC. - Longhorn inventory and metrics are based on discovered backup records per PVC.
- Inventory `Restore` buttons load source context into the restore planner; restore execution happens from the planner panel. - Inventory `Restore` buttons load source context into the restore planner; restore execution happens from the planner panel.
- Scheduled policy execution currently applies to Longhorn driver. - Scheduled backup policies apply to both Longhorn and restic drivers.
- Restic backup and restore execution exists, but inventory-style telemetry is currently Longhorn-focused. - Restic size telemetry is estimated from per-job upload summaries; with shared dedupe repositories those values are per-PVC attributions, not exact physical B2 ownership.
- For Atlas production, place Soteria behind an authenticated ingress and trust only proxy-injected auth headers. - For Atlas production, place Soteria behind an authenticated ingress and trust only proxy-injected auth headers.

View File

@ -21,6 +21,7 @@ const (
defaultPolicyEval = 300 * time.Second defaultPolicyEval = 300 * time.Second
defaultBackupMaxAge = 24 * time.Hour defaultBackupMaxAge = 24 * time.Hour
defaultPolicySecret = "soteria-policies" defaultPolicySecret = "soteria-policies"
defaultUsageSecret = "soteria-backup-usage"
defaultB2ScanInterval = 15 * time.Minute defaultB2ScanInterval = 15 * time.Minute
defaultB2ScanTimeout = 2 * time.Minute defaultB2ScanTimeout = 2 * time.Minute
serviceNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" serviceNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
@ -49,6 +50,7 @@ type Config struct {
MetricsRefreshInterval time.Duration MetricsRefreshInterval time.Duration
PolicyEvalInterval time.Duration PolicyEvalInterval time.Duration
PolicySecretName string PolicySecretName string
UsageSecretName string
BackupMaxAge time.Duration BackupMaxAge time.Duration
B2Enabled bool B2Enabled bool
B2Endpoint string B2Endpoint string
@ -102,6 +104,7 @@ func Load() (*Config, error) {
cfg.PolicyEvalInterval = defaultPolicyEval cfg.PolicyEvalInterval = defaultPolicyEval
cfg.BackupMaxAge = defaultBackupMaxAge cfg.BackupMaxAge = defaultBackupMaxAge
cfg.PolicySecretName = getenvDefault("SOTERIA_POLICY_SECRET_NAME", defaultPolicySecret) cfg.PolicySecretName = getenvDefault("SOTERIA_POLICY_SECRET_NAME", defaultPolicySecret)
cfg.UsageSecretName = getenvDefault("SOTERIA_USAGE_SECRET_NAME", defaultUsageSecret)
cfg.B2Enabled = getenvBool("SOTERIA_B2_ENABLED") cfg.B2Enabled = getenvBool("SOTERIA_B2_ENABLED")
cfg.B2Endpoint = getenv("SOTERIA_B2_ENDPOINT") cfg.B2Endpoint = getenv("SOTERIA_B2_ENDPOINT")
cfg.B2Region = getenv("SOTERIA_B2_REGION") cfg.B2Region = getenv("SOTERIA_B2_REGION")
@ -185,6 +188,9 @@ func Load() (*Config, error) {
if strings.TrimSpace(cfg.PolicySecretName) == "" { if strings.TrimSpace(cfg.PolicySecretName) == "" {
return nil, errors.New("SOTERIA_POLICY_SECRET_NAME must not be empty") return nil, errors.New("SOTERIA_POLICY_SECRET_NAME must not be empty")
} }
if strings.TrimSpace(cfg.UsageSecretName) == "" {
return nil, errors.New("SOTERIA_USAGE_SECRET_NAME must not be empty")
}
if cfg.BackupMaxAge <= 0 { if cfg.BackupMaxAge <= 0 {
return nil, errors.New("SOTERIA_BACKUP_MAX_AGE_HOURS must be greater than zero") return nil, errors.New("SOTERIA_BACKUP_MAX_AGE_HOURS must be greater than zero")
} }

View File

@ -65,6 +65,8 @@ type Server struct {
b2Usage api.B2UsageResponse b2Usage api.B2UsageResponse
jobUsage map[string]resticJobUsageCacheEntry jobUsage map[string]resticJobUsageCacheEntry
jobUsageMu sync.RWMutex jobUsageMu sync.RWMutex
usageMu sync.RWMutex
usageStore map[string]resticPersistedUsageEntry
} }
type authIdentity struct { type authIdentity struct {
@ -80,6 +82,7 @@ const authContextKey ctxKey = "soteria-auth"
const ( const (
policySecretKey = "policies.json" policySecretKey = "policies.json"
usageSecretKey = "restic-job-usage.json"
defaultPolicyHours = 24.0 defaultPolicyHours = 24.0
maxPolicyIntervalHrs = 24 * 365 maxPolicyIntervalHrs = 24 * 365
maxPolicyKeepLast = 1000 maxPolicyKeepLast = 1000
@ -93,6 +96,19 @@ type resticJobUsageCacheEntry struct {
CheckedAt time.Time CheckedAt time.Time
} }
// resticPersistedUsageEntry is one persisted per-job restic upload size
// estimate: the stored byte count plus an optional timestamp (RFC3339 at
// write time) recording when the value was last updated.
type resticPersistedUsageEntry struct {
	Bytes     float64 `json:"bytes"`
	UpdatedAt string  `json:"updated_at,omitempty"`
}

// resticPersistedUsageDocument is the JSON document stored under the
// usage secret key. Jobs carries a flat list of entries; Key is the
// "namespace/jobName" identifier used by the in-memory usage store.
type resticPersistedUsageDocument struct {
	Jobs []struct {
		Key       string  `json:"key"`
		Bytes     float64 `json:"bytes"`
		UpdatedAt string  `json:"updated_at,omitempty"`
	} `json:"jobs"`
}
var ( var (
resticAddedStoredPattern = regexp.MustCompile(`(?mi)added to the (?:repository|repo):[^\n]*\(([^)]+)\s+stored\)`) resticAddedStoredPattern = regexp.MustCompile(`(?mi)added to the (?:repository|repo):[^\n]*\(([^)]+)\s+stored\)`)
resticDataAddedPattern = regexp.MustCompile(`(?m)"data_added":\s*([0-9]+)`) resticDataAddedPattern = regexp.MustCompile(`(?m)"data_added":\s*([0-9]+)`)
@ -107,6 +123,7 @@ func New(cfg *config.Config, client *k8s.Client, lh *longhorn.Client) *Server {
ui: newUIRenderer(), ui: newUIRenderer(),
policies: map[string]api.BackupPolicy{}, policies: map[string]api.BackupPolicy{},
jobUsage: map[string]resticJobUsageCacheEntry{}, jobUsage: map[string]resticJobUsageCacheEntry{},
usageStore: map[string]resticPersistedUsageEntry{},
} }
s.handler = http.HandlerFunc(s.route) s.handler = http.HandlerFunc(s.route)
return s return s
@ -116,6 +133,9 @@ func (s *Server) Start(ctx context.Context) {
if err := s.loadPolicies(ctx); err != nil { if err := s.loadPolicies(ctx); err != nil {
log.Printf("policy load failed: %v", err) log.Printf("policy load failed: %v", err)
} }
if err := s.loadResticUsage(ctx); err != nil {
log.Printf("restic usage load failed: %v", err)
}
s.refreshTelemetry(ctx) s.refreshTelemetry(ctx)
s.refreshB2Usage(ctx) s.refreshB2Usage(ctx)
@ -303,11 +323,12 @@ func (s *Server) handleBackups(w http.ResponseWriter, r *http.Request) {
writeError(w, http.StatusBadGateway, err.Error()) writeError(w, http.StatusBadGateway, err.Error())
return return
} }
records := s.buildResticBackupRecords(r.Context(), namespace, jobs, s.cfg.ResticRepository)
writeJSON(w, http.StatusOK, api.BackupListResponse{ writeJSON(w, http.StatusOK, api.BackupListResponse{
Namespace: namespace, Namespace: namespace,
PVC: pvcName, PVC: pvcName,
Volume: volumeName, Volume: volumeName,
Backups: buildResticBackupRecords(jobs, s.cfg.ResticRepository), Backups: records,
}) })
default: default:
writeError(w, http.StatusBadRequest, "unsupported backup driver") writeError(w, http.StatusBadRequest, "unsupported backup driver")
@ -1060,9 +1081,16 @@ func (s *Server) enrichPVCInventory(
} }
entry.ActiveBackups = active entry.ActiveBackups = active
entry.CompletedBackups = len(completed) entry.CompletedBackups = len(completed)
sizeSamples := completed
if len(sizeSamples) > 0 {
retained := sizeSamples[0].KeepLast
if retained > 0 && retained < len(sizeSamples) {
sizeSamples = sizeSamples[:retained]
}
}
totalStoredBytes := 0.0 totalStoredBytes := 0.0
storedSamples := 0 storedSamples := 0
for index, job := range completed { for index, job := range sizeSamples {
if index >= maxUsageSampleJobs { if index >= maxUsageSampleJobs {
break break
} }
@ -1139,6 +1167,21 @@ func (s *Server) lookupResticStoredBytesForJob(ctx context.Context, namespace, j
return cached.Bytes, cached.Known return cached.Bytes, cached.Known
} }
if bytes, known := s.lookupPersistedResticUsage(key); known {
entry := resticJobUsageCacheEntry{
Known: true,
Bytes: bytes,
CheckedAt: time.Now().UTC(),
}
s.jobUsageMu.Lock()
if s.jobUsage == nil {
s.jobUsage = map[string]resticJobUsageCacheEntry{}
}
s.jobUsage[key] = entry
s.jobUsageMu.Unlock()
return bytes, true
}
logBody, err := s.client.ReadBackupJobLog(ctx, namespace, jobName) logBody, err := s.client.ReadBackupJobLog(ctx, namespace, jobName)
entry := resticJobUsageCacheEntry{ entry := resticJobUsageCacheEntry{
Known: false, Known: false,
@ -1149,6 +1192,7 @@ func (s *Server) lookupResticStoredBytesForJob(ctx context.Context, namespace, j
if parsedBytes, parsed := parseResticStoredBytes(logBody); parsed { if parsedBytes, parsed := parseResticStoredBytes(logBody); parsed {
entry.Known = true entry.Known = true
entry.Bytes = parsedBytes entry.Bytes = parsedBytes
s.storePersistedResticUsage(ctx, key, parsedBytes)
} }
} }
@ -1221,6 +1265,138 @@ func parseHumanByteSize(raw string) (float64, bool) {
} }
} }
// loadResticUsage hydrates the in-memory persisted-usage store from the
// configured usage secret. A blank secret name or an empty secret payload
// disables loading without error. Entries with blank keys or
// non-finite/negative byte counts are dropped during decode, and the
// store is replaced wholesale under the usage lock.
func (s *Server) loadResticUsage(ctx context.Context) error {
	if strings.TrimSpace(s.cfg.UsageSecretName) == "" {
		return nil
	}
	raw, err := s.client.LoadSecretData(ctx, s.cfg.Namespace, s.cfg.UsageSecretName, usageSecretKey)
	if err != nil {
		return err
	}
	if len(raw) == 0 {
		return nil
	}
	var doc resticPersistedUsageDocument
	if err := json.Unmarshal(raw, &doc); err != nil {
		return fmt.Errorf("decode restic usage document: %w", err)
	}
	loaded := make(map[string]resticPersistedUsageEntry, len(doc.Jobs))
	for _, job := range doc.Jobs {
		trimmedKey := strings.TrimSpace(job.Key)
		if trimmedKey == "" {
			continue
		}
		// Reject garbage values so a corrupted document cannot poison telemetry.
		if job.Bytes < 0 || math.IsNaN(job.Bytes) || math.IsInf(job.Bytes, 0) {
			continue
		}
		loaded[trimmedKey] = resticPersistedUsageEntry{
			Bytes:     job.Bytes,
			UpdatedAt: strings.TrimSpace(job.UpdatedAt),
		}
	}
	s.usageMu.Lock()
	s.usageStore = loaded
	s.usageMu.Unlock()
	return nil
}
// lookupPersistedResticUsage returns the persisted byte estimate for a
// job key, reporting false when no finite non-negative value is on
// record. Reading a nil map is safe in Go, so no nil check is needed.
func (s *Server) lookupPersistedResticUsage(key string) (float64, bool) {
	s.usageMu.RLock()
	defer s.usageMu.RUnlock()
	entry, found := s.usageStore[key]
	if !found {
		return 0, false
	}
	// Defensive: never surface garbage values to callers.
	if math.IsNaN(entry.Bytes) || math.IsInf(entry.Bytes, 0) || entry.Bytes < 0 {
		return 0, false
	}
	return entry.Bytes, true
}
// storePersistedResticUsage records a parsed byte estimate for a job key
// and, when the in-memory store actually changed, writes the whole store
// back to the usage secret. Invalid keys/values are ignored; persistence
// failures are logged rather than returned (best-effort telemetry).
func (s *Server) storePersistedResticUsage(ctx context.Context, key string, value float64) {
	if key == "" || math.IsNaN(value) || math.IsInf(value, 0) || value < 0 {
		return
	}
	stamp := time.Now().UTC().Format(time.RFC3339)
	s.usageMu.Lock()
	if s.usageStore == nil {
		s.usageStore = map[string]resticPersistedUsageEntry{}
	}
	prev, found := s.usageStore[key]
	// Rewrite when the value is new/changed, or when an existing entry is
	// missing its timestamp (backfills UpdatedAt).
	dirty := !found || prev.Bytes != value || strings.TrimSpace(prev.UpdatedAt) == ""
	if dirty {
		s.usageStore[key] = resticPersistedUsageEntry{Bytes: value, UpdatedAt: stamp}
	}
	s.usageMu.Unlock()
	if !dirty {
		return
	}
	if err := s.persistResticUsage(ctx); err != nil {
		log.Printf("persist restic usage failed: %v", err)
	}
}
// persistResticUsage serializes the current usage store as a
// resticPersistedUsageDocument and writes it to the configured usage
// secret with Soteria ownership labels. Output is deterministic (jobs
// sorted by key); entries with empty keys or non-finite/negative byte
// counts are skipped. A blank secret name disables persistence.
func (s *Server) persistResticUsage(ctx context.Context) error {
	if strings.TrimSpace(s.cfg.UsageSecretName) == "" {
		return nil
	}
	// Snapshot the store under the read lock so JSON encoding runs unlocked.
	s.usageMu.RLock()
	snapshot := make(map[string]resticPersistedUsageEntry, len(s.usageStore))
	for key, value := range s.usageStore {
		snapshot[key] = value
	}
	s.usageMu.RUnlock()
	keys := make([]string, 0, len(snapshot))
	for key := range snapshot {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	doc := resticPersistedUsageDocument{
		Jobs: make([]struct {
			Key       string  `json:"key"`
			Bytes     float64 `json:"bytes"`
			UpdatedAt string  `json:"updated_at,omitempty"`
		}, 0, len(keys)),
	}
	for _, key := range keys {
		value := snapshot[key]
		if key == "" || value.Bytes < 0 || math.IsNaN(value.Bytes) || math.IsInf(value.Bytes, 0) {
			continue
		}
		doc.Jobs = append(doc.Jobs, struct {
			Key       string  `json:"key"`
			Bytes     float64 `json:"bytes"`
			UpdatedAt string  `json:"updated_at,omitempty"`
		}{
			Key:       key,
			Bytes:     value.Bytes,
			UpdatedAt: strings.TrimSpace(value.UpdatedAt),
		})
	}
	payload, err := json.Marshal(doc)
	if err != nil {
		return fmt.Errorf("encode restic usage document: %w", err)
	}
	return s.client.SaveSecretData(ctx, s.cfg.Namespace, s.cfg.UsageSecretName, usageSecretKey, payload, map[string]string{
		"app.kubernetes.io/name":      "soteria",
		"app.kubernetes.io/component": "usage-store",
	})
}
func (s *Server) refreshTelemetry(ctx context.Context) { func (s *Server) refreshTelemetry(ctx context.Context) {
refreshCtx, cancel := context.WithTimeout(ctx, 2*time.Minute) refreshCtx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel() defer cancel()
@ -1740,7 +1916,7 @@ func buildBackupRecords(backups []longhorn.Backup) []api.BackupRecord {
return records return records
} }
func buildResticBackupRecords(jobs []k8s.BackupJobSummary, defaultRepository string) []api.BackupRecord { func (s *Server) buildResticBackupRecords(ctx context.Context, namespace string, jobs []k8s.BackupJobSummary, defaultRepository string) []api.BackupRecord {
records := make([]api.BackupRecord, 0, len(jobs)) records := make([]api.BackupRecord, 0, len(jobs))
latestName := "" latestName := ""
for _, job := range jobs { for _, job := range jobs {
@ -1756,6 +1932,7 @@ func buildResticBackupRecords(jobs []k8s.BackupJobSummary, defaultRepository str
created = ts.UTC().Format(time.RFC3339) created = ts.UTC().Format(time.RFC3339)
} }
url := "" url := ""
size := ""
latest := job.Name == latestName latest := job.Name == latestName
if latest && strings.EqualFold(job.State, "Completed") { if latest && strings.EqualFold(job.State, "Completed") {
repository := strings.TrimSpace(job.Repository) repository := strings.TrimSpace(job.Repository)
@ -1764,12 +1941,18 @@ func buildResticBackupRecords(jobs []k8s.BackupJobSummary, defaultRepository str
} }
url = encodeResticSelector(repository) url = encodeResticSelector(repository)
} }
if strings.EqualFold(job.State, "Completed") {
if bytes, ok := s.lookupResticStoredBytesForJob(ctx, namespace, job.Name); ok {
size = formatBytesIEC(bytes)
}
}
records = append(records, api.BackupRecord{ records = append(records, api.BackupRecord{
Name: job.Name, Name: job.Name,
SnapshotName: job.Name, SnapshotName: job.Name,
Created: created, Created: created,
State: job.State, State: job.State,
URL: url, URL: url,
Size: size,
Latest: latest, Latest: latest,
}) })
} }
@ -1935,6 +2118,23 @@ func parseSizeBytes(raw string) int64 {
return 0 return 0
} }
// formatBytesIEC renders a byte count using binary (IEC) units, e.g.
// "512 B" or "4.00 KiB", capping at TiB. Non-positive, NaN, or infinite
// inputs render as "0 B". Whole-byte values use no decimals; scaled
// values use two.
func formatBytesIEC(value float64) string {
	if math.IsNaN(value) || math.IsInf(value, 0) || value <= 0 {
		return "0 B"
	}
	const step = 1024.0
	labels := [...]string{"B", "KiB", "MiB", "GiB", "TiB"}
	scaled := value
	idx := 0
	for idx < len(labels)-1 && scaled >= step {
		scaled /= step
		idx++
	}
	if idx == 0 {
		return fmt.Sprintf("%.0f %s", scaled, labels[0])
	}
	return fmt.Sprintf("%.2f %s", scaled, labels[idx])
}
func dedupeDefault(value *bool) bool { func dedupeDefault(value *bool) bool {
if value == nil { if value == nil {
return true return true

View File

@ -420,6 +420,142 @@ func TestResticInventoryUsesCompletedBackupJobs(t *testing.T) {
} }
} }
// TestResticInventoryUsesPersistedUsageWhenLogsGone verifies that when a
// completed restic backup job has no readable log (the fakeKubeClient is
// given no jobLogs), inventory size telemetry falls back to the
// persisted usage store seeded on the server.
func TestResticInventoryUsesPersistedUsageWhenLogsGone(t *testing.T) {
	completedAt := time.Now().UTC().Add(-90 * time.Minute)
	jobName := "soteria-backup-data-20260413-010000"
	srv := &Server{
		cfg: &config.Config{
			AuthRequired: false,
			BackupDriver: "restic",
			BackupMaxAge: 24 * time.Hour,
		},
		client: &fakeKubeClient{
			pvcs: []k8s.PVCSummary{
				{Namespace: "apps", Name: "data", VolumeName: "pv-apps-data", Phase: "Bound"},
			},
			backupJobs: map[string][]k8s.BackupJobSummary{
				"apps/data": {
					{
						Name:           jobName,
						Namespace:      "apps",
						PVC:            "data",
						CreatedAt:      completedAt.Add(-2 * time.Minute),
						CompletionTime: completedAt,
						State:          "Completed",
					},
				},
			},
		},
		longhorn: &fakeLonghornClient{},
		metrics:  newTelemetry(),
		jobUsage: map[string]resticJobUsageCacheEntry{},
		// Persisted store entry keyed by "namespace/jobName" — the size the
		// inventory must surface since no job log is available.
		usageStore: map[string]resticPersistedUsageEntry{
			"apps/" + jobName: {
				Bytes:     4096,
				UpdatedAt: time.Now().UTC().Format(time.RFC3339),
			},
		},
	}
	srv.handler = http.HandlerFunc(srv.route)
	req := httptest.NewRequest(http.MethodGet, "/v1/inventory", nil)
	res := httptest.NewRecorder()
	srv.Handler().ServeHTTP(res, req)
	if res.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d: %s", res.Code, res.Body.String())
	}
	var payload api.InventoryResponse
	if err := json.Unmarshal(res.Body.Bytes(), &payload); err != nil {
		t.Fatalf("decode inventory: %v", err)
	}
	entry := payload.Namespaces[0].PVCs[0]
	// Single completed job, so latest and total both equal the persisted 4096.
	if entry.LastBackupSizeBytes != 4096 {
		t.Fatalf("expected persisted last backup bytes, got %#v", entry.LastBackupSizeBytes)
	}
	if entry.TotalBackupSizeBytes != 4096 {
		t.Fatalf("expected persisted total backup bytes, got %#v", entry.TotalBackupSizeBytes)
	}
}
// TestResticInventoryKeepLastLimitsTotalStoredSample verifies that with
// KeepLast=1 the total-stored sample counts only the retained (latest)
// completed job, even though three completed jobs all have parseable
// upload sizes in their logs.
func TestResticInventoryKeepLastLimitsTotalStoredSample(t *testing.T) {
	now := time.Now().UTC()
	srv := &Server{
		cfg: &config.Config{
			AuthRequired: false,
			BackupDriver: "restic",
			BackupMaxAge: 24 * time.Hour,
		},
		client: &fakeKubeClient{
			pvcs: []k8s.PVCSummary{
				{Namespace: "apps", Name: "data", VolumeName: "pv-apps-data", Phase: "Bound"},
			},
			// Three completed jobs, newest first by completion time; each
			// declares a retention of KeepLast=1.
			backupJobs: map[string][]k8s.BackupJobSummary{
				"apps/data": {
					{
						Name:           "soteria-backup-data-20260413-030000",
						Namespace:      "apps",
						PVC:            "data",
						CreatedAt:      now.Add(-3 * time.Hour),
						CompletionTime: now.Add(-3 * time.Hour),
						State:          "Completed",
						KeepLast:       1,
					},
					{
						Name:           "soteria-backup-data-20260413-020000",
						Namespace:      "apps",
						PVC:            "data",
						CreatedAt:      now.Add(-4 * time.Hour),
						CompletionTime: now.Add(-4 * time.Hour),
						State:          "Completed",
						KeepLast:       1,
					},
					{
						Name:           "soteria-backup-data-20260413-010000",
						Namespace:      "apps",
						PVC:            "data",
						CreatedAt:      now.Add(-5 * time.Hour),
						CompletionTime: now.Add(-5 * time.Hour),
						State:          "Completed",
						KeepLast:       1,
					},
				},
			},
			// Every job log parses to a distinct data_added value; only the
			// newest (1200) should contribute to the totals below.
			jobLogs: map[string]string{
				"apps/soteria-backup-data-20260413-030000": `{"data_added": 1200}`,
				"apps/soteria-backup-data-20260413-020000": `{"data_added": 800}`,
				"apps/soteria-backup-data-20260413-010000": `{"data_added": 600}`,
			},
		},
		longhorn:   &fakeLonghornClient{},
		metrics:    newTelemetry(),
		jobUsage:   map[string]resticJobUsageCacheEntry{},
		usageStore: map[string]resticPersistedUsageEntry{},
	}
	srv.handler = http.HandlerFunc(srv.route)
	req := httptest.NewRequest(http.MethodGet, "/v1/inventory", nil)
	res := httptest.NewRecorder()
	srv.Handler().ServeHTTP(res, req)
	if res.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d: %s", res.Code, res.Body.String())
	}
	var payload api.InventoryResponse
	if err := json.Unmarshal(res.Body.Bytes(), &payload); err != nil {
		t.Fatalf("decode inventory: %v", err)
	}
	entry := payload.Namespaces[0].PVCs[0]
	if entry.LastBackupSizeBytes != 1200 {
		t.Fatalf("expected latest backup size to be sampled, got %#v", entry.LastBackupSizeBytes)
	}
	// KeepLast=1 trims the sample to the single retained snapshot.
	if entry.TotalBackupSizeBytes != 1200 {
		t.Fatalf("expected keep_last=1 total to reflect retained snapshot only, got %#v", entry.TotalBackupSizeBytes)
	}
}
func TestResticInventoryMarksInProgressWhenOnlyActiveJobsExist(t *testing.T) { func TestResticInventoryMarksInProgressWhenOnlyActiveJobsExist(t *testing.T) {
startedAt := time.Now().UTC().Add(-5 * time.Minute) startedAt := time.Now().UTC().Add(-5 * time.Minute)
srv := &Server{ srv := &Server{
@ -502,9 +638,13 @@ func TestResticBackupsEndpointReturnsLatestSelector(t *testing.T) {
}, },
}, },
}, },
jobLogs: map[string]string{
"apps/soteria-backup-data-20260413-010000": `{"data_added": 1200}`,
},
}, },
longhorn: &fakeLonghornClient{}, longhorn: &fakeLonghornClient{},
metrics: newTelemetry(), metrics: newTelemetry(),
jobUsage: map[string]resticJobUsageCacheEntry{},
} }
srv.handler = http.HandlerFunc(srv.route) srv.handler = http.HandlerFunc(srv.route)
@ -526,6 +666,9 @@ func TestResticBackupsEndpointReturnsLatestSelector(t *testing.T) {
if payload.Backups[0].URL != "latest" || !payload.Backups[0].Latest { if payload.Backups[0].URL != "latest" || !payload.Backups[0].Latest {
t.Fatalf("expected latest restic selector, got %#v", payload.Backups[0]) t.Fatalf("expected latest restic selector, got %#v", payload.Backups[0])
} }
if payload.Backups[0].Size == "" {
t.Fatalf("expected restic backup size metadata, got %#v", payload.Backups[0])
}
} }
func TestResticRestoreUsesRepositorySelector(t *testing.T) { func TestResticRestoreUsesRepositorySelector(t *testing.T) {

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -4,8 +4,8 @@
<meta charset="UTF-8" /> <meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Soteria Backup Console</title> <title>Soteria Backup Console</title>
<script type="module" crossorigin src="/assets/index-Bl8gBoZ6.js"></script> <script type="module" crossorigin src="/assets/index-C9X7C4pD.js"></script>
<link rel="stylesheet" crossorigin href="/assets/index-Dq7_oHb5.css"> <link rel="stylesheet" crossorigin href="/assets/index-B24a4-XK.css">
</head> </head>
<body> <body>
<div id="root"></div> <div id="root"></div>

View File

@ -659,7 +659,7 @@ function App() {
Backups: {pvc.completed_backups}/{pvc.backup_count} completed | Latest size: {latestSizeLabel} | Total stored: {totalStoredLabel} Backups: {pvc.completed_backups}/{pvc.backup_count} completed | Latest size: {latestSizeLabel} | Total stored: {totalStoredLabel}
</p> </p>
{showResticSizeHint && ( {showResticSizeHint && (
<p className="subtle tiny">Per-PVC upload bytes are estimated from retained restic backup job logs; older jobs outside retention may show n/a.</p> <p className="subtle tiny">Per-PVC storage is estimated from restic upload summaries persisted by Soteria. Older backups created before tracking may show n/a until a new backup runs.</p>
)} )}
{showProgress && ( {showProgress && (
<div className="backup-progress"> <div className="backup-progress">