test(soteria): cover restic usage store
This commit is contained in:
parent
9a061c14ae
commit
d309920ff1
337
internal/server/restic_usage_store_test.go
Normal file
337
internal/server/restic_usage_store_test.go
Normal file
@ -0,0 +1,337 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"scm.bstein.dev/bstein/soteria/internal/config"
|
||||
)
|
||||
|
||||
// resticUsageTestKubeClient wraps fakeKubeClient so tests can inject failures
// into the secret and backup-log operations and count how often each one is
// invoked by the code under test.
type resticUsageTestKubeClient struct {
	*fakeKubeClient

	loadSecretDataErr error // when non-nil, returned by LoadSecretData instead of delegating
	saveSecretDataErr error // when non-nil, returned by SaveSecretData instead of delegating
	readBackupLogErr  error // when non-nil, returned by ReadBackupJobLog instead of delegating

	loadCalls int // number of LoadSecretData invocations observed
	saveCalls int // number of SaveSecretData invocations observed
	readCalls int // number of ReadBackupJobLog invocations observed
}
|
||||
|
||||
func (k *resticUsageTestKubeClient) LoadSecretData(ctx context.Context, namespace, secretName, key string) ([]byte, error) {
|
||||
k.loadCalls++
|
||||
if k.loadSecretDataErr != nil {
|
||||
return nil, k.loadSecretDataErr
|
||||
}
|
||||
return k.fakeKubeClient.LoadSecretData(ctx, namespace, secretName, key)
|
||||
}
|
||||
|
||||
func (k *resticUsageTestKubeClient) SaveSecretData(ctx context.Context, namespace, secretName, key string, value []byte, labels map[string]string) error {
|
||||
k.saveCalls++
|
||||
if k.saveSecretDataErr != nil {
|
||||
return k.saveSecretDataErr
|
||||
}
|
||||
return k.fakeKubeClient.SaveSecretData(ctx, namespace, secretName, key, value, labels)
|
||||
}
|
||||
|
||||
func (k *resticUsageTestKubeClient) ReadBackupJobLog(ctx context.Context, namespace, jobName string) (string, error) {
|
||||
k.readCalls++
|
||||
if k.readBackupLogErr != nil {
|
||||
return "", k.readBackupLogErr
|
||||
}
|
||||
return k.fakeKubeClient.ReadBackupJobLog(ctx, namespace, jobName)
|
||||
}
|
||||
|
||||
func newResticUsageTestServer(cfg *config.Config, client kubeClient) *Server {
|
||||
return &Server{
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
metrics: newTelemetry(),
|
||||
jobUsage: map[string]resticJobUsageCacheEntry{},
|
||||
usageStore: map[string]resticPersistedUsageEntry{},
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseHumanByteSizeCoversSupportedUnits(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
raw string
|
||||
want float64
|
||||
ok bool
|
||||
}{
|
||||
{name: "bytes", raw: "42 B", want: 42, ok: true},
|
||||
{name: "kib", raw: "1.5 KiB", want: 1536, ok: true},
|
||||
{name: "mib", raw: "2 MiB", want: 2 * 1024 * 1024, ok: true},
|
||||
{name: "gib", raw: "3 GiB", want: 3 * 1024 * 1024 * 1024, ok: true},
|
||||
{name: "tib", raw: "4 TiB", want: 4 * 1024 * 1024 * 1024 * 1024, ok: true},
|
||||
{name: "kb", raw: "2 KB", want: 2000, ok: true},
|
||||
{name: "mb", raw: "3 MB", want: 3000000, ok: true},
|
||||
{name: "gb", raw: "4 GB", want: 4000000000, ok: true},
|
||||
{name: "tb", raw: "5 TB", want: 5000000000000, ok: true},
|
||||
{name: "comma separated", raw: "1,024 KB", want: 1024000, ok: true},
|
||||
{name: "missing unit", raw: "123", want: 0, ok: false},
|
||||
{name: "bad number", raw: "abc MiB", want: 0, ok: false},
|
||||
{name: "unsupported unit", raw: "10 XB", want: 0, ok: false},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
got, ok := parseHumanByteSize(tc.raw)
|
||||
if ok != tc.ok {
|
||||
t.Fatalf("%s: expected ok=%v, got %v", tc.name, tc.ok, ok)
|
||||
}
|
||||
if ok && got != tc.want {
|
||||
t.Fatalf("%s: expected %f, got %f", tc.name, tc.want, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestLoadResticUsageCoversNoopDecodeAndFiltering covers the load paths of the
// persisted restic usage store: a no-op when no usage secret is configured,
// propagation of secret-read errors, surfacing of JSON decode errors, and
// filtering of persisted entries with blank keys or negative byte counts.
func TestLoadResticUsageCoversNoopDecodeAndFiltering(t *testing.T) {
	t.Run("no secret configured", func(t *testing.T) {
		client := &resticUsageTestKubeClient{fakeKubeClient: &fakeKubeClient{}}
		// Empty config (no UsageSecretName): loading should succeed without
		// touching the secret store at all.
		srv := newResticUsageTestServer(&config.Config{}, client)

		if err := srv.loadResticUsage(context.Background()); err != nil {
			t.Fatalf("load restic usage without secret: %v", err)
		}
		if client.loadCalls != 0 {
			t.Fatalf("expected no secret reads, got %d", client.loadCalls)
		}
	})

	t.Run("load error", func(t *testing.T) {
		// The injected read failure must be returned verbatim to the caller.
		client := &resticUsageTestKubeClient{
			fakeKubeClient:    &fakeKubeClient{},
			loadSecretDataErr: errors.New("load exploded"),
		}
		srv := newResticUsageTestServer(&config.Config{
			Namespace:       "atlas",
			UsageSecretName: "restic-usage",
		}, client)

		if err := srv.loadResticUsage(context.Background()); err == nil || err.Error() != "load exploded" {
			t.Fatalf("expected load error, got %v", err)
		}
	})

	t.Run("decode error", func(t *testing.T) {
		// Malformed JSON in the secret must surface as a non-empty error.
		client := &resticUsageTestKubeClient{
			fakeKubeClient: &fakeKubeClient{
				secretData: map[string][]byte{usageSecretKey: []byte(`{bad json`)},
			},
		}
		srv := newResticUsageTestServer(&config.Config{
			Namespace:       "atlas",
			UsageSecretName: "restic-usage",
		}, client)

		if err := srv.loadResticUsage(context.Background()); err == nil || err.Error() == "" {
			t.Fatalf("expected decode error, got %v", err)
		}
	})

	t.Run("filters invalid entries", func(t *testing.T) {
		// Three persisted jobs: one valid, one with a whitespace-only key, one
		// with negative bytes. Only the valid entry may survive the load.
		client := &resticUsageTestKubeClient{
			fakeKubeClient: &fakeKubeClient{
				secretData: map[string][]byte{
					usageSecretKey: []byte(`{
"jobs":[
{"key":"apps/job-a","bytes":1024,"updated_at":"2026-04-20T00:00:00Z"},
{"key":" ","bytes":2048,"updated_at":"2026-04-20T00:00:00Z"},
{"key":"apps/job-b","bytes":-1,"updated_at":"2026-04-20T00:00:00Z"}
]
}`),
				},
			},
		}
		srv := newResticUsageTestServer(&config.Config{
			Namespace:       "atlas",
			UsageSecretName: "restic-usage",
		}, client)

		if err := srv.loadResticUsage(context.Background()); err != nil {
			t.Fatalf("load filtered restic usage: %v", err)
		}

		if len(srv.usageStore) != 1 {
			t.Fatalf("expected only one valid stored entry, got %#v", srv.usageStore)
		}
		if got, ok := srv.lookupPersistedResticUsage("apps/job-a"); !ok || got != 1024 {
			t.Fatalf("expected valid persisted entry, got %f %v", got, ok)
		}
	})
}
|
||||
|
||||
func TestLookupPersistedResticUsageRejectsInvalidStoredValues(t *testing.T) {
|
||||
srv := &Server{
|
||||
usageStore: map[string]resticPersistedUsageEntry{
|
||||
"good": {Bytes: 4096},
|
||||
"neg": {Bytes: -1},
|
||||
"nan": {Bytes: math.NaN()},
|
||||
"pos-inf": {Bytes: math.Inf(1)},
|
||||
},
|
||||
}
|
||||
|
||||
if got, ok := srv.lookupPersistedResticUsage("good"); !ok || got != 4096 {
|
||||
t.Fatalf("expected good persisted entry, got %f %v", got, ok)
|
||||
}
|
||||
for _, key := range []string{"missing", "neg", "nan", "pos-inf"} {
|
||||
if got, ok := srv.lookupPersistedResticUsage(key); ok || got != 0 {
|
||||
t.Fatalf("%s: expected missing/invalid lookup, got %f %v", key, got, ok)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestPersistResticUsageEncodesSortedFilteredDocument verifies that persisting
// the usage store writes exactly one secret containing only the valid entries,
// sorted by job key, with whitespace trimmed from stored timestamps.
func TestPersistResticUsageEncodesSortedFilteredDocument(t *testing.T) {
	client := &resticUsageTestKubeClient{fakeKubeClient: &fakeKubeClient{}}
	srv := newResticUsageTestServer(&config.Config{
		Namespace:       "atlas",
		UsageSecretName: "restic-usage",
	}, client)
	// Seed two valid entries (deliberately out of key order), one blank key,
	// and one negative byte count; only the two valid jobs should be encoded.
	srv.usageStore = map[string]resticPersistedUsageEntry{
		"apps/job-b": {Bytes: 2048, UpdatedAt: "2026-04-20T01:00:00Z"},
		"apps/job-a": {Bytes: 1024, UpdatedAt: " 2026-04-20T00:00:00Z "},
		"":           {Bytes: 1},
		"apps/bad":   {Bytes: -1},
	}

	if err := srv.persistResticUsage(context.Background()); err != nil {
		t.Fatalf("persist restic usage: %v", err)
	}
	if client.saveCalls != 1 {
		t.Fatalf("expected one save call, got %d", client.saveCalls)
	}

	// Decode the document that was written into the secret and verify the
	// filtering, ordering, and timestamp-trimming behavior.
	raw := client.fakeKubeClient.secretData[usageSecretKey]
	var doc resticPersistedUsageDocument
	if err := json.Unmarshal(raw, &doc); err != nil {
		t.Fatalf("decode persisted usage document: %v", err)
	}
	if len(doc.Jobs) != 2 {
		t.Fatalf("expected two valid persisted jobs, got %#v", doc.Jobs)
	}
	if doc.Jobs[0].Key != "apps/job-a" || doc.Jobs[1].Key != "apps/job-b" {
		t.Fatalf("expected sorted job keys, got %#v", doc.Jobs)
	}
	if doc.Jobs[0].UpdatedAt != "2026-04-20T00:00:00Z" {
		t.Fatalf("expected trimmed timestamp, got %#v", doc.Jobs[0])
	}
}
|
||||
|
||||
// TestStorePersistedResticUsageCoversNoopAndUpdateBranches walks the input
// guards, the insert path, the unchanged-value dedupe, and the blank-timestamp
// repersist branch of storePersistedResticUsage, tracking save-call counts.
// NOTE: the assertions below are strictly order-dependent (they accumulate
// against the same srv/client).
func TestStorePersistedResticUsageCoversNoopAndUpdateBranches(t *testing.T) {
	client := &resticUsageTestKubeClient{fakeKubeClient: &fakeKubeClient{}}
	srv := newResticUsageTestServer(&config.Config{
		Namespace:       "atlas",
		UsageSecretName: "restic-usage",
	}, client)

	// Invalid inputs — blank key, negative bytes, NaN, +Inf — must all be
	// rejected before any persistence happens.
	srv.storePersistedResticUsage(context.Background(), "", 123)
	srv.storePersistedResticUsage(context.Background(), "apps/job-a", -1)
	srv.storePersistedResticUsage(context.Background(), "apps/job-a", math.NaN())
	srv.storePersistedResticUsage(context.Background(), "apps/job-a", math.Inf(1))
	if client.saveCalls != 0 {
		t.Fatalf("expected invalid inputs to skip persistence, got %d saves", client.saveCalls)
	}

	// A valid new entry is recorded with a non-empty timestamp and saved once.
	srv.storePersistedResticUsage(context.Background(), "apps/job-a", 2048)
	entry, ok := srv.usageStore["apps/job-a"]
	if !ok || entry.Bytes != 2048 || entry.UpdatedAt == "" {
		t.Fatalf("expected stored usage entry, got %#v %v", entry, ok)
	}
	if client.saveCalls != 1 {
		t.Fatalf("expected one save after new entry, got %d", client.saveCalls)
	}

	// Re-storing the identical value is a no-op: no additional save.
	srv.storePersistedResticUsage(context.Background(), "apps/job-a", 2048)
	if client.saveCalls != 1 {
		t.Fatalf("expected unchanged entry to skip persistence, got %d saves", client.saveCalls)
	}

	// An existing entry whose UpdatedAt is empty must be repersisted even
	// though its byte count is unchanged.
	srv.usageStore["apps/job-b"] = resticPersistedUsageEntry{Bytes: 512}
	srv.storePersistedResticUsage(context.Background(), "apps/job-b", 512)
	if client.saveCalls != 2 {
		t.Fatalf("expected blank timestamp entry to repersist, got %d saves", client.saveCalls)
	}
}
|
||||
|
||||
// TestLookupResticStoredBytesForJobCoversCachePersistedAndLogFallback covers
// the lookup priority for per-job restic usage: a fresh in-memory cache entry
// wins without any I/O, the persisted store warms the cache on a miss, a job
// log parse both answers and persists on a full miss, and a failed log read
// caches an unknown result.
func TestLookupResticStoredBytesForJobCoversCachePersistedAndLogFallback(t *testing.T) {
	t.Run("fresh cache", func(t *testing.T) {
		client := &resticUsageTestKubeClient{fakeKubeClient: &fakeKubeClient{}}
		srv := newResticUsageTestServer(&config.Config{}, client)
		// Pre-warm the cache with a known, just-checked entry so the lookup
		// should short-circuit without reading any job log.
		srv.jobUsage["apps/job-a"] = resticJobUsageCacheEntry{
			Known:     true,
			Bytes:     512,
			CheckedAt: time.Now().UTC(),
		}

		got, ok := srv.lookupResticStoredBytesForJob(context.Background(), "apps", "job-a")
		if !ok || got != 512 {
			t.Fatalf("expected fresh cached bytes, got %f %v", got, ok)
		}
		if client.readCalls != 0 {
			t.Fatalf("expected no log reads for fresh cache, got %d", client.readCalls)
		}
	})

	t.Run("persisted usage populates cache", func(t *testing.T) {
		client := &resticUsageTestKubeClient{fakeKubeClient: &fakeKubeClient{}}
		srv := newResticUsageTestServer(&config.Config{}, client)
		// No cache entry, but a persisted value exists for the job key.
		srv.usageStore["apps/job-a"] = resticPersistedUsageEntry{Bytes: 1024}

		got, ok := srv.lookupResticStoredBytesForJob(context.Background(), "apps", "job-a")
		if !ok || got != 1024 {
			t.Fatalf("expected persisted bytes, got %f %v", got, ok)
		}
		// The persisted hit should also have been written back into the cache.
		cached := srv.jobUsage["apps/job-a"]
		if !cached.Known || cached.Bytes != 1024 {
			t.Fatalf("expected cache warm from persisted usage, got %#v", cached)
		}
	})

	t.Run("job log parse stores persisted usage", func(t *testing.T) {
		// Full miss: neither cache nor persisted store knows the job, so the
		// restic summary line in the job log supplies the byte count.
		client := &resticUsageTestKubeClient{
			fakeKubeClient: &fakeKubeClient{
				jobLogs: map[string]string{
					"apps/job-a": `{"message_type":"summary","data_added":2048}`,
				},
			},
		}
		srv := newResticUsageTestServer(&config.Config{
			Namespace:       "atlas",
			UsageSecretName: "restic-usage",
		}, client)

		got, ok := srv.lookupResticStoredBytesForJob(context.Background(), "apps", "job-a")
		if !ok || got != 2048 {
			t.Fatalf("expected parsed bytes from job log, got %f %v", got, ok)
		}
		if client.readCalls != 1 {
			t.Fatalf("expected one job log read, got %d", client.readCalls)
		}
		// Parsing the log should persist the result via one secret save.
		if client.saveCalls != 1 {
			t.Fatalf("expected parsed bytes to persist, got %d saves", client.saveCalls)
		}
		if persisted, ok := srv.lookupPersistedResticUsage("apps/job-a"); !ok || persisted != 2048 {
			t.Fatalf("expected persisted bytes after log parse, got %f %v", persisted, ok)
		}
	})

	t.Run("log read failure caches unknown result", func(t *testing.T) {
		client := &resticUsageTestKubeClient{
			fakeKubeClient:   &fakeKubeClient{},
			readBackupLogErr: errors.New("log exploded"),
		}
		srv := newResticUsageTestServer(&config.Config{}, client)

		got, ok := srv.lookupResticStoredBytesForJob(context.Background(), "apps", "job-a")
		if ok || got != 0 {
			t.Fatalf("expected unknown result on log read failure, got %f %v", got, ok)
		}
		// The failure is memoized as a not-known cache entry.
		cached := srv.jobUsage["apps/job-a"]
		if cached.Known {
			t.Fatalf("expected unknown cache entry, got %#v", cached)
		}
	})
}
|
||||
Loading…
x
Reference in New Issue
Block a user