test(k8s): cover client-backed job and state flows

This commit is contained in:
codex 2026-04-20 18:38:12 -03:00
parent 214f89cf0d
commit 7df3552f4f
4 changed files with 563 additions and 189 deletions

View File

@ -10,7 +10,7 @@ import (
)
type Client struct {
Clientset *kubernetes.Clientset
Clientset kubernetes.Interface
}
func New() (*Client, error) {

View File

@ -1,6 +1,7 @@
package k8s
import (
"context"
"strings"
"testing"
"time"
@ -9,205 +10,310 @@ import (
"scm.bstein.dev/bstein/soteria/internal/config"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfake "k8s.io/client-go/kubernetes/fake"
)
// TestJobNameSanitizesAndTruncates verifies that jobName produces a
// Kubernetes-safe object name: at most 63 characters, free of spaces,
// underscores and dots, and carrying the soteria action prefix.
func TestJobNameSanitizesAndTruncates(t *testing.T) {
	got := jobName("backup", "PVC.With Spaces_and_symbols________________________________")
	switch {
	case len(got) > 63:
		t.Fatalf("expected kubernetes-safe name length, got %d for %q", len(got), got)
	case strings.ContainsAny(got, " _."):
		t.Fatalf("expected sanitized name, got %q", got)
	case !strings.HasPrefix(got, "soteria-backup-"):
		t.Fatalf("expected soteria backup prefix, got %q", got)
	}
}
// TestHelperDefaultsAndParsers exercises the small option helpers:
// dedupe defaulting (nil means enabled), keep-last clamping of nil and
// negative values to zero, and lenient bool/int string parsing.
func TestHelperDefaultsAndParsers(t *testing.T) {
	if !dedupeEnabled(nil) {
		t.Fatalf("expected nil dedupe to default true")
	}
	off := false
	if dedupeEnabled(&off) {
		t.Fatalf("expected explicit false dedupe to remain false")
	}
	if got := keepLastWithDefault(nil); got != 0 {
		t.Fatalf("expected nil keep_last to default to zero")
	}
	belowZero := -5
	if got := keepLastWithDefault(&belowZero); got != 0 {
		t.Fatalf("expected negative keep_last to clamp to zero")
	}
	// "yes" should parse truthy; "off" should parse falsy despite the true default.
	if !parseBoolWithDefault("yes", false) || parseBoolWithDefault("off", true) {
		t.Fatalf("unexpected bool parsing")
	}
	// Negative and malformed ints fall back to the supplied default.
	if parseIntWithDefault("12", 0) != 12 || parseIntWithDefault("-1", 7) != 7 || parseIntWithDefault("bad", 9) != 9 {
		t.Fatalf("unexpected int parsing")
	}
}
// TestRepositoryHelpers checks repository derivation: dedupe-enabled backups
// share the base repository, dedupe-disabled backups get a sanitized isolated
// suffix, blank segments fall back to "unknown", and path joining normalizes
// slashes.
func TestRepositoryHelpers(t *testing.T) {
	const shared = "s3:https://b2.example.invalid/atlas"
	if got := resticRepositoryForBackup(shared, "apps", "data", true); got != shared {
		t.Fatalf("expected dedupe-enabled repository to stay shared, got %q", got)
	}
	perPVC := resticRepositoryForBackup(shared, "Apps", "Data.Volume", false)
	if !strings.Contains(perPVC, "/isolated/apps/data-volume") {
		t.Fatalf("expected isolated repository suffix, got %q", perPVC)
	}
	if sanitizeRepositorySegment(" ") != "unknown" {
		t.Fatalf("expected blank repository segment to default to unknown")
	}
	joined := appendRepositoryPath("https://b2.example.invalid/root/", "/child/")
	if joined != "https://b2.example.invalid/root/child" {
		t.Fatalf("unexpected appended repository path %q", joined)
	}
}
// TestBuildBackupJobIncludesResticMetadata builds a backup Job from config and
// request and asserts that object metadata, labels/annotations, the restic
// container command, the service account, and the node selector all carry
// through from the inputs.
func TestBuildBackupJobIncludesResticMetadata(t *testing.T) {
	conf := &config.Config{
		ResticImage:          "restic/restic:test",
		ResticRepository:     "s3:https://b2.example.invalid/atlas",
		ResticBackupArgs:     []string{"--exclude", "/tmp"},
		ResticForgetArgs:     []string{"--keep-last", "7"},
		S3Endpoint:           "https://b2.example.invalid",
		S3Region:             "us-west-000",
		JobTTLSeconds:        900,
		JobNodeSelector:      map[string]string{"hardware": "rpi5"},
		WorkerServiceAccount: "soteria-worker",
	}
	request := api.BackupRequest{
		Namespace: "apps",
		PVC:       "data",
		Tags:      []string{"manual", "drill"},
	}
	job := buildBackupJob(conf, request, "backup-job", "restic-secret", conf.ResticRepository, true, 3)
	if job.Name != "backup-job" || job.Namespace != "apps" {
		t.Fatalf("unexpected job metadata: %#v", job.ObjectMeta)
	}
	if job.Labels[labelPVC] != "data" || job.Annotations[annotationKeepLast] != "3" {
		t.Fatalf("unexpected backup annotations/labels: %#v %#v", job.Labels, job.Annotations)
	}
	podSpec := job.Spec.Template.Spec
	worker := podSpec.Containers[0]
	if worker.Image != "restic/restic:test" || worker.Args[0] == "" {
		t.Fatalf("unexpected restic container: %#v", worker)
	}
	// The generated shell command must embed the PVC tag and configured excludes.
	if !strings.Contains(worker.Args[0], "--tag pvc=data") || !strings.Contains(worker.Args[0], "--exclude /tmp") {
		t.Fatalf("expected generated backup command to include request metadata, got %q", worker.Args[0])
	}
	if podSpec.ServiceAccountName != "soteria-worker" {
		t.Fatalf("expected worker service account, got %#v", podSpec.ServiceAccountName)
	}
	if podSpec.NodeSelector["hardware"] != "rpi5" {
		t.Fatalf("expected node selector to be copied, got %#v", podSpec.NodeSelector)
	}
}
// TestBuildRestoreJobUsesTargetPVC asserts a restore Job is labeled with the
// target PVC and mounts that claim as its first volume.
func TestBuildRestoreJobUsesTargetPVC(t *testing.T) {
	conf := &config.Config{
		ResticImage:      "restic/restic:test",
		ResticRepository: "s3:https://b2.example.invalid/atlas",
		JobTTLSeconds:    600,
	}
	request := api.RestoreTestRequest{
		Namespace: "apps",
		TargetPVC: "restore-data",
	}
	job := buildRestoreJob(conf, request, "restore-job", "restore-secret", "latest", conf.ResticRepository)
	if job.Labels[labelPVC] != "restore-data" {
		t.Fatalf("expected target pvc label, got %#v", job.Labels)
	}
	volume := job.Spec.Template.Spec.Volumes[0]
	if volume.PersistentVolumeClaim == nil || volume.PersistentVolumeClaim.ClaimName != "restore-data" {
		t.Fatalf("expected restore volume pvc claim, got %#v", job.Spec.Template.Spec.Volumes[0])
	}
}
// TestGeneratedResticCommandsAndEnv covers the shell command builders (backup
// and restore), the restic environment variable set, and the int32 pointer
// helper.
func TestGeneratedResticCommandsAndEnv(t *testing.T) {
	conf := &config.Config{
		ResticRepository: "s3:https://b2.example.invalid/atlas",
		ResticBackupArgs: []string{"--exclude", "/cache"},
		S3Endpoint:       "https://b2.example.invalid",
		S3Region:         "us-west-000",
	}
	retain := 2
	backup := backupCommand(conf, api.BackupRequest{
		PVC:      "data",
		Dedupe:   boolPtr(false),
		KeepLast: &retain,
		Tags:     []string{"nightly"},
	})
	// The command must init the repo, honor keep-last, and tag dedupe=off.
	if !strings.Contains(backup, "restic init") || !strings.Contains(backup, "--keep-last 2") || !strings.Contains(backup, "dedupe=off") {
		t.Fatalf("unexpected backup command %q", backup)
	}
	restore := restoreCommand("latest")
	if !strings.Contains(restore, "restic restore latest") || !strings.Contains(restore, "/restore/") {
		t.Fatalf("unexpected restore command %q", restore)
	}
	// Index the env entries by name so individual values are easy to check.
	byName := map[string]string{}
	for _, entry := range resticEnv(conf, "restic-secret", "") {
		byName[entry.Name] = entry.Value
	}
	if byName["RESTIC_REPOSITORY"] != conf.ResticRepository || byName["AWS_REGION"] != "us-west-000" {
		t.Fatalf("unexpected env values %#v", byName)
	}
	if int32Ptr(7) == nil || *int32Ptr(7) != 7 {
		t.Fatalf("expected int32 pointer helper to round-trip")
	}
}
func TestSummarizeAndSortBackupJobs(t *testing.T) {
func TestListBackupJobsAndListBackupJobsForPVCCoverFilteringAndSorting(t *testing.T) {
now := time.Now().UTC()
completed := batchv1.Job{
recent := metav1.NewTime(now.Add(-1 * time.Hour))
old := metav1.NewTime(now.Add(-3 * time.Hour))
client := &Client{Clientset: k8sfake.NewSimpleClientset(
&batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "job-complete",
Name: "backup-zeta",
Namespace: "apps",
CreationTimestamp: metav1.NewTime(now.Add(-2 * time.Hour)),
Labels: map[string]string{
labelAppName: "soteria",
labelComponent: "backup",
labelAction: "backup",
labelPVC: "data",
},
Annotations: map[string]string{
annotationResticRepository: "s3:https://b2.example.invalid/atlas",
annotationResticRepository: "s3:https://repo/data",
annotationDedupeEnabled: "false",
annotationKeepLast: "3",
},
CreationTimestamp: old,
},
Status: batchv1.JobStatus{Succeeded: 1, CompletionTime: &old},
},
&batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "backup-alpha",
Namespace: "apps",
Labels: map[string]string{
labelAppName: "soteria",
labelComponent: "backup",
labelAction: "backup",
labelPVC: "data",
},
Annotations: map[string]string{
annotationResticRepository: "s3:https://repo/data",
annotationDedupeEnabled: "true",
annotationKeepLast: "5",
},
CreationTimestamp: recent,
},
Status: batchv1.JobStatus{
Succeeded: 1,
CompletionTime: &metav1.Time{Time: now.Add(-time.Hour)},
Status: batchv1.JobStatus{Active: 1},
},
}
running := batchv1.Job{
&batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "job-running",
Name: "backup-other",
Namespace: "apps",
CreationTimestamp: metav1.NewTime(now.Add(-30 * time.Minute)),
Labels: map[string]string{
labelAppName: "soteria",
labelComponent: "backup",
labelAction: "backup",
labelPVC: "cache",
},
Status: batchv1.JobStatus{
Active: 1,
CreationTimestamp: recent,
},
Status: batchv1.JobStatus{Failed: 1},
},
&batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "skip-missing-pvc",
Namespace: "apps",
Labels: map[string]string{
labelAppName: "soteria",
labelComponent: "backup",
labelAction: "backup",
},
CreationTimestamp: recent,
},
},
)}
items, err := client.ListBackupJobs(context.Background(), "apps")
if err != nil {
t.Fatalf("list backup jobs: %v", err)
}
if len(items) != 3 {
t.Fatalf("expected three pvc-tagged backup jobs, got %#v", items)
}
if items[0].Name != "backup-other" || items[1].Name != "backup-alpha" || items[2].Name != "backup-zeta" {
t.Fatalf("expected sorted backup jobs, got %#v", items)
}
if items[1].State != "Running" || items[2].State != "Completed" || items[0].State != "Failed" {
t.Fatalf("expected summarized backup states, got %#v", items)
}
if items[1].DedupeEnabled != true || items[2].DedupeEnabled != false || items[2].KeepLast != 3 {
t.Fatalf("expected annotation-derived summary fields, got %#v", items)
}
completedSummary := summarizeBackupJob(completed, "data")
if completedSummary.State != "Completed" || completedSummary.DedupeEnabled || completedSummary.KeepLast != 5 {
t.Fatalf("unexpected completed summary %#v", completedSummary)
items, err = client.ListBackupJobsForPVC(context.Background(), "apps", "data")
if err != nil {
t.Fatalf("list backup jobs for pvc: %v", err)
}
runningSummary := summarizeBackupJob(running, "data")
if runningSummary.State != "Running" {
t.Fatalf("unexpected running summary %#v", runningSummary)
}
items := []BackupJobSummary{completedSummary, runningSummary}
sortBackupJobSummaries(items)
if items[0].Name != "job-running" {
t.Fatalf("expected newer running job first, got %#v", items)
if len(items) != 2 || items[0].Name != "backup-alpha" || items[1].Name != "backup-zeta" {
t.Fatalf("expected filtered pvc job list, got %#v", items)
}
}
func boolPtr(value bool) *bool {
return &value
// TestResolvePVCMountedNodeIgnoresDeadPodsAndFindsMountedClaim verifies pod
// selection: terminating pods, pods without a node, and finished pods are all
// skipped, and the running pod that mounts the claim supplies the node name.
// A claim mounted nowhere resolves to an empty node name without error.
func TestResolvePVCMountedNodeIgnoresDeadPodsAndFindsMountedClaim(t *testing.T) {
	deletion := metav1.NewTime(time.Now().UTC())
	// pvcSource builds a volume source referencing the named claim.
	pvcSource := func(claim string) corev1.VolumeSource {
		return corev1.VolumeSource{
			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: claim},
		}
	}
	client := &Client{Clientset: k8sfake.NewSimpleClientset(
		// Terminating pod: ignored even though it has a node.
		&corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "deleted", Namespace: "apps", DeletionTimestamp: &deletion},
			Spec:       corev1.PodSpec{NodeName: "titan-00"},
		},
		// Pod mounting the claim but not yet scheduled to a node.
		&corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "no-node", Namespace: "apps"},
			Spec: corev1.PodSpec{
				Volumes: []corev1.Volume{{Name: "data", VolumeSource: pvcSource("data")}},
			},
		},
		// Pod that mounted the claim but already succeeded.
		&corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "finished", Namespace: "apps"},
			Spec: corev1.PodSpec{
				NodeName: "titan-01",
				Volumes:  []corev1.Volume{{Name: "data", VolumeSource: pvcSource("data")}},
			},
			Status: corev1.PodStatus{Phase: corev1.PodSucceeded},
		},
		// Running pod mounting the claim among others: the expected match.
		&corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "active", Namespace: "apps"},
			Spec: corev1.PodSpec{
				NodeName: "titan-02",
				Volumes: []corev1.Volume{
					{Name: "cache", VolumeSource: pvcSource("cache")},
					{Name: "data", VolumeSource: pvcSource("data")},
				},
			},
			Status: corev1.PodStatus{Phase: corev1.PodRunning},
		},
	)}
	node, err := client.resolvePVCMountedNode(context.Background(), "apps", "data")
	if err != nil || node != "titan-02" {
		t.Fatalf("expected mounted pvc node, got %q %v", node, err)
	}
	node, err = client.resolvePVCMountedNode(context.Background(), "apps", "missing")
	if err != nil || node != "" {
		t.Fatalf("expected missing pvc mount to return empty node, got %q %v", node, err)
	}
}
// TestCreateBackupJobCoversValidationDryRunAndLiveCreation walks
// CreateBackupJob through request validation failures, a dry run that returns
// names without creating objects, and a live run that pins the job to the
// PVC's node, applies the dedupe/keep-last annotations, and copies the restic
// secret with a job owner reference.
func TestCreateBackupJobCoversValidationDryRunAndLiveCreation(t *testing.T) {
	ctx := context.Background()
	clientset := k8sfake.NewSimpleClientset(
		&corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{Name: "restic-src", Namespace: "shared"},
			Type:       corev1.SecretTypeOpaque,
			Data: map[string][]byte{
				"AWS_ACCESS_KEY_ID":     []byte("abc"),
				"AWS_SECRET_ACCESS_KEY": []byte("def"),
				"RESTIC_PASSWORD":       []byte("ghi"),
			},
		},
		// Running pod mounting the PVC; its node should be used for the job.
		&corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "app-pod", Namespace: "apps"},
			Spec: corev1.PodSpec{
				NodeName: "titan-02",
				Volumes: []corev1.Volume{{Name: "data", VolumeSource: corev1.VolumeSource{
					PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: "data"},
				}}},
			},
			Status: corev1.PodStatus{Phase: corev1.PodRunning},
		},
	)
	client := &Client{Clientset: clientset}
	conf := &config.Config{
		SecretNamespace:      "shared",
		ResticSecretName:     "restic-src",
		ResticRepository:     "s3:https://repo/root",
		ResticImage:          "restic/restic:latest",
		JobTTLSeconds:        3600,
		WorkerServiceAccount: "soteria-sa",
	}
	// Validation failures.
	if _, _, err := client.CreateBackupJob(ctx, conf, api.BackupRequest{PVC: "data"}); err == nil {
		t.Fatalf("expected missing namespace validation error")
	}
	if _, _, err := client.CreateBackupJob(ctx, conf, api.BackupRequest{Namespace: "apps"}); err == nil {
		t.Fatalf("expected missing pvc validation error")
	}
	if _, _, err := client.CreateBackupJob(ctx, conf, api.BackupRequest{Namespace: "apps", PVC: "data", Snapshot: true}); err == nil {
		t.Fatalf("expected unsupported snapshot error")
	}
	// Dry run: names come back but nothing is created.
	jobName, secretName, err := client.CreateBackupJob(ctx, conf, api.BackupRequest{
		Namespace: "apps",
		PVC:       "data",
		DryRun:    true,
	})
	if err != nil || jobName == "" || secretName == "" {
		t.Fatalf("expected dry-run names, got job=%q secret=%q err=%v", jobName, secretName, err)
	}
	if _, err := client.Clientset.BatchV1().Jobs("apps").Get(ctx, jobName, metav1.GetOptions{}); err == nil {
		t.Fatalf("expected dry-run to skip job creation")
	}
	// Live creation with explicit dedupe/keep-last options.
	dedupeOff := false
	retain := 3
	jobName, secretName, err = client.CreateBackupJob(ctx, conf, api.BackupRequest{
		Namespace: "apps",
		PVC:       "data",
		Dedupe:    &dedupeOff,
		KeepLast:  &retain,
	})
	if err != nil {
		t.Fatalf("create live backup job: %v", err)
	}
	job, err := client.Clientset.BatchV1().Jobs("apps").Get(ctx, jobName, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("get created backup job: %v", err)
	}
	if job.Spec.Template.Spec.NodeName != "titan-02" || job.Spec.Template.Spec.ServiceAccountName != "soteria-sa" {
		t.Fatalf("expected node pin + service account, got %#v", job.Spec.Template.Spec)
	}
	if job.Annotations[annotationDedupeEnabled] != "false" || job.Annotations[annotationKeepLast] != "3" {
		t.Fatalf("expected backup annotations, got %#v", job.Annotations)
	}
	secret, err := client.Clientset.CoreV1().Secrets("apps").Get(ctx, secretName, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("get copied backup secret: %v", err)
	}
	if len(secret.OwnerReferences) != 1 || secret.OwnerReferences[0].Name != job.Name || secret.OwnerReferences[0].Kind != "Job" {
		t.Fatalf("expected job owner reference on copied secret, got %#v", secret.OwnerReferences)
	}
}
// TestCreateRestoreJobCoversValidationDryRunAndLiveCreation walks
// CreateRestoreJob through request validation, a dry run (which must return
// names without creating the Job — mirroring the backup test), and a live run
// that mounts the target PVC, embeds the snapshot in the restore command, and
// copies the restic secret with a job owner reference.
func TestCreateRestoreJobCoversValidationDryRunAndLiveCreation(t *testing.T) {
	clientset := k8sfake.NewSimpleClientset(
		&corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{Name: "restic-src", Namespace: "shared"},
			Type:       corev1.SecretTypeOpaque,
			Data: map[string][]byte{
				"AWS_ACCESS_KEY_ID":     []byte("abc"),
				"AWS_SECRET_ACCESS_KEY": []byte("def"),
				"RESTIC_PASSWORD":       []byte("ghi"),
			},
		},
	)
	client := &Client{Clientset: clientset}
	cfg := &config.Config{
		SecretNamespace:      "shared",
		ResticSecretName:     "restic-src",
		ResticRepository:     "s3:https://repo/root",
		ResticImage:          "restic/restic:latest",
		JobTTLSeconds:        3600,
		WorkerServiceAccount: "soteria-sa",
	}
	// Validation failure: namespace is required.
	if _, _, err := client.CreateRestoreJob(context.Background(), cfg, api.RestoreTestRequest{}); err == nil {
		t.Fatalf("expected missing namespace validation error")
	}
	// Dry run returns names without creating anything.
	jobName, secretName, err := client.CreateRestoreJob(context.Background(), cfg, api.RestoreTestRequest{
		Namespace: "apps",
		DryRun:    true,
	})
	if err != nil || jobName == "" || secretName == "" {
		t.Fatalf("expected restore dry-run names, got job=%q secret=%q err=%v", jobName, secretName, err)
	}
	// Consistency with the backup test: dry run must not create the Job object.
	if _, err := client.Clientset.BatchV1().Jobs("apps").Get(context.Background(), jobName, metav1.GetOptions{}); err == nil {
		t.Fatalf("expected restore dry-run to skip job creation")
	}
	// Live creation targeting a specific PVC and snapshot.
	jobName, secretName, err = client.CreateRestoreJob(context.Background(), cfg, api.RestoreTestRequest{
		Namespace: "apps",
		TargetPVC: "restore-data",
		Snapshot:  "snap-123",
	})
	if err != nil {
		t.Fatalf("create live restore job: %v", err)
	}
	job, err := client.Clientset.BatchV1().Jobs("apps").Get(context.Background(), jobName, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("get created restore job: %v", err)
	}
	if job.Labels[labelPVC] != "restore-data" {
		t.Fatalf("expected target pvc label on restore job, got %#v", job.Labels)
	}
	if job.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim == nil || job.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ClaimName != "restore-data" {
		t.Fatalf("expected restore pvc volume, got %#v", job.Spec.Template.Spec.Volumes)
	}
	if !strings.Contains(job.Spec.Template.Spec.Containers[0].Args[0], "restic restore snap-123") {
		t.Fatalf("expected restore command to include snapshot, got %#v", job.Spec.Template.Spec.Containers[0].Args)
	}
	secret, err := client.Clientset.CoreV1().Secrets("apps").Get(context.Background(), secretName, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("get copied restore secret: %v", err)
	}
	if len(secret.OwnerReferences) != 1 || secret.OwnerReferences[0].Name != job.Name {
		t.Fatalf("expected restore job owner reference on copied secret, got %#v", secret.OwnerReferences)
	}
}

141
internal/k8s/state_test.go Normal file
View File

@ -0,0 +1,141 @@
package k8s
import (
"context"
"errors"
"testing"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
k8sfake "k8s.io/client-go/kubernetes/fake"
k8stesting "k8s.io/client-go/testing"
)
// TestLoadSecretDataCoversMissingSecretValueAndCopy checks that LoadSecretData
// returns nil for an absent secret or an absent key, returns the stored value
// otherwise, and hands back a defensive copy rather than the stored slice.
func TestLoadSecretDataCoversMissingSecretValueAndCopy(t *testing.T) {
	ctx := context.Background()
	client := &Client{Clientset: k8sfake.NewSimpleClientset(
		&corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{Name: "filled", Namespace: "atlas"},
			Data:       map[string][]byte{"token": []byte("atlas-secret")},
		},
		&corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{Name: "empty", Namespace: "atlas"},
		},
	)}
	payload, err := client.LoadSecretData(ctx, "atlas", "missing", "token")
	if err != nil || payload != nil {
		t.Fatalf("expected missing secret to return nil, got %q %v", string(payload), err)
	}
	payload, err = client.LoadSecretData(ctx, "atlas", "empty", "token")
	if err != nil || payload != nil {
		t.Fatalf("expected empty secret key to return nil, got %q %v", string(payload), err)
	}
	payload, err = client.LoadSecretData(ctx, "atlas", "filled", "token")
	if err != nil || string(payload) != "atlas-secret" {
		t.Fatalf("expected copied secret value, got %q %v", string(payload), err)
	}
	// Mutating the returned bytes must not change the stored secret.
	payload[0] = 'X'
	stored, err := client.Clientset.CoreV1().Secrets("atlas").Get(ctx, "filled", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("reload filled secret: %v", err)
	}
	if string(stored.Data["token"]) != "atlas-secret" {
		t.Fatalf("expected returned bytes to be copied, got stored=%q", string(stored.Data["token"]))
	}
}
// TestLoadSecretDataWrapsUnexpectedErrors makes the fake API fail its secret
// GET and asserts the error comes back wrapped with context, not verbatim.
func TestLoadSecretDataWrapsUnexpectedErrors(t *testing.T) {
	clientset := k8sfake.NewSimpleClientset()
	clientset.PrependReactor("get", "secrets", func(k8stesting.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("get exploded")
	})
	client := &Client{Clientset: clientset}
	_, err := client.LoadSecretData(context.Background(), "atlas", "filled", "token")
	if err == nil || err.Error() == "get exploded" {
		t.Fatalf("expected wrapped get error, got %v", err)
	}
}
// TestSaveSecretDataCreatesAndUpdatesSecrets covers both write paths: creating
// a new secret with labels, then updating its payload while merging in
// additional labels.
func TestSaveSecretDataCreatesAndUpdatesSecrets(t *testing.T) {
	ctx := context.Background()
	client := &Client{Clientset: k8sfake.NewSimpleClientset()}
	if err := client.SaveSecretData(ctx, "atlas", "restic-usage", "usage.json", []byte("first"), map[string]string{"app": "soteria"}); err != nil {
		t.Fatalf("create secret data: %v", err)
	}
	created, err := client.Clientset.CoreV1().Secrets("atlas").Get(ctx, "restic-usage", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("get created secret: %v", err)
	}
	if string(created.Data["usage.json"]) != "first" || created.Labels["app"] != "soteria" {
		t.Fatalf("unexpected created secret: %#v", created)
	}
	// Give the stored object a resource version before the second save so the
	// update path operates on an existing, versioned secret.
	created.ResourceVersion = "1"
	if _, err := client.Clientset.CoreV1().Secrets("atlas").Update(ctx, created, metav1.UpdateOptions{}); err != nil {
		t.Fatalf("prime created secret resource version: %v", err)
	}
	if err := client.SaveSecretData(ctx, "atlas", "restic-usage", "usage.json", []byte("second"), map[string]string{"component": "usage-store"}); err != nil {
		t.Fatalf("update secret data: %v", err)
	}
	updated, err := client.Clientset.CoreV1().Secrets("atlas").Get(ctx, "restic-usage", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("get updated secret: %v", err)
	}
	if string(updated.Data["usage.json"]) != "second" {
		t.Fatalf("expected updated secret payload, got %#v", updated.Data)
	}
	// Labels from the create must survive and merge with the update's labels.
	if updated.Labels["app"] != "soteria" || updated.Labels["component"] != "usage-store" {
		t.Fatalf("expected merged labels, got %#v", updated.Labels)
	}
}
// TestSaveSecretDataWrapsGetAndWriteErrors injects a failure into each API
// call SaveSecretData can make (get, create, update) and asserts every error
// is returned wrapped with context rather than verbatim.
func TestSaveSecretDataWrapsGetAndWriteErrors(t *testing.T) {
	t.Run("get error", func(t *testing.T) {
		clientset := k8sfake.NewSimpleClientset()
		clientset.PrependReactor("get", "secrets", func(k8stesting.Action) (bool, runtime.Object, error) {
			return true, nil, errors.New("get exploded")
		})
		client := &Client{Clientset: clientset}
		err := client.SaveSecretData(context.Background(), "atlas", "restic-usage", "usage.json", []byte("value"), nil)
		if err == nil || err.Error() == "get exploded" {
			t.Fatalf("expected wrapped get error, got %v", err)
		}
	})
	t.Run("create error", func(t *testing.T) {
		clientset := k8sfake.NewSimpleClientset()
		// Force the not-found branch so SaveSecretData attempts a create.
		clientset.PrependReactor("get", "secrets", func(k8stesting.Action) (bool, runtime.Object, error) {
			return true, nil, apierrors.NewNotFound(corev1.Resource("secrets"), "restic-usage")
		})
		clientset.PrependReactor("create", "secrets", func(k8stesting.Action) (bool, runtime.Object, error) {
			return true, nil, errors.New("create exploded")
		})
		client := &Client{Clientset: clientset}
		err := client.SaveSecretData(context.Background(), "atlas", "restic-usage", "usage.json", []byte("value"), nil)
		if err == nil || err.Error() == "create exploded" {
			t.Fatalf("expected wrapped create error, got %v", err)
		}
	})
	t.Run("update error", func(t *testing.T) {
		// Seed an existing secret so SaveSecretData takes the update path.
		clientset := k8sfake.NewSimpleClientset(&corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{Name: "restic-usage", Namespace: "atlas"},
			Data:       map[string][]byte{},
		})
		clientset.PrependReactor("update", "secrets", func(k8stesting.Action) (bool, runtime.Object, error) {
			return true, nil, errors.New("update exploded")
		})
		client := &Client{Clientset: clientset}
		err := client.SaveSecretData(context.Background(), "atlas", "restic-usage", "usage.json", []byte("value"), nil)
		if err == nil || err.Error() == "update exploded" {
			t.Fatalf("expected wrapped update error, got %v", err)
		}
	})
}

View File

@ -0,0 +1,127 @@
package k8s
import (
"context"
"testing"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
k8sfake "k8s.io/client-go/kubernetes/fake"
k8stesting "k8s.io/client-go/testing"
)
// TestResolvePVCVolumeCoversSuccessAndFailures resolves a bound claim to its
// PV, then exercises the unbound-claim, missing-claim, and missing-PV error
// paths.
func TestResolvePVCVolumeCoversSuccessAndFailures(t *testing.T) {
	bound := &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "apps"},
		Spec:       corev1.PersistentVolumeClaimSpec{VolumeName: "pv-data"},
	}
	backing := &corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "pv-data"},
	}
	// Claim with no VolumeName: not yet bound to a PV.
	unbound := &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "pending", Namespace: "apps"},
	}
	client := &Client{Clientset: k8sfake.NewSimpleClientset(bound, unbound, backing)}
	volumeName, pvc, pv, err := client.ResolvePVCVolume(context.Background(), "apps", "data")
	if err != nil || volumeName != "pv-data" || pvc.Name != "data" || pv.Name != "pv-data" {
		t.Fatalf("expected resolved pvc volume, got volume=%q pvc=%v pv=%v err=%v", volumeName, pvc, pv, err)
	}
	if _, _, _, err := client.ResolvePVCVolume(context.Background(), "apps", "pending"); err == nil {
		t.Fatalf("expected unbound pvc error")
	}
	if _, _, _, err := client.ResolvePVCVolume(context.Background(), "apps", "missing"); err == nil {
		t.Fatalf("expected missing pvc error")
	}
	// Claim exists, but its PV lookup reports not-found.
	failing := k8sfake.NewSimpleClientset(bound)
	failing.PrependReactor("get", "persistentvolumes", func(k8stesting.Action) (bool, runtime.Object, error) {
		return true, nil, apierrors.NewNotFound(schema.GroupResource{Resource: "persistentvolumes"}, "pv-data")
	})
	client = &Client{Clientset: failing}
	if _, _, _, err := client.ResolvePVCVolume(context.Background(), "apps", "data"); err == nil {
		t.Fatalf("expected missing pv error")
	}
}
// TestListBoundPVCsAndExistsCoversFilteringSortingAndCapacityFallback lists
// claims across namespaces and checks that unbound or volume-less claims are
// filtered, results sort by namespace then name, capacity falls back from
// status to the spec request, access modes are captured, and the existence
// helper distinguishes present from absent claims.
func TestListBoundPVCsAndExistsCoversFilteringSortingAndCapacityFallback(t *testing.T) {
	className := "fast"
	client := &Client{Clientset: k8sfake.NewSimpleClientset(
		// Bound claim whose capacity lives only in the spec request.
		&corev1.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{Name: "zeta", Namespace: "ops"},
			Spec: corev1.PersistentVolumeClaimSpec{
				VolumeName:       "pv-zeta",
				StorageClassName: &className,
				AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
				Resources: corev1.VolumeResourceRequirements{
					Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("2Gi")},
				},
			},
			Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimBound},
		},
		// Bound claim whose capacity is reported in status.
		&corev1.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{Name: "alpha", Namespace: "apps"},
			Spec: corev1.PersistentVolumeClaimSpec{
				VolumeName:  "pv-alpha",
				AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			},
			Status: corev1.PersistentVolumeClaimStatus{
				Phase:    corev1.ClaimBound,
				Capacity: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("5Gi")},
			},
		},
		// Pending claim: must be filtered out.
		&corev1.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{Name: "skip-unbound", Namespace: "apps"},
			Status:     corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending},
		},
		// Bound claim without a volume name: must also be filtered out.
		&corev1.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{Name: "skip-novol", Namespace: "apps"},
			Status:     corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimBound},
		},
	)}
	summaries, err := client.ListBoundPVCs(context.Background())
	if err != nil {
		t.Fatalf("list bound pvcs: %v", err)
	}
	if len(summaries) != 2 {
		t.Fatalf("expected two bound pvc summaries, got %#v", summaries)
	}
	if summaries[0].Namespace != "apps" || summaries[0].Name != "alpha" || summaries[1].Namespace != "ops" || summaries[1].Name != "zeta" {
		t.Fatalf("expected sorted pvc summaries, got %#v", summaries)
	}
	if summaries[0].Capacity != "5Gi" || summaries[1].Capacity != "2Gi" {
		t.Fatalf("expected status/spec capacity fallback, got %#v", summaries)
	}
	if len(summaries[0].AccessModes) != 1 || summaries[0].AccessModes[0] != string(corev1.ReadWriteOnce) {
		t.Fatalf("expected access modes to be captured, got %#v", summaries[0])
	}
	exists, err := client.PersistentVolumeClaimExists(context.Background(), "apps", "alpha")
	if err != nil || !exists {
		t.Fatalf("expected pvc to exist, got %v %v", exists, err)
	}
	exists, err = client.PersistentVolumeClaimExists(context.Background(), "apps", "missing")
	if err != nil || exists {
		t.Fatalf("expected pvc to be missing, got %v %v", exists, err)
	}
}
// TestPersistentVolumeClaimExistsWrapsUnexpectedErrors asserts that an API
// failure other than not-found surfaces as an error instead of a quiet
// "does not exist" result.
func TestPersistentVolumeClaimExistsWrapsUnexpectedErrors(t *testing.T) {
	clientset := k8sfake.NewSimpleClientset()
	clientset.PrependReactor("get", "persistentvolumeclaims", func(k8stesting.Action) (bool, runtime.Object, error) {
		return true, nil, apierrors.NewForbidden(schema.GroupResource{Resource: "persistentvolumeclaims"}, "data", nil)
	})
	client := &Client{Clientset: clientset}
	if _, err := client.PersistentVolumeClaimExists(context.Background(), "apps", "data"); err == nil {
		t.Fatalf("expected wrapped pvc get error")
	}
}