Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions changelogs/unreleased/9732-blackpiglet
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Remove Restic code path from PodVolumeRestore.
2 changes: 1 addition & 1 deletion cmd/velero-restore-helper/velero-restore-helper.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ func main() {
for {
<-ticker.C
if done() {
fmt.Println("All restic restores are done")
fmt.Println("All PodVolumeRestores are done")
err := removeFolder()
if err != nil {
fmt.Println(err)
Expand Down
2 changes: 1 addition & 1 deletion internal/resourcepolicies/resource_policies.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ const (
ConfigmapRefType string = "configmap"
// skip action implies the volume would be skipped from the backup operation
Skip VolumeActionType = "skip"
// fs-backup action implies that the volume would be backed up via file system copy method using the uploader(kopia/restic) configured by the user
// fs-backup action implies that the volume would be backed up via file system copy method using the uploader(kopia) configured by the user
FSBackup VolumeActionType = "fs-backup"
// snapshot action can have 3 different meaning based on velero configuration and backup spec - cloud provider based snapshots, local csi snapshots and datamover snapshots
Snapshot VolumeActionType = "snapshot"
Expand Down
48 changes: 0 additions & 48 deletions pkg/cmd/cli/nodeagent/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
cacheutil "k8s.io/client-go/tools/cache"
Expand Down Expand Up @@ -430,10 +429,6 @@ func (s *nodeAgentServer) run() {
s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller")
}

if err := controller.InitLegacyPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.namespace, s.config.resourceTimeout, s.logger); err != nil {
s.logger.WithError(err).Fatal("Unable to create the legacy pod volume restore controller")
}

dataUploadReconciler := controller.NewDataUploadReconciler(
s.mgr.GetClient(),
s.mgr,
Expand Down Expand Up @@ -509,8 +504,6 @@ func (s *nodeAgentServer) run() {
if err := pvrReconciler.AttemptPVRResume(s.ctx, s.logger.WithField("node", s.nodeName), s.namespace); err != nil {
s.logger.WithError(errors.WithStack(err)).Error("Failed to attempt PVR resume")
}

s.markLegacyPVRsFailed(s.mgr.GetClient())
}()

s.logger.Info("Controllers starting...")
Expand Down Expand Up @@ -604,47 +597,6 @@ func (s *nodeAgentServer) validatePodVolumesHostPath(client kubernetes.Interface
return nil
}

// markLegacyPVRsFailed is a best-effort startup sweep: it lists every
// PodVolumeRestore in the server's namespace and marks any legacy PVR
// (per controller.IsLegacyPVR — presumably the Restic-based path this PR
// removes; confirm against that helper) that is still InProgress as Failed,
// because such a restore cannot survive a node-agent restart.
// Only PVRs whose target pod is scheduled on this node (s.nodeName) are
// touched. All errors are logged and the offending item skipped; the
// function itself never returns an error.
func (s *nodeAgentServer) markLegacyPVRsFailed(client ctrlclient.Client) {
pvrs := &velerov1api.PodVolumeRestoreList{}
if err := client.List(s.ctx, pvrs, &ctrlclient.ListOptions{Namespace: s.namespace}); err != nil {
s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumerestores")
return
}

for i, pvr := range pvrs.Items {
// Non-legacy PVRs are handled by the regular resume path, not here.
if !controller.IsLegacyPVR(&pvr) {
continue
}

// Only an InProgress restore is stranded by a restart; any other phase is left alone.
if pvr.Status.Phase != velerov1api.PodVolumeRestorePhaseInProgress {
s.logger.Debugf("the status of podvolumerestore %q is %q, skip", pvr.GetName(), pvr.Status.Phase)
continue
}

// Resolve the PVR's target pod so we can tell whether this node owns the restore.
pod := &corev1api.Pod{}
if err := client.Get(s.ctx, types.NamespacedName{
Namespace: pvr.Spec.Pod.Namespace,
Name: pvr.Spec.Pod.Name,
}, pod); err != nil {
s.logger.WithError(errors.WithStack(err)).Errorf("failed to get pod \"%s/%s\" of podvolumerestore %q",
pvr.Spec.Pod.Namespace, pvr.Spec.Pod.Name, pvr.GetName())
continue
}
// Each node-agent instance only touches PVRs whose pod runs on its own node.
if pod.Spec.NodeName != s.nodeName {
s.logger.Debugf("the node of pod referenced by podvolumerestore %q is %q, not %q, skip", pvr.GetName(), pod.Spec.NodeName, s.nodeName)
continue
}

// Update via &pvrs.Items[i] (the list element), not &pvr (the loop copy),
// so the status change lands on the object held in the list.
if err := controller.UpdatePVRStatusToFailed(s.ctx, client, &pvrs.Items[i], errors.New("cannot survive from node-agent restart"),
fmt.Sprintf("get a legacy podvolumerestore with status %q during the server starting, mark it as %q", velerov1api.PodVolumeRestorePhaseInProgress, velerov1api.PodVolumeRestorePhaseFailed),
time.Now(), s.logger); err != nil {
s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumerestore %q", pvr.GetName())
continue
}
// NOTE(review): this logs the loop copy's Status.Message, captured before the
// update above. If UpdatePVRStatusToFailed writes the message onto
// pvrs.Items[i], the value logged here is the stale pre-update message —
// confirm whether pvrs.Items[i].Status.Message was intended.
s.logger.WithField("podvolumerestore", pvr.GetName()).Warn(pvr.Status.Message)
}
}

var getConfigsFunc = nodeagent.GetConfigs

func (s *nodeAgentServer) getDataPathConfigs() error {
Expand Down
4 changes: 2 additions & 2 deletions pkg/cmd/server/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -1164,8 +1164,8 @@ func markPodVolumeRestoresCancel(ctx context.Context, client ctrlclient.Client,

for i := range pvrs.Items {
pvr := pvrs.Items[i]
if controller.IsLegacyPVR(&pvr) {
log.WithField("PVR", pvr.GetName()).Warn("Found a legacy PVR during velero server restart, cannot stop it")
if _, err := uploader.ValidateUploaderType(pvr.Spec.UploaderType); err != nil {
log.WithField("PVR", pvr.Name).Warnf("invalid uploader type %s, skip marking cancel for this PVR", pvr.Spec.UploaderType)
continue
}

Expand Down
14 changes: 10 additions & 4 deletions pkg/controller/pod_volume_restore_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -603,7 +603,7 @@ func (r *PodVolumeRestoreReconciler) closeDataPath(ctx context.Context, pvrName
func (r *PodVolumeRestoreReconciler) SetupWithManager(mgr ctrl.Manager) error {
gp := kube.NewGenericEventPredicate(func(object client.Object) bool {
pvr := object.(*velerov1api.PodVolumeRestore)
if IsLegacyPVR(pvr) {
if _, err := uploader.ValidateUploaderType(pvr.Spec.UploaderType); err != nil {
return false
}

Expand All @@ -628,7 +628,8 @@ func (r *PodVolumeRestoreReconciler) SetupWithManager(mgr ctrl.Manager) error {

pred := kube.NewAllEventPredicate(func(obj client.Object) bool {
pvr := obj.(*velerov1api.PodVolumeRestore)
return !IsLegacyPVR(pvr)
_, err := uploader.ValidateUploaderType(pvr.Spec.UploaderType)
return err == nil
})

return ctrl.NewControllerManagedBy(mgr).
Expand Down Expand Up @@ -678,7 +679,7 @@ func (r *PodVolumeRestoreReconciler) findPVRForTargetPod(ctx context.Context, po

requests := []reconcile.Request{}
for _, item := range list.Items {
if IsLegacyPVR(&item) {
if _, err := uploader.ValidateUploaderType(item.Spec.UploaderType); err != nil {
continue
}

Expand Down Expand Up @@ -708,6 +709,11 @@ func (r *PodVolumeRestoreReconciler) findPVRForRestorePod(ctx context.Context, p
"PVR": pvr.Name,
})

if _, err := uploader.ValidateUploaderType(pvr.Spec.UploaderType); err != nil {
log.WithField("uploaderType", pvr.Spec.UploaderType).Debug("skip PVR with invalid uploader type")
return []reconcile.Request{}
}

if pvr.Status.Phase != velerov1api.PodVolumeRestorePhaseAccepted {
return []reconcile.Request{}
}
Expand Down Expand Up @@ -1029,7 +1035,7 @@ func (r *PodVolumeRestoreReconciler) AttemptPVRResume(ctx context.Context, logge

for i := range pvrs.Items {
pvr := &pvrs.Items[i]
if IsLegacyPVR(pvr) {
if _, err := uploader.ValidateUploaderType(pvr.Spec.UploaderType); err != nil {
continue
}

Expand Down
Loading
Loading