diff --git a/src/go/rpk/pkg/cli/cloud/auth/BUILD b/src/go/rpk/pkg/cli/cloud/auth/BUILD index aca03a3ccb794..65aa3f17378a6 100644 --- a/src/go/rpk/pkg/cli/cloud/auth/BUILD +++ b/src/go/rpk/pkg/cli/cloud/auth/BUILD @@ -1,4 +1,4 @@ -load("@rules_go//go:def.bzl", "go_library") +load("@rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "auth", @@ -21,3 +21,14 @@ go_library( "@com_github_spf13_cobra//:cobra", ], ) + +go_test( + name = "auth_test", + srcs = ["list_test.go"], + embed = [":auth"], + deps = [ + "//src/go/rpk/pkg/config", + "//src/go/rpk/pkg/out", + "@com_github_stretchr_testify//require", + ], +) diff --git a/src/go/rpk/pkg/cli/cloud/auth/list.go b/src/go/rpk/pkg/cli/cloud/auth/list.go index e6c2b9fee8a6b..9df51ac0249e8 100644 --- a/src/go/rpk/pkg/cli/cloud/auth/list.go +++ b/src/go/rpk/pkg/cli/cloud/auth/list.go @@ -10,6 +10,8 @@ package auth import ( + "fmt" + "io" "sort" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" @@ -18,19 +20,46 @@ import ( "github.com/spf13/cobra" ) +type cloudAuthRow struct { + Name string `json:"name" yaml:"name"` + Kind string `json:"kind" yaml:"kind"` + Organization string `json:"organization" yaml:"organization"` + OrganizationID string `json:"organization_id" yaml:"organization_id"` + Current bool `json:"current" yaml:"current"` +} + +func printCloudAuthList(f config.OutFormatter, rows []cloudAuthRow, w io.Writer) { + if isText, _, rendered, err := f.Format(rows); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, rendered) + return + } + tw := out.NewTableTo(w, "NAME", "KIND", "ORGANIZATION", "ORGANIZATION-ID") + defer tw.Flush() + for _, r := range rows { + name := r.Name + if r.Current { + name += "*" + } + tw.Print(name, r.Kind, r.Organization, r.OrganizationID) + } +} + func newListCommand(fs afero.Fs, p *config.Params) *cobra.Command { - return &cobra.Command{ + cmd := &cobra.Command{ Use: "list", Aliases: []string{"ls"}, Short: "List 
rpk cloud authentications", Args: cobra.ExactArgs(0), - Run: func(*cobra.Command, []string) { + Run: func(cmd *cobra.Command, _ []string) { + f := p.Formatter + if h, ok := f.Help([]cloudAuthRow{}); ok { + out.Exit(h) + } + cfg, err := p.Load(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) - tw := out.NewTable("name", "kind", "organization", "organization-id") - defer tw.Flush() - y, ok := cfg.ActualRpkYaml() if !ok { return @@ -44,14 +73,21 @@ func newListCommand(fs afero.Fs, p *config.Params) *cobra.Command { (l.OrgID == r.OrgID && l.Name < r.Name))) }) + rows := make([]cloudAuthRow, 0, len(y.CloudAuths)) for i := range y.CloudAuths { a := &y.CloudAuths[i] - name := a.Name - if a.OrgID == y.CurrentCloudAuthOrgID && a.Kind == y.CurrentCloudAuthKind { - name += "*" - } - tw.Print(name, a.Kind, a.Organization, a.OrgID) + rows = append(rows, cloudAuthRow{ + Name: a.Name, + Kind: a.Kind, + Organization: a.Organization, + OrganizationID: a.OrgID, + Current: a.OrgID == y.CurrentCloudAuthOrgID && a.Kind == y.CurrentCloudAuthKind, + }) } + + printCloudAuthList(f, rows, cmd.OutOrStdout()) }, } + p.InstallFormatFlag(cmd) + return cmd } diff --git a/src/go/rpk/pkg/cli/cloud/auth/list_test.go b/src/go/rpk/pkg/cli/cloud/auth/list_test.go new file mode 100644 index 0000000000000..b7d0f15887ed1 --- /dev/null +++ b/src/go/rpk/pkg/cli/cloud/auth/list_test.go @@ -0,0 +1,47 @@ +// Copyright 2026 Redpanda Data, Inc. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package auth + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestPrintCloudAuthList(t *testing.T) { + data := []cloudAuthRow{ + {Name: "acme-sso", Kind: "sso", Organization: "acme", OrganizationID: "org-123", Current: true}, + {Name: "acme-client", Kind: "client", Organization: "acme", OrganizationID: "org-123"}, + } + + t.Run("text marks current with asterisk", func(t *testing.T) { + var buf bytes.Buffer + printCloudAuthList(config.OutFormatter{Kind: "text"}, data, &buf) + require.Equal(t, [][]string{ + {"NAME", "KIND", "ORGANIZATION", "ORGANIZATION-ID"}, + {"acme-sso*", "sso", "acme", "org-123"}, + {"acme-client", "client", "acme", "org-123"}, + }, out.TableRows(buf.String())) + }) + + // Round-trip verifies that structured output uses the Current bool + // and does not embed the asterisk in Name. 
+ t.Run("json round-trip", func(t *testing.T) { + var buf bytes.Buffer + printCloudAuthList(config.OutFormatter{Kind: "json"}, data, &buf) + var got []cloudAuthRow + require.NoError(t, json.Unmarshal(buf.Bytes(), &got)) + require.Equal(t, data, got) + }) +} diff --git a/src/go/rpk/pkg/cli/cluster/BUILD b/src/go/rpk/pkg/cli/cluster/BUILD index c87bffa749951..9d20fd2425485 100644 --- a/src/go/rpk/pkg/cli/cluster/BUILD +++ b/src/go/rpk/pkg/cli/cluster/BUILD @@ -1,4 +1,4 @@ -load("@rules_go//go:def.bzl", "go_library") +load("@rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "cluster", @@ -34,3 +34,14 @@ go_library( "@org_uber_go_zap//:zap", ], ) + +go_test( + name = "cluster_test", + srcs = ["logdirs_test.go"], + embed = [":cluster"], + deps = [ + "//src/go/rpk/pkg/config", + "//src/go/rpk/pkg/out", + "@com_github_stretchr_testify//require", + ], +) diff --git a/src/go/rpk/pkg/cli/cluster/config/BUILD b/src/go/rpk/pkg/cli/cluster/config/BUILD index 0fbe1a4db5330..5a522306667fa 100644 --- a/src/go/rpk/pkg/cli/cluster/config/BUILD +++ b/src/go/rpk/pkg/cli/cluster/config/BUILD @@ -48,11 +48,15 @@ go_test( "import_test.go", "list_test.go", "set_test.go", + "status_test.go", ], embed = [":config"], deps = [ + "//src/go/rpk/pkg/config", + "//src/go/rpk/pkg/out", "//src/go/rpk/pkg/publicapi", "@build_buf_gen_go_redpandadata_cloud_protocolbuffers_go//redpanda/api/controlplane/v1:controlplane", + "@com_github_redpanda_data_common_go_rpadmin//:rpadmin", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", "@in_gopkg_yaml_v3//:yaml_v3", diff --git a/src/go/rpk/pkg/cli/cluster/config/status.go b/src/go/rpk/pkg/cli/cluster/config/status.go index d23543f0f702f..b040b5be8384f 100644 --- a/src/go/rpk/pkg/cli/cluster/config/status.go +++ b/src/go/rpk/pkg/cli/cluster/config/status.go @@ -12,6 +12,7 @@ package config import ( "errors" "fmt" + "io" "slices" "go.uber.org/zap" @@ -22,6 +23,7 @@ import ( controlplanev1 
"buf.build/gen/go/redpandadata/cloud/protocolbuffers/go/redpanda/api/controlplane/v1" "connectrpc.com/connect" + "github.com/redpanda-data/common-go/rpadmin" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/adminapi" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" @@ -30,6 +32,41 @@ import ( "github.com/spf13/cobra" ) +type nodeConfigStatus struct { + Node int64 `json:"node" yaml:"node"` + ConfigVersion int64 `json:"config_version" yaml:"config_version"` + NeedsRestart bool `json:"needs_restart" yaml:"needs_restart"` + Invalid []string `json:"invalid" yaml:"invalid"` + Unknown []string `json:"unknown" yaml:"unknown"` +} + +func buildNodeStatuses(resp rpadmin.ConfigStatusResponse) []nodeConfigStatus { + statuses := make([]nodeConfigStatus, 0, len(resp)) + for _, node := range resp { + statuses = append(statuses, nodeConfigStatus{ + Node: node.NodeID, + ConfigVersion: node.ConfigVersion, + NeedsRestart: node.Restart, + Invalid: node.Invalid, + Unknown: node.Unknown, + }) + } + return statuses +} + +func printNodeStatus(f config.OutFormatter, statuses []nodeConfigStatus, w io.Writer) { + if isText, _, t, err := f.Format(statuses); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, t) + return + } + tw := out.NewTableTo(w, "NODE", "CONFIG-VERSION", "NEEDS-RESTART", "INVALID", "UNKNOWN") + defer tw.Flush() + for _, s := range statuses { + tw.Print(s.Node, s.ConfigVersion, s.NeedsRestart, s.Invalid, s.Unknown) + } +} + func newStatusCommand(fs afero.Fs, p *config.Params) *cobra.Command { cmd := &cobra.Command{ Use: "status", @@ -46,6 +83,11 @@ a lower number shows that a node is out of sync, perhaps because it is offline.`, Args: cobra.NoArgs, Run: func(cmd *cobra.Command, _ []string) { + f := p.Formatter + if h, ok := f.Help([]nodeConfigStatus{}); ok { + out.Exit(h) + } + vp, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load 
config: %v", err) @@ -71,26 +113,15 @@ is offline.`, client, err := adminapi.NewClient(cmd.Context(), fs, vp) out.MaybeDie(err, "unable to initialize admin client: %v", err) - // GET the status endpoint resp, err := client.ClusterConfigStatus(cmd.Context(), false) out.MaybeDie(err, "error fetching status: %v", err) - tw := out.NewTable("NODE", "CONFIG-VERSION", "NEEDS-RESTART", "INVALID", "UNKNOWN") - defer tw.Flush() - - for _, node := range resp { - tw.PrintStructFields(struct { - ID int64 - Version int64 - Restart bool - Invalid []string - Unknown []string - }{node.NodeID, node.ConfigVersion, node.Restart, node.Invalid, node.Unknown}) - } + printNodeStatus(f, buildNodeStatuses(resp), cmd.OutOrStdout()) } }, } + p.InstallFormatFlag(cmd) return cmd } diff --git a/src/go/rpk/pkg/cli/cluster/config/status_test.go b/src/go/rpk/pkg/cli/cluster/config/status_test.go new file mode 100644 index 0000000000000..c688fc9f4f996 --- /dev/null +++ b/src/go/rpk/pkg/cli/cluster/config/status_test.go @@ -0,0 +1,57 @@ +// Copyright 2026 Redpanda Data, Inc. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package config + +import ( + "strings" + "testing" + + "github.com/redpanda-data/common-go/rpadmin" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestBuildNodeStatuses(t *testing.T) { + nodes := rpadmin.ConfigStatusResponse{ + {NodeID: 1, ConfigVersion: 5, Restart: true, Invalid: []string{"bad_key"}, Unknown: []string{"new_key"}}, + {NodeID: 2, ConfigVersion: 5, Restart: false, Invalid: nil, Unknown: nil}, + } + + statuses := buildNodeStatuses(nodes) + require.Len(t, statuses, 2) + + require.Equal(t, int64(1), statuses[0].Node) + require.Equal(t, int64(5), statuses[0].ConfigVersion) + require.True(t, statuses[0].NeedsRestart) + require.Equal(t, []string{"bad_key"}, statuses[0].Invalid) + require.Equal(t, []string{"new_key"}, statuses[0].Unknown) + + require.Equal(t, int64(2), statuses[1].Node) + require.False(t, statuses[1].NeedsRestart) + require.Nil(t, statuses[1].Invalid) + require.Nil(t, statuses[1].Unknown) +} + +func TestPrintNodeStatus(t *testing.T) { + statuses := []nodeConfigStatus{ + {Node: 1, ConfigVersion: 5, NeedsRestart: true, Invalid: []string{"bad_key"}, Unknown: []string{"new_key"}}, + {Node: 2, ConfigVersion: 5, NeedsRestart: false}, + } + + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printNodeStatus(f, statuses, b) + require.Equal(t, [][]string{ + {"NODE", "CONFIG-VERSION", "NEEDS-RESTART", "INVALID", "UNKNOWN"}, + {"1", "5", "true", "[bad_key]", "[new_key]"}, + {"2", "5", "false", "[]", "[]"}, + }, out.TableRows(b.String())) +} diff --git a/src/go/rpk/pkg/cli/cluster/logdirs.go 
b/src/go/rpk/pkg/cli/cluster/logdirs.go index 6c8e0273f6b0f..8a9e7158d54e8 100644 --- a/src/go/rpk/pkg/cli/cluster/logdirs.go +++ b/src/go/rpk/pkg/cli/cluster/logdirs.go @@ -12,6 +12,7 @@ package cluster import ( "context" "fmt" + "io" "os" "sort" "strconv" @@ -27,6 +28,104 @@ import ( "github.com/twmb/types" ) +type logDirRow struct { + Broker int32 `json:"broker" yaml:"broker"` + Dir string `json:"dir" yaml:"dir"` + Topic string `json:"topic" yaml:"topic"` + Partition int32 `json:"partition" yaml:"partition"` + Size int64 `json:"size" yaml:"size"` + Error string `json:"error,omitempty" yaml:"error,omitempty"` +} + +// collapseLogDirRows merges consecutive rows where shouldChange returns false, +// accumulating size into the prior row. +func collapseLogDirRows(rows []logDirRow, shouldChange func(prior, current logDirRow) bool) []logDirRow { + if len(rows) == 0 { + return rows + } + prior := rows[0] + keep := rows[:0] + for _, current := range rows[1:] { + if shouldChange(prior, current) { + keep = append(keep, prior) + prior = current + continue + } + prior.Size += current.Size + } + return append(keep, prior) +} + +// aggregateAndSortLogDirs validates aggregateInto, collapses rows to the +// requested granularity, and optionally sorts by size descending. 
+func aggregateAndSortLogDirs(rows []logDirRow, aggregateInto string, sortBySize bool) ([]logDirRow, error) { + switch strings.ToLower(aggregateInto) { + default: + return nil, fmt.Errorf("unrecognized --aggregate-into %q", aggregateInto) + case "", "partition": + // no collapse needed + case "broker": + rows = collapseLogDirRows(rows, func(prior, current logDirRow) bool { + return prior.Broker != current.Broker + }) + case "dir": + rows = collapseLogDirRows(rows, func(prior, current logDirRow) bool { + return prior.Broker != current.Broker || prior.Dir != current.Dir + }) + case "topic": + rows = collapseLogDirRows(rows, func(prior, current logDirRow) bool { + return prior.Broker != current.Broker || prior.Dir != current.Dir || prior.Topic != current.Topic + }) + } + if sortBySize { + sort.SliceStable(rows, func(i, j int) bool { return rows[i].Size > rows[j].Size }) + } + return rows, nil +} + +func printLogDirs(f config.OutFormatter, rows []logDirRow, aggregateInto string, human bool, sortBySize bool, w io.Writer) { + var err error + rows, err = aggregateAndSortLogDirs(rows, aggregateInto, sortBySize) + out.MaybeDie(err, "%v", err) + + if isText, _, t, err := f.Format(rows); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, t) + return + } + + sizeFn := func(size int64) string { + if human { + return units.HumanSize(float64(size)) + } + return strconv.Itoa(int(size)) + } + + var tw *out.TabWriter + var printRow func(r logDirRow) + switch strings.ToLower(aggregateInto) { + default: + // unreachable: validated in aggregateAndSortLogDirs + case "", "partition": + tw = out.NewTableTo(w, "BROKER", "DIR", "TOPIC", "PARTITION", "SIZE", "ERROR") + printRow = func(r logDirRow) { tw.Print(r.Broker, r.Dir, r.Topic, r.Partition, sizeFn(r.Size), r.Error) } + case "broker": + tw = out.NewTableTo(w, "BROKER", "SIZE", "ERROR") + printRow = func(r logDirRow) { tw.Print(r.Broker, sizeFn(r.Size),
r.Error) } + case "dir": + tw = out.NewTableTo(w, "BROKER", "DIR", "SIZE", "ERROR") + printRow = func(r logDirRow) { tw.Print(r.Broker, r.Dir, sizeFn(r.Size), r.Error) } + case "topic": + tw = out.NewTableTo(w, "BROKER", "DIR", "TOPIC", "SIZE", "ERROR") + printRow = func(r logDirRow) { tw.Print(r.Broker, r.Dir, r.Topic, sizeFn(r.Size), r.Error) } + } + + defer tw.Flush() + for _, r := range rows { + printRow(r) + } +} + func newLogdirsCommand(fs afero.Fs, p *config.Params) *cobra.Command { cmd := &cobra.Command{ Use: "logdirs", @@ -69,7 +168,12 @@ where revision is a Redpanda internal concept. `, Args: cobra.ExactArgs(0), - Run: func(_ *cobra.Command, _ []string) { + Run: func(cmd *cobra.Command, _ []string) { + f := p.Formatter + if h, ok := f.Help([]logDirRow{}); ok { + out.Exit(h) + } + p, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) @@ -92,27 +196,19 @@ where revision is a Redpanda internal concept. s = listed.TopicsSet() } - type row struct { - Broker int32 - Dir string - Topic string - Partition int32 - Size int64 - Err string - } - var rows []row + var rows []logDirRow eachDir := func(d kadm.DescribedLogDir) { if d.Err != nil { - rows = append(rows, row{ + rows = append(rows, logDirRow{ Broker: d.Broker, Dir: d.Dir, - Err: d.Err.Error(), + Error: d.Err.Error(), }) return } d.Topics.Each(func(p kadm.DescribedLogDirPartition) { - rows = append(rows, row{ + rows = append(rows, logDirRow{ Broker: d.Broker, Dir: d.Dir, Topic: p.Topic, @@ -132,78 +228,10 @@ where revision is a Redpanda internal concept. desc.Each(eachDir) } - // First we deeply sort our rows, we will use this for - // in-place aggregating. + // Deeply sort rows first so aggregation can collapse consecutive equal keys. types.Sort(rows) - // For aggregate into, we merge rows. If shouldChange - // returns true, we know we need to move to a new row. 
- collapse := func(shouldChange func(prior, current row) bool) { - if len(rows) == 0 { - return - } - prior := rows[0] - keep := rows[:0] - for _, current := range rows[1:] { - if shouldChange(prior, current) { - keep = append(keep, prior) - prior = current - continue - } - prior.Size += current.Size - } - rows = append(keep, prior) - } - - sizeFn := func(size int64) string { - if human { - return units.HumanSize(float64(size)) - } - return strconv.Itoa(int(size)) - } - - var headers []string - var rowfn func(*out.TabWriter, row) - switch strings.ToLower(aggregateInto) { - default: - out.Die("unrecognized --aggregate-into %q", aggregateInto) - - case "broker": - headers = []string{"broker", "size", "error"} - collapse(func(prior, current row) bool { return prior.Broker != current.Broker }) - rowfn = func(tw *out.TabWriter, r row) { tw.Print(r.Broker, sizeFn(r.Size), r.Err) } - - case "dir": - headers = []string{"broker", "dir", "size", "error"} - collapse(func(prior, current row) bool { return prior.Broker != current.Broker || prior.Dir != current.Dir }) - rowfn = func(tw *out.TabWriter, r row) { tw.Print(r.Broker, r.Dir, sizeFn(r.Size), r.Err) } - - case "topic": - headers = []string{"broker", "dir", "topic", "size", "error"} - collapse(func(prior, current row) bool { - return prior.Broker != current.Broker || prior.Dir != current.Dir || prior.Topic != current.Topic - }) - rowfn = func(tw *out.TabWriter, r row) { tw.Print(r.Broker, r.Dir, r.Topic, sizeFn(r.Size), r.Err) } - - case "", "partition": - headers = []string{"broker", "dir", "topic", "partition", "size", "error"} - rowfn = func(tw *out.TabWriter, r row) { tw.Print(r.Broker, r.Dir, r.Topic, r.Partition, sizeFn(r.Size), r.Err) } - } - - // Finally, if we are sorting by size, we perform a - // stable sort. We want stable to preserve ordering for - // what we have already ordered and aggregated. 
- if sortBySize { - sort.SliceStable(rows, func(i, j int) bool { - return rows[i].Size >= rows[j].Size - }) - } - - tw := out.NewTable(headers...) - defer tw.Flush() - for _, row := range rows { - rowfn(tw, row) - } + printLogDirs(f, rows, aggregateInto, human, sortBySize, cmd.OutOrStdout()) }, } @@ -217,5 +245,6 @@ where revision is a Redpanda internal concept. opts := []string{"broker", "dir", "topic"} return opts, cobra.ShellCompDirectiveDefault }) + p.InstallFormatFlag(cmd) return cmd } diff --git a/src/go/rpk/pkg/cli/cluster/logdirs_test.go b/src/go/rpk/pkg/cli/cluster/logdirs_test.go new file mode 100644 index 0000000000000..f7dc01b7ae409 --- /dev/null +++ b/src/go/rpk/pkg/cli/cluster/logdirs_test.go @@ -0,0 +1,164 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package cluster + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestPrintLogDirs(t *testing.T) { + // Fresh rows per subtest: collapse() reuses the input slice's backing + // array, so sharing a single slice across subtests leaks mutations. 
+ freshRows := func() []logDirRow { + return []logDirRow{ + {Broker: 1, Dir: "/var/lib/redpanda/data", Topic: "foo", Partition: 0, Size: 1024}, + {Broker: 1, Dir: "/var/lib/redpanda/data", Topic: "foo", Partition: 1, Size: 2048}, + {Broker: 2, Dir: "/var/lib/redpanda/data", Topic: "bar", Partition: 0, Size: 512, Error: "some error"}, + } + } + f := config.OutFormatter{Kind: "text"} + + aggCases := []struct { + agg string + want [][]string + }{ + { + agg: "partition", + want: [][]string{ + {"BROKER", "DIR", "TOPIC", "PARTITION", "SIZE", "ERROR"}, + {"1", "/var/lib/redpanda/data", "foo", "0", "1024"}, + {"1", "/var/lib/redpanda/data", "foo", "1", "2048"}, + {"2", "/var/lib/redpanda/data", "bar", "0", "512", "some", "error"}, + }, + }, + { + agg: "broker", + want: [][]string{ + {"BROKER", "SIZE", "ERROR"}, + {"1", "3072"}, + {"2", "512", "some", "error"}, + }, + }, + { + agg: "dir", + want: [][]string{ + {"BROKER", "DIR", "SIZE", "ERROR"}, + {"1", "/var/lib/redpanda/data", "3072"}, + {"2", "/var/lib/redpanda/data", "512", "some", "error"}, + }, + }, + { + agg: "topic", + want: [][]string{ + {"BROKER", "DIR", "TOPIC", "SIZE", "ERROR"}, + {"1", "/var/lib/redpanda/data", "foo", "3072"}, + {"2", "/var/lib/redpanda/data", "bar", "512", "some", "error"}, + }, + }, + } + for _, tc := range aggCases { + t.Run("agg="+tc.agg, func(t *testing.T) { + var buf bytes.Buffer + printLogDirs(f, freshRows(), tc.agg, false, false, &buf) + require.Equal(t, tc.want, out.TableRows(buf.String())) + }) + } + + t.Run("sort-by-size descending", func(t *testing.T) { + var buf bytes.Buffer + printLogDirs(f, freshRows(), "partition", false, true, &buf) + require.Equal(t, [][]string{ + {"BROKER", "DIR", "TOPIC", "PARTITION", "SIZE", "ERROR"}, + {"1", "/var/lib/redpanda/data", "foo", "1", "2048"}, + {"1", "/var/lib/redpanda/data", "foo", "0", "1024"}, + {"2", "/var/lib/redpanda/data", "bar", "0", "512", "some", "error"}, + }, out.TableRows(buf.String())) + }) + + t.Run("human-readable size", func(t 
*testing.T) { + var buf bytes.Buffer + printLogDirs(f, []logDirRow{{Broker: 1, Dir: "/data", Topic: "t", Partition: 0, Size: 1048576}}, "partition", true, false, &buf) + require.Equal(t, [][]string{ + {"BROKER", "DIR", "TOPIC", "PARTITION", "SIZE", "ERROR"}, + {"1", "/data", "t", "0", "1.049MB"}, + }, out.TableRows(buf.String())) + }) +} + +func TestAggregateAndSortLogDirs(t *testing.T) { + freshRows := func() []logDirRow { + return []logDirRow{ + {Broker: 1, Dir: "/data", Topic: "foo", Partition: 0, Size: 1024}, + {Broker: 1, Dir: "/data", Topic: "foo", Partition: 1, Size: 2048}, + {Broker: 2, Dir: "/data", Topic: "bar", Partition: 0, Size: 512}, + } + } + + t.Run("invalid value returns error", func(t *testing.T) { + _, err := aggregateAndSortLogDirs(freshRows(), "bogus", false) + require.ErrorContains(t, err, "bogus") + }) + + t.Run("broker aggregation sums sizes", func(t *testing.T) { + rows, err := aggregateAndSortLogDirs(freshRows(), "broker", false) + require.NoError(t, err) + require.Len(t, rows, 2) + require.Equal(t, int64(3072), rows[0].Size) + require.Equal(t, int64(512), rows[1].Size) + }) + + t.Run("sort by size descending", func(t *testing.T) { + rows, err := aggregateAndSortLogDirs(freshRows(), "partition", true) + require.NoError(t, err) + require.Equal(t, int64(2048), rows[0].Size) + require.Equal(t, int64(1024), rows[1].Size) + require.Equal(t, int64(512), rows[2].Size) + }) +} + +func TestPrintLogDirsJSON(t *testing.T) { + freshRows := func() []logDirRow { + return []logDirRow{ + {Broker: 1, Dir: "/data", Topic: "foo", Partition: 0, Size: 1024}, + {Broker: 1, Dir: "/data", Topic: "foo", Partition: 1, Size: 2048}, + {Broker: 2, Dir: "/data", Topic: "bar", Partition: 0, Size: 512}, + } + } + + t.Run("aggregate broker emits aggregated json", func(t *testing.T) { + var buf bytes.Buffer + printLogDirs(config.OutFormatter{Kind: "json"}, freshRows(), "broker", false, false, &buf) + + var rows []logDirRow + require.NoError(t, json.Unmarshal(buf.Bytes(), 
&rows)) + require.Len(t, rows, 2) + require.Equal(t, int32(1), rows[0].Broker) + require.Equal(t, int64(3072), rows[0].Size) + require.Equal(t, int32(2), rows[1].Broker) + require.Equal(t, int64(512), rows[1].Size) + }) + + t.Run("sort by size json", func(t *testing.T) { + var buf bytes.Buffer + printLogDirs(config.OutFormatter{Kind: "json"}, freshRows(), "partition", false, true, &buf) + + var rows []logDirRow + require.NoError(t, json.Unmarshal(buf.Bytes(), &rows)) + require.Equal(t, int64(2048), rows[0].Size) + require.Equal(t, int64(1024), rows[1].Size) + require.Equal(t, int64(512), rows[2].Size) + }) +} diff --git a/src/go/rpk/pkg/cli/cluster/maintenance/BUILD b/src/go/rpk/pkg/cli/cluster/maintenance/BUILD index f816c60b203ff..4df786763bb3d 100644 --- a/src/go/rpk/pkg/cli/cluster/maintenance/BUILD +++ b/src/go/rpk/pkg/cli/cluster/maintenance/BUILD @@ -1,4 +1,4 @@ -load("@rules_go//go:def.bzl", "go_library") +load("@rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "maintenance", @@ -19,3 +19,15 @@ go_library( "@com_github_spf13_cobra//:cobra", ], ) + +go_test( + name = "maintenance_test", + srcs = ["status_test.go"], + embed = [":maintenance"], + deps = [ + "//src/go/rpk/pkg/config", + "//src/go/rpk/pkg/out", + "@com_github_redpanda_data_common_go_rpadmin//:rpadmin", + "@com_github_stretchr_testify//require", + ], +) diff --git a/src/go/rpk/pkg/cli/cluster/maintenance/enable.go b/src/go/rpk/pkg/cli/cluster/maintenance/enable.go index 6d6ca1f626e2f..52d323f760803 100644 --- a/src/go/rpk/pkg/cli/cluster/maintenance/enable.go +++ b/src/go/rpk/pkg/cli/cluster/maintenance/enable.go @@ -110,7 +110,7 @@ node exists that is already in maintenance mode then an error will be returned. 
} retries = 3 if table == nil { - table = newMaintenanceReportTable() + table = newMaintenanceReportTable(cmd.OutOrStdout()) } addBrokerMaintenanceReport(table, b) table.Flush() diff --git a/src/go/rpk/pkg/cli/cluster/maintenance/status.go b/src/go/rpk/pkg/cli/cluster/maintenance/status.go index 14416728e66a5..ade8504e6c0c1 100644 --- a/src/go/rpk/pkg/cli/cluster/maintenance/status.go +++ b/src/go/rpk/pkg/cli/cluster/maintenance/status.go @@ -11,6 +11,7 @@ package maintenance import ( "fmt" + "io" "github.com/redpanda-data/common-go/rpadmin" @@ -21,22 +22,68 @@ import ( "github.com/spf13/cobra" ) -func newMaintenanceReportTable() *out.TabWriter { - headers := []string{ - "Node-ID", "Enabled", "Finished", "Errors", - "Partitions", "Eligible", "Transferring", "Failed", +type brokerMaintenanceStatus struct { + NodeID int `json:"node_id" yaml:"node_id"` + Enabled bool `json:"enabled" yaml:"enabled"` + Finished *bool `json:"finished,omitempty" yaml:"finished,omitempty"` + Errors *bool `json:"errors,omitempty" yaml:"errors,omitempty"` + Partitions *int `json:"partitions,omitempty" yaml:"partitions,omitempty"` + Eligible *int `json:"eligible,omitempty" yaml:"eligible,omitempty"` + Transferring *int `json:"transferring,omitempty" yaml:"transferring,omitempty"` + Failed *int `json:"failed,omitempty" yaml:"failed,omitempty"` +} + +func buildMaintenanceStatuses(brokers []rpadmin.Broker) []brokerMaintenanceStatus { + statuses := make([]brokerMaintenanceStatus, 0, len(brokers)) + for _, b := range brokers { + s := brokerMaintenanceStatus{NodeID: b.NodeID} + if b.Maintenance != nil { + s.Enabled = b.Maintenance.Draining + s.Finished = b.Maintenance.Finished + s.Errors = b.Maintenance.Errors + s.Partitions = b.Maintenance.Partitions + s.Eligible = b.Maintenance.Eligible + s.Transferring = b.Maintenance.Transferring + s.Failed = b.Maintenance.Failed + } + statuses = append(statuses, s) + } + return statuses +} + +func printMaintenanceStatus(f config.OutFormatter, statuses 
[]brokerMaintenanceStatus, w io.Writer) { + if isText, _, t, err := f.Format(statuses); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, t) + return + } + tw := newMaintenanceReportTable(w) + defer tw.Flush() + for _, s := range statuses { + tw.Print( + s.NodeID, + s.Enabled, + nullableToStr(s.Finished), + nullableToStr(s.Errors), + nullableToStr(s.Partitions), + nullableToStr(s.Eligible), + nullableToStr(s.Transferring), + nullableToStr(s.Failed), + ) } - return out.NewTable(headers...) } func nullableToStr[V any](v *V) string { if v == nil { return "-" } - return fmt.Sprint(*v) } +func newMaintenanceReportTable(w io.Writer) *out.TabWriter { + return out.NewTableTo(w, "Node-ID", "Enabled", "Finished", "Errors", "Partitions", "Eligible", "Transferring", "Failed") +} + func addBrokerMaintenanceReport(table *out.TabWriter, b rpadmin.Broker) { table.Print( b.NodeID, @@ -65,7 +112,7 @@ output can be used to monitor the progress of node draining. Field descriptions: NODE-ID: the node ID - ENABLED: true if the node is currently in maintenance mode + ENABLED: true if the node is currently in maintenance mode (draining) FINISHED: leadership draining has completed ERRORS: errors have been encountered while draining PARTITIONS: number of partitions whose leadership has moved @@ -80,9 +127,17 @@ Notes: - Only partitions with more than one replica are eligible for leadership transfer. + + - FINISHED, ERRORS, PARTITIONS, ELIGIBLE, TRANSFERRING, and FAILED are only + populated while a node is in maintenance mode (ENABLED=true). 
`, Args: cobra.ExactArgs(0), Run: func(cmd *cobra.Command, _ []string) { + f := p.Formatter + if h, ok := f.Help([]brokerMaintenanceStatus{}); ok { + out.Exit(h) + } + p, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) config.CheckExitCloudAdmin(p) @@ -101,12 +156,9 @@ Notes: out.Die("maintenance mode is not supported in this cluster") } - table := newMaintenanceReportTable() - defer table.Flush() - for _, broker := range brokers { - addBrokerMaintenanceReport(table, broker) - } + printMaintenanceStatus(f, buildMaintenanceStatuses(brokers), cmd.OutOrStdout()) }, } + p.InstallFormatFlag(cmd) return cmd } diff --git a/src/go/rpk/pkg/cli/cluster/maintenance/status_test.go b/src/go/rpk/pkg/cli/cluster/maintenance/status_test.go new file mode 100644 index 0000000000000..475817ba3e086 --- /dev/null +++ b/src/go/rpk/pkg/cli/cluster/maintenance/status_test.go @@ -0,0 +1,82 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package maintenance + +import ( + "strings" + "testing" + + "github.com/redpanda-data/common-go/rpadmin" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestBuildMaintenanceStatuses(t *testing.T) { + brokers := []rpadmin.Broker{ + { + NodeID: 1, + Maintenance: &rpadmin.MaintenanceStatus{ + Draining: true, + Finished: new(true), + Errors: new(false), + Partitions: new(5), + Eligible: new(3), + Transferring: new(1), + Failed: new(0), + }, + }, + { + NodeID: 2, + Maintenance: &rpadmin.MaintenanceStatus{Draining: false}, + }, + } + + statuses := buildMaintenanceStatuses(brokers) + require.Len(t, statuses, 2) + + 
require.Equal(t, 1, statuses[0].NodeID) + require.True(t, statuses[0].Enabled) + require.Equal(t, new(true), statuses[0].Finished) + require.Equal(t, new(false), statuses[0].Errors) + require.Equal(t, new(5), statuses[0].Partitions) + + require.Equal(t, 2, statuses[1].NodeID) + require.False(t, statuses[1].Enabled) + require.Nil(t, statuses[1].Finished) +} + +func TestPrintMaintenanceStatus(t *testing.T) { + statuses := []brokerMaintenanceStatus{ + { + NodeID: 1, + Enabled: true, + Finished: new(true), + Errors: new(false), + Partitions: new(5), + Eligible: new(3), + Transferring: new(1), + Failed: new(0), + }, + { + NodeID: 2, + Enabled: false, + }, + } + + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printMaintenanceStatus(f, statuses, b) + require.Equal(t, [][]string{ + {"NODE-ID", "ENABLED", "FINISHED", "ERRORS", "PARTITIONS", "ELIGIBLE", "TRANSFERRING", "FAILED"}, + {"1", "true", "true", "false", "5", "3", "1", "0"}, + {"2", "false", "-", "-", "-", "-", "-", "-"}, + }, out.TableRows(b.String())) +} diff --git a/src/go/rpk/pkg/cli/cluster/partitions/BUILD b/src/go/rpk/pkg/cli/cluster/partitions/BUILD index b6780dd5d25d2..a346db62d5098 100644 --- a/src/go/rpk/pkg/cli/cluster/partitions/BUILD +++ b/src/go/rpk/pkg/cli/cluster/partitions/BUILD @@ -35,9 +35,16 @@ go_library( go_test( name = "partitions_test", size = "small", - srcs = ["move_test.go"], + srcs = [ + "cancel_test.go", + "move_status_test.go", + "move_test.go", + "status_test.go", + ], embed = [":partitions"], deps = [ + "//src/go/rpk/pkg/config", + "//src/go/rpk/pkg/out", "@com_github_redpanda_data_common_go_rpadmin//:rpadmin", "@com_github_stretchr_testify//require", ], diff --git a/src/go/rpk/pkg/cli/cluster/partitions/cancel.go b/src/go/rpk/pkg/cli/cluster/partitions/cancel.go index fbca89afd7b45..e924cd70a1b97 100644 --- a/src/go/rpk/pkg/cli/cluster/partitions/cancel.go +++ b/src/go/rpk/pkg/cli/cluster/partitions/cancel.go @@ -11,6 +11,7 @@ package partitions import ( "fmt" + 
"io" "github.com/redpanda-data/common-go/rpadmin" @@ -28,6 +29,13 @@ type movementCancelHandler struct { noConfirm bool } +type movementCancelResult struct { + Namespace string `json:"namespace" yaml:"namespace"` + Topic string `json:"topic" yaml:"topic"` + Partition int `json:"partition" yaml:"partition"` + Result string `json:"result" yaml:"result"` +} + func newMovementCancelCommand(fs afero.Fs, p *config.Params) *cobra.Command { m := &movementCancelHandler{ fs: fs, @@ -39,14 +47,14 @@ func newMovementCancelCommand(fs afero.Fs, p *config.Params) *cobra.Command { Short: "Cancel ongoing partition movements", Long: `Cancel ongoing partition movements. -By default, this command cancels all the partition movements in the cluster. -To ensure that you do not accidentally cancel all partition movements, this -command prompts users for confirmation before issuing the cancellation request. +By default, this command cancels all the partition movements in the cluster. +To ensure that you do not accidentally cancel all partition movements, this +command prompts users for confirmation before issuing the cancellation request. 
You can use "--no-confirm" to disable the confirmation prompt: rpk cluster partitions move-cancel --no-confirm -If "--node" is set, this command will only stop the partition movements +If "--node" is set, this command will only stop the partition movements occurring in the specified node: rpk cluster partitions move-cancel --node 1 @@ -56,10 +64,16 @@ occurring in the specified node: } cmd.Flags().IntVar(&m.node, "node", -1, "ID of a specific node on which to cancel ongoing partition movements") cmd.Flags().BoolVar(&m.noConfirm, "no-confirm", false, "Disable confirmation prompt") + p.InstallFormatFlag(cmd) return cmd } func (m *movementCancelHandler) runMovementCancel(cmd *cobra.Command, _ []string) { + f := m.p.Formatter + if h, ok := f.Help([]movementCancelResult{}); ok { + out.Exit(h) + } + p, err := m.p.LoadVirtualProfile(m.fs) out.MaybeDie(err, "rpk unable to load config: %v", err) config.CheckExitCloudAdmin(p) @@ -90,29 +104,39 @@ func (m *movementCancelHandler) runMovementCancel(cmd *cobra.Command, _ []string out.MaybeDie(err, "unable to cancel partition movements: %v", err) } - if len(movements) == 0 { - fmt.Println("There are no ongoing partition movements to cancel") - return + err = printMovementsResult(f, buildMovementCancelResult(movements), cmd.OutOrStdout()) + out.MaybeDieErr(err) +} + +func buildMovementCancelResult(movements []rpadmin.PartitionsMovementResult) []movementCancelResult { + results := make([]movementCancelResult, 0, len(movements)) + for _, m := range movements { + results = append(results, movementCancelResult{ + Namespace: m.Namespace, + Topic: m.Topic, + Partition: m.Partition, + Result: m.Result, + }) } - printMovementsResult(movements) + return results } -func printMovementsResult(movements []rpadmin.PartitionsMovementResult) { - headers := []string{ - "NAMESPACE", - "TOPIC", - "PARTITION", - "RESULT", +func printMovementsResult(f config.OutFormatter, results []movementCancelResult, w io.Writer) error { + if isText, _, formatted, 
err := f.Format(results); !isText { + if err != nil { + return fmt.Errorf("unable to print movement cancel results in the required format %q: %v", f.Kind, err) + } + fmt.Fprintln(w, formatted) + return nil } - tw := out.NewTable(headers...) + if len(results) == 0 { + fmt.Fprintln(w, "There are no ongoing partition movements to cancel") + return nil + } + tw := out.NewTableTo(w, "Namespace", "Topic", "Partition", "Result") defer tw.Flush() - for _, m := range movements { - result := struct { - Namespace string - Topic string - Partition int - Result string - }{m.Namespace, m.Topic, m.Partition, m.Result} - tw.PrintStructFields(result) + for _, r := range results { + tw.PrintStructFields(r) } + return nil } diff --git a/src/go/rpk/pkg/cli/cluster/partitions/cancel_test.go b/src/go/rpk/pkg/cli/cluster/partitions/cancel_test.go new file mode 100644 index 0000000000000..46671867a5530 --- /dev/null +++ b/src/go/rpk/pkg/cli/cluster/partitions/cancel_test.go @@ -0,0 +1,85 @@ +// Copyright 2026 Redpanda Data, Inc. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package partitions + +import ( + "bytes" + "testing" + + "github.com/redpanda-data/common-go/rpadmin" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func Test_buildMovementCancelResult(t *testing.T) { + tests := []struct { + name string + movements []rpadmin.PartitionsMovementResult + want []movementCancelResult + }{ + { + name: "empty", + movements: []rpadmin.PartitionsMovementResult{}, + want: []movementCancelResult{}, + }, + { + name: "single result", + movements: []rpadmin.PartitionsMovementResult{ + {Namespace: "kafka", Topic: "foo", Partition: 0, Result: "success"}, + }, + want: []movementCancelResult{ + {Namespace: "kafka", Topic: "foo", Partition: 0, Result: "success"}, + }, + }, + { + name: "multiple results", + movements: []rpadmin.PartitionsMovementResult{ + {Namespace: "kafka", Topic: "foo", Partition: 0, Result: "success"}, + {Namespace: "kafka", Topic: "bar", Partition: 1, Result: "failed"}, + {Namespace: "redpanda_internal", Topic: "tx", Partition: 2, Result: "success"}, + }, + want: []movementCancelResult{ + {Namespace: "kafka", Topic: "foo", Partition: 0, Result: "success"}, + {Namespace: "kafka", Topic: "bar", Partition: 1, Result: "failed"}, + {Namespace: "redpanda_internal", Topic: "tx", Partition: 2, Result: "success"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := buildMovementCancelResult(tt.movements) + require.Equal(t, tt.want, got) + }) + } +} + +func Test_printMovementsResult(t *testing.T) { + results := []movementCancelResult{ + {Namespace: "kafka", Topic: "foo", Partition: 0, Result: 
"success"}, + {Namespace: "kafka", Topic: "bar", Partition: 1, Result: "failed"}, + } + + t.Run("text", func(t *testing.T) { + var buf bytes.Buffer + require.NoError(t, printMovementsResult(config.OutFormatter{Kind: "text"}, results, &buf)) + require.Equal(t, [][]string{ + {"NAMESPACE", "TOPIC", "PARTITION", "RESULT"}, + {"kafka", "foo", "0", "success"}, + {"kafka", "bar", "1", "failed"}, + }, out.TableRows(buf.String())) + }) + + t.Run("text empty", func(t *testing.T) { + var buf bytes.Buffer + require.NoError(t, printMovementsResult(config.OutFormatter{Kind: "text"}, []movementCancelResult{}, &buf)) + require.Equal(t, "There are no ongoing partition movements to cancel\n", buf.String()) + }) +} diff --git a/src/go/rpk/pkg/cli/cluster/partitions/move_status.go b/src/go/rpk/pkg/cli/cluster/partitions/move_status.go index 92ab8e2f76696..812f5e6d0aadd 100644 --- a/src/go/rpk/pkg/cli/cluster/partitions/move_status.go +++ b/src/go/rpk/pkg/cli/cluster/partitions/move_status.go @@ -11,6 +11,7 @@ package partitions import ( "fmt" + "io" "slices" "strconv" "strings" @@ -26,20 +27,183 @@ import ( "github.com/twmb/types" ) +type partitionMoveStatus struct { + NamespaceTopic string `json:"namespace_topic" yaml:"namespace_topic"` + Partition int `json:"partition" yaml:"partition"` + MovingFrom []int `json:"moving_from" yaml:"moving_from"` + MovingTo []int `json:"moving_to" yaml:"moving_to"` + CompletionPercent int `json:"completion_percent" yaml:"completion_percent"` + PartitionSize int `json:"partition_size" yaml:"partition_size"` + BytesMoved int `json:"bytes_moved" yaml:"bytes_moved"` + BytesRemaining int `json:"bytes_remaining" yaml:"bytes_remaining"` +} + +type reconciliationOperation struct { + Core int `json:"core" yaml:"core"` + Type string `json:"type" yaml:"type"` + RetryNumber int `json:"retry_number" yaml:"retry_number"` + Revision int `json:"revision" yaml:"revision"` + Status string `json:"status" yaml:"status"` +} + +type reconciliationNodeStatus struct { + 
NodeID int `json:"node_id" yaml:"node_id"` + Operations []reconciliationOperation `json:"operations" yaml:"operations"` +} + +type partitionReconciliation struct { + NamespaceTopic string `json:"namespace_topic" yaml:"namespace_topic"` + Partition int `json:"partition" yaml:"partition"` + NodeStatuses []reconciliationNodeStatus `json:"node_statuses" yaml:"node_statuses"` +} + +type moveStatusResponse struct { + Movements []partitionMoveStatus `json:"movements" yaml:"movements"` + Reconciliations []partitionReconciliation `json:"reconciliations,omitempty" yaml:"reconciliations,omitempty"` +} + +func buildMoveStatuses(reconfigs []rpadmin.ReconfigurationsResponse) []partitionMoveStatus { + statuses := make([]partitionMoveStatus, 0, len(reconfigs)) + for _, r := range reconfigs { + var completion int + if r.PartitionSize > 0 { + completion = r.BytesMoved * 100 / r.PartitionSize + } + from := make([]int, 0, len(r.PreviousReplicas)) + for _, replica := range r.PreviousReplicas { + from = append(from, replica.NodeID) + } + to := make([]int, 0, len(r.NewReplicas)) + for _, replica := range r.NewReplicas { + to = append(to, replica.NodeID) + } + statuses = append(statuses, partitionMoveStatus{ + NamespaceTopic: r.Ns + "/" + r.Topic, + Partition: r.PartitionID, + MovingFrom: from, + MovingTo: to, + CompletionPercent: completion, + PartitionSize: r.PartitionSize, + BytesMoved: r.BytesMoved, + BytesRemaining: r.BytesLeft, + }) + } + return statuses +} + +func buildReconciliations(reconfigs []rpadmin.ReconfigurationsResponse) []partitionReconciliation { + recs := make([]partitionReconciliation, 0, len(reconfigs)) + for _, r := range reconfigs { + nodes := make([]reconciliationNodeStatus, 0, len(r.ReconciliationStatuses)) + for _, s := range r.ReconciliationStatuses { + ops := make([]reconciliationOperation, 0, len(s.Operations)) + for _, op := range s.Operations { + ops = append(ops, reconciliationOperation{ + Core: op.Core, + Type: op.Type, + RetryNumber: op.RetryNumber, + 
Revision: op.Revision, + Status: op.Status, + }) + } + nodes = append(nodes, reconciliationNodeStatus{ + NodeID: s.NodeID, + Operations: ops, + }) + } + recs = append(recs, partitionReconciliation{ + NamespaceTopic: r.Ns + "/" + r.Topic, + Partition: r.PartitionID, + NodeStatuses: nodes, + }) + } + return recs +} + +func buildMoveStatusResponse(reconfigs []rpadmin.ReconfigurationsResponse, includeReconciliations bool) moveStatusResponse { + resp := moveStatusResponse{ + Movements: buildMoveStatuses(reconfigs), + } + if includeReconciliations { + resp.Reconciliations = buildReconciliations(reconfigs) + } + return resp +} + +func printMoveStatus(f config.OutFormatter, resp moveStatusResponse, human bool, w io.Writer) { + if isText, _, t, err := f.Format(resp); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, t) + return + } + sizeFn := func(size int) string { + if human { + return units.HumanSize(float64(size)) + } + return strconv.Itoa(size) + } + + const ( + secMove = "Partition movements" + secReconcile = "Reconciliation statuses" + ) + sections := out.NewSections( + out.ConditionalSectionHeaders(map[string]bool{ + secMove: true, + secReconcile: resp.Reconciliations != nil, + })..., + ) + sections.Add(secMove, func() { + tw := out.NewTableTo(w, "NAMESPACE-TOPIC", "PARTITION", "MOVING-FROM", "MOVING-TO", "COMPLETION-%", "PARTITION-SIZE", "BYTES-MOVED", "BYTES-REMAINING") + defer tw.Flush() + for _, s := range resp.Movements { + tw.Print( + s.NamespaceTopic, + s.Partition, + fmt.Sprint(s.MovingFrom), + fmt.Sprint(s.MovingTo), + s.CompletionPercent, + sizeFn(s.PartitionSize), + sizeFn(s.BytesMoved), + sizeFn(s.BytesRemaining), + ) + } + }) + sections.Add(secReconcile, func() { + for i, r := range resp.Reconciliations { + if i > 0 { + fmt.Fprintln(w) + } + fmt.Fprintf(w, "%s/%d\n", r.NamespaceTopic, r.Partition) + tw := out.NewTableTo(w, "Node-id", "Core", "Type", "Retry-number", "Revision", "Status") + for 
_, s := range r.NodeStatuses { + row := []any{s.NodeID} + for _, op := range s.Operations { + row = append(row, op.Core, op.Type, op.RetryNumber, op.Revision, op.Status) + } + tw.Print(row...) + } + tw.Flush() + } + }) +} + func newPartitionMovementsStatusCommand(fs afero.Fs, p *config.Params) *cobra.Command { var ( - completion int - all bool - human bool - partitions []string - response []rpadmin.ReconfigurationsResponse - filteredResponse []rpadmin.ReconfigurationsResponse + all bool + human bool + partitions []string ) cmd := &cobra.Command{ Use: "move-status", Short: "Show ongoing partition movements", Long: helpListMovement, Run: func(cmd *cobra.Command, topics []string) { + f := p.Formatter + if h, ok := f.Help(moveStatusResponse{}); ok { + out.Exit(h) + } + p, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) config.CheckExitCloudAdmin(p) @@ -52,13 +216,18 @@ func newPartitionMovementsStatusCommand(fs afero.Fs, p *config.Params) *cobra.Co cl, err := adminapi.NewClient(cmd.Context(), fs, p) out.MaybeDie(err, "unable to initialize admin client: %v", err) - response, err = cl.Reconfigurations(cmd.Context()) + response, err := cl.Reconfigurations(cmd.Context()) out.MaybeDie(err, "unable to list partition movements: %v", err) if len(response) == 0 { - out.Exit("There are no ongoing partition movements.") + if f.IsText() { + out.Exit("There are no ongoing partition movements.") + } + printMoveStatus(f, buildMoveStatusResponse(nil, all), human, cmd.OutOrStdout()) + return } + var filteredResponse []rpadmin.ReconfigurationsResponse for _, t := range topics { nt := strings.Split(t, "/") if len(nt) > 2 { @@ -70,7 +239,7 @@ func newPartitionMovementsStatusCommand(fs afero.Fs, p *config.Params) *cobra.Co isInternalNs := len(nt) == 2 && r.Ns == nt[0] && r.Topic == nt[1] if isKafkaNs || isInternalNs { - if len(partitions) == 0 || contains(partitions, strconv.Itoa(r.PartitionID)) { + if len(partitions) == 0 || 
slices.Contains(partitions, strconv.Itoa(r.PartitionID)) { filteredResponse = append(filteredResponse, r) } } @@ -80,107 +249,20 @@ func newPartitionMovementsStatusCommand(fs afero.Fs, p *config.Params) *cobra.Co response = filteredResponse } - sizeFn := func(size int) string { - if human { - return units.HumanSize(float64(size)) - } - return strconv.Itoa(size) - } - - f := func(rr *rpadmin.ReconfigurationsResponse) any { - var ( - newReplica []int - oldReplica []int - ) - nt := rr.Ns + "/" + rr.Topic - if rr.PartitionSize > 0 { - completion = rr.BytesMoved * 100 / rr.PartitionSize - } - for _, r := range rr.NewReplicas { - newReplica = append(newReplica, r.NodeID) - } - for _, r := range rr.PreviousReplicas { - oldReplica = append(oldReplica, r.NodeID) - } - return struct { - NT string - PartitionID int - MovingFrom []int - MovingTo []int - Completion int - PartitionSize string - BytesMoved string - BytesRemaining string - }{ - nt, - rr.PartitionID, - oldReplica, - newReplica, - completion, - sizeFn(rr.PartitionSize), - sizeFn(rr.BytesMoved), - sizeFn(rr.BytesLeft), - } - } - types.Sort(response) - const ( - secMove = "Partition movements" - secReconcile = "Reconciliation statuses" - ) - sections := out.NewSections( - out.ConditionalSectionHeaders(map[string]bool{ - secMove: true, // we always print this section - secReconcile: all, // we only print this section if -a is passed - })..., - ) - sections.Add(secMove, func() { - headers := []string{"Namespace-Topic", "Partition", "Moving-from", "Moving-to", "Completion-%", "Partition-size", "Bytes-moved", "Bytes-remaining"} - tw := out.NewTable(headers...) - defer tw.Flush() - for _, tps := range response { - tw.PrintStructFields(f(&tps)) - } - }) - - sections.Add(secReconcile, func() { - var j int - for _, p := range response { - fmt.Printf("%s\n", p.Ns+"/"+p.Topic+"/"+strconv.Itoa(p.PartitionID)) - headers := []string{"Node-id", "Core", "Type", "Retry-number", "Revision", "Status"} - tw := out.NewTable(headers...) 
- for _, rs := range p.ReconciliationStatuses { - var row []any - row = append(row, rs.NodeID) - for _, s := range rs.Operations { - row = append(row, s.Core, s.Type, s.RetryNumber, s.Revision, s.Status) - } - tw.Print(row...) - } - tw.Flush() - j++ - if j < len(response) { - fmt.Println() - } - } - }) + printMoveStatus(f, buildMoveStatusResponse(response, all), human, cmd.OutOrStdout()) }, } cmd.Flags().BoolVarP(&all, "print-all", "a", false, "Print internal states about movements for debugging") cmd.Flags().BoolVarP(&human, "human-readable", "H", false, "Print the partition size in a human-readable form") cmd.Flags().StringSliceVarP(&partitions, "partition", "p", nil, "Partitions to filter ongoing movements status (repeatable)") + p.InstallFormatFlag(cmd) return cmd } -// This function returns true when a partition that movement is -// ongoing is a requested partition by the --partition option. -func contains(pReq []string, pRes string) bool { - return slices.Contains(pReq, pRes) -} - const helpListMovement = `Show ongoing partition movements. By default this command lists all ongoing partition movements in the cluster. @@ -211,4 +293,7 @@ Using -H, it prints the partition size in a human-readable format Using "--print-all / -a" the command additionally prints the column "RECONCILIATION STATUSES", which reveals the internal status of the ongoing reconciliations. Reported errors do not necessarily mean real problems. + +The --format flag controls the output format: text (default), json, yaml, or +help (prints field descriptions). ` diff --git a/src/go/rpk/pkg/cli/cluster/partitions/move_status_test.go b/src/go/rpk/pkg/cli/cluster/partitions/move_status_test.go new file mode 100644 index 0000000000000..00aa001def937 --- /dev/null +++ b/src/go/rpk/pkg/cli/cluster/partitions/move_status_test.go @@ -0,0 +1,223 @@ +// Copyright 2026 Redpanda Data, Inc. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package partitions + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/redpanda-data/common-go/rpadmin" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestBuildMoveStatuses(t *testing.T) { + input := []rpadmin.ReconfigurationsResponse{ + { + Ns: "kafka", + Topic: "foo", + PartitionID: 0, + PartitionSize: 1000, + BytesMoved: 500, + BytesLeft: 500, + PreviousReplicas: []rpadmin.Replica{ + {NodeID: 1, Core: 0}, + {NodeID: 2, Core: 0}, + }, + NewReplicas: []rpadmin.Replica{ + {NodeID: 1, Core: 0}, + {NodeID: 3, Core: 0}, + }, + }, + { + Ns: "kafka", + Topic: "bar", + PartitionID: 1, + PartitionSize: 0, // zero partition size → completion stays 0 + BytesMoved: 0, + BytesLeft: 200, + PreviousReplicas: []rpadmin.Replica{{NodeID: 2, Core: 0}}, + NewReplicas: []rpadmin.Replica{{NodeID: 4, Core: 1}}, + }, + } + + statuses := buildMoveStatuses(input) + require.Len(t, statuses, 2) + + require.Equal(t, "kafka/foo", statuses[0].NamespaceTopic) + require.Equal(t, 0, statuses[0].Partition) + require.Equal(t, []int{1, 2}, statuses[0].MovingFrom) + require.Equal(t, []int{1, 3}, statuses[0].MovingTo) + require.Equal(t, 50, statuses[0].CompletionPercent) + require.Equal(t, 1000, statuses[0].PartitionSize) + require.Equal(t, 500, statuses[0].BytesMoved) + require.Equal(t, 500, statuses[0].BytesRemaining) + + require.Equal(t, "kafka/bar", statuses[1].NamespaceTopic) + require.Equal(t, 0, statuses[1].CompletionPercent) // zero partition size +} + +func sampleMoveStatuses() []partitionMoveStatus { + return []partitionMoveStatus{ + { + NamespaceTopic: 
"kafka/foo", + Partition: 0, + MovingFrom: []int{1, 2}, + MovingTo: []int{1, 3}, + CompletionPercent: 50, + PartitionSize: 1024, + BytesMoved: 512, + BytesRemaining: 512, + }, + { + NamespaceTopic: "kafka/bar", + Partition: 1, + MovingFrom: []int{2}, + MovingTo: []int{4}, + CompletionPercent: 0, + PartitionSize: 200, + BytesMoved: 0, + BytesRemaining: 200, + }, + } +} + +func sampleReconciliations() []partitionReconciliation { + return []partitionReconciliation{ + { + NamespaceTopic: "kafka/foo", + Partition: 0, + NodeStatuses: []reconciliationNodeStatus{{ + NodeID: 1, + Operations: []reconciliationOperation{{ + Core: 0, Type: "update", RetryNumber: 0, Revision: 42, Status: "done", + }}, + }}, + }, + } +} + +func TestPrintMoveStatus(t *testing.T) { + resp := moveStatusResponse{Movements: sampleMoveStatuses()} + + header := []string{"NAMESPACE-TOPIC", "PARTITION", "MOVING-FROM", "MOVING-TO", "COMPLETION-%", "PARTITION-SIZE", "BYTES-MOVED", "BYTES-REMAINING"} + // Fields splits "[1 2]" into "[1" and "2]" — an acceptable trade for + // staying tabwriter-padding-agnostic. + cases := []struct { + name string + human bool + want [][]string + }{ + { + name: "bytes", + want: [][]string{ + header, + {"kafka/foo", "0", "[1", "2]", "[1", "3]", "50", "1024", "512", "512"}, + {"kafka/bar", "1", "[2]", "[4]", "0", "200", "0", "200"}, + }, + }, + { + name: "human", + human: true, + want: [][]string{ + header, + {"kafka/foo", "0", "[1", "2]", "[1", "3]", "50", "1.024kB", "512B", "512B"}, + {"kafka/bar", "1", "[2]", "[4]", "0", "200B", "0B", "200B"}, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printMoveStatus(f, resp, c.human, b) + require.Equal(t, c.want, out.TableRows(b.String())) + }) + } +} + +func TestPrintMoveStatusJSON(t *testing.T) { + // --format json emits a single JSON document and MUST NOT include any + // text section headers like "PARTITION MOVEMENTS". 
+ t.Run("movements only", func(t *testing.T) { + b := &strings.Builder{} + resp := moveStatusResponse{Movements: sampleMoveStatuses()} + printMoveStatus(config.OutFormatter{Kind: "json"}, resp, false, b) + + s := b.String() + require.NotContains(t, s, "PARTITION MOVEMENTS") + require.NotContains(t, s, "RECONCILIATION STATUSES") + + var got moveStatusResponse + require.NoError(t, json.Unmarshal([]byte(s), &got)) + require.Equal(t, resp.Movements, got.Movements) + require.Nil(t, got.Reconciliations) + }) + + t.Run("with reconciliations (print-all)", func(t *testing.T) { + b := &strings.Builder{} + resp := moveStatusResponse{ + Movements: sampleMoveStatuses(), + Reconciliations: sampleReconciliations(), + } + printMoveStatus(config.OutFormatter{Kind: "json"}, resp, false, b) + + s := b.String() + require.NotContains(t, s, "PARTITION MOVEMENTS") + require.NotContains(t, s, "RECONCILIATION STATUSES") + + var got moveStatusResponse + require.NoError(t, json.Unmarshal([]byte(s), &got)) + require.Equal(t, resp.Movements, got.Movements) + require.Equal(t, resp.Reconciliations, got.Reconciliations) + }) + + t.Run("empty", func(t *testing.T) { + b := &strings.Builder{} + printMoveStatus(config.OutFormatter{Kind: "json"}, moveStatusResponse{Movements: []partitionMoveStatus{}}, false, b) + require.Equal(t, "{\"movements\":[]}\n", b.String()) + }) +} + +func TestBuildMoveStatusResponse(t *testing.T) { + reconfigs := []rpadmin.ReconfigurationsResponse{{ + Ns: "kafka", + Topic: "foo", + PartitionID: 0, + PartitionSize: 100, + BytesMoved: 50, + BytesLeft: 50, + ReconciliationStatuses: []rpadmin.Status{{ + NodeID: 1, + Operations: []rpadmin.Operation{{ + Core: 0, Type: "update", RetryNumber: 0, Revision: 7, Status: "done", + }}, + }}, + }} + + t.Run("without reconciliations", func(t *testing.T) { + resp := buildMoveStatusResponse(reconfigs, false) + require.Len(t, resp.Movements, 1) + require.Nil(t, resp.Reconciliations) + }) + + t.Run("with reconciliations", func(t *testing.T) { + 
resp := buildMoveStatusResponse(reconfigs, true) + require.Len(t, resp.Movements, 1) + require.Len(t, resp.Reconciliations, 1) + require.Equal(t, "kafka/foo", resp.Reconciliations[0].NamespaceTopic) + require.Equal(t, 0, resp.Reconciliations[0].Partition) + require.Equal(t, 1, resp.Reconciliations[0].NodeStatuses[0].NodeID) + require.Equal(t, "done", resp.Reconciliations[0].NodeStatuses[0].Operations[0].Status) + }) +} diff --git a/src/go/rpk/pkg/cli/cluster/partitions/status.go b/src/go/rpk/pkg/cli/cluster/partitions/status.go index 879ef073d8795..5acfc53f16b74 100644 --- a/src/go/rpk/pkg/cli/cluster/partitions/status.go +++ b/src/go/rpk/pkg/cli/cluster/partitions/status.go @@ -11,6 +11,7 @@ package partitions import ( "fmt" + "io" "github.com/redpanda-data/common-go/rpadmin" "github.com/twmb/types" @@ -22,6 +23,74 @@ import ( "github.com/spf13/cobra" ) +type balancerStatusResponse struct { + Status string `json:"status" yaml:"status"` + SecondsSinceLastTick int `json:"seconds_since_last_tick" yaml:"seconds_since_last_tick"` + CurrentReassignmentsCount int `json:"current_reassignments_count" yaml:"current_reassignments_count"` + PartitionsPendingForceRecovery *int `json:"partitions_pending_force_recovery_count,omitempty" yaml:"partitions_pending_force_recovery_count,omitempty"` + PartitionsPendingRecoverySample []string `json:"partitions_pending_force_recovery_sample,omitempty" yaml:"partitions_pending_force_recovery_sample,omitempty"` + UnavailableNodes []int `json:"unavailable_nodes,omitempty" yaml:"unavailable_nodes,omitempty"` + OverDiskLimitNodes []int `json:"over_disk_limit_nodes,omitempty" yaml:"over_disk_limit_nodes,omitempty"` + BrokerReplicaDistribution []ReplicaDistribution `json:"broker_replica_distribution,omitempty" yaml:"broker_replica_distribution,omitempty"` +} + +func buildBalancerStatusResponse(pbs rpadmin.PartitionBalancerStatus, clusterPartitions []rpadmin.ClusterPartition) balancerStatusResponse { + replicaDist := 
buildReplicaPerBroker(clusterPartitions) + types.Sort(replicaDist) + + resp := balancerStatusResponse{ + Status: pbs.Status, + SecondsSinceLastTick: pbs.SecondsSinceLastTick, + CurrentReassignmentsCount: pbs.CurrentReassignmentsCount, + PartitionsPendingForceRecovery: pbs.PartitionsPendingForceRecovery, + PartitionsPendingRecoverySample: pbs.PartitionsPendingRecoveryList, + UnavailableNodes: pbs.Violations.UnavailableNodes, + OverDiskLimitNodes: pbs.Violations.OverDiskLimitNodes, + BrokerReplicaDistribution: replicaDist, + } + return resp +} + +func printBalancerStatus(f config.OutFormatter, resp balancerStatusResponse, w io.Writer) { + if isText, _, t, err := f.Format(resp); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, t) + return + } + + const ( + secBalancerStatus = "Balancer status" + secReplicaDist = "Replica distribution" + ) + sections := out.NewSections( + out.ConditionalSectionHeaders(map[string]bool{ + secBalancerStatus: true, + secReplicaDist: len(resp.BrokerReplicaDistribution) > 0, + })..., + ) + sections.Add(secBalancerStatus, func() { + tw := out.NewTableTo(w) + defer tw.Flush() + tw.Print("Status:", resp.Status) + tw.Print("Seconds Since Last Tick:", resp.SecondsSinceLastTick) + tw.Print("Current Reassignment Count:", resp.CurrentReassignmentsCount) + if resp.PartitionsPendingForceRecovery != nil { + tw.Print(fmt.Sprintf("Partitions Pending Recovery (%v):", *resp.PartitionsPendingForceRecovery), resp.PartitionsPendingRecoverySample) + } + if len(resp.UnavailableNodes) > 0 || len(resp.OverDiskLimitNodes) > 0 { + tw.Print("Unavailable Nodes:", resp.UnavailableNodes) + tw.Print("Over Disk Limit Nodes:", resp.OverDiskLimitNodes) + } + }) + sections.Add(secReplicaDist, func() { + tw := out.NewTableTo(w, "BROKER", "PARTITION-COUNT") + defer tw.Flush() + for _, d := range resp.BrokerReplicaDistribution { + tw.Print(d.NodeID, d.Count) + } + }) +} + func newBalancerStatusCommand(fs afero.Fs, p 
*config.Params) *cobra.Command { cmd := &cobra.Command{ Use: "balancer-status", @@ -81,6 +150,11 @@ investigation. A few areas to investigate: `, Args: cobra.ExactArgs(0), Run: func(cmd *cobra.Command, _ []string) { + f := p.Formatter + if h, ok := f.Help(balancerStatusResponse{}); ok { + out.Exit(h) + } + p, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) config.CheckExitCloudAdmin(p) @@ -97,42 +171,9 @@ investigation. A few areas to investigate: fmt.Printf("unable to query all partitions in the cluster: %v", err) } - printBalancerStatus(status, clusterPartitions) + printBalancerStatus(f, buildBalancerStatusResponse(status, clusterPartitions), cmd.OutOrStdout()) }, } - + p.InstallFormatFlag(cmd) return cmd } - -func printBalancerStatus(pbs rpadmin.PartitionBalancerStatus, clusterPartitions []rpadmin.ClusterPartition) { - const ( - secBalancerStatus = "Balancer status" - secReplicaDist = "Replica distribution" - ) - sections := out.NewSections( - out.ConditionalSectionHeaders(map[string]bool{ - secBalancerStatus: true, - secReplicaDist: true, - })..., - ) - sections.Add(secBalancerStatus, func() { - tw := out.NewTable() - defer tw.Flush() - tw.Print("Status:", pbs.Status) - tw.Print("Seconds Since Last Tick:", pbs.SecondsSinceLastTick) - tw.Print("Current Reassignment Count:", pbs.CurrentReassignmentsCount) - if pbs.PartitionsPendingForceRecovery != nil && pbs.PartitionsPendingRecoveryList != nil { - tw.Print(fmt.Sprintf("Partitions Pending Recovery (%v):", *pbs.PartitionsPendingForceRecovery), pbs.PartitionsPendingRecoveryList) - } - v := pbs.Violations - if len(v.OverDiskLimitNodes) > 0 || len(v.UnavailableNodes) > 0 { - tw.Print("Unavailable Nodes:", v.UnavailableNodes) - tw.Print("Over Disk Limit Nodes:", v.OverDiskLimitNodes) - } - }) - sections.Add(secReplicaDist, func() { - replicaDist := buildReplicaPerBroker(clusterPartitions) - types.Sort(replicaDist) - printReplicaDistribution(replicaDist) - }) -} diff --git 
a/src/go/rpk/pkg/cli/cluster/partitions/status_test.go b/src/go/rpk/pkg/cli/cluster/partitions/status_test.go new file mode 100644 index 0000000000000..2b684a724f8f6 --- /dev/null +++ b/src/go/rpk/pkg/cli/cluster/partitions/status_test.go @@ -0,0 +1,111 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package partitions + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/redpanda-data/common-go/rpadmin" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func ptrInt(v int) *int { return &v } + +func TestPrintBalancerStatus(t *testing.T) { + pbs := rpadmin.PartitionBalancerStatus{ + Status: "ready", + SecondsSinceLastTick: 5, + CurrentReassignmentsCount: 2, + Violations: rpadmin.PartitionBalancerViolations{ + UnavailableNodes: []int{1, 2}, + OverDiskLimitNodes: []int{3}, + }, + } + clusterPartitions := []rpadmin.ClusterPartition{ + {Replicas: []rpadmin.Replica{{NodeID: 1}, {NodeID: 2}}}, + {Replicas: []rpadmin.Replica{{NodeID: 1}}}, + } + + resp := buildBalancerStatusResponse(pbs, clusterPartitions) + + require.Equal(t, "ready", resp.Status) + require.Equal(t, 5, resp.SecondsSinceLastTick) + require.Equal(t, 2, resp.CurrentReassignmentsCount) + require.Equal(t, []int{1, 2}, resp.UnavailableNodes) + require.Equal(t, []int{3}, resp.OverDiskLimitNodes) + require.Len(t, resp.BrokerReplicaDistribution, 2) + + f := config.OutFormatter{Kind: "text"} + var buf strings.Builder + printBalancerStatus(f, resp, &buf) + require.Equal(t, [][]string{ + {"Status:", "ready"}, + {"Seconds", "Since", "Last", "Tick:", "5"}, + {"Current", "Reassignment", "Count:", "2"}, + 
{"Unavailable", "Nodes:", "[1", "2]"}, + {"Over", "Disk", "Limit", "Nodes:", "[3]"}, + {"BROKER", "PARTITION-COUNT"}, + {"1", "2"}, + {"2", "1"}, + }, out.TableRows(buf.String())) +} + +func TestPrintBalancerStatusNoBrokerDist(t *testing.T) { + pbs := rpadmin.PartitionBalancerStatus{ + Status: "off", + SecondsSinceLastTick: 0, + } + + resp := buildBalancerStatusResponse(pbs, nil) + + require.Equal(t, "off", resp.Status) + require.Empty(t, resp.BrokerReplicaDistribution) + + // JSON should omit broker_replica_distribution when empty. + jsonBytes, err := json.Marshal(resp) + require.NoError(t, err) + require.NotContains(t, string(jsonBytes), "broker_replica_distribution") + + f := config.OutFormatter{Kind: "text"} + var buf strings.Builder + printBalancerStatus(f, resp, &buf) + require.Equal(t, [][]string{ + {"Status:", "off"}, + {"Seconds", "Since", "Last", "Tick:", "0"}, + {"Current", "Reassignment", "Count:", "0"}, + }, out.TableRows(buf.String())) +} + +func TestPrintBalancerStatusPendingRecovery(t *testing.T) { + count := 3 + pbs := rpadmin.PartitionBalancerStatus{ + Status: "stalled", + PartitionsPendingForceRecovery: &count, + PartitionsPendingRecoveryList: []string{"foo/0/0", "bar/1/0"}, + } + + resp := buildBalancerStatusResponse(pbs, nil) + require.Equal(t, ptrInt(3), resp.PartitionsPendingForceRecovery) + require.Equal(t, []string{"foo/0/0", "bar/1/0"}, resp.PartitionsPendingRecoverySample) + + f := config.OutFormatter{Kind: "text"} + var buf strings.Builder + printBalancerStatus(f, resp, &buf) + require.Equal(t, [][]string{ + {"Status:", "stalled"}, + {"Seconds", "Since", "Last", "Tick:", "0"}, + {"Current", "Reassignment", "Count:", "0"}, + {"Partitions", "Pending", "Recovery", "(3):", "[foo/0/0", "bar/1/0]"}, + }, out.TableRows(buf.String())) +} diff --git a/src/go/rpk/pkg/cli/cluster/selftest/BUILD b/src/go/rpk/pkg/cli/cluster/selftest/BUILD index 4ef2675b4021c..cfd2cd2de2421 100644 --- a/src/go/rpk/pkg/cli/cluster/selftest/BUILD +++ 
b/src/go/rpk/pkg/cli/cluster/selftest/BUILD @@ -28,6 +28,7 @@ go_test( srcs = ["selftest_test.go"], embed = [":selftest"], deps = [ + "//src/go/rpk/pkg/config", "@com_github_redpanda_data_common_go_rpadmin//:rpadmin", "@com_github_stretchr_testify//require", ], diff --git a/src/go/rpk/pkg/cli/cluster/selftest/selftest_test.go b/src/go/rpk/pkg/cli/cluster/selftest/selftest_test.go index 3160418facb16..013474b25ffeb 100644 --- a/src/go/rpk/pkg/cli/cluster/selftest/selftest_test.go +++ b/src/go/rpk/pkg/cli/cluster/selftest/selftest_test.go @@ -10,14 +10,36 @@ package selftest import ( + "bytes" "encoding/json" "testing" "github.com/redpanda-data/common-go/rpadmin" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" "github.com/stretchr/testify/require" ) +func TestPrintSelfTestStatus(t *testing.T) { + f := config.OutFormatter{Kind: "text"} + + t.Run("running nodes prints status message", func(t *testing.T) { + var buf bytes.Buffer + require.NoError(t, printSelfTestStatus(f, []rpadmin.SelfTestNodeReport{ + {NodeID: 0, Status: "running", Stage: "disk"}, + }, &buf)) + require.Equal(t, "Node 0 is still running disk self test\n", buf.String()) + }) + + t.Run("uninitialized prints idle message", func(t *testing.T) { + var buf bytes.Buffer + require.NoError(t, printSelfTestStatus(f, []rpadmin.SelfTestNodeReport{ + {NodeID: 0, Status: "idle", Stage: "idle"}, + }, &buf)) + require.Equal(t, "All nodes are idle with no cached test results\n", buf.String()) + }) +} + func TestClusterStatus(t *testing.T) { for _, test := range []struct { name string diff --git a/src/go/rpk/pkg/cli/cluster/selftest/status.go b/src/go/rpk/pkg/cli/cluster/selftest/status.go index 996044c710f17..6e68307cb09da 100644 --- a/src/go/rpk/pkg/cli/cluster/selftest/status.go +++ b/src/go/rpk/pkg/cli/cluster/selftest/status.go @@ -10,8 +10,8 @@ package selftest import ( - "encoding/json" "fmt" + "io" "sort" "strings" "time" @@ -33,8 +33,72 @@ const ( statusRunning = "running" ) +// selfTestResult 
mirrors rpadmin.SelfTestNodeResult with yaml tags for +// structured output support. +type selfTestResult struct { + P50 *uint `json:"p50,omitempty" yaml:"p50,omitempty"` + P90 *uint `json:"p90,omitempty" yaml:"p90,omitempty"` + P99 *uint `json:"p99,omitempty" yaml:"p99,omitempty"` + P999 *uint `json:"p999,omitempty" yaml:"p999,omitempty"` + MaxLatency *uint `json:"max_latency,omitempty" yaml:"max_latency,omitempty"` + RequestsPerSec *uint `json:"rps,omitempty" yaml:"rps,omitempty"` + BytesPerSec *uint `json:"bps,omitempty" yaml:"bps,omitempty"` + Timeouts uint `json:"timeouts" yaml:"timeouts"` + TestID string `json:"test_id" yaml:"test_id"` + TestName string `json:"name" yaml:"name"` + TestInfo string `json:"info" yaml:"info"` + TestType string `json:"test_type" yaml:"test_type"` + StartTime int64 `json:"start_time" yaml:"start_time"` + EndTime int64 `json:"end_time" yaml:"end_time"` + Duration uint `json:"duration" yaml:"duration"` + Warning *string `json:"warning,omitempty" yaml:"warning,omitempty"` + Error *string `json:"error,omitempty" yaml:"error,omitempty"` +} + +// selfTestNodeReport mirrors rpadmin.SelfTestNodeReport with yaml tags for +// structured output support. 
+type selfTestNodeReport struct { + NodeID int `json:"node_id" yaml:"node_id"` + Status string `json:"status" yaml:"status"` + Stage string `json:"stage" yaml:"stage"` + Results []selfTestResult `json:"results,omitempty" yaml:"results,omitempty"` +} + +func toSelfTestNodeReports(reports []rpadmin.SelfTestNodeReport) []selfTestNodeReport { + result := make([]selfTestNodeReport, 0, len(reports)) + for _, r := range reports { + nr := selfTestNodeReport{ + NodeID: r.NodeID, + Status: r.Status, + Stage: r.Stage, + } + for _, res := range r.Results { + nr.Results = append(nr.Results, selfTestResult{ + P50: res.P50, + P90: res.P90, + P99: res.P99, + P999: res.P999, + MaxLatency: res.MaxLatency, + RequestsPerSec: res.RequestsPerSec, + BytesPerSec: res.BytesPerSec, + Timeouts: res.Timeouts, + TestID: res.TestID, + TestName: res.TestName, + TestInfo: res.TestInfo, + TestType: res.TestType, + StartTime: res.StartTime, + EndTime: res.EndTime, + Duration: res.Duration, + Warning: res.Warning, + Error: res.Error, + }) + } + result = append(result, nr) + } + return result +} + func newStatusCommand(fs afero.Fs, p *config.Params) *cobra.Command { - var format string cmd := &cobra.Command{ Use: "status", Short: "Returns the status of the current running tests or the cached results of the last completed run.", @@ -66,69 +130,78 @@ If Tiered Storage is not enabled, the cloud storage tests won't run and a warnin `, Args: cobra.ExactArgs(0), Run: func(cmd *cobra.Command, _ []string) { - // Load config settings + f := p.Formatter + if h, ok := f.Help([]selfTestNodeReport{}); ok { + out.Exit(h) + } + p, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) config.CheckExitCloudAdmin(p) - // Create new HTTP client for communication w/ admin server cl, err := adminapi.NewClient(cmd.Context(), fs, p) out.MaybeDie(err, "unable to initialize admin client: %v", err) - // Make HTTP GET request to any node requesting for status - // Returns last runs results, or 
status of which nodes have jobs running reports, err := cl.SelfTestStatus(cmd.Context()) out.MaybeDie(err, "unable to query self-test status: %v", err) - if format == "json" { - asJSON, err := json.MarshalIndent(reports, "", "\t") - out.MaybeDie(err, "unable to format response as JSON: %v", err) - fmt.Print(string(asJSON)) - return - } + err = printSelfTestStatus(f, reports, cmd.OutOrStdout()) + out.MaybeDieErr(err) + }, + } + p.InstallFormatFlag(cmd) + return cmd +} - // If there is outstanding work, indicate which nodes, then exit - running := runningNodes(reports) - if len(running) > 0 { - keys := make([]int, 0, len(running)) - for k := range running { - keys = append(keys, k) - } - sort.Ints(keys) - for _, k := range keys { - fmt.Printf("Node %v is still running %v self test\n", k, running[k]) - } - return - } +func printSelfTestStatus(f config.OutFormatter, reports []rpadmin.SelfTestNodeReport, w io.Writer) error { + converted := toSelfTestNodeReports(reports) + if isText, _, formatted, err := f.Format(converted); !isText { + if err != nil { + return fmt.Errorf("unable to format self-test status: %w", err) + } + fmt.Fprintln(w, formatted) + return nil + } - // .. or redpanda has never run any tests, no cached data exists - if isUninitialized(reports) { - fmt.Println("All nodes are idle with no cached test results") - return - } + // If there is outstanding work, indicate which nodes, then exit. 
+ running := runningNodes(reports) + if len(running) > 0 { + keys := make([]int, 0, len(running)) + for k := range running { + keys = append(keys, k) + } + sort.Ints(keys) + for _, k := range keys { + fmt.Fprintf(w, "Node %v is still running %v self test\n", k, running[k]) + } + return nil + } - // In all other cases there are results, print them and exit - tw := out.NewTabWriter() - defer tw.Flush() - for _, report := range reports { - header := makeReportHeader(report) - tw.PrintColumn(header) - tw.PrintColumn(strings.Repeat("=", len(header))) - tableResults := makeReportTable(report) - if len(tableResults) == 0 { - tw.PrintColumn("INFO", "No cached results for node") - tw.Line() - continue - } - for _, row := range tableResults { - all := rowDataAsInterface(row[1:]) - tw.PrintColumn(row[0], all...) - } - } - }, + // No cached data exists if Redpanda has never run any tests. + if isUninitialized(reports) { + fmt.Fprintln(w, "All nodes are idle with no cached test results") + return nil } - cmd.Flags().StringVar(&format, "format", "text", "Output format (text, json)") - return cmd + + // Print results grouped by node. + tw := out.NewTabWriterTo(w) + defer tw.Flush() + for _, report := range reports { + header := makeReportHeader(report) + tw.PrintColumn(header) + tw.PrintColumn(strings.Repeat("=", len(header))) + tableResults := makeReportTable(report) + if len(tableResults) == 0 { + tw.PrintColumn("INFO", "No cached results for node") + tw.Line() + continue + } + for _, row := range tableResults { + all := rowDataAsInterface(row[1:]) + tw.PrintColumn(row[0], all...) 
+ } + } + return nil } func rowDataAsInterface(row []string) []any { diff --git a/src/go/rpk/pkg/cli/group/BUILD b/src/go/rpk/pkg/cli/group/BUILD index 10025520cff24..f01b665b7874d 100644 --- a/src/go/rpk/pkg/cli/group/BUILD +++ b/src/go/rpk/pkg/cli/group/BUILD @@ -28,9 +28,15 @@ go_library( go_test( name = "group_test", size = "small", - srcs = ["seek_test.go"], + srcs = [ + "group_test.go", + "offset_delete_test.go", + "seek_test.go", + ], embed = [":group"], deps = [ + "//src/go/rpk/pkg/config", + "//src/go/rpk/pkg/out", "//src/go/rpk/pkg/testfs", "@com_github_stretchr_testify//require", "@com_github_twmb_franz_go_pkg_kadm//:kadm", diff --git a/src/go/rpk/pkg/cli/group/group.go b/src/go/rpk/pkg/cli/group/group.go index d6322b7c8aa79..20cc27327c060 100644 --- a/src/go/rpk/pkg/cli/group/group.go +++ b/src/go/rpk/pkg/cli/group/group.go @@ -13,6 +13,7 @@ package group import ( "context" "fmt" + "io" "slices" "strings" @@ -84,6 +85,46 @@ members and their lag), and manage offsets. return cmd } +type listedGroup struct { + Broker int32 `json:"broker" yaml:"broker"` + Group string `json:"group" yaml:"group"` + State string `json:"state,omitempty" yaml:"state,omitempty"` +} + +func buildListedGroups(groups []kadm.ListedGroup) []listedGroup { + result := make([]listedGroup, 0, len(groups)) + for _, g := range groups { + result = append(result, listedGroup{ + Broker: g.Coordinator, + Group: g.Group, + State: g.State, + }) + } + return result +} + +func printGroupList(f config.OutFormatter, groups []listedGroup, w io.Writer) { + if isText, _, t, err := f.Format(groups); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, t) + return + } + hasState := slices.ContainsFunc(groups, func(g listedGroup) bool { return g.State != "" }) + if hasState { + tw := out.NewTableTo(w, "BROKER", "GROUP", "STATE") + defer tw.Flush() + for _, g := range groups { + tw.Print(g.Broker, g.Group, g.State) + } + } else { + tw := 
out.NewTableTo(w, "BROKER", "GROUP") + defer tw.Flush() + for _, g := range groups { + tw.Print(g.Broker, g.Group) + } + } +} + func newListCommand(fs afero.Fs, p *config.Params) *cobra.Command { var filterStates []string validStates := []string{"PreparingRebalance", "CompletingRebalance", "Stable", "Dead", "Empty"} @@ -106,7 +147,12 @@ The STATE columns shows which state the group is in: - Empty: The group currently has no members. `, Args: cobra.ExactArgs(0), - Run: func(_ *cobra.Command, _ []string) { + Run: func(cmd *cobra.Command, _ []string) { + f := p.Formatter + if h, ok := f.Help([]listedGroup{}); ok { + out.Exit(h) + } + p, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) @@ -132,31 +178,7 @@ The STATE columns shows which state the group is in: listed, err := adm.ListGroups(context.Background(), normalizedFilterStates...) out.HandleShardError("ListGroups", err) - groups := listed.Sorted() - isV4Response := slices.ContainsFunc(groups, func(g kadm.ListedGroup) bool { return g.State != "" }) - - // Conditionally hide the STATE column for older brokers that - // do not return the state of the consumer group - if !isV4Response { - tw := out.NewTable("BROKER", "GROUP") - defer tw.Flush() - for _, g := range groups { - tw.PrintStructFields(struct { - Broker int32 - Group string - }{g.Coordinator, g.Group}) - } - } else { - tw := out.NewTable("BROKER", "GROUP", "STATE") - defer tw.Flush() - for _, g := range groups { - tw.PrintStructFields(struct { - Broker int32 - Group string - State string - }{g.Coordinator, g.Group, g.State}) - } - } + printGroupList(f, buildListedGroups(listed.Sorted()), cmd.OutOrStdout()) }, } @@ -165,11 +187,42 @@ The STATE columns shows which state the group is in: cmd.RegisterFlagCompletionFunc("states", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { return validStates, cobra.ShellCompDirectiveDefault }) + p.InstallFormatFlag(cmd) return cmd } +type deletedGroup 
struct { + Group string `json:"group" yaml:"group"` + Status string `json:"status" yaml:"status"` +} + +func buildDeletedGroups(groups []kadm.DeleteGroupResponse) []deletedGroup { + result := make([]deletedGroup, 0, len(groups)) + for _, g := range groups { + status := "OK" + if g.Err != nil { + status = g.Err.Error() + } + result = append(result, deletedGroup{Group: g.Group, Status: status}) + } + return result +} + +func printGroupDelete(f config.OutFormatter, groups []deletedGroup, w io.Writer) { + if isText, _, t, err := f.Format(groups); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, t) + return + } + tw := out.NewTableTo(w, "GROUP", "STATUS") + defer tw.Flush() + for _, g := range groups { + tw.Print(g.Group, g.Status) + } +} + func newDeleteCommand(fs afero.Fs, p *config.Params) *cobra.Command { - return &cobra.Command{ + cmd := &cobra.Command{ Use: "delete [GROUPS...]", Short: "Delete groups from brokers", Long: `Delete groups from brokers. @@ -192,7 +245,12 @@ automatically are cleaned up, such as when you create temporary groups for quick investigation or testing. This command helps you do that. `, Args: cobra.MinimumNArgs(1), - Run: func(_ *cobra.Command, args []string) { + Run: func(cmd *cobra.Command, args []string) { + f := p.Formatter + if h, ok := f.Help([]deletedGroup{}); ok { + out.Exit(h) + } + p, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) @@ -203,21 +261,9 @@ quick investigation or testing. This command helps you do that. deleted, err := adm.DeleteGroups(context.Background(), args...) 
out.HandleShardError("DeleteGroups", err) - tw := out.NewTable("GROUP", "STATUS") - defer tw.Flush() - for _, g := range deleted.Sorted() { - status := "OK" - if g.Err != nil { - status = g.Err.Error() - } - tw.PrintStructFields(struct { - Group string - Status string - }{ - g.Group, - status, - }) - } + printGroupDelete(f, buildDeletedGroups(deleted.Sorted()), cmd.OutOrStdout()) }, } + p.InstallFormatFlag(cmd) + return cmd } diff --git a/src/go/rpk/pkg/cli/group/group_test.go b/src/go/rpk/pkg/cli/group/group_test.go new file mode 100644 index 0000000000000..1067a5e83a31c --- /dev/null +++ b/src/go/rpk/pkg/cli/group/group_test.go @@ -0,0 +1,76 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package group + +import ( + "strings" + "testing" + + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestPrintGroupList(t *testing.T) { + cases := []struct { + name string + data []listedGroup + want [][]string + }{ + { + name: "with state", + data: []listedGroup{ + {Broker: 1, Group: "group-a", State: "Stable"}, + {Broker: 2, Group: "group-b", State: "Empty"}, + }, + want: [][]string{ + {"BROKER", "GROUP", "STATE"}, + {"1", "group-a", "Stable"}, + {"2", "group-b", "Empty"}, + }, + }, + { + name: "without state", + data: []listedGroup{ + {Broker: 1, Group: "group-a"}, + {Broker: 2, Group: "group-b"}, + }, + want: [][]string{ + {"BROKER", "GROUP"}, + {"1", "group-a"}, + {"2", "group-b"}, + }, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printGroupList(f, c.data, b) + 
require.Equal(t, c.want, out.TableRows(b.String())) + }) + } +} + +func TestPrintGroupDelete(t *testing.T) { + data := []deletedGroup{ + {Group: "group-a", Status: "OK"}, + {Group: "group-b", Status: "some error"}, + } + + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printGroupDelete(f, data, b) + require.Equal(t, [][]string{ + {"GROUP", "STATUS"}, + {"group-a", "OK"}, + {"group-b", "some", "error"}, + }, out.TableRows(b.String())) +} diff --git a/src/go/rpk/pkg/cli/group/offset_delete.go b/src/go/rpk/pkg/cli/group/offset_delete.go index 8b7b64885d75a..f481100e4e136 100644 --- a/src/go/rpk/pkg/cli/group/offset_delete.go +++ b/src/go/rpk/pkg/cli/group/offset_delete.go @@ -12,7 +12,9 @@ package group import ( "context" "fmt" + "io" "os" + "sort" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/kafka" @@ -23,6 +25,55 @@ import ( "github.com/twmb/franz-go/pkg/kadm" ) +type offsetDeleteResult struct { + Topic string `json:"topic" yaml:"topic"` + Partition int32 `json:"partition" yaml:"partition"` + Status string `json:"status" yaml:"status"` +} + +func buildOffsetDeleteResults(responses kadm.DeleteOffsetsResponses) ([]offsetDeleteResult, bool) { + ok := true + var results []offsetDeleteResult + + topics := make([]string, 0, len(responses)) + for topic := range responses { + topics = append(topics, topic) + } + sort.Strings(topics) + + for _, topic := range topics { + partitionErrors := responses[topic] + partitions := make([]int32, 0, len(partitionErrors)) + for p := range partitionErrors { + partitions = append(partitions, p) + } + sort.Slice(partitions, func(i, j int) bool { return partitions[i] < partitions[j] }) + + for _, partition := range partitions { + msg := "OK" + if e := partitionErrors[partition]; e != nil { + ok = false + msg = e.Error() + } + results = append(results, offsetDeleteResult{Topic: topic, Partition: partition, Status: msg}) + } + } + return results, ok +} + +func 
printOffsetDeleteResults(f config.OutFormatter, results []offsetDeleteResult, w io.Writer) { + if isText, _, t, err := f.Format(results); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, t) + return + } + tw := out.NewTabWriterTo(w) + defer tw.Flush() + for _, r := range results { + tw.Print(r.Topic, r.Partition, r.Status) + } +} + func NewOffsetDeleteCommand(fs afero.Fs, p *config.Params) *cobra.Command { var ( fromFile string @@ -51,7 +102,12 @@ topic_a 1 topic_b 0 `, Args: cobra.ExactArgs(1), - Run: func(_ *cobra.Command, args []string) { + Run: func(cmd *cobra.Command, args []string) { + f := p.Formatter + if h, ok := f.Help([]offsetDeleteResult{}); ok { + out.Exit(h) + } + p, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) @@ -81,19 +137,8 @@ topic_b 0 responses, err := adm.DeleteOffsets(context.Background(), args[0], topicsSet) out.MaybeDieErr(err) - tw := out.NewTabWriter() - ok := true - for topic, partitionErrors := range responses { - for partition, err := range partitionErrors { - msg := "OK" - if err != nil { - ok = false - msg = err.Error() - } - fmt.Fprintf(tw, "%s\t%d\t%s\n", topic, partition, msg) - } - } - tw.Flush() + results, ok := buildOffsetDeleteResults(responses) + printOffsetDeleteResults(f, results, cmd.OutOrStdout()) if !ok { // At least one row contained an error. os.Exit(1) } @@ -102,6 +147,7 @@ topic_b 0 cmd.Flags().StringVarP(&fromFile, "from-file", "f", "", "File of topic/partition tuples for which to delete offsets for") cmd.Flags().StringArrayVarP(&topicPartitions, "topic", "t", nil, "topic:partition_id (repeatable; e.g. 
-t foo:0,1,2 )") cmd.MarkFlagsMutuallyExclusive("from-file", "topic") + p.InstallFormatFlag(cmd) return cmd } diff --git a/src/go/rpk/pkg/cli/group/offset_delete_test.go b/src/go/rpk/pkg/cli/group/offset_delete_test.go new file mode 100644 index 0000000000000..05c5609418f98 --- /dev/null +++ b/src/go/rpk/pkg/cli/group/offset_delete_test.go @@ -0,0 +1,123 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package group + +import ( + "errors" + "strings" + "testing" + + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" + "github.com/twmb/franz-go/pkg/kadm" +) + +func TestBuildOffsetDeleteResults(t *testing.T) { + tests := []struct { + name string + responses kadm.DeleteOffsetsResponses + wantOK bool + wantOrder []offsetDeleteResult + }{ + { + name: "empty responses", + responses: kadm.DeleteOffsetsResponses{}, + wantOK: true, + wantOrder: nil, + }, + { + name: "single topic single partition success", + responses: kadm.DeleteOffsetsResponses{ + "foo": {0: nil}, + }, + wantOK: true, + wantOrder: []offsetDeleteResult{ + {Topic: "foo", Partition: 0, Status: "OK"}, + }, + }, + { + name: "single topic single partition error", + responses: kadm.DeleteOffsetsResponses{ + "foo": {0: errors.New("some error")}, + }, + wantOK: false, + wantOrder: []offsetDeleteResult{ + {Topic: "foo", Partition: 0, Status: "some error"}, + }, + }, + { + name: "topics sorted alphabetically", + responses: kadm.DeleteOffsetsResponses{ + "zebra": {0: nil}, + "apple": {0: nil}, + "mango": {0: nil}, + }, + wantOK: true, + wantOrder: []offsetDeleteResult{ + {Topic: "apple", Partition: 0, Status: "OK"}, + {Topic: 
"mango", Partition: 0, Status: "OK"}, + {Topic: "zebra", Partition: 0, Status: "OK"}, + }, + }, + { + name: "partitions sorted numerically", + responses: kadm.DeleteOffsetsResponses{ + "foo": {3: nil, 1: nil, 0: nil, 2: nil}, + }, + wantOK: true, + wantOrder: []offsetDeleteResult{ + {Topic: "foo", Partition: 0, Status: "OK"}, + {Topic: "foo", Partition: 1, Status: "OK"}, + {Topic: "foo", Partition: 2, Status: "OK"}, + {Topic: "foo", Partition: 3, Status: "OK"}, + }, + }, + { + name: "mixed success and error marks ok=false", + responses: kadm.DeleteOffsetsResponses{ + "foo": { + 0: nil, + 1: errors.New("bad partition"), + }, + }, + wantOK: false, + wantOrder: []offsetDeleteResult{ + {Topic: "foo", Partition: 0, Status: "OK"}, + {Topic: "foo", Partition: 1, Status: "bad partition"}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, ok := buildOffsetDeleteResults(tt.responses) + require.Equal(t, tt.wantOK, ok) + require.Equal(t, tt.wantOrder, got) + }) + } +} + +func TestPrintOffsetDeleteResults(t *testing.T) { + results := []offsetDeleteResult{ + {Topic: "apple", Partition: 0, Status: "OK"}, + {Topic: "apple", Partition: 1, Status: "some error"}, + {Topic: "zebra", Partition: 0, Status: "OK"}, + } + + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printOffsetDeleteResults(f, results, b) + require.Equal(t, [][]string{ + {"apple", "0", "OK"}, + {"apple", "1", "some", "error"}, + {"zebra", "0", "OK"}, + }, out.TableRows(b.String())) +} diff --git a/src/go/rpk/pkg/cli/group/seek.go b/src/go/rpk/pkg/cli/group/seek.go index d1ffdec7b6fab..9f8efd4b5ce78 100644 --- a/src/go/rpk/pkg/cli/group/seek.go +++ b/src/go/rpk/pkg/cli/group/seek.go @@ -14,6 +14,7 @@ import ( "context" "errors" "fmt" + "io" "strconv" "strings" @@ -85,7 +86,12 @@ Seek group G to the beginning of a topic it was not previously consuming: rpk group seek G --to start --topics foo --allow-new-topics `, Args: cobra.ExactArgs(1), - Run: func(_ 
*cobra.Command, args []string) { + Run: func(cmd *cobra.Command, args []string) { + f := p.Formatter + if h, ok := f.Help([]seekCommitResult{}); ok { + out.Exit(h) + } + p, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) @@ -94,8 +100,8 @@ Seek group G to the beginning of a topic it was not previously consuming: defer adm.Close() var n int - for _, f := range []string{to, toGroup, toFile} { - if f != "" { + for _, flag := range []string{to, toGroup, toFile} { + if flag != "" { n++ } } @@ -114,7 +120,7 @@ Seek group G to the beginning of a topic it was not previously consuming: group := args[0] - seek(fs, adm, group, to, toGroup, toFile, tset, allowNewTopics) + seek(fs, adm, group, to, toGroup, toFile, tset, allowNewTopics, f, cmd.OutOrStdout()) }, } @@ -123,6 +129,7 @@ Seek group G to the beginning of a topic it was not previously consuming: cmd.Flags().StringVar(&toFile, "to-file", "", "Seek to offsets as specified in the file") cmd.Flags().StringSliceVar(&topics, "topics", nil, "Only seek these topics, if any are specified") cmd.Flags().BoolVar(&allowNewTopics, "allow-new-topics", false, "Allow seeking to new topics not currently consumed (implied with --to-group or --to-file)") + p.InstallFormatFlag(cmd) return cmd } @@ -208,6 +215,8 @@ func seek( toFile string, topics map[string]bool, allowNewTopics bool, + f config.OutFormatter, + w io.Writer, ) { current := seekFetch(adm, group, topics, true) var commitTo kadm.Offsets @@ -261,51 +270,62 @@ func seek( committed, err := adm.CommitOffsets(context.Background(), group, commitTo) out.MaybeDie(err, "unable to commit offsets: %v", err) - useErr := committed.Error() != nil - headers := []string{"topic", "partition", "prior-offset", "current-offset"} - if useErr { - headers = append(headers, "error") - } - tw := out.NewTable(headers...) 
- defer tw.Flush() + results := make([]seekCommitResult, 0, len(committed)) for _, c := range committed.Sorted() { - s := seekCommit{c.Topic, c.Partition, -1, -1} + r := seekCommitResult{Topic: c.Topic, Partition: c.Partition, Prior: -1, Current: -1} if o, exists := current.Lookup(c.Topic, c.Partition); exists { - s.Prior = o.At + r.Prior = o.At } if o, exists := commitTo.Lookup(c.Topic, c.Partition); exists { - s.Current = o.At + r.Current = o.At } - se := seekCommitErr{c.Topic, c.Partition, -1, -1, ""} if c.Err != nil { // Redpanda / Kafka send UnknownMemberID when issuing OffsetCommit // if the group is not empty. This error is unclear to end users, so // we remap it here. if errors.Is(c.Err, kerr.UnknownMemberID) { - se.Error = "INVALID_OPERATION: seeking a non-empty group is not allowed." + r.Error = "INVALID_OPERATION: seeking a non-empty group is not allowed." } else { - se.Error = c.Err.Error() + r.Error = c.Err.Error() } } + results = append(results, r) + } + printSeekResults(f, results, w) +} + +func printSeekResults(f config.OutFormatter, results []seekCommitResult, w io.Writer) { + if isText, _, t, err := f.Format(results); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, t) + return + } + useErr := false + for _, r := range results { + if r.Error != "" { + useErr = true + break + } + } + headers := []string{"TOPIC", "PARTITION", "PRIOR-OFFSET", "CURRENT-OFFSET"} + if useErr { + headers = append(headers, "ERROR") + } + tw := out.NewTableTo(w, headers...) 
+ defer tw.Flush() + for _, r := range results { if useErr { - tw.PrintStructFields(se) + tw.Print(r.Topic, r.Partition, r.Prior, r.Current, r.Error) } else { - tw.PrintStructFields(s) + tw.Print(r.Topic, r.Partition, r.Prior, r.Current) } } } -type seekCommit struct { - Topic string - Partition int32 - Prior int64 - Current int64 -} - -type seekCommitErr struct { - Topic string - Partition int32 - Prior int64 - Current int64 - Error string +type seekCommitResult struct { + Topic string `json:"topic" yaml:"topic"` + Partition int32 `json:"partition" yaml:"partition"` + Prior int64 `json:"prior_offset" yaml:"prior_offset"` + Current int64 `json:"current_offset" yaml:"current_offset"` + Error string `json:"error,omitempty" yaml:"error,omitempty"` } diff --git a/src/go/rpk/pkg/cli/group/seek_test.go b/src/go/rpk/pkg/cli/group/seek_test.go index c9532e31c1ceb..3a4456f477a1d 100644 --- a/src/go/rpk/pkg/cli/group/seek_test.go +++ b/src/go/rpk/pkg/cli/group/seek_test.go @@ -10,13 +10,48 @@ package group import ( + "bytes" "testing" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/testfs" "github.com/stretchr/testify/require" "github.com/twmb/franz-go/pkg/kadm" ) +func TestPrintSeekResults(t *testing.T) { + f := config.OutFormatter{Kind: "text"} + + t.Run("no errors omits ERROR column", func(t *testing.T) { + results := []seekCommitResult{ + {Topic: "foo", Partition: 0, Prior: 10, Current: 0}, + {Topic: "bar", Partition: 1, Prior: 5, Current: 5}, + } + var buf bytes.Buffer + printSeekResults(f, results, &buf) + require.Equal(t, [][]string{ + {"TOPIC", "PARTITION", "PRIOR-OFFSET", "CURRENT-OFFSET"}, + {"foo", "0", "10", "0"}, + {"bar", "1", "5", "5"}, + }, out.TableRows(buf.String())) + }) + + t.Run("with errors adds ERROR column", func(t *testing.T) { + results := []seekCommitResult{ + {Topic: "foo", Partition: 0, Prior: 10, Current: 0, Error: "some 
error"}, + {Topic: "bar", Partition: 1, Prior: 5, Current: 5}, + } + var buf bytes.Buffer + printSeekResults(f, results, &buf) + require.Equal(t, [][]string{ + {"TOPIC", "PARTITION", "PRIOR-OFFSET", "CURRENT-OFFSET", "ERROR"}, + {"foo", "0", "10", "0", "some", "error"}, + {"bar", "1", "5", "5"}, + }, out.TableRows(buf.String())) + }) +} + func TestParseSeekFile(t *testing.T) { keep := map[string]bool{"foo": true} o0 := kadm.Offset{ diff --git a/src/go/rpk/pkg/cli/plugin/BUILD b/src/go/rpk/pkg/cli/plugin/BUILD index 375b90f34cd19..7d17da22446b0 100644 --- a/src/go/rpk/pkg/cli/plugin/BUILD +++ b/src/go/rpk/pkg/cli/plugin/BUILD @@ -1,4 +1,4 @@ -load("@rules_go//go:def.bzl", "go_library") +load("@rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "plugin", @@ -11,6 +11,7 @@ go_library( importpath = "github.com/redpanda-data/redpanda/src/go/rpk/pkg/cli/plugin", visibility = ["//visibility:public"], deps = [ + "//src/go/rpk/pkg/config", "//src/go/rpk/pkg/osutil", "//src/go/rpk/pkg/out", "//src/go/rpk/pkg/plugin", @@ -19,3 +20,14 @@ go_library( "@org_uber_go_zap//:zap", ], ) + +go_test( + name = "plugin_test", + srcs = ["list_test.go"], + embed = [":plugin"], + deps = [ + "//src/go/rpk/pkg/config", + "//src/go/rpk/pkg/out", + "@com_github_stretchr_testify//require", + ], +) diff --git a/src/go/rpk/pkg/cli/plugin/list.go b/src/go/rpk/pkg/cli/plugin/list.go index 64b045092dc5b..a975544887c61 100644 --- a/src/go/rpk/pkg/cli/plugin/list.go +++ b/src/go/rpk/pkg/cli/plugin/list.go @@ -11,14 +11,29 @@ package plugin import ( "fmt" + "io" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/plugin" "github.com/spf13/afero" "github.com/spf13/cobra" ) -func newListCommand(fs afero.Fs) *cobra.Command { +type pluginRow struct { + Name string `json:"name" yaml:"name"` + Description string `json:"description" yaml:"description"` + Installed bool 
`json:"installed" yaml:"installed"` + Message string `json:"message,omitempty" yaml:"message,omitempty"` +} + +type localPluginRow struct { + Name string `json:"name" yaml:"name"` + Path string `json:"path" yaml:"path"` + Shadows []string `json:"shadows,omitempty" yaml:"shadows,omitempty"` +} + +func newListCommand(fs afero.Fs, p *config.Params) *cobra.Command { var local bool cmd := &cobra.Command{ @@ -37,62 +52,113 @@ whether you have "shadowed" plugins (the same plugin specified multiple times). `, Args: cobra.ExactArgs(0), - Run: func(*cobra.Command, []string) { + Run: func(cmd *cobra.Command, _ []string) { + f := p.Formatter + if local { + if h, ok := f.Help([]localPluginRow{}); ok { + out.Exit(h) + } + } else { + if h, ok := f.Help([]pluginRow{}); ok { + out.Exit(h) + } + } + installed := plugin.ListPlugins(fs, plugin.UserPaths()) if local { installed.Sort() - - tw := out.NewTable("NAME", "PATH", "SHADOWS") - defer tw.Flush() - - for _, p := range installed { - var shadowed string - if len(p.ShadowedPaths) > 0 { - shadowed = p.ShadowedPaths[0] - } - tw.Print(p.FullName(), p.Path, shadowed) - - if len(p.ShadowedPaths) < 1 { - continue - } - for _, shadowed = range p.ShadowedPaths[1:] { - tw.Print("", "", shadowed) - } - } - + rows := buildLocalPluginList(installed) + printLocalPluginList(f, rows, cmd.OutOrStdout()) return } m, err := getManifest() out.MaybeDieErr(err) - tw := out.NewTable("NAME", "DESCRIPTION", "MESSAGE") - defer tw.Flush() - for _, entry := range m.Plugins { - name := entry.Name - _, entrySha, _ := entry.PathShaForUser() - - var message string + rows := buildPluginList(fs, installed, m.Plugins) + printPluginList(f, rows, cmd.OutOrStdout()) + }, + } - p, exists := installed.Find(name) - if exists { - name = "*" + name + p.InstallFormatFlag(cmd) + cmd.Flags().BoolVarP(&local, "local", "l", false, "List locally installed plugins and shadowed plugins") - sha, err := plugin.Sha256Path(fs, p.Path) - if err != nil { - message = fmt.Sprintf("unable 
to calculate local binary sha256: %v", err) - } else if sha != entrySha { - message = "local binary sha256 differs from manifest sha256" - } - } + return cmd +} - tw.Print(name, entry.Description, message) +func buildPluginList(fs afero.Fs, installed plugin.Plugins, entries []plugin.ManifestPlugin) []pluginRow { + rows := make([]pluginRow, 0, len(entries)) + for _, entry := range entries { + _, entrySha, _ := entry.PathShaForUser() + + row := pluginRow{ + Name: entry.Name, + Description: entry.Description, + } + + p, exists := installed.Find(entry.Name) + if exists { + row.Installed = true + sha, err := plugin.Sha256Path(fs, p.Path) + if err != nil { + row.Message = fmt.Sprintf("unable to calculate local binary sha256: %v", err) + } else if sha != entrySha { + row.Message = "local binary sha256 differs from manifest sha256" } - }, + } + + rows = append(rows, row) } + return rows +} - cmd.Flags().BoolVarP(&local, "local", "l", false, "List locally installed plugins and shadowed plugins") +func printPluginList(f config.OutFormatter, rows []pluginRow, w io.Writer) { + if isText, _, t, err := f.Format(rows); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintf(w, "%s\n", t) + return + } + tw := out.NewTableTo(w, "Name", "Description", "Message") + defer tw.Flush() + for _, r := range rows { + name := r.Name + if r.Installed { + name = "*" + name + } + tw.Print(name, r.Description, r.Message) + } +} - return cmd +func buildLocalPluginList(installed plugin.Plugins) []localPluginRow { + rows := make([]localPluginRow, 0, len(installed)) + for _, p := range installed { + row := localPluginRow{ + Name: p.FullName(), + Path: p.Path, + Shadows: p.ShadowedPaths, + } + rows = append(rows, row) + } + return rows +} + +func printLocalPluginList(f config.OutFormatter, rows []localPluginRow, w io.Writer) { + if isText, _, t, err := f.Format(rows); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", 
f.Kind, err) + fmt.Fprintf(w, "%s\n", t) + return + } + tw := out.NewTableTo(w, "Name", "Path", "Shadows") + defer tw.Flush() + for _, r := range rows { + if len(r.Shadows) == 0 { + tw.Print(r.Name, r.Path, "") + continue + } + tw.Print(r.Name, r.Path, r.Shadows[0]) + for _, shadowed := range r.Shadows[1:] { + tw.Print("", "", shadowed) + } + } } diff --git a/src/go/rpk/pkg/cli/plugin/list_test.go b/src/go/rpk/pkg/cli/plugin/list_test.go new file mode 100644 index 0000000000000..936a66f79cdec --- /dev/null +++ b/src/go/rpk/pkg/cli/plugin/list_test.go @@ -0,0 +1,53 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package plugin + +import ( + "bytes" + "testing" + + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestPrintPluginList(t *testing.T) { + data := []pluginRow{ + {Name: "cloud", Description: "Manage Redpanda Cloud", Installed: true}, + {Name: "byoc", Description: "Bring your own cloud", Installed: false}, + {Name: "mm3", Description: "MirrorMaker3", Installed: true, Message: "sha differs"}, + } + + var buf bytes.Buffer + printPluginList(config.OutFormatter{Kind: "text"}, data, &buf) + require.Equal(t, [][]string{ + {"NAME", "DESCRIPTION", "MESSAGE"}, + {"*cloud", "Manage", "Redpanda", "Cloud"}, + {"byoc", "Bring", "your", "own", "cloud"}, + {"*mm3", "MirrorMaker3", "sha", "differs"}, + }, out.TableRows(buf.String())) +} + +func TestPrintLocalPluginList(t *testing.T) { + data := []localPluginRow{ + {Name: "cloud", Path: "/home/user/.local/bin/.rpk-cloud"}, + {Name: "byoc", Path: "/home/user/.local/bin/.rpk-byoc", Shadows: 
[]string{"/usr/local/bin/.rpk-byoc", "/opt/bin/.rpk-byoc"}}, + } + + var buf bytes.Buffer + printLocalPluginList(config.OutFormatter{Kind: "text"}, data, &buf) + // Second shadow appears on its own row with blank Name/Path cells. + require.Equal(t, [][]string{ + {"NAME", "PATH", "SHADOWS"}, + {"cloud", "/home/user/.local/bin/.rpk-cloud"}, + {"byoc", "/home/user/.local/bin/.rpk-byoc", "/usr/local/bin/.rpk-byoc"}, + {"/opt/bin/.rpk-byoc"}, + }, out.TableRows(buf.String())) +} diff --git a/src/go/rpk/pkg/cli/plugin/plugin.go b/src/go/rpk/pkg/cli/plugin/plugin.go index 3c18b9fc974a5..6e3e4bacb0ac3 100644 --- a/src/go/rpk/pkg/cli/plugin/plugin.go +++ b/src/go/rpk/pkg/cli/plugin/plugin.go @@ -11,6 +11,7 @@ package plugin import ( + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/plugin" "github.com/spf13/afero" "github.com/spf13/cobra" @@ -18,7 +19,7 @@ import ( const urlBase = "https://vectorized-public.s3.us-west-2.amazonaws.com/rpk-plugins" -func NewCommand(fs afero.Fs) *cobra.Command { +func NewCommand(fs afero.Fs, p *config.Params) *cobra.Command { cmd := &cobra.Command{ Use: "plugin", Short: "List, download, update, and remove rpk plugins", @@ -68,7 +69,7 @@ example, "foo_bar_baz" corresponds to the command "rpk foo bar baz". 
Args: cobra.ExactArgs(0), } cmd.AddCommand( - newListCommand(fs), + newListCommand(fs, p), newInstallCommand(fs), newUninstallCommand(fs), ) diff --git a/src/go/rpk/pkg/cli/profile/BUILD b/src/go/rpk/pkg/cli/profile/BUILD index b505293baf03e..74f5ab09292f1 100644 --- a/src/go/rpk/pkg/cli/profile/BUILD +++ b/src/go/rpk/pkg/cli/profile/BUILD @@ -46,6 +46,7 @@ go_test( size = "small", srcs = [ "create_test.go", + "list_test.go", "print_test.go", "prompt_test.go", "validate_test.go", @@ -57,6 +58,7 @@ go_test( }, deps = [ "//src/go/rpk/pkg/config", + "//src/go/rpk/pkg/out", "@build_buf_gen_go_redpandadata_cloud_protocolbuffers_go//redpanda/api/controlplane/v1:controlplane", "@com_github_fatih_color//:color", "@com_github_spf13_afero//:afero", diff --git a/src/go/rpk/pkg/cli/profile/list.go b/src/go/rpk/pkg/cli/profile/list.go index d4dd890bec65b..62a450f7c00be 100644 --- a/src/go/rpk/pkg/cli/profile/list.go +++ b/src/go/rpk/pkg/cli/profile/list.go @@ -10,6 +10,8 @@ package profile import ( + "fmt" + "io" "sort" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" @@ -18,19 +20,56 @@ import ( "github.com/spf13/cobra" ) +type profileListItem struct { + Name string `json:"name" yaml:"name"` + Description string `json:"description" yaml:"description"` + Current bool `json:"current" yaml:"current"` +} + +func buildProfileList(y *config.RpkYaml) []profileListItem { + items := make([]profileListItem, 0, len(y.Profiles)) + for _, p := range y.Profiles { + items = append(items, profileListItem{ + Name: p.Name, + Description: p.Description, + Current: p.Name == y.CurrentProfile, + }) + } + return items +} + +func printProfileList(f config.OutFormatter, items []profileListItem, w io.Writer) { + if isText, _, t, err := f.Format(items); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, t) + return + } + tw := out.NewTableTo(w, "Name", "Description") + defer tw.Flush() + for _, item := range items { + name := 
item.Name + if item.Current { + name += "*" + } + tw.Print(name, item.Description) + } +} + func newListCommand(fs afero.Fs, p *config.Params) *cobra.Command { - return &cobra.Command{ + cmd := &cobra.Command{ Use: "list", Aliases: []string{"ls"}, Short: "List rpk profiles", Args: cobra.ExactArgs(0), - Run: func(*cobra.Command, []string) { + Run: func(cmd *cobra.Command, _ []string) { + f := p.Formatter + if h, ok := f.Help([]profileListItem{}); ok { + out.Exit(h) + } + cfg, err := p.Load(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) - tw := out.NewTable("name", "description") - defer tw.Flush() - y, ok := cfg.ActualRpkYaml() if !ok { return @@ -40,13 +79,9 @@ func newListCommand(fs afero.Fs, p *config.Params) *cobra.Command { return y.Profiles[i].Name < y.Profiles[j].Name }) - for _, p := range y.Profiles { - name := p.Name - if name == y.CurrentProfile { - name += "*" - } - tw.Print(name, p.Description) - } + printProfileList(f, buildProfileList(y), cmd.OutOrStdout()) }, } + p.InstallFormatFlag(cmd) + return cmd } diff --git a/src/go/rpk/pkg/cli/profile/list_test.go b/src/go/rpk/pkg/cli/profile/list_test.go new file mode 100644 index 0000000000000..c5f818c293f40 --- /dev/null +++ b/src/go/rpk/pkg/cli/profile/list_test.go @@ -0,0 +1,35 @@ +// Copyright 2026 Redpanda Data, Inc. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package profile + +import ( + "strings" + "testing" + + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestPrintProfileList(t *testing.T) { + profiles := []profileListItem{ + {Name: "dev", Description: "development cluster", Current: true}, + {Name: "prod", Description: ""}, + } + + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printProfileList(f, profiles, b) + require.Equal(t, [][]string{ + {"NAME", "DESCRIPTION"}, + {"dev*", "development", "cluster"}, + {"prod"}, + }, out.TableRows(b.String())) +} diff --git a/src/go/rpk/pkg/cli/redpanda/admin/brokers/BUILD b/src/go/rpk/pkg/cli/redpanda/admin/brokers/BUILD index d9314cd9c79bc..9243af42f678c 100644 --- a/src/go/rpk/pkg/cli/redpanda/admin/brokers/BUILD +++ b/src/go/rpk/pkg/cli/redpanda/admin/brokers/BUILD @@ -1,4 +1,4 @@ -load("@rules_go//go:def.bzl", "go_library") +load("@rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "brokers", @@ -23,3 +23,15 @@ go_library( "@com_github_twmb_types//:types", ], ) + +go_test( + name = "brokers_test", + srcs = ["decommission_status_test.go"], + embed = [":brokers"], + deps = [ + "//src/go/rpk/pkg/config", + "//src/go/rpk/pkg/out", + "@com_github_redpanda_data_common_go_rpadmin//:rpadmin", + "@com_github_stretchr_testify//require", + ], +) diff --git a/src/go/rpk/pkg/cli/redpanda/admin/brokers/decommission-status.go b/src/go/rpk/pkg/cli/redpanda/admin/brokers/decommission-status.go index ff7c740c3b99c..f551bcd32092c 100644 --- a/src/go/rpk/pkg/cli/redpanda/admin/brokers/decommission-status.go +++ 
b/src/go/rpk/pkg/cli/redpanda/admin/brokers/decommission-status.go @@ -1,14 +1,23 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + package brokers import ( "errors" "fmt" + "io" "sort" "strconv" - "github.com/redpanda-data/common-go/rpadmin" - "github.com/docker/go-units" + "github.com/redpanda-data/common-go/rpadmin" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/adminapi" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" @@ -17,11 +26,116 @@ import ( "github.com/twmb/types" ) +type decommissionStatusResponse struct { + ReallocationFailures []reallocationFailure `json:"reallocation_failures,omitempty" yaml:"reallocation_failures,omitempty"` + AllocationFailures []string `json:"allocation_failures,omitempty" yaml:"allocation_failures,omitempty"` + Partitions []decommissionPartition `json:"partitions" yaml:"partitions"` +} + +type reallocationFailure struct { + Partition string `json:"partition" yaml:"partition"` + Reason string `json:"reason" yaml:"reason"` +} + +type decommissionPartition struct { + Partition string `json:"partition" yaml:"partition"` + MovingTo int `json:"moving_to" yaml:"moving_to"` + CompletionPercent int `json:"completion_percent" yaml:"completion_percent"` + PartitionSize int `json:"partition_size" yaml:"partition_size"` + BytesMoved *int `json:"bytes_moved,omitempty" yaml:"bytes_moved,omitempty"` + BytesRemaining *int `json:"bytes_remaining,omitempty" yaml:"bytes_remaining,omitempty"` +} + +func buildDecommissionStatus(dbs rpadmin.DecommissionStatusResponse, detailed bool) decommissionStatusResponse { + resp := decommissionStatusResponse{ + Partitions: make([]decommissionPartition, 0, 
len(dbs.Partitions)), + } + + if dbs.ReallocationFailureDetails != nil { + resp.ReallocationFailures = make([]reallocationFailure, 0, len(dbs.ReallocationFailureDetails)) + for _, f := range dbs.ReallocationFailureDetails { + ntp := f.NS + "/" + f.Topic + "/" + strconv.Itoa(f.Partition) + resp.ReallocationFailures = append(resp.ReallocationFailures, reallocationFailure{ + Partition: ntp, + Reason: f.Error, + }) + } + } else if dbs.AllocationFailures != nil { + resp.AllocationFailures = append([]string(nil), dbs.AllocationFailures...) + } + + for _, p := range dbs.Partitions { + ntp := p.Ns + "/" + p.Topic + "/" + strconv.Itoa(p.Partition) + var completion int + if p.PartitionSize > 0 { + completion = p.BytesMoved * 100 / p.PartitionSize + } + dp := decommissionPartition{ + Partition: ntp, + MovingTo: p.MovingTo.NodeID, + CompletionPercent: completion, + PartitionSize: p.PartitionSize, + } + if detailed { + dp.BytesMoved = &p.BytesMoved + dp.BytesRemaining = &p.BytesLeftToMove + } + resp.Partitions = append(resp.Partitions, dp) + } + + return resp +} + +func printDecommissionStatus(f config.OutFormatter, resp decommissionStatusResponse, detailed, human bool, w io.Writer) { + if isText, _, s, err := f.Format(resp); !isText { + out.MaybeDie(err, "unable to print in the required format %q: %v", f.Kind, err) + fmt.Fprintln(w, s) + return + } + + sizeFn := func(size int) string { + if human { + return units.HumanSize(float64(size)) + } + return strconv.Itoa(size) + } + + if len(resp.ReallocationFailures) > 0 { + out.SectionTo(w, "reallocation failure details") + tw := out.NewTableTo(w, "Partition", "Reason") + for _, rf := range resp.ReallocationFailures { + tw.Print(rf.Partition, rf.Reason) + } + tw.Flush() + fmt.Fprintln(w) + } else if len(resp.AllocationFailures) > 0 { + out.SectionTo(w, "allocation failures") + for _, af := range resp.AllocationFailures { + fmt.Fprintln(w, af) + } + fmt.Fprintln(w) + } + + out.SectionTo(w, "decommission progress") + headers := 
[]string{"Partition", "Moving-to", "Completion-%", "Partition-size"} + if detailed { + headers = append(headers, "Bytes-moved", "Bytes-remaining") + } + tw := out.NewTableTo(w, headers...) + defer tw.Flush() + for _, p := range resp.Partitions { + if p.BytesMoved != nil && p.BytesRemaining != nil { + tw.Print(p.Partition, p.MovingTo, p.CompletionPercent, sizeFn(p.PartitionSize), sizeFn(*p.BytesMoved), sizeFn(*p.BytesRemaining)) + } else { + tw.Print(p.Partition, p.MovingTo, p.CompletionPercent, sizeFn(p.PartitionSize)) + } + } +} + func newDecommissionBrokerStatus(fs afero.Fs, p *config.Params) *cobra.Command { var ( - completion int - detailed bool - human bool + detailed bool + human bool ) cmd := &cobra.Command{ Use: "decommission-status [BROKER ID]", @@ -67,6 +181,11 @@ kafka/foo/7 Missing partition size information, all replicas may be offline `, Args: cobra.ExactArgs(1), Run: func(cmd *cobra.Command, args []string) { + f := p.Formatter + if h, ok := f.Help(decommissionStatusResponse{}); ok { + out.Exit(h) + } + broker, _ := strconv.Atoi(args[0]) p, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) @@ -82,6 +201,11 @@ kafka/foo/7 Missing partition size information, all replicas may be offline if he.Response.StatusCode == 400 { body, bodyErr := he.DecodeGenericErrorBody() if bodyErr == nil { + if isText, _, t, err := f.Format(decommissionStatusResponse{Partitions: []decommissionPartition{}}); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(cmd.OutOrStdout(), t) + return + } out.Exit("%s", body.Message) } } @@ -89,6 +213,11 @@ kafka/foo/7 Missing partition size information, all replicas may be offline out.MaybeDie(err, "unable to request brokers: %v", err) if dbs.Finished { + if isText, _, t, err := f.Format(buildDecommissionStatus(dbs, detailed)); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + 
fmt.Fprintln(cmd.OutOrStdout(), t) + return + } if dbs.ReplicasLeft == 0 { out.Exit("Node %d is decommissioned successfully.", broker) } else { @@ -96,90 +225,16 @@ kafka/foo/7 Missing partition size information, all replicas may be offline } } - if dbs.ReallocationFailureDetails != nil { - out.Section("reallocation failure details") - tw := out.NewTable("Partition", "Reason") - for _, f := range dbs.ReallocationFailureDetails { - ntp := f.NS + "/" + f.Topic + "/" + strconv.Itoa(f.Partition) - tw.PrintStructFields(struct { - NTP string - Reason string - }{ - NTP: ntp, - Reason: f.Error, - }) - } - tw.Flush() - fmt.Println() - } else if dbs.AllocationFailures != nil { - sort.Strings(dbs.AllocationFailures) - out.Section("allocation failures") - for _, f := range dbs.AllocationFailures { - fmt.Println(f) - } - fmt.Println() - } - - out.Section("decommission progress") - headers := []string{"Partition", "Moving-to", "Completion-%", "Partition-size"} - if detailed { - headers = append(headers, "Bytes-moved", "Bytes-remaining") - } - - sizeFn := func(size int) string { - if human { - return units.HumanSize(float64(size)) - } - return strconv.Itoa(size) - } - - f := func(p *rpadmin.DecommissionPartitions) any { - ntp := p.Ns + "/" + p.Topic + "/" + strconv.Itoa(p.Partition) - if p.PartitionSize > 0 { - completion = p.BytesMoved * 100 / p.PartitionSize - } - if detailed { - return struct { - NTP string - MovingTo int - Completion int - PartitionSize string - BytesMoved string - BytesRemaining string - }{ - ntp, - p.MovingTo.NodeID, - completion, - sizeFn(p.PartitionSize), - sizeFn(p.BytesMoved), - sizeFn(p.BytesLeftToMove), - } - } else { - return struct { - NTP string - MovingTo int - Completion int - PartitionSize string - }{ - ntp, - p.MovingTo.NodeID, - completion, - sizeFn(p.PartitionSize), - } - } - } - types.Sort(dbs.Partitions) + sort.Strings(dbs.AllocationFailures) - tw := out.NewTable(headers...) 
- defer tw.Flush() - for _, p := range dbs.Partitions { - tw.PrintStructFields(f(&p)) - } + resp := buildDecommissionStatus(dbs, detailed) + printDecommissionStatus(f, resp, detailed, human, cmd.OutOrStdout()) }, } cmd.Flags().BoolVarP(&detailed, "detailed", "d", false, "Print how much data moved and remaining in bytes") cmd.Flags().BoolVarP(&human, "human-readable", "H", false, "Print the partition size in a human-readable form") + p.InstallFormatFlag(cmd) return cmd } diff --git a/src/go/rpk/pkg/cli/redpanda/admin/brokers/decommission_status_test.go b/src/go/rpk/pkg/cli/redpanda/admin/brokers/decommission_status_test.go new file mode 100644 index 0000000000000..0f80e3f773071 --- /dev/null +++ b/src/go/rpk/pkg/cli/redpanda/admin/brokers/decommission_status_test.go @@ -0,0 +1,201 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package brokers + +import ( + "bytes" + "testing" + + "github.com/redpanda-data/common-go/rpadmin" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestBuildDecommissionStatus(t *testing.T) { + t.Run("basic partitions", func(t *testing.T) { + dbs := rpadmin.DecommissionStatusResponse{ + Partitions: []rpadmin.DecommissionPartitions{ + { + Ns: "kafka", + Topic: "test", + Partition: 0, + MovingTo: rpadmin.DecommissionMovingTo{NodeID: 3}, + PartitionSize: 1000, + BytesMoved: 100, + BytesLeftToMove: 900, + }, + }, + } + + resp := buildDecommissionStatus(dbs, false) + + require.Len(t, resp.Partitions, 1) + require.Equal(t, "kafka/test/0", resp.Partitions[0].Partition) + require.Equal(t, 3, resp.Partitions[0].MovingTo) + require.Equal(t, 10, 
resp.Partitions[0].CompletionPercent) + require.Equal(t, 1000, resp.Partitions[0].PartitionSize) + require.Nil(t, resp.Partitions[0].BytesMoved) + require.Nil(t, resp.Partitions[0].BytesRemaining) + }) + + t.Run("detailed partitions", func(t *testing.T) { + dbs := rpadmin.DecommissionStatusResponse{ + Partitions: []rpadmin.DecommissionPartitions{ + { + Ns: "kafka", + Topic: "test", + Partition: 1, + MovingTo: rpadmin.DecommissionMovingTo{NodeID: 5}, + PartitionSize: 2000, + BytesMoved: 500, + BytesLeftToMove: 1500, + }, + }, + } + + resp := buildDecommissionStatus(dbs, true) + + require.Len(t, resp.Partitions, 1) + require.NotNil(t, resp.Partitions[0].BytesMoved) + require.Equal(t, 500, *resp.Partitions[0].BytesMoved) + require.NotNil(t, resp.Partitions[0].BytesRemaining) + require.Equal(t, 1500, *resp.Partitions[0].BytesRemaining) + }) + + t.Run("zero partition size completion", func(t *testing.T) { + dbs := rpadmin.DecommissionStatusResponse{ + Partitions: []rpadmin.DecommissionPartitions{ + { + Ns: "kafka", + Topic: "t", + Partition: 0, + MovingTo: rpadmin.DecommissionMovingTo{NodeID: 1}, + }, + }, + } + + resp := buildDecommissionStatus(dbs, false) + require.Equal(t, 0, resp.Partitions[0].CompletionPercent) + }) + + t.Run("reallocation failures", func(t *testing.T) { + dbs := rpadmin.DecommissionStatusResponse{ + ReallocationFailureDetails: []rpadmin.ReallocationFailedPartition{ + {NS: "kafka", Topic: "foo", Partition: 1, Error: "not enough space"}, + }, + } + + resp := buildDecommissionStatus(dbs, false) + + require.Len(t, resp.ReallocationFailures, 1) + require.Equal(t, "kafka/foo/1", resp.ReallocationFailures[0].Partition) + require.Equal(t, "not enough space", resp.ReallocationFailures[0].Reason) + require.Empty(t, resp.AllocationFailures) + }) + + t.Run("allocation failures", func(t *testing.T) { + dbs := rpadmin.DecommissionStatusResponse{ + AllocationFailures: []string{"kafka/bar/0", "kafka/bar/1"}, + } + + resp := buildDecommissionStatus(dbs, false) + + 
require.Empty(t, resp.ReallocationFailures) + require.Equal(t, []string{"kafka/bar/0", "kafka/bar/1"}, resp.AllocationFailures) + }) +} + +func TestPrintDecommissionStatus(t *testing.T) { + f := config.OutFormatter{Kind: "text"} + resp := decommissionStatusResponse{ + Partitions: []decommissionPartition{ + {Partition: "kafka/test/0", MovingTo: 3, CompletionPercent: 10, PartitionSize: 1000}, + {Partition: "kafka/test/1", MovingTo: 3, CompletionPercent: 50, PartitionSize: 2000}, + }, + } + + t.Run("basic", func(t *testing.T) { + var buf bytes.Buffer + printDecommissionStatus(f, resp, false, false, &buf) + require.Equal(t, [][]string{ + {"DECOMMISSION", "PROGRESS"}, + {"PARTITION", "MOVING-TO", "COMPLETION-%", "PARTITION-SIZE"}, + {"kafka/test/0", "3", "10", "1000"}, + {"kafka/test/1", "3", "50", "2000"}, + }, out.TableRows(buf.String())) + }) + + t.Run("detailed adds bytes columns", func(t *testing.T) { + moved, remaining := 100, 900 + respDetailed := decommissionStatusResponse{ + Partitions: []decommissionPartition{ + {Partition: "kafka/test/0", MovingTo: 3, CompletionPercent: 10, PartitionSize: 1000, BytesMoved: &moved, BytesRemaining: &remaining}, + }, + } + var buf bytes.Buffer + printDecommissionStatus(f, respDetailed, true, false, &buf) + require.Equal(t, [][]string{ + {"DECOMMISSION", "PROGRESS"}, + {"PARTITION", "MOVING-TO", "COMPLETION-%", "PARTITION-SIZE", "BYTES-MOVED", "BYTES-REMAINING"}, + {"kafka/test/0", "3", "10", "1000", "100", "900"}, + }, out.TableRows(buf.String())) + }) + + t.Run("reallocation failures section precedes progress", func(t *testing.T) { + respFail := decommissionStatusResponse{ + ReallocationFailures: []reallocationFailure{{Partition: "kafka/foo/1", Reason: "no space"}}, + Partitions: []decommissionPartition{{Partition: "kafka/test/0", MovingTo: 3, CompletionPercent: 5, PartitionSize: 100}}, + } + var buf bytes.Buffer + printDecommissionStatus(f, respFail, false, false, &buf) + require.Equal(t, [][]string{ + {"REALLOCATION", 
"FAILURE", "DETAILS"}, + {"PARTITION", "REASON"}, + {"kafka/foo/1", "no", "space"}, + {}, + {"DECOMMISSION", "PROGRESS"}, + {"PARTITION", "MOVING-TO", "COMPLETION-%", "PARTITION-SIZE"}, + {"kafka/test/0", "3", "5", "100"}, + }, out.TableRows(buf.String())) + }) + + t.Run("allocation failures section precedes progress", func(t *testing.T) { + respFail := decommissionStatusResponse{ + AllocationFailures: []string{"kafka/bar/0"}, + Partitions: []decommissionPartition{{Partition: "kafka/test/0", MovingTo: 3, CompletionPercent: 5, PartitionSize: 100}}, + } + var buf bytes.Buffer + printDecommissionStatus(f, respFail, false, false, &buf) + require.Equal(t, [][]string{ + {"ALLOCATION", "FAILURES"}, + {"kafka/bar/0"}, + {}, + {"DECOMMISSION", "PROGRESS"}, + {"PARTITION", "MOVING-TO", "COMPLETION-%", "PARTITION-SIZE"}, + {"kafka/test/0", "3", "5", "100"}, + }, out.TableRows(buf.String())) + }) + + t.Run("human readable sizes", func(t *testing.T) { + var buf bytes.Buffer + printDecommissionStatus(f, resp, false, true, &buf) + rows := out.TableRows(buf.String()) + // Data row's PARTITION-SIZE column should no longer be raw integer. 
+ require.NotEqual(t, "1000", rows[2][3]) + require.NotEqual(t, "2000", rows[3][3]) + }) + + t.Run("json empty partitions", func(t *testing.T) { + var buf bytes.Buffer + printDecommissionStatus(config.OutFormatter{Kind: "json"}, decommissionStatusResponse{Partitions: []decommissionPartition{}}, false, false, &buf) + require.Equal(t, `{"partitions":[]}`+"\n", buf.String()) + }) +} diff --git a/src/go/rpk/pkg/cli/redpanda/admin/partitions/BUILD b/src/go/rpk/pkg/cli/redpanda/admin/partitions/BUILD index 478434a278035..c10f5054a2a52 100644 --- a/src/go/rpk/pkg/cli/redpanda/admin/partitions/BUILD +++ b/src/go/rpk/pkg/cli/redpanda/admin/partitions/BUILD @@ -1,4 +1,4 @@ -load("@rules_go//go:def.bzl", "go_library") +load("@rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "partitions", @@ -14,3 +14,14 @@ go_library( "@com_github_twmb_franz_go_pkg_kadm//:kadm", ], ) + +go_test( + name = "partitions_test", + srcs = ["partitions_test.go"], + embed = [":partitions"], + deps = [ + "//src/go/rpk/pkg/config", + "//src/go/rpk/pkg/out", + "@com_github_stretchr_testify//require", + ], +) diff --git a/src/go/rpk/pkg/cli/redpanda/admin/partitions/partitions.go b/src/go/rpk/pkg/cli/redpanda/admin/partitions/partitions.go index d50a18692a796..44b5e6ef20133 100644 --- a/src/go/rpk/pkg/cli/redpanda/admin/partitions/partitions.go +++ b/src/go/rpk/pkg/cli/redpanda/admin/partitions/partitions.go @@ -13,6 +13,8 @@ package partitions import ( "context" + "fmt" + "io" "strconv" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" @@ -23,6 +25,12 @@ import ( "github.com/twmb/franz-go/pkg/kadm" ) +type partitionResponse struct { + Topic string `json:"topic" yaml:"topic"` + Partition int32 `json:"partition" yaml:"partition"` + IsLeader bool `json:"is_leader" yaml:"is_leader"` +} + // NewCommand returns the partitions admin command. 
func NewCommand(fs afero.Fs, p *config.Params) *cobra.Command { cmd := &cobra.Command{ @@ -43,7 +51,12 @@ func newListCommand(fs afero.Fs, p *config.Params) *cobra.Command { Aliases: []string{"ls"}, Short: "List the partitions in a broker in the cluster", Args: cobra.ExactArgs(1), - Run: func(_ *cobra.Command, args []string) { + Run: func(cmd *cobra.Command, args []string) { + f := p.Formatter + if h, ok := f.Help([]partitionResponse{}); ok { + out.Exit(h) + } + brokerID, err := strconv.Atoi(args[0]) out.MaybeDie(err, "invalid broker %s: %v", args[0], err) if brokerID < 0 { @@ -61,29 +74,43 @@ func newListCommand(fs afero.Fs, p *config.Params) *cobra.Command { m, err = adm.Metadata(context.Background()) out.MaybeDie(err, "unable to request metadata: %v", err) - tw := out.NewTable("TOPIC", "PARTITION", "IS-LEADER") - defer tw.Flush() - + var resp []partitionResponse for _, t := range m.Topics.Sorted() { for _, pt := range t.Partitions.Sorted() { for _, rs := range pt.Replicas { if int(rs) == brokerID { - var isLeader bool - if int(pt.Leader) == brokerID { - isLeader = true - tw.Print(t.Topic, pt.Partition, isLeader) - } - if !leaderOnly && !isLeader { - tw.Print(t.Topic, pt.Partition, isLeader) + isLeader := int(pt.Leader) == brokerID + if isLeader || !leaderOnly { + resp = append(resp, partitionResponse{ + Topic: t.Topic, + Partition: pt.Partition, + IsLeader: isLeader, + }) } } } } } + + printAdminPartitionList(f, resp, cmd.OutOrStdout()) }, } cmd.Flags().BoolVarP(&leaderOnly, "leader-only", "l", false, "print the partitions on broker which are leaders") + p.InstallFormatFlag(cmd) return cmd } + +func printAdminPartitionList(f config.OutFormatter, data []partitionResponse, w io.Writer) { + if isText, _, s, err := f.Format(data); !isText { + out.MaybeDie(err, "unable to print in the required format %q: %v", f.Kind, err) + fmt.Fprintln(w, s) + return + } + tw := out.NewTableTo(w, "Topic", "Partition", "Is-Leader") + defer tw.Flush() + for _, p := range data { + 
tw.Print(p.Topic, p.Partition, p.IsLeader) + } +} diff --git a/src/go/rpk/pkg/cli/redpanda/admin/partitions/partitions_test.go b/src/go/rpk/pkg/cli/redpanda/admin/partitions/partitions_test.go new file mode 100644 index 0000000000000..3f576eae1a107 --- /dev/null +++ b/src/go/rpk/pkg/cli/redpanda/admin/partitions/partitions_test.go @@ -0,0 +1,45 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package partitions + +import ( + "bytes" + "testing" + + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestPrintAdminPartitionList(t *testing.T) { + f := config.OutFormatter{Kind: "text"} + + t.Run("text", func(t *testing.T) { + data := []partitionResponse{ + {Topic: "topic-a", Partition: 0, IsLeader: true}, + {Topic: "topic-a", Partition: 1, IsLeader: false}, + {Topic: "topic-b", Partition: 0, IsLeader: true}, + } + var buf bytes.Buffer + printAdminPartitionList(f, data, &buf) + require.Equal(t, [][]string{ + {"TOPIC", "PARTITION", "IS-LEADER"}, + {"topic-a", "0", "true"}, + {"topic-a", "1", "false"}, + {"topic-b", "0", "true"}, + }, out.TableRows(buf.String())) + }) + + t.Run("text empty", func(t *testing.T) { + var buf bytes.Buffer + printAdminPartitionList(f, nil, &buf) + require.Equal(t, [][]string{{"TOPIC", "PARTITION", "IS-LEADER"}}, out.TableRows(buf.String())) + }) +} diff --git a/src/go/rpk/pkg/cli/root.go b/src/go/rpk/pkg/cli/root.go index 7956d4fa7334c..626341b90cea8 100644 --- a/src/go/rpk/pkg/cli/root.go +++ b/src/go/rpk/pkg/cli/root.go @@ -124,7 +124,7 @@ func Execute() { debug.NewCommand(fs, p), generate.NewCommand(fs, p), group.NewCommand(fs, p), - 
plugincmd.NewCommand(fs), + plugincmd.NewCommand(fs, p), registry.NewCommand(fs, p), security.NewCommand(fs, p), shadow.NewCommand(fs, p), diff --git a/src/go/rpk/pkg/cli/security/acl/BUILD b/src/go/rpk/pkg/cli/security/acl/BUILD index 4cdd94412fac3..0460eff25b6d6 100644 --- a/src/go/rpk/pkg/cli/security/acl/BUILD +++ b/src/go/rpk/pkg/cli/security/acl/BUILD @@ -29,9 +29,14 @@ go_library( go_test( name = "acl_test", size = "small", - srcs = ["common_test.go"], + srcs = [ + "common_test.go", + "delete_test.go", + ], embed = [":acl"], deps = [ + "//src/go/rpk/pkg/config", + "//src/go/rpk/pkg/out", "@com_github_redpanda_data_common_go_rpsr//:rpsr", "@com_github_stretchr_testify//require", "@com_github_twmb_franz_go_pkg_kadm//:kadm", diff --git a/src/go/rpk/pkg/cli/security/acl/common.go b/src/go/rpk/pkg/cli/security/acl/common.go index 96ce2f049a7b3..1fba8755d4fa5 100644 --- a/src/go/rpk/pkg/cli/security/acl/common.go +++ b/src/go/rpk/pkg/cli/security/acl/common.go @@ -77,14 +77,14 @@ type ( Permission string `json:"permission"` } aclWithMessage struct { - Principal string `json:"principal"` - Host string `json:"host"` - ResourceType string `json:"resource_type"` - ResourceName string `json:"resource_name"` - ResourcePatternType string `json:"resource_pattern_type"` - Operation string `json:"operation"` - Permission string `json:"permission"` - Message string `json:"message"` + Principal string `json:"principal" yaml:"principal"` + Host string `json:"host" yaml:"host"` + ResourceType string `json:"resource_type" yaml:"resource_type"` + ResourceName string `json:"resource_name" yaml:"resource_name"` + ResourcePatternType string `json:"resource_pattern_type" yaml:"resource_pattern_type"` + Operation string `json:"operation" yaml:"operation"` + Permission string `json:"permission" yaml:"permission"` + Message string `json:"message" yaml:"message"` } ) diff --git a/src/go/rpk/pkg/cli/security/acl/delete.go b/src/go/rpk/pkg/cli/security/acl/delete.go index 
2c9d51051eb62..02d3dac6988aa 100644 --- a/src/go/rpk/pkg/cli/security/acl/delete.go +++ b/src/go/rpk/pkg/cli/security/acl/delete.go @@ -12,6 +12,7 @@ package acl import ( "context" "fmt" + "io" "go.uber.org/zap" @@ -61,7 +62,10 @@ resource names: `, Args: cobra.ExactArgs(0), Run: func(cmd *cobra.Command, _ []string) { - f := p.Formatter // always text for now + f := p.Formatter + if h, ok := f.Help(&aclDeleteOutput{}); ok { + out.Exit(h) + } p, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) @@ -101,9 +105,10 @@ resource names: printAllFilters = false printDeletionsHeader = true } - deleteReqResp(cmd.Context(), adm, srClient, printAllFilters, printDeletionsHeader, kBuilder, srACLs, filteredSRACLs) + deleteReqResp(cmd.Context(), adm, srClient, printAllFilters, printDeletionsHeader, kBuilder, srACLs, filteredSRACLs, f, cmd.OutOrStdout()) }, } + p.InstallFormatFlag(cmd) p.InstallKafkaFlags(cmd) a.addDeleteFlags(cmd) cmd.Flags().BoolVarP(&printAllFilters, "print-filters", "f", false, "Print the filters that were requested (failed filters are always printed)") @@ -133,6 +138,11 @@ func (a *acls) addDeleteFlags(cmd *cobra.Command) { cmd.Flags().StringSliceVar(&a.denyHosts, denyHostFlag, nil, "Denied host ACLs to remove (repeatable)") } +type aclDeleteOutput struct { + Filters []aclWithMessage `json:"filters,omitempty" yaml:"filters,omitempty"` + Deletions []aclWithMessage `json:"deletions" yaml:"deletions"` +} + func deleteReqResp( ctx context.Context, adm *kadm.Client, @@ -142,6 +152,8 @@ func deleteReqResp( b *kadm.ACLBuilder, srACLsFilter []rpsr.ACL, filteredSRACLs []rpsr.ACL, + f config.OutFormatter, + w io.Writer, ) { var ( kResults []kadm.DeleteACLsResult @@ -170,69 +182,57 @@ func deleteReqResp( } srResults = filteredSRACLs } - // If any filters failed, or if all filters are requested, we print the - // filter section. + + // Build the structured output. 
+ output := aclDeleteOutput{ + Deletions: []aclWithMessage{}, + } + + // If any filters failed, or if all filters are requested, include them. var printFailedFilters bool - for _, f := range kResults { - if f.Err != nil { + for _, r := range kResults { + if r.Err != nil { printFailedFilters = true break } } - if printAllFilters || printFailedFilters { - out.Section("filters") - printDeleteFilters(printAllFilters, kResults, srACLsFilter) - fmt.Println() - printDeletionsHeader = true - } - if printDeletionsHeader { - out.Section("deletions") - } - printDeleteResults(kResults, srResults, srErr) -} - -func printDeleteFilters(all bool, kResults kadm.DeleteACLsResults, srACLs []rpsr.ACL) { - var results []aclWithMessage - for _, f := range kResults { - if f.Err == nil && !all { - continue + for _, r := range kResults { + if r.Err == nil && !printAllFilters { + continue + } + output.Filters = append(output.Filters, aclWithMessage{ + unptr(r.Principal), + unptr(r.Host), + r.Type.String(), + unptr(r.Name), + r.Pattern.String(), + r.Operation.String(), + r.Permission.String(), + kafka.ErrMessage(r.Err), + }) + } + for _, r := range srACLsFilter { + msg := "" + if srErr != nil { + msg = srErr.Error() + } + output.Filters = append(output.Filters, aclWithMessage{ + Principal: r.Principal, + Host: r.Host, + ResourceType: string(r.ResourceType), + ResourceName: r.Resource, + ResourcePatternType: string(r.PatternType), + Operation: string(r.Operation), + Permission: string(r.Permission), + Message: msg, + }) } - results = append(results, aclWithMessage{ - unptr(f.Principal), - unptr(f.Host), - f.Type.String(), - unptr(f.Name), - f.Pattern.String(), - f.Operation.String(), - f.Permission.String(), - kafka.ErrMessage(f.Err), - }) - } - for _, f := range srACLs { - results = append(results, aclWithMessage{ - Principal: f.Principal, - Host: f.Host, - ResourceType: string(f.ResourceType), - ResourceName: f.Resource, - ResourcePatternType: string(f.PatternType), - Operation: 
string(f.Operation), - Permission: string(f.Permission), - }) - } - types.Sort(results) - tw := out.NewTable(headersWithError...) - defer tw.Flush() - for _, f := range results { - tw.PrintStructFields(f) } -} -func printDeleteResults(kResults kadm.DeleteACLsResults, srACLs []rpsr.ACL, srErr error) { - var results []aclWithMessage - for _, f := range kResults { - for _, d := range f.Deleted { - results = append(results, aclWithMessage{ + for _, r := range kResults { + for _, d := range r.Deleted { + output.Deletions = append(output.Deletions, aclWithMessage{ d.Principal, d.Host, d.Type.String(), @@ -244,26 +244,53 @@ func printDeleteResults(kResults kadm.DeleteACLsResults, srACLs []rpsr.ACL, srEr }) } } - for _, f := range srACLs { + for _, r := range srResults { msg := "" if srErr != nil { msg = srErr.Error() } - results = append(results, aclWithMessage{ - Principal: f.Principal, - Host: f.Host, - ResourceType: string(f.ResourceType), - ResourceName: f.Resource, - ResourcePatternType: string(f.PatternType), - Operation: string(f.Operation), - Permission: string(f.Permission), + output.Deletions = append(output.Deletions, aclWithMessage{ + Principal: r.Principal, + Host: r.Host, + ResourceType: string(r.ResourceType), + ResourceName: r.Resource, + ResourcePatternType: string(r.PatternType), + Operation: string(r.Operation), + Permission: string(r.Permission), Message: msg, }) } - types.Sort(results) - tw := out.NewTable(headersWithError...) - defer tw.Flush() - for _, f := range results { - tw.PrintStructFields(f) + + types.Sort(output) + + // Presence of filters implies a deletions section header is needed. 
+ if len(output.Filters) > 0 { + printDeletionsHeader = true + } + printDeleteOutput(f, output, printDeletionsHeader, w) +} + +func printDeleteOutput(f config.OutFormatter, output aclDeleteOutput, printDeletionsHeader bool, w io.Writer) { + if isText, _, t, err := f.Format(&output); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintf(w, "%s\n", t) + return + } + if len(output.Filters) > 0 { + out.SectionTo(w, "filters") + tw := out.NewTableTo(w, headersWithError...) + for _, r := range output.Filters { + tw.PrintStructFields(r) + } + tw.Flush() + fmt.Fprintln(w) + } + if printDeletionsHeader { + out.SectionTo(w, "deletions") + } + tw := out.NewTableTo(w, headersWithError...) + for _, r := range output.Deletions { + tw.PrintStructFields(r) } + tw.Flush() } diff --git a/src/go/rpk/pkg/cli/security/acl/delete_test.go b/src/go/rpk/pkg/cli/security/acl/delete_test.go new file mode 100644 index 0000000000000..903cc0cf4bdc1 --- /dev/null +++ b/src/go/rpk/pkg/cli/security/acl/delete_test.go @@ -0,0 +1,68 @@ +// Copyright 2026 Redpanda Data, Inc. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package acl + +import ( + "bytes" + "strings" + "testing" + + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestPrintDeleteOutput(t *testing.T) { + f := config.OutFormatter{Kind: "text"} + row := aclWithMessage{ + Principal: "User:alice", + Host: "*", + ResourceType: "Topic", + ResourceName: "foo", + ResourcePatternType: "Literal", + Operation: "Read", + Permission: "Allow", + } + output := aclDeleteOutput{ + Filters: []aclWithMessage{row}, + Deletions: []aclWithMessage{row}, + } + + header := []string{"PRINCIPAL", "HOST", "RESOURCE-TYPE", "RESOURCE-NAME", "RESOURCE-PATTERN-TYPE", "OPERATION", "PERMISSION", "ERROR"} + dataRow := []string{"User:alice", "*", "Topic", "foo", "Literal", "Read", "Allow"} + + t.Run("filters and deletions sections", func(t *testing.T) { + var buf bytes.Buffer + printDeleteOutput(f, output, true, &buf) + require.Equal(t, [][]string{ + {"FILTERS"}, + header, + dataRow, + {}, + {"DELETIONS"}, + header, + dataRow, + }, out.TableRows(buf.String())) + }) + + t.Run("no header when deletions-only and flag false", func(t *testing.T) { + var buf bytes.Buffer + printDeleteOutput(f, aclDeleteOutput{Deletions: output.Deletions}, false, &buf) + require.Equal(t, [][]string{header, dataRow}, out.TableRows(buf.String())) + }) + + // Round-trip verifies structured output preserves field names and values. + // The empty-filters case also covers the omitempty tag. 
+ t.Run("json omitempty on filters", func(t *testing.T) { + var buf bytes.Buffer + printDeleteOutput(config.OutFormatter{Kind: "json"}, aclDeleteOutput{Deletions: output.Deletions}, false, &buf) + require.False(t, strings.Contains(buf.String(), `"filters"`)) + }) +} diff --git a/src/go/rpk/pkg/cli/security/secret/BUILD b/src/go/rpk/pkg/cli/security/secret/BUILD index 5fb02984bd6a8..59aa8498da05b 100644 --- a/src/go/rpk/pkg/cli/security/secret/BUILD +++ b/src/go/rpk/pkg/cli/security/secret/BUILD @@ -25,7 +25,14 @@ go_library( go_test( name = "secret_test", - srcs = ["secret_test.go"], + srcs = [ + "list_test.go", + "secret_test.go", + ], embed = [":secret"], - deps = ["@com_github_stretchr_testify//require"], + deps = [ + "//src/go/rpk/pkg/config", + "//src/go/rpk/pkg/out", + "@com_github_stretchr_testify//require", + ], ) diff --git a/src/go/rpk/pkg/cli/security/secret/list.go b/src/go/rpk/pkg/cli/security/secret/list.go index 0b93fdbe68261..f967ff670bc00 100644 --- a/src/go/rpk/pkg/cli/security/secret/list.go +++ b/src/go/rpk/pkg/cli/security/secret/list.go @@ -11,6 +11,8 @@ package secret import ( "fmt" + "io" + "os" "strings" dataplanev1 "buf.build/gen/go/redpandadata/dataplane/protocolbuffers/go/redpanda/api/dataplane/v1" @@ -22,6 +24,11 @@ import ( "github.com/spf13/cobra" ) +type secretListItem struct { + ID string `json:"id" yaml:"id"` + Scopes []string `json:"scopes,omitempty" yaml:"scopes,omitempty"` +} + func newListCommand(fs afero.Fs, p *config.Params) *cobra.Command { var nameContains string @@ -30,6 +37,10 @@ func newListCommand(fs afero.Fs, p *config.Params) *cobra.Command { Short: "List all secrets", Long: "List all secrets in your Redpanda Cloud cluster", Run: func(cmd *cobra.Command, _ []string) { + f := p.Formatter + if h, ok := f.Help([]secretListItem{}); ok { + out.Exit(h) + } p, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) if !p.CheckFromCloud() { @@ -56,30 +67,41 @@ func newListCommand(fs afero.Fs, p 
*config.Params) *cobra.Command { response, err := cl.Secret.ListSecrets(cmd.Context(), connect.NewRequest(request)) out.MaybeDie(err, "unable to list secrets: %v", err) - tw := out.NewTable("NAME", "SCOPES") - defer tw.Flush() + var items []secretListItem for _, secret := range response.Msg.Secrets { - var secretScopes []string + var scopes []string for _, scope := range secret.Scopes { name, ok := mapScopeToName()[scope] if !ok { - fmt.Printf("invalid scope: %s,", scope.String()) + fmt.Fprintf(os.Stderr, "invalid scope: %s\n", scope.String()) name = "invalid" } - secretScopes = append(secretScopes, name) + scopes = append(scopes, name) } - tw.PrintStructFields(struct { - Name string - Scopes string - }{ - Name: secret.Id, - Scopes: strings.Join(secretScopes, ", "), + items = append(items, secretListItem{ + ID: secret.Id, + Scopes: scopes, }) } + printSecretList(f, items, cmd.OutOrStdout()) }, } cmd.Flags().StringVar(&nameContains, "name-contains", "", "Substring match on secret name") + p.InstallFormatFlag(cmd) return cmd } + +func printSecretList(f config.OutFormatter, items []secretListItem, w io.Writer) { + if isText, _, formatted, err := f.Format(items); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, formatted) + return + } + tw := out.NewTableTo(w, "NAME", "SCOPES") + defer tw.Flush() + for _, item := range items { + tw.Print(item.ID, strings.Join(item.Scopes, ", ")) + } +} diff --git a/src/go/rpk/pkg/cli/security/secret/list_test.go b/src/go/rpk/pkg/cli/security/secret/list_test.go new file mode 100644 index 0000000000000..e7456cf247dc4 --- /dev/null +++ b/src/go/rpk/pkg/cli/security/secret/list_test.go @@ -0,0 +1,52 @@ +// Copyright 2026 Redpanda Data, Inc. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package secret + +import ( + "bytes" + "testing" + + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestPrintSecretList(t *testing.T) { + f := config.OutFormatter{Kind: "text"} + + t.Run("multiple scopes", func(t *testing.T) { + data := []secretListItem{ + {ID: "MY_SECRET", Scopes: []string{"redpanda_connect", "redpanda_cluster"}}, + {ID: "ANOTHER_SECRET", Scopes: []string{"redpanda_connect"}}, + } + var buf bytes.Buffer + printSecretList(f, data, &buf) + require.Equal(t, [][]string{ + {"NAME", "SCOPES"}, + {"MY_SECRET", "redpanda_connect,", "redpanda_cluster"}, + {"ANOTHER_SECRET", "redpanda_connect"}, + }, out.TableRows(buf.String())) + }) + + t.Run("no scopes", func(t *testing.T) { + var buf bytes.Buffer + printSecretList(f, []secretListItem{{ID: "EMPTY_SECRET"}}, &buf) + require.Equal(t, [][]string{ + {"NAME", "SCOPES"}, + {"EMPTY_SECRET"}, + }, out.TableRows(buf.String())) + }) + + t.Run("empty", func(t *testing.T) { + var buf bytes.Buffer + printSecretList(f, nil, &buf) + require.Equal(t, [][]string{{"NAME", "SCOPES"}}, out.TableRows(buf.String())) + }) +} diff --git a/src/go/rpk/pkg/cli/topic/BUILD b/src/go/rpk/pkg/cli/topic/BUILD index 59655a672111c..3ce3f01b843c9 100644 --- a/src/go/rpk/pkg/cli/topic/BUILD +++ b/src/go/rpk/pkg/cli/topic/BUILD @@ -47,9 +47,12 @@ go_test( name = "topic_test", size = "small", srcs = [ + "add_partitions_test.go", "analyze_test.go", + "config_test.go", "consume_test.go", "create_test.go", + "delete_test.go", "describe_test.go", "list_test.go", "trim_test.go", @@ -58,6 +61,7 @@ go_test( embed = [":topic"], deps = [ 
"//src/go/rpk/pkg/config", + "//src/go/rpk/pkg/out", "@com_github_spf13_afero//:afero", "@com_github_spf13_cobra//:cobra", "@com_github_stretchr_testify//assert", diff --git a/src/go/rpk/pkg/cli/topic/add_partitions.go b/src/go/rpk/pkg/cli/topic/add_partitions.go index c0d00b244ba1a..d78b217b9ce75 100644 --- a/src/go/rpk/pkg/cli/topic/add_partitions.go +++ b/src/go/rpk/pkg/cli/topic/add_partitions.go @@ -13,6 +13,7 @@ import ( "context" "errors" "fmt" + "io" "os" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" @@ -24,6 +25,24 @@ import ( "go.uber.org/zap" ) +type addPartitionsResult struct { + Topic string `json:"topic" yaml:"topic"` + Status string `json:"status" yaml:"status"` +} + +func printAddPartitionsResults(f config.OutFormatter, results []addPartitionsResult, w io.Writer) { + if isText, _, t, err := f.Format(results); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, t) + return + } + tw := out.NewTableTo(w, "TOPIC", "STATUS") + defer tw.Flush() + for _, r := range results { + tw.Print(r.Topic, r.Status) + } +} + func newAddPartitionsCommand(fs afero.Fs, p *config.Params) *cobra.Command { var num int var force bool @@ -32,7 +51,12 @@ func newAddPartitionsCommand(fs afero.Fs, p *config.Params) *cobra.Command { Short: "Add partitions to existing topics", Args: cobra.MinimumNArgs(1), Long: `Add partitions to existing topics.`, - Run: func(_ *cobra.Command, topics []string) { + Run: func(cmd *cobra.Command, topics []string) { + f := p.Formatter + if h, ok := f.Help([]addPartitionsResult{}); ok { + out.Exit(h) + } + if !force { for _, t := range topics { if t == "__consumer_offsets" || t == "_schemas" || t == "__transaction_state" || t == "coprocessor_internal_topic" { @@ -61,9 +85,7 @@ func newAddPartitionsCommand(fs afero.Fs, p *config.Params) *cobra.Command { } }() - tw := out.NewTable("topic", "status") - defer tw.Flush() - + var results []addPartitionsResult for _, resp := range 
resps.Sorted() { msg := "OK" if resp.ErrMessage != "" { @@ -82,12 +104,14 @@ func newAddPartitionsCommand(fs afero.Fs, p *config.Params) *cobra.Command { } exit1 = true } - tw.Print(resp.Topic, msg) + results = append(results, addPartitionsResult{Topic: resp.Topic, Status: msg}) } + printAddPartitionsResults(f, results, cmd.OutOrStdout()) }, } cmd.Flags().IntVarP(&num, "num", "n", 0, "Number of partitions to add to each topic") cmd.MarkFlagRequired("num") cmd.Flags().BoolVarP(&force, "force", "f", false, "Force change the partition count in internal topics, e.g. __consumer_offsets.") + p.InstallFormatFlag(cmd) return cmd } diff --git a/src/go/rpk/pkg/cli/topic/add_partitions_test.go b/src/go/rpk/pkg/cli/topic/add_partitions_test.go new file mode 100644 index 0000000000000..e2ac771497756 --- /dev/null +++ b/src/go/rpk/pkg/cli/topic/add_partitions_test.go @@ -0,0 +1,35 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package topic + +import ( + "strings" + "testing" + + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestPrintAddPartitionsResults(t *testing.T) { + results := []addPartitionsResult{ + {Topic: "foo", Status: "OK"}, + {Topic: "bar", Status: "INVALID_PARTITIONS: unable to add 3 partitions due to hardware constraints"}, + } + + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printAddPartitionsResults(f, results, b) + require.Equal(t, [][]string{ + {"TOPIC", "STATUS"}, + {"foo", "OK"}, + {"bar", "INVALID_PARTITIONS:", "unable", "to", "add", "3", "partitions", "due", "to", "hardware", "constraints"}, + }, out.TableRows(b.String())) +} 
diff --git a/src/go/rpk/pkg/cli/topic/config.go b/src/go/rpk/pkg/cli/topic/config.go index 89882890bf5ca..97f62620fae6c 100644 --- a/src/go/rpk/pkg/cli/topic/config.go +++ b/src/go/rpk/pkg/cli/topic/config.go @@ -11,6 +11,8 @@ package topic import ( "context" + "fmt" + "io" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/kafka" @@ -22,6 +24,24 @@ import ( "go.uber.org/zap" ) +type alterConfigResult struct { + Topic string `json:"topic" yaml:"topic"` + Status string `json:"status" yaml:"status"` +} + +func printAlterConfigResults(f config.OutFormatter, results []alterConfigResult, w io.Writer) { + if isText, _, t, err := f.Format(results); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, t) + return + } + tw := out.NewTableTo(w, "TOPIC", "STATUS") + defer tw.Flush() + for _, r := range results { + tw.Print(r.Topic, r.Status) + } +} + func newAlterConfigCommand(fs afero.Fs, p *config.Params) *cobra.Command { var ( sets []string // key=val @@ -52,7 +72,12 @@ The --dry option will validate whether the requested configuration change is valid, but does not apply it. 
Use the flag '--no-confirm' to avoid the confirmation prompt.`, Args: cobra.MinimumNArgs(1), - Run: func(_ *cobra.Command, topics []string) { + Run: func(cmd *cobra.Command, topics []string) { + f := p.Formatter + if h, ok := f.Help([]alterConfigResult{}); ok { + out.Exit(h) + } + p, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) @@ -158,9 +183,7 @@ Use the flag '--no-confirm' to avoid the confirmation prompt.`, resp, err := req.RequestWith(context.Background(), cl) out.MaybeDie(err, "unable to incrementally update configs: %v", err) - tw := out.NewTable("TOPIC", "STATUS") - defer tw.Flush() - + var results []alterConfigResult for _, resource := range resp.Resources { msg := "OK" if rrwErrorCode := rrwErrors[resource.ResourceName]; resource.ErrorCode != 0 || rrwErrorCode != 0 { @@ -174,8 +197,9 @@ Use the flag '--no-confirm' to avoid the confirmation prompt.`, msg += ": " + *resource.ErrorMessage } } - tw.Print(resource.ResourceName, msg) + results = append(results, alterConfigResult{Topic: resource.ResourceName, Status: msg}) } + printAlterConfigResults(f, results, cmd.OutOrStdout()) }, } @@ -187,5 +211,6 @@ Use the flag '--no-confirm' to avoid the confirmation prompt.`, cmd.Flags().BoolVar(&dry, "dry", false, "Dry run: validate the alter request, but do not apply") cmd.Flags().BoolVar(&noConfirm, "no-confirm", false, "Disable confirmation prompt") + p.InstallFormatFlag(cmd) return cmd } diff --git a/src/go/rpk/pkg/cli/topic/config_test.go b/src/go/rpk/pkg/cli/topic/config_test.go new file mode 100644 index 0000000000000..e6a3f788dd75e --- /dev/null +++ b/src/go/rpk/pkg/cli/topic/config_test.go @@ -0,0 +1,35 @@ +// Copyright 2026 Redpanda Data, Inc. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package topic + +import ( + "strings" + "testing" + + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestPrintAlterConfigResults(t *testing.T) { + results := []alterConfigResult{ + {Topic: "foo", Status: "OK"}, + {Topic: "bar", Status: "Invalid topic"}, + } + + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printAlterConfigResults(f, results, b) + require.Equal(t, [][]string{ + {"TOPIC", "STATUS"}, + {"foo", "OK"}, + {"bar", "Invalid", "topic"}, + }, out.TableRows(b.String())) +} diff --git a/src/go/rpk/pkg/cli/topic/delete.go b/src/go/rpk/pkg/cli/topic/delete.go index 028df58e3291c..aec3ff6f3a1a2 100644 --- a/src/go/rpk/pkg/cli/topic/delete.go +++ b/src/go/rpk/pkg/cli/topic/delete.go @@ -12,6 +12,8 @@ package topic import ( "context" "errors" + "fmt" + "io" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/kafka" @@ -22,6 +24,24 @@ import ( "go.uber.org/zap" ) +type topicDeleteResult struct { + Topic string `json:"topic" yaml:"topic"` + Status string `json:"status" yaml:"status"` +} + +func printTopicDeleteResults(f config.OutFormatter, results []topicDeleteResult, w io.Writer) { + if isText, _, t, err := f.Format(results); !isText { + out.MaybeDie(err, "unable to print in the requested format %q: %v", f.Kind, err) + fmt.Fprintln(w, t) + return + } + tw := out.NewTableTo(w, "TOPIC", "STATUS") + defer tw.Flush() + for _, r := range results { + tw.Print(r.Topic, r.Status) + } +} + func newDeleteCommand(fs afero.Fs, p *config.Params) *cobra.Command { var re bool cmd := 
&cobra.Command{ @@ -51,7 +71,12 @@ For example, `, Args: cobra.MinimumNArgs(1), - Run: func(_ *cobra.Command, topics []string) { + Run: func(cmd *cobra.Command, topics []string) { + f := p.Formatter + if h, ok := f.Help([]topicDeleteResult{}); ok { + out.Exit(h) + } + p, err := p.LoadVirtualProfile(fs) out.MaybeDie(err, "rpk unable to load config: %v", err) @@ -66,8 +91,8 @@ For example, resps, err := adm.DeleteTopics(context.Background(), topics...) out.MaybeDie(err, "unable to issue delete topics request: %v", err) - tw := out.NewTable("topic", "status") - defer tw.Flush() + + results := make([]topicDeleteResult, 0, len(resps)) for _, t := range resps.Sorted() { msg := "OK" if t.Err != nil { @@ -79,10 +104,12 @@ For example, } } } - tw.Print(t.Topic, msg) + results = append(results, topicDeleteResult{Topic: t.Topic, Status: msg}) } + printTopicDeleteResults(f, results, cmd.OutOrStdout()) }, } cmd.Flags().BoolVarP(&re, "regex", "r", false, "Parse topics as regex; delete any topic that matches any input topic expression") + p.InstallFormatFlag(cmd) return cmd } diff --git a/src/go/rpk/pkg/cli/topic/delete_test.go b/src/go/rpk/pkg/cli/topic/delete_test.go new file mode 100644 index 0000000000000..287c8c64ee1a3 --- /dev/null +++ b/src/go/rpk/pkg/cli/topic/delete_test.go @@ -0,0 +1,35 @@ +// Copyright 2026 Redpanda Data, Inc. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package topic + +import ( + "strings" + "testing" + + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" + "github.com/stretchr/testify/require" +) + +func TestPrintTopicDeleteResults(t *testing.T) { + results := []topicDeleteResult{ + {Topic: "foo", Status: "OK"}, + {Topic: "bar", Status: "UNKNOWN_TOPIC_OR_PARTITION: topic not found"}, + } + + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printTopicDeleteResults(f, results, b) + require.Equal(t, [][]string{ + {"TOPIC", "STATUS"}, + {"foo", "OK"}, + {"bar", "UNKNOWN_TOPIC_OR_PARTITION:", "topic", "not", "found"}, + }, out.TableRows(b.String())) +} diff --git a/src/go/rpk/pkg/cli/topic/list_test.go b/src/go/rpk/pkg/cli/topic/list_test.go index 65dde713faf74..868e0728cc577 100644 --- a/src/go/rpk/pkg/cli/topic/list_test.go +++ b/src/go/rpk/pkg/cli/topic/list_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" "github.com/spf13/afero" "github.com/spf13/cobra" "github.com/stretchr/testify/require" @@ -59,114 +60,65 @@ func setupTestTopics() kadm.TopicDetails { } } -type testCase struct { - Kind string - Output string -} - -func JSON(t *testing.T, o any) testCase { - expected, err := json.Marshal(o) - require.NoError(t, err) - return testCase{Kind: "json", Output: string(expected) + "\n"} -} - -func YAML(t *testing.T, o any) testCase { - expected, err := yaml.Marshal(o) - require.NoError(t, err) - return testCase{Kind: "yaml", Output: string(expected) + "\n"} -} - -func Text(s string) testCase { - return testCase{Kind: "text", Output: s} -} 
- func TestSummarizedListView(t *testing.T) { topics := setupTestTopics() s := summarizedListView(false, topics) - cases := []testCase{ - Text(`NAME PARTITIONS REPLICAS -test-topic 2 3 -`), - JSON(t, s), - YAML(t, s), - } - - for _, c := range cases { - f := config.OutFormatter{Kind: c.Kind} - b := &strings.Builder{} - printSummarizedListView(f, s, b) - require.Equal(t, c.Output, b.String()) - } + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printSummarizedListView(f, s, b) + require.Equal(t, [][]string{ + {"NAME", "PARTITIONS", "REPLICAS"}, + {"test-topic", "2", "3"}, + }, out.TableRows(b.String())) } func TestDetailedListView(t *testing.T) { topics := setupTestTopics() d := detailedListView(false, topics) - cases := []testCase{ - Text(`test-topic, 2 partitions, 3 replicas - PARTITION LEADER EPOCH REPLICAS OFFLINE_REPLICAS - 0 1 5 [1 2 3] [] - 1 2 3 [1 2 3] [1] -`), - JSON(t, d), - YAML(t, d), - } - - for _, c := range cases { - f := config.OutFormatter{Kind: c.Kind} - b := &strings.Builder{} - printDetailedListView(f, d, b) - require.Equal(t, c.Output, b.String()) - } + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printDetailedListView(f, d, b) + require.Equal(t, [][]string{ + {"test-topic,", "2", "partitions,", "3", "replicas"}, + {"PARTITION", "LEADER", "EPOCH", "REPLICAS", "OFFLINE_REPLICAS"}, + {"0", "1", "5", "[1", "2", "3]", "[]"}, + {"1", "2", "3", "[1", "2", "3]", "[1]"}, + }, out.TableRows(b.String())) } func TestSummarizedListViewWithInternal(t *testing.T) { topics := setupTestTopics() s := summarizedListView(true, topics) - cases := []testCase{ - Text(`NAME PARTITIONS REPLICAS -internal-topic 1 1 -test-topic 2 3 -`), - JSON(t, s), - YAML(t, s), - } - - for _, c := range cases { - f := config.OutFormatter{Kind: c.Kind} - b := &strings.Builder{} - printSummarizedListView(f, s, b) - require.Equal(t, c.Output, b.String()) - } + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + 
printSummarizedListView(f, s, b) + require.Equal(t, [][]string{ + {"NAME", "PARTITIONS", "REPLICAS"}, + {"internal-topic", "1", "1"}, + {"test-topic", "2", "3"}, + }, out.TableRows(b.String())) } func TestDetailedListViewWithInternal(t *testing.T) { topics := setupTestTopics() d := detailedListView(true, topics) - cases := []testCase{ - Text(`internal-topic (internal), 1 partitions, 1 replicas - PARTITION LEADER EPOCH REPLICAS - 0 1 1 [1] - -test-topic, 2 partitions, 3 replicas - PARTITION LEADER EPOCH REPLICAS OFFLINE_REPLICAS - 0 1 5 [1 2 3] [] - 1 2 3 [1 2 3] [1] -`), - JSON(t, d), - YAML(t, d), - } - - for _, c := range cases { - f := config.OutFormatter{Kind: c.Kind} - b := &strings.Builder{} - printDetailedListView(f, d, b) - require.Equal(t, c.Output, b.String()) - } + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printDetailedListView(f, d, b) + require.Equal(t, [][]string{ + {"internal-topic", "(internal),", "1", "partitions,", "1", "replicas"}, + {"PARTITION", "LEADER", "EPOCH", "REPLICAS"}, + {"0", "1", "1", "[1]"}, + {}, + {"test-topic,", "2", "partitions,", "3", "replicas"}, + {"PARTITION", "LEADER", "EPOCH", "REPLICAS", "OFFLINE_REPLICAS"}, + {"0", "1", "5", "[1", "2", "3]", "[]"}, + {"1", "2", "3", "[1", "2", "3]", "[1]"}, + }, out.TableRows(b.String())) } func TestEmptyTopicList(t *testing.T) { diff --git a/src/go/rpk/pkg/cli/transform/BUILD b/src/go/rpk/pkg/cli/transform/BUILD index 694efc8d0df4d..3bd2f902be366 100644 --- a/src/go/rpk/pkg/cli/transform/BUILD +++ b/src/go/rpk/pkg/cli/transform/BUILD @@ -45,10 +45,10 @@ go_test( embed = [":transform"], deps = [ "//src/go/rpk/pkg/config", + "//src/go/rpk/pkg/out", "@com_github_redpanda_data_common_go_rpadmin//:rpadmin", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", "@com_github_twmb_franz_go//pkg/kgo", - "@in_gopkg_yaml_v3//:yaml_v3", ], ) diff --git a/src/go/rpk/pkg/cli/transform/list_test.go b/src/go/rpk/pkg/cli/transform/list_test.go index 
48f3ec8b46fb7..0491a90d525c9 100644 --- a/src/go/rpk/pkg/cli/transform/list_test.go +++ b/src/go/rpk/pkg/cli/transform/list_test.go @@ -12,14 +12,13 @@ package transform import ( - "encoding/json" "strings" "testing" "github.com/redpanda-data/common-go/rpadmin" "github.com/redpanda-data/redpanda/src/go/rpk/pkg/config" - "gopkg.in/yaml.v3" + "github.com/redpanda-data/redpanda/src/go/rpk/pkg/out" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -76,28 +75,6 @@ func setupTestData() []rpadmin.TransformMetadata { } } -type testCase struct { - Kind string - Output string -} - -func JSON(t *testing.T, o any) testCase { - expected, err := json.Marshal(o) - require.NoError(t, err) - return testCase{Kind: "json", Output: string(expected) + "\n"} -} - -func YAML(t *testing.T, o any) testCase { - expected, err := yaml.Marshal(o) - require.NoError(t, err) - return testCase{Kind: "yaml", Output: string(expected) + "\n"} -} - -func Text(s string) testCase { - s = strings.TrimSpace(s) - return testCase{Kind: "text", Output: s + "\n"} -} - func TestHandleEmptyInput(t *testing.T) { s := summarizedView([]rpadmin.TransformMetadata{}) assert.NotNil(t, s) @@ -107,44 +84,32 @@ func TestHandleEmptyInput(t *testing.T) { func TestPrintSummaryView(t *testing.T) { s := summarizedView(setupTestData()) - cases := []testCase{ - Text(` -NAME INPUT TOPIC OUTPUT TOPIC RUNNING LAG -foo2bar foo bar 2 / 3 7 -scrubber pii cleaned, munged 0 / 1 99 -`), - JSON(t, s), - YAML(t, s), - } - for _, c := range cases { - f := config.OutFormatter{Kind: c.Kind} - b := &strings.Builder{} - printSummary(f, s, b) - require.Equal(t, c.Output, b.String()) - } + + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printSummary(f, s, b) + require.Equal(t, [][]string{ + {"NAME", "INPUT", "TOPIC", "OUTPUT", "TOPIC", "RUNNING", "LAG"}, + {"foo2bar", "foo", "bar", "2", "/", "3", "7"}, + {"scrubber", "pii", "cleaned,", "munged", "0", "/", "1", "99"}, + }, 
out.TableRows(b.String())) } func TestPrintDetailView(t *testing.T) { d := detailView(setupTestData()) - cases := []testCase{ - Text(` -foo2bar, foo → bar - PARTITION NODE STATUS LAG - 1 0 running 1 - 2 0 running 1 - 3 1 inactive 5 -scrubber, pii → cleaned, munged - PARTITION NODE STATUS LAG - 1 0 errored 99 -`), - JSON(t, d), - YAML(t, d), - } - for _, c := range cases { - f := config.OutFormatter{Kind: c.Kind} - b := &strings.Builder{} - printDetailed(f, d, b) - require.Equal(t, c.Output, b.String()) - } + f := config.OutFormatter{Kind: "text"} + b := &strings.Builder{} + printDetailed(f, d, b) + require.Equal(t, [][]string{ + {"foo2bar,", "foo", "→", "bar"}, + {"PARTITION", "NODE", "STATUS", "LAG"}, + {"1", "0", "running", "1"}, + {"2", "0", "running", "1"}, + {"3", "1", "inactive", "5"}, + {}, + {"scrubber,", "pii", "→", "cleaned,", "munged"}, + {"PARTITION", "NODE", "STATUS", "LAG"}, + {"1", "0", "errored", "99"}, + }, out.TableRows(b.String())) } diff --git a/src/go/rpk/pkg/out/out.go b/src/go/rpk/pkg/out/out.go index aa8251b5ac17b..ff6e45b5ce120 100644 --- a/src/go/rpk/pkg/out/out.go +++ b/src/go/rpk/pkg/out/out.go @@ -215,10 +215,16 @@ func args2strings(args []any) []string { return sargs } +// SectionTo prints header in uppercase, followed by a line of =, to w. +func SectionTo(w io.Writer, header string) { + upper := norm(header) + fmt.Fprintln(w, upper) + fmt.Fprintln(w, strings.Repeat("=", len(upper))) +} + // Section prints header in uppercase, followed by a line of =. func Section(header string) { - fmt.Println(norm(header)) - fmt.Println(strings.Repeat("=", len(header))) + SectionTo(os.Stdout, header) } // TabWriter writes tab delimited output. @@ -319,6 +325,22 @@ func (t *TabWriter) Line(sprint ...any) { fmt.Fprint(t.Writer, append(sprint, "\n")...) } +// TableRows parses tabwriter-formatted output into whitespace-tokenized rows. +// Intended for tests: column widths aren't preserved, so assertions don't +// couple to tabwriter padding choices. 
Section-underline rows (all '=', as +// written by Section) are dropped since their length is just len(title). +func TableRows(s string) [][]string { + var rows [][]string + for _, line := range strings.Split(strings.TrimRight(s, "\n"), "\n") { + fields := strings.Fields(line) + if len(fields) == 1 && strings.Trim(fields[0], "=") == "" { + continue + } + rows = append(rows, fields) + } + return rows +} + func WithLogBanner(s string, additionalArgs ...any) string { if len(additionalArgs) == 0 { return fmt.Sprintf("================ %s ===============", s)