From 45bb0926c2bdc9f9100cac1b8b4a2933ab0a5119 Mon Sep 17 00:00:00 2001 From: Kevin Cao <39608887+kev-cao@users.noreply.github.com> Date: Mon, 12 Jan 2026 10:41:40 -0500 Subject: [PATCH 1/2] backup: update SHOW BACKUP syntax to NEWER/OLDER than This commit updates the `SHOW BACKUP` time filtering syntax from `AFTER/BEFORE` to `NEWER/OLDER THAN`. Epic: CRDB-57536 Informs: #159647 Release note: None --- docs/generated/sql/bnf/show_backup.bnf | 2 +- docs/generated/sql/bnf/stmt_block.bnf | 18 ++++--- pkg/sql/parser/sql.y | 42 +++++++++------- pkg/sql/parser/testdata/backup_restore | 68 +++++++++++++------------- pkg/sql/sem/tree/show.go | 34 ++++++------- 5 files changed, 88 insertions(+), 76 deletions(-) diff --git a/docs/generated/sql/bnf/show_backup.bnf b/docs/generated/sql/bnf/show_backup.bnf index 775e752f390c..ee9360fa7405 100644 --- a/docs/generated/sql/bnf/show_backup.bnf +++ b/docs/generated/sql/bnf/show_backup.bnf @@ -1,5 +1,5 @@ show_backup_stmt ::= - 'SHOW' 'BACKUPS' 'IN' collectionURI opt_show_after_before_clause + 'SHOW' 'BACKUPS' 'IN' collectionURI opt_show_backup_time_filter_clause | 'SHOW' 'BACKUP' 'SCHEMAS' 'FROM' subdirectory 'IN' collectionURI 'WITH' show_backup_options ( ( ',' show_backup_options ) )* | 'SHOW' 'BACKUP' 'SCHEMAS' 'FROM' subdirectory 'IN' collectionURI 'WITH' 'OPTIONS' '(' show_backup_options ( ( ',' show_backup_options ) )* ')' | 'SHOW' 'BACKUP' 'SCHEMAS' 'FROM' subdirectory 'IN' collectionURI diff --git a/docs/generated/sql/bnf/stmt_block.bnf b/docs/generated/sql/bnf/stmt_block.bnf index c175ceaca9b1..c325ed711b6f 100644 --- a/docs/generated/sql/bnf/stmt_block.bnf +++ b/docs/generated/sql/bnf/stmt_block.bnf @@ -865,7 +865,7 @@ use_stmt ::= 'USE' var_value show_backup_stmt ::= - 'SHOW' 'BACKUPS' 'IN' string_or_placeholder_opt_list opt_show_after_before_clause + 'SHOW' 'BACKUPS' 'IN' string_or_placeholder_opt_list opt_show_backup_time_filter_clause | 'SHOW' 'BACKUP' show_backup_details 'FROM' string_or_placeholder 'IN' string_or_placeholder_opt_list opt_with_show_backup_options | 'SHOW' 'BACKUP' string_or_placeholder 'IN' string_or_placeholder_opt_list opt_with_show_backup_options @@ -1360,6 +1360,7 @@ unreserved_keyword ::= | 'NAN' | 'NEVER' | 'NEW' + | 'NEWER' | 'NEW_DB_NAME' | 'NEW_KMS' | 'NEXT' @@ -1393,6 +1394,7 @@ unreserved_keyword ::= | 'OFF' | 'OIDS' | 'OLD' + | 'OLDER' | 'OLD_KMS' | 'OPERATOR' | 'OPT' @@ -1564,6 +1566,7 @@ unreserved_keyword ::= | 'TENANTS' | 'TESTING_RELOCATE' | 'TEXT' + | 'THAN' | 'TIES' | 'TRACE' | 'TRACING' @@ -2208,11 +2211,11 @@ var_value ::= a_expr | extra_var_value -opt_show_after_before_clause ::= - 'AFTER' a_expr - | 'BEFORE' a_expr - | 'AFTER' a_expr 'BEFORE' a_expr - | 'BEFORE' a_expr 'AFTER' a_expr +opt_show_backup_time_filter_clause ::= + 'NEWER' 'THAN' a_expr + | 'OLDER' 'THAN' a_expr + | 'NEWER' 'THAN' a_expr 'OLDER' 'THAN' a_expr + | 'OLDER' 'THAN' a_expr 'NEWER' 'THAN' a_expr | show_backup_details ::= @@ -4337,6 +4340,7 @@ bare_label_keywords ::= | 'NATURAL' | 'NEVER' | 'NEW' + | 'NEWER' | 'NEW_DB_NAME' | 'NEW_KMS' | 'NEXT' @@ -4374,6 +4378,7 @@ bare_label_keywords ::= | 'OFF' | 'OIDS' | 'OLD' + | 'OLDER' | 'OLD_KMS' | 'ONLY' | 'OPERATOR' @@ -4567,6 +4572,7 @@ bare_label_keywords ::= | 'TENANT_NAME' | 'TESTING_RELOCATE' | 'TEXT' + | 'THAN' | 'THEN' | 'THROTTLING' | 'TIES' diff --git a/pkg/sql/parser/sql.y b/pkg/sql/parser/sql.y index 963da3e047ea..74d994b00a54 100644 --- a/pkg/sql/parser/sql.y +++ b/pkg/sql/parser/sql.y @@ -754,8 +754,8 @@ func (u *sqlSymUnion) showBackupDetails() tree.ShowBackupDetails { func (u 
*sqlSymUnion) showBackupOptions() *tree.ShowBackupOptions { return u.val.(*tree.ShowBackupOptions) } -func (u *sqlSymUnion) showAfterBefore() *tree.ShowAfterBefore { - return u.val.(*tree.ShowAfterBefore) +func (u *sqlSymUnion) showBackupTimeFilter() *tree.ShowBackupTimeFilter { + return u.val.(*tree.ShowBackupTimeFilter) } func (u *sqlSymUnion) checkExternalConnectionOptions() *tree.CheckExternalConnectionOptions { return u.val.(*tree.CheckExternalConnectionOptions) @@ -1060,14 +1060,14 @@ func (u *sqlSymUnion) filterType() tree.FilterType { %token MULTIPOINT MULTIPOINTM MULTIPOINTZ MULTIPOINTZM %token MULTIPOLYGON MULTIPOLYGONM MULTIPOLYGONZ MULTIPOLYGONZM -%token NAN NAME NAMES NATURAL NEG_INNER_PRODUCT NEVER NEW NEW_DB_NAME NEW_KMS NEXT NO NOBYPASSRLS NOCANCELQUERY NOCONTROLCHANGEFEED +%token NAN NAME NAMES NATURAL NEG_INNER_PRODUCT NEVER NEW NEWER NEW_DB_NAME NEW_KMS NEXT NO NOBYPASSRLS NOCANCELQUERY NOCONTROLCHANGEFEED %token NOCONTROLJOB NOCREATEDB NOCREATELOGIN NOCREATEROLE NODE NOLOGIN NOMODIFYCLUSTERSETTING NOREPLICATION %token NOSQLLOGIN NO_INDEX_JOIN NO_ZIGZAG_JOIN NO_FULL_SCAN NONE NONVOTERS NORMAL NOT %token NOTHING NOTHING_AFTER_RETURNING %token NOTNULL %token NOVIEWACTIVITY NOVIEWACTIVITYREDACTED NOVIEWCLUSTERSETTING NOWAIT NULL NULLIF NULLS NUMERIC -%token OF OFF OFFSET OID OIDS OIDVECTOR OLD OLD_KMS ON ONLY OPT OPTION OPTIONS OR +%token OF OFF OFFSET OID OIDS OIDVECTOR OLD OLDER OLD_KMS ON ONLY OPT OPTION OPTIONS OR %token ORDER ORDINALITY OTHERS OUT OUTER OVER OVERLAPS OVERLAY OWNED OWNER OPERATOR %token PARALLEL PARENT PARTIAL PARTITION PARTITIONS PASSWORD PAUSE PAUSED PER PERMISSIVE PHYSICAL PLACEMENT PLACING @@ -1092,7 +1092,7 @@ func (u *sqlSymUnion) filterType() tree.FilterType { %token STABLE START STATE STATEMENT STATISTICS STATUS STDIN STDOUT STOP STRAIGHT STREAM STRICT STRING STORAGE STORE STORED STORING SUBJECT SUBSTRING SUPER %token SUPPORT SURVIVE SURVIVAL SYMMETRIC SYNTAX SYSTEM SQRT SUBSCRIPTION STATEMENTS -%token TABLE TABLES TABLESPACE TEMP TEMPLATE TEMPORARY TENANT TENANT_NAME TENANTS TESTING_RELOCATE TEXT THEN +%token TABLE TABLES TABLESPACE TEMP TEMPLATE TEMPORARY TENANT TENANT_NAME TENANTS TESTING_RELOCATE TEXT THAN THEN %token TIES TIME TIMETZ TIMESTAMP TIMESTAMPTZ TO THROTTLING TRAILING TRACE %token TRANSACTION TRANSACTIONS TRANSFER TRANSFORM TREAT TRIGGER TRIGGERS TRIM TRUE %token TRUNCATE TRUSTED TYPE TYPES @@ -1488,7 +1488,7 @@ func (u *sqlSymUnion) filterType() tree.FilterType { %type show_backup_details %type <*tree.ShowJobOptions> show_job_options show_job_options_list %type <*tree.ShowBackupOptions> opt_with_show_backup_options show_backup_options show_backup_options_list -%type <*tree.ShowAfterBefore> opt_show_after_before_clause +%type <*tree.ShowBackupTimeFilter> opt_show_backup_time_filter_clause %type <*tree.CopyOptions> opt_with_copy_options copy_options copy_options_list copy_generic_options copy_generic_options_list %type import_format %type storage_parameter_key @@ -9013,12 +9013,12 @@ show_histogram_stmt: // %Text: SHOW BACKUP [SCHEMAS|FILES|RANGES] // %SeeAlso: WEBDOCS/show-backup.html show_backup_stmt: - SHOW BACKUPS IN string_or_placeholder_opt_list opt_show_after_before_clause + SHOW BACKUPS IN string_or_placeholder_opt_list opt_show_backup_time_filter_clause { $$.val = &tree.ShowBackup{ InCollection: $4.stringOrPlaceholderOptList(), - TimeRange: *$5.showAfterBefore(), + TimeRange: *$5.showBackupTimeFilter(), } } | SHOW BACKUP show_backup_details FROM string_or_placeholder IN string_or_placeholder_opt_list 
opt_with_show_backup_options @@ -9167,26 +9167,26 @@ show_backup_options: $$.val = &tree.ShowBackupOptions{EncryptionInfoDir: $3.expr()} } -opt_show_after_before_clause: - AFTER a_expr +opt_show_backup_time_filter_clause: + NEWER THAN a_expr { - $$.val = &tree.ShowAfterBefore{After: $2.expr()} + $$.val = &tree.ShowBackupTimeFilter{NewerThan: $3.expr()} } - | BEFORE a_expr + | OLDER THAN a_expr { - $$.val = &tree.ShowAfterBefore{Before: $2.expr()} + $$.val = &tree.ShowBackupTimeFilter{OlderThan: $3.expr()} } - | AFTER a_expr BEFORE a_expr + | NEWER THAN a_expr OLDER THAN a_expr { - $$.val = &tree.ShowAfterBefore{After: $2.expr(), Before: $4.expr()} + $$.val = &tree.ShowBackupTimeFilter{NewerThan: $3.expr(), OlderThan: $6.expr()} } - | BEFORE a_expr AFTER a_expr + | OLDER THAN a_expr NEWER THAN a_expr { - $$.val = &tree.ShowAfterBefore{After: $4.expr(), Before: $2.expr()} + $$.val = &tree.ShowBackupTimeFilter{NewerThan: $6.expr(), OlderThan: $3.expr()} } | /* EMPTY */ { - $$.val = &tree.ShowAfterBefore{} + $$.val = &tree.ShowBackupTimeFilter{} } // %Help: SHOW CLUSTER SETTING - display cluster settings @@ -18858,6 +18858,7 @@ unreserved_keyword: | NAN | NEVER | NEW +| NEWER | NEW_DB_NAME | NEW_KMS | NEXT @@ -18891,6 +18892,7 @@ unreserved_keyword: | OFF | OIDS | OLD +| OLDER | OLD_KMS | OPERATOR | OPT @@ -19062,6 +19064,7 @@ unreserved_keyword: | TENANTS | TESTING_RELOCATE | TEXT +| THAN | TIES | TRACE | TRACING @@ -19431,6 +19434,7 @@ bare_label_keywords: | NATURAL | NEVER | NEW +| NEWER | NEW_DB_NAME | NEW_KMS | NEXT @@ -19468,6 +19472,7 @@ bare_label_keywords: | OFF | OIDS | OLD +| OLDER | OLD_KMS | ONLY | OPERATOR @@ -19661,6 +19666,7 @@ bare_label_keywords: | TENANT_NAME | TESTING_RELOCATE | TEXT +| THAN | THEN | THROTTLING | TIES diff --git a/pkg/sql/parser/testdata/backup_restore b/pkg/sql/parser/testdata/backup_restore index ef2356f91686..c9976ad4f38f 100644 --- a/pkg/sql/parser/testdata/backup_restore +++ b/pkg/sql/parser/testdata/backup_restore @@ -132,56 +132,56 @@ SHOW BACKUPS IN $1 -- literals removed SHOW BACKUPS IN $1 -- identifiers removed parse -SHOW BACKUPS IN 'bar' AFTER '1' +SHOW BACKUPS IN 'bar' NEWER THAN '1' ---- -SHOW BACKUPS IN '*****' AFTER '1' -- normalized! -SHOW BACKUPS IN ('*****') AFTER ('1') -- fully parenthesized -SHOW BACKUPS IN '_' AFTER '_' -- literals removed -SHOW BACKUPS IN '*****' AFTER '1' -- identifiers removed -SHOW BACKUPS IN 'bar' AFTER '1' -- passwords exposed +SHOW BACKUPS IN '*****' NEWER THAN '1' -- normalized! +SHOW BACKUPS IN ('*****') NEWER THAN ('1') -- fully parenthesized +SHOW BACKUPS IN '_' NEWER THAN '_' -- literals removed +SHOW BACKUPS IN '*****' NEWER THAN '1' -- identifiers removed +SHOW BACKUPS IN 'bar' NEWER THAN '1' -- passwords exposed parse -SHOW BACKUPS IN 'bar' BEFORE '1' +SHOW BACKUPS IN 'bar' OLDER THAN '1' ---- -SHOW BACKUPS IN '*****' BEFORE '1' -- normalized! -SHOW BACKUPS IN ('*****') BEFORE ('1') -- fully parenthesized -SHOW BACKUPS IN '_' BEFORE '_' -- literals removed -SHOW BACKUPS IN '*****' BEFORE '1' -- identifiers removed -SHOW BACKUPS IN 'bar' BEFORE '1' -- passwords exposed +SHOW BACKUPS IN '*****' OLDER THAN '1' -- normalized! 
+SHOW BACKUPS IN ('*****') OLDER THAN ('1') -- fully parenthesized +SHOW BACKUPS IN '_' OLDER THAN '_' -- literals removed +SHOW BACKUPS IN '*****' OLDER THAN '1' -- identifiers removed +SHOW BACKUPS IN 'bar' OLDER THAN '1' -- passwords exposed parse -SHOW BACKUPS IN 'bar' AFTER '1' BEFORE '2' +SHOW BACKUPS IN 'bar' NEWER THAN '1' OLDER THAN '2' ---- -SHOW BACKUPS IN '*****' AFTER '1' BEFORE '2' -- normalized! -SHOW BACKUPS IN ('*****') AFTER ('1') BEFORE ('2') -- fully parenthesized -SHOW BACKUPS IN '_' AFTER '_' BEFORE '_' -- literals removed -SHOW BACKUPS IN '*****' AFTER '1' BEFORE '2' -- identifiers removed -SHOW BACKUPS IN 'bar' AFTER '1' BEFORE '2' -- passwords exposed +SHOW BACKUPS IN '*****' NEWER THAN '1' OLDER THAN '2' -- normalized! +SHOW BACKUPS IN ('*****') NEWER THAN ('1') OLDER THAN ('2') -- fully parenthesized +SHOW BACKUPS IN '_' NEWER THAN '_' OLDER THAN '_' -- literals removed +SHOW BACKUPS IN '*****' NEWER THAN '1' OLDER THAN '2' -- identifiers removed +SHOW BACKUPS IN 'bar' NEWER THAN '1' OLDER THAN '2' -- passwords exposed parse -SHOW BACKUPS IN 'bar' BEFORE '2' AFTER '1' +SHOW BACKUPS IN 'bar' OLDER THAN '2' NEWER THAN '1' ---- -SHOW BACKUPS IN '*****' AFTER '1' BEFORE '2' -- normalized! -SHOW BACKUPS IN ('*****') AFTER ('1') BEFORE ('2') -- fully parenthesized -SHOW BACKUPS IN '_' AFTER '_' BEFORE '_' -- literals removed -SHOW BACKUPS IN '*****' AFTER '1' BEFORE '2' -- identifiers removed -SHOW BACKUPS IN 'bar' AFTER '1' BEFORE '2' -- passwords exposed +SHOW BACKUPS IN '*****' NEWER THAN '1' OLDER THAN '2' -- normalized! +SHOW BACKUPS IN ('*****') NEWER THAN ('1') OLDER THAN ('2') -- fully parenthesized +SHOW BACKUPS IN '_' NEWER THAN '_' OLDER THAN '_' -- literals removed +SHOW BACKUPS IN '*****' NEWER THAN '1' OLDER THAN '2' -- identifiers removed +SHOW BACKUPS IN 'bar' NEWER THAN '1' OLDER THAN '2' -- passwords exposed parse -SHOW BACKUPS IN $1 AFTER $2 BEFORE $3 +SHOW BACKUPS IN $1 NEWER THAN $2 OLDER THAN $3 ---- -SHOW BACKUPS IN $1 AFTER $2 BEFORE $3 -SHOW BACKUPS IN ($1) AFTER ($2) BEFORE ($3) -- fully parenthesized -SHOW BACKUPS IN $1 AFTER $1 BEFORE $1 -- literals removed -SHOW BACKUPS IN $1 AFTER $2 BEFORE $3 -- identifiers removed +SHOW BACKUPS IN $1 NEWER THAN $2 OLDER THAN $3 +SHOW BACKUPS IN ($1) NEWER THAN ($2) OLDER THAN ($3) -- fully parenthesized +SHOW BACKUPS IN $1 NEWER THAN $1 OLDER THAN $1 -- literals removed +SHOW BACKUPS IN $1 NEWER THAN $2 OLDER THAN $3 -- identifiers removed parse -SHOW BACKUPS IN $1 BEFORE $2 AFTER $3 +SHOW BACKUPS IN $1 OLDER THAN $2 NEWER THAN $3 ---- -SHOW BACKUPS IN $1 AFTER $3 BEFORE $2 -- normalized! -SHOW BACKUPS IN ($1) AFTER ($3) BEFORE ($2) -- fully parenthesized -SHOW BACKUPS IN $1 AFTER $1 BEFORE $1 -- literals removed -SHOW BACKUPS IN $1 AFTER $3 BEFORE $2 -- identifiers removed +SHOW BACKUPS IN $1 NEWER THAN $3 OLDER THAN $2 -- normalized! +SHOW BACKUPS IN ($1) NEWER THAN ($3) OLDER THAN ($2) -- fully parenthesized +SHOW BACKUPS IN $1 NEWER THAN $1 OLDER THAN $1 -- literals removed +SHOW BACKUPS IN $1 NEWER THAN $3 OLDER THAN $2 -- identifiers removed parse diff --git a/pkg/sql/sem/tree/show.go b/pkg/sql/sem/tree/show.go index af29caa93ca8..20c8d033cc86 100644 --- a/pkg/sql/sem/tree/show.go +++ b/pkg/sql/sem/tree/show.go @@ -100,7 +100,7 @@ type ShowBackup struct { From bool Details ShowBackupDetails Options ShowBackupOptions - TimeRange ShowAfterBefore + TimeRange ShowBackupTimeFilter } // Format implements the NodeFormatter interface. 
@@ -140,32 +140,32 @@ func (node *ShowBackup) Format(ctx *FmtCtx) { } } -// ShowAfterBefore represents the AFTER BEFORE option for -// SHOW BACKUPS. -type ShowAfterBefore struct { - After Expr - Before Expr +// ShowBackupTimeFilter represents the NEWER THAN / OLDER THAN +// option for SHOW BACKUPS. +type ShowBackupTimeFilter struct { + NewerThan Expr + OlderThan Expr } -var _ NodeFormatter = &ShowAfterBefore{} +var _ NodeFormatter = &ShowBackupTimeFilter{} -func (s *ShowAfterBefore) Format(ctx *FmtCtx) { - if s.After != nil { - ctx.WriteString("AFTER ") - ctx.FormatNode(s.After) +func (s *ShowBackupTimeFilter) Format(ctx *FmtCtx) { + if s.NewerThan != nil { + ctx.WriteString("NEWER THAN ") + ctx.FormatNode(s.NewerThan) } - if s.Before != nil { - if s.After != nil { + if s.OlderThan != nil { + if s.NewerThan != nil { + ctx.WriteString(" ") } - ctx.WriteString("BEFORE ") - ctx.FormatNode(s.Before) + ctx.WriteString("OLDER THAN ") + ctx.FormatNode(s.OlderThan) } } -func (s *ShowAfterBefore) IsDefault() bool { - return s.After == nil && s.Before == nil +func (s *ShowBackupTimeFilter) IsDefault() bool { + return s.NewerThan == nil && s.OlderThan == nil } type ShowBackupOptions struct { From b81ac03686f8ad6b30c508cb420ec6f445ed8d07 Mon Sep 17 00:00:00 2001 From: Kevin Cao <39608887+kev-cao@users.noreply.github.com> Date: Wed, 24 Dec 2025 13:38:59 -0500 Subject: [PATCH 2/2] backup: support SHOW BACKUPS with backup ids This commit teaches SHOW BACKUPS to display the new UX with backup IDs, along with NEWER THAN/OLDER THAN filtering. Epic: CRDB-57536 Resolves: #159647 Release note (sql change): Users can now set the `use_backups_with_ids` session setting to enable a new `SHOW BACKUPS IN` experience. When enabled, `SHOW BACKUPS IN` displays all backups in the collection. Results can be filtered by backup end time using `OLDER THAN` or `NEWER THAN` clauses.
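When NEWER THAN and OLDER THAN are not both specified, at most the 50 most recent matching backups are listed, and a notice is emitted if results were truncated.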
Example usage: SET use_backups_with_ids = true; SHOW BACKUPS IN '' OLDER THAN '2026-01-09 12:13:14' NEWER THAN '2026-01-04 15:16:17'; --- pkg/backup/BUILD.bazel | 1 + pkg/backup/backupinfo/BUILD.bazel | 1 + pkg/backup/backupinfo/backup_index.go | 211 ++++++++++++------ pkg/backup/backupinfo/backup_index_test.go | 43 +++- pkg/backup/show.go | 126 ++++++++++- pkg/backup/show_test.go | 174 +++++++++++++++ .../testdata/logic_test/information_schema | 23 +- .../logictest/testdata/logic_test/pg_catalog | 3 + .../logictest/testdata/logic_test/show_source | 1 + pkg/sql/sessiondatapb/session_data.proto | 5 + pkg/sql/sessionmutator/mutator.go | 4 + pkg/sql/vars.go | 16 ++ 12 files changed, 517 insertions(+), 91 deletions(-) diff --git a/pkg/backup/BUILD.bazel b/pkg/backup/BUILD.bazel index 6f3a1e81cfc9..8b1374bdd746 100644 --- a/pkg/backup/BUILD.bazel +++ b/pkg/backup/BUILD.bazel @@ -133,6 +133,7 @@ go_library( "//pkg/util", "//pkg/util/admission", "//pkg/util/admission/admissionpb", + "//pkg/util/besteffort", "//pkg/util/bulk", "//pkg/util/ctxgroup", "//pkg/util/envutil", diff --git a/pkg/backup/backupinfo/BUILD.bazel b/pkg/backup/backupinfo/BUILD.bazel index 7c988d1bf9f3..938df3e84ad3 100644 --- a/pkg/backup/backupinfo/BUILD.bazel +++ b/pkg/backup/backupinfo/BUILD.bazel @@ -42,6 +42,7 @@ go_library( "//pkg/storage", "//pkg/util", "//pkg/util/besteffort", + "//pkg/util/buildutil", "//pkg/util/bulk", "//pkg/util/ctxgroup", "//pkg/util/encoding", diff --git a/pkg/backup/backupinfo/backup_index.go b/pkg/backup/backupinfo/backup_index.go index 657844019d39..24cc0be39277 100644 --- a/pkg/backup/backupinfo/backup_index.go +++ b/pkg/backup/backupinfo/backup_index.go @@ -26,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/besteffort" + "github.com/cockroachdb/cockroach/pkg/util/buildutil" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -234,8 +235,11 @@ type RestorableBackup struct { // ListRestorableBackups lists all restorable backups from the backup index // within the specified time interval (inclusive at both ends). The store should // be rooted at the default collection URI (the one that contains the -// `metadata/` directory). -// +// `metadata/` directory). A maxCount of 0 indicates no limit on the number +// of backups to return, otherwise, if the number of backups found exceeds +// maxCount, iteration will stop early and the boolean return value will be +// set to true. + // NB: Duplicate end times within a chain are elided, as IDs only identify // unique end times within a chain. For the purposes of determining which // backup's metadata we use to populate the fields, we always pick the backup @@ -248,86 +252,113 @@ type RestorableBackup struct { // milliseconds. As such, it is possible that a backup with an end time slightly // ahead of `before` may be included in the results. func ListRestorableBackups( - ctx context.Context, store cloud.ExternalStorage, after, before time.Time, -) ([]RestorableBackup, error) { - idxInRange, err := listIndexesWithinRange(ctx, store, after, before) - if err != nil { - return nil, err - } - - var filteredIndexes []parsedIndex - for _, index := range idxInRange { - if len(filteredIndexes) > 0 { - last := &filteredIndexes[len(filteredIndexes)-1] - // Elide duplicate end times within a chain. 
Because the indexes are - // sorted with ascending start times breaking ties, keeping the last one - // ensures that we keep the non-compacted backup. - if last.end.Equal(index.end) && last.fullEnd.Equal(index.fullEnd) { - last.filePath = index.filePath - continue + ctx context.Context, store cloud.ExternalStorage, newerThan, olderThan time.Time, maxCount uint, +) ([]RestorableBackup, bool, error) { + ctx, trace := tracing.ChildSpan(ctx, "backupinfo.ListRestorableBackups") + defer trace.Finish() + + var filteredIdxs []parsedIndex + var exceededMax bool + if err := listIndexesWithinRange( + ctx, store, newerThan, olderThan, + func(index parsedIndex) error { + if len(filteredIdxs) > 0 { + lastIdx := len(filteredIdxs) - 1 + // Elide duplicate end times within a chain. Because indexes are fetched + // in descending order with ties broken by ascending start time, keeping + // the last one ensures that we keep the non-compacted backup. + if filteredIdxs[lastIdx].end.Equal(index.end) && + filteredIdxs[lastIdx].fullEnd.Equal(index.fullEnd) { + if buildutil.CrdbTestBuild { + // Sanity check that start times are in ascending order for indexes + // with the same end time. + if index.start.Before(filteredIdxs[lastIdx].start) { + return errors.Newf( + "expected index start times to be in ascending order: %s vs %s", + index.start, filteredIdxs[lastIdx].start, + ) + } + } + filteredIdxs[lastIdx] = index + return nil + } } - } - filteredIndexes = append(filteredIndexes, index) + filteredIdxs = append(filteredIdxs, index) + if maxCount > 0 && uint(len(filteredIdxs)) > maxCount { + exceededMax = true + return cloud.ErrListingDone + } + return nil + }, + ); err != nil { + return nil, false, err + } + if exceededMax { + filteredIdxs = filteredIdxs[:maxCount] } - backups := make([]RestorableBackup, 0, len(filteredIndexes)) - for _, index := range filteredIndexes { - reader, _, err := store.ReadFile(ctx, index.filePath, cloud.ReadOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "reading index file %s", index.filePath) - } - - bytes, err := ioctx.ReadAll(ctx, reader) - besteffort.Error(ctx, "cleanup-index-reader", func(ctx context.Context) error { - return reader.Close(ctx) - }) + ctx, readTrace := tracing.ChildSpan(ctx, "backupinfo.ReadIndexFiles") + defer readTrace.Finish() + backups, err := util.MapE(filteredIdxs, func(index parsedIndex) (RestorableBackup, error) { + idxMeta, err := readIndexFile(ctx, store, index.filePath) if err != nil { - return nil, errors.Wrapf(err, "reading index file %s", index.filePath) + return RestorableBackup{}, err } - - idxMeta := backuppb.BackupIndexMetadata{} - if err := protoutil.Unmarshal(bytes, &idxMeta); err != nil { - return nil, errors.Wrapf(err, "unmarshalling index file %s", index.filePath) - } - - backups = append(backups, RestorableBackup{ + return RestorableBackup{ ID: encodeBackupID(index.fullEnd, index.end), EndTime: idxMeta.EndTime, MVCCFilter: idxMeta.MVCCFilter, RevisionStartTime: idxMeta.RevisionStartTime, - }) + }, nil + }) + if err != nil { + return nil, false, err } - return backups, nil + + return backups, exceededMax, nil } type parsedIndex struct { - filePath string // path to the index relative to the backup collection root - fullEnd, end time.Time + filePath string // path to the index relative to the backup collection root + fullEnd, start, end time.Time } // listIndexesWithinRange lists all index files whose end time falls within the // specified time interval (inclusive at both ends). 
The store should be rooted // at the default collection URI (the one that contains the `metadata/` -// directory). The returned index filenames are relative to the `metadata/index` -// directory and sorted in descending order by end time, with ties broken by -// ascending start time. +// directory). The indexes are passed to the callback in descending end time +// order, with ties broken by ascending start time order. To stop iteration +// early, the callback can return cloud.ErrListingDone. Any other error returned +// by the callback will be propagated back to the caller. // // NB: Filtering is applied to backup end times truncated to tens of // milliseconds. func listIndexesWithinRange( - ctx context.Context, store cloud.ExternalStorage, after, before time.Time, -) ([]parsedIndex, error) { + ctx context.Context, + store cloud.ExternalStorage, + newerThan, olderThan time.Time, + cb func(parsedIndex) error, +) error { // First, find the full backup end time prefix we begin listing from. Since // full backup end times are stored in descending order in the index, we add // ten milliseconds (the maximum granularity of the timestamp encoding) to // ensure an inclusive start. - maxEndTime := before.Add(10 * time.Millisecond) + maxEndTime := olderThan.Add(10 * time.Millisecond) maxEndTimeSubdir, err := endTimeToIndexSubdir(maxEndTime) if err != nil { - return nil, err + return err } - var idxInRange []parsedIndex + // We don't immediately emit an index when we see it; instead, we hold onto + // it until the next index is seen. This is because we may need to swap with + // the next index in order to maintain descending end time order. This occurs + // when incremental backups are created and appended to the previous chain + // while the full backup for a new chain is still being run. Note that this + // swapping of the last two seen indexes only maintains a sorted order due to + // the way the backup index is sorted and the invariant that the existence of + // an incremental backup in a chain ensures that no backup in an older chain + // can have an end time greater than or equal to the incremental's end time. + var pendingEmit parsedIndex err = store.List( ctx, backupbase.BackupIndexDirectoryPath+"/", @@ -342,39 +373,52 @@ func listIndexesWithinRange( } // Once we see an *incremental* backup with an end time before `after`, we // can stop iterating as we have found all backups within the time range. - if !start.IsZero() && end.Before(after) { + if !start.IsZero() && end.Before(newerThan) { return cloud.ErrListingDone } - if end.After(before) || end.Before(after) { + if end.After(olderThan) || end.Before(newerThan) { return nil } - entry := parsedIndex{ + nextEntry := parsedIndex{ filePath: path.Join(backupbase.BackupIndexDirectoryPath, file), fullEnd: full, + start: start, end: end, } - // We may need to swap with the last index appended to maintain descending - // end time order. This occurs when incremental backups are created and - // appended to the previous chain while the full backup for a new chain - // is still being run. Note that this swapping of the last two elements - // only maintains a sorted order due to the way the backup index is sorted - // and the invariant that the existence of an incremental backup in a - // chain ensures that no backup in an older chain can have an end time - // greater than or equal to the incremental's end time. - if len(idxInRange) > 0 && end.After(idxInRange[len(idxInRange)-1].end) { - tmp := idxInRange[len(idxInRange)-1] - idxInRange[len(idxInRange)-1] = entry - entry = tmp + if pendingEmit == (parsedIndex{}) { + pendingEmit = nextEntry + return nil + } + if !nextEntry.end.After(pendingEmit.end) { + // The pending emit has an end time greater than or equal to the new entry, + // so we can guarantee that the pending emit is the next index to be + // flushed. + if err := cb(pendingEmit); err != nil { + return err + } + pendingEmit = nextEntry + } else { + // This new entry does have an end time newer than the last index, so we + // need to emit this one first and continue holding onto that previous + // index. + if err := cb(nextEntry); err != nil { + return err + } } - idxInRange = append(idxInRange, entry) return nil }, ) if err != nil && !errors.Is(err, cloud.ErrListingDone) { - return nil, err + return err } - return idxInRange, nil + // The loop has ended, so we can flush any pending index. + if pendingEmit != (parsedIndex{}) { + if err := cb(pendingEmit); err != nil && !errors.Is(err, cloud.ErrListingDone) { + return err + } + } + return nil } // GetBackupTreeIndexMetadata concurrently retrieves the index metadata for all @@ -693,6 +737,33 @@ func parseTimesFromIndexFilepath(filepath string) (fullEnd, start, end time.Time return fullEnd, start, end, nil } +// readIndexFile reads and unmarshals the backup index file at the given path. +// store should be rooted at the default collection URI (the one that contains +// the `metadata/` directory). The indexFilePath is relative to the collection +// URI. +func readIndexFile( + ctx context.Context, store cloud.ExternalStorage, indexFilePath string, +) (backuppb.BackupIndexMetadata, error) { + reader, _, err := store.ReadFile(ctx, indexFilePath, cloud.ReadOptions{}) + if err != nil { + return backuppb.BackupIndexMetadata{}, errors.Wrapf(err, "reading index file %s", indexFilePath) + } + defer besteffort.Error(ctx, "cleanup-index-reader", func(ctx context.Context) error { + return reader.Close(ctx) + }) + + bytes, err := ioctx.ReadAll(ctx, reader) + if err != nil { + return backuppb.BackupIndexMetadata{}, errors.Wrapf(err, "reading index file %s", indexFilePath) + } + + idxMeta := backuppb.BackupIndexMetadata{} + if err := protoutil.Unmarshal(bytes, &idxMeta); err != nil { + return backuppb.BackupIndexMetadata{}, errors.Wrapf(err, "unmarshalling index file %s", indexFilePath) + } + return idxMeta, nil +} + // encodeBackupID generates a backup ID for a backup identified by its parent // full end time and its own end time.
func encodeBackupID(fullEnd time.Time, backupEnd time.Time) string { diff --git a/pkg/backup/backupinfo/backup_index_test.go b/pkg/backup/backupinfo/backup_index_test.go index 3ac8d53e078d..a7f9ec151842 100644 --- a/pkg/backup/backupinfo/backup_index_test.go +++ b/pkg/backup/backupinfo/backup_index_test.go @@ -825,8 +825,8 @@ func TestListRestorableBackups(t *testing.T) { afterTS := hlc.Timestamp{WallTime: int64(tc.after) * 1e9}.GoTime() beforeTS := hlc.Timestamp{WallTime: int64(tc.before) * 1e9}.GoTime() - backups, err := ListRestorableBackups( - ctx, externalStorage, afterTS, beforeTS, + backups, _, err := ListRestorableBackups( + ctx, externalStorage, afterTS, beforeTS, 0, ) require.NoError(t, err) @@ -836,6 +836,45 @@ func TestListRestorableBackups(t *testing.T) { require.Equal(t, tc.expectedOutput, actualOutput) }) } + + maxCountCases := []struct { + name string + after, before int + maxCount uint + expectedExceeded bool + }{ + { + "max count not exceeded", + 2, 10, + 10, + false, + }, + { + "max count exceeded", + 2, 62, + 10, + true, + }, + { + "max count exactly met", + 10, 28, + 8, + false, + }, + } + for _, tc := range maxCountCases { + t.Run(tc.name, func(t *testing.T) { + afterTS := hlc.Timestamp{WallTime: int64(tc.after) * 1e9}.GoTime() + beforeTS := hlc.Timestamp{WallTime: int64(tc.before) * 1e9}.GoTime() + + backups, exceeded, err := ListRestorableBackups( + ctx, externalStorage, afterTS, beforeTS, tc.maxCount, + ) + require.NoError(t, err) + require.LessOrEqual(t, len(backups), int(tc.maxCount)) + require.Equal(t, tc.expectedExceeded, exceeded) + }) + } } func TestConvertIndexSubdirToSubdir(t *testing.T) { diff --git a/pkg/backup/show.go b/pkg/backup/show.go index 619f481c4b3a..fe49dcbc6e46 100644 --- a/pkg/backup/show.go +++ b/pkg/backup/show.go @@ -42,6 +42,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util/besteffort" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" @@ -51,6 +52,8 @@ import ( "github.com/cockroachdb/errors" ) +const showBackupsMaxPageSize = 50 + type backupInfoReader interface { showBackup( context.Context, @@ -1353,6 +1356,59 @@ var showBackupsInCollectionHeader = colinfo.ResultColumns{ {Name: "path", Typ: types.String}, } +var showBackupsWithIDsHeader = colinfo.ResultColumns{ + {Name: "id", Typ: types.String}, + {Name: "backup_time", Typ: types.TimestampTZ}, + {Name: "revision_start_time", Typ: types.TimestampTZ}, +} + +// getTimeRangeOrDefaults parses the NEWER THAN and OLDER THAN expressions from +// the SHOW BACKUPS command. If a NEWER THAN or OLDER THAN is not specified, a +// zero time or current time is returned, respectively, It also returns the +// maximum number of results to return, which is non-zero only if the time range +// is not fully specified. 
+func getTimeRangeOrDefaults( + ctx context.Context, p sql.PlanHookState, interval tree.ShowBackupTimeFilter, +) (newerThan time.Time, olderThan time.Time, maxCount uint, err error) { + parseTime := func(t tree.Expr) (time.Time, error) { + asOf := tree.AsOfClause{Expr: t} + asTime, err := p.EvalAsOfTimestamp(ctx, asOf) + if err != nil { + return time.Time{}, err + } + return asTime.Timestamp.GoTime(), nil + } + + // To avoid issues with certain backups being skipped at the time boundaries + // due to precision differences between backup times and the times encoded in + // backup filenames, we round to second-level precision. We choose the most + // inclusive rounding, meaning we round down for NEWER THAN and up for OLDER THAN. + precision := time.Second + if interval.NewerThan != nil { + newerThan, err = parseTime(interval.NewerThan) + if err != nil { + return time.Time{}, time.Time{}, 0, err + } + newerThan = newerThan.Truncate(precision) + } + + if interval.OlderThan != nil { + olderThan, err = parseTime(interval.OlderThan) + if err != nil { + return time.Time{}, time.Time{}, 0, err + } + } else { + olderThan = timeutil.Now() + } + olderThan = olderThan.Add(precision - time.Nanosecond).Truncate(precision) + + if interval.NewerThan == nil || interval.OlderThan == nil { + maxCount = showBackupsMaxPageSize + } + + return newerThan, olderThan, maxCount, nil +} + // showBackupPlanHook implements PlanHookFn. func showBackupsInCollectionPlanHook( ctx context.Context, collection []string, showStmt *tree.ShowBackup, p sql.PlanHookState, @@ -1362,6 +1418,7 @@ func showBackupsInCollectionPlanHook( return nil, nil, false, err } + useIDs := p.SessionData().UseBackupsWithIDs fn := func(ctx context.Context, resultsCh chan<- tree.Datums) error { ctx, span := tracing.ChildSpan(ctx, showStmt.StatementTag()) defer span.Finish() @@ -1370,17 +1427,70 @@ func showBackupsInCollectionPlanHook( if err != nil { return errors.Wrapf(err, "connect to external storage") } - defer store.Close() - res, err := backupdest.ListFullBackupsInCollection(ctx, store) - if err != nil { - return err - } - for _, i := range res { - resultsCh <- tree.Datums{tree.NewDString(i)} + defer besteffort.Error(ctx, "show-backups-close-store", func(_ context.Context) error { + return store.Close() + }) + + if useIDs { + newerThan, olderThan, maxCount, err := getTimeRangeOrDefaults(ctx, p, showStmt.TimeRange) + if err != nil { + return err + } + res, exceededMax, err := backupinfo.ListRestorableBackups( + ctx, store, newerThan, olderThan, maxCount, + ) + if err != nil { + return err + } + if exceededMax { + p.BufferClientNotice( + context.TODO(), + pgnotice.Newf( + "only showing %d most recent backups. To see more, please run `SHOW BACKUPS IN ...
OLDER THAN '%s'` or specify both OLDER THAN and NEWER THAN.", + len(res), + res[len(res)-1].EndTime.GoTime().UTC().Format(time.DateTime), + ), + ) + } + for _, i := range res { + backupTime, err := tree.MakeDTimestampTZ( + timeutil.Unix(0, i.EndTime.WallTime), time.Second, + ) + if err != nil { + return err + } + revStartTime := tree.DNull + if i.MVCCFilter == backuppb.MVCCFilter_All { + revStartTime, err = tree.MakeDTimestampTZ( + timeutil.Unix(0, i.RevisionStartTime.WallTime), time.Second, + ) + if err != nil { + return err + } + } + resultsCh <- tree.Datums{ + tree.NewDString(i.ID), + backupTime, + revStartTime, + } + } + } else { + res, err := backupdest.ListFullBackupsInCollection(ctx, store) + if err != nil { + return err + } + for _, i := range res { + resultsCh <- tree.Datums{tree.NewDString(i)} + } } return nil } - return fn, showBackupsInCollectionHeader, false, nil + + header := showBackupsInCollectionHeader + if useIDs { + header = showBackupsWithIDsHeader + } + return fn, header, false, nil } func init() { diff --git a/pkg/backup/show_test.go b/pkg/backup/show_test.go index 12355f4bfdad..c9824519ae9a 100644 --- a/pkg/backup/show_test.go +++ b/pkg/backup/show_test.go @@ -8,21 +8,26 @@ package backup import ( "context" "fmt" + "math/rand" "net/url" "os" "path/filepath" "regexp" + "slices" "strconv" "strings" "testing" + "time" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/securitytest" "github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/jobutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -827,3 +832,172 @@ func TestShowBackupCheckFiles(t *testing.T) { fileSum) } } + +func TestShowBackupsWithIDs(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + const numAccounts = 11 + _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication) + defer cleanupFn() + sqlDB.Exec(t, "SET SESSION use_backups_with_ids = true") + + const collectionURI = "nodelocal://1/backup" + + getSystemTime := func() (ts int64) { + sqlDB.QueryRow(t, "SELECT cluster_logical_timestamp()::INT").Scan(&ts) + return ts + } + + times := []int64{getSystemTime()} + sqlDB.Exec(t, fmt.Sprintf("BACKUP INTO $1 AS OF SYSTEM TIME %d", times[0]), collectionURI) + + for i := range 2 { + times = append(times, getSystemTime()) + sqlDB.Exec( + t, + fmt.Sprintf("BACKUP INTO LATEST IN $1 AS OF SYSTEM TIME %d", times[i+1]), + collectionURI, + ) + } + + // We pause the second full backup so that we can take incrementals during its + // running to give us overlapped chains to test against. 
+ sqlDB.Exec(t, `SET CLUSTER SETTING jobs.debug.pausepoints = 'backup.after.write_first_checkpoint'`) + times = append(times, getSystemTime()) + var pausedJobID jobspb.JobID + sqlDB.QueryRow( + t, + fmt.Sprintf("BACKUP INTO $1 AS OF SYSTEM TIME %d WITH detached", times[len(times)-1]), + collectionURI, + ).Scan(&pausedJobID) + jobutils.WaitForJobToPause(t, sqlDB, pausedJobID) + sqlDB.Exec(t, `SET CLUSTER SETTING jobs.debug.pausepoints = ''`) + + for range 2 { + times = append(times, getSystemTime()) + sqlDB.Exec( + t, + fmt.Sprintf("BACKUP INTO LATEST IN $1 AS OF SYSTEM TIME %d", times[len(times)-1]), + collectionURI, + ) + } + + sqlDB.Exec(t, "RESUME JOB $1", pausedJobID) + jobutils.WaitForJobToSucceed(t, sqlDB, pausedJobID) + times = append(times, getSystemTime()) + sqlDB.Exec( + t, + fmt.Sprintf("BACKUP INTO LATEST IN $1 AS OF SYSTEM TIME %d", times[len(times)-1]), + collectionURI, + ) + + t.Run("backup times are in descending order", func(t *testing.T) { + shownTimes := sqlDB.QueryStr( + t, fmt.Sprintf(`SELECT backup_time FROM [SHOW BACKUPS IN '%s']`, collectionURI), + ) + require.Equal(t, len(times), len(shownTimes)) + require.True( + t, slices.IsSortedFunc(shownTimes, func(a, b []string) int { + if a[0] == b[0] { + return 0 + } else if a[0] > b[0] { + return -1 + } else { + return 1 + } + }), + ) + }) + + t.Run("time filtering", func(t *testing.T) { + // SHOW BACKUPS is only second-precise, so we just pick a second + // after the first backup. If the backup times overlap a second boundary, + // then our filter partitions the backups. Over the course of multiple + // tests, this gives us good coverage of filter points. + filterTime := int64(time.Duration(times[0]).Truncate(time.Second) + time.Second) + + t.Run("OLDER THAN", func(t *testing.T) { + var count int + sqlDB.QueryRow( + t, + fmt.Sprintf( + `SELECT count(*) FROM [SHOW BACKUPS IN '%s' OLDER THAN %d]`, + collectionURI, filterTime, + ), + ).Scan(&count) + + var expected int + for _, ts := range times { + // We must truncate times to the same precision as backup collection + // blob names since that's what SHOW BACKUPS uses to filter. + truncatedTS := int64(time.Duration(ts).Truncate(10 * time.Millisecond)) + if truncatedTS <= filterTime { + expected++ + } + } + require.Equal( + t, expected, count, + "unexpected number of backups before %d", filterTime, + ) + }) + + t.Run("NEWER THAN", func(t *testing.T) { + var count int + sqlDB.QueryRow( + t, + fmt.Sprintf( + `SELECT count(*) FROM [SHOW BACKUPS IN '%s' NEWER THAN %d]`, + collectionURI, filterTime, + ), + ).Scan(&count) + + var expected int + for _, ts := range times { + truncatedTS := int64(time.Duration(ts).Truncate(10 * time.Millisecond)) + if truncatedTS >= filterTime { + expected++ + } + } + require.Equal( + t, expected, count, + "unexpected number of backups after %d", filterTime, + ) + }) + + t.Run("OLDER THAN and NEWER THAN", func(t *testing.T) { + // Pick a random pair of times for the range. Note that in most cases, + // this will end up being the same query as the tests above due to the + // short window of time the backups span, over the course of multiple + // test runs, this results in good coverage. + olderIdx := rand.Intn(len(times) - 1) + newerIdx := rand.Intn(len(times)-olderIdx-1) + olderIdx + 1 + + // Truncate the times to second precision like SHOW BACKUPS. 
+ olderTime := int64(time.Duration(times[olderIdx]).Truncate(time.Second)) + newerTime := int64(time.Duration(times[newerIdx]).Truncate(time.Second)) + + var count int + sqlDB.QueryRow( + t, + fmt.Sprintf( + `SELECT count(*) FROM [SHOW BACKUPS IN '%s' OLDER THAN %d NEWER THAN %d]`, + collectionURI, newerTime, olderTime, + ), + ).Scan(&count) + + var expected int + for _, ts := range times { + truncatedTS := int64(time.Duration(ts).Truncate(10 * time.Millisecond)) + if truncatedTS <= newerTime && truncatedTS >= olderTime { + expected++ + } + } + require.Equal( + t, expected, count, + "unexpected number of backups between %d and %d", + olderTime, newerTime, + ) + }) + }) +} diff --git a/pkg/sql/logictest/testdata/logic_test/information_schema b/pkg/sql/logictest/testdata/logic_test/information_schema index cc5c518cda4f..7034d2a52927 100644 --- a/pkg/sql/logictest/testdata/logic_test/information_schema +++ b/pkg/sql/logictest/testdata/logic_test/information_schema @@ -950,8 +950,8 @@ table_columns query TTTTT colnames SELECT table_catalog, table_schema, table_name, table_type, is_insertable_into FROM system.information_schema.tables -WHERE -(table_schema <> 'crdb_internal' OR table_name = 'node_build_info') +WHERE +(table_schema <> 'crdb_internal' OR table_name = 'node_build_info') AND NOT (table_schema = 'public' AND table_name <> 'locations') ORDER BY table_name, table_schema ---- @@ -2356,9 +2356,9 @@ root other_db public ALL YES # root can see everything (filtering most of crdb_internal and system to avoid a brittle test) query TTTTTTTT colnames,rowsort -SELECT * FROM system.information_schema.table_privileges -WHERE - (table_schema <> 'crdb_internal' OR table_name = 'node_build_info') AND +SELECT * FROM system.information_schema.table_privileges +WHERE + (table_schema <> 'crdb_internal' OR table_name = 'node_build_info') AND NOT (table_catalog = 'system' AND table_schema = 'public' AND 'table_name' <> 'locations') ORDER BY table_schema, table_name, table_schema, grantee, privilege_type ---- @@ -2585,9 +2585,9 @@ NULL public system pg_extension geometry_columns NULL public system pg_extension spatial_ref_sys SELECT NO YES query TTTTTTTT colnames,rowsort -SELECT * FROM system.information_schema.role_table_grants -WHERE -(table_schema <> 'crdb_internal' OR table_name = 'node_build_info') +SELECT * FROM system.information_schema.role_table_grants +WHERE +(table_schema <> 'crdb_internal' OR table_name = 'node_build_info') AND NOT (table_schema = 'public' AND table_name <> 'locations'); ---- grantor grantee table_catalog table_schema table_name privilege_type is_grantable with_hierarchy @@ -3018,19 +3018,19 @@ statement ok CREATE SEQUENCE test_seq_3 AS smallint statement ok -CREATE SEQUENCE test_seq_4 AS integer +CREATE SEQUENCE test_seq_4 AS integer statement ok CREATE SEQUENCE test_seq_5 AS bigint statement ok -CREATE SEQUENCE test_seq_6 AS INT2 +CREATE SEQUENCE test_seq_6 AS INT2 statement ok CREATE SEQUENCE test_seq_7 AS INT4 statement ok -CREATE SEQUENCE test_seq_8 AS INT8 +CREATE SEQUENCE test_seq_8 AS INT8 query TTTTIIITTTTT colnames,rowsort SELECT * FROM information_schema.sequences @@ -3970,6 +3970,7 @@ troubleshooting_mode off unbounded_parallel_scans off unconstrained_non_covering_index_scan_enabled off unsafe_allow_triggers_modifying_cascades off +use_backups_with_ids off use_cputs_on_non_unique_indexes off use_improved_routine_dependency_tracking on use_improved_routine_deps_triggers_and_computed_cols on diff --git a/pkg/sql/logictest/testdata/logic_test/pg_catalog 
b/pkg/sql/logictest/testdata/logic_test/pg_catalog index a13de324bdd9..3030d601f682 100644 --- a/pkg/sql/logictest/testdata/logic_test/pg_catalog +++ b/pkg/sql/logictest/testdata/logic_test/pg_catalog @@ -3240,6 +3240,7 @@ troubleshooting_mode off unbounded_parallel_scans off NULL NULL NULL string unconstrained_non_covering_index_scan_enabled off NULL NULL NULL string unsafe_allow_triggers_modifying_cascades off NULL NULL NULL string +use_backups_with_ids off NULL NULL NULL string use_cputs_on_non_unique_indexes off NULL NULL NULL string use_declarative_schema_changer on NULL NULL NULL string use_improved_routine_dependency_tracking on NULL NULL NULL string @@ -3491,6 +3492,7 @@ troubleshooting_mode off unbounded_parallel_scans off NULL user NULL off off unconstrained_non_covering_index_scan_enabled off NULL user NULL off off unsafe_allow_triggers_modifying_cascades off NULL user NULL off off +use_backups_with_ids off NULL user NULL off off use_cputs_on_non_unique_indexes off NULL user NULL off off use_declarative_schema_changer on NULL user NULL on on use_improved_routine_dependency_tracking on NULL user NULL on on @@ -3734,6 +3736,7 @@ troubleshooting_mode NULL NULL unbounded_parallel_scans NULL NULL NULL NULL NULL unconstrained_non_covering_index_scan_enabled NULL NULL NULL NULL NULL unsafe_allow_triggers_modifying_cascades NULL NULL NULL NULL NULL +use_backups_with_ids NULL NULL NULL NULL NULL use_cputs_on_non_unique_indexes NULL NULL NULL NULL NULL use_declarative_schema_changer NULL NULL NULL NULL NULL use_improved_routine_dependency_tracking NULL NULL NULL NULL NULL diff --git a/pkg/sql/logictest/testdata/logic_test/show_source b/pkg/sql/logictest/testdata/logic_test/show_source index e8de7169644a..a7840c011267 100644 --- a/pkg/sql/logictest/testdata/logic_test/show_source +++ b/pkg/sql/logictest/testdata/logic_test/show_source @@ -254,6 +254,7 @@ troubleshooting_mode off unbounded_parallel_scans off unconstrained_non_covering_index_scan_enabled off unsafe_allow_triggers_modifying_cascades off +use_backups_with_ids off use_cputs_on_non_unique_indexes off use_declarative_schema_changer on use_improved_routine_dependency_tracking on diff --git a/pkg/sql/sessiondatapb/session_data.proto b/pkg/sql/sessiondatapb/session_data.proto index 5fcfa3d4fa86..9f710db3e070 100644 --- a/pkg/sql/sessiondatapb/session_data.proto +++ b/pkg/sql/sessiondatapb/session_data.proto @@ -145,6 +145,11 @@ message SessionData { // for deadlock detection. google.protobuf.Duration deadlock_timeout = 33 [(gogoproto.nullable) = false, (gogoproto.stdduration) = true]; + + // UseBackupsWithIDs, when true, enables the use of backup IDs in SHOW BACKUP + // and RESTORE. If set, SHOW BACKUPS with display backups with IDs that can + // then be used in SHOW BACKUP and RESTORE statements. 
+ bool use_backups_with_ids = 34 [(gogoproto.customname) = "UseBackupsWithIDs"]; } // DataConversionConfig contains the parameters that influence the output diff --git a/pkg/sql/sessionmutator/mutator.go b/pkg/sql/sessionmutator/mutator.go index ca4159e752e0..0fdd95d4b643 100644 --- a/pkg/sql/sessionmutator/mutator.go +++ b/pkg/sql/sessionmutator/mutator.go @@ -1122,3 +1122,7 @@ func (m *SessionDataMutator) SetUseImprovedRoutineDepsTriggersAndComputedCols(va func (m *SessionDataMutator) SetDistSQLPreventPartitioningSoftLimitedScans(val bool) { m.Data.DistSQLPreventPartitioningSoftLimitedScans = val } + +func (m *SessionDataMutator) SetUseBackupsWithIDs(val bool) { + m.Data.UseBackupsWithIDs = val +} diff --git a/pkg/sql/vars.go b/pkg/sql/vars.go index 40b29045e975..7a7c73e15d77 100644 --- a/pkg/sql/vars.go +++ b/pkg/sql/vars.go @@ -4542,6 +4542,22 @@ var varGen = map[string]sessionVar{ }, GlobalDefault: globalTrue, }, + + `use_backups_with_ids`: { + GetStringVal: makePostgresBoolGetStringValFn(`use_backups_with_ids`), + Set: func(_ context.Context, m sessionmutator.SessionDataMutator, s string) error { + b, err := paramparse.ParseBoolVar("use_backups_with_ids", s) + if err != nil { + return err + } + m.SetUseBackupsWithIDs(b) + return nil + }, + Get: func(evalCtx *extendedEvalContext, _ *kv.Txn) (string, error) { + return formatBoolAsPostgresSetting(evalCtx.SessionData().UseBackupsWithIDs), nil + }, + GlobalDefault: globalFalse, + }, } func ReplicationModeFromString(s string) (sessiondatapb.ReplicationMode, error) {