From b9a7bde073881d850b42183a7457db504e35bf03 Mon Sep 17 00:00:00 2001 From: Peng Peng Date: Fri, 24 Apr 2026 23:49:26 +0800 Subject: [PATCH 01/12] feat(cli): add olares-cli profile/auth/credential subsystem (Phase 1) Introduce the multi-profile credential subsystem for olares-cli, covering the "operate on behalf of a user" scenario via password login and refresh token import, while keeping the existing kubeconfig-based commands (osinfo/os/node/gpu/amdgpu/disk) untouched and orthogonal. New packages: - pkg/cliconfig: ~/.olares-cli/{config.json,tokens.json} layout, file perms (0700/0600), atomic write, MultiProfileConfig with current / previous profile pointers, FindByOlaresID, etc. - pkg/olares: olaresId -> auth/vault/desktop URL derivation (single source of truth for all per-profile URLs). - pkg/auth: stateless protocol layer. * login.go (mode A): /api/firstfactor with optional TOTP, password salting, cookie-jar HTTP client. * refresh.go (mode B): one-shot /api/refresh exchange used to bootstrap an access token from a user-supplied refresh token. * token_store.go: plaintext-JSON TokenStore (Phase 1) with Get / Set / Delete / List / MarkInvalidated. StoredToken includes InvalidatedAt (Phase 1 defines, Phase 2 writes on /api/refresh 401/403); Set() defensively zeroes it on every fresh grant. * jwt.go: ExpiresAt(token) only - no signature verification, no other claims exposed (username/groups/mfa/jid are explicitly untrusted). - pkg/credential: Provider chain + ResolvedProfile. * DefaultProvider resolves Profile + token, returning typed errors with invalidated > expired > ok priority (ErrTokenInvalidated / ErrTokenExpired / ErrNotLoggedIn), each with a "run profile login" CTA. Profile config is never silently mutated. * EnvProvider stub for future Phase 3 in-cluster scenario. - pkg/cmdutil/Factory: lazy DI for Credential / ResolvedProfile / HTTP client (auto-injects Authorization: Bearer; no auto-refresh in Phase 1). 
New commands (cmd/ctl/profile, no separate `auth` namespace): - profile list / use / remove / login / import. - login & import auto-create the profile on first use, reuse-and-overwrite when the existing token is missing/expired/invalidated, and reject only when a still-valid token is present (with a profile-remove hint). - list shows STATUS: logged-in (Xh) / expired / invalidated / never. Also: - Register profile command in cmd/ctl/root.go. - Ignore cli/docs/ (design notes are local-only, not part of the shipping repo). Phase 1 trade-offs (deferred to Phase 2): OS keychain backend, automatic refresh-with-lock (sync.Map + flock + double-check), LarePass OAuth device-flow login, wizard activation -> profile bridge. Made-with: Cursor --- .gitignore | 1 + cli/cmd/ctl/profile/credentials.go | 212 ++++++++++++++++++ cli/cmd/ctl/profile/credentials_test.go | 167 ++++++++++++++ cli/cmd/ctl/profile/import.go | 96 +++++++++ cli/cmd/ctl/profile/list.go | 122 +++++++++++ cli/cmd/ctl/profile/login.go | 205 ++++++++++++++++++ cli/cmd/ctl/profile/remove.go | 64 ++++++ cli/cmd/ctl/profile/root.go | 44 ++++ cli/cmd/ctl/profile/use.go | 41 ++++ cli/cmd/ctl/root.go | 2 + cli/pkg/auth/jwt.go | 80 +++++++ cli/pkg/auth/jwt_test.go | 74 +++++++ cli/pkg/auth/login.go | 275 ++++++++++++++++++++++++ cli/pkg/auth/refresh.go | 93 ++++++++ cli/pkg/auth/token_store.go | 218 +++++++++++++++++++ cli/pkg/cliconfig/config.go | 270 +++++++++++++++++++++++ cli/pkg/cliconfig/paths.go | 79 +++++++ cli/pkg/cmdutil/factory.go | 138 ++++++++++++ cli/pkg/credential/default_provider.go | 147 +++++++++++++ cli/pkg/credential/env_provider.go | 29 +++ cli/pkg/credential/provider.go | 93 ++++++++ cli/pkg/credential/types.go | 56 +++++ cli/pkg/olares/id.go | 88 ++++++++ 23 files changed, 2594 insertions(+) create mode 100644 cli/cmd/ctl/profile/credentials.go create mode 100644 cli/cmd/ctl/profile/credentials_test.go create mode 100644 cli/cmd/ctl/profile/import.go create mode 100644 cli/cmd/ctl/profile/list.go 
create mode 100644 cli/cmd/ctl/profile/login.go create mode 100644 cli/cmd/ctl/profile/remove.go create mode 100644 cli/cmd/ctl/profile/root.go create mode 100644 cli/cmd/ctl/profile/use.go create mode 100644 cli/pkg/auth/jwt.go create mode 100644 cli/pkg/auth/jwt_test.go create mode 100644 cli/pkg/auth/login.go create mode 100644 cli/pkg/auth/refresh.go create mode 100644 cli/pkg/auth/token_store.go create mode 100644 cli/pkg/cliconfig/config.go create mode 100644 cli/pkg/cliconfig/paths.go create mode 100644 cli/pkg/cmdutil/factory.go create mode 100644 cli/pkg/credential/default_provider.go create mode 100644 cli/pkg/credential/env_provider.go create mode 100644 cli/pkg/credential/provider.go create mode 100644 cli/pkg/credential/types.go create mode 100644 cli/pkg/olares/id.go diff --git a/.gitignore b/.gitignore index ceb6aaca9..b3ecfd058 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,7 @@ olares-cli-*.tar.gz .vscode .DS_Store cli/output +cli/docs/ daemon/output daemon/bin diff --git a/cli/cmd/ctl/profile/credentials.go b/cli/cmd/ctl/profile/credentials.go new file mode 100644 index 000000000..6cc1cfbd7 --- /dev/null +++ b/cli/cmd/ctl/profile/credentials.go @@ -0,0 +1,212 @@ +package profile + +import ( + "errors" + "fmt" + "time" + + "github.com/beclab/Olares/cli/pkg/auth" + "github.com/beclab/Olares/cli/pkg/cliconfig" + "github.com/beclab/Olares/cli/pkg/olares" +) + +// commonCredFlags captures the flags shared by `profile login` and +// `profile import`. The two commands diverge only in HOW they obtain the +// initial Token; everything else (CLI surface, profile creation rules, +// persistence) is identical. 
+type commonCredFlags struct { + olaresID string + name string + authURLOverride string + localURLPrefix string + insecureSkipVerify bool +} + +// validateAndDeriveAuthURL canonicalizes the user-supplied flags into a +// concrete (terminusName, authURL) pair, applying AuthURLOverride when +// present and otherwise deriving from the parsed olaresId. +func (f *commonCredFlags) validateAndDeriveAuthURL() (id olares.ID, terminusName, authURL string, err error) { + if f.olaresID == "" { + return "", "", "", errors.New("--olares-id is required") + } + id, err = olares.ParseID(f.olaresID) + if err != nil { + return "", "", "", err + } + terminusName = id.TerminusName() + if f.authURLOverride != "" { + authURL = f.authURLOverride + } else { + authURL = id.AuthURL(f.localURLPrefix) + } + return id, terminusName, authURL, nil +} + +// ensureProfileWritable enforces the "auto-create-or-reuse, reject if valid +// token exists" rule shared by login and import. +// +// Returns the (possibly newly-allocated) ProfileConfig that the caller should +// upsert AFTER it has successfully obtained a fresh Token. If a profile +// already exists for olaresID, its URL-override fields are preserved and only +// the alias (Name) is updated when the caller passed a non-empty --name. If a +// VALID token is already present in the store for this olaresId, the function +// returns an error instructing the user to `profile remove` first — this +// matches the design doc's "refuse duplicate logins" rule. +func ensureProfileWritable( + cfg *cliconfig.MultiProfileConfig, + store auth.TokenStore, + flags commonCredFlags, + now time.Time, +) (cliconfig.ProfileConfig, error) { + // 1. Reject if a still-valid token exists for this olaresId. An + // explicitly invalidated token (Phase 2 marks this on /api/refresh + // failure) is always treated as "needs re-login" and falls through — + // users should be able to recover with a single `profile login`, no + // `profile remove` required. 
+ stored, err := store.Get(flags.olaresID) + if err != nil && !errors.Is(err, auth.ErrTokenNotFound) { + return cliconfig.ProfileConfig{}, fmt.Errorf("read token store: %w", err) + } + if err == nil && stored.InvalidatedAt == 0 { + exp, expErr := auth.ExpiresAt(stored.AccessToken) + // "Valid" = exp is parseable AND in the future. A token with no exp + // claim is treated as "unknown / could still be valid" → also reject, + // because we can't prove otherwise client-side. + if expErr == nil && now.Before(exp) { + return cliconfig.ProfileConfig{}, fmt.Errorf( + "already authenticated for %s (expires in %s).\nto re-authenticate, run: olares-cli profile remove %s", + flags.olaresID, humanizeDuration(exp.Sub(now)), flags.olaresID, + ) + } + if errors.Is(expErr, auth.ErrNoExpClaim) { + return cliconfig.ProfileConfig{}, fmt.Errorf( + "a token is already stored for %s but its expiry can't be determined client-side.\nto re-authenticate, run: olares-cli profile remove %s", + flags.olaresID, flags.olaresID, + ) + } + // Otherwise the token is expired or unparseable → fall through and + // overwrite it. + } + + // 2. Build the ProfileConfig we're about to upsert. If one already + // exists, preserve its overrides unless the caller explicitly passed a + // new value. 
+ if existing := cfg.FindByOlaresID(flags.olaresID); existing != nil { + out := *existing + if flags.name != "" { + out.Name = flags.name + } + if flags.authURLOverride != "" { + out.AuthURLOverride = flags.authURLOverride + } + if flags.localURLPrefix != "" { + out.LocalURLPrefix = flags.localURLPrefix + } + if flags.insecureSkipVerify { + out.InsecureSkipVerify = true + } + return out, nil + } + return cliconfig.ProfileConfig{ + Name: flags.name, + OlaresID: flags.olaresID, + AuthURLOverride: flags.authURLOverride, + LocalURLPrefix: flags.localURLPrefix, + InsecureSkipVerify: flags.insecureSkipVerify, + }, nil +} + +// persistResult reports what happened to the active-profile pointer as a side +// effect of persistTokenAndProfile, so callers can print accurate UX. +// +// Switched is true exactly when CurrentProfile changed during this call. In +// that case PreviousCurrent holds whatever CurrentProfile pointed at before +// the switch (may be empty when the just-persisted profile is the very first +// one). +type persistResult struct { + Switched bool + PreviousCurrent string +} + +// persistTokenAndProfile writes the freshly-obtained Token into the token +// store and upserts the corresponding ProfileConfig into config.json. +// +// switchCurrent controls whether the just-persisted profile becomes current: +// - true → behave like `profile use `: if the new profile differs from +// the existing CurrentProfile, the old CurrentProfile is moved into +// PreviousProfile so users can revert with `profile use -`. Re-persisting +// the already-current profile is a no-op for current/previous. +// - false → leave CurrentProfile alone, except when it's empty: in that +// case fall back to the just-persisted profile so we never end up with +// "profiles exist but no current" — which would break every command that +// resolves credentials via the current profile. 
+func persistTokenAndProfile( + cfg *cliconfig.MultiProfileConfig, + store auth.TokenStore, + profile cliconfig.ProfileConfig, + tok *auth.Token, + switchCurrent bool, +) (persistResult, error) { + stored := auth.StoredToken{ + OlaresID: profile.OlaresID, + AccessToken: tok.AccessToken, + RefreshToken: tok.RefreshToken, + SessionID: tok.SessionID, + GrantedAt: time.Now().UnixMilli(), + } + if err := store.Set(stored); err != nil { + return persistResult{}, fmt.Errorf("save token: %w", err) + } + persisted := cfg.Upsert(profile) + + res := persistResult{} + newName := persisted.DisplayName() + prevCurrent := cfg.CurrentProfile + switch { + case switchCurrent && prevCurrent != newName: + // SetCurrent handles the empty-current case (no PreviousProfile + // update), and updates PreviousProfile when current actually moves. + // The lookup can only fail if the upsert above didn't land — treat + // that as an internal invariant violation. + if _, err := cfg.SetCurrent(newName); err != nil { + return persistResult{}, fmt.Errorf("activate profile %q: %w", newName, err) + } + res.Switched = true + res.PreviousCurrent = prevCurrent + case !switchCurrent && prevCurrent == "": + // Bootstrap path when --no-switch was passed but there's literally no + // current to preserve. Still no PreviousProfile bookkeeping (there + // was nothing to demote). + cfg.CurrentProfile = newName + res.Switched = true + } + + if err := cliconfig.SaveMultiProfileConfig(cfg); err != nil { + return persistResult{}, fmt.Errorf("save config: %w", err) + } + return res, nil +} + +// printSwitchNotice renders the post-login UX line(s) that explain whether +// CurrentProfile moved as a result of the just-finished login/import. +// +// We deliberately stay quiet when nothing changed (re-login on the +// already-current profile, or --no-switch with a non-empty current) so the +// happy-path output keeps a single line of "logged in as ...". 
+func printSwitchNotice(res persistResult, newDisplayName string) { + if !res.Switched { + return + } + fmt.Printf("switched current profile to %s\n", newDisplayName) + if res.PreviousCurrent != "" { + fmt.Printf("previous profile: %s (use 'olares-cli profile use -' to switch back)\n", res.PreviousCurrent) + } +} + +// printPlaintextWarning is shown after every successful login / import to set +// expectations: Phase 1 stores tokens in clear text. Phase 2 will move them +// into the OS keychain. +func printPlaintextWarning() { + tokensPath, _ := cliconfig.TokensFile() + fmt.Printf("warning: token stored in plaintext at %s (mode 0600). OS keychain support is coming in a future release.\n", tokensPath) +} diff --git a/cli/cmd/ctl/profile/credentials_test.go b/cli/cmd/ctl/profile/credentials_test.go new file mode 100644 index 000000000..1a12e286c --- /dev/null +++ b/cli/cmd/ctl/profile/credentials_test.go @@ -0,0 +1,167 @@ +package profile + +import ( + "testing" + + "github.com/beclab/Olares/cli/pkg/auth" + "github.com/beclab/Olares/cli/pkg/cliconfig" +) + +// TestPersistTokenAndProfile_Switching exercises the auto-switch contract +// added by the "login auto switch profile" plan. The behavior matrix lives +// in docs/notes/olares-cli-auth-profile-config.md §7.3; this table mirrors +// the verification checklist from the plan. 
+func TestPersistTokenAndProfile_Switching(t *testing.T) { + tok := func() *auth.Token { + return &auth.Token{AccessToken: "ignored", RefreshToken: "ignored"} + } + + type expect struct { + current string + previous string + switched bool + prevPtr string // res.PreviousCurrent + } + + cases := []struct { + name string + seedProfiles []cliconfig.ProfileConfig + seedCurrent string + seedPrevious string + newProfile cliconfig.ProfileConfig + switchCurrent bool + want expect + }{ + { + name: "first profile, switch=true: becomes current, previous untouched", + newProfile: cliconfig.ProfileConfig{OlaresID: "alice@olares.com"}, + switchCurrent: true, + want: expect{ + current: "alice@olares.com", + previous: "", + switched: true, + prevPtr: "", // there was no prior current to demote + }, + }, + { + name: "first profile, --no-switch: still becomes current (bootstrap fallback)", + newProfile: cliconfig.ProfileConfig{OlaresID: "alice@olares.com"}, + switchCurrent: false, + want: expect{ + current: "alice@olares.com", + previous: "", + switched: true, + prevPtr: "", + }, + }, + { + name: "different profile, switch=true: old current → previous, new is current", + seedProfiles: []cliconfig.ProfileConfig{{OlaresID: "alice@olares.com"}}, + seedCurrent: "alice@olares.com", + newProfile: cliconfig.ProfileConfig{OlaresID: "bob@olares.com"}, + switchCurrent: true, + want: expect{ + current: "bob@olares.com", + previous: "alice@olares.com", + switched: true, + prevPtr: "alice@olares.com", + }, + }, + { + name: "different profile, --no-switch: current/previous untouched", + seedProfiles: []cliconfig.ProfileConfig{{OlaresID: "alice@olares.com"}}, + seedCurrent: "alice@olares.com", + newProfile: cliconfig.ProfileConfig{OlaresID: "bob@olares.com"}, + switchCurrent: false, + want: expect{ + current: "alice@olares.com", + previous: "", + switched: false, + }, + }, + { + name: "same-account re-login, switch=true: no-op, no switched signal", + seedProfiles: 
[]cliconfig.ProfileConfig{{OlaresID: "alice@olares.com"}}, + seedCurrent: "alice@olares.com", + newProfile: cliconfig.ProfileConfig{OlaresID: "alice@olares.com"}, + switchCurrent: true, + want: expect{ + current: "alice@olares.com", + previous: "", + switched: false, + }, + }, + { + name: "same-account re-login preserves PreviousProfile that was already set", + seedProfiles: []cliconfig.ProfileConfig{ + {OlaresID: "alice@olares.com"}, + {OlaresID: "bob@olares.com"}, + }, + seedCurrent: "alice@olares.com", + seedPrevious: "bob@olares.com", + newProfile: cliconfig.ProfileConfig{OlaresID: "alice@olares.com"}, + switchCurrent: true, + want: expect{ + current: "alice@olares.com", + previous: "bob@olares.com", // untouched + switched: false, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Setenv("OLARES_CLI_HOME", t.TempDir()) + path, err := cliconfig.TokensFile() + if err != nil { + t.Fatalf("TokensFile: %v", err) + } + store := auth.NewFileStoreAt(path) + + cfg := &cliconfig.MultiProfileConfig{ + Profiles: append([]cliconfig.ProfileConfig(nil), tc.seedProfiles...), + CurrentProfile: tc.seedCurrent, + PreviousProfile: tc.seedPrevious, + } + if len(tc.seedProfiles) > 0 { + if err := cliconfig.SaveMultiProfileConfig(cfg); err != nil { + t.Fatalf("seed save config: %v", err) + } + } + + res, err := persistTokenAndProfile(cfg, store, tc.newProfile, tok(), tc.switchCurrent) + if err != nil { + t.Fatalf("persistTokenAndProfile: %v", err) + } + + if cfg.CurrentProfile != tc.want.current { + t.Errorf("CurrentProfile = %q, want %q", cfg.CurrentProfile, tc.want.current) + } + if cfg.PreviousProfile != tc.want.previous { + t.Errorf("PreviousProfile = %q, want %q", cfg.PreviousProfile, tc.want.previous) + } + if res.Switched != tc.want.switched { + t.Errorf("res.Switched = %v, want %v", res.Switched, tc.want.switched) + } + if res.PreviousCurrent != tc.want.prevPtr { + t.Errorf("res.PreviousCurrent = %q, want %q", res.PreviousCurrent, 
tc.want.prevPtr) + } + + // Cross-check on-disk state matches in-memory state, since the + // helper is supposed to have flushed via SaveMultiProfileConfig. + persisted, err := cliconfig.LoadMultiProfileConfig() + if err != nil { + t.Fatalf("reload config: %v", err) + } + if persisted.CurrentProfile != cfg.CurrentProfile { + t.Errorf("on-disk CurrentProfile = %q, want %q", persisted.CurrentProfile, cfg.CurrentProfile) + } + if persisted.PreviousProfile != cfg.PreviousProfile { + t.Errorf("on-disk PreviousProfile = %q, want %q", persisted.PreviousProfile, cfg.PreviousProfile) + } + if got, _ := store.Get(tc.newProfile.OlaresID); got == nil { + t.Errorf("token for %q not persisted", tc.newProfile.OlaresID) + } + }) + } +} diff --git a/cli/cmd/ctl/profile/import.go b/cli/cmd/ctl/profile/import.go new file mode 100644 index 000000000..43a1459bb --- /dev/null +++ b/cli/cmd/ctl/profile/import.go @@ -0,0 +1,96 @@ +package profile + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/auth" + "github.com/beclab/Olares/cli/pkg/cliconfig" +) + +type importOptions struct { + commonCredFlags + refreshToken string + noSwitch bool +} + +// NewImportCommand: `olares-cli profile import --olares-id --refresh-token [...]` +// +// Mode B: bootstrap an access_token from a user-supplied refresh_token by +// performing exactly ONE call to /api/refresh. This is the way to seed a +// profile when the user obtained their refresh token elsewhere (LarePass, +// wizard activation, manual extraction). +// +// Phase 1 does NOT use the stored refresh_token for background renewal — +// that's a Phase 2 deliverable. The same `auth.Refresh` HTTP call will be +// reused there, so the wire-format contract is locked in now. 
+func NewImportCommand() *cobra.Command { + o := &importOptions{} + cmd := &cobra.Command{ + Use: "import", + Short: "import a refresh token to bootstrap an access token (mode B)", + Long: `Import an existing refresh_token (e.g. obtained via LarePass or the wizard +activation flow) and exchange it once for an access_token via /api/refresh. + +The profile is auto-created on first import. Importing into an +already-authenticated profile is rejected; remove the profile first +(` + "`olares-cli profile remove `" + `) and import again.`, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + return runImport(cmd.Context(), o) + }, + } + o.commonCredFlags.bind(cmd) + cmd.Flags().StringVar(&o.refreshToken, "refresh-token", "", "refresh token to bootstrap (required)") + cmd.Flags().BoolVar(&o.noSwitch, "no-switch", false, "do not change the current profile after a successful import (useful for scripts)") + return cmd +} + +func runImport(ctx context.Context, o *importOptions) error { + if ctx == nil { + ctx = context.Background() + } + if o.refreshToken == "" { + return errors.New("--refresh-token is required") + } + _, _, authURL, err := o.commonCredFlags.validateAndDeriveAuthURL() + if err != nil { + return err + } + + cfg, err := cliconfig.LoadMultiProfileConfig() + if err != nil { + return err + } + store, err := auth.NewFileStore() + if err != nil { + return err + } + profile, err := ensureProfileWritable(cfg, store, o.commonCredFlags, time.Now()) + if err != nil { + return err + } + + tok, err := auth.Refresh(ctx, auth.RefreshRequest{ + AuthURL: authURL, + RefreshToken: o.refreshToken, + InsecureSkipVerify: o.insecureSkipVerify, + }) + if err != nil { + return err + } + + res, err := persistTokenAndProfile(cfg, store, profile, tok, !o.noSwitch) + if err != nil { + return err + } + + fmt.Printf("imported credentials for %s (profile: %s)\n", o.olaresID, profile.DisplayName()) + printSwitchNotice(res, profile.DisplayName()) + 
printPlaintextWarning() + return nil +} diff --git a/cli/cmd/ctl/profile/list.go b/cli/cmd/ctl/profile/list.go new file mode 100644 index 000000000..d1ed0b50d --- /dev/null +++ b/cli/cmd/ctl/profile/list.go @@ -0,0 +1,122 @@ +package profile + +import ( + "errors" + "fmt" + "os" + "text/tabwriter" + "time" + + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/auth" + "github.com/beclab/Olares/cli/pkg/cliconfig" +) + +// NewListCommand: `olares-cli profile list` +// +// Output is a TSV-like table: NAME / OLARES-ID / STATUS, with a leading "*" +// marking the current profile. STATUS reflects only what the local token +// store can prove without making a network call: +// +// logged-in (23h59m) — token present, JWT exp claim still in the future +// expired — token present, exp claim in the past +// invalidated — token present but explicitly marked unusable +// (Phase 2 sets this when /api/refresh returns 401/403); +// takes precedence over `expired` +// never — no stored token for this profile +// logged-in — token present but JWT has no exp claim (we can't +// tell client-side; trust until the server says no) +// +// Per §7.5 of the design doc, we deliberately do NOT print any other JWT +// claims (username / groups / mfa / jid). The OlaresID column is the local +// authoritative identity. 
+func NewListCommand() *cobra.Command { + return &cobra.Command{ + Use: "list", + Short: "list all profiles with login status and current marker", + Args: cobra.NoArgs, + RunE: func(_ *cobra.Command, _ []string) error { + return runList(os.Stdout) + }, + } +} + +func runList(out *os.File) error { + cfg, err := cliconfig.LoadMultiProfileConfig() + if err != nil { + return err + } + if len(cfg.Profiles) == 0 { + fmt.Fprintln(out, "no profiles configured.") + fmt.Fprintln(out, "run `olares-cli profile login --olares-id ` or `olares-cli profile import --olares-id --refresh-token ` to add one.") + return nil + } + + store, err := auth.NewFileStore() + if err != nil { + return err + } + + current := cfg.Current() + now := time.Now() + + w := tabwriter.NewWriter(out, 0, 0, 2, ' ', 0) + fmt.Fprintln(w, " \tNAME\tOLARES-ID\tSTATUS") + for i := range cfg.Profiles { + p := &cfg.Profiles[i] + marker := " " + if current != nil && current.OlaresID == p.OlaresID { + marker = "*" + } + status := profileStatus(store, p, now) + fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", marker, p.DisplayName(), p.OlaresID, status) + } + return w.Flush() +} + +// profileStatus inspects the token store for `p` and returns a short status +// string. Errors reading the store collapse into an opaque "unknown" rather +// than aborting the whole listing — partial output beats no output here. +func profileStatus(store auth.TokenStore, p *cliconfig.ProfileConfig, now time.Time) string { + tok, err := store.Get(p.OlaresID) + if err != nil { + if errors.Is(err, auth.ErrTokenNotFound) { + return "never" + } + return "unknown" + } + // Explicit invalidation wins over JWT-exp inspection: a server-side + // rejection of the refresh leg means the entire grant is dead, even if + // the access_token JWT happens to still have time left on its `exp`. 
+ if tok.InvalidatedAt > 0 { + return "invalidated" + } + exp, err := auth.ExpiresAt(tok.AccessToken) + if err != nil { + if errors.Is(err, auth.ErrNoExpClaim) { + return "logged-in" + } + return "logged-in (unparseable token)" + } + if !now.Before(exp) { + return "expired" + } + return fmt.Sprintf("logged-in (%s)", humanizeDuration(exp.Sub(now))) +} + +// humanizeDuration prints a coarse "23h59m" / "12m34s" / "5s" representation. +// Days are folded into hours to keep the column width predictable. +func humanizeDuration(d time.Duration) string { + if d < time.Minute { + return fmt.Sprintf("%ds", int(d.Seconds())) + } + if d < time.Hour { + m := int(d.Minutes()) + s := int(d.Seconds()) - m*60 + return fmt.Sprintf("%dm%ds", m, s) + } + h := int(d.Hours()) + m := int(d.Minutes()) - h*60 + return fmt.Sprintf("%dh%dm", h, m) +} diff --git a/cli/cmd/ctl/profile/login.go b/cli/cmd/ctl/profile/login.go new file mode 100644 index 000000000..9bc41b9b6 --- /dev/null +++ b/cli/cmd/ctl/profile/login.go @@ -0,0 +1,205 @@ +package profile + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "os" + "strings" + "syscall" + "time" + + "github.com/spf13/cobra" + "golang.org/x/term" + + "github.com/beclab/Olares/cli/pkg/auth" + "github.com/beclab/Olares/cli/pkg/cliconfig" +) + +type loginOptions struct { + commonCredFlags + passwordStdin bool + totp string + noSwitch bool +} + +// NewLoginCommand: `olares-cli profile login --olares-id [...]` +// +// Mode A (password login). Behavior matrix from the design doc: +// - profile does not exist → auto-create (with provided overrides) +// - profile exists, no/expired token → reuse existing profile, write new token +// - profile exists, valid token → reject with `profile remove` hint +// +// Password is read from stdin when --password-stdin is set, otherwise from +// the controlling terminal (with input echoing disabled). Two-factor accounts +// must supply --totp. 
+func NewLoginCommand() *cobra.Command { + o := &loginOptions{} + cmd := &cobra.Command{ + Use: "login", + Short: "log in to an Olares instance with a password (mode A)", + Long: `Authenticate to an Olares instance using the user's password (and TOTP if 2FA is enabled). + +The profile is auto-created on first login. Re-running login against an +already-authenticated profile is rejected; remove the profile first +(` + "`olares-cli profile remove `" + `) and log in again.`, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + return runLogin(cmd.Context(), o) + }, + } + o.commonCredFlags.bind(cmd) + cmd.Flags().BoolVar(&o.passwordStdin, "password-stdin", false, "read the password from stdin instead of prompting") + cmd.Flags().StringVar(&o.totp, "totp", "", "TOTP code for accounts with two-factor authentication enabled") + cmd.Flags().BoolVar(&o.noSwitch, "no-switch", false, "do not change the current profile after a successful login (useful for scripts)") + return cmd +} + +// bind wires the cred flags onto a cobra.Command. Defined here (not on +// commonCredFlags directly) to keep the import-side flag set identical. +func (f *commonCredFlags) bind(cmd *cobra.Command) { + cmd.Flags().StringVar(&f.olaresID, "olares-id", "", "olaresId, e.g. 
alice@olares.com (required)") + cmd.Flags().StringVar(&f.name, "name", "", "optional alias for the profile (defaults to the olaresId)") + cmd.Flags().StringVar(&f.authURLOverride, "auth-url-override", "", "override the derived auth URL (dev/internal use)") + cmd.Flags().StringVar(&f.localURLPrefix, "local-url-prefix", "", "label inserted between the auth subdomain and the terminus name (dev/internal use)") + cmd.Flags().BoolVar(&f.insecureSkipVerify, "insecure-skip-verify", false, "disable TLS verification for HTTP calls under this profile (dev/internal use)") +} + +func runLogin(ctx context.Context, o *loginOptions) error { + if ctx == nil { + ctx = context.Background() + } + id, terminusName, authURL, err := o.commonCredFlags.validateAndDeriveAuthURL() + if err != nil { + return err + } + + cfg, err := cliconfig.LoadMultiProfileConfig() + if err != nil { + return err + } + store, err := auth.NewFileStore() + if err != nil { + return err + } + profile, err := ensureProfileWritable(cfg, store, o.commonCredFlags, time.Now()) + if err != nil { + return err + } + + password, err := readPassword(o.passwordStdin, o.olaresID) + if err != nil { + return err + } + + tok, err := loginWithTOTPPrompt(ctx, auth.LoginRequest{ + AuthURL: authURL, + LocalName: id.Local(), + TerminusName: terminusName, + Password: password, + TOTP: o.totp, + InsecureSkipVerify: o.insecureSkipVerify, + }, o.olaresID) + if err != nil { + return err + } + + res, err := persistTokenAndProfile(cfg, store, profile, tok, !o.noSwitch) + if err != nil { + return err + } + + fmt.Printf("logged in as %s (profile: %s)\n", o.olaresID, profile.DisplayName()) + printSwitchNotice(res, profile.DisplayName()) + printPlaintextWarning() + return nil +} + +// loginWithTOTPPrompt wraps auth.Login with one round of interactive TOTP +// recovery: if the first attempt comes back ErrTOTPRequired (meaning the +// account has 2FA enabled and the caller didn't supply --totp) AND we're +// running on a TTY, prompt the user for 
the 6-digit code and retry once. +// +// If --totp was already supplied OR stdin is not a TTY (e.g. piped via +// --password-stdin from a script), we degrade to the original error so the +// caller knows to re-run with --totp explicitly. +// +// Note the retry re-issues /api/firstfactor — the server doesn't keep +// transitional state between the two factor steps from our perspective, and +// re-validating the password is cheap. We do NOT re-prompt the password. +func loginWithTOTPPrompt(ctx context.Context, req auth.LoginRequest, olaresID string) (*auth.Token, error) { + tok, err := auth.Login(ctx, req) + if err == nil { + return tok, nil + } + if !errors.Is(err, auth.ErrTOTPRequired) || req.TOTP != "" { + return nil, err + } + if !term.IsTerminal(int(syscall.Stdin)) { + return nil, fmt.Errorf("two-factor authentication required: re-run with --totp ") + } + totp, perr := promptTOTP(olaresID) + if perr != nil { + return nil, perr + } + req.TOTP = totp + return auth.Login(ctx, req) +} + +// promptTOTP reads a 6-digit code from the controlling terminal. The code is +// short-lived and not secret-sensitive in the same way a password is, so we +// echo it (matches `gh auth login`, `aws sso login`, kubectl OIDC plugins). +func promptTOTP(olaresID string) (string, error) { + fmt.Printf("two-factor code for %s: ", olaresID) + reader := bufio.NewReader(os.Stdin) + line, err := reader.ReadString('\n') + if err != nil && err != io.EOF { + return "", fmt.Errorf("read TOTP: %w", err) + } + line = strings.TrimSpace(line) + if line == "" { + return "", errors.New("TOTP code is empty") + } + return line, nil +} + +// readPassword pulls the password from the requested source. --password-stdin +// reads exactly one line from stdin (newline stripped); the interactive path +// turns off terminal echo. We never log or print the password. 
+func readPassword(fromStdin bool, olaresID string) (string, error) { + if fromStdin { + return readSingleLine(os.Stdin) + } + if !term.IsTerminal(int(syscall.Stdin)) { + return "", errors.New("stdin is not a terminal; pass --password-stdin and pipe the password instead") + } + fmt.Printf("password for %s: ", olaresID) + pw, err := term.ReadPassword(int(syscall.Stdin)) + fmt.Println() + if err != nil { + return "", fmt.Errorf("read password: %w", err) + } + if len(pw) == 0 { + return "", errors.New("password is empty") + } + return string(pw), nil +} + +// readSingleLine reads up to and including the first '\n' (or EOF) from r and +// returns the trimmed line. Used for --password-stdin so that +// `printf '%s' "$P" | olares-cli profile login --password-stdin` works +// regardless of whether the input has a trailing newline. +func readSingleLine(r io.Reader) (string, error) { + br := bufio.NewReader(r) + line, err := br.ReadString('\n') + if err != nil && err != io.EOF { + return "", fmt.Errorf("read stdin: %w", err) + } + line = strings.TrimRight(line, "\r\n") + if line == "" { + return "", errors.New("password is empty") + } + return line, nil +} diff --git a/cli/cmd/ctl/profile/remove.go b/cli/cmd/ctl/profile/remove.go new file mode 100644 index 000000000..2da02b0b7 --- /dev/null +++ b/cli/cmd/ctl/profile/remove.go @@ -0,0 +1,64 @@ +package profile + +import ( + "errors" + "fmt" + + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/auth" + "github.com/beclab/Olares/cli/pkg/cliconfig" +) + +// NewRemoveCommand: `olares-cli profile remove ` +// +// Removes the profile entry AND its stored token in one shot. There is no +// separate `auth logout` — `profile remove` is the canonical way to invalidate +// local credentials. If the removed profile was the current one, the current +// pointer falls back to PreviousProfile (when still valid) or to the first +// remaining profile. 
+//
+// Token deletion failures are reported but don't stop config save: a
+// dangling token entry is harmless (it'll just be stale) and we'd rather
+// have a consistent config.json than abort halfway.
+func NewRemoveCommand() *cobra.Command {
+	return &cobra.Command{
+		Use:   "remove <name|olaresId>",
+		Short: "delete a profile and its stored token",
+		Args:  cobra.ExactArgs(1),
+		RunE: func(_ *cobra.Command, args []string) error {
+			return runRemove(args[0])
+		},
+	}
+}
+
+func runRemove(key string) error {
+	cfg, err := cliconfig.LoadMultiProfileConfig()
+	if err != nil {
+		return err
+	}
+	removed, ok := cfg.Remove(key)
+	if !ok {
+		return fmt.Errorf("profile %q not found", key)
+	}
+	if err := cliconfig.SaveMultiProfileConfig(cfg); err != nil {
+		return fmt.Errorf("save config: %w", err)
+	}
+
+	store, err := auth.NewFileStore()
+	if err != nil {
+		return err
+	}
+	if err := store.Delete(removed.OlaresID); err != nil && !errors.Is(err, auth.ErrTokenNotFound) {
+		// Non-fatal: config is already updated.
+		fmt.Printf("warning: failed to clear stored token for %s: %v\n", removed.OlaresID, err)
+	}
+
+	fmt.Printf("removed profile %s (%s)\n", removed.DisplayName(), removed.OlaresID)
+	if cfg.CurrentProfile != "" {
+		fmt.Printf("current profile is now: %s\n", cfg.CurrentProfile)
+	} else if len(cfg.Profiles) == 0 {
+		fmt.Println("no profiles remain.")
+	}
+	return nil
+}
diff --git a/cli/cmd/ctl/profile/root.go b/cli/cmd/ctl/profile/root.go
new file mode 100644
index 000000000..d150f05aa
--- /dev/null
+++ b/cli/cmd/ctl/profile/root.go
@@ -0,0 +1,44 @@
+// Package profile implements the `olares-cli profile` command tree.
+//
+// Phase 1 surface (5 subcommands, no separate `auth` namespace):
+//
+// profile list # list all profiles + login status, mark current
+// profile use <name> # switch current profile (`-` reverts to previous)
+// profile remove <name> # delete profile + its stored token
+// profile login --olares-id ... # password-based login (mode A)
+// profile import --olares-id ...
# refresh-token bootstrap (mode B) +// +// See docs/notes/olares-cli-auth-profile-config.md for the full design. +package profile + +import "github.com/spf13/cobra" + +// NewProfileCommand returns the `profile` parent command, ready to be added +// to the olares-cli root. +func NewProfileCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "profile", + Short: "manage olares-cli profiles (one profile = one Olares instance + one user identity)", + Long: `Manage olares-cli profiles. A profile bundles a target Olares instance +(identified by an olaresId such as "alice@olares.com") with the local +credentials used to talk to it. + +Phase 1 stores tokens in plaintext at ~/.olares-cli/tokens.json with 0600 +permissions; OS keychain support arrives in Phase 2.`, + } + for _, sub := range []*cobra.Command{ + NewListCommand(), + NewUseCommand(), + NewRemoveCommand(), + NewLoginCommand(), + NewImportCommand(), + } { + // Don't dump cobra usage on every runtime error — those are user + // errors (bad creds, network, already-authenticated) whose message + // is already actionable. SilenceUsage is per-command (no inheritance + // from the parent), so we set it on every subcommand explicitly. + sub.SilenceUsage = true + cmd.AddCommand(sub) + } + return cmd +} diff --git a/cli/cmd/ctl/profile/use.go b/cli/cmd/ctl/profile/use.go new file mode 100644 index 000000000..5d0b46558 --- /dev/null +++ b/cli/cmd/ctl/profile/use.go @@ -0,0 +1,41 @@ +package profile + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/cliconfig" +) + +// NewUseCommand: `olares-cli profile use ` +// +// `name` may be a profile alias (Name) or its OlaresID. The literal `-` +// switches back to the previous profile (a la `cd -`), and is rejected when +// PreviousProfile is unset. 
+func NewUseCommand() *cobra.Command {
+	return &cobra.Command{
+		Use:   "use <name|olaresId>",
+		Short: "switch the current profile (use `-` to switch back to the previous one)",
+		Args:  cobra.ExactArgs(1),
+		RunE: func(_ *cobra.Command, args []string) error {
+			return runUse(args[0])
+		},
+	}
+}
+
+func runUse(key string) error {
+	cfg, err := cliconfig.LoadMultiProfileConfig()
+	if err != nil {
+		return err
+	}
+	target, err := cfg.SetCurrent(key)
+	if err != nil {
+		return err
+	}
+	if err := cliconfig.SaveMultiProfileConfig(cfg); err != nil {
+		return fmt.Errorf("save config: %w", err)
+	}
+	fmt.Printf("switched to profile %s (%s)\n", target.DisplayName(), target.OlaresID)
+	return nil
+}
diff --git a/cli/cmd/ctl/root.go b/cli/cmd/ctl/root.go
index 0bef93eb6..b801bb5f9 100755
--- a/cli/cmd/ctl/root.go
+++ b/cli/cmd/ctl/root.go
@@ -11,6 +11,7 @@ import (
 	"github.com/beclab/Olares/cli/cmd/ctl/node"
 	"github.com/beclab/Olares/cli/cmd/ctl/os"
 	"github.com/beclab/Olares/cli/cmd/ctl/osinfo"
+	"github.com/beclab/Olares/cli/cmd/ctl/profile"
 	"github.com/beclab/Olares/cli/cmd/ctl/user"
 	"github.com/beclab/Olares/cli/cmd/ctl/wizard"
 	"github.com/beclab/Olares/cli/version"
@@ -53,6 +54,7 @@ func NewDefaultCommand() *cobra.Command {
 	cmds.AddCommand(wizard.NewWizardCommand())
 	cmds.AddCommand(disk.NewDiskCommand())
 	cmds.AddCommand(app.NewAppCommand())
+	cmds.AddCommand(profile.NewProfileCommand())
 	return cmds
 }
diff --git a/cli/pkg/auth/jwt.go b/cli/pkg/auth/jwt.go
new file mode 100644
index 000000000..bde2b657d
--- /dev/null
+++ b/cli/pkg/auth/jwt.go
@@ -0,0 +1,80 @@
+// Package auth provides olares-cli's authentication primitives: JWT expiry
+// extraction, password-based login (/api/firstfactor + /api/secondfactor/totp),
+// refresh-token bootstrap (/api/refresh), and the on-disk token store.
+//
+// jwt.go intentionally exposes ONLY ExpiresAt(). The CLI does NOT verify JWT
+// signatures (it has no signing key), so all other claims (`username`,
+// `groups`, `mfa`, `jid`, ...)
are untrusted and must not leak into UX. The +// only JWT field treated as a "hint" is `exp`, because faking it can only +// trigger a self-inflicted 401 from the server. See §7.5 of +// docs/notes/olares-cli-auth-profile-config.md for the full rationale. +package auth + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +// expClaim is the minimal JWT payload subset we ever decode. We deliberately +// avoid Unmarshalling into a richer struct so reviewers can audit at a glance +// that no other claim ever escapes this package. +type expClaim struct { + Exp int64 `json:"exp"` +} + +// ExpiresAt decodes only the `exp` claim of a JWT (header.payload.signature) +// and returns it as a time.Time. It does NOT verify the signature. Use the +// returned value as a client-side hint only; the server remains the source of +// truth for token validity. +// +// Returns an error if the input doesn't look like a JWT, the payload can't be +// base64url-decoded, or the JSON is malformed. Tokens with no `exp` claim +// produce (zero time, ErrNoExpClaim). +func ExpiresAt(token string) (time.Time, error) { + if token == "" { + return time.Time{}, errors.New("token is empty") + } + parts := strings.Split(token, ".") + if len(parts) != 3 { + return time.Time{}, fmt.Errorf("token does not look like a JWT (want 3 segments, got %d)", len(parts)) + } + payload, err := base64.RawURLEncoding.DecodeString(parts[1]) + if err != nil { + // Some encoders include `=` padding; tolerate that. + payload, err = base64.URLEncoding.DecodeString(parts[1]) + if err != nil { + return time.Time{}, fmt.Errorf("decode payload: %w", err) + } + } + var c expClaim + if err := json.Unmarshal(payload, &c); err != nil { + return time.Time{}, fmt.Errorf("parse payload: %w", err) + } + if c.Exp == 0 { + return time.Time{}, ErrNoExpClaim + } + return time.Unix(c.Exp, 0), nil +} + +// ErrNoExpClaim is returned by ExpiresAt when the JWT payload has no `exp` +// field. 
Callers can treat this as "unknown expiry" and decide their own +// policy (Phase 1 conservatively treats unknown as "trust the token until the +// server says otherwise"). +var ErrNoExpClaim = errors.New("jwt has no exp claim") + +// IsExpired returns true if ExpiresAt(token) is non-zero AND in the past +// relative to now (or within `skew` of now). Tokens with no exp claim or +// malformed tokens return (false, err). +// +// `skew` is treated as a non-negative leeway; pass 0 for exact comparison. +func IsExpired(token string, now time.Time, skew time.Duration) (bool, error) { + exp, err := ExpiresAt(token) + if err != nil { + return false, err + } + return !now.Add(skew).Before(exp), nil +} diff --git a/cli/pkg/auth/jwt_test.go b/cli/pkg/auth/jwt_test.go new file mode 100644 index 000000000..a801e3df7 --- /dev/null +++ b/cli/pkg/auth/jwt_test.go @@ -0,0 +1,74 @@ +package auth + +import ( + "errors" + "testing" + "time" +) + +// TestExpiresAt_RealOlaresToken pins the JWT decoder to a real Olares +// access_token captured from a pptest01 session. The payload is: +// +// {"exp":1777127385,"iat":1777040985,"username":"pptest01", +// "groups":["lldap_admin"],"mfa":0,"jid":13962895094395427312} +// +// The test asserts ONLY the `exp` claim — it must NOT depend on any other +// field, because per §7.5 of the design doc the decoder is forbidden from +// surfacing them. 
+func TestExpiresAt_RealOlaresToken(t *testing.T) { + const tok = "eyJhbGciOiJIUzUxMiJ9.eyJleHAiOjE3NzcxMjczODUsImlhdCI6MTc3NzA0MDk4NSwidXNlcm5hbWUiOiJwcHRlc3QwMSIsImdyb3VwcyI6WyJsbGRhcF9hZG1pbiJdLCJtZmEiOjAsImppZCI6MTM5NjI4OTUwOTQzOTU0MjczMTJ9.5uEvkvXlUrREuxqK1W2Vruke_OZdiuPdGysiC0XPXVJ9fz_X_-3wPyA4WXdsQKqT9P86yqeb5ZrRGFokCjGkmA" + + got, err := ExpiresAt(tok) + if err != nil { + t.Fatalf("ExpiresAt: unexpected error: %v", err) + } + want := time.Unix(1777127385, 0) + if !got.Equal(want) { + t.Errorf("ExpiresAt: got %s, want %s", got, want) + } +} + +func TestExpiresAt_Errors(t *testing.T) { + t.Run("empty", func(t *testing.T) { + if _, err := ExpiresAt(""); err == nil { + t.Fatal("expected error for empty token") + } + }) + t.Run("not-a-jwt", func(t *testing.T) { + if _, err := ExpiresAt("not.a.jwt.too.many.dots"); err == nil { + t.Fatal("expected error for malformed token") + } + }) + t.Run("no-exp-claim", func(t *testing.T) { + // header.payload.sig where payload = base64url("{}") = "e30" + _, err := ExpiresAt("h.e30.s") + if !errors.Is(err, ErrNoExpClaim) { + t.Fatalf("expected ErrNoExpClaim, got %v", err) + } + }) +} + +func TestIsExpired(t *testing.T) { + const tok = "eyJhbGciOiJIUzUxMiJ9.eyJleHAiOjE3NzcxMjczODUsImlhdCI6MTc3NzA0MDk4NSwidXNlcm5hbWUiOiJwcHRlc3QwMSIsImdyb3VwcyI6WyJsbGRhcF9hZG1pbiJdLCJtZmEiOjAsImppZCI6MTM5NjI4OTUwOTQzOTU0MjczMTJ9.x" + + cases := []struct { + name string + now time.Time + want bool + }{ + {"before-exp", time.Unix(1777127385-3600, 0), false}, + {"at-exp", time.Unix(1777127385, 0), true}, + {"after-exp", time.Unix(1777127385+1, 0), true}, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + got, err := IsExpired(tok, c.now, 0) + if err != nil { + t.Fatalf("IsExpired: %v", err) + } + if got != c.want { + t.Errorf("got %v, want %v", got, c.want) + } + }) + } +} diff --git a/cli/pkg/auth/login.go b/cli/pkg/auth/login.go new file mode 100644 index 000000000..e8fce10bd --- /dev/null +++ b/cli/pkg/auth/login.go @@ -0,0 
+1,275 @@ +package auth + +import ( + "bytes" + "context" + "crypto/md5" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/http/cookiejar" + "strings" + "time" +) + +// Token mirrors the Authelia /api/firstfactor + /api/secondfactor/totp + +// /api/refresh response payload shared by Olares. +// +// We keep only the fields the CLI actually persists or inspects. The wire +// format historically also returns `expires_in`, `expires_at`, `fa2`, etc; +// the CLI ignores the time fields (auth.ExpiresAt(AccessToken) is the source +// of truth) but does honor `fa2` to detect when a TOTP step is required. +type Token struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` + SessionID string `json:"session_id,omitempty"` + FA2 bool `json:"fa2,omitempty"` +} + +// LoginRequest captures everything Login needs to perform first-factor (and, +// if needed, second-factor TOTP) authentication. +// +// AuthURL is the Olares auth subdomain base, e.g. "https://auth.alice.olares.com". +// The CLI POSTs to AuthURL + "/api/firstfactor" and AuthURL + "/api/secondfactor/totp". +// +// LocalName is the bare username (the part before `@` of the olaresId). +// The web app uses this as `username` in the request body. +// +// TerminusName is "."; it's used to construct the second-factor +// `targetUrl` field (https://desktop./) which the auth backend +// echoes back as the redirect target. +// +// TOTP is optional — supply it when the account has 2FA enabled. If the +// first-factor response indicates FA2 is required and TOTP is empty, Login +// returns ErrTOTPRequired so the caller can prompt and retry. 
+type LoginRequest struct {
+	AuthURL            string
+	LocalName          string
+	TerminusName       string
+	Password           string
+	TOTP               string
+	InsecureSkipVerify bool
+	Timeout            time.Duration // zero → 10s default
+}
+
+// ErrTOTPRequired is returned from Login when the first-factor response
+// reports FA2 is needed but the caller didn't supply a TOTP code. Callers
+// (e.g. `profile login`) can prompt the user and call Login again with TOTP set.
+var ErrTOTPRequired = errors.New("two-factor authentication is required: re-run with --totp <code>")
+
+// Login executes the password login flow:
+//  1. POST /api/firstfactor with the salted-MD5 password.
+//  2. If the response says fa2 is required, POST /api/secondfactor/totp with
+//     the supplied TOTP code (or return ErrTOTPRequired if none was given).
+//
+// On success the returned Token contains the freshly minted access_token and
+// refresh_token (the second-factor response overrides them when present).
+//
+// The function uses a short-lived http.Client with a cookie jar so that the
+// Authelia session cookie set on /api/firstfactor is automatically attached
+// to /api/secondfactor/totp — mirroring `withCredentials: true` in the web
+// implementation in apps/packages/app/src/utils/BindTerminusBusiness.ts.
+func Login(ctx context.Context, req LoginRequest) (*Token, error) {
+	if err := validateLoginRequest(req); err != nil {
+		return nil, err
+	}
+	client := newHTTPClient(req.Timeout, req.InsecureSkipVerify)
+
+	tok, err := postFirstFactor(ctx, client, req)
+	if err != nil {
+		return nil, err
+	}
+	if !tok.FA2 {
+		return tok, nil
+	}
+	if req.TOTP == "" {
+		return nil, ErrTOTPRequired
+	}
+	tok2, err := postSecondFactorTOTP(ctx, client, req, tok.AccessToken)
+	if err != nil {
+		return nil, err
+	}
+	// Carry forward whatever the second-factor response refreshed; keep the
+	// first-factor SessionID as a fallback if the server returned an empty one.
+ if tok2.SessionID == "" { + tok2.SessionID = tok.SessionID + } + return tok2, nil +} + +func validateLoginRequest(req LoginRequest) error { + switch { + case req.AuthURL == "": + return errors.New("AuthURL is required") + case req.LocalName == "": + return errors.New("LocalName is required") + case req.TerminusName == "": + return errors.New("TerminusName is required") + case req.Password == "": + return errors.New("Password is required") + } + return nil +} + +// passwordSalt mirrors the `passwordAddSort` helper in +// pkg/wizard/auth.go (and its TS counterpart in BindTerminusBusiness.ts): +// MD5 of `@Olares2025`. The salt is a public, account-independent +// constant — it's NOT a security feature, just a wire-format quirk the auth +// backend expects. +func passwordSalt(password string) string { + hash := md5.Sum([]byte(password + "@Olares2025")) + return fmt.Sprintf("%x", hash) +} + +type firstFactorBody struct { + Username string `json:"username"` + Password string `json:"password"` + KeepMeLoggedIn bool `json:"keepMeLoggedIn"` + RequestMethod string `json:"requestMethod"` + TargetURL string `json:"targetURL"` + AcceptCookie bool `json:"acceptCookie"` +} + +type firstFactorResponse struct { + Status string `json:"status"` + Message string `json:"message,omitempty"` + Data Token `json:"data"` +} + +func postFirstFactor(ctx context.Context, client *http.Client, req LoginRequest) (*Token, error) { + body := firstFactorBody{ + Username: req.LocalName, + Password: passwordSalt(req.Password), + KeepMeLoggedIn: false, + RequestMethod: "POST", + // Always declare the desktop subdomain as the post-login redirect target. + // Authelia's `fa2` flag in the response is computed against this URL via + // its access-control policy, and only the desktop./ rule + // requires 2FA. Sending the auth or vault URL would silently downgrade + // the response to 1FA, hiding the fact that the account has 2FA enabled. 
+ // See apps/packages/app/src/utils/account.ts (onFirstFactor) for the + // matching web behavior. + TargetURL: "https://desktop." + req.TerminusName + "/", + AcceptCookie: true, + } + resp, err := postJSON(ctx, client, req.AuthURL+"/api/firstfactor?hideCookie=true", body, nil) + if err != nil { + return nil, fmt.Errorf("/api/firstfactor: %w", err) + } + defer resp.Body.Close() + raw, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("read /api/firstfactor body: %w", err) + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("/api/firstfactor returned HTTP %d: %s", resp.StatusCode, truncate(raw)) + } + var parsed firstFactorResponse + if err := json.Unmarshal(raw, &parsed); err != nil { + return nil, fmt.Errorf("parse /api/firstfactor body: %w (body=%s)", err, truncate(raw)) + } + if !strings.EqualFold(parsed.Status, "OK") { + msg := parsed.Status + if parsed.Message != "" { + msg = msg + ": " + parsed.Message + } + return nil, fmt.Errorf("first-factor authentication failed: %s", msg) + } + return &parsed.Data, nil +} + +type secondFactorBody struct { + TargetURL string `json:"targetUrl"` + Token string `json:"token"` +} + +func postSecondFactorTOTP(ctx context.Context, client *http.Client, req LoginRequest, firstFactorAccessToken string) (*Token, error) { + // `targetUrl` echoes the eventual redirect destination the web app would + // be sent to after a successful second factor. The auth backend validates + // its scheme/host but otherwise just relays it back, so we hard-code the + // desktop subdomain pattern to match BindTerminusBusiness.ts. + body := secondFactorBody{ + TargetURL: "https://desktop." 
+ req.TerminusName + "/", + Token: req.TOTP, + } + headers := map[string]string{ + "X-Authorization": firstFactorAccessToken, + "X-Unauth-Error": "Non-Redirect", + } + resp, err := postJSON(ctx, client, req.AuthURL+"/api/secondfactor/totp", body, headers) + if err != nil { + return nil, fmt.Errorf("/api/secondfactor/totp: %w", err) + } + defer resp.Body.Close() + raw, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("read /api/secondfactor/totp body: %w", err) + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("/api/secondfactor/totp returned HTTP %d: %s", resp.StatusCode, truncate(raw)) + } + var parsed firstFactorResponse // identical envelope as first-factor + if err := json.Unmarshal(raw, &parsed); err != nil { + return nil, fmt.Errorf("parse /api/secondfactor/totp body: %w (body=%s)", err, truncate(raw)) + } + if !strings.EqualFold(parsed.Status, "OK") { + msg := parsed.Status + if parsed.Message != "" { + msg = msg + ": " + parsed.Message + } + return nil, fmt.Errorf("second-factor authentication failed: %s", msg) + } + return &parsed.Data, nil +} + +// postJSON marshals `body` as JSON, posts it to `url` via `client`, and +// returns the raw response. Callers must close resp.Body. 
+func postJSON(ctx context.Context, client *http.Client, url string, body any, headers map[string]string) (*http.Response, error) { + data, err := json.Marshal(body) + if err != nil { + return nil, fmt.Errorf("marshal body: %w", err) + } + httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(data)) + if err != nil { + return nil, err + } + httpReq.Header.Set("Content-Type", "application/json") + httpReq.Header.Set("Accept", "application/json") + for k, v := range headers { + httpReq.Header.Set(k, v) + } + return client.Do(httpReq) +} + +// newHTTPClient returns an http.Client suitable for auth flows: short timeout, +// cookie jar (so first-factor session cookies attach to second-factor), and +// optional InsecureSkipVerify for dev environments. +func newHTTPClient(timeout time.Duration, insecure bool) *http.Client { + if timeout <= 0 { + timeout = 10 * time.Second + } + jar, _ := cookiejar.New(nil) + c := &http.Client{ + Timeout: timeout, + Jar: jar, + } + if insecure { + c.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // #nosec G402 -- dev override gated behind explicit flag + } + } + return c +} + +// truncate caps a body snippet for inclusion in error messages. +func truncate(b []byte) string { + const max = 256 + if len(b) <= max { + return string(b) + } + return string(b[:max]) + "...(truncated)" +} diff --git a/cli/pkg/auth/refresh.go b/cli/pkg/auth/refresh.go new file mode 100644 index 000000000..75e9b4e15 --- /dev/null +++ b/cli/pkg/auth/refresh.go @@ -0,0 +1,93 @@ +package auth + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + "time" +) + +// RefreshRequest is the input to a single /api/refresh call. AccessToken is +// optional — the web client passes the (possibly expired) current token in +// `X-Authorization` and the server tolerates an empty value during bootstrap, +// so the CLI's `profile import` path leaves it blank. 
+type RefreshRequest struct { + AuthURL string + RefreshToken string + AccessToken string // optional, sent verbatim as X-Authorization when set + InsecureSkipVerify bool + Timeout time.Duration +} + +type refreshBody struct { + RefreshToken string `json:"refreshToken"` +} + +type refreshResponse struct { + Status string `json:"status"` + Message string `json:"message,omitempty"` + Data Token `json:"data"` +} + +// Refresh exchanges a refresh_token for a new Token via POST /api/refresh. +// +// Phase 1 uses this in two places: +// 1. `profile import` — bootstrap an access_token from a user-supplied +// refresh_token (no current access_token to pass). +// 2. (Phase 2) Background refresh when the stored access_token is near expiry. +// +// The wire format mirrors apps/packages/app/src/utils/account.ts `refresh_token`: +// POST `/api/refresh` with `{"refreshToken": "..."}`, optionally +// carrying `X-Authorization: `. Response envelope is +// `{"status": "OK", "data": Token}` (same shape as /api/firstfactor). 
+func Refresh(ctx context.Context, req RefreshRequest) (*Token, error) { + if req.AuthURL == "" { + return nil, errors.New("AuthURL is required") + } + if req.RefreshToken == "" { + return nil, errors.New("RefreshToken is required") + } + client := newHTTPClient(req.Timeout, req.InsecureSkipVerify) + + headers := map[string]string{} + if req.AccessToken != "" { + headers["X-Authorization"] = req.AccessToken + } + resp, err := postJSON(ctx, client, req.AuthURL+"/api/refresh", refreshBody{RefreshToken: req.RefreshToken}, headers) + if err != nil { + return nil, fmt.Errorf("/api/refresh: %w", err) + } + defer resp.Body.Close() + raw, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("read /api/refresh body: %w", err) + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("/api/refresh returned HTTP %d: %s", resp.StatusCode, truncate(raw)) + } + var parsed refreshResponse + if err := json.Unmarshal(raw, &parsed); err != nil { + return nil, fmt.Errorf("parse /api/refresh body: %w (body=%s)", err, truncate(raw)) + } + if !strings.EqualFold(parsed.Status, "OK") { + msg := parsed.Status + if parsed.Message != "" { + msg = msg + ": " + parsed.Message + } + return nil, fmt.Errorf("refresh failed: %s", msg) + } + if parsed.Data.AccessToken == "" { + return nil, errors.New("refresh returned empty access_token") + } + // The server occasionally omits a fresh refresh_token (rotating policy + // disabled). Fall back to the caller-supplied one so the next refresh has + // something to send. 
+ if parsed.Data.RefreshToken == "" { + parsed.Data.RefreshToken = req.RefreshToken + } + return &parsed.Data, nil +} diff --git a/cli/pkg/auth/token_store.go b/cli/pkg/auth/token_store.go new file mode 100644 index 000000000..d8cb97e2f --- /dev/null +++ b/cli/pkg/auth/token_store.go @@ -0,0 +1,218 @@ +package auth + +import ( + "encoding/json" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "time" + + "github.com/beclab/Olares/cli/pkg/cliconfig" +) + +// StoredToken is the per-olaresId record persisted to ~/.olares-cli/tokens.json +// during Phase 1. +// +// There is intentionally NO `ExpiresAt` field: AccessToken is a JWT and the +// only authoritative expiry comes from decoding its `exp` claim via +// auth.ExpiresAt. Mirroring the server's `expires_in` here would just create +// a second source of truth that can drift. +// +// RefreshToken is stored verbatim. It is not necessarily a JWT, so we never +// attempt to decode it. +// +// InvalidatedAt encodes server-side grant invalidation discovered by the +// client (e.g. /api/refresh returning 401/403). 0 means valid (or expiry +// has not yet been "discovered"); any value > 0 marks the entire grant +// (access_token + refresh_token) as unusable, even if the JWT's `exp` +// is still in the future. Phase 1 only DEFINES this field — no code path +// writes it. Phase 2's refreshWithLock is the writer. The only way to +// clear it back to 0 is a successful `profile login` / `profile import` +// (Set() defensively zeroes it). +type StoredToken struct { + OlaresID string `json:"olaresId"` + AccessToken string `json:"accessToken"` + RefreshToken string `json:"refreshToken,omitempty"` + SessionID string `json:"sessionId,omitempty"` + GrantedAt int64 `json:"grantedAt,omitempty"` // unix milliseconds, audit-only + InvalidatedAt int64 `json:"invalidatedAt,omitempty"` // unix milliseconds; 0 = valid +} + +// tokensFile is the on-disk schema. 
Keyed by OlaresID for O(1) lookup; the +// nested OlaresID field on StoredToken is redundant but kept for self-describing +// dumps. +type tokensFile struct { + Tokens map[string]StoredToken `json:"tokens"` +} + +// TokenStore is the Phase 1 plaintext-JSON token backend. It is intentionally +// a tiny interface (Get/Set/Delete/List/MarkInvalidated) so that Phase 2 can +// swap in an OS keychain implementation behind the same surface. +// +// MarkInvalidated stamps an existing entry's InvalidatedAt without touching +// other fields. Returns ErrTokenNotFound if no entry exists for olaresID. +// Phase 2's refreshWithLock calls this when /api/refresh returns 401/403. +type TokenStore interface { + Get(olaresID string) (*StoredToken, error) + Set(token StoredToken) error + Delete(olaresID string) error + List() ([]StoredToken, error) + MarkInvalidated(olaresID string, at time.Time) error +} + +// ErrTokenNotFound is returned when no token is stored for a given olaresId. +var ErrTokenNotFound = errors.New("token not found") + +// fileStore is the default plaintext-JSON implementation of TokenStore. Reads +// and writes are sequential (no concurrency); Phase 2 will add flock for +// cross-process safety together with keychain. +type fileStore struct { + path string +} + +// NewFileStore creates a TokenStore backed by ~/.olares-cli/tokens.json (or +// the override resolved by cliconfig.TokensFile()). +func NewFileStore() (TokenStore, error) { + path, err := cliconfig.TokensFile() + if err != nil { + return nil, err + } + return &fileStore{path: path}, nil +} + +// NewFileStoreAt is exposed for tests; production code should call NewFileStore. 
+func NewFileStoreAt(path string) TokenStore { + return &fileStore{path: path} +} + +func (s *fileStore) load() (*tokensFile, error) { + data, err := os.ReadFile(s.path) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return &tokensFile{Tokens: map[string]StoredToken{}}, nil + } + return nil, fmt.Errorf("read %s: %w", s.path, err) + } + if len(data) == 0 { + return &tokensFile{Tokens: map[string]StoredToken{}}, nil + } + tf := &tokensFile{} + if err := json.Unmarshal(data, tf); err != nil { + return nil, fmt.Errorf("parse %s: %w", s.path, err) + } + if tf.Tokens == nil { + tf.Tokens = map[string]StoredToken{} + } + return tf, nil +} + +func (s *fileStore) save(tf *tokensFile) error { + if _, err := cliconfig.EnsureHome(); err != nil { + return err + } + data, err := json.MarshalIndent(tf, "", " ") + if err != nil { + return fmt.Errorf("marshal tokens: %w", err) + } + return atomicWriteFile(s.path, data, 0o600) +} + +func (s *fileStore) Get(olaresID string) (*StoredToken, error) { + tf, err := s.load() + if err != nil { + return nil, err + } + tok, ok := tf.Tokens[olaresID] + if !ok { + return nil, ErrTokenNotFound + } + return &tok, nil +} + +func (s *fileStore) Set(token StoredToken) error { + if token.OlaresID == "" { + return errors.New("StoredToken.OlaresID is required") + } + tf, err := s.load() + if err != nil { + return err + } + // Defensive: a fresh grant always supersedes any prior invalidation + // stamp. Callers shouldn't be passing InvalidatedAt > 0 here, but if + // they do (or if they forget to clear it when overwriting), normalize. 
+ token.InvalidatedAt = 0 + tf.Tokens[token.OlaresID] = token + return s.save(tf) +} + +func (s *fileStore) MarkInvalidated(olaresID string, at time.Time) error { + tf, err := s.load() + if err != nil { + return err + } + tok, ok := tf.Tokens[olaresID] + if !ok { + return ErrTokenNotFound + } + tok.InvalidatedAt = at.UnixMilli() + tf.Tokens[olaresID] = tok + return s.save(tf) +} + +func (s *fileStore) Delete(olaresID string) error { + tf, err := s.load() + if err != nil { + return err + } + if _, ok := tf.Tokens[olaresID]; !ok { + return ErrTokenNotFound + } + delete(tf.Tokens, olaresID) + return s.save(tf) +} + +func (s *fileStore) List() ([]StoredToken, error) { + tf, err := s.load() + if err != nil { + return nil, err + } + out := make([]StoredToken, 0, len(tf.Tokens)) + for _, t := range tf.Tokens { + out = append(out, t) + } + return out, nil +} + +// atomicWriteFile mirrors cliconfig.atomicWriteFile but is duplicated here to +// avoid an exported helper just for cross-package use. Both implementations +// must stay in sync. 
+func atomicWriteFile(path string, data []byte, perm os.FileMode) error { + dir := filepath.Dir(path) + tmp, err := os.CreateTemp(dir, ".tmp-*") + if err != nil { + return fmt.Errorf("create temp file in %s: %w", dir, err) + } + tmpName := tmp.Name() + cleanup := func() { _ = os.Remove(tmpName) } + if _, err := tmp.Write(data); err != nil { + _ = tmp.Close() + cleanup() + return fmt.Errorf("write temp file: %w", err) + } + if err := tmp.Chmod(perm); err != nil { + _ = tmp.Close() + cleanup() + return fmt.Errorf("chmod temp file: %w", err) + } + if err := tmp.Close(); err != nil { + cleanup() + return fmt.Errorf("close temp file: %w", err) + } + if err := os.Rename(tmpName, path); err != nil { + cleanup() + return fmt.Errorf("rename %s -> %s: %w", tmpName, path, err) + } + return nil +} diff --git a/cli/pkg/cliconfig/config.go b/cli/pkg/cliconfig/config.go new file mode 100644 index 000000000..75d69e039 --- /dev/null +++ b/cli/pkg/cliconfig/config.go @@ -0,0 +1,270 @@ +package cliconfig + +import ( + "encoding/json" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + + "github.com/beclab/Olares/cli/pkg/olares" +) + +// MultiProfileConfig is the on-disk schema of ~/.olares-cli/config.json. +// It tracks all known profiles plus which one is currently active, mirroring +// lark-cli's MultiAppConfig but stripped of brand / strict-mode / multi-app +// concerns (see docs/notes/olares-cli-auth-profile-config.md §11). +type MultiProfileConfig struct { + CurrentProfile string `json:"currentProfile,omitempty"` + PreviousProfile string `json:"previousProfile,omitempty"` + Profiles []ProfileConfig `json:"profiles,omitempty"` +} + +// ProfileConfig is a single profile entry: a target Olares instance + the +// user identity used to talk to it. The primary key is OlaresID; Name is an +// optional alias users can pass to commands like `profile use `. +// +// Tokens are NOT stored in this file — they live in tokens.json and are +// looked up by OlaresID. See pkg/auth.TokenStore. 
+type ProfileConfig struct { + // Name is an optional human-friendly alias. If empty, OlaresID is used as + // the display name. + Name string `json:"name,omitempty"` + + // OlaresID is the canonical user identity, e.g. "alice@olares.com". + // All per-user URLs (auth / vault / desktop) are derived from it. + OlaresID string `json:"olaresId"` + + // UserUID is optionally populated after login for diagnostics. Never used + // as an authoritative identity (see §7.5 of the design doc on JWT trust). + UserUID string `json:"userUid,omitempty"` + + // AuthURLOverride bypasses the standard URL derivation. Used for dev / + // internal environments. Leave empty in production. + AuthURLOverride string `json:"authUrlOverride,omitempty"` + + // LocalURLPrefix is inserted between the service subdomain and the + // terminus name when deriving URLs (e.g. "dev." → "auth.dev.alice.olares.com"). + // Leave empty in production. + LocalURLPrefix string `json:"localUrlPrefix,omitempty"` + + // InsecureSkipVerify disables TLS verification for HTTP calls under this + // profile. Dev only. + InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` +} + +// DisplayName returns Name if set, else OlaresID. Used in CLI output where we +// want a stable handle for the profile. +func (p *ProfileConfig) DisplayName() string { + if p.Name != "" { + return p.Name + } + return p.OlaresID +} + +// ResolvedAuthURL returns the auth URL the CLI should hit for this profile, +// honoring AuthURLOverride when set. +func (p *ProfileConfig) ResolvedAuthURL() (string, error) { + if p.AuthURLOverride != "" { + return p.AuthURLOverride, nil + } + id, err := olares.ParseID(p.OlaresID) + if err != nil { + return "", err + } + return id.AuthURL(p.LocalURLPrefix), nil +} + +// FindProfile looks up a profile by Name first, then OlaresID. Mirrors +// lark-cli's MultiAppConfig.FindApp lookup order so that aliases shadow raw +// IDs in command-line UX. Returns nil if no match. 
+func (m *MultiProfileConfig) FindProfile(key string) *ProfileConfig { + if key == "" { + return nil + } + for i := range m.Profiles { + if m.Profiles[i].Name == key { + return &m.Profiles[i] + } + } + for i := range m.Profiles { + if m.Profiles[i].OlaresID == key { + return &m.Profiles[i] + } + } + return nil +} + +// FindByOlaresID is a strict OlaresID lookup. Used by login / import flows +// where we explicitly want to detect "same olaresId already exists". +func (m *MultiProfileConfig) FindByOlaresID(olaresID string) *ProfileConfig { + if olaresID == "" { + return nil + } + for i := range m.Profiles { + if m.Profiles[i].OlaresID == olaresID { + return &m.Profiles[i] + } + } + return nil +} + +// Current returns the active profile, or nil if there isn't one (no profiles +// at all, or CurrentProfile pointing at a stale entry). +func (m *MultiProfileConfig) Current() *ProfileConfig { + if m.CurrentProfile == "" { + if len(m.Profiles) == 0 { + return nil + } + return &m.Profiles[0] + } + return m.FindProfile(m.CurrentProfile) +} + +// Upsert inserts or replaces a profile by OlaresID. If a profile with the +// same OlaresID exists its slot is overwritten in place (preserving order); +// otherwise the new profile is appended. Returns the (possibly newly inserted) +// profile in its persisted slot. +func (m *MultiProfileConfig) Upsert(p ProfileConfig) *ProfileConfig { + for i := range m.Profiles { + if m.Profiles[i].OlaresID == p.OlaresID { + m.Profiles[i] = p + return &m.Profiles[i] + } + } + m.Profiles = append(m.Profiles, p) + return &m.Profiles[len(m.Profiles)-1] +} + +// Remove deletes a profile by Name or OlaresID. If the removed profile was +// the current one, CurrentProfile is repointed to PreviousProfile (if still +// valid) or to the first remaining profile. Returns the removed profile and a +// boolean indicating whether anything was deleted. 
+func (m *MultiProfileConfig) Remove(key string) (*ProfileConfig, bool) {
+	idx := -1
+	for i := range m.Profiles {
+		if key != "" && (m.Profiles[i].Name == key || m.Profiles[i].OlaresID == key) { // empty key matches nothing (mirrors FindProfile's guard)
+			idx = i
+			break
+		}
+	}
+	if idx == -1 {
+		return nil, false
+	}
+	removed := m.Profiles[idx]
+	m.Profiles = append(m.Profiles[:idx], m.Profiles[idx+1:]...)
+
+	wasCurrent := m.CurrentProfile != "" && (m.CurrentProfile == removed.Name || m.CurrentProfile == removed.OlaresID) // "" means implicit-first; must not match an alias-less profile
+	wasPrevious := m.PreviousProfile != "" && (m.PreviousProfile == removed.Name || m.PreviousProfile == removed.OlaresID)
+	if wasPrevious {
+		m.PreviousProfile = ""
+	}
+	if wasCurrent {
+		// Prefer falling back to PreviousProfile if it's still a valid entry,
+		// otherwise to whatever ended up first in the slice.
+		switch {
+		case m.PreviousProfile != "" && m.FindProfile(m.PreviousProfile) != nil:
+			m.CurrentProfile = m.PreviousProfile
+			m.PreviousProfile = ""
+		case len(m.Profiles) > 0:
+			m.CurrentProfile = m.Profiles[0].DisplayName()
+		default:
+			m.CurrentProfile = ""
+		}
+	}
+	return &removed, true
+}
+
+// SetCurrent flips CurrentProfile / PreviousProfile, resolving "-" to the
+// previous profile (a la `cd -`). Returns the newly-current profile.
+func (m *MultiProfileConfig) SetCurrent(key string) (*ProfileConfig, error) {
+	if key == "-" {
+		if m.PreviousProfile == "" {
+			return nil, errors.New("no previous profile to switch back to")
+		}
+		key = m.PreviousProfile
+	}
+	target := m.FindProfile(key)
+	if target == nil {
+		return nil, fmt.Errorf("profile %q not found", key)
+	}
+	newCurrent := target.DisplayName()
+	if m.CurrentProfile != newCurrent {
+		m.PreviousProfile = m.CurrentProfile
+		m.CurrentProfile = newCurrent
+	}
+	return target, nil
+}
+
+// LoadMultiProfileConfig reads config.json from disk. A missing file yields
+// an empty config (not an error) so first-run UX works.
+func LoadMultiProfileConfig() (*MultiProfileConfig, error) { + path, err := ConfigFile() + if err != nil { + return nil, err + } + data, err := os.ReadFile(path) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return &MultiProfileConfig{}, nil + } + return nil, fmt.Errorf("read %s: %w", path, err) + } + if len(data) == 0 { + return &MultiProfileConfig{}, nil + } + cfg := &MultiProfileConfig{} + if err := json.Unmarshal(data, cfg); err != nil { + return nil, fmt.Errorf("parse %s: %w", path, err) + } + return cfg, nil +} + +// SaveMultiProfileConfig writes config.json atomically with 0600 perms, +// creating the parent directory if needed. +func SaveMultiProfileConfig(cfg *MultiProfileConfig) error { + dir, err := EnsureHome() + if err != nil { + return err + } + path := filepath.Join(dir, configFilename) + data, err := json.MarshalIndent(cfg, "", " ") + if err != nil { + return fmt.Errorf("marshal config: %w", err) + } + return atomicWriteFile(path, data, filePerm) +} + +// atomicWriteFile writes data to path via a temp file + rename, mirroring the +// safety pattern in lark-cli's core.SaveMultiAppConfig. 
+func atomicWriteFile(path string, data []byte, perm os.FileMode) error { + dir := filepath.Dir(path) + tmp, err := os.CreateTemp(dir, ".tmp-*") + if err != nil { + return fmt.Errorf("create temp file in %s: %w", dir, err) + } + tmpName := tmp.Name() + cleanup := func() { + _ = os.Remove(tmpName) + } + if _, err := tmp.Write(data); err != nil { + _ = tmp.Close() + cleanup() + return fmt.Errorf("write temp file: %w", err) + } + if err := tmp.Chmod(perm); err != nil { + _ = tmp.Close() + cleanup() + return fmt.Errorf("chmod temp file: %w", err) + } + if err := tmp.Close(); err != nil { + cleanup() + return fmt.Errorf("close temp file: %w", err) + } + if err := os.Rename(tmpName, path); err != nil { + cleanup() + return fmt.Errorf("rename %s -> %s: %w", tmpName, path, err) + } + return nil +} diff --git a/cli/pkg/cliconfig/paths.go b/cli/pkg/cliconfig/paths.go new file mode 100644 index 000000000..06c2bf0c7 --- /dev/null +++ b/cli/pkg/cliconfig/paths.go @@ -0,0 +1,79 @@ +// Package cliconfig owns the on-disk profile configuration of olares-cli +// (~/.olares-cli/config.json + ~/.olares-cli/tokens.json). +// +// The package is named cliconfig (not "config") to avoid clashing with the +// pre-existing cmd/config package, which serves a different purpose +// (per-command flag wiring). +package cliconfig + +import ( + "fmt" + "os" + "path/filepath" +) + +// homeEnv is the environment variable used to override the config dir, mirroring +// lark-cli's $LARK_CLI_HOME convention. +const homeEnv = "OLARES_CLI_HOME" + +// defaultDir is the directory name used under $HOME when $OLARES_CLI_HOME is +// unset. +const defaultDir = ".olares-cli" + +// Filenames inside the config dir. +const ( + configFilename = "config.json" + tokensFilename = "tokens.json" +) + +// Permissions for the config dir & files. tokens.json carries refresh tokens +// in plaintext during Phase 1; both files therefore use 0600 / 0700. 
+const ( + dirPerm os.FileMode = 0o700 + filePerm os.FileMode = 0o600 +) + +// Home returns the resolved olares-cli config directory. It honors the +// $OLARES_CLI_HOME override and falls back to $HOME/.olares-cli. The directory +// is NOT created here — callers that intend to write should call EnsureHome +// instead. +func Home() (string, error) { + if v := os.Getenv(homeEnv); v != "" { + return v, nil + } + home, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("resolve user home: %w", err) + } + return filepath.Join(home, defaultDir), nil +} + +// EnsureHome resolves Home() and ensures the directory exists with 0700 perms. +func EnsureHome() (string, error) { + dir, err := Home() + if err != nil { + return "", err + } + if err := os.MkdirAll(dir, dirPerm); err != nil { + return "", fmt.Errorf("create %s: %w", dir, err) + } + return dir, nil +} + +// ConfigFile returns the absolute path to config.json (without creating it). +func ConfigFile() (string, error) { + dir, err := Home() + if err != nil { + return "", err + } + return filepath.Join(dir, configFilename), nil +} + +// TokensFile returns the absolute path to tokens.json (without creating it). +func TokensFile() (string, error) { + dir, err := Home() + if err != nil { + return "", err + } + return filepath.Join(dir, tokensFilename), nil +} diff --git a/cli/pkg/cmdutil/factory.go b/cli/pkg/cmdutil/factory.go new file mode 100644 index 000000000..8df69aa55 --- /dev/null +++ b/cli/pkg/cmdutil/factory.go @@ -0,0 +1,138 @@ +// Package cmdutil holds the shared "Factory" that command implementations +// reach into instead of constructing their own clients / loading their own +// config / etc. This is the olares-cli analogue of lark-cli's +// cmdutil.Factory. +// +// Phase 1 keeps the Factory deliberately minimal: lazily-resolved credential +// chain + Bearer-injecting HTTP client. Phase 2 will add automatic token +// refresh inside the same HTTPClient call without changing this surface. 
+package cmdutil + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "net/http" + "sync" + "time" + + "github.com/beclab/Olares/cli/pkg/credential" +) + +// Factory is the dependency-injection seam for olares-cli commands. Build +// one with NewFactory at the root command level and pass it (or a closure +// that closes over it) into command constructors. +// +// All accessors are lazy and memoized — calling HTTPClient(ctx) multiple +// times reuses the same resolved profile + client. +type Factory struct { + // ProfileOverride, when non-empty, forces ResolveProfile to look up this + // profile instead of the currently-selected one. Wired from a global + // `--profile` / `--olares-id` flag at the root command. + ProfileOverride string + + // IOStreams (Phase 2) will live here so commands can write to a swappable + // io.Writer pair. For now we just expose a Stderr getter to avoid a hard + // dependency churn when we add it. + Stderr io.Writer + + credentialOnce sync.Once + credentialErr error + credential *credential.CredentialProvider + + resolveOnce sync.Once + resolveErr error + resolved *credential.ResolvedProfile + + clientOnce sync.Once + client *http.Client +} + +// NewFactory builds a fresh Factory. Cheap; intended to be called once per +// process from the root command. +func NewFactory() *Factory { + return &Factory{} +} + +// Credential returns the lazily-constructed credential chain. The chain is +// (EnvProvider, DefaultProvider) — env first so future in-cluster builds +// can pre-empt on-disk config. 
+func (f *Factory) Credential() (*credential.CredentialProvider, error) { + f.credentialOnce.Do(func() { + def, err := credential.NewDefaultProvider() + if err != nil { + f.credentialErr = fmt.Errorf("init default credential provider: %w", err) + return + } + f.credential = credential.NewCredentialProvider( + credential.NewEnvProvider(), + def, + ) + }) + return f.credential, f.credentialErr +} + +// ResolveProfile returns the active profile fully resolved (URLs + token). +// Memoized; subsequent calls return the same ResolvedProfile. +func (f *Factory) ResolveProfile(ctx context.Context) (*credential.ResolvedProfile, error) { + f.resolveOnce.Do(func() { + cred, err := f.Credential() + if err != nil { + f.resolveErr = err + return + } + rp, err := cred.Resolve(ctx, f.ProfileOverride) + if err != nil { + f.resolveErr = err + return + } + f.resolved = rp + }) + return f.resolved, f.resolveErr +} + +// HTTPClient returns an *http.Client whose RoundTripper transparently injects +// `Authorization: Bearer ` on every outbound request. The client +// also honors the active profile's InsecureSkipVerify flag. +// +// Phase 1: the token is fetched once at first call and reused until the +// process exits. If it expires mid-run, requests will start returning 401 — +// the user's recourse is to re-run `profile login` / `profile import`. +// +// Phase 2 will refactor this into a refreshing transport without changing +// the signature. 
+func (f *Factory) HTTPClient(ctx context.Context) (*http.Client, error) { + rp, err := f.ResolveProfile(ctx) + if err != nil { + return nil, err + } + f.clientOnce.Do(func() { + base := http.DefaultTransport.(*http.Transport).Clone() + if rp.InsecureSkipVerify { + base.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} // #nosec G402 -- explicit profile opt-in + } + f.client = &http.Client{ + Timeout: 30 * time.Second, + Transport: &bearerTransport{base: base, token: rp.AccessToken}, + } + }) + return f.client, nil +} + +// bearerTransport injects Authorization: Bearer on outbound requests. +// It clones the request before mutating headers so the caller's *http.Request +// is left untouched (important when callers retry). +type bearerTransport struct { + base http.RoundTripper + token string +} + +func (b *bearerTransport) RoundTrip(req *http.Request) (*http.Response, error) { + if b.token == "" { + return b.base.RoundTrip(req) + } + clone := req.Clone(req.Context()) + clone.Header.Set("Authorization", "Bearer "+b.token) + return b.base.RoundTrip(clone) +} diff --git a/cli/pkg/credential/default_provider.go b/cli/pkg/credential/default_provider.go new file mode 100644 index 000000000..330b3401f --- /dev/null +++ b/cli/pkg/credential/default_provider.go @@ -0,0 +1,147 @@ +package credential + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/beclab/Olares/cli/pkg/auth" + "github.com/beclab/Olares/cli/pkg/cliconfig" + "github.com/beclab/Olares/cli/pkg/olares" +) + +// DefaultProvider resolves a profile using the local config + plaintext token +// store. It implements the standard "ran `profile login` on the same machine" +// scenario. +// +// Resolve checks failure modes in this fixed priority order, each producing +// a distinct error type but the same user-facing CTA ("run profile login"): +// +// 1. profile nil → return (nil, nil); orchestrator surfaces ErrNoProfile +// 2. no token stored → ErrNotLoggedIn +// 3. 
stored.InvalidatedAt > 0 → ErrTokenInvalidated (Phase 2 marks this on /api/refresh 401/403) +// 4. JWT exp claim in the past → ErrTokenExpired +// 5. otherwise → ResolvedProfile +// +// Note (3) takes precedence over (4): an explicitly-invalidated grant is +// unusable even if the access_token JWT happens to still have time left, +// because the refresh leg is dead and we cannot get a new one. Phase 1 does +// NOT auto-refresh. Phase 2 will inject token-refresh logic here. +type DefaultProvider struct { + store auth.TokenStore + now func() time.Time +} + +// NewDefaultProvider opens the on-disk token store and returns a Provider +// suitable for normal CLI invocations. Returns an error only if the token +// store path itself can't be resolved (which usually means $HOME is broken). +func NewDefaultProvider() (Provider, error) { + store, err := auth.NewFileStore() + if err != nil { + return nil, err + } + return &DefaultProvider{store: store, now: time.Now}, nil +} + +// Name implements Provider. +func (d *DefaultProvider) Name() string { return "default" } + +// ErrNotLoggedIn is returned when a profile exists but has no stored token. +type ErrNotLoggedIn struct { + OlaresID string +} + +func (e *ErrNotLoggedIn) Error() string { + return fmt.Sprintf("no access token for %s; run: olares-cli profile login --olares-id %s (or profile import --refresh-token )", e.OlaresID, e.OlaresID) +} + +// ErrTokenExpired is returned when a stored token's JWT `exp` is in the past. +// Phase 1 does not auto-refresh; Phase 2 will catch this internally. 
+type ErrTokenExpired struct { + OlaresID string + ExpiredAt time.Time +} + +func (e *ErrTokenExpired) Error() string { + return fmt.Sprintf("access token for %s expired at %s; please run: olares-cli profile login --olares-id %s (or profile import --olares-id %s --refresh-token )", + e.OlaresID, e.ExpiredAt.Format(time.RFC3339), e.OlaresID, e.OlaresID) +} + +// ErrTokenInvalidated is returned when a stored token has been explicitly +// marked unusable via TokenStore.MarkInvalidated. The grant cannot be +// recovered locally — the user must re-authenticate. +// +// Phase 1 has no code path that writes InvalidatedAt; the only way to hit +// this in Phase 1 is by hand-editing tokens.json. Phase 2's refreshWithLock +// will write InvalidatedAt when /api/refresh returns 401/403. +type ErrTokenInvalidated struct { + OlaresID string + InvalidatedAt time.Time +} + +func (e *ErrTokenInvalidated) Error() string { + return fmt.Sprintf("refresh token for %s became invalid at %s; please run: olares-cli profile login --olares-id %s (or profile import --olares-id %s --refresh-token )", + e.OlaresID, e.InvalidatedAt.Format(time.RFC3339), e.OlaresID, e.OlaresID) +} + +// Resolve implements Provider. +func (d *DefaultProvider) Resolve(_ context.Context, profile *cliconfig.ProfileConfig) (*ResolvedProfile, error) { + if profile == nil { + return nil, nil + } + + stored, err := d.store.Get(profile.OlaresID) + if err != nil { + if errors.Is(err, auth.ErrTokenNotFound) { + return nil, &ErrNotLoggedIn{OlaresID: profile.OlaresID} + } + return nil, fmt.Errorf("read token store: %w", err) + } + + // Priority 1: explicit invalidation overrides any local heuristic. + // We can't talk to the server with this grant even if the JWT looks fresh. 
+ if stored.InvalidatedAt > 0 { + return nil, &ErrTokenInvalidated{ + OlaresID: profile.OlaresID, + InvalidatedAt: time.UnixMilli(stored.InvalidatedAt), + } + } + + exp, expErr := auth.ExpiresAt(stored.AccessToken) + if expErr != nil && !errors.Is(expErr, auth.ErrNoExpClaim) { + return nil, fmt.Errorf("decode access token: %w", expErr) + } + if !exp.IsZero() && !d.now().Before(exp) { + return nil, &ErrTokenExpired{OlaresID: profile.OlaresID, ExpiredAt: exp} + } + + return buildResolved(profile, stored.AccessToken, exp) +} + +// buildResolved is shared between DefaultProvider and any future provider that +// needs to turn (ProfileConfig, accessToken) into a ResolvedProfile. +func buildResolved(profile *cliconfig.ProfileConfig, accessToken string, exp time.Time) (*ResolvedProfile, error) { + authURL, err := profile.ResolvedAuthURL() + if err != nil { + return nil, fmt.Errorf("derive auth URL: %w", err) + } + id, err := olares.ParseID(profile.OlaresID) + if err != nil { + return nil, err + } + rp := &ResolvedProfile{ + Name: profile.DisplayName(), + OlaresID: profile.OlaresID, + UserUID: profile.UserUID, + AuthURL: authURL, + VaultURL: id.VaultURL(profile.LocalURLPrefix), + DesktopURL: id.DesktopURL(profile.LocalURLPrefix), + AccessToken: accessToken, + InsecureSkipVerify: profile.InsecureSkipVerify, + } + if !exp.IsZero() { + rp.ExpiresAt = exp.Unix() + } + return rp, nil +} diff --git a/cli/pkg/credential/env_provider.go b/cli/pkg/credential/env_provider.go new file mode 100644 index 000000000..3ea146ca6 --- /dev/null +++ b/cli/pkg/credential/env_provider.go @@ -0,0 +1,29 @@ +package credential + +import ( + "context" + + "github.com/beclab/Olares/cli/pkg/cliconfig" +) + +// EnvProvider is the placeholder for the in-cluster ("sandbox") scenario: +// when olares-cli runs inside an application container, the user-service +// will inject access_token / scope / olaresId via environment variables and +// this provider will surface them as a ResolvedProfile. 
+// +// Phase 1 ships an inert implementation that always declines (returns +// (nil, nil)) so the chain falls through to DefaultProvider. Phase 3 will fill +// it in once the user-service env-var contract is finalized. +type EnvProvider struct{} + +// NewEnvProvider returns the Phase-1 stub. The real implementation will accept +// a config struct here. +func NewEnvProvider() Provider { return &EnvProvider{} } + +// Name implements Provider. +func (e *EnvProvider) Name() string { return "env" } + +// Resolve implements Provider. Always declines in Phase 1. +func (e *EnvProvider) Resolve(_ context.Context, _ *cliconfig.ProfileConfig) (*ResolvedProfile, error) { + return nil, nil +} diff --git a/cli/pkg/credential/provider.go b/cli/pkg/credential/provider.go new file mode 100644 index 000000000..5c1f13b14 --- /dev/null +++ b/cli/pkg/credential/provider.go @@ -0,0 +1,93 @@ +package credential + +import ( + "context" + "errors" + "fmt" + + "github.com/beclab/Olares/cli/pkg/cliconfig" +) + +// CredentialProvider chains zero or more Providers in priority order: the +// first one that returns a non-nil ResolvedProfile wins. Phase 1 wires +// (EnvProvider, DefaultProvider) — env first so the in-cluster scenario can +// pre-empt the on-disk config when shipped. +// +// This is the Phase-1 analogue of lark-cli's credential.CredentialProvider, +// minus the multi-app / token-cache plumbing (Phase 2). +type CredentialProvider struct { + providers []Provider +} + +// NewCredentialProvider returns a chain that consults each Provider in order. +// Pass them most-specific first. +func NewCredentialProvider(providers ...Provider) *CredentialProvider { + return &CredentialProvider{providers: providers} +} + +// ErrNoProfile is returned when no Provider could resolve a profile (typically +// because the user hasn't run `profile login` yet AND no in-cluster env vars +// are present). 
+var ErrNoProfile = errors.New("no Olares profile is configured: run `olares-cli profile login --olares-id ` or `olares-cli profile import --olares-id --refresh-token `") + +// Resolve walks the provider chain. It is responsible for loading the on-disk +// profile (if any) once and feeding it to each provider. The first +// non-nil ResolvedProfile from the chain is returned. If every provider +// declines, ErrNoProfile is returned (or the most informative error from a +// declining provider, if all returned errors). +// +// `profileKey` is an optional override (e.g. an `--olares-id`/`--profile` +// flag). When empty, the currently-selected profile from config.json is used. +func (c *CredentialProvider) Resolve(ctx context.Context, profileKey string) (*ResolvedProfile, error) { + cfg, err := cliconfig.LoadMultiProfileConfig() + if err != nil { + return nil, fmt.Errorf("load config: %w", err) + } + var profile *cliconfig.ProfileConfig + if profileKey != "" { + profile = cfg.FindProfile(profileKey) + if profile == nil { + return nil, fmt.Errorf("profile %q not found in %s", profileKey, configFileForError()) + } + } else { + profile = cfg.Current() + } + + var lastErr error + for _, p := range c.providers { + resolved, err := p.Resolve(ctx, profile) + if err != nil { + lastErr = fmt.Errorf("provider %s: %w", p.Name(), err) + continue + } + if resolved != nil { + if resolved.Source == "" { + resolved.Source = p.Name() + } + return resolved, nil + } + } + if lastErr != nil { + return nil, lastErr + } + return nil, ErrNoProfile +} + +// configFileForError best-effort-resolves the config path for inclusion in +// "not found" error messages. Returns "" if resolution itself fails. 
+func configFileForError() string { + p, err := cliconfig.ConfigFile() + if err != nil { + return "" + } + return p +} + +// RequireBuiltinCredentialProvider is a hook for Phase 3: when an env-driven +// (or other "external") provider is in play, mutating commands like +// `profile login` should refuse to run because there's nothing local to +// mutate. Phase 1 always returns nil; the call sites are wired now so future +// activation is mechanical. +func RequireBuiltinCredentialProvider(_ *CredentialProvider) error { + return nil +} diff --git a/cli/pkg/credential/types.go b/cli/pkg/credential/types.go new file mode 100644 index 000000000..c29fb197c --- /dev/null +++ b/cli/pkg/credential/types.go @@ -0,0 +1,56 @@ +// Package credential is the orchestration layer that turns a +// cliconfig.ProfileConfig + a stored token into a fully-resolved view that +// command code can consume without touching disk directly. +// +// The package is intentionally small in Phase 1: a Provider interface, a +// chained CredentialProvider, a DefaultProvider that reads +// ~/.olares-cli/{config,tokens}.json, and an EnvProvider stub for the future +// in-cluster (sandbox) scenario. Phase 2 adds keychain + automatic refresh +// inside DefaultProvider; the interface stays stable. +package credential + +import ( + "context" + + "github.com/beclab/Olares/cli/pkg/cliconfig" +) + +// ResolvedProfile is the "ready to make an API call" view of a profile — +// analogous to lark-cli's CliConfig. Command code interacts only with this +// struct so that swapping in an EnvProvider later requires zero changes +// upstream. +type ResolvedProfile struct { + Name string // alias, falls back to OlaresID + OlaresID string + UserUID string + + AuthURL string + VaultURL string + DesktopURL string + + AccessToken string + // ExpiresAt is the unix-seconds expiry decoded from AccessToken's `exp` + // claim. Zero means "no exp claim found" and is treated as "trust the + // token until the server says otherwise". 
+ ExpiresAt int64 + + // Source identifies which Provider produced this ResolvedProfile (for + // diagnostics: "default", "env", ...). + Source string + + // InsecureSkipVerify is forwarded from the underlying ProfileConfig so + // HTTP clients constructed against this profile honor the dev override. + InsecureSkipVerify bool +} + +// Provider is implemented by anything that can turn a ProfileConfig (which +// may be nil for env-driven providers) into a ResolvedProfile. Returning +// (nil, nil) means "I don't claim this profile, try the next provider". +// +// The `profile` argument is provided by the orchestrating CredentialProvider: +// it's the currently-selected ProfileConfig from cliconfig (or nil when none +// exists). EnvProvider may ignore it entirely; DefaultProvider requires it. +type Provider interface { + Name() string + Resolve(ctx context.Context, profile *cliconfig.ProfileConfig) (*ResolvedProfile, error) +} diff --git a/cli/pkg/olares/id.go b/cli/pkg/olares/id.go new file mode 100644 index 000000000..c4c024331 --- /dev/null +++ b/cli/pkg/olares/id.go @@ -0,0 +1,88 @@ +// Package olares contains primitives shared across olares-cli that don't fit +// into a more specific subpackage. +// +// id.go: parse an Olares ID (e.g. "alice@olares.com") and derive the URLs of +// the per-user services that olares-cli talks to (auth / vault / desktop). +// +// Background: every Olares user is identified by an "olaresId" of the form +// "@". The terminus name is the same identity rendered with a +// dot ("."), and per-user service hostnames are constructed by +// prefixing that terminus name with the service subdomain (auth / vault / +// desktop / ...). The optional `localURLPrefix` is a dev-only knob that +// inserts an extra label between the service subdomain and the terminus name +// (used for staging / local DNS overrides). See pkg/wizard/user_store.go for +// the original derivation that the web app and CLI must stay in sync with. 
+package olares
+
+import (
+	"fmt"
+	"strings"
+)
+
+// DefaultDomain is the fallback domain used when an olaresId has no `@`
+// suffix. Mirrors `TerminusDefaultDomain` in pkg/wizard/user_store.go.
+const DefaultDomain = "olares.com"
+
+// ID is an opaque wrapper around an olaresId string. Construct one with
+// ParseID; the zero value is invalid.
+type ID string
+
+// ParseID validates a raw olaresId string and returns it as an ID. An empty
+// string or a value containing more than one `@` is rejected.
+func ParseID(raw string) (ID, error) {
+	raw = strings.TrimSpace(raw)
+	if raw == "" {
+		return "", fmt.Errorf("olaresId is empty")
+	}
+	if strings.Count(raw, "@") > 1 {
+		return "", fmt.Errorf("olaresId %q contains more than one '@'", raw)
+	}
+	return ID(raw), nil
+}
+
+// String returns the canonical olaresId string.
+func (id ID) String() string { return string(id) }
+
+// Local returns the part before `@`. For an unqualified id (no `@`) the whole
+// id is returned.
+func (id ID) Local() string {
+	s := string(id)
+	if i := strings.Index(s, "@"); i >= 0 {
+		return s[:i]
+	}
+	return s
+}
+
+// Domain returns the part after `@`, falling back to DefaultDomain when the
+// `@` is absent or trailing (an empty domain would yield malformed hostnames).
+func (id ID) Domain() string {
+	s := string(id)
+	if i := strings.Index(s, "@"); i >= 0 && i+1 < len(s) {
+		return s[i+1:]
+	}
+	return DefaultDomain
+}
+
+// TerminusName renders the id with `.` instead of `@`, e.g. "alice.olares.com".
+func (id ID) TerminusName() string {
+	return id.Local() + "." + id.Domain()
+}
+
+// AuthURL returns the per-user Authelia base URL, e.g.
+// "https://auth.alice.olares.com". `localPrefix` may be empty; when set it is
+// inserted between the `auth.` subdomain and the terminus name (no trailing
+// dot is added — callers pass e.g. "dev." for "auth.dev.alice.olares.com").
+func (id ID) AuthURL(localPrefix string) string { + return fmt.Sprintf("https://auth.%s%s", localPrefix, id.TerminusName()) +} + +// VaultURL returns the per-user vault base URL with the conventional `/server` +// suffix, e.g. "https://vault.alice.olares.com/server". +func (id ID) VaultURL(localPrefix string) string { + return fmt.Sprintf("https://vault.%s%s/server", localPrefix, id.TerminusName()) +} + +// DesktopURL returns the per-user desktop base URL, e.g. +// "https://desktop.alice.olares.com". +func (id ID) DesktopURL(localPrefix string) string { + return fmt.Sprintf("https://desktop.%s%s", localPrefix, id.TerminusName()) +} From 5efe2a0db0e3819bf04cc1575316279068007ba1 Mon Sep 17 00:00:00 2001 From: Peng Peng Date: Sat, 25 Apr 2026 16:31:34 +0800 Subject: [PATCH 02/12] feat(cli): add `olares-cli files ls` and Phase 1 cleanup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes the Phase 1 loop with a first authenticated command and rolls in the audit fixes from the cleanup pass. `files ls /[/]`: - Parse + validate the 3-segment front-end path used by files-backend (drive/Home, drive/Data, sync/, awss3//, ...); unknown fileType / bad drive extend are rejected client-side before any HTTP. - Percent-encode each segment via url.PathEscape (mirrors the web app's encodeUrl) so filenames with `#`, `?`, `+`, spaces, `%`, non-ASCII survive the trip to /api/resources. - Render a one-line header (` (N dirs, M files, modified ...)`) followed by a MODE / SIZE / TYPE / MODIFIED / NAME table; MODE decodes os.FileMode (drwxr-xr-x / -rw-r--r-- / Lrwxr-xr-x) and TYPE surfaces the backend's class (video / image / audio / pdf / text / blob / ...). `--json` passes the raw response through verbatim. - 401/403 reuses DefaultProvider's "run profile login" CTA; other non-2xx surface the backend's error/code+message JSON verbatim. 
Supporting plumbing:

- olares.ID.FilesURL derives `https://files.<terminusName>` (matches the
  web app's getModuleSever('files')).
- ResolvedProfile + DefaultProvider expose FilesURL so commands don't
  have to redo the derivation.
- Factory's HTTP client now injects the access token via the custom
  `X-Authorization` header (was `Authorization: Bearer`). Confirmed via
  l4-bfl-proxy + BFL filters that the standard Authorization header is
  filtered at the edge and never reaches per-user services; the web app
  uses the same X-Authorization path.
- Root command grows a persistent `--profile` flag bound straight onto
  the shared Factory.ProfileOverride, so any subcommand that calls
  factory.ResolveProfile honors it without re-declaring the flag.
- Drop the unused Factory.Stderr field.

.gitignore: exclude the local `/files/` and `/market/` checkouts that
are kept beside the repo for cross-reference but must never be
committed.

Tests: ParseFrontendPath (15 cases incl. URL-escape table covering
`#?+%` / spaces / non-ASCII), formatSize (10 cases incl. 1023/1024/1MB
boundary), formatMode (incl. the live-observed dir mode 2147484141 →
drwxr-xr-x), formatType, formatHTTPError (401/403, error/code+message,
raw fallback), renderListing (header + counts + dirs-first sort).
Made-with: Cursor --- .gitignore | 3 + cli/cmd/ctl/files/ls.go | 347 +++++++++++++++++++++++++ cli/cmd/ctl/files/ls_test.go | 267 +++++++++++++++++++ cli/cmd/ctl/files/path.go | 215 +++++++++++++++ cli/cmd/ctl/files/path_test.go | 216 +++++++++++++++ cli/cmd/ctl/files/root.go | 64 +++++ cli/cmd/ctl/root.go | 13 + cli/pkg/cmdutil/factory.go | 56 ++-- cli/pkg/credential/default_provider.go | 1 + cli/pkg/credential/types.go | 1 + cli/pkg/olares/id.go | 8 + 11 files changed, 1168 insertions(+), 23 deletions(-) create mode 100644 cli/cmd/ctl/files/ls.go create mode 100644 cli/cmd/ctl/files/ls_test.go create mode 100644 cli/cmd/ctl/files/path.go create mode 100644 cli/cmd/ctl/files/path_test.go create mode 100644 cli/cmd/ctl/files/root.go diff --git a/.gitignore b/.gitignore index b3ecfd058..1bd05673f 100644 --- a/.gitignore +++ b/.gitignore @@ -44,3 +44,6 @@ node_modules cli/olares-cli* framework/app-service/bin + +/files/ +/market/ diff --git a/cli/cmd/ctl/files/ls.go b/cli/cmd/ctl/files/ls.go new file mode 100644 index 000000000..4df936583 --- /dev/null +++ b/cli/cmd/ctl/files/ls.go @@ -0,0 +1,347 @@ +package files + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "sort" + "strings" + "text/tabwriter" + "time" + + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/cmdutil" +) + +type lsOptions struct { + asJSON bool +} + +// NewLsCommand: `olares-cli files ls [--json]` +// +// Calls GET /api/resources// on the +// per-user files-backend (proxied via files.) and renders the +// result. The access token is injected by Factory's HTTP client as the +// `X-Authorization` header — see pkg/cmdutil/factory.go for why that header +// (not the standard Authorization: Bearer) is the right one for Olares. 
+// +// Errors: +// - bad / missing path is rejected client-side via ParseFrontendPath +// - 401/403 from the backend is reported with the same "run profile login" +// CTA that DefaultProvider uses, so the message is consistent across +// "no token" / "expired token" / "server-rejected token" +// - other non-2xx responses surface the backend's error/message JSON field +// verbatim, which is usually enough to debug (unknown node, missing repo, +// permission denied, ...) +func NewLsCommand(f *cmdutil.Factory) *cobra.Command { + o := &lsOptions{} + cmd := &cobra.Command{ + Use: "ls ", + Short: "list a directory on the per-user files-backend", + Long: `List a directory on the per-user files-backend. + +The path is the full 3-segment front-end path used by the backend +(/[/]); see ` + "`olares-cli files --help`" + ` for +the schema. + +Examples: + + olares-cli files ls drive/Home/ + olares-cli files ls drive/Home/Documents + olares-cli files ls drive/Data/ + olares-cli files ls cache// + olares-cli files ls sync// + olares-cli files ls awss3// +`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runLs(cmd.Context(), f, cmd.OutOrStdout(), args[0], o) + }, + } + cmd.Flags().BoolVar(&o.asJSON, "json", false, "print the raw JSON response (pretty-printed) instead of a table") + return cmd +} + +// listingItem is a deliberately-narrow projection of files-backend's +// FileInfo (files/pkg/files/file.go). We don't import the backend struct +// directly to avoid pulling in afero / klog / the rest of the per-user +// service into the CLI binary; we only decode what we render in the +// table view (MODE / SIZE / TYPE / MODIFIED / NAME) plus Path, which is +// handy for diagnostic error messages and for the future cat/cp/rm verbs. +// +// `Mode` is the raw integer value of Go's os.FileMode (ModeDir | perms | +// ...) — the backend marshals it that way, see files/pkg/files/file.go. 
+// `Type` is the backend's semantic class (one of "" / blob / video / +// audio / image / pdf / text / textImmutable / invalid_link); empty for +// directories. We pass it through verbatim and let the user see the same +// label the web app would. +type listingItem struct { + Name string `json:"name"` + IsDir bool `json:"isDir"` + IsSymlink bool `json:"isSymlink"` + Size int64 `json:"size"` + Modified time.Time `json:"modified"` + Mode uint32 `json:"mode"` + Path string `json:"path"` + Type string `json:"type"` +} + +// listingResponse decodes both the parent-directory envelope (used to print +// a one-line header before the table) and the items it contains. NumDirs / +// NumFiles come from the backend; we use them verbatim when present and +// fall back to counting `Items` if the backend reports zeros (defensive — +// older response shapes may not populate them for every fileType). +type listingResponse struct { + Name string `json:"name"` + Path string `json:"path"` + Modified time.Time `json:"modified"` + Mode uint32 `json:"mode"` + IsSymlink bool `json:"isSymlink"` + NumDirs int `json:"numDirs"` + NumFiles int `json:"numFiles"` + Items []listingItem `json:"items"` +} + +func runLs(ctx context.Context, f *cmdutil.Factory, out io.Writer, rawPath string, o *lsOptions) error { + if ctx == nil { + ctx = context.Background() + } + + fp, err := ParseFrontendPath(rawPath) + if err != nil { + return err + } + + rp, err := f.ResolveProfile(ctx) + if err != nil { + return err + } + client, err := f.HTTPClient(ctx) + if err != nil { + return err + } + + // URLPath percent-encodes each path segment (mirrors the web app's + // encodeUrl helper) so filenames with '#', '?', '+', spaces, etc. survive + // the trip to the backend. 
ParseFrontendPath already guarantees that + // listing the extend root ("drive/Home" or "drive/Home/") yields a + // SubPath of "/", so URLPath() naturally ends with '/' there — which is + // what FileParam.convert() in files-backend requires + // (it rejects len(strings.Split(u, "/")) < 3). + endpoint := rp.FilesURL + "/api/resources/" + fp.URLPath() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil) + if err != nil { + return fmt.Errorf("build request: %w", err) + } + req.Header.Set("Accept", "application/json") + + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("GET %s: %w", endpoint, err) + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("read response body: %w", err) + } + + if resp.StatusCode/100 != 2 { + return formatHTTPError(resp.StatusCode, body, rp.OlaresID, endpoint) + } + + if o.asJSON { + return prettyPrintJSON(out, body) + } + + var listing listingResponse + if err := json.Unmarshal(body, &listing); err != nil { + return fmt.Errorf("decode response: %w (body=%s)", err, truncate(string(body), 200)) + } + return renderListing(out, fp, listing) +} + +// formatHTTPError turns a non-2xx response into a user-facing error. 401/403 +// is special-cased to match DefaultProvider's CTA so the user sees the same +// hint whether the local check or the remote check is what failed. +func formatHTTPError(status int, body []byte, olaresID, url string) error { + if status == http.StatusUnauthorized || status == http.StatusForbidden { + return fmt.Errorf("server rejected the access token (HTTP %d); please run: olares-cli profile login --olares-id %s", + status, olaresID) + } + // Backend returns errors as either {"error": "..."} or {"code":1,"message":"..."}. + // Try to surface either; fall back to the raw body. 
+ var generic struct { + Error string `json:"error"` + Message string `json:"message"` + Code int `json:"code"` + } + if err := json.Unmarshal(body, &generic); err == nil { + switch { + case generic.Error != "": + return fmt.Errorf("GET %s: HTTP %d: %s", url, status, generic.Error) + case generic.Message != "": + return fmt.Errorf("GET %s: HTTP %d (code=%d): %s", url, status, generic.Code, generic.Message) + } + } + return fmt.Errorf("GET %s: HTTP %d: %s", url, status, truncate(string(body), 500)) +} + +// renderListing prints (a) a one-line header summarising the directory the +// user just listed, and (b) a 5-column table of its contents +// (MODE / SIZE / TYPE / MODIFIED / NAME). Directories sort first, then +// files, both case-insensitive alphabetical. Directory names get a +// trailing '/' so the distinction is also visible per row. +// +// Empty directories print the header followed by "(empty)" — the header is +// always present so the user sees the directory's own modified-time and +// dir/file counts even when there's nothing inside. +func renderListing(w io.Writer, fp FrontendPath, listing listingResponse) error { + writeListingHeader(w, fp, listing) + + if len(listing.Items) == 0 { + _, err := fmt.Fprintln(w, "(empty)") + return err + } + + items := append([]listingItem(nil), listing.Items...) 
+ sort.SliceStable(items, func(i, j int) bool { + if items[i].IsDir != items[j].IsDir { + return items[i].IsDir // dirs first + } + return strings.ToLower(items[i].Name) < strings.ToLower(items[j].Name) + }) + + tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0) + fmt.Fprintln(tw, "MODE\tSIZE\tTYPE\tMODIFIED\tNAME") + for _, it := range items { + modified := "-" + if !it.Modified.IsZero() { + modified = it.Modified.Local().Format("2006-01-02 15:04") + } + name := it.Name + if it.IsDir { + name += "/" + } + fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\n", + formatMode(it.Mode, it.IsDir, it.IsSymlink), + formatSize(it.Size, it.IsDir), + formatType(it.Type, it.IsDir), + modified, + name, + ) + } + return tw.Flush() +} + +// writeListingHeader prints a single banner line of the form +// +// drive/Home/Code (1 dir, 3 files, modified 2026-04-17 19:31) +// +// Counts come from the envelope; we fall back to counting items when the +// backend reports zeros but the listing clearly isn't empty (defensive). +// The "modified" suffix is omitted when the envelope didn't carry one. +func writeListingHeader(w io.Writer, fp FrontendPath, listing listingResponse) { + dirs, files := listing.NumDirs, listing.NumFiles + if dirs == 0 && files == 0 && len(listing.Items) > 0 { + for _, it := range listing.Items { + if it.IsDir { + dirs++ + } else { + files++ + } + } + } + + parts := []string{ + pluralize(dirs, "dir", "dirs"), + pluralize(files, "file", "files"), + } + if !listing.Modified.IsZero() { + parts = append(parts, "modified "+listing.Modified.Local().Format("2006-01-02 15:04")) + } + fmt.Fprintf(w, "%s (%s)\n", fp.String(), strings.Join(parts, ", ")) +} + +// pluralize returns " ". Tiny helper, but it makes the +// header read naturally for the common 0/1/many cases ("0 dirs, 1 file"). 
+func pluralize(n int, singular, plural string) string { + if n == 1 { + return fmt.Sprintf("%d %s", n, singular) + } + return fmt.Sprintf("%d %s", n, plural) +} + +// formatSize renders bytes in a compact human-friendly form (1.2K, 3.4M). +// Directories report "-" because their backend Size is meaningless without +// a recursive walk and would confuse users. +func formatSize(n int64, isDir bool) string { + if isDir { + return "-" + } + const unit = 1024 + if n < unit { + return fmt.Sprintf("%dB", n) + } + div, exp := int64(unit), 0 + for n2 := n / unit; n2 >= unit; n2 /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f%cB", float64(n)/float64(div), "KMGTPE"[exp]) +} + +// formatMode renders the per-row mode column. The backend ships os.FileMode +// as a raw integer (ModeDir | perms | ...); when it's nonzero we delegate +// to os.FileMode.String(), which gives us proper "drwxr-xr-x" / "Lrwxr-xr-x" +// / "-rw-r--r--" forms — a strict superset of the old "-"/"d" indicator. +// +// When `mode` is zero (older response shapes / partial fixtures) we still +// surface dir/symlink-ness from the dedicated bool fields so the column +// remains informative. +func formatMode(mode uint32, isDir, isSymlink bool) string { + if mode != 0 { + return os.FileMode(mode).String() + } + switch { + case isSymlink: + return "L---------" + case isDir: + return "d---------" + default: + return "----------" + } +} + +// formatType returns what to display in the TYPE column. The backend's empty +// string is rendered as "-" so the column stays visually aligned for +// directories and uncategorised entries. 
+func formatType(t string, isDir bool) string { + if isDir || t == "" { + return "-" + } + return t +} + +func prettyPrintJSON(w io.Writer, body []byte) error { + var v any + if err := json.Unmarshal(body, &v); err != nil { + _, werr := w.Write(body) + return werr + } + enc := json.NewEncoder(w) + enc.SetIndent("", " ") + return enc.Encode(v) +} + +func truncate(s string, n int) string { + if len(s) <= n { + return s + } + return s[:n] + "...(truncated)" +} diff --git a/cli/cmd/ctl/files/ls_test.go b/cli/cmd/ctl/files/ls_test.go new file mode 100644 index 000000000..cb8fa2051 --- /dev/null +++ b/cli/cmd/ctl/files/ls_test.go @@ -0,0 +1,267 @@ +package files + +import ( + "bytes" + "net/http" + "strings" + "testing" + "time" +) + +func TestFormatSize(t *testing.T) { + cases := []struct { + name string + n int64 + isDir bool + want string + }{ + {name: "directory always dash", n: 12345, isDir: true, want: "-"}, + {name: "zero bytes", n: 0, want: "0B"}, + {name: "one byte", n: 1, want: "1B"}, + {name: "just under 1K", n: 1023, want: "1023B"}, + {name: "exactly 1K", n: 1024, want: "1.0KB"}, + {name: "fractional KB", n: 1536, want: "1.5KB"}, + {name: "just under 1M", n: 1024*1024 - 1, want: "1024.0KB"}, + {name: "exactly 1M", n: 1024 * 1024, want: "1.0MB"}, + {name: "1.5GB", n: 1536 * 1024 * 1024, want: "1.5GB"}, + {name: "1TB", n: 1 << 40, want: "1.0TB"}, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + if got := formatSize(c.n, c.isDir); got != c.want { + t.Errorf("formatSize(%d, %v) = %q, want %q", c.n, c.isDir, got, c.want) + } + }) + } +} + +func TestFormatHTTPError(t *testing.T) { + const url = "https://files.alice.olares.com/api/resources/drive/Home/" + const olaresID = "alice@olares.com" + + cases := []struct { + name string + status int + body string + wantSubstrs []string + wantNotSubstrs []string + }{ + { + name: "401 surfaces re-login CTA with olaresId", + status: http.StatusUnauthorized, + body: `{"error":"unauthorized"}`, + 
wantSubstrs: []string{"HTTP 401", "profile login", olaresID}, + // the body's error text shouldn't leak through on auth failures — + // the CTA is more useful than the raw 401 reason + wantNotSubstrs: []string{"unauthorized"}, + }, + { + name: "403 also routes to re-login CTA", + status: http.StatusForbidden, + body: ``, + wantSubstrs: []string{"HTTP 403", "profile login", olaresID}, + }, + { + name: "500 with {error} surfaces backend message", + status: http.StatusInternalServerError, + body: `{"error":"node missing"}`, + wantSubstrs: []string{"HTTP 500", "node missing", url}, + }, + { + name: "500 with code+message surfaces both", + status: http.StatusInternalServerError, + body: `{"code":1,"message":"Directory not exist."}`, + wantSubstrs: []string{"HTTP 500", "code=1", "Directory not exist."}, + }, + { + name: "non-JSON body falls back to raw (truncated)", + status: http.StatusBadGateway, + body: "upstream gone", + wantSubstrs: []string{"HTTP 502", "upstream gone"}, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + err := formatHTTPError(c.status, []byte(c.body), olaresID, url) + if err == nil { + t.Fatalf("formatHTTPError(%d): want error, got nil", c.status) + } + msg := err.Error() + for _, s := range c.wantSubstrs { + if !strings.Contains(msg, s) { + t.Errorf("formatHTTPError(%d): want %q in %q", c.status, s, msg) + } + } + for _, s := range c.wantNotSubstrs { + if strings.Contains(msg, s) { + t.Errorf("formatHTTPError(%d): did NOT want %q in %q", c.status, s, msg) + } + } + }) + } +} + +func TestRenderListing(t *testing.T) { + fp, err := ParseFrontendPath("drive/Home/Documents/") + if err != nil { + t.Fatalf("ParseFrontendPath: %v", err) + } + parentMod := time.Date(2026, 4, 17, 11, 31, 51, 0, time.UTC) + + t.Run("empty shows header line then (empty)", func(t *testing.T) { + var buf bytes.Buffer + if err := renderListing(&buf, fp, listingResponse{ + Name: "Documents", + NumDirs: 0, + NumFiles: 0, + Modified: parentMod, + }); err 
!= nil { + t.Fatalf("renderListing: %v", err) + } + out := buf.String() + if strings.Contains(out, "MODE") || strings.Contains(out, "NAME") { + t.Errorf("empty listing should not print table header, got: %q", out) + } + if !strings.Contains(out, "drive/Home/Documents/") { + t.Errorf("expected requested path in header, got: %q", out) + } + if !strings.Contains(out, "0 dirs") || !strings.Contains(out, "0 files") { + t.Errorf("expected zero dir/file counts in header, got: %q", out) + } + if !strings.Contains(out, "(empty)") { + t.Errorf("missing (empty) marker in output: %q", out) + } + }) + + t.Run("counts pluralize correctly (1 dir, 1 file)", func(t *testing.T) { + var buf bytes.Buffer + if err := renderListing(&buf, fp, listingResponse{ + NumDirs: 1, + NumFiles: 1, + Modified: parentMod, + Items: []listingItem{ + {Name: "sub", IsDir: true, Mode: 0x80000000 | 0o755}, + {Name: "a.txt", IsDir: false, Mode: 0o644, Type: "text", Size: 12}, + }, + }); err != nil { + t.Fatalf("renderListing: %v", err) + } + out := buf.String() + if !strings.Contains(out, "1 dir,") || !strings.Contains(out, "1 file,") { + t.Errorf("expected singular forms '1 dir, 1 file' in header, got: %q", out) + } + }) + + t.Run("falls back to counting items when envelope counts are zero", func(t *testing.T) { + var buf bytes.Buffer + if err := renderListing(&buf, fp, listingResponse{ + // NumDirs/NumFiles intentionally zero; backend should still tell us + // what's in the dir via Items. 
+ Items: []listingItem{ + {Name: "x", IsDir: true}, + {Name: "y", IsDir: true}, + {Name: "z.txt", IsDir: false}, + }, + }); err != nil { + t.Fatalf("renderListing: %v", err) + } + out := buf.String() + if !strings.Contains(out, "2 dirs") || !strings.Contains(out, "1 file") { + t.Errorf("expected derived '2 dirs, 1 file' counts, got: %q", out) + } + }) + + t.Run("dirs first then files alphabetical, with mode + type columns", func(t *testing.T) { + mod := time.Date(2026, 4, 1, 12, 30, 0, 0, time.UTC) + items := []listingItem{ + {Name: "zebra.txt", IsDir: false, Size: 10, Modified: mod, Mode: 0o644, Type: "text"}, + {Name: "Alpha", IsDir: true, Modified: mod, Mode: 0x80000000 | 0o755}, + {Name: "beta.md", IsDir: false, Size: 1024, Modified: mod, Mode: 0o644, Type: "text"}, + {Name: "movie.mp4", IsDir: false, Size: 1 << 20, Modified: mod, Mode: 0o644, Type: "video"}, + {Name: "zeta", IsDir: true, Modified: mod, Mode: 0x80000000 | 0o755}, + } + var buf bytes.Buffer + if err := renderListing(&buf, fp, listingResponse{ + NumDirs: 2, + NumFiles: 3, + Modified: parentMod, + Items: items, + }); err != nil { + t.Fatalf("renderListing: %v", err) + } + out := buf.String() + + // Header line then column header then 5 rows. + if !strings.Contains(out, "MODE") || !strings.Contains(out, "TYPE") { + t.Errorf("expected MODE and TYPE columns, got: %q", out) + } + if !strings.Contains(out, "drwxr-xr-x") || !strings.Contains(out, "-rw-r--r--") { + t.Errorf("expected decoded mode strings, got: %q", out) + } + if !strings.Contains(out, "video") { + t.Errorf("expected 'video' type for movie.mp4, got: %q", out) + } + + // Order: drop the banner line and the column header, then check NAME col. 
+ lines := strings.Split(strings.TrimRight(out, "\n"), "\n") + if len(lines) < 7 { // banner + col header + 5 rows + t.Fatalf("expected banner + header + 5 rows, got %d lines: %q", len(lines), out) + } + names := make([]string, 0, len(lines)-2) + for _, ln := range lines[2:] { + fields := strings.Fields(ln) + names = append(names, fields[len(fields)-1]) + } + want := []string{"Alpha/", "zeta/", "beta.md", "movie.mp4", "zebra.txt"} + for i, n := range want { + if names[i] != n { + t.Errorf("row %d: name = %q, want %q (full output: %q)", i, names[i], n, out) + } + } + }) +} + +func TestFormatMode(t *testing.T) { + cases := []struct { + name string + mode uint32 + isDir bool + isSymlink bool + want string + }{ + {name: "regular 0644", mode: 0o644, want: "-rw-r--r--"}, + {name: "exec 0775", mode: 0o775, want: "-rwxrwxr-x"}, + {name: "directory 0755", mode: 0x80000000 | 0o755, want: "drwxr-xr-x"}, + {name: "directory weird high bits as observed live (FileMode 2147484141)", mode: 2147484141, want: "drwxr-xr-x"}, + {name: "fallback to flags when mode=0 and dir", mode: 0, isDir: true, want: "d---------"}, + {name: "fallback to flags when mode=0 and symlink", mode: 0, isSymlink: true, want: "L---------"}, + {name: "fallback to flags when mode=0 and regular", mode: 0, want: "----------"}, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + if got := formatMode(c.mode, c.isDir, c.isSymlink); got != c.want { + t.Errorf("formatMode(%d, dir=%v, sym=%v) = %q, want %q", c.mode, c.isDir, c.isSymlink, got, c.want) + } + }) + } +} + +func TestFormatType(t *testing.T) { + cases := []struct { + t string + isDir bool + want string + }{ + {t: "video", want: "video"}, + {t: "text", want: "text"}, + {t: "", want: "-"}, + {t: "", isDir: true, want: "-"}, + {t: "video", isDir: true, want: "-"}, + } + for _, c := range cases { + if got := formatType(c.t, c.isDir); got != c.want { + t.Errorf("formatType(%q, dir=%v) = %q, want %q", c.t, c.isDir, got, c.want) + } + } +} diff 
--git a/cli/cmd/ctl/files/path.go b/cli/cmd/ctl/files/path.go new file mode 100644 index 000000000..fb0518f59 --- /dev/null +++ b/cli/cmd/ctl/files/path.go @@ -0,0 +1,215 @@ +// Package files implements the `olares-cli files ...` command tree, which +// talks to the per-user files-backend (the upstream `files` repo) over its +// /api/resources REST surface. +// +// The files-backend models every resource as a 3-segment "front-end path": +// +// // +// +// where `fileType` selects the storage class, `extend` selects the concrete +// volume / repo / account inside that class, and `subPath` is the relative +// path inside that volume. See files/pkg/models/file_param.go (FileParam.convert) +// and files/pkg/common/constant.go for the canonical definitions. +// +// We expose the full path verbatim on the CLI surface — the user always +// types all three segments — so the tooling stays close to the protocol. +// path.go centralizes parsing & validation; commands like `ls`, `cat`, `cp` +// (the latter two land in Phase 2) all consume the resulting FrontendPath. +package files + +import ( + "fmt" + "net/url" + "path" + "sort" + "strings" +) + +// Known fileType values understood by the files-backend. +// Mirrors files/pkg/common/constant.go (Drive, Cache, Sync, External, AwsS3, +// GoogleDrive ("google"), DropBox, TencentCos ("tencent"), Share, Internal). +// The list is intentionally case-sensitive lowercase: the backend lowercases +// the first segment before matching, so we accept only the canonical form on +// input to avoid surprising behavior. +var knownFileTypes = map[string]struct{}{ + "drive": {}, + "cache": {}, + "sync": {}, + "external": {}, + "awss3": {}, + "dropbox": {}, + "google": {}, + "tencent": {}, + "share": {}, + "internal": {}, +} + +// driveExtends enumerates the only valid `extend` values when fileType=="drive". +// Backend enforcement: files/pkg/models/file_param.go convert() L59-61. 
+var driveExtends = map[string]struct{}{ + "Home": {}, + "Data": {}, +} + +// knownFileTypesList is a stable, sorted, comma-joined rendering of +// knownFileTypes, computed once so the (cold) error path doesn't allocate +// on every parse failure. Keep in sync with knownFileTypes above. +var knownFileTypesList = func() string { + out := make([]string, 0, len(knownFileTypes)) + for k := range knownFileTypes { + out = append(out, k) + } + sort.Strings(out) + return strings.Join(out, ", ") +}() + +// FrontendPath is the parsed view of a 3-segment files-backend front-end path. +// Construct via ParseFrontendPath; the zero value has no meaning. +type FrontendPath struct { + // FileType is the (always-lowercase) storage class: drive/cache/sync/... + FileType string + // Extend is the volume / repo / account selector. Its semantics depend on + // FileType (Home|Data for drive, node name for cache/external, repo_id for + // sync, account key for cloud drives, ...). CLI-side we only hard-validate + // the drive case; everything else is left to the backend. + Extend string + // SubPath is the path inside Extend, always starting with '/'. Root is "/". + // A trailing slash present in the input is preserved (the backend uses it + // as a "this is a directory" hint in some places). + SubPath string + // trailingSlash records whether the original input ended with '/'. It's + // preserved through String() so we don't accidentally drop the trailing + // slash that the backend's FileParam.convert() requires for directory + // listings (it splits on '/' and rejects len < 3). + trailingSlash bool +} + +// ParseFrontendPath parses a user-supplied path string into a FrontendPath. 
+// +// Examples: +// +// "drive/Home/" → {drive, Home, "/", trailingSlash} +// "drive/Home/Documents" → {drive, Home, "/Documents"} +// "drive/Home/Documents/" → {drive, Home, "/Documents/", trailingSlash} +// "sync//sub/dir" → {sync, , "/sub/dir"} +// "awss3///k.txt" → {awss3, , "//k.txt"} +// +// Validation: +// - Path must have at least 2 non-empty segments (fileType + extend). +// `drive/Home` (no trailing slash) is accepted by ParseFrontendPath but +// callers that hit /api/resources will need a trailing slash to satisfy +// the backend's len(split) >= 3 check; String() preserves it from the +// original input. +// - FileType must be a known value (case-sensitive lowercase). Unknown +// values fail fast on the client to avoid an opaque 500 from the server. +// - When FileType=="drive", Extend must be "Home" or "Data" (case-sensitive). +// - Other FileTypes' Extend values (node names, repo ids, account keys) are +// not pre-validated locally; the backend is the source of truth for those. +// - Path traversal segments like ".." are NOT stripped here — the backend +// applies its own sandboxing. We do collapse runs of "//" to a single "/" +// via path.Clean while preserving any user-supplied trailing slash. +func ParseFrontendPath(raw string) (FrontendPath, error) { + raw = strings.TrimSpace(raw) + if raw == "" { + return FrontendPath{}, fmt.Errorf("front-end path is empty; expected /[/] (e.g. drive/Home/, sync//)") + } + + hadTrailingSlash := strings.HasSuffix(raw, "/") + trimmed := strings.Trim(raw, "/") + parts := strings.Split(trimmed, "/") + // strings.Split never returns empty slice; guard against the all-empty + // case for defense in depth. + if len(parts) == 0 || parts[0] == "" { + return FrontendPath{}, fmt.Errorf("front-end path %q is empty after trimming slashes", raw) + } + if len(parts) < 2 { + return FrontendPath{}, fmt.Errorf("front-end path %q must have /[/] (got only %d segment(s); try e.g. 
%q)", + raw, len(parts), parts[0]+"//") + } + + fileType := parts[0] + if _, ok := knownFileTypes[fileType]; !ok { + return FrontendPath{}, fmt.Errorf("unknown fileType %q, expected one of: %s", + fileType, knownFileTypesList) + } + + extend := parts[1] + if extend == "" { + return FrontendPath{}, fmt.Errorf("front-end path %q has empty segment", raw) + } + if fileType == "drive" { + if _, ok := driveExtends[extend]; !ok { + return FrontendPath{}, fmt.Errorf("drive extend must be Home or Data (got %q)", extend) + } + } + + sub := "/" + if len(parts) > 2 { + // path.Clean collapses "//", strips trailing "/" — restore the latter + // from the caller's intent below. + sub = path.Clean("/" + strings.Join(parts[2:], "/")) + } + if hadTrailingSlash && !strings.HasSuffix(sub, "/") { + sub += "/" + } + + return FrontendPath{ + FileType: fileType, + Extend: extend, + SubPath: sub, + trailingSlash: hadTrailingSlash, + }, nil +} + +// String renders the canonical front-end path as `/`. +// SubPath always starts with '/', so the output naturally looks like +// "drive/Home/Documents" or "drive/Home/" for the root with a trailing slash. +// +// This is the human-readable form, suitable for error messages and logs. +// For URL construction use URLPath() — String() does NOT percent-encode. +func (p FrontendPath) String() string { + return p.FileType + "/" + p.Extend + p.SubPath +} + +// URLPath returns the same path as String() but with every segment +// percent-encoded via url.PathEscape, while preserving '/' separators and +// any trailing '/'. This matches the web app's handling +// (apps/packages/app/src/utils/encode.ts: encodeUrl) and is what the +// files-backend's /api/resources endpoint expects for filenames containing +// '#', '?', '+', spaces, '%', non-ASCII, etc. +// +// FileType is always one of the known lowercase tokens (no escaping needed +// in practice), but we run it through the same escape so callers get a +// single, predictable encoder. 
+func (p FrontendPath) URLPath() string { + subEscaped := escapeSubPath(p.SubPath) + return url.PathEscape(p.FileType) + "/" + url.PathEscape(p.Extend) + subEscaped +} + +// escapeSubPath percent-encodes each segment of SubPath while keeping the +// leading '/' and any trailing '/' (the trailing slash is the backend's +// "this is a directory" hint — see FileParam.convert in files-backend). +func escapeSubPath(sub string) string { + if sub == "" || sub == "/" { + return sub + } + trailing := strings.HasSuffix(sub, "/") + trimmed := strings.Trim(sub, "/") + if trimmed == "" { + return sub + } + parts := strings.Split(trimmed, "/") + for i, p := range parts { + parts[i] = url.PathEscape(p) + } + out := "/" + strings.Join(parts, "/") + if trailing { + out += "/" + } + return out +} + +// HasTrailingSlash reports whether the original input ended with '/'. Useful +// for callers that want to disambiguate "list this directory" from "fetch this +// resource by exact name" without re-parsing. +func (p FrontendPath) HasTrailingSlash() bool { return p.trailingSlash } diff --git a/cli/cmd/ctl/files/path_test.go b/cli/cmd/ctl/files/path_test.go new file mode 100644 index 000000000..fbff4ab65 --- /dev/null +++ b/cli/cmd/ctl/files/path_test.go @@ -0,0 +1,216 @@ +package files + +import ( + "strings" + "testing" +) + +func TestParseFrontendPath(t *testing.T) { + cases := []struct { + name string + input string + wantFileType string + wantExtend string + wantSubPath string + wantTrailing bool + wantString string + wantErrSubstr string + }{ + { + name: "drive Home root with trailing slash", + input: "drive/Home/", + wantFileType: "drive", + wantExtend: "Home", + wantSubPath: "/", + wantTrailing: true, + wantString: "drive/Home/", + }, + { + name: "drive Home subdir", + input: "drive/Home/Documents", + wantFileType: "drive", + wantExtend: "Home", + wantSubPath: "/Documents", + wantString: "drive/Home/Documents", + }, + { + name: "drive Home subdir with trailing slash preserved", + 
input: "drive/Home/Documents/", + wantFileType: "drive", + wantExtend: "Home", + wantSubPath: "/Documents/", + wantTrailing: true, + wantString: "drive/Home/Documents/", + }, + { + name: "drive Data root", + input: "drive/Data/", + wantFileType: "drive", + wantExtend: "Data", + wantSubPath: "/", + wantTrailing: true, + wantString: "drive/Data/", + }, + { + name: "sync repo", + input: "sync/abc-123-repo/sub/dir", + wantFileType: "sync", + wantExtend: "abc-123-repo", + wantSubPath: "/sub/dir", + wantString: "sync/abc-123-repo/sub/dir", + }, + { + name: "awss3 nested", + input: "awss3/myaccount/bucket/key.txt", + wantFileType: "awss3", + wantExtend: "myaccount", + wantSubPath: "/bucket/key.txt", + wantString: "awss3/myaccount/bucket/key.txt", + }, + { + name: "leading slash tolerated", + input: "/drive/Home/", + wantFileType: "drive", + wantExtend: "Home", + wantSubPath: "/", + wantTrailing: true, + wantString: "drive/Home/", + }, + { + name: "double slashes collapsed", + input: "drive/Home//Documents///nested", + wantFileType: "drive", + wantExtend: "Home", + wantSubPath: "/Documents/nested", + wantString: "drive/Home/Documents/nested", + }, + { + name: "empty", + input: "", + wantErrSubstr: "is empty", + }, + { + name: "only slashes", + input: "///", + wantErrSubstr: "empty after trimming", + }, + { + name: "single segment", + input: "drive", + wantErrSubstr: "must have /", + }, + { + name: "single segment with trailing slash", + input: "drive/", + wantErrSubstr: "must have /", + }, + { + name: "unknown fileType", + input: "foo/bar/", + wantErrSubstr: "unknown fileType", + }, + { + name: "drive bad extend", + input: "drive/Other/", + wantErrSubstr: "drive extend must be Home or Data", + }, + { + name: "uppercase fileType rejected", + input: "Drive/Home/", + wantErrSubstr: "unknown fileType", + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + got, err := ParseFrontendPath(c.input) + if c.wantErrSubstr != "" { + if err == nil { + 
t.Fatalf("ParseFrontendPath(%q): want error containing %q, got nil (parsed=%+v)", c.input, c.wantErrSubstr, got) + } + if !strings.Contains(err.Error(), c.wantErrSubstr) { + t.Fatalf("ParseFrontendPath(%q): want error containing %q, got %q", c.input, c.wantErrSubstr, err.Error()) + } + return + } + if err != nil { + t.Fatalf("ParseFrontendPath(%q): unexpected error: %v", c.input, err) + } + if got.FileType != c.wantFileType { + t.Errorf("FileType = %q, want %q", got.FileType, c.wantFileType) + } + if got.Extend != c.wantExtend { + t.Errorf("Extend = %q, want %q", got.Extend, c.wantExtend) + } + if got.SubPath != c.wantSubPath { + t.Errorf("SubPath = %q, want %q", got.SubPath, c.wantSubPath) + } + if got.HasTrailingSlash() != c.wantTrailing { + t.Errorf("HasTrailingSlash() = %v, want %v", got.HasTrailingSlash(), c.wantTrailing) + } + if s := got.String(); s != c.wantString { + t.Errorf("String() = %q, want %q", s, c.wantString) + } + }) + } +} + +func TestFrontendPathURLPath(t *testing.T) { + cases := []struct { + name string + input string + want string + }{ + { + name: "no special chars", + input: "drive/Home/Documents", + want: "drive/Home/Documents", + }, + { + name: "trailing slash preserved", + input: "drive/Home/Documents/", + want: "drive/Home/Documents/", + }, + { + name: "extend root", + input: "drive/Home/", + want: "drive/Home/", + }, + { + name: "filename with space", + input: "drive/Home/My Documents/notes.md", + want: "drive/Home/My%20Documents/notes.md", + }, + { + name: "filename with hash and question mark", + input: "drive/Home/a#b?c.txt", + want: "drive/Home/a%23b%3Fc.txt", + }, + { + name: "filename with plus and percent", + input: "drive/Home/100%/x+y.txt", + want: "drive/Home/100%25/x+y.txt", + }, + { + name: "non-ASCII filename", + input: "drive/Home/笔记/分享.md", + want: "drive/Home/%E7%AC%94%E8%AE%B0/%E5%88%86%E4%BA%AB.md", + }, + { + name: "slashes still act as separators", + input: "drive/Home/a/b/c", + want: "drive/Home/a/b/c", + }, + } + 
for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + fp, err := ParseFrontendPath(c.input) + if err != nil { + t.Fatalf("ParseFrontendPath(%q): unexpected error: %v", c.input, err) + } + if got := fp.URLPath(); got != c.want { + t.Errorf("URLPath() = %q, want %q", got, c.want) + } + }) + } +} diff --git a/cli/cmd/ctl/files/root.go b/cli/cmd/ctl/files/root.go new file mode 100644 index 000000000..dfae20aa3 --- /dev/null +++ b/cli/cmd/ctl/files/root.go @@ -0,0 +1,64 @@ +package files + +import ( + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/cmdutil" +) + +// NewFilesCommand returns the `files` parent command, ready to be added to +// the olares-cli root. +// +// Phase 1 surface (only one verb, intentionally minimal — Phase 2 adds +// cat / cp / mv / rm / mkdir): +// +// files ls /[/] [--json] +// +// The Factory is supplied by the root command so credential resolution and +// HTTP-client setup happen once per process — and so the global `--profile` +// flag wired up at the root can flow through here unchanged. +// +// See cmd/ctl/files/path.go for the front-end path schema and +// docs/notes/olares-cli-auth-profile-config.md for the broader Phase 1 +// design (this is the demo command that closes Phase 1). +func NewFilesCommand(f *cmdutil.Factory) *cobra.Command { + cmd := &cobra.Command{ + Use: "files", + Short: "interact with the per-user files-backend (list, ...)", + Long: `Talk to the Olares per-user files-backend over its /api/resources REST surface. 
+ +Every resource is addressed by a 3-segment "front-end path": + + /[/] + +where: + + fileType storage class: drive | cache | sync | external | + awss3 | dropbox | google | tencent | + share | internal + extend volume / repo / account inside that class: + drive -> Home or Data + cache -> node name + sync -> seafile repo id + cloud -> account key + subPath path inside (root if omitted) + +Examples: + + olares-cli files ls drive/Home/ + olares-cli files ls drive/Home/Documents + olares-cli files ls drive/Data/ + olares-cli files ls sync// +`, + } + for _, sub := range []*cobra.Command{ + NewLsCommand(f), + } { + // Same rationale as cmd/ctl/profile/root.go: bad creds / network / + // path-not-found errors are already actionable; don't bury them under + // a usage dump. + sub.SilenceUsage = true + cmd.AddCommand(sub) + } + return cmd +} diff --git a/cli/cmd/ctl/root.go b/cli/cmd/ctl/root.go index b801bb5f9..bee53b3b7 100755 --- a/cli/cmd/ctl/root.go +++ b/cli/cmd/ctl/root.go @@ -7,6 +7,7 @@ import ( "github.com/beclab/Olares/cli/cmd/ctl/amdgpu" "github.com/beclab/Olares/cli/cmd/ctl/app" "github.com/beclab/Olares/cli/cmd/ctl/disk" + "github.com/beclab/Olares/cli/cmd/ctl/files" "github.com/beclab/Olares/cli/cmd/ctl/gpu" "github.com/beclab/Olares/cli/cmd/ctl/node" "github.com/beclab/Olares/cli/cmd/ctl/os" @@ -14,6 +15,7 @@ import ( "github.com/beclab/Olares/cli/cmd/ctl/profile" "github.com/beclab/Olares/cli/cmd/ctl/user" "github.com/beclab/Olares/cli/cmd/ctl/wizard" + "github.com/beclab/Olares/cli/pkg/cmdutil" "github.com/beclab/Olares/cli/version" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -21,6 +23,11 @@ import ( func NewDefaultCommand() *cobra.Command { var showVendor bool + // One Factory per process. Subcommands that need an authenticated HTTP + // client (today: files; future: user/app/settings) all reach into this + // same instance so credential resolution and HTTPClient construction are + // memoized across verbs in the same invocation. 
+ factory := cmdutil.NewFactory() cobra.OnInitialize(func() { config.Init() }) @@ -44,6 +51,11 @@ func NewDefaultCommand() *cobra.Command { }, } cmds.Flags().BoolVar(&showVendor, "vendor", false, "show the vendor type of olares-cli") + // Persistent --profile flag binds straight onto the shared Factory so + // that subcommands which use it (factory.ResolveProfile) automatically + // honor the override without each having to re-declare the flag. + cmds.PersistentFlags().StringVar(&factory.ProfileOverride, "profile", "", + "olaresId of the profile to use (overrides the currently-selected one)") cmds.AddCommand(osinfo.NewCmdInfo()) cmds.AddCommand(os.NewOSCommands()...) @@ -55,6 +67,7 @@ func NewDefaultCommand() *cobra.Command { cmds.AddCommand(disk.NewDiskCommand()) cmds.AddCommand(app.NewAppCommand()) cmds.AddCommand(profile.NewProfileCommand()) + cmds.AddCommand(files.NewFilesCommand(factory)) return cmds } diff --git a/cli/pkg/cmdutil/factory.go b/cli/pkg/cmdutil/factory.go index 8df69aa55..df89b7b77 100644 --- a/cli/pkg/cmdutil/factory.go +++ b/cli/pkg/cmdutil/factory.go @@ -3,16 +3,18 @@ // config / etc. This is the olares-cli analogue of lark-cli's // cmdutil.Factory. // -// Phase 1 keeps the Factory deliberately minimal: lazily-resolved credential -// chain + Bearer-injecting HTTP client. Phase 2 will add automatic token -// refresh inside the same HTTPClient call without changing this surface. +// Phase 1 keeps the Factory deliberately minimal: a lazily-resolved +// credential chain plus an HTTP client whose RoundTripper injects the access +// token via the custom `X-Authorization` header (see authTransport for why +// that header, not the standard `Authorization: Bearer`). Phase 2 will add +// automatic token refresh inside the same HTTPClient call without changing +// this surface. package cmdutil import ( "context" "crypto/tls" "fmt" - "io" "net/http" "sync" "time" @@ -28,15 +30,10 @@ import ( // times reuses the same resolved profile + client. 
type Factory struct { // ProfileOverride, when non-empty, forces ResolveProfile to look up this - // profile instead of the currently-selected one. Wired from a global - // `--profile` / `--olares-id` flag at the root command. + // profile instead of the currently-selected one. Wired from the root + // command's persistent `--profile` flag. ProfileOverride string - // IOStreams (Phase 2) will live here so commands can write to a swappable - // io.Writer pair. For now we just expose a Stderr getter to avoid a hard - // dependency churn when we add it. - Stderr io.Writer - credentialOnce sync.Once credentialErr error credential *credential.CredentialProvider @@ -93,8 +90,9 @@ func (f *Factory) ResolveProfile(ctx context.Context) (*credential.ResolvedProfi } // HTTPClient returns an *http.Client whose RoundTripper transparently injects -// `Authorization: Bearer ` on every outbound request. The client -// also honors the active profile's InsecureSkipVerify flag. +// the active profile's access token on every outbound request via the custom +// `X-Authorization` header (see authTransport). The client also honors the +// active profile's InsecureSkipVerify flag. // // Phase 1: the token is fetched once at first call and reused until the // process exits. If it expires mid-run, requests will start returning 401 — @@ -114,25 +112,37 @@ func (f *Factory) HTTPClient(ctx context.Context) (*http.Client, error) { } f.client = &http.Client{ Timeout: 30 * time.Second, - Transport: &bearerTransport{base: base, token: rp.AccessToken}, + Transport: &authTransport{base: base, token: rp.AccessToken}, } }) return f.client, nil } -// bearerTransport injects Authorization: Bearer on outbound requests. -// It clones the request before mutating headers so the caller's *http.Request -// is left untouched (important when callers retry). -type bearerTransport struct { +// authTransport injects the access token via the custom `X-Authorization` +// header on outbound requests. 
It clones the request before mutating headers +// so the caller's *http.Request is left untouched (important when callers +// retry). +// +// Why X-Authorization (not the standard Authorization: Bearer)? +// Olares' edge stack — Authelia ext-authz wired through l4-bfl-proxy — +// inspects `X-Authorization` (and Cookie) to identify the user; see +// framework/l4-bfl-proxy/internal/translator/translator.go (RequestHeaders +// allow-list) and the BFL backend's +// framework/bfl/pkg/apiserver/filters.go (UserAuthorizationTokenKey = +// "X-Authorization"). The standard Authorization header is filtered out by +// the edge before it reaches per-user services, so X-Authorization is the +// only value that round-trips to the backend today. The web app does the +// same thing in apps/packages/app/src/platform/platformAjaxSender.ts. +type authTransport struct { base http.RoundTripper token string } -func (b *bearerTransport) RoundTrip(req *http.Request) (*http.Response, error) { - if b.token == "" { - return b.base.RoundTrip(req) +func (a *authTransport) RoundTrip(req *http.Request) (*http.Response, error) { + if a.token == "" { + return a.base.RoundTrip(req) } clone := req.Clone(req.Context()) - clone.Header.Set("Authorization", "Bearer "+b.token) - return b.base.RoundTrip(clone) + clone.Header.Set("X-Authorization", a.token) + return a.base.RoundTrip(clone) } diff --git a/cli/pkg/credential/default_provider.go b/cli/pkg/credential/default_provider.go index 330b3401f..8c8138edd 100644 --- a/cli/pkg/credential/default_provider.go +++ b/cli/pkg/credential/default_provider.go @@ -137,6 +137,7 @@ func buildResolved(profile *cliconfig.ProfileConfig, accessToken string, exp tim AuthURL: authURL, VaultURL: id.VaultURL(profile.LocalURLPrefix), DesktopURL: id.DesktopURL(profile.LocalURLPrefix), + FilesURL: id.FilesURL(profile.LocalURLPrefix), AccessToken: accessToken, InsecureSkipVerify: profile.InsecureSkipVerify, } diff --git a/cli/pkg/credential/types.go 
b/cli/pkg/credential/types.go index c29fb197c..f88c2427c 100644 --- a/cli/pkg/credential/types.go +++ b/cli/pkg/credential/types.go @@ -27,6 +27,7 @@ type ResolvedProfile struct { AuthURL string VaultURL string DesktopURL string + FilesURL string AccessToken string // ExpiresAt is the unix-seconds expiry decoded from AccessToken's `exp` diff --git a/cli/pkg/olares/id.go b/cli/pkg/olares/id.go index c4c024331..a8b15c4c5 100644 --- a/cli/pkg/olares/id.go +++ b/cli/pkg/olares/id.go @@ -86,3 +86,11 @@ func (id ID) VaultURL(localPrefix string) string { func (id ID) DesktopURL(localPrefix string) string { return fmt.Sprintf("https://desktop.%s%s", localPrefix, id.TerminusName()) } + +// FilesURL returns the per-user files-backend base URL, e.g. +// "https://files.alice.olares.com". Mirrors the web app's +// `getModuleSever('files')` derivation in +// apps/packages/app/src/stores/user.ts. +func (id ID) FilesURL(localPrefix string) string { + return fmt.Sprintf("https://files.%s%s", localPrefix, id.TerminusName()) +} From ce392322a45302c2ac02a52435c91e69fe06e82c Mon Sep 17 00:00:00 2001 From: Peng Peng Date: Sat, 25 Apr 2026 18:09:55 +0800 Subject: [PATCH 03/12] feat(cli): move profile tokens into the OS keychain (Phase 2) Replaces the Phase 1 plaintext ~/.olares-cli/tokens.json store with a per-OS keychain backend so refresh tokens no longer sit on disk in clear text. The auth.TokenStore surface is unchanged; production code now constructs a keychainStore via auth.NewTokenStore, while tests inject an in-memory fake through auth.NewTokenStoreWith. Backends (cli/internal/keychain): - darwin: system Keychain for the master key, AES-256-GCM file blobs under StorageDir; falls back to a file-only master key if the system keychain is unavailable (sandbox / CI). - linux/other: file-based master key + AES-256-GCM blobs under StorageDir, all 0600/0700. - windows: DPAPI-encrypted values under HKCU\Software\OlaresCli\keychain. 
Hardening / UX in the same change: - keychainStore.List tolerates a single corrupted entry (warn to stderr, skip) instead of aborting the whole `profile list`. - StorageDir falls back to os.TempDir with a stderr warning when UserHomeDir is unresolvable, so we never silently write to '/'. - keychain.Backend(service) reports the active backend label (system-keychain / file-fallback / file / registry+dpapi); printed after every successful login/import so users notice when they land on the file fallback. - keychain.PurgeService is invoked when the last profile is removed, cleaning up the master key + storage dir / registry subkey so we don't leave orphan secrets behind. - wrapError stays terse by default and only attaches the verbose troubleshooting hint when OLARES_CLI_DEBUG is set. Refactor: - AES-GCM constants and the safeFileName helper live in a single aesgcm.go (//go:build !windows) so darwin and linux can't drift on the on-disk envelope. - The in-memory keychain fake is promoted to its own keychainfake subpackage and shared by pkg/auth and cmd/ctl/profile tests. Docs in cli/internal/keychain/doc.go explain the deltas vs. lark-cli's upstream copy, including why olares-cli keeps the KeychainAccess interface + keychainfake (keychainStore has MarkInvalidated / List / InvalidatedAt semantics that need unit-test coverage). 
Made-with: Cursor --- cli/cmd/ctl/profile/credentials.go | 20 +- cli/cmd/ctl/profile/credentials_test.go | 12 +- cli/cmd/ctl/profile/import.go | 7 +- cli/cmd/ctl/profile/list.go | 5 +- cli/cmd/ctl/profile/login.go | 7 +- cli/cmd/ctl/profile/remove.go | 18 +- cli/cmd/ctl/profile/root.go | 7 +- cli/go.mod | 3 + cli/go.sum | 6 + cli/internal/keychain/aesgcm.go | 83 +++++ cli/internal/keychain/default.go | 25 ++ cli/internal/keychain/doc.go | 47 +++ cli/internal/keychain/keychain.go | 132 ++++++++ cli/internal/keychain/keychain_darwin.go | 306 ++++++++++++++++++ cli/internal/keychain/keychain_darwin_test.go | 174 ++++++++++ cli/internal/keychain/keychain_other.go | 172 ++++++++++ cli/internal/keychain/keychain_other_test.go | 116 +++++++ cli/internal/keychain/keychain_test.go | 68 ++++ cli/internal/keychain/keychain_windows.go | 204 ++++++++++++ cli/internal/keychain/keychainfake/fake.go | 96 ++++++ cli/pkg/auth/token_store.go | 187 +---------- cli/pkg/auth/token_store_keychain.go | 195 +++++++++++ cli/pkg/auth/token_store_keychain_test.go | 206 ++++++++++++ cli/pkg/cliconfig/config.go | 4 +- cli/pkg/cliconfig/paths.go | 27 +- cli/pkg/credential/default_provider.go | 13 +- 26 files changed, 1908 insertions(+), 232 deletions(-) create mode 100644 cli/internal/keychain/aesgcm.go create mode 100644 cli/internal/keychain/default.go create mode 100644 cli/internal/keychain/doc.go create mode 100644 cli/internal/keychain/keychain.go create mode 100644 cli/internal/keychain/keychain_darwin.go create mode 100644 cli/internal/keychain/keychain_darwin_test.go create mode 100644 cli/internal/keychain/keychain_other.go create mode 100644 cli/internal/keychain/keychain_other_test.go create mode 100644 cli/internal/keychain/keychain_test.go create mode 100644 cli/internal/keychain/keychain_windows.go create mode 100644 cli/internal/keychain/keychainfake/fake.go create mode 100644 cli/pkg/auth/token_store_keychain.go create mode 100644 cli/pkg/auth/token_store_keychain_test.go diff 
--git a/cli/cmd/ctl/profile/credentials.go b/cli/cmd/ctl/profile/credentials.go index 6cc1cfbd7..0c8c871aa 100644 --- a/cli/cmd/ctl/profile/credentials.go +++ b/cli/cmd/ctl/profile/credentials.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/beclab/Olares/cli/internal/keychain" "github.com/beclab/Olares/cli/pkg/auth" "github.com/beclab/Olares/cli/pkg/cliconfig" "github.com/beclab/Olares/cli/pkg/olares" @@ -203,10 +204,17 @@ func printSwitchNotice(res persistResult, newDisplayName string) { } } -// printPlaintextWarning is shown after every successful login / import to set -// expectations: Phase 1 stores tokens in clear text. Phase 2 will move them -// into the OS keychain. -func printPlaintextWarning() { - tokensPath, _ := cliconfig.TokensFile() - fmt.Printf("warning: token stored in plaintext at %s (mode 0600). OS keychain support is coming in a future release.\n", tokensPath) +// printStorageNotice is the post-login UX hint that names where the freshly +// minted access/refresh tokens just landed. We surface the backend label +// (system-keychain / file-fallback / file / registry+dpapi) so a user who +// expected the system keychain but landed on the sandbox/CI file fallback +// notices immediately — that has very different security implications. +// +// We deliberately keep the (service, account) pair instead of an on-disk +// path: the path can rotate with future layout changes, but the keychain +// coordinates are the stable contract. 
+func printStorageNotice(olaresID string) { + fmt.Printf("token stored via %s (service %q, account %q).\n", + keychain.Backend(keychain.OlaresCliService), + keychain.OlaresCliService, olaresID) } diff --git a/cli/cmd/ctl/profile/credentials_test.go b/cli/cmd/ctl/profile/credentials_test.go index 1a12e286c..8b22e5952 100644 --- a/cli/cmd/ctl/profile/credentials_test.go +++ b/cli/cmd/ctl/profile/credentials_test.go @@ -3,10 +3,16 @@ package profile import ( "testing" + "github.com/beclab/Olares/cli/internal/keychain/keychainfake" "github.com/beclab/Olares/cli/pkg/auth" "github.com/beclab/Olares/cli/pkg/cliconfig" ) +// staticProfileLister returns a fixed set of olaresIds for List() tests. +type staticProfileLister []string + +func (s staticProfileLister) ListOlaresIDs() ([]string, error) { return []string(s), nil } + // TestPersistTokenAndProfile_Switching exercises the auto-switch contract // added by the "login auto switch profile" plan. The behavior matrix lives // in docs/notes/olares-cli-auth-profile-config.md §7.3; this table mirrors @@ -112,11 +118,7 @@ func TestPersistTokenAndProfile_Switching(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { t.Setenv("OLARES_CLI_HOME", t.TempDir()) - path, err := cliconfig.TokensFile() - if err != nil { - t.Fatalf("TokensFile: %v", err) - } - store := auth.NewFileStoreAt(path) + store := auth.NewTokenStoreWith(keychainfake.New(), staticProfileLister{tc.newProfile.OlaresID}) cfg := &cliconfig.MultiProfileConfig{ Profiles: append([]cliconfig.ProfileConfig(nil), tc.seedProfiles...), diff --git a/cli/cmd/ctl/profile/import.go b/cli/cmd/ctl/profile/import.go index 43a1459bb..938d0ac05 100644 --- a/cli/cmd/ctl/profile/import.go +++ b/cli/cmd/ctl/profile/import.go @@ -66,10 +66,7 @@ func runImport(ctx context.Context, o *importOptions) error { if err != nil { return err } - store, err := auth.NewFileStore() - if err != nil { - return err - } + store := auth.NewTokenStore() profile, err := 
ensureProfileWritable(cfg, store, o.commonCredFlags, time.Now()) if err != nil { return err @@ -91,6 +88,6 @@ func runImport(ctx context.Context, o *importOptions) error { fmt.Printf("imported credentials for %s (profile: %s)\n", o.olaresID, profile.DisplayName()) printSwitchNotice(res, profile.DisplayName()) - printPlaintextWarning() + printStorageNotice(profile.OlaresID) return nil } diff --git a/cli/cmd/ctl/profile/list.go b/cli/cmd/ctl/profile/list.go index d1ed0b50d..935dda94b 100644 --- a/cli/cmd/ctl/profile/list.go +++ b/cli/cmd/ctl/profile/list.go @@ -53,10 +53,7 @@ func runList(out *os.File) error { return nil } - store, err := auth.NewFileStore() - if err != nil { - return err - } + store := auth.NewTokenStore() current := cfg.Current() now := time.Now() diff --git a/cli/cmd/ctl/profile/login.go b/cli/cmd/ctl/profile/login.go index 9bc41b9b6..80536a049 100644 --- a/cli/cmd/ctl/profile/login.go +++ b/cli/cmd/ctl/profile/login.go @@ -80,10 +80,7 @@ func runLogin(ctx context.Context, o *loginOptions) error { if err != nil { return err } - store, err := auth.NewFileStore() - if err != nil { - return err - } + store := auth.NewTokenStore() profile, err := ensureProfileWritable(cfg, store, o.commonCredFlags, time.Now()) if err != nil { return err @@ -113,7 +110,7 @@ func runLogin(ctx context.Context, o *loginOptions) error { fmt.Printf("logged in as %s (profile: %s)\n", o.olaresID, profile.DisplayName()) printSwitchNotice(res, profile.DisplayName()) - printPlaintextWarning() + printStorageNotice(profile.OlaresID) return nil } diff --git a/cli/cmd/ctl/profile/remove.go b/cli/cmd/ctl/profile/remove.go index 2da02b0b7..ad4c59edc 100644 --- a/cli/cmd/ctl/profile/remove.go +++ b/cli/cmd/ctl/profile/remove.go @@ -6,6 +6,7 @@ import ( "github.com/spf13/cobra" + "github.com/beclab/Olares/cli/internal/keychain" "github.com/beclab/Olares/cli/pkg/auth" "github.com/beclab/Olares/cli/pkg/cliconfig" ) @@ -45,10 +46,7 @@ func runRemove(key string) error { return 
fmt.Errorf("save config: %w", err) } - store, err := auth.NewFileStore() - if err != nil { - return err - } + store := auth.NewTokenStore() if err := store.Delete(removed.OlaresID); err != nil && !errors.Is(err, auth.ErrTokenNotFound) { // Non-fatal: config is already updated. fmt.Printf("warning: failed to clear stored token for %s: %v\n", removed.OlaresID, err) @@ -59,6 +57,18 @@ func runRemove(key string) error { fmt.Printf("current profile is now: %s\n", cfg.CurrentProfile) } else if len(cfg.Profiles) == 0 { fmt.Println("no profiles remain.") + // Last profile gone → no remaining account is keyed under our + // keychain service. Wipe the master key + storage dir so we don't + // leave orphan secrets / files / registry values that would show + // up in security tooling (Keychain Access.app, regedit, etc.) for + // no functional reason. The config is already saved at this point, + // so a purge failure is non-fatal — log + continue. + if err := keychain.PurgeService(keychain.OlaresCliService); err != nil { + fmt.Printf("warning: failed to purge keychain storage: %v\n", err) + } else { + fmt.Printf("cleaned up keychain storage for service %q.\n", + keychain.OlaresCliService) + } } return nil } diff --git a/cli/cmd/ctl/profile/root.go b/cli/cmd/ctl/profile/root.go index d150f05aa..20c887921 100644 --- a/cli/cmd/ctl/profile/root.go +++ b/cli/cmd/ctl/profile/root.go @@ -23,8 +23,11 @@ func NewProfileCommand() *cobra.Command { (identified by an olaresId such as "alice@olares.com") with the local credentials used to talk to it. -Phase 1 stores tokens in plaintext at ~/.olares-cli/tokens.json with 0600 -permissions; OS keychain support arrives in Phase 2.`, +Tokens are stored in the OS keychain (service "olares-cli", account = the +profile's olaresId): macOS Keychain on darwin, an AES-256-GCM file under +~/.local/share/olares-cli/ on linux, and DPAPI under +HKCU\Software\OlaresCli\keychain on windows. 
The plaintext +~/.olares-cli/tokens.json from earlier builds is no longer used.`, } for _, sub := range []*cobra.Command{ NewListCommand(), diff --git a/cli/go.mod b/cli/go.mod index 08cc8c111..b87b56875 100644 --- a/cli/go.mod +++ b/cli/go.mod @@ -40,6 +40,7 @@ require ( github.com/stretchr/testify v1.11.1 github.com/syndtr/goleveldb v1.0.0 github.com/tyler-smith/go-bip39 v1.1.0 + github.com/zalando/go-keyring v0.2.8 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.46.0 golang.org/x/sys v0.39.0 @@ -90,6 +91,7 @@ require ( github.com/containers/image/v5 v5.36.1 // indirect github.com/containers/storage v1.59.1 // indirect github.com/cyphar/filepath-securejoin v0.5.1 // indirect + github.com/danieljoos/wincred v1.2.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dlclark/regexp2 v1.11.5 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect @@ -115,6 +117,7 @@ require ( github.com/go-resty/resty/v2 v2.16.5 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gobwas/glob v0.2.4-0.20181002190808-e7a84e9525fe // indirect + github.com/godbus/dbus/v5 v5.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect diff --git a/cli/go.sum b/cli/go.sum index eaf44270e..574a0ab37 100644 --- a/cli/go.sum +++ b/cli/go.sum @@ -99,6 +99,8 @@ github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.5.1 h1:eYgfMq5yryL4fbWfkLpFFy2ukSELzaJOTaUTuh+oF48= github.com/cyphar/filepath-securejoin v0.5.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ= +github.com/danieljoos/wincred v1.2.3/go.mod 
h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -185,6 +187,8 @@ github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9L github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/glob v0.2.4-0.20181002190808-e7a84e9525fe h1:zn8tqiUbec4wR94o7Qj3LZCAT6uGobhEgnDRg6isG5U= github.com/gobwas/glob v0.2.4-0.20181002190808-e7a84e9525fe/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus/v5 v5.2.2 h1:TUR3TgtSVDmjiXOgAAyaZbYmIeP3DPkld3jgKGV8mXQ= +github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -486,6 +490,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zalando/go-keyring v0.2.8 h1:6sD/Ucpl7jNq10rM2pgqTs0sZ9V3qMrqfIIy5YPccHs= +github.com/zalando/go-keyring v0.2.8/go.mod h1:tsMo+VpRq5NGyKfxoBVjCuMrG47yj8cmakZDO5QGii0= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= diff --git 
a/cli/internal/keychain/aesgcm.go b/cli/internal/keychain/aesgcm.go new file mode 100644 index 000000000..e824a9300 --- /dev/null +++ b/cli/internal/keychain/aesgcm.go @@ -0,0 +1,83 @@ +//go:build !windows + +package keychain + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "os" + "regexp" +) + +// Shared AES-256-GCM + filename helpers for the file-backed platform +// implementations (currently darwin and linux). Windows uses DPAPI plus a +// base64 registry value name and does not import this file (the build tag +// gates it out). +// +// Centralising these guarantees the two file backends never drift on the +// crypto envelope (IV size, tag size, key size) or the on-disk filename +// scheme — a subtle drift there would silently strand existing users on the +// day someone touches just one platform's copy. + +// AES-256-GCM parameters. The blob layout is `iv || ciphertext || tag`, +// where the tag is appended to the ciphertext by aesGCM.Seal — that's why +// decryptData gates on len >= ivBytes+tagBytes. +const ( + masterKeyBytes = 32 + ivBytes = 12 + tagBytes = 16 +) + +// safeFileNameRe / safeFileName turn an arbitrary account name (which is +// the olaresId, e.g. "alice@olares.com" — note the '@' and '.' which we +// keep, plus any custom alias) into a filename safe to land on the FS. +// Anything outside the whitelist collapses to '_'. +var safeFileNameRe = regexp.MustCompile(`[^a-zA-Z0-9._-]`) + +func safeFileName(account string) string { + return safeFileNameRe.ReplaceAllString(account, "_") + ".enc" +} + +// encryptData seals plaintext under AES-256-GCM with a fresh random IV. 
+func encryptData(plaintext string, key []byte) ([]byte, error) { + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + aesGCM, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + iv := make([]byte, ivBytes) + if _, err := rand.Read(iv); err != nil { + return nil, err + } + ciphertext := aesGCM.Seal(nil, iv, []byte(plaintext), nil) + result := make([]byte, 0, ivBytes+len(ciphertext)) + result = append(result, iv...) + result = append(result, ciphertext...) + return result, nil +} + +// decryptData is the symmetric inverse of encryptData. +func decryptData(data []byte, key []byte) (string, error) { + if len(data) < ivBytes+tagBytes { + return "", os.ErrInvalid + } + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + aesGCM, err := cipher.NewGCM(block) + if err != nil { + return "", err + } + iv := data[:ivBytes] + ciphertext := data[ivBytes:] + plaintext, err := aesGCM.Open(nil, iv, ciphertext, nil) + if err != nil { + return "", err + } + return string(plaintext), nil +} diff --git a/cli/internal/keychain/default.go b/cli/internal/keychain/default.go new file mode 100644 index 000000000..96563a156 --- /dev/null +++ b/cli/internal/keychain/default.go @@ -0,0 +1,25 @@ +package keychain + +// defaultKeychain forwards KeychainAccess calls to the package-level Get/Set/ +// Remove functions, which in turn dispatch to the per-platform backend via +// build-tag-gated platformGet/Set/Remove implementations. +type defaultKeychain struct{} + +func (d *defaultKeychain) Get(service, account string) (string, error) { + return Get(service, account) +} + +func (d *defaultKeychain) Set(service, account, value string) error { + return Set(service, account, value) +} + +func (d *defaultKeychain) Remove(service, account string) error { + return Remove(service, account) +} + +// Default returns a KeychainAccess backed by the real platform keychain. 
It is +// the only place production code should construct a KeychainAccess; tests can +// substitute their own implementation when injected through this seam. +func Default() KeychainAccess { + return &defaultKeychain{} +} diff --git a/cli/internal/keychain/doc.go b/cli/internal/keychain/doc.go new file mode 100644 index 000000000..971e657a5 --- /dev/null +++ b/cli/internal/keychain/doc.go @@ -0,0 +1,47 @@ +// Package keychain provides cross-platform secure storage for olares-cli secrets +// (currently used for the per-olaresId access/refresh token grants written by +// the `profile login` and `profile import` commands). +// +// The implementation is adapted from larksuite/cli's internal/keychain package +// (same Get/Set/Remove surface, same per-platform strategy split): +// +// - macOS: a 32-byte AES-256 master key is kept in the system Keychain via +// github.com/zalando/go-keyring; per-secret data is AES-GCM encrypted and +// written to ~/Library/Application Support//.enc. +// If the system keychain is blocked (sandbox / CI) the master key falls +// back to an on-disk master.key.file (mode 0600) under the same dir, so +// the CLI keeps working at a Linux-equivalent security posture. +// - Linux: pure file-based AES-GCM. The master key lives at +// ~/.local/share//master.key (mode 0600); each secret lives at +// .enc next to it. Honors $OLARES_CLI_DATA_DIR when set to +// an absolute path. +// - Windows: DPAPI-protected blob (CryptProtectData/CryptUnprotectData) +// persisted under HKCU\Software\OlaresCli\keychain\, with +// deterministic entropy bound to (service, account) to thwart swap/replay. +// +// olares-cli keeps the package internal-only on purpose: its consumers all +// live inside this repo and the package-level Get/Set/Remove surface +// intentionally mirrors lark-cli so future security upgrades can be ported +// back without renaming dance. +// +// Olares-side adaptations vs. 
the upstream lark-cli copy: +// - dropped lark-cli's internal/vfs (we use stdlib os directly), +// internal/output (we return plain wrapped errors with the same hint), +// internal/validate (we do an explicit absolute-path check inline), +// - dropped auth_log.go (audit logging is out of scope for now; +// LogAuthError calls were removed), +// - service constant renamed to OlaresCliService = "olares-cli", +// - $LARKSUITE_CLI_DATA_DIR override renamed to $OLARES_CLI_DATA_DIR, +// - Windows registry root changed to Software\OlaresCli\keychain, +// - extra KeychainAccess interface + Default() seam + keychainfake +// subpackage exist on the olares-cli side, even though lark-cli has no +// equivalent. The reason is that olares-cli's keychainStore (in +// pkg/auth/token_store_keychain.go) carries semantics lark-cli's +// token_store.go does not — MarkInvalidated, List with +// ProfileLister-driven enumeration, and InvalidatedAt — and we want +// unit-test coverage for those without monkey-patching the platform +// seams. Tests inject keychainfake.New() through NewTokenStoreWith; +// production code only ever constructs the store via NewTokenStore +// which wires keychain.Default(). lark-cli gets away without this +// interface because its token_store.go has no unit tests of its own. +package keychain diff --git a/cli/internal/keychain/keychain.go b/cli/internal/keychain/keychain.go new file mode 100644 index 000000000..a8789e8f5 --- /dev/null +++ b/cli/internal/keychain/keychain.go @@ -0,0 +1,132 @@ +package keychain + +import ( + "errors" + "fmt" + "os" +) + +var ( + // ErrNotFound is returned when the requested credential is not found. + // platformGet implementations return ("", nil) for the common + // "not present" case; ErrNotFound is reserved for callers that want to + // turn that empty-string signal into a typed error and is also the + // sentinel that wrapError refuses to mask. 
+ ErrNotFound = errors.New("keychain: item not found") + + // errNotInitialized is the internal sentinel used when the master key is + // missing or invalid. It triggers a more specific operator hint in + // wrapError so users know to reset / reconfigure rather than blaming + // permissions. + errNotInitialized = errors.New("keychain not initialized") +) + +// OlaresCliService is the unified keychain service name for all olares-cli +// secrets. Per-secret records are distinguished by their account name, which +// today is always the bare olaresId (e.g. "alice@olares.com"). Mirrors +// lark-cli's `LarkCliService = "lark-cli"` design. +const OlaresCliService = "olares-cli" + +// debugEnv toggles the long, multi-line operator hint that wrapError used +// to attach unconditionally. Default-on hints made every error message +// mushroom past 200 chars; gating them behind this env var keeps everyday +// failures grep-friendly and reserves the verbose version for users who +// actually want it. +const debugEnv = "OLARES_CLI_DEBUG" + +// debugLookup is a package-level seam so tests can flip the hint on/off +// without writing to process env (which would race other tests in the +// same package). +var debugLookup = func() bool { return os.Getenv(debugEnv) != "" } + +// wrapError is the single funnel that turns underlying backend errors into +// user-facing messages. Returning ErrNotFound (or nil) is preserved +// verbatim so callers can use errors.Is on it. +// +// Default output format: +// +// keychain failed for /: +// +// With OLARES_CLI_DEBUG set, an actionable English hint is appended in +// parentheses. The two-tier hint (generic vs errNotInitialized) is +// preserved — only the gating changes. Including the (service, account) +// pair on every line means logs are grep-able to a specific keychain +// slot without needing the surrounding context. 
+func wrapError(op, service, account string, err error) error { + if err == nil || errors.Is(err, ErrNotFound) { + return err + } + + base := fmt.Errorf("keychain %s failed for %s/%s: %w", op, service, account, err) + if !debugLookup() { + return base + } + + hint := "Check whether the OS keychain / credential manager is locked or accessible. " + + "If you are running inside a sandbox or CI environment, ensure the process has " + + "permission to use the keychain — running outside the sandbox usually fixes it." + if errors.Is(err, errNotInitialized) { + hint = "The keychain master key may have been deleted or corrupted. " + + "Re-run `olares-cli profile login` (or `profile import`) to re-issue credentials. " + + "In sandboxed / CI environments, ensure the process can access the OS keychain." + } + return fmt.Errorf("%w (%s)", base, hint) +} + +// KeychainAccess abstracts Get/Set/Remove for dependency injection. Production +// code wires the package-level functions through Default(); tests can pass a +// fake to assert call patterns without touching the real OS keychain. +type KeychainAccess interface { + Get(service, account string) (string, error) + Set(service, account, value string) error + Remove(service, account string) error +} + +// Get retrieves a value from the keychain. Returns ("", nil) when the entry +// does not exist (mirrors lark-cli's contract; callers that prefer a typed +// "not found" should check len(value)==0 or wrap with ErrNotFound). +func Get(service, account string) (string, error) { + val, err := platformGet(service, account) + return val, wrapError("Get", service, account, err) +} + +// Set stores a value in the keychain, overwriting any existing entry. +func Set(service, account, data string) error { + return wrapError("Set", service, account, platformSet(service, account, data)) +} + +// Remove deletes an entry from the keychain. Removing a non-existent entry is +// a no-op and returns nil, matching lark-cli's behavior. 
+func Remove(service, account string) error { + return wrapError("Remove", service, account, platformRemove(service, account)) +} + +// Backend returns a short, machine-friendly identifier of the platform +// backend currently in effect for service. Values are stable strings that +// callers can include in user-facing notices and grep for in logs: +// +// - "system-keychain" — darwin, master key lives in the OS keychain +// - "file-fallback" — darwin, sandbox/CI path: master key on disk +// - "file" — linux, master key on disk under XDG dir +// - "registry+dpapi" — windows, registry value protected by DPAPI +// +// Knowing which backend is active matters because the security posture +// differs (system keychain prompts on access; file-fallback does not). The +// value is recomputed on every call so we always reflect the current +// on-disk state — relevant for tests that move files around mid-process. +func Backend(service string) string { return platformBackend(service) } + +// PurgeService wipes ALL keychain state owned by the given service: the +// per-account encrypted blobs AND the master key (system-keychain entry on +// darwin, on-disk file on darwin/linux, registry hive on windows). Designed +// to be called when the last olares-cli profile is removed so we don't +// leave orphan secrets / files / registry values that would surface in +// security tooling and confuse users. +// +// Errors are wrapped through the same wrapError funnel for consistency, +// but callers (currently `profile remove`) are expected to log + continue +// on failure rather than abort: the user-facing config has already been +// updated and a leftover encrypted blob without a master key is harmless. 
+func PurgeService(service string) error { + return wrapError("Purge", service, "*", platformPurge(service)) +} diff --git a/cli/internal/keychain/keychain_darwin.go b/cli/internal/keychain/keychain_darwin.go new file mode 100644 index 000000000..0be14ee1e --- /dev/null +++ b/cli/internal/keychain/keychain_darwin.go @@ -0,0 +1,306 @@ +//go:build darwin + +package keychain + +import ( + "context" + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/google/uuid" + "github.com/zalando/go-keyring" +) + +// keychainTimeout bounds system-keychain interactions to avoid hangs when the +// user dismisses (or never sees) the access prompt. +const keychainTimeout = 5 * time.Second + +// AES constants and crypto helpers (encryptData / decryptData) plus +// safeFileName live in aesgcm.go (build-tag !windows) so darwin and linux +// can't drift on the on-disk envelope. + +// fileMasterKeyName is the on-disk fallback master key used when the system +// keychain refuses access (sandbox / CI). Living next to the encrypted blobs +// at the same 0600 perms gives us a Linux-equivalent posture in that case. +const fileMasterKeyName = "master.key.file" + +// keyringGet / keyringSet are package-level seams so tests can simulate +// system-keychain behavior (ErrNotFound, blocked access, corrupted value) +// without touching the real macOS keychain. +var keyringGet = keyring.Get +var keyringSet = keyring.Set + +// StorageDir returns the absolute directory where per-service encrypted +// blobs live on macOS. When HOME can't be resolved (sandbox quirks, broken +// passwd entries) we land under os.TempDir() so the path is at least +// absolute — a relative ".olares-cli/keychain/..." would float with the +// process's cwd and could end up writing to /, which is the worst kind of +// silent surprise. 
+func StorageDir(service string) string { + home, err := os.UserHomeDir() + if err != nil || home == "" { + fallback := filepath.Join(os.TempDir(), "olares-cli", "keychain", service) + fmt.Fprintf(os.Stderr, + "warning: home directory unresolvable (%v); using fallback keychain dir %s\n", + err, fallback) + return fallback + } + return filepath.Join(home, "Library", "Application Support", service) +} + +// getMasterKey fetches the AES master key from the system keychain. The +// goroutine + timeout dance protects us from a hung permission prompt: when +// the system never replies within keychainTimeout we treat it as "blocked" +// rather than blocking the whole CLI. +// +// allowCreate gates the write path: Set() may create a fresh key, Get() may +// not (a missing key on read should surface as errNotInitialized so callers +// can re-login rather than silently get a useless empty value). +func getMasterKey(service string, allowCreate bool) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), keychainTimeout) + defer cancel() + + type result struct { + key []byte + err error + } + resCh := make(chan result, 1) + go func() { + defer func() { _ = recover() }() + + encodedKey, err := keyringGet(service, "master.key") + if err == nil { + key, decodeErr := base64.StdEncoding.DecodeString(encodedKey) + if decodeErr == nil && len(key) == masterKeyBytes { + resCh <- result{key: key, err: nil} + return + } + resCh <- result{key: nil, err: errors.New("keychain is corrupted")} + return + } else if !errors.Is(err, keyring.ErrNotFound) { + resCh <- result{key: nil, err: errors.New("keychain access blocked")} + return + } + + if !allowCreate { + resCh <- result{key: nil, err: errNotInitialized} + return + } + + key := make([]byte, masterKeyBytes) + if _, randErr := rand.Read(key); randErr != nil { + resCh <- result{key: nil, err: randErr} + return + } + encodedKeyStr := base64.StdEncoding.EncodeToString(key) + if setErr := keyringSet(service, "master.key", 
encodedKeyStr); setErr != nil { + resCh <- result{key: nil, err: setErr} + return + } + resCh <- result{key: key, err: nil} + }() + + select { + case res := <-resCh: + return res.key, res.err + case <-ctx.Done(): + return nil, errors.New("keychain access blocked") + } +} + +// getFileMasterKey is the on-disk fallback master key used when the system +// keychain is denied (sandbox / CI). Once a process has created it, future +// reads/writes prefer it over the system keychain (see platformGet / platformSet) +// so we never re-prompt the user for keychain access. +// +// The O_CREATE|O_EXCL + retry-read pattern handles the multi-process race +// where two CLI instances start simultaneously: whichever one loses the +// rename simply reads the winner's key. +func getFileMasterKey(service string, allowCreate bool) ([]byte, error) { + dir := StorageDir(service) + keyPath := filepath.Join(dir, fileMasterKeyName) + + key, err := os.ReadFile(keyPath) + if err == nil && len(key) == masterKeyBytes { + return key, nil + } + if err == nil && len(key) != masterKeyBytes { + return nil, errors.New("keychain is corrupted") + } + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, err + } + if !allowCreate { + return nil, errNotInitialized + } + if err := os.MkdirAll(dir, 0o700); err != nil { + return nil, err + } + key = make([]byte, masterKeyBytes) + if _, err := rand.Read(key); err != nil { + return nil, err + } + + file, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o600) + if err != nil { + if errors.Is(err, os.ErrExist) { + for i := 0; i < 3; i++ { + existingKey, readErr := os.ReadFile(keyPath) + if readErr == nil && len(existingKey) == masterKeyBytes { + return existingKey, nil + } + if readErr != nil { + return nil, readErr + } + if i < 2 { + time.Sleep(5 * time.Millisecond) + } + } + return nil, errors.New("keychain is corrupted") + } + return nil, err + } + + writeFailed := true + defer func() { + if writeFailed { + _ = os.Remove(keyPath) + } 
+ }() + if _, err := file.Write(key); err != nil { + _ = file.Close() + return nil, err + } + if err := file.Close(); err != nil { + return nil, err + } + writeFailed = false + + canonicalKey, err := os.ReadFile(keyPath) + if err != nil { + existingKey, readErr := os.ReadFile(keyPath) + if readErr == nil && len(existingKey) == masterKeyBytes { + return existingKey, nil + } + if readErr == nil && len(existingKey) != masterKeyBytes { + return nil, errors.New("keychain is corrupted") + } + return nil, err + } + if len(canonicalKey) != masterKeyBytes { + return nil, errors.New("keychain is corrupted") + } + return canonicalKey, nil +} + +// platformGet is the macOS implementation of Get. The dual-master-key fallback +// (file first, then system keychain) is intentional: once the process has +// previously fallen back to a file master key we keep using it on subsequent +// reads, otherwise the system keychain remains the source of truth. +func platformGet(service, account string) (string, error) { + path := filepath.Join(StorageDir(service), safeFileName(account)) + data, err := os.ReadFile(path) + if errors.Is(err, os.ErrNotExist) { + return "", nil + } + if err != nil { + return "", err + } + if key, ferr := getFileMasterKey(service, false); ferr == nil { + if plaintext, derr := decryptData(data, key); derr == nil { + return plaintext, nil + } + } + key, err := getMasterKey(service, false) + if err != nil { + return "", err + } + plaintext, err := decryptData(data, key) + if err != nil { + return "", err + } + return plaintext, nil +} + +// platformSet writes the encrypted blob via temp-file + rename so that a +// crashed write never corrupts the previous good value. The key-acquisition +// chain is: prefer existing file master key → try system keychain (creating +// if needed) → fall back to creating a new file master key. 
+func platformSet(service, account, data string) error { + key, err := getFileMasterKey(service, false) + if err != nil { + key, err = getMasterKey(service, true) + if err != nil { + key, err = getFileMasterKey(service, true) + if err != nil { + return err + } + } + } + dir := StorageDir(service) + if err := os.MkdirAll(dir, 0o700); err != nil { + return err + } + encrypted, err := encryptData(data, key) + if err != nil { + return err + } + + targetPath := filepath.Join(dir, safeFileName(account)) + tmpPath := filepath.Join(dir, safeFileName(account)+"."+uuid.New().String()+".tmp") + defer os.Remove(tmpPath) + + if err := os.WriteFile(tmpPath, encrypted, 0o600); err != nil { + return err + } + return os.Rename(tmpPath, targetPath) +} + +// platformRemove deletes the encrypted blob; the master key (file or system) +// is intentionally left in place because it may still encrypt other accounts. +func platformRemove(service, account string) error { + err := os.Remove(filepath.Join(StorageDir(service), safeFileName(account))) + if err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// platformPurge wipes everything owned by service on darwin: the master key +// in the system keychain (best-effort — may not exist if we only ever used +// file fallback), the master.key.file on disk, and the entire StorageDir +// (every per-account .enc blob). +// +// Order matters: drop the system-keychain entry FIRST. If that errors we +// still attempt the on-disk cleanup, because leaving a stray .enc file +// without a master key is the worst combination — visible to grep, useless +// to decrypt. 
+func platformPurge(service string) error { + var firstErr error + if err := keyring.Delete(service, "master.key"); err != nil && !errors.Is(err, keyring.ErrNotFound) { + firstErr = err + } + if err := os.RemoveAll(StorageDir(service)); err != nil && firstErr == nil { + firstErr = err + } + return firstErr +} + +// platformBackend reports which master-key path is currently authoritative +// on darwin. Presence of master.key.file is the precise signal that we +// previously fell back off the system keychain (sandbox/CI denial), and +// platformGet/platformSet keep using it from then on. So the file's +// presence — not whether the system keychain is currently reachable — +// is what determines the current backend. +func platformBackend(service string) string { + keyPath := filepath.Join(StorageDir(service), fileMasterKeyName) + if info, err := os.Stat(keyPath); err == nil && info.Size() == masterKeyBytes { + return "file-fallback" + } + return "system-keychain" +} diff --git a/cli/internal/keychain/keychain_darwin_test.go b/cli/internal/keychain/keychain_darwin_test.go new file mode 100644 index 000000000..d93a68435 --- /dev/null +++ b/cli/internal/keychain/keychain_darwin_test.go @@ -0,0 +1,174 @@ +//go:build darwin + +package keychain + +import ( + "encoding/base64" + "errors" + "os" + "path/filepath" + "testing" + + "github.com/zalando/go-keyring" +) + +// TestPlatformSetFallsBackToFileMasterKey verifies writes fall back to the +// on-disk master key when both reads (`keyringGet -> ErrNotFound`) and writes +// (`keyringSet -> blocked`) against the system keychain fail. This mirrors +// what happens inside a sandbox / CI runner where the user can't grant +// keychain access at all. 
+func TestPlatformSetFallsBackToFileMasterKey(t *testing.T) { + home := t.TempDir() + t.Setenv("HOME", home) + + origGet := keyringGet + origSet := keyringSet + keyringGet = func(service, user string) (string, error) { + return "", keyring.ErrNotFound + } + keyringSet = func(service, user, password string) error { + return errors.New("blocked") + } + t.Cleanup(func() { + keyringGet = origGet + keyringSet = origSet + }) + + service := "test-service" + account := "alice@olares.com" + secret := "secret-value" + + if err := platformSet(service, account, secret); err != nil { + t.Fatalf("platformSet() error = %v", err) + } + if _, err := os.Stat(filepath.Join(StorageDir(service), fileMasterKeyName)); err != nil { + t.Fatalf("file master key not created: %v", err) + } + got, err := platformGet(service, account) + if err != nil { + t.Fatalf("platformGet() error = %v", err) + } + if got != secret { + t.Fatalf("platformGet() = %q, want %q", got, secret) + } +} + +// TestPlatformGetPrefersFileMasterKey verifies that when both a file master +// key and a (different) system-keychain master key exist, decryption tries +// the file one first. This is the reason a sandboxed-then-unsandboxed CLI +// keeps reading its own writes instead of trying to decrypt them with the +// "newer" system key. 
+func TestPlatformGetPrefersFileMasterKey(t *testing.T) { + home := t.TempDir() + t.Setenv("HOME", home) + + fileKey := make([]byte, masterKeyBytes) + for i := range fileKey { + fileKey[i] = byte(i + 1) + } + keychainKey := make([]byte, masterKeyBytes) + for i := range keychainKey { + keychainKey[i] = byte(i + 33) + } + + origGet := keyringGet + origSet := keyringSet + keyringGet = func(service, user string) (string, error) { + return base64.StdEncoding.EncodeToString(keychainKey), nil + } + keyringSet = func(service, user, password string) error { return nil } + t.Cleanup(func() { + keyringGet = origGet + keyringSet = origSet + }) + + service := "test-service" + account := "alice@olares.com" + secret := "secret-value" + + dir := StorageDir(service) + if err := os.MkdirAll(dir, 0o700); err != nil { + t.Fatalf("MkdirAll() error = %v", err) + } + if err := os.WriteFile(filepath.Join(dir, fileMasterKeyName), fileKey, 0o600); err != nil { + t.Fatalf("write master key: %v", err) + } + encrypted, err := encryptData(secret, fileKey) + if err != nil { + t.Fatalf("encryptData() error = %v", err) + } + if err := os.WriteFile(filepath.Join(dir, safeFileName(account)), encrypted, 0o600); err != nil { + t.Fatalf("write secret: %v", err) + } + + got, err := platformGet(service, account) + if err != nil { + t.Fatalf("platformGet() error = %v", err) + } + if got != secret { + t.Fatalf("platformGet() = %q, want %q", got, secret) + } +} + +// TestPlatformSetPrefersExistingFileMasterKey verifies that once the file +// master key exists, subsequent writes never touch the system keychain. This +// guarantees no surprise prompts for users who first used the CLI in a +// sandboxed environment. 
+func TestPlatformSetPrefersExistingFileMasterKey(t *testing.T) { + home := t.TempDir() + t.Setenv("HOME", home) + + origGet := keyringGet + origSet := keyringSet + keyringGet = func(service, user string) (string, error) { + t.Fatalf("keyringGet should not be called when file master key exists") + return "", nil + } + keyringSet = func(service, user, password string) error { + t.Fatalf("keyringSet should not be called when file master key exists") + return nil + } + t.Cleanup(func() { + keyringGet = origGet + keyringSet = origSet + }) + + service := "test-service" + account := "alice@olares.com" + secret := "secret-value" + + dir := StorageDir(service) + if err := os.MkdirAll(dir, 0o700); err != nil { + t.Fatalf("MkdirAll() error = %v", err) + } + fileKey := make([]byte, masterKeyBytes) + for i := range fileKey { + fileKey[i] = byte(i + 1) + } + if err := os.WriteFile(filepath.Join(dir, fileMasterKeyName), fileKey, 0o600); err != nil { + t.Fatalf("write master key: %v", err) + } + + if err := platformSet(service, account, secret); err != nil { + t.Fatalf("platformSet() error = %v", err) + } + got, err := platformGet(service, account) + if err != nil { + t.Fatalf("platformGet() error = %v", err) + } + if got != secret { + t.Fatalf("platformGet() = %q, want %q", got, secret) + } +} + +// TestPlatformRemove_NotPresentIsNoop verifies removing a missing entry does +// not error — the contract Remove() advertises to callers (so `profile remove` +// can be idempotent on machines where login was never run). 
+func TestPlatformRemove_NotPresentIsNoop(t *testing.T) { + home := t.TempDir() + t.Setenv("HOME", home) + + if err := platformRemove("svc", "no-such-account"); err != nil { + t.Fatalf("platformRemove(missing) error = %v", err) + } +} diff --git a/cli/internal/keychain/keychain_other.go b/cli/internal/keychain/keychain_other.go new file mode 100644 index 000000000..0187fea77 --- /dev/null +++ b/cli/internal/keychain/keychain_other.go @@ -0,0 +1,172 @@ +//go:build linux + +package keychain + +import ( + "crypto/rand" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/google/uuid" +) + +// AES constants and crypto helpers (encryptData / decryptData) plus +// safeFileName live in aesgcm.go (build-tag !windows) so darwin and linux +// can't drift on the on-disk envelope. + +// dataDirEnv lets users (typically inside containers / nix builds / test +// rigs) relocate the encrypted store. Keeping the env var olares-specific +// avoids accidental clashes with lark-cli on machines that have both. +const dataDirEnv = "OLARES_CLI_DATA_DIR" + +// StorageDir returns the absolute directory for service-scoped encrypted +// blobs on Linux. The lookup chain is: +// +// 1. $OLARES_CLI_DATA_DIR if it's an absolute, cleanly-resolved path, +// 2. XDG-style ~/.local/share/, +// 3. an absolute fallback under os.TempDir() when HOME is unresolvable. +// Earlier versions returned ".local/share/" relative to CWD, +// which silently writes to wherever the user happened to invoke the +// CLI from (worst case: cwd=/). Anchoring to TempDir() guarantees the +// path is absolute even if it's not the canonical XDG location. 
+func StorageDir(service string) string { + if dir := os.Getenv(dataDirEnv); dir != "" { + if safeDir, ok := safeAbsoluteDir(dir); ok { + return filepath.Join(safeDir, service) + } + } + home, err := os.UserHomeDir() + if err != nil || home == "" { + fallback := filepath.Join(os.TempDir(), "olares-cli", "keychain", service) + fmt.Fprintf(os.Stderr, + "warning: home directory unresolvable (%v); using fallback keychain dir %s\n", + err, fallback) + return fallback + } + return filepath.Join(home, ".local", "share", service) +} + +// safeAbsoluteDir is a deliberately narrow alternative to lark-cli's +// validate.SafeEnvDirPath: we only accept already-absolute paths after a +// Clean (which collapses "..", "//"), so users can't trick us into landing +// next to the binary or inside a relative path that floats with cwd. +func safeAbsoluteDir(p string) (string, bool) { + cleaned := filepath.Clean(p) + if !filepath.IsAbs(cleaned) { + return "", false + } + return cleaned, true +} + +// getMasterKey reads (or, when allowCreate is true, generates) the per-service +// master key on disk. The temp-file + rename guards against torn writes when +// two processes race the first-time creation. 
+func getMasterKey(service string, allowCreate bool) ([]byte, error) { + dir := StorageDir(service) + keyPath := filepath.Join(dir, "master.key") + + key, err := os.ReadFile(keyPath) + if err == nil && len(key) == masterKeyBytes { + return key, nil + } + if err == nil && len(key) != masterKeyBytes { + return nil, errors.New("keychain is corrupted") + } + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, err + } + + if !allowCreate { + return nil, errNotInitialized + } + + if err := os.MkdirAll(dir, 0o700); err != nil { + return nil, err + } + key = make([]byte, masterKeyBytes) + if _, err := rand.Read(key); err != nil { + return nil, err + } + + tmpKeyPath := filepath.Join(dir, "master.key."+uuid.New().String()+".tmp") + defer os.Remove(tmpKeyPath) + + if err := os.WriteFile(tmpKeyPath, key, 0o600); err != nil { + return nil, err + } + if err := os.Rename(tmpKeyPath, keyPath); err != nil { + // Lost the race: another process won, read its key. + existingKey, readErr := os.ReadFile(keyPath) + if readErr == nil && len(existingKey) == masterKeyBytes { + return existingKey, nil + } + return nil, err + } + return key, nil +} + +func platformGet(service, account string) (string, error) { + path := filepath.Join(StorageDir(service), safeFileName(account)) + data, err := os.ReadFile(path) + if errors.Is(err, os.ErrNotExist) { + return "", nil + } + if err != nil { + return "", err + } + key, err := getMasterKey(service, false) + if err != nil { + return "", err + } + plaintext, err := decryptData(data, key) + if err != nil { + return "", err + } + return plaintext, nil +} + +func platformSet(service, account, data string) error { + key, err := getMasterKey(service, true) + if err != nil { + return err + } + dir := StorageDir(service) + if err := os.MkdirAll(dir, 0o700); err != nil { + return err + } + encrypted, err := encryptData(data, key) + if err != nil { + return err + } + + targetPath := filepath.Join(dir, safeFileName(account)) + tmpPath := 
filepath.Join(dir, safeFileName(account)+"."+uuid.New().String()+".tmp") + defer os.Remove(tmpPath) + + if err := os.WriteFile(tmpPath, encrypted, 0o600); err != nil { + return err + } + return os.Rename(tmpPath, targetPath) +} + +func platformRemove(service, account string) error { + err := os.Remove(filepath.Join(StorageDir(service), safeFileName(account))) + if err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// platformBackend on linux is always file-based (no system keychain +// integration in this build). Returned as a constant so callers don't have +// to special-case the OS. +func platformBackend(_ string) string { return "file" } + +// platformPurge wipes the entire service-scoped storage dir on linux: +// master.key + every per-account .enc blob. There is no system keychain +// entry to delete on this build. +func platformPurge(service string) error { + return os.RemoveAll(StorageDir(service)) +} diff --git a/cli/internal/keychain/keychain_other_test.go b/cli/internal/keychain/keychain_other_test.go new file mode 100644 index 000000000..9b6b1a875 --- /dev/null +++ b/cli/internal/keychain/keychain_other_test.go @@ -0,0 +1,116 @@ +//go:build linux + +package keychain + +import ( + "os" + "path/filepath" + "testing" +) + +// TestStorageDir_UsesValidatedDataDirEnv verifies the absolute-path branch +// of $OLARES_CLI_DATA_DIR after Clean: the supplied path normalizes ".." +// segments, but service isolation is preserved by the trailing join. 
+func TestStorageDir_UsesValidatedDataDirEnv(t *testing.T) { + base := t.TempDir() + base, _ = filepath.EvalSymlinks(base) + t.Setenv("OLARES_CLI_DATA_DIR", filepath.Join(base, "data", "..", "store")) + + got := StorageDir("svc") + want := filepath.Join(base, "store", "svc") + if got != want { + t.Fatalf("StorageDir() = %q, want %q", got, want) + } +} + +// TestStorageDir_InvalidDataDirFallsBackToDefault verifies that a non-absolute +// $OLARES_CLI_DATA_DIR is rejected and we fall back to the XDG default. We +// never want a relative dir floating with cwd. +func TestStorageDir_InvalidDataDirFallsBackToDefault(t *testing.T) { + home := t.TempDir() + home, _ = filepath.EvalSymlinks(home) + t.Setenv("OLARES_CLI_DATA_DIR", "relative-data") + t.Setenv("HOME", home) + + got := StorageDir("svc") + want := filepath.Join(home, ".local", "share", "svc") + if got != want { + t.Fatalf("StorageDir() = %q, want %q", got, want) + } +} + +// TestPlatformRoundTrip exercises the full Get/Set/Remove cycle on the file +// backend (which is what's actually shipped on Linux). This is the closest +// we can get to a smoke test without mocking keychain bits. 
+func TestPlatformRoundTrip(t *testing.T) { + home := t.TempDir() + home, _ = filepath.EvalSymlinks(home) + t.Setenv("OLARES_CLI_DATA_DIR", "") + t.Setenv("HOME", home) + + const ( + service = "olares-cli-test" + account = "alice@olares.com" + secret = `{"olaresId":"alice@olares.com","accessToken":"abc"}` + ) + + if got, err := platformGet(service, account); err != nil || got != "" { + t.Fatalf("platformGet on empty store = (%q, %v); want (\"\", nil)", got, err) + } + + if err := platformSet(service, account, secret); err != nil { + t.Fatalf("platformSet() error = %v", err) + } + got, err := platformGet(service, account) + if err != nil { + t.Fatalf("platformGet() error = %v", err) + } + if got != secret { + t.Fatalf("platformGet() = %q, want %q", got, secret) + } + + // File perms must stay 0600 — the encrypted blob is plaintext to anyone + // with read access to the master key sitting next to it. + encPath := filepath.Join(StorageDir(service), safeFileName(account)) + st, err := os.Stat(encPath) + if err != nil { + t.Fatalf("stat encrypted file: %v", err) + } + if mode := st.Mode().Perm(); mode != 0o600 { + t.Fatalf("encrypted file perms = %v, want 0600", mode) + } + + if err := platformRemove(service, account); err != nil { + t.Fatalf("platformRemove() error = %v", err) + } + if got, err := platformGet(service, account); err != nil || got != "" { + t.Fatalf("platformGet after Remove = (%q, %v); want (\"\", nil)", got, err) + } + if err := platformRemove(service, account); err != nil { + t.Fatalf("platformRemove(missing) = %v; want nil", err) + } +} + +// TestPlatformGet_CorruptedEncryptedBlob ensures that a tampered .enc file +// surfaces a decryption error rather than a silent empty return. 
+func TestPlatformGet_CorruptedEncryptedBlob(t *testing.T) {
+	home := t.TempDir()
+	home, _ = filepath.EvalSymlinks(home)
+	t.Setenv("OLARES_CLI_DATA_DIR", "")
+	t.Setenv("HOME", home)
+
+	service := "olares-cli-test"
+	account := "alice@olares.com"
+
+	if err := platformSet(service, account, "secret"); err != nil {
+		t.Fatalf("seed platformSet() error = %v", err)
+	}
+	encPath := filepath.Join(StorageDir(service), safeFileName(account))
+	if err := os.WriteFile(encPath, []byte("corrupt"), 0o600); err != nil {
+		t.Fatalf("corrupt blob write: %v", err)
+	}
+
+	// NOTE(review): this contract currently holds only for the file backend;
+	// the Windows registry backend collapses decode/DPAPI failures to
+	// "not found" (see registryGet) — confirm the asymmetry is intended.
+	if _, err := platformGet(service, account); err == nil {
+		t.Fatal("platformGet(corrupted) returned nil; want error")
+	}
+}
diff --git a/cli/internal/keychain/keychain_test.go b/cli/internal/keychain/keychain_test.go
new file mode 100644
index 000000000..9fe946a0d
--- /dev/null
+++ b/cli/internal/keychain/keychain_test.go
@@ -0,0 +1,68 @@
+package keychain
+
+import (
+	"errors"
+	"strings"
+	"testing"
+)
+
+// TestWrapError_PassThroughs locks down the two cases where wrapError must
+// NOT wrap: nil errors and ErrNotFound. A drift here would either spam
+// callers with bogus failures or break errors.Is checks.
+func TestWrapError_PassThroughs(t *testing.T) {
+	if got := wrapError("Get", "svc", "acct", nil); got != nil {
+		t.Errorf("wrapError(nil) = %v; want nil", got)
+	}
+	if got := wrapError("Get", "svc", "acct", ErrNotFound); !errors.Is(got, ErrNotFound) {
+		t.Errorf("wrapError(ErrNotFound) lost the sentinel: %v", got)
+	}
+}
+
+// TestWrapError_TerseDefault checks the default-mode invariant: the message
+// is short, names the (service, account) pair, includes the cause, and does
+// NOT include the long English hint the old format always emitted.
+func TestWrapError_TerseDefault(t *testing.T) {
+	// This test (and TestWrapError_DebugVerbose) swaps the package-level
+	// debugLookup seam, so neither may be marked t.Parallel.
+	prev := debugLookup
+	debugLookup = func() bool { return false }
+	defer func() { debugLookup = prev }()
+
+	cause := errors.New("boom")
+	got := wrapError("Get", "olares-cli", "alice@olares.com", cause)
+	msg := got.Error()
+
+	if !strings.Contains(msg, "olares-cli/alice@olares.com") {
+		t.Errorf("missing service/account in terse message: %q", msg)
+	}
+	if !strings.Contains(msg, "boom") {
+		t.Errorf("missing cause in terse message: %q", msg)
+	}
+	if strings.Contains(msg, "OS keychain / credential manager is locked") {
+		t.Errorf("verbose hint leaked into terse message: %q", msg)
+	}
+	if !errors.Is(got, cause) {
+		t.Errorf("wrapped error lost cause via errors.Is: %v", got)
+	}
+}
+
+// TestWrapError_DebugVerbose flips the seam to the debug branch and
+// confirms the long hint appears AND that errNotInitialized triggers the
+// dedicated re-login hint instead of the generic one.
+func TestWrapError_DebugVerbose(t *testing.T) {
+	prev := debugLookup
+	debugLookup = func() bool { return true }
+	defer func() { debugLookup = prev }()
+
+	cause := errors.New("boom")
+	msg := wrapError("Set", "olares-cli", "alice@olares.com", cause).Error()
+	if !strings.Contains(msg, "OS keychain / credential manager is locked") {
+		t.Errorf("expected generic hint in debug mode, got: %q", msg)
+	}
+
+	msg2 := wrapError("Get", "olares-cli", "alice@olares.com", errNotInitialized).Error()
+	if !strings.Contains(msg2, "master key may have been deleted or corrupted") {
+		t.Errorf("expected errNotInitialized hint, got: %q", msg2)
+	}
+	if strings.Contains(msg2, "OS keychain / credential manager is locked") {
+		t.Errorf("generic hint leaked into errNotInitialized branch: %q", msg2)
+	}
+}
diff --git a/cli/internal/keychain/keychain_windows.go b/cli/internal/keychain/keychain_windows.go
new file mode 100644
index 000000000..ba50add57
--- /dev/null
+++ b/cli/internal/keychain/keychain_windows.go
@@ -0,0 +1,204 @@
+//go:build windows
+
+package keychain
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+	"golang.org/x/sys/windows/registry"
+)
+
+// regRootPath is the HKCU subtree that holds all olares-cli-managed entries.
+// Each service nests one level deeper (registryPathForService); each account
+// becomes a value name inside the per-service key.
+const regRootPath = `Software\OlaresCli\keychain`
+
+// registryPathForService returns the per-service registry key path.
+func registryPathForService(service string) string {
+	return regRootPath + `\` + safeRegistryComponent(service)
+}
+
+var safeRegRe = regexp.MustCompile(`[^a-zA-Z0-9._-]`)
+
+// safeRegistryComponent strips characters that would either nest a registry
+// path unintentionally ('\\') or trip up registry tooling (anything outside
+// alphanumerics + ._-). The output is deterministic so existing entries keep
+// being addressable across CLI versions.
+//
+// NOTE(review): the mapping is lossy — distinct inputs (e.g. "a/b" vs "a_b")
+// collapse to the same component, so two different service names could share
+// one registry key. Harmless while the CLI uses a single fixed service name,
+// but worth confirming before more services are added.
+func safeRegistryComponent(s string) string {
+	s = strings.ReplaceAll(s, "\\", "_")
+	return safeRegRe.ReplaceAllString(s, "_")
+}
+
+// valueNameForAccount keeps the registry value name fully alphanumeric (URL-
+// safe base64). This avoids edge cases with '@' / '.' that olaresIds carry.
+func valueNameForAccount(account string) string {
+	return base64.RawURLEncoding.EncodeToString([]byte(account))
+}
+
+// dpapiEntropy binds the ciphertext to (service, account) so a copied blob
+// can't be decrypted under a different identity slot. DPAPI itself is keyed
+// to the Windows user, so the blob is also useless if exfiltrated to another
+// account on the same machine.
+func dpapiEntropy(service, account string) *windows.DataBlob {
+	data := []byte(service + "\x00" + account)
+	// The "\x00" separator guarantees len(data) >= 1, so this nil return is
+	// effectively unreachable — kept purely as a defensive guard.
+	if len(data) == 0 {
+		return nil
+	}
+	return &windows.DataBlob{Size: uint32(len(data)), Data: &data[0]}
+}
+
+// dpapiProtect seals plaintext under the current Windows user via
+// CryptProtectData (UI suppressed). The OS-allocated output buffer is copied
+// into Go-managed memory and released with LocalFree before returning.
+func dpapiProtect(plaintext []byte, entropy *windows.DataBlob) ([]byte, error) {
+	var in windows.DataBlob
+	if len(plaintext) > 0 {
+		in = windows.DataBlob{Size: uint32(len(plaintext)), Data: &plaintext[0]}
+	}
+	var out windows.DataBlob
+	err := windows.CryptProtectData(&in, nil, entropy, 0, nil, windows.CRYPTPROTECT_UI_FORBIDDEN, &out)
+	if err != nil {
+		return nil, err
+	}
+	defer freeDataBlob(&out)
+
+	if out.Data == nil || out.Size == 0 {
+		return []byte{}, nil
+	}
+	buf := unsafe.Slice(out.Data, int(out.Size))
+	res := make([]byte, len(buf))
+	copy(res, buf)
+	return res, nil
+}
+
+// dpapiUnprotect is the inverse of dpapiProtect; it fails unless called by
+// the same Windows user with matching (service, account) entropy.
+func dpapiUnprotect(ciphertext []byte, entropy *windows.DataBlob) ([]byte, error) {
+	var in windows.DataBlob
+	if len(ciphertext) > 0 {
+		in = windows.DataBlob{Size: uint32(len(ciphertext)), Data: &ciphertext[0]}
+	}
+	var out windows.DataBlob
+	err := windows.CryptUnprotectData(&in, nil, entropy, 0, nil, windows.CRYPTPROTECT_UI_FORBIDDEN, &out)
+	if err != nil {
+		return nil, err
+	}
+	defer freeDataBlob(&out)
+
+	if out.Data == nil || out.Size == 0 {
+		return []byte{}, nil
+	}
+	buf := unsafe.Slice(out.Data, int(out.Size))
+	res := make([]byte, len(buf))
+	copy(res, buf)
+	return res, nil
+}
+
+// freeDataBlob releases the DPAPI-allocated buffer per the contract that
+// CryptProtectData / CryptUnprotectData impose on their out-parameters.
+func freeDataBlob(b *windows.DataBlob) {
+	if b == nil || b.Data == nil {
+		return
+	}
+	_, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(b.Data)))
+	b.Data = nil
+	b.Size = 0
+}
+
+// platformGet returns the stored secret, or ("", nil) when no entry exists.
+func platformGet(service, account string) (string, error) {
+	v, ok := registryGet(service, account)
+	if !ok {
+		return "", nil
+	}
+	return v, nil
+}
+
+// platformSet DPAPI-seals data (entropy-bound to service/account) and writes
+// the base64-encoded ciphertext into the per-service HKCU key.
+func platformSet(service, account, data string) error {
+	entropy := dpapiEntropy(service, account)
+	protected, err := dpapiProtect([]byte(data), entropy)
+	if err != nil {
+		return fmt.Errorf("dpapi protect failed: %w", err)
+	}
+	return registrySet(service, account, protected)
+}
+
+// platformRemove deletes the account's registry value; missing entries are a
+// no-op, matching the cross-platform Remove contract.
+func platformRemove(service, account string) error {
+	return registryRemove(service, account)
+}
+
+// registryGet pulls the base64-DPAPI blob from HKCU and unwraps it.
+//
+// NOTE(review): every failure mode here (missing key/value, bad base64,
+// DPAPI refusal) collapses to ok=false, which platformGet reports as "not
+// present" — so on Windows a corrupted blob is indistinguishable from an
+// absent one, unlike the file backend which surfaces a decryption error.
+// Confirm this divergence is intended.
+func registryGet(service, account string) (string, bool) {
+	keyPath := registryPathForService(service)
+	k, err := registry.OpenKey(registry.CURRENT_USER, keyPath, registry.QUERY_VALUE)
+	if err != nil {
+		return "", false
+	}
+	defer k.Close()
+
+	b64, _, err := k.GetStringValue(valueNameForAccount(account))
+	if err != nil || b64 == "" {
+		return "", false
+	}
+	blob, err := base64.StdEncoding.DecodeString(b64)
+	if err != nil {
+		return "", false
+	}
+	entropy := dpapiEntropy(service, account)
+	plain, err := dpapiUnprotect(blob, entropy)
+	if err != nil {
+		return "", false
+	}
+	return string(plain), true
+}
+
+// registrySet creates the per-service key if needed and stores the sealed
+// blob as a base64 string value named after the account.
+func registrySet(service, account string, protected []byte) error {
+	keyPath := registryPathForService(service)
+	k, _, err := registry.CreateKey(registry.CURRENT_USER, keyPath, registry.SET_VALUE)
+	if err != nil {
+		return fmt.Errorf("registry create/open failed: %w", err)
+	}
+	defer k.Close()
+
+	b64 := base64.StdEncoding.EncodeToString(protected)
+	if err := k.SetStringValue(valueNameForAccount(account), b64); err != nil {
+		return fmt.Errorf("registry set failed: %w", err)
+	}
+	return nil
+}
+
+// registryRemove deletes one account's value; a missing key counts as done.
+func registryRemove(service, account string) error {
+	keyPath := registryPathForService(service)
+	k, err := registry.OpenKey(registry.CURRENT_USER, keyPath, registry.SET_VALUE)
+	if err != nil {
+		return nil
+	}
+	defer k.Close()
+	// DeleteValue's error is discarded: a missing value is success by the
+	// idempotent-Remove contract. NOTE(review): non-not-found failures are
+	// silently dropped here too — consider surfacing those.
+	_ = k.DeleteValue(valueNameForAccount(account))
+	return nil
+}
+
+// platformBackend on windows is always the per-user registry hive sealed
+// with DPAPI — there is no fallback path so this is a constant.
+func platformBackend(_ string) string { return "registry+dpapi" }
+
+// platformPurge removes the entire per-service registry key under HKCU,
+// which deletes every stored value (one per account) in one shot. DPAPI
+// material itself is not user-visible state — wiping the registry value
+// makes the ciphertext unreachable and OS-level cleanup is automatic.
+//
+// Missing keys are treated as success (idempotent purge): the registry
+// package surfaces ERROR_FILE_NOT_FOUND for a key that was never created,
+// which we map back to nil. Any other error bubbles up so wrapError can
+// add the (service, "*") tag.
+func platformPurge(service string) error {
+	keyPath := registryPathForService(service)
+	err := registry.DeleteKey(registry.CURRENT_USER, keyPath)
+	if err == nil {
+		return nil
+	}
+	if errors.Is(err, windows.ERROR_FILE_NOT_FOUND) {
+		return nil
+	}
+	return fmt.Errorf("registry delete %s: %w", keyPath, err)
+}
diff --git a/cli/internal/keychain/keychainfake/fake.go b/cli/internal/keychain/keychainfake/fake.go
new file mode 100644
index 000000000..23df0c2d4
--- /dev/null
+++ b/cli/internal/keychain/keychainfake/fake.go
@@ -0,0 +1,96 @@
+// Package keychainfake provides a process-local KeychainAccess implementation
+// for tests. Putting it in its own subpackage (rather than the main keychain
+// package) keeps the production binary free of test-only code while letting
+// every consumer of keychain.KeychainAccess share one canonical fake instead
+// of redeclaring memKeychain in each test file.
+//
+// Tests are expected to import this package as keychainfake and call New().
+// Because all fields on Fake are exported, individual tests can drive
+// failure modes (transient access denial, per-account get errors, set/remove
+// failures) without subclassing.
+package keychainfake
+
+import (
+	"sync"
+
+	"github.com/beclab/Olares/cli/internal/keychain"
+)
+
+// Fake is an in-memory KeychainAccess.
+//
+// Field semantics:
+//
+// - GetErr: when non-nil, every Get returns this error (after the
+//   PerAccountGetErr table is consulted). Lets a test simulate a fully
+//   blocked keychain.
+// - PerAccountGetErr: account → error. Consulted before GetErr so a single
+//   test can mark just one account as unreadable while the rest succeed —
+//   this is the precise shape of the "List() must tolerate one bad blob"
+//   contract in pkg/auth.
+// - SetErr / RmErr: equivalent globals for Set / Remove.
+// - GotKeys: ordered log of accounts queried via Get. Useful for asserting
+//   a code path didn't make redundant keychain lookups.
+//
+// The Mu protects every map / slice so the Fake is safe to share across
+// goroutines, mirroring the real OS-keychain backends which are also safe
+// for concurrent use.
+//
+// The zero Fake is NOT usable: Data is nil, so Set would panic on map
+// assignment. Always construct via New().
+type Fake struct {
+	Mu               sync.Mutex
+	Data             map[string]string
+	GetErr           error
+	PerAccountGetErr map[string]error
+	SetErr           error
+	RmErr            error
+	GotKeys          []string
+}
+
+// New returns an empty Fake ready for use as a keychain.KeychainAccess.
+func New() *Fake {
+	return &Fake{Data: map[string]string{}}
+}
+
+// Key composes the storage key from (service, account). Exported so tests
+// that want to seed Data directly (e.g. with intentionally corrupt JSON for
+// the "corrupted blob" branch) can compute the same composite key the
+// production lookups will use.
+func (f *Fake) Key(service, account string) string {
+	return service + "\x00" + account
+}
+
+// Get records the lookup in GotKeys (even when an injected error fires) and
+// returns the stored value; a missing key yields ("", nil), the same
+// "not present" convention the real platform backends use.
+func (f *Fake) Get(service, account string) (string, error) {
+	f.Mu.Lock()
+	defer f.Mu.Unlock()
+	f.GotKeys = append(f.GotKeys, account)
+	if err, ok := f.PerAccountGetErr[account]; ok {
+		return "", err
+	}
+	if f.GetErr != nil {
+		return "", f.GetErr
+	}
+	return f.Data[f.Key(service, account)], nil
+}
+
+func (f *Fake) Set(service, account, value string) error {
+	f.Mu.Lock()
+	defer f.Mu.Unlock()
+	if f.SetErr != nil {
+		return f.SetErr
+	}
+	f.Data[f.Key(service, account)] = value
+	return nil
+}
+
+// Remove deletes the entry; like the production backends it is a no-op when
+// the entry is absent.
+func (f *Fake) Remove(service, account string) error {
+	f.Mu.Lock()
+	defer f.Mu.Unlock()
+	if f.RmErr != nil {
+		return f.RmErr
+	}
+	delete(f.Data, f.Key(service, account))
+	return nil
+}
+
+// Compile-time check that Fake satisfies the production interface. If this
+// fails to compile after a keychain.KeychainAccess change, every fake-using
+// test in the tree breaks at the point of import — exactly what we want.
+var _ keychain.KeychainAccess = (*Fake)(nil)
diff --git a/cli/pkg/auth/token_store.go b/cli/pkg/auth/token_store.go
index d8cb97e2f..c7b39c7c9 100644
--- a/cli/pkg/auth/token_store.go
+++ b/cli/pkg/auth/token_store.go
@@ -1,19 +1,17 @@
 package auth
 
 import (
-	"encoding/json"
 	"errors"
-	"fmt"
-	"io/fs"
-	"os"
-	"path/filepath"
 	"time"
-
-	"github.com/beclab/Olares/cli/pkg/cliconfig"
 )
 
-// StoredToken is the per-olaresId record persisted to ~/.olares-cli/tokens.json
-// during Phase 1.
+// StoredToken is the per-olaresId record persisted by the CLI.
+//
+// Phase 2 backend: the entire StoredToken is JSON-serialized and stored as a
+// single keychain entry (service=keychain.OlaresCliService, account=olaresId).
+// The keychain backend is OS-specific — see cli/internal/keychain — and on
+// every supported OS the value lands encrypted at rest. Phase 1's plaintext
+// ~/.olares-cli/tokens.json is gone.
// // There is intentionally NO `ExpiresAt` field: AccessToken is a JWT and the // only authoritative expiry comes from decoding its `exp` claim via @@ -26,9 +24,9 @@ import ( // InvalidatedAt encodes server-side grant invalidation discovered by the // client (e.g. /api/refresh returning 401/403). 0 means valid (or expiry // has not yet been "discovered"); any value > 0 marks the entire grant -// (access_token + refresh_token) as unusable, even if the JWT's `exp` -// is still in the future. Phase 1 only DEFINES this field — no code path -// writes it. Phase 2's refreshWithLock is the writer. The only way to +// (access_token + refresh_token) as unusable, even if the JWT's `exp` is +// still in the future. Phase 1 only DEFINES this field — no code path +// writes it. Phase 2's refreshWithLock will write it. The only way to // clear it back to 0 is a successful `profile login` / `profile import` // (Set() defensively zeroes it). type StoredToken struct { @@ -40,16 +38,9 @@ type StoredToken struct { InvalidatedAt int64 `json:"invalidatedAt,omitempty"` // unix milliseconds; 0 = valid } -// tokensFile is the on-disk schema. Keyed by OlaresID for O(1) lookup; the -// nested OlaresID field on StoredToken is redundant but kept for self-describing -// dumps. -type tokensFile struct { - Tokens map[string]StoredToken `json:"tokens"` -} - -// TokenStore is the Phase 1 plaintext-JSON token backend. It is intentionally -// a tiny interface (Get/Set/Delete/List/MarkInvalidated) so that Phase 2 can -// swap in an OS keychain implementation behind the same surface. +// TokenStore abstracts the per-olaresId secret backend. Phase 2's only +// production implementation is keychainStore (cli/pkg/auth/token_store_keychain.go); +// tests can supply their own via NewTokenStoreWith. // // MarkInvalidated stamps an existing entry's InvalidatedAt without touching // other fields. Returns ErrTokenNotFound if no entry exists for olaresID. 
@@ -64,155 +55,3 @@ type TokenStore interface { // ErrTokenNotFound is returned when no token is stored for a given olaresId. var ErrTokenNotFound = errors.New("token not found") - -// fileStore is the default plaintext-JSON implementation of TokenStore. Reads -// and writes are sequential (no concurrency); Phase 2 will add flock for -// cross-process safety together with keychain. -type fileStore struct { - path string -} - -// NewFileStore creates a TokenStore backed by ~/.olares-cli/tokens.json (or -// the override resolved by cliconfig.TokensFile()). -func NewFileStore() (TokenStore, error) { - path, err := cliconfig.TokensFile() - if err != nil { - return nil, err - } - return &fileStore{path: path}, nil -} - -// NewFileStoreAt is exposed for tests; production code should call NewFileStore. -func NewFileStoreAt(path string) TokenStore { - return &fileStore{path: path} -} - -func (s *fileStore) load() (*tokensFile, error) { - data, err := os.ReadFile(s.path) - if err != nil { - if errors.Is(err, fs.ErrNotExist) { - return &tokensFile{Tokens: map[string]StoredToken{}}, nil - } - return nil, fmt.Errorf("read %s: %w", s.path, err) - } - if len(data) == 0 { - return &tokensFile{Tokens: map[string]StoredToken{}}, nil - } - tf := &tokensFile{} - if err := json.Unmarshal(data, tf); err != nil { - return nil, fmt.Errorf("parse %s: %w", s.path, err) - } - if tf.Tokens == nil { - tf.Tokens = map[string]StoredToken{} - } - return tf, nil -} - -func (s *fileStore) save(tf *tokensFile) error { - if _, err := cliconfig.EnsureHome(); err != nil { - return err - } - data, err := json.MarshalIndent(tf, "", " ") - if err != nil { - return fmt.Errorf("marshal tokens: %w", err) - } - return atomicWriteFile(s.path, data, 0o600) -} - -func (s *fileStore) Get(olaresID string) (*StoredToken, error) { - tf, err := s.load() - if err != nil { - return nil, err - } - tok, ok := tf.Tokens[olaresID] - if !ok { - return nil, ErrTokenNotFound - } - return &tok, nil -} - -func (s *fileStore) 
Set(token StoredToken) error { - if token.OlaresID == "" { - return errors.New("StoredToken.OlaresID is required") - } - tf, err := s.load() - if err != nil { - return err - } - // Defensive: a fresh grant always supersedes any prior invalidation - // stamp. Callers shouldn't be passing InvalidatedAt > 0 here, but if - // they do (or if they forget to clear it when overwriting), normalize. - token.InvalidatedAt = 0 - tf.Tokens[token.OlaresID] = token - return s.save(tf) -} - -func (s *fileStore) MarkInvalidated(olaresID string, at time.Time) error { - tf, err := s.load() - if err != nil { - return err - } - tok, ok := tf.Tokens[olaresID] - if !ok { - return ErrTokenNotFound - } - tok.InvalidatedAt = at.UnixMilli() - tf.Tokens[olaresID] = tok - return s.save(tf) -} - -func (s *fileStore) Delete(olaresID string) error { - tf, err := s.load() - if err != nil { - return err - } - if _, ok := tf.Tokens[olaresID]; !ok { - return ErrTokenNotFound - } - delete(tf.Tokens, olaresID) - return s.save(tf) -} - -func (s *fileStore) List() ([]StoredToken, error) { - tf, err := s.load() - if err != nil { - return nil, err - } - out := make([]StoredToken, 0, len(tf.Tokens)) - for _, t := range tf.Tokens { - out = append(out, t) - } - return out, nil -} - -// atomicWriteFile mirrors cliconfig.atomicWriteFile but is duplicated here to -// avoid an exported helper just for cross-package use. Both implementations -// must stay in sync. 
-func atomicWriteFile(path string, data []byte, perm os.FileMode) error {
-	dir := filepath.Dir(path)
-	tmp, err := os.CreateTemp(dir, ".tmp-*")
-	if err != nil {
-		return fmt.Errorf("create temp file in %s: %w", dir, err)
-	}
-	tmpName := tmp.Name()
-	cleanup := func() { _ = os.Remove(tmpName) }
-	if _, err := tmp.Write(data); err != nil {
-		_ = tmp.Close()
-		cleanup()
-		return fmt.Errorf("write temp file: %w", err)
-	}
-	if err := tmp.Chmod(perm); err != nil {
-		_ = tmp.Close()
-		cleanup()
-		return fmt.Errorf("chmod temp file: %w", err)
-	}
-	if err := tmp.Close(); err != nil {
-		cleanup()
-		return fmt.Errorf("close temp file: %w", err)
-	}
-	if err := os.Rename(tmpName, path); err != nil {
-		cleanup()
-		return fmt.Errorf("rename %s -> %s: %w", tmpName, path, err)
-	}
-	return nil
-}
diff --git a/cli/pkg/auth/token_store_keychain.go b/cli/pkg/auth/token_store_keychain.go
new file mode 100644
index 000000000..9e6a9c807
--- /dev/null
+++ b/cli/pkg/auth/token_store_keychain.go
@@ -0,0 +1,195 @@
+package auth
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"time"
+
+	"github.com/beclab/Olares/cli/internal/keychain"
+	"github.com/beclab/Olares/cli/pkg/cliconfig"
+)
+
+// listWarnSink receives one warning per profile whose token couldn't be
+// decoded during List(). Defaults to os.Stderr; tests overwrite it to capture
+// output without printing during `go test`.
+//
+// NOTE(review): this is unsynchronized package-level state — fine for the
+// sequential CLI, but concurrent List() calls would race with a test swap.
+var listWarnSink io.Writer = os.Stderr
+
+// ProfileLister enumerates the olaresIds the CLI knows about. The keychain
+// backend can't enumerate its own contents (that's a deliberate property of
+// every OS-keychain API: no globbing across accounts), so List() needs an
+// external index. cliconfig.MultiProfileConfig already serves that purpose.
+//
+// The interface is deliberately tiny so tests can swap it out without
+// pulling in the whole config layer, and so we avoid widening pkg/auth's
+// coupling to cliconfig beyond the single function it actually needs.
+type ProfileLister interface {
+	ListOlaresIDs() ([]string, error)
+}
+
+// cliconfigProfileLister is the production ProfileLister backed by
+// cliconfig.LoadMultiProfileConfig. A missing config file is treated as
+// "no profiles" (no error) so List() works on a clean machine.
+type cliconfigProfileLister struct{}
+
+func (cliconfigProfileLister) ListOlaresIDs() ([]string, error) {
+	cfg, err := cliconfig.LoadMultiProfileConfig()
+	if err != nil {
+		return nil, err
+	}
+	out := make([]string, 0, len(cfg.Profiles))
+	for _, p := range cfg.Profiles {
+		out = append(out, p.OlaresID)
+	}
+	return out, nil
+}
+
+// keychainStore implements TokenStore on top of an OS keychain. Each profile
+// gets exactly one keychain entry (service=OlaresCliService, account=olaresId)
+// whose value is the JSON-encoded StoredToken. We chose JSON-blob-per-account
+// (rather than splitting access/refresh into separate entries) so:
+// - the entry is atomic — no partial-write window where access exists but
+//   refresh is missing,
+// - rotating any field (SessionID, GrantedAt, InvalidatedAt) is one write,
+// - moving fields around in StoredToken doesn't require re-keying anything,
+// - List() only needs one keychain Get per profile, not two or three.
+type keychainStore struct {
+	kc       keychain.KeychainAccess
+	profiles ProfileLister
+}
+
+// NewTokenStore returns the production TokenStore: keychain.Default()
+// backend + cliconfig-driven profile enumeration.
+func NewTokenStore() TokenStore {
+	return &keychainStore{
+		kc:       keychain.Default(),
+		profiles: cliconfigProfileLister{},
+	}
+}
+
+// NewTokenStoreWith is the test seam: pass any KeychainAccess + ProfileLister.
+// Production code should call NewTokenStore.
+func NewTokenStoreWith(kc keychain.KeychainAccess, profiles ProfileLister) TokenStore {
+	return &keychainStore{kc: kc, profiles: profiles}
+}
+
+// Get returns the StoredToken for olaresID. The keychain backend signals
+// "not present" by returning ("", nil); we promote that to ErrTokenNotFound
+// so callers can use errors.Is uniformly across the interface.
+func (s *keychainStore) Get(olaresID string) (*StoredToken, error) {
+	if olaresID == "" {
+		return nil, errors.New("olaresID is required")
+	}
+	raw, err := s.kc.Get(keychain.OlaresCliService, olaresID)
+	if err != nil {
+		return nil, err
+	}
+	if raw == "" {
+		return nil, ErrTokenNotFound
+	}
+	var tok StoredToken
+	if err := json.Unmarshal([]byte(raw), &tok); err != nil {
+		return nil, fmt.Errorf("decode stored token for %s: %w", olaresID, err)
+	}
+	// Defense in depth: a corrupted blob that decodes but lost its OlaresID
+	// shouldn't masquerade as a different account's grant.
+	// NOTE(review): only the empty case is backfilled — a blob whose
+	// OlaresID differs from the requested account is returned unchanged;
+	// consider rejecting the mismatch explicitly.
+	if tok.OlaresID == "" {
+		tok.OlaresID = olaresID
+	}
+	return &tok, nil
+}
+
+// Set persists a fresh grant for token.OlaresID, overwriting any previous
+// value. As with the historical fileStore, we defensively zero InvalidatedAt
+// so a fresh write clears any prior invalidation stamp even if the caller
+// forgot to.
+func (s *keychainStore) Set(token StoredToken) error {
+	if token.OlaresID == "" {
+		return errors.New("StoredToken.OlaresID is required")
+	}
+	token.InvalidatedAt = 0
+	data, err := json.Marshal(token)
+	if err != nil {
+		return fmt.Errorf("encode stored token: %w", err)
+	}
+	return s.kc.Set(keychain.OlaresCliService, token.OlaresID, string(data))
+}
+
+// MarkInvalidated stamps an existing entry as unusable. Read-modify-write,
+// not atomic across processes; that's acceptable because (a) keychain writes
+// are racy by nature and (b) the worst-case is a stale stamp that another
+// process immediately re-clears with a successful Set.
+func (s *keychainStore) MarkInvalidated(olaresID string, at time.Time) error {
+	tok, err := s.Get(olaresID)
+	if err != nil {
+		return err
+	}
+	// Deliberately NOT routed through Set(): Set() zeroes InvalidatedAt,
+	// which would erase the very stamp we are writing here.
+	tok.InvalidatedAt = at.UnixMilli()
+	data, err := json.Marshal(tok)
+	if err != nil {
+		return fmt.Errorf("encode stored token: %w", err)
+	}
+	return s.kc.Set(keychain.OlaresCliService, olaresID, string(data))
+}
+
+// Delete removes the keychain entry for olaresID.
+//
+// Behavior contract (callers MUST handle this):
+//
+// - If no entry exists for olaresID, Delete returns ErrTokenNotFound. This
+//   is intentionally NOT a no-op at our layer — it gives callers a typed
+//   signal so flows like `profile remove` can distinguish "deleted" from
+//   "wasn't there to begin with" and decide whether to print a warning.
+//   Callers that prefer no-op semantics should filter via errors.Is(err,
+//   ErrTokenNotFound) (see cmd/ctl/profile/remove.go).
+// - The underlying keychain.Remove IS a no-op on missing entries, but we
+//   gate on a prior Get so that a successful return guarantees an entry
+//   was actually present and is now gone.
+// - Any backend error from Get / Remove (corrupted blob, locked keychain,
+//   transient permission failure) is surfaced verbatim — Delete does NOT
+//   swallow them.
+func (s *keychainStore) Delete(olaresID string) error {
+	if olaresID == "" {
+		return errors.New("olaresID is required")
+	}
+	if _, err := s.Get(olaresID); err != nil {
+		return err
+	}
+	return s.kc.Remove(keychain.OlaresCliService, olaresID)
+}
+
+// List returns every StoredToken whose olaresId appears in the config-side
+// profile index. Profiles without a stored token are silently skipped — that
+// matches the user-visible model (a profile can exist before it has been
+// authenticated).
+//
+// Per-entry read failures (corrupted blob, decode error, transient keychain
+// access denial) are degraded to a single stderr warning and the offending
+// entry is skipped. Aborting the entire list on one bad blob would make
+// `profile list` useless across all profiles whenever a single keychain
+// record gets damaged — surfacing the rest is strictly more useful.
+//
+// Only an error from the ProfileLister itself (i.e. we couldn't even find
+// out which olaresIds to look up) is returned to the caller.
+func (s *keychainStore) List() ([]StoredToken, error) {
+	ids, err := s.profiles.ListOlaresIDs()
+	if err != nil {
+		return nil, fmt.Errorf("enumerate profiles: %w", err)
+	}
+	out := make([]StoredToken, 0, len(ids))
+	for _, id := range ids {
+		tok, err := s.Get(id)
+		if err != nil {
+			if errors.Is(err, ErrTokenNotFound) {
+				continue
+			}
+			fmt.Fprintf(listWarnSink,
+				"warning: skipping stored token for %s: %v\n", id, err)
+			continue
+		}
+		out = append(out, *tok)
+	}
+	return out, nil
+}
diff --git a/cli/pkg/auth/token_store_keychain_test.go b/cli/pkg/auth/token_store_keychain_test.go
new file mode 100644
index 000000000..0d24d098f
--- /dev/null
+++ b/cli/pkg/auth/token_store_keychain_test.go
@@ -0,0 +1,206 @@
+package auth
+
+import (
+	"bytes"
+	"errors"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/beclab/Olares/cli/internal/keychain"
+	"github.com/beclab/Olares/cli/internal/keychain/keychainfake"
+)
+
+// staticLister is a fixed-slice ProfileLister for tests.
+type staticLister []string
+
+func (s staticLister) ListOlaresIDs() ([]string, error) { return []string(s), nil }
+
+// TestKeychainStore_RoundTrip locks down the basic Get/Set/Delete contract:
+// Set persists, Get round-trips, and Delete makes a subsequent Get return
+// ErrTokenNotFound (not a backend error).
+func TestKeychainStore_RoundTrip(t *testing.T) {
+	kc := keychainfake.New()
+	store := NewTokenStoreWith(kc, staticLister{"alice@olares.com"})
+
+	if _, err := store.Get("alice@olares.com"); !errors.Is(err, ErrTokenNotFound) {
+		t.Fatalf("Get on empty store = %v; want ErrTokenNotFound", err)
+	}
+
+	in := StoredToken{
+		OlaresID:     "alice@olares.com",
+		AccessToken:  "access-tok",
+		RefreshToken: "refresh-tok",
+		SessionID:    "sess",
+		GrantedAt:    time.Now().UnixMilli(),
+	}
+	if err := store.Set(in); err != nil {
+		t.Fatalf("Set: %v", err)
+	}
+
+	out, err := store.Get("alice@olares.com")
+	if err != nil {
+		t.Fatalf("Get: %v", err)
+	}
+	// Spot-checking the two token fields suffices: SessionID/GrantedAt ride
+	// in the same JSON blob, so a round-trip of these proves the whole write.
+	if out.AccessToken != in.AccessToken || out.RefreshToken != in.RefreshToken {
+		t.Errorf("round-trip mismatch: got %+v, want %+v", out, in)
+	}
+
+	if err := store.Delete("alice@olares.com"); err != nil {
+		t.Fatalf("Delete: %v", err)
+	}
+	if _, err := store.Get("alice@olares.com"); !errors.Is(err, ErrTokenNotFound) {
+		t.Fatalf("Get after Delete = %v; want ErrTokenNotFound", err)
+	}
+}
+
+// TestKeychainStore_SetClearsInvalidatedAt locks down the invariant the
+// Phase 1 fileStore had: a fresh Set() of a token with a non-zero
+// InvalidatedAt must NOT carry that stamp through to the keychain entry,
+// because Set() represents a successful fresh grant.
+func TestKeychainStore_SetClearsInvalidatedAt(t *testing.T) {
+	kc := keychainfake.New()
+	store := NewTokenStoreWith(kc, staticLister{"alice@olares.com"})
+
+	in := StoredToken{
+		OlaresID:      "alice@olares.com",
+		AccessToken:   "tok",
+		InvalidatedAt: time.Now().UnixMilli(),
+	}
+	if err := store.Set(in); err != nil {
+		t.Fatalf("Set: %v", err)
+	}
+	out, err := store.Get("alice@olares.com")
+	if err != nil {
+		t.Fatalf("Get: %v", err)
+	}
+	if out.InvalidatedAt != 0 {
+		t.Errorf("InvalidatedAt = %d, want 0 (Set must clear)", out.InvalidatedAt)
+	}
+}
+
+// TestKeychainStore_MarkInvalidated stamps the entry without touching other
+// fields, and is preserved across a subsequent Get.
+func TestKeychainStore_MarkInvalidated(t *testing.T) {
+	kc := keychainfake.New()
+	store := NewTokenStoreWith(kc, staticLister{"alice@olares.com"})
+
+	if err := store.Set(StoredToken{
+		OlaresID:    "alice@olares.com",
+		AccessToken: "tok",
+	}); err != nil {
+		t.Fatalf("Set: %v", err)
+	}
+
+	at := time.Now()
+	if err := store.MarkInvalidated("alice@olares.com", at); err != nil {
+		t.Fatalf("MarkInvalidated: %v", err)
+	}
+	out, err := store.Get("alice@olares.com")
+	if err != nil {
+		t.Fatalf("Get: %v", err)
+	}
+	if out.AccessToken != "tok" {
+		t.Errorf("AccessToken changed unexpectedly: %q", out.AccessToken)
+	}
+	// Compare at millisecond precision — UnixMilli is exactly what the
+	// store writes, so equality here is stable.
+	if out.InvalidatedAt != at.UnixMilli() {
+		t.Errorf("InvalidatedAt = %d, want %d", out.InvalidatedAt, at.UnixMilli())
+	}
+
+	// Marking a missing entry should report ErrTokenNotFound (NOT silently
+	// create one), so callers can distinguish "no grant ever existed" from
+	// "grant existed and was just stamped".
+	if err := store.MarkInvalidated("ghost@olares.com", at); !errors.Is(err, ErrTokenNotFound) {
+		t.Errorf("MarkInvalidated(missing) = %v; want ErrTokenNotFound", err)
+	}
+}
+
+// TestKeychainStore_List_SkipsMissing exercises the contract that List()
+// uses the ProfileLister as the index but tolerates missing keychain entries
+// (a profile may exist before login). It also confirms profiles whose token
+// IS present round-trip.
+func TestKeychainStore_List_SkipsMissing(t *testing.T) { + kc := keychainfake.New() + store := NewTokenStoreWith(kc, staticLister{ + "alice@olares.com", + "bob@olares.com", + }) + + if err := store.Set(StoredToken{OlaresID: "alice@olares.com", AccessToken: "a"}); err != nil { + t.Fatalf("seed Set: %v", err) + } + + got, err := store.List() + if err != nil { + t.Fatalf("List: %v", err) + } + if len(got) != 1 || got[0].OlaresID != "alice@olares.com" { + t.Fatalf("List = %+v, want exactly alice", got) + } +} + +// TestKeychainStore_List_TolerantToBadEntry verifies the resilience contract: +// a single profile whose stored blob is unreadable (decode error, transient +// keychain failure) must not abort `profile list` for everyone else. We +// inject one corrupted JSON blob and one per-account get error and expect +// the remaining healthy entry to come back, with one warning per casualty. +func TestKeychainStore_List_TolerantToBadEntry(t *testing.T) { + prevSink := listWarnSink + var warnings bytes.Buffer + listWarnSink = &warnings + defer func() { listWarnSink = prevSink }() + + kc := keychainfake.New() + store := NewTokenStoreWith(kc, staticLister{ + "alice@olares.com", + "bob@olares.com", + "carol@olares.com", + }) + + if err := store.Set(StoredToken{OlaresID: "alice@olares.com", AccessToken: "a"}); err != nil { + t.Fatalf("seed alice: %v", err) + } + // Bob's blob is present but undecodable JSON. This goes through Get's + // json.Unmarshal path — exactly the corruption mode we care about for + // the resilience invariant. + kc.Data[kc.Key(keychain.OlaresCliService, "bob@olares.com")] = "{not-json" + // Carol's read is gated by a transient keychain access denial (e.g. + // the OS keychain refused to unlock that one slot). Still must not + // abort the whole list. 
+ kc.PerAccountGetErr = map[string]error{ + "carol@olares.com": errors.New("keychain access denied"), + } + + got, err := store.List() + if err != nil { + t.Fatalf("List: %v", err) + } + if len(got) != 1 || got[0].OlaresID != "alice@olares.com" { + t.Fatalf("List = %+v, want exactly alice", got) + } + out := warnings.String() + if !strings.Contains(out, "bob@olares.com") || !strings.Contains(out, "carol@olares.com") { + t.Errorf("expected warnings naming bob and carol, got:\n%s", out) + } + if strings.Count(out, "warning:") != 2 { + t.Errorf("expected exactly 2 warnings, got:\n%s", out) + } +} + +// TestKeychainStore_UsesOlaresCliService asserts the (service, account) pair +// is the contractual one — service is fixed, account is the bare olaresId. +// If this ever drifts, every existing user re-loses their stored token. +func TestKeychainStore_UsesOlaresCliService(t *testing.T) { + kc := keychainfake.New() + store := NewTokenStoreWith(kc, staticLister{"alice@olares.com"}) + + if err := store.Set(StoredToken{ + OlaresID: "alice@olares.com", + AccessToken: "tok", + }); err != nil { + t.Fatalf("Set: %v", err) + } + wantKey := keychain.OlaresCliService + "\x00alice@olares.com" + if _, ok := kc.Data[wantKey]; !ok { + t.Fatalf("expected keychain entry at %q; have %+v", wantKey, kc.Data) + } +} diff --git a/cli/pkg/cliconfig/config.go b/cli/pkg/cliconfig/config.go index 75d69e039..3a9d2fa1c 100644 --- a/cli/pkg/cliconfig/config.go +++ b/cli/pkg/cliconfig/config.go @@ -25,8 +25,8 @@ type MultiProfileConfig struct { // user identity used to talk to it. The primary key is OlaresID; Name is an // optional alias users can pass to commands like `profile use `. // -// Tokens are NOT stored in this file — they live in tokens.json and are -// looked up by OlaresID. See pkg/auth.TokenStore. +// Tokens are NOT stored in this file — they live in the OS keychain +// (one entry per OlaresID; see cli/internal/keychain and pkg/auth.TokenStore). 
type ProfileConfig struct { // Name is an optional human-friendly alias. If empty, OlaresID is used as // the display name. diff --git a/cli/pkg/cliconfig/paths.go b/cli/pkg/cliconfig/paths.go index 06c2bf0c7..7761b19c3 100644 --- a/cli/pkg/cliconfig/paths.go +++ b/cli/pkg/cliconfig/paths.go @@ -1,5 +1,6 @@ // Package cliconfig owns the on-disk profile configuration of olares-cli -// (~/.olares-cli/config.json + ~/.olares-cli/tokens.json). +// (~/.olares-cli/config.json). Token secrets are NOT stored here — they +// live in the OS keychain via cli/internal/keychain. // // The package is named cliconfig (not "config") to avoid clashing with the // pre-existing cmd/config package, which serves a different purpose @@ -20,14 +21,15 @@ const homeEnv = "OLARES_CLI_HOME" // unset. const defaultDir = ".olares-cli" -// Filenames inside the config dir. -const ( - configFilename = "config.json" - tokensFilename = "tokens.json" -) +// configFilename is the only file this package owns. Token secrets used to +// live next to it as tokens.json (Phase 1, plaintext); Phase 2 moved them +// into the OS keychain — see cli/internal/keychain and +// cli/pkg/auth/token_store_keychain.go. +const configFilename = "config.json" -// Permissions for the config dir & files. tokens.json carries refresh tokens -// in plaintext during Phase 1; both files therefore use 0600 / 0700. +// Permissions for the config dir & file. config.json holds the profile index +// (no secrets) but we still keep it 0600 because it does carry the +// `currentProfile` selection and any auth-URL overrides. const ( dirPerm os.FileMode = 0o700 filePerm os.FileMode = 0o600 @@ -68,12 +70,3 @@ func ConfigFile() (string, error) { } return filepath.Join(dir, configFilename), nil } - -// TokensFile returns the absolute path to tokens.json (without creating it). 
-func TokensFile() (string, error) { - dir, err := Home() - if err != nil { - return "", err - } - return filepath.Join(dir, tokensFilename), nil -} diff --git a/cli/pkg/credential/default_provider.go b/cli/pkg/credential/default_provider.go index 8c8138edd..5a2b28f3c 100644 --- a/cli/pkg/credential/default_provider.go +++ b/cli/pkg/credential/default_provider.go @@ -33,15 +33,12 @@ type DefaultProvider struct { now func() time.Time } -// NewDefaultProvider opens the on-disk token store and returns a Provider -// suitable for normal CLI invocations. Returns an error only if the token -// store path itself can't be resolved (which usually means $HOME is broken). +// NewDefaultProvider returns a Provider backed by the keychain-backed token +// store. The error return is preserved from the Phase 1 file-store signature +// so future backends with non-trivial init (e.g. a remote sidecar) can opt +// in without re-touching every caller. func NewDefaultProvider() (Provider, error) { - store, err := auth.NewFileStore() - if err != nil { - return nil, err - } - return &DefaultProvider{store: store, now: time.Now}, nil + return &DefaultProvider{store: auth.NewTokenStore(), now: time.Now}, nil } // Name implements Provider. From 53c495148342e9619cd845f86b943982e85df9f4 Mon Sep 17 00:00:00 2001 From: Peng Peng Date: Sat, 25 Apr 2026 19:06:39 +0800 Subject: [PATCH 04/12] refactor(cli): align auth flow with TS onFirstFactor/loginTerminus shape Restructure cli/pkg/auth so it mirrors the two-tiered authentication pattern in apps/packages/app/src/utils/{account.ts,BindTerminusBusiness.ts} 1:1, and migrate the wizard package off its own duplicated copies of the salt math, cookie jar, and 2FA wiring. - pkg/auth.LoginRequest: drop the hallucinated TargetURL/SkipSecondFactor fields; add NeedTwoFactor (controls vault vs desktop targetURL only) and AcceptCookie (1:1 with TS `acceptCookie` arg). 
Docstrings cite the TS file + line numbers so the next change does not have to reverse- engineer ground truth. - pkg/auth.FirstFactor: new exported low-level primitive that POSTs /api/firstfactor and returns the raw token without inspecting `fa2`, matching TS onFirstFactor (account.ts L7-71). - pkg/auth.Login: rewritten as a thin wrapper around the shared firstFactorWithClient + optional /api/secondfactor/totp escalation, matching TS loginTerminus (BindTerminusBusiness.ts L353-446). Gate uses tok.FA2 only, deliberately diverging from TS's `tok.FA2 || needTwoFactor` because the CLI has no caller-side knowledge to defensively force 2FA and OR'ing would surface a spurious ErrTOTPRequired the moment a caller probes with the desktop targetURL. - pkg/wizard.UserBindTerminus: switch from auth.Login to auth.FirstFactor with NeedTwoFactor=false / AcceptCookie=false, mirroring TS L58-66. This restores `olares-cli wizard activate`, which broke when the unified auth.Login enforced a 2FA gate the original wizard flow intentionally bypassed (no MFA seed exists at signup time). - pkg/wizard.LoginTerminus: pass NeedTwoFactor through and set AcceptCookie=true to match TS L364-372; keep the eager-TOTP and ErrTOTPRequired retry paths that bridge the CLI to the MFA-store TOTP source. - profile login: pass NeedTwoFactor=true so /api/firstfactor uses the desktop targetURL and Authelia honestly reports fa2=true on 2FA-enabled accounts. With the previous vault targetURL the server silently downgraded to fa2=false and the TOTP prompt never fired. 
Made-with: Cursor --- cli/cmd/ctl/profile/login.go | 25 +++- cli/pkg/auth/login.go | 144 +++++++++++++++++------ cli/pkg/wizard/auth.go | 142 +++++----------------- cli/pkg/wizard/login_terminus.go | 194 ++++++++++--------------------- 4 files changed, 219 insertions(+), 286 deletions(-) diff --git a/cli/cmd/ctl/profile/login.go b/cli/cmd/ctl/profile/login.go index 80536a049..781f47298 100644 --- a/cli/cmd/ctl/profile/login.go +++ b/cli/cmd/ctl/profile/login.go @@ -92,11 +92,26 @@ func runLogin(ctx context.Context, o *loginOptions) error { } tok, err := loginWithTOTPPrompt(ctx, auth.LoginRequest{ - AuthURL: authURL, - LocalName: id.Local(), - TerminusName: terminusName, - Password: password, - TOTP: o.totp, + AuthURL: authURL, + LocalName: id.Local(), + TerminusName: terminusName, + Password: password, + TOTP: o.totp, + // NeedTwoFactor=true sends targetURL=desktop./ on + // /api/firstfactor so Authelia evaluates its 2FA access policy + // and reports `fa2=true` for accounts that actually have 2FA + // enabled. Sending the vault targetURL (NeedTwoFactor=false) + // would silently downgrade the response to fa2=false even on + // 2FA-enabled accounts, and the user would never be prompted + // for their TOTP code. This is the CLI's "probe" — auth.Login's + // gate still uses tok.FA2 only, so non-2FA accounts under the + // same desktop probe just succeed without a TOTP prompt. + NeedTwoFactor: true, + // AcceptCookie mirrors apps/packages/app/src/utils/BindTerminusBusiness.ts + // L368 (loginTerminus): the web UI always asks Authelia to set + // the session cookie because it follows up with + // /api/secondfactor/totp when fa2 fires. 
+ AcceptCookie: true, InsecureSkipVerify: o.insecureSkipVerify, }, o.olaresID) if err != nil { diff --git a/cli/pkg/auth/login.go b/cli/pkg/auth/login.go index e8fce10bd..4cb09c263 100644 --- a/cli/pkg/auth/login.go +++ b/cli/pkg/auth/login.go @@ -30,28 +30,58 @@ type Token struct { FA2 bool `json:"fa2,omitempty"` } -// LoginRequest captures everything Login needs to perform first-factor (and, -// if needed, second-factor TOTP) authentication. +// LoginRequest is the input shared by FirstFactor (low-level, equivalent +// to the TS onFirstFactor primitive) and Login (high-level wrapper around +// FirstFactor + optional /api/secondfactor/totp, equivalent to the TS +// loginTerminus flow). // -// AuthURL is the Olares auth subdomain base, e.g. "https://auth.alice.olares.com". +// Field semantics mirror the TS web reference 1:1; if you change anything +// here, also re-read those two TS functions and keep them aligned: +// +// - apps/packages/app/src/utils/account.ts L7-71 (onFirstFactor) +// - apps/packages/app/src/utils/BindTerminusBusiness.ts L353-446 (loginTerminus) +// +// AuthURL is the Olares auth base, e.g. "https://auth.alice.olares.com". // The CLI POSTs to AuthURL + "/api/firstfactor" and AuthURL + "/api/secondfactor/totp". // // LocalName is the bare username (the part before `@` of the olaresId). // The web app uses this as `username` in the request body. // -// TerminusName is "."; it's used to construct the second-factor -// `targetUrl` field (https://desktop./) which the auth backend -// echoes back as the redirect target. +// TerminusName is "."; it's only used to derive the +// `targetURL` form field (vault./server by default, +// desktop./ when NeedTwoFactor is true) and the second-factor +// `targetUrl`. +// +// TOTP is optional — supply it when the account has 2FA enabled. Login +// returns ErrTOTPRequired when 2FA is needed (tok.FA2 || NeedTwoFactor) +// but TOTP is empty. FirstFactor never reads TOTP. 
+// +// NeedTwoFactor mirrors the `needTwoFactor` parameter on TS onFirstFactor: +// when true, swap targetURL from `vault./server` to +// `desktop./`. This is the ONLY thing it does in the Go API. +// +// Authelia's per-URL access policy is what makes `fa2` flip to true in +// the response — the vault URL maps to a 1FA policy, the desktop URL +// maps to a 2FA policy. Callers that want the server to honestly tell +// them whether the account has 2FA enabled (e.g. `profile login`'s +// initial probe) MUST pass NeedTwoFactor=true so Authelia evaluates the +// 2FA policy. NeedTwoFactor does NOT participate in Login's escalation +// gate — Login uses `tok.FA2` from the server only; see Login's doc for +// why we diverge from TS's `tok.FA2 || needTwoFactor` here. // -// TOTP is optional — supply it when the account has 2FA enabled. If the -// first-factor response indicates FA2 is required and TOTP is empty, Login -// returns ErrTOTPRequired so the caller can prompt and retry. +// AcceptCookie mirrors the `acceptCookie` parameter on TS onFirstFactor; +// it is passed through verbatim into the request body. callers known to +// follow up with /api/secondfactor/totp pass true (so Authelia sets the +// session cookie that the second-factor request needs); the +// activation/signup caller (cli/pkg/wizard.UserBindTerminus) passes false. type LoginRequest struct { AuthURL string LocalName string TerminusName string Password string TOTP string + NeedTwoFactor bool + AcceptCookie bool InsecureSkipVerify bool Timeout time.Duration // zero → 10s default } @@ -61,25 +91,55 @@ type LoginRequest struct { // (e.g. `profile login`) can prompt the user and call Login again with TOTP set. var ErrTOTPRequired = errors.New("two-factor authentication is required: re-run with --totp ") -// Login executes the password login flow: -// 1. POST /api/firstfactor with the salted-MD5 password. -// 2. 
If the response says fa2 is required, POST /api/secondfactor/totp with -// the supplied TOTP code (or return ErrTOTPRequired if none was given). +// FirstFactor performs a single POST /api/firstfactor and returns the raw +// token. Mirrors apps/packages/app/src/utils/account.ts:onFirstFactor (L7-71) +// 1:1: it does NOT inspect or act on the response's `fa2` flag — choosing +// whether to escalate to /api/secondfactor/totp is the caller's job. // -// On success the returned Token contains the freshly minted access_token and -// refresh_token (the second-factor response overrides them when present). +// Two callers exist today: // -// The function uses a short-lived http.Client with a cookie jar so that the -// Authelia session cookie set on /api/firstfactor is automatically attached -// to /api/secondfactor/totp — mirroring `withCredentials: true` in the web -// implementation in apps/packages/app/src/utils/BindTerminusBusiness.ts. +// - Login (this file) wraps FirstFactor and does the +// `(tok.FA2 || NeedTwoFactor)` escalation, mirroring TS loginTerminus. +// - cli/pkg/wizard.UserBindTerminus uses FirstFactor directly and +// ignores fa2, mirroring TS userBindTerminus — at signup time there is +// no MFA seed yet, so the 1st-factor access_token is what the +// subsequent signup endpoints need. +func FirstFactor(ctx context.Context, req LoginRequest) (*Token, error) { + if err := validateLoginRequest(req); err != nil { + return nil, err + } + client := newHTTPClient(req.Timeout, req.InsecureSkipVerify) + return firstFactorWithClient(ctx, client, req) +} + +// Login executes the full password login flow: +// +// 1. POST /api/firstfactor with the salted-MD5 password (via FirstFactor). +// 2. If the server reports `tok.FA2`, POST /api/secondfactor/totp with the +// supplied TOTP code (or return ErrTOTPRequired if none was given). 
+// +// Mirrors apps/packages/app/src/utils/BindTerminusBusiness.ts:loginTerminus +// (L353-446), with one deliberate divergence: the gate is `tok.FA2` only, +// not the TS `tok.FA2 || needTwoFactor`. The TS code OR's in +// `needTwoFactor` so the web UI can *force* 2FA when it locally knows the +// user has it but the server hasn't reported it (defensive UI-state +// pattern). The CLI has no such caller-side knowledge — it can only +// trust whatever the server says — and gating on the OR would make +// non-2FA users (who get fa2=false) hit a spurious ErrTOTPRequired the +// moment a caller passes NeedTwoFactor=true to probe with the desktop +// targetURL (e.g. `profile login`). +// +// FirstFactor and the optional second-factor POST share a single +// http.Client (with cookie jar) so the Authelia session cookie set on +// /api/firstfactor automatically attaches to /api/secondfactor/totp, +// mirroring `withCredentials: true` in the TS axios instance. func Login(ctx context.Context, req LoginRequest) (*Token, error) { if err := validateLoginRequest(req); err != nil { return nil, err } client := newHTTPClient(req.Timeout, req.InsecureSkipVerify) - tok, err := postFirstFactor(ctx, client, req) + tok, err := firstFactorWithClient(ctx, client, req) if err != nil { return nil, err } @@ -115,12 +175,18 @@ func validateLoginRequest(req LoginRequest) error { return nil } -// passwordSalt mirrors the `passwordAddSort` helper in -// pkg/wizard/auth.go (and its TS counterpart in BindTerminusBusiness.ts): -// MD5 of `@Olares2025`. The salt is a public, account-independent -// constant — it's NOT a security feature, just a wire-format quirk the auth -// backend expects. -func passwordSalt(password string) string { +// PasswordSalt is the md5(`@Olares2025`) wire-format the Authelia +// backend expects on /api/firstfactor and on the bfl +// /iam/v1alpha1/users//password reset endpoint. 
The salt string is a +// public, account-independent constant — it is NOT a security feature, only +// a quirk we have to reproduce on every code path that talks to those two +// endpoints. The TS counterpart is `passwordAddSort` in +// apps/packages/app/src/utils/BindTerminusBusiness.ts. +// +// Exported so cli/pkg/wizard.ResetPassword can reuse the same implementation +// instead of carrying its own copy — having two copies invites silent drift +// the day someone changes the salt server-side. +func PasswordSalt(password string) string { hash := md5.Sum([]byte(password + "@Olares2025")) return fmt.Sprintf("%x", hash) } @@ -140,21 +206,25 @@ type firstFactorResponse struct { Data Token `json:"data"` } -func postFirstFactor(ctx context.Context, client *http.Client, req LoginRequest) (*Token, error) { +// firstFactorWithClient is the shared implementation behind FirstFactor and +// Login. Splitting it out lets Login reuse the cookie-jarred client across +// /api/firstfactor and /api/secondfactor/totp without re-dialling. +// +// targetURL derivation matches apps/packages/app/src/utils/account.ts +// L19-26: vault./server by default, desktop./ when the caller +// asks for the 2FA-bearing policy via NeedTwoFactor. +func firstFactorWithClient(ctx context.Context, client *http.Client, req LoginRequest) (*Token, error) { + targetURL := "https://vault." + req.TerminusName + "/server" + if req.NeedTwoFactor { + targetURL = "https://desktop." + req.TerminusName + "/" + } body := firstFactorBody{ Username: req.LocalName, - Password: passwordSalt(req.Password), + Password: PasswordSalt(req.Password), KeepMeLoggedIn: false, RequestMethod: "POST", - // Always declare the desktop subdomain as the post-login redirect target. - // Authelia's `fa2` flag in the response is computed against this URL via - // its access-control policy, and only the desktop./ rule - // requires 2FA. 
Sending the auth or vault URL would silently downgrade - // the response to 1FA, hiding the fact that the account has 2FA enabled. - // See apps/packages/app/src/utils/account.ts (onFirstFactor) for the - // matching web behavior. - TargetURL: "https://desktop." + req.TerminusName + "/", - AcceptCookie: true, + TargetURL: targetURL, + AcceptCookie: req.AcceptCookie, } resp, err := postJSON(ctx, client, req.AuthURL+"/api/firstfactor?hideCookie=true", body, nil) if err != nil { diff --git a/cli/pkg/wizard/auth.go b/cli/pkg/wizard/auth.go index 162322bc7..6c206e16f 100644 --- a/cli/pkg/wizard/auth.go +++ b/cli/pkg/wizard/auth.go @@ -1,121 +1,15 @@ package wizard import ( - "crypto/md5" + "context" "encoding/json" "fmt" - "io" "log" - "net/http" "strings" "time" -) - -// Token struct, corresponds to TypeScript Token interface -type Token struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - ExpiresAt int `json:"expires_at"` - SessionID string `json:"session_id"` - FA2 bool `json:"fa2"` -} - -// FirstFactorRequest represents first factor request structure -type FirstFactorRequest struct { - Username string `json:"username"` - Password string `json:"password"` - KeepMeLoggedIn bool `json:"keepMeLoggedIn"` - RequestMethod string `json:"requestMethod"` - TargetURL string `json:"targetURL"` - AcceptCookie bool `json:"acceptCookie"` -} - -// FirstFactorResponse represents first factor response structure -type FirstFactorResponse struct { - Status string `json:"status"` - Data Token `json:"data"` -} - -// OnFirstFactor implements first factor authentication (ref: BindTerminusBusiness.ts). -// -// If client is nil, a fresh http.Client is created internally. Pass a shared -// client (e.g. 
from newAuthHTTPClient) when the caller intends to follow up -// with /api/secondfactor/totp so that Authelia session cookies set by this -// request are reused on the next one. -func OnFirstFactor(client *http.Client, baseURL, terminusName, osUser, osPwd string, acceptCookie, needTwoFactor bool) (*Token, error) { - log.Printf("Starting onFirstFactor for user: %s", osUser) - - // Process password (salted MD5) - processedPassword := passwordAddSort(osPwd) - - // Build request - reqData := FirstFactorRequest{ - Username: osUser, - Password: processedPassword, - KeepMeLoggedIn: false, - RequestMethod: "POST", - TargetURL: baseURL, - AcceptCookie: acceptCookie, - } - - jsonData, err := json.Marshal(reqData) - if err != nil { - return nil, fmt.Errorf("failed to marshal request: %v", err) - } - - if client == nil { - client = &http.Client{ - Timeout: 10 * time.Second, - } - } - - reqURL := fmt.Sprintf("%s/api/firstfactor?hideCookie=true", baseURL) - req, err := http.NewRequest("POST", reqURL, strings.NewReader(string(jsonData))) - if err != nil { - return nil, fmt.Errorf("failed to create request: %v", err) - } - - req.Header.Set("Content-Type", "application/json") - log.Printf("Sending request to: %s", reqURL) - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("request failed: %v", err) - } - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("failed to read response: %v", err) - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("HTTP error %d: %s", resp.StatusCode, string(body)) - } - - var response FirstFactorResponse - if err := json.Unmarshal(body, &response); err != nil { - return nil, fmt.Errorf("failed to unmarshal response: %v", err) - } - - if response.Status != "OK" { - return nil, fmt.Errorf("authentication failed: %s", response.Status) - } - - log.Printf("First factor authentication successful") - return &response.Data, nil -} - -// passwordAddSort implements salted MD5 
(ref: TypeScript version) -func passwordAddSort(password string) string { - // Salt and MD5 - saltedPassword := password + "@Olares2025" - hash := md5.Sum([]byte(saltedPassword)) - return fmt.Sprintf("%x", hash) -} + "github.com/beclab/Olares/cli/pkg/auth" +) // Main authentication function - corresponds to original TypeScript _authenticate function func Authenticate(req AuthenticateRequest) (*AuthenticateResponse, error) { @@ -212,10 +106,32 @@ func UserBindTerminus(mnemonic, bflUrl, vaultUrl, authUrl, osPwd, terminusName, log.Printf("Using bflUrl: %s", bflUrl) - // 3. Call onFirstFactor to get token (ref: TypeScript implementation). - // Use a cookie-jar-backed client for consistency with LoginTerminus, - // even though no second factor follows in this flow. - token, err := OnFirstFactor(newAuthHTTPClient(), bflUrl, terminusName, localName, osPwd, false, false) + // 3. Call /api/firstfactor via the shared pkg/auth implementation. + // + // 1:1 mirror of apps/packages/app/src/utils/BindTerminusBusiness.ts + // L58-66 (userBindTerminus), which calls onFirstFactor(baseURL, + // name, local_name, osPwd, false /*acceptCookie*/, undefined + // /*needTwoFactor*/, osVersion) and uses the 1st-factor token + // directly without inspecting fa2. + // + // We use auth.FirstFactor (low-level) — NOT auth.Login — because: + // - There is no MFA seed yet (it is returned later in + // signupResponse.MFA), so even if Authelia echoes fa2=true we + // cannot respond to it. + // - The first-factor access_token is what the subsequent signup + // endpoints need. + // + // NeedTwoFactor=false keeps targetURL = vault./server (TS + // default). AcceptCookie=false matches the explicit `false` arg + // in TS L62. 
+ token, err := auth.FirstFactor(context.TODO(), auth.LoginRequest{ + AuthURL: bflUrl, + LocalName: localName, + TerminusName: terminusName, + Password: osPwd, + NeedTwoFactor: false, + AcceptCookie: false, + }) if err != nil { return "", fmt.Errorf("onFirstFactor failed: %v", err) } diff --git a/cli/pkg/wizard/login_terminus.go b/cli/pkg/wizard/login_terminus.go index 2847e741f..7539d6f08 100644 --- a/cli/pkg/wizard/login_terminus.go +++ b/cli/pkg/wizard/login_terminus.go @@ -1,79 +1,84 @@ package wizard import ( + "context" "crypto/hmac" "crypto/sha1" "encoding/base32" "encoding/json" + "errors" "fmt" "io" "log" "math" "net/http" - "net/http/cookiejar" "strings" "time" -) -// newAuthHTTPClient creates an HTTP client with a cookie jar so that -// Set-Cookie headers from /api/firstfactor are automatically attached to -// subsequent requests like /api/secondfactor/totp. This mirrors the -// `withCredentials: true` behavior of the TS axios instance used in -// loginTerminus (BindTerminusBusiness.ts). -func newAuthHTTPClient() *http.Client { - jar, _ := cookiejar.New(nil) - return &http.Client{ - Timeout: 10 * time.Second, - Jar: jar, - } -} + "github.com/beclab/Olares/cli/pkg/auth" +) -// LoginTerminus implements Terminus login functionality (ref: BindTerminusBusiness.ts loginTerminus) -func LoginTerminus(bflUrl, terminusName, localName, password string, needTwoFactor bool) (*Token, error) { +// LoginTerminus performs first-factor (and, when needed, second-factor TOTP) +// authentication against the Authelia backend. The actual HTTP work is +// delegated to pkg/auth.Login so the wizard never owns its own copy of the +// `passwordAddSort` salt math, the cookie-jar / 2FA wiring, or the response +// parser — keeping wire-format quirks centralised in pkg/auth. 
+// +// The wizard-specific bit that pkg/auth deliberately does not know about is +// where the TOTP code comes from: during activation it has to be computed +// locally from the MFA seed stored in globalUserStore (see getTOTPFromMFA). +// We therefore: +// +// 1. pre-compute TOTP eagerly when the caller already knows 2FA is on +// (`needTwoFactor=true`), so we can submit both factors in one call; +// 2. fall back to the same TOTP source if pkg/auth.Login surfaces +// ErrTOTPRequired (caller passed false but server says fa2 is needed) — +// this matches the old wizard behaviour of branching on +// `token.FA2 || needTwoFactor`. +func LoginTerminus(bflUrl, terminusName, localName, password string, needTwoFactor bool) (*auth.Token, error) { log.Printf("Starting loginTerminus for user: %s", terminusName) - // Share a single http.Client (with cookie jar) across both factors so that - // the Authelia session cookie set by /api/firstfactor is automatically - // attached to /api/secondfactor/totp, mirroring the TS axios - // `withCredentials: true` behavior in loginTerminus. - client := newAuthHTTPClient() - - // 1. Call onFirstFactor to get initial token (ref: loginTerminus line 364-372) - token, err := OnFirstFactor(client, bflUrl, terminusName, localName, password, true, needTwoFactor) - if err != nil { - return nil, fmt.Errorf("first factor authentication failed: %v", err) - } - - log.Printf("First factor completed, session_id: %s, FA2 required: %t", token.SessionID, token.FA2 || needTwoFactor) - - // 2. If second factor authentication is required (ref: loginTerminus line 379-446) - if token.FA2 || needTwoFactor { - log.Printf("Second factor authentication required") - - // Get TOTP value - totpValue, err := getTOTPFromMFA() + // 1:1 mirror of apps/packages/app/src/utils/BindTerminusBusiness.ts + // L364-372 (loginTerminus): onFirstFactor is invoked with + // `acceptCookie=true, needTwoFactor=`. 
NeedTwoFactor here flips + // the targetURL onto desktop./ so Authelia's 2FA-policy fires + // (matching TS L21-25 in account.ts) and is also OR'd with tok.FA2 + // inside auth.Login to gate the second-factor POST (TS L379). + req := auth.LoginRequest{ + AuthURL: bflUrl, + LocalName: localName, + TerminusName: terminusName, + Password: password, + NeedTwoFactor: needTwoFactor, + AcceptCookie: true, + } + if needTwoFactor { + totp, err := getTOTPFromMFA() if err != nil { - return nil, fmt.Errorf("failed to get TOTP: %v", err) + return nil, fmt.Errorf("get totp: %w", err) } - - log.Printf("Generated TOTP: %s", totpValue) - - // Perform second factor authentication - secondToken, err := performSecondFactor(client, bflUrl, terminusName, totpValue, token.AccessToken) - if err != nil { - return nil, fmt.Errorf("second factor authentication failed: %v", err) + log.Printf("Generated TOTP (eager, needTwoFactor=true)") + req.TOTP = totp + } + + tok, err := auth.Login(context.TODO(), req) + if errors.Is(err, auth.ErrTOTPRequired) { + // Caller asserted no 2FA but the server disagreed. Pull the TOTP + // from the MFA seed and retry once. 
+ log.Printf("Server reported fa2 even though caller passed needTwoFactor=false; retrying with TOTP") + totp, ferr := getTOTPFromMFA() + if ferr != nil { + return nil, fmt.Errorf("get totp: %w", ferr) } - - // Update token information - token.AccessToken = secondToken.AccessToken - token.RefreshToken = secondToken.RefreshToken - token.SessionID = secondToken.SessionID - - log.Printf("Second factor completed, updated session_id: %s", token.SessionID) + req.TOTP = totp + tok, err = auth.Login(context.TODO(), req) + } + if err != nil { + return nil, err } - log.Printf("LoginTerminus completed successfully") - return token, nil + log.Printf("LoginTerminus completed successfully, session_id: %s", tok.SessionID) + return tok, nil } // getTOTPFromMFA generates TOTP from stored MFA (ref: loginTerminus line 380-403) @@ -141,87 +146,14 @@ func generateHOTP(secret string, counter int64) (string, error) { return fmt.Sprintf("%06d", otp), nil } -// performSecondFactor performs second factor authentication (ref: loginTerminus line 419-446). -// -// Pass the same *http.Client that was used for the first factor so that the -// Authelia session cookie set on /api/firstfactor is automatically attached -// here. If client is nil, a fresh client is created (cookies will not be -// shared, which the server typically rejects). 
-func performSecondFactor(client *http.Client, baseURL, terminusName, totpValue string, accessToken string) (*Token, error) { - log.Printf("Performing second factor authentication") - - // Build target URL - targetURL := fmt.Sprintf("https://desktop.%s/", strings.ReplaceAll(terminusName, "@", ".")) - - // Build request data - reqData := map[string]interface{}{ - "targetUrl": targetURL, - "token": totpValue, - } - - jsonData, err := json.Marshal(reqData) - if err != nil { - return nil, fmt.Errorf("failed to marshal request: %v", err) - } - - if client == nil { - client = &http.Client{ - Timeout: 10 * time.Second, - } - } - - url := fmt.Sprintf("%s/api/secondfactor/totp", baseURL) - req, err := http.NewRequest("POST", url, strings.NewReader(string(jsonData))) - if err != nil { - return nil, fmt.Errorf("failed to create request: %v", err) - } - - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Access-Control-Allow-Origin", "*") - req.Header.Set("X-Unauth-Error", "Non-Redirect") - req.Header.Set("X-Authorization", accessToken) - - log.Printf("Sending second factor request to: %s", url) - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("request failed: %v", err) - } - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("failed to read response: %v", err) - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("HTTP error %d: %s", resp.StatusCode, string(body)) - } - - var response struct { - Status string `json:"status"` - Data Token `json:"data"` - } - - if err := json.Unmarshal(body, &response); err != nil { - return nil, fmt.Errorf("failed to unmarshal response: %v", err) - } - - if response.Status != "OK" { - return nil, fmt.Errorf("second factor authentication failed: %s", response.Status) - } - - log.Printf("Second factor authentication successful") - return &response.Data, nil -} - // ResetPassword implements password reset functionality (ref: account.ts 
reset_password) func ResetPassword(baseURL, localName, currentPassword, newPassword, accessToken string) error { log.Printf("Starting reset password for user: %s", localName) - // Process passwords (salted MD5) - processedCurrentPassword := passwordAddSort(currentPassword) - processedNewPassword := passwordAddSort(newPassword) + // Process passwords (salted MD5) — reuse pkg/auth so wizard never owns + // its own copy of the salt; see auth.PasswordSalt for rationale. + processedCurrentPassword := auth.PasswordSalt(currentPassword) + processedNewPassword := auth.PasswordSalt(newPassword) // Build request data (ref: account.ts line 138-141) reqData := map[string]interface{}{ @@ -280,7 +212,7 @@ func ResetPassword(baseURL, localName, currentPassword, newPassword, accessToken if err := json.Unmarshal(body, &response); err != nil { return fmt.Errorf("failed to unmarshal response: %v", err) } - + // Check response status (ref: account.ts line 148-155) if response.Code != 0 { if response.Message != "" { From f5aad7397f13a836c4584e540ce887d21cf30a16 Mon Sep 17 00:00:00 2001 From: Peng Peng Date: Sat, 25 Apr 2026 22:05:55 +0800 Subject: [PATCH 05/12] feat(cli): add files upload/download/cat/rm commands MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds four new verbs to `olares-cli files`, mirroring the LarePass web app's Drive UX over the per-user files-backend on `files.`: - `files upload` — resumable chunked upload (Drive v2 protocol: upload-link + file-uploaded-bytes + Content-Range POSTs). Recurses into local directories; the destination directory must pre-exist on the server because POST /api/resources// auto-renames an existing dir to "Dir (1)" instead of returning 409. - `files download` — single-file with server-driven `Range:` resume + atomic `.tmp`+rename overwrite, or recursive directory mirroring with `--parallel N` errgroup-bounded concurrency. 
- `files cat` — streams `/api/raw/<path>?inline=true` to stdout,
  refusing directories up-front for a friendlier message than the
  backend's terse 400.
- `files rm` — Unix-like `-r/-R` and `-f/--force`; groups targets by
  parent dir and issues one `DELETE /api/resources/<parent-dir>/` per
  group with `{"dirents":[...]}` in the body, matching the frontend's
  batchDelete wire.

`Stat` uses the parent-listing strategy (list parent dir, find leaf in
items) rather than `GET /api/resources/<path>` directly, because the
backend's single-file List handler hard-codes `Content: true` and 500s
on most real files. This matches what the LarePass web app does.

All four commands reuse the existing 3-segment `FrontendPath` parser
and the `X-Authorization` access-token transport from
`pkg/cmdutil/factory.go`.

httptest-driven unit tests cover resume / overwrite / retries / range
headers (download), plan grouping + dir-without-r refusal + wire shape
(rm), encodeURIComponent parity + chunked upload protocol (upload).

Made-with: Cursor
---
 cli/cmd/ctl/files/cat.go | 100 +++++
 cli/cmd/ctl/files/download.go | 365 +++++++++++++++
 cli/cmd/ctl/files/rm.go | 289 ++++++++++++
 cli/cmd/ctl/files/root.go | 4 +
 cli/cmd/ctl/files/upload.go | 357 +++++++++++++++
 cli/pkg/files/download/client.go | 134 ++++++
 cli/pkg/files/download/download.go | 460 +++++++++++++++++++
 cli/pkg/files/download/download_test.go | 466 +++++++++++++++++++
 cli/pkg/files/download/list.go | 71 +++
 cli/pkg/files/download/stat.go | 140 ++++++
 cli/pkg/files/download/walker.go | 195 ++++++++
 cli/pkg/files/download/walker_test.go | 200 +++++++++
 cli/pkg/files/rm/rm.go | 245 ++++++++++
 cli/pkg/files/rm/rm_test.go | 228 ++++++++++
 cli/pkg/files/upload/api.go | 297 +++++++++++++
 cli/pkg/files/upload/api_test.go | 202 +++++++++
 cli/pkg/files/upload/encode.go | 106 +++++
 cli/pkg/files/upload/encode_test.go | 70 +++
 cli/pkg/files/upload/uploader.go | 535 ++++++++++++++++++++++
 cli/pkg/files/upload/uploader_test.go | 566 ++++++++++++++++++++++++
cli/pkg/files/upload/walker.go | 312 +++++++++++++ cli/pkg/files/upload/walker_test.go | 121 +++++ 22 files changed, 5463 insertions(+) create mode 100644 cli/cmd/ctl/files/cat.go create mode 100644 cli/cmd/ctl/files/download.go create mode 100644 cli/cmd/ctl/files/rm.go create mode 100644 cli/cmd/ctl/files/upload.go create mode 100644 cli/pkg/files/download/client.go create mode 100644 cli/pkg/files/download/download.go create mode 100644 cli/pkg/files/download/download_test.go create mode 100644 cli/pkg/files/download/list.go create mode 100644 cli/pkg/files/download/stat.go create mode 100644 cli/pkg/files/download/walker.go create mode 100644 cli/pkg/files/download/walker_test.go create mode 100644 cli/pkg/files/rm/rm.go create mode 100644 cli/pkg/files/rm/rm_test.go create mode 100644 cli/pkg/files/upload/api.go create mode 100644 cli/pkg/files/upload/api_test.go create mode 100644 cli/pkg/files/upload/encode.go create mode 100644 cli/pkg/files/upload/encode_test.go create mode 100644 cli/pkg/files/upload/uploader.go create mode 100644 cli/pkg/files/upload/uploader_test.go create mode 100644 cli/pkg/files/upload/walker.go create mode 100644 cli/pkg/files/upload/walker_test.go diff --git a/cli/cmd/ctl/files/cat.go b/cli/cmd/ctl/files/cat.go new file mode 100644 index 000000000..02b9f674b --- /dev/null +++ b/cli/cmd/ctl/files/cat.go @@ -0,0 +1,100 @@ +package files + +import ( + "context" + "fmt" + "io" + "strings" + + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/cmdutil" + "github.com/beclab/Olares/cli/pkg/files/download" +) + +// NewCatCommand: `olares-cli files cat ` +// +// Streams the raw bytes of a single remote file to stdout. The wire +// call is GET /api/raw/?inline=true (same path the LarePass +// web app uses for text-content previews — `inline=true` only +// affects Content-Disposition, the body is identical). +// +// Like `cat` itself, this is binary-safe: we don't sniff or +// interpret the body, we just copy it through. 
That means cat-ing a +// huge image will dump the bytes — the user is expected to pipe to +// `less`, `head`, or a similar tool when they care about safety. +// +// We Stat the path before fetching so a directory target produces a +// clear "is a directory" error rather than the server's terser +// "not a file, path: ..." 400. +func NewCatCommand(f *cmdutil.Factory) *cobra.Command { + cmd := &cobra.Command{ + Use: "cat ", + Short: "stream a remote file's contents to stdout", + Long: `Stream the raw bytes of a single file on the per-user files-backend to stdout. + +Equivalent to ` + "`olares-cli files download -`" + ` if a future +` + "`-`" + ` -means-stdout convention is added — for now ` + "`cat`" + ` is the explicit +verb. The transfer is binary-safe (no buffering, no transformation), +so piping into ` + "`less`" + ` / ` + "`hexdump`" + ` / ` + "`head -c`" + ` works as expected. + +Directories produce an error rather than a recursive concatenation +(use ` + "`files download /`" + ` if you want the contents on disk +first). + +Examples: + + olares-cli files cat drive/Home/Documents/notes.md + olares-cli files cat drive/Home/Logs/today.log | tail -n 50 +`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runCat(cmd.Context(), f, cmd.OutOrStdout(), args[0]) + }, + } + return cmd +} + +func runCat(ctx context.Context, f *cmdutil.Factory, out io.Writer, remoteArg string) error { + if ctx == nil { + ctx = context.Background() + } + + fp, err := ParseFrontendPath(remoteArg) + if err != nil { + return err + } + + rp, err := f.ResolveProfile(ctx) + if err != nil { + return err + } + + httpClient := newUploadHTTPClient(rp.InsecureSkipVerify) + client := &download.Client{ + HTTPClient: httpClient, + BaseURL: rp.FilesURL, + AccessToken: rp.AccessToken, + } + + plain := strings.TrimSuffix(fp.String(), "/") + + // Probe before streaming. 
Two cheap wins: + // - friendly "is a directory" message for `cat drive/Home/` + // instead of the server's terse 400; + // - 401/403/404 reformatted with the standard CTA before we + // start writing partial data to stdout. + st, err := client.Stat(ctx, plain) + if err != nil { + return reformatHTTPErr(err, rp.OlaresID, "stat", plain) + } + if st.IsDir { + return fmt.Errorf("%s is a directory: cat only works on files (use `olares-cli files ls %s` to list it)", + fp.String(), fp.String()) + } + + if _, err := client.StreamRaw(ctx, plain, out); err != nil { + return reformatHTTPErr(err, rp.OlaresID, "cat", plain) + } + return nil +} diff --git a/cli/cmd/ctl/files/download.go b/cli/cmd/ctl/files/download.go new file mode 100644 index 000000000..33e081383 --- /dev/null +++ b/cli/cmd/ctl/files/download.go @@ -0,0 +1,365 @@ +package files + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" + + "github.com/beclab/Olares/cli/pkg/cmdutil" + "github.com/beclab/Olares/cli/pkg/files/download" +) + +type downloadOptions struct { + parallel int + maxRetries int + overwrite bool + resume bool +} + +// NewDownloadCommand: `olares-cli files download []` +// +// Pulls a single file or a whole directory tree from the per-user +// files-backend down to the local filesystem. Single-file downloads +// resume via the server's native `Range: bytes=N-` support +// (raw_service.go's parseRangeHeader); directories walk recursively +// over /api/resources and pull each file with the same code path. +// +// Local destination semantics: +// +// - omitted → ./ in the current directory +// - existing dir → write under that directory using the remote +// basename (mirrors `cp`'s behavior) +// - any other path → treated as the full local target path +// (file mode), or the directory to create / use as the root +// (directory mode). 
+// +// Concurrency only kicks in for directory mode; --parallel N runs N +// file downloads in flight at once. Per-file resume + retry are +// independent of --parallel. +func NewDownloadCommand(f *cmdutil.Factory) *cobra.Command { + o := &downloadOptions{} + cmd := &cobra.Command{ + Use: "download []", + Short: "download a file or directory from the per-user files-backend", + Long: `Download a file or directory tree from the per-user files-backend. + +Single-file resume is server-driven: pass --resume and the CLI sends +Range: bytes=- so the server only ships the bytes you don't +already have. The local file is opened with O_APPEND so a Ctrl-C + +re-run keeps making forward progress without sidecar progress files. + +Without --resume or --overwrite, the command refuses to clobber an +existing local file. Use --overwrite to replace it (writes to +.tmp + rename, so the previous version stays intact until the +new one lands), or --resume to continue a previously-interrupted +download. + +Directory downloads recursively walk /api/resources, recreate the +remote directory tree under the local destination (the remote root's +own basename becomes the top-level directory there, matching the +LarePass folder-download UX), and run --parallel N file fetches +concurrently. Empty subdirectories are mirrored locally so the on-disk +tree matches even when a directory has no files. + + uses the same 3-segment frontend path as ` + "`olares-cli files ls`" + `. +A trailing '/' on means "treat as directory" (validated +against the server's actual type via /api/resources stat); without +one, the path is treated as a file. + +Examples: + + # Download one file into the current directory. + olares-cli files download drive/Home/Documents/report.pdf + + # Same, but pick a different local name. + olares-cli files download drive/Home/Documents/report.pdf ./Q1.pdf + + # Resume an interrupted download. 
+ olares-cli files download drive/Home/Backups/big.tar ./big.tar --resume + + # Recursively pull a folder, 4 files at a time. + olares-cli files download drive/Home/Documents/ ./out/ --parallel 4 +`, + Args: cobra.RangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + localArg := "" + if len(args) == 2 { + localArg = args[1] + } + return runDownload(cmd.Context(), f, cmd.OutOrStdout(), args[0], localArg, o) + }, + } + cmd.Flags().IntVar(&o.parallel, "parallel", 4, + "number of files to download concurrently in directory mode") + cmd.Flags().IntVar(&o.maxRetries, "max-retries", download.DefaultMaxRetries, + "maximum retry attempts per file on transient failures") + cmd.Flags().BoolVar(&o.overwrite, "overwrite", false, + "replace existing local files (writes to .tmp + rename)") + cmd.Flags().BoolVar(&o.resume, "resume", false, + "resume an interrupted download via the server's Range support") + return cmd +} + +func runDownload( + ctx context.Context, + f *cmdutil.Factory, + out io.Writer, + remoteArg, localArg string, + o *downloadOptions, +) error { + if ctx == nil { + ctx = context.Background() + } + if o.parallel < 1 { + o.parallel = 1 + } + if o.overwrite && o.resume { + return errors.New("--overwrite and --resume are mutually exclusive") + } + + fp, err := ParseFrontendPath(remoteArg) + if err != nil { + return err + } + + rp, err := f.ResolveProfile(ctx) + if err != nil { + return err + } + + httpClient := newUploadHTTPClient(rp.InsecureSkipVerify) + client := &download.Client{ + HTTPClient: httpClient, + BaseURL: rp.FilesURL, + AccessToken: rp.AccessToken, + } + + // Stat first so we (a) reject auth errors / 404s with a clean + // message before we touch the local filesystem, and (b) know + // whether to take the single-file or recursive directory branch. 
+ plain := strings.TrimSuffix(fp.String(), "/") + st, err := client.Stat(ctx, plain) + if err != nil { + return reformatHTTPErr(err, rp.OlaresID, "stat", plain) + } + + if st.IsDir { + return runDownloadDir(ctx, client, fp, plain, localArg, o, out) + } + return runDownloadFile(ctx, client, fp, plain, st.Size, localArg, o, out) +} + +// runDownloadFile handles the single-file branch. The local destination +// is resolved here so the helper is easy to test in isolation; it's +// the only place we synthesise the implicit " in cwd" / +// "into existing dir" rules. +func runDownloadFile( + ctx context.Context, + client *download.Client, + fp FrontendPath, + plain string, + remoteSize int64, + localArg string, + o *downloadOptions, + out io.Writer, +) error { + remoteBase := lastSegmentOfFrontendPath(fp) + if remoteBase == "" { + return fmt.Errorf("cannot derive a local filename from remote %q (no trailing path component)", fp.String()) + } + dst, err := resolveLocalFile(localArg, remoteBase) + if err != nil { + return err + } + + fmt.Fprintf(out, "downloading %s (%s) → %s\n", fp.String(), humanBytes(remoteSize), dst) + + start := time.Now() + written, err := client.DownloadFile(ctx, plain, dst, download.Options{ + Overwrite: o.overwrite, + Resume: o.resume, + MaxRetries: o.maxRetries, + }, nil) + if err != nil { + return reformatHTTPErr(err, "", "download", plain) + } + fmt.Fprintf(out, "done: wrote %s in %s (file size %s)\n", + humanBytes(written), + time.Since(start).Truncate(time.Millisecond), + humanBytes(remoteSize), + ) + return nil +} + +// runDownloadDir handles the recursive directory branch: walk the +// remote tree, recreate it locally, run errgroup-bounded parallel file +// downloads. Uses the same per-file Options as the single-file branch +// so --resume / --overwrite have consistent semantics regardless of +// mode. 
+func runDownloadDir( + ctx context.Context, + client *download.Client, + fp FrontendPath, + plain string, + localArg string, + o *downloadOptions, + out io.Writer, +) error { + if localArg == "" { + // "Into the current directory" — the recreated remote root + // becomes .//. + localArg = "." + } + plan, err := download.BuildPlan(ctx, client, plain, localArg) + if err != nil { + return reformatHTTPErr(err, "", "list", plain) + } + + // Pre-create the local root + every empty subdirectory so the + // on-disk tree mirrors the remote one. Doing this before any + // downloads start means concurrent file writes never race on + // MkdirAll for shared parents. + if err := os.MkdirAll(plan.LocalRoot, 0o755); err != nil { + return fmt.Errorf("mkdir %s: %w", plan.LocalRoot, err) + } + for _, ed := range plan.EmptyDirs { + full := filepath.Join(plan.LocalRoot, filepath.FromSlash(ed)) + if err := os.MkdirAll(full, 0o755); err != nil { + return fmt.Errorf("mkdir %s: %w", full, err) + } + } + + if len(plan.Files) == 0 { + fmt.Fprintf(out, "no files to download (remote tree has no regular files)\n") + return nil + } + + totalBytes := int64(0) + for _, t := range plan.Files { + totalBytes += t.Size + } + fmt.Fprintf(out, "downloading %d file(s), %s, into %s (parallel=%d)\n", + len(plan.Files), humanBytes(totalBytes), plan.LocalRoot, o.parallel) + + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(o.parallel) + var ( + mu sync.Mutex + completed int + bytesDone int64 + ) + totalFiles := len(plan.Files) + + for _, task := range plan.Files { + task := task + g.Go(func() error { + start := time.Now() + fmt.Fprintf(out, " → %s (%s)\n", task.RelativePath, humanBytes(task.Size)) + written, err := client.DownloadFile(gctx, task.RemotePlainPath, task.LocalPath, download.Options{ + Overwrite: o.overwrite, + Resume: o.resume, + MaxRetries: o.maxRetries, + }, nil) + if err != nil { + return fmt.Errorf("%s: %w", task.RelativePath, err) + } + mu.Lock() + completed++ + 
atomic.AddInt64(&bytesDone, written) + done := completed + mu.Unlock() + fmt.Fprintf(out, " ✓ %s (%s, %s) [%d/%d]\n", + task.RelativePath, humanBytes(written), + time.Since(start).Truncate(time.Millisecond), + done, totalFiles) + return nil + }) + } + if err := g.Wait(); err != nil { + return err + } + fmt.Fprintf(out, "done: %d file(s), %s\n", completed, humanBytes(bytesDone)) + return nil +} + +// lastSegmentOfFrontendPath returns the basename of the resource the +// path points at — the last non-empty '/'-split segment of SubPath, or +// the Extend value when SubPath is just "/" (which means "the root of +// this volume", whose effective name on the local side is the volume +// name like "Home"). +func lastSegmentOfFrontendPath(fp FrontendPath) string { + sub := strings.Trim(fp.SubPath, "/") + if sub == "" { + return fp.Extend + } + if idx := strings.LastIndex(sub, "/"); idx >= 0 { + return sub[idx+1:] + } + return sub +} + +// resolveLocalFile applies the implicit local-destination rules for +// the single-file download path: +// +// - empty localArg → ./ +// - localArg is a dir → / +// - any other localArg → use as the full target path +// +// Returned path may not yet exist; the downloader's +// planLocalWrite handles the existence + overwrite/resume policy. +func resolveLocalFile(localArg, remoteBase string) (string, error) { + if localArg == "" { + return remoteBase, nil + } + st, err := os.Stat(localArg) + switch { + case err == nil && st.IsDir(): + return filepath.Join(localArg, remoteBase), nil + case err == nil: + return localArg, nil + case errors.Is(err, os.ErrNotExist): + // Trailing slash means "treat as directory even if it doesn't + // exist yet" — same convention as `cp` / `rsync`. 
+ if strings.HasSuffix(localArg, string(os.PathSeparator)) || strings.HasSuffix(localArg, "/") { + return filepath.Join(localArg, remoteBase), nil + } + return localArg, nil + default: + return "", fmt.Errorf("stat %s: %w", localArg, err) + } +} + +// reformatHTTPErr maps download.HTTPError codes onto user-friendly +// messages, mirroring the formatHTTPError helper in ls.go. We don't +// share the helper directly because the download package's HTTPError +// type isn't compatible with the upload package's, and untyped +// duck-typing here would be more confusing than the small duplication. +func reformatHTTPErr(err error, olaresID, op, target string) error { + if err == nil { + return nil + } + var hErr *download.HTTPError + if errors.As(err, &hErr) { + switch hErr.Status { + case 401, 403: + if olaresID != "" { + return fmt.Errorf("server rejected the access token (HTTP %d); please run: olares-cli profile login --olares-id %s", + hErr.Status, olaresID) + } + return fmt.Errorf("server rejected the access token (HTTP %d); please re-run `olares-cli profile login`", hErr.Status) + case 404: + return fmt.Errorf("%s %s: not found on the server (HTTP 404)", op, target) + } + } + return err +} diff --git a/cli/cmd/ctl/files/rm.go b/cli/cmd/ctl/files/rm.go new file mode 100644 index 000000000..670d1d23b --- /dev/null +++ b/cli/cmd/ctl/files/rm.go @@ -0,0 +1,289 @@ +package files + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "os" + "strings" + "syscall" + + "github.com/spf13/cobra" + "golang.org/x/term" + + "github.com/beclab/Olares/cli/pkg/cmdutil" + "github.com/beclab/Olares/cli/pkg/files/rm" +) + +type rmOptions struct { + recursive bool + force bool +} + +// NewRmCommand: `olares-cli files rm [-r] [-f] ...` +// +// Deletes one or more remote entries via the per-user files-backend's +// batch DELETE endpoint. 
Multiple targets sharing a parent directory +// collapse into a single wire request — the LarePass web app does the +// same, see batchDeleteFileItems in v2/common/utils.ts. +// +// Conventions: +// +// - --recursive / -r / -R is required to remove directories. A +// trailing '/' on a target is interpreted as "this is a +// directory" intent and triggers the same check. +// - --force / -f skips the interactive confirmation prompt. Without +// it, we list what would be deleted and ask y/N. In a non-TTY +// environment (CI, piped stdin) we refuse rather than guessing — +// the user has to opt in to deletion explicitly. +// - Removing the root of a volume (`drive/Home/`, `sync//`, +// ...) is rejected by the planner; that operation has to be +// expressed differently if it's ever needed. +func NewRmCommand(f *cmdutil.Factory) *cobra.Command { + o := &rmOptions{} + cmd := &cobra.Command{ + Use: "rm [-r] [-f] ...", + Aliases: []string{"remove", "delete"}, + Short: "delete one or more remote files / directories", + Long: `Delete one or more files or directories on the per-user files-backend. + +Wire shape (batch DELETE per parent dir): + + DELETE /api/resources// body: {"dirents": [...]} + +Multiple targets that share a parent directory collapse into one +request, matching the LarePass web app's batchDeleteFileItems helper. +Targets across different parents send one request each, in a stable +order (sorted by fileType + extend + parent). + +Confirmation: + + By default ` + "`rm`" + ` lists what it would delete and asks y/N. Pass + --force / -f to skip the prompt (e.g. in scripts). In a non-TTY + context (CI, piped stdin) we refuse without --force rather than + guessing. + +Trailing slash on a target signals "this is a directory" — the +planner errors out without --recursive in that case (Unix-style). +With --recursive both forms (` + "`foo`" + ` and ` + "`foo/`" + `) are accepted. 
+ +Examples: + + olares-cli files rm drive/Home/Documents/old.pdf + olares-cli files rm -r drive/Home/Backups/2024/ + olares-cli files rm -rf drive/Home/junk drive/Home/scratch/ +`, + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRm(cmd.Context(), f, cmd.OutOrStdout(), os.Stdin, args, o) + }, + } + cmd.Flags().BoolVarP(&o.recursive, "recursive", "r", false, + "recursively remove directories (also: -R)") + // -R is the BSD spelling; same flag, just an alias so users with + // muscle memory either way get the expected behavior. + cmd.Flags().BoolVarP(&o.force, "force", "f", false, + "skip the interactive y/N confirmation") + cmd.Flags().BoolP("recursive-bsd", "R", false, "alias for -r") + cmd.Flags().Lookup("recursive-bsd").Hidden = true + // Wire -R to the same boolean by post-parse fixup so we don't + // have to define two separate variables. + cmd.PreRunE = func(cmd *cobra.Command, args []string) error { + bsd, err := cmd.Flags().GetBool("recursive-bsd") + if err == nil && bsd { + o.recursive = true + } + return nil + } + return cmd +} + +func runRm( + ctx context.Context, + f *cmdutil.Factory, + out io.Writer, + in io.Reader, + args []string, + o *rmOptions, +) error { + if ctx == nil { + ctx = context.Background() + } + + targets := make([]rm.Target, 0, len(args)) + for _, a := range args { + t, err := frontendPathToRmTarget(a) + if err != nil { + return err + } + targets = append(targets, t) + } + + groups, err := rm.Plan(targets, o.recursive) + if err != nil { + return err + } + + // Show the user the exact set of operations before any wire call + // goes out — even with --force; deletion is destructive enough + // that a one-line "deleting N entries" line is worth printing. 
+ totalDirents := 0 + for _, g := range groups { + totalDirents += len(g.Dirents) + } + fmt.Fprintf(out, "will delete %d entr%s in %d batch%s:\n", + totalDirents, pluralYies(totalDirents), + len(groups), pluralEs(len(groups)), + ) + for _, g := range groups { + parent := g.ParentSubPath + if parent == "" { + parent = "/" + } + fmt.Fprintf(out, " %s/%s%s\n", g.FileType, g.Extend, parent) + for _, d := range g.Dirents { + fmt.Fprintf(out, " %s\n", d) + } + } + + if !o.force { + if !term.IsTerminal(int(syscall.Stdin)) { + return errors.New("refusing to delete without --force in a non-interactive context (no TTY)") + } + fmt.Fprintf(out, "proceed with deletion? [y/N]: ") + ok, err := readYesNo(in) + if err != nil { + return err + } + if !ok { + fmt.Fprintf(out, "aborted\n") + return nil + } + } + + rp, err := f.ResolveProfile(ctx) + if err != nil { + return err + } + httpClient := newUploadHTTPClient(rp.InsecureSkipVerify) + client := &rm.Client{ + HTTPClient: httpClient, + BaseURL: rp.FilesURL, + AccessToken: rp.AccessToken, + } + + // Serial DELETE per group. Per-group failures abort the rest: + // the user should see exactly which group failed so they can + // re-run on a narrower set, rather than getting a partial-success + // state with no clear "what's left". + for _, g := range groups { + if err := client.DeleteBatch(ctx, g); err != nil { + return reformatRmHTTPErr(err, rp.OlaresID, g) + } + fmt.Fprintf(out, " ✓ %s/%s%s (%d entr%s)\n", + g.FileType, g.Extend, g.ParentSubPath, + len(g.Dirents), pluralYies(len(g.Dirents)), + ) + } + fmt.Fprintf(out, "done: deleted %d entr%s\n", totalDirents, pluralYies(totalDirents)) + return nil +} + +// frontendPathToRmTarget converts a user-supplied path (e.g. +// "drive/Home/Documents/foo.pdf" or "drive/Home/Backups/") into the +// canonical rm.Target shape. The trailing slash is preserved as the +// directory-intent signal so the planner can require --recursive for +// it. 
+// +// Errors when the path resolves to "the root of /" +// — that case is intentionally unsupported because the user almost +// never means "wipe my Home/repo/bucket" and the cost of accidentally +// allowing it would be enormous. +func frontendPathToRmTarget(raw string) (rm.Target, error) { + fp, err := ParseFrontendPath(raw) + if err != nil { + return rm.Target{}, err + } + sub := fp.SubPath + isDir := strings.HasSuffix(sub, "/") + clean := strings.Trim(sub, "/") + if clean == "" { + return rm.Target{}, fmt.Errorf("refusing to delete the root of %s/%s", fp.FileType, fp.Extend) + } + + idx := strings.LastIndex(clean, "/") + var ( + parentSub string + name string + ) + if idx < 0 { + // Direct child of Extend root: parent is "/". + parentSub = "/" + name = clean + } else { + parentSub = "/" + clean[:idx] + "/" + name = clean[idx+1:] + } + return rm.Target{ + FileType: fp.FileType, + Extend: fp.Extend, + ParentSubPath: parentSub, + Name: name, + IsDirIntent: isDir, + }, nil +} + +// readYesNo reads one line from `in` and returns true when it starts +// with 'y' or 'Y'. Anything else (including EOF) is "no". We +// deliberately don't accept `yes`/`no` as full words separately — +// matches `rm -i`'s permissive behavior. +func readYesNo(in io.Reader) (bool, error) { + br := bufio.NewReader(in) + line, err := br.ReadString('\n') + if err != nil && err != io.EOF { + return false, err + } + line = strings.TrimSpace(line) + if line == "" { + return false, nil + } + switch strings.ToLower(line)[0] { + case 'y': + return true, nil + default: + return false, nil + } +} + +// reformatRmHTTPErr maps rm.HTTPError onto user-friendly messages, +// same spirit as the download counterpart. 
+func reformatRmHTTPErr(err error, olaresID string, g *rm.Group) error { + if err == nil { + return nil + } + var hErr *rm.HTTPError + if errors.As(err, &hErr) { + switch hErr.Status { + case 401, 403: + if olaresID != "" { + return fmt.Errorf("server rejected the access token (HTTP %d); please run: olares-cli profile login --olares-id %s", + hErr.Status, olaresID) + } + return fmt.Errorf("server rejected the access token (HTTP %d); please re-run `olares-cli profile login`", hErr.Status) + case 404: + return fmt.Errorf("delete %s/%s%s: not found on the server (HTTP 404)", + g.FileType, g.Extend, g.ParentSubPath) + } + } + return err +} + +// pluralEs handles "batch" / "batches" — same pattern as pluralYies. +func pluralEs(n int) string { + if n == 1 { + return "" + } + return "es" +} diff --git a/cli/cmd/ctl/files/root.go b/cli/cmd/ctl/files/root.go index dfae20aa3..9619991c8 100644 --- a/cli/cmd/ctl/files/root.go +++ b/cli/cmd/ctl/files/root.go @@ -53,6 +53,10 @@ Examples: } for _, sub := range []*cobra.Command{ NewLsCommand(f), + NewUploadCommand(f), + NewDownloadCommand(f), + NewCatCommand(f), + NewRmCommand(f), } { // Same rationale as cmd/ctl/profile/root.go: bad creds / network / // path-not-found errors are already actionable; don't bury them under diff --git a/cli/cmd/ctl/files/upload.go b/cli/cmd/ctl/files/upload.go new file mode 100644 index 000000000..b013d1ba1 --- /dev/null +++ b/cli/cmd/ctl/files/upload.go @@ -0,0 +1,357 @@ +package files + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "net/http" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" + + "github.com/beclab/Olares/cli/pkg/cmdutil" + "github.com/beclab/Olares/cli/pkg/files/upload" +) + +type uploadOptions struct { + parallel int + chunkSize int64 + maxRetries int + node string +} + +// NewUploadCommand: `olares-cli files upload ` +// +// Pushes a single file or a whole directory tree from the local +// filesystem into Drive/Home 
on the per-user files-backend, using the +// same chunked-resumable protocol the LarePass web app speaks +// (Resumable.js + the Drive v2 endpoints under /upload/upload-link, +// /upload/file-uploaded-bytes, /api/resources/drive/Home/...). See +// pkg/files/upload/uploader.go for the wire-level details. +// +// Resume is enabled by default and is server-driven: before each file +// the CLI calls /upload/file-uploaded-bytes// to ask "how much do +// you already have?", floors that to a chunk boundary, and resumes +// from there. There's no local progress file — re-running the same +// command after a Ctrl-C just re-asks the server, which is robust +// against any state drift between client invocations. +// +// File-level concurrency: --parallel N runs N files concurrently +// through an errgroup. Within a single file, chunks are sent +// sequentially (matching the web app's simultaneousUploads=1 default); +// pipelining chunks per file is not implemented because the +// resume-probe + chunk-sequence assumes a single in-flight chunk per +// file at a time. +// +// Path schema for : same as `files ls`, but the upload target +// must live under drive/Home (drive/Data is read-only on the wire). +// Trailing '/' on is significant — it's how we distinguish +// "upload into this directory" from "upload as this exact path +// (rename)" for the single-file case. +func NewUploadCommand(f *cmdutil.Factory) *cobra.Command { + o := &uploadOptions{} + cmd := &cobra.Command{ + Use: "upload ", + Short: "upload a file or directory to Drive/Home with resumable chunks", + Long: `Upload a local file or directory to drive/Home/<...> on the per-user files-backend. + +The chunked / resumable protocol mirrors the LarePass web app: each +file is probed against /upload/file-uploaded-bytes/ to figure out the +resume offset, chunks are POSTed (default 8 MiB each) until the file +is complete, and per-chunk failures are retried with backoff. 
Re-run +the same command after a Ctrl-C and the upload picks up where the +server stopped accepting bytes. + + uses the same 3-segment frontend path as ` + "`olares-cli files ls`" + ` — +the upload target must be under drive/Home (drive/Data is read-only on +the wire). A trailing '/' on means "upload into this +directory"; without one, is treated as the full target path +(useful to rename a file on the way in). + +The destination directory MUST already exist on the server. The +files-backend's "create directory" call auto-renames on collision +(POST .../Documents/ on an existing Documents creates "Documents (1)" +instead of returning a conflict), so we don't try to pre-create it — +use the web app or a future ` + "`files mkdir`" + ` verb if you need to +materialize a new directory first. + +Examples: + + # Upload one file into a directory. + olares-cli files upload report.pdf drive/Home/Documents/ + + # Same, but rename to 2026-Q1.pdf on the server. + olares-cli files upload report.pdf drive/Home/Documents/2026-Q1.pdf + + # Upload a directory tree (preserves the source folder name). + olares-cli files upload ./photos drive/Home/Backups/ + + # Two files in flight at a time. 
+ olares-cli files upload ./photos drive/Home/Backups/ --parallel 2 +`, + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpload(cmd.Context(), f, cmd.OutOrStdout(), args[0], args[1], o) + }, + } + cmd.Flags().IntVar(&o.parallel, "parallel", 2, + "number of files to upload concurrently (per-file chunks remain sequential)") + cmd.Flags().Int64Var(&o.chunkSize, "chunk-size", upload.DefaultChunkSize, + "chunk size in bytes (default 8 MiB; should match the server's expected size)") + cmd.Flags().IntVar(&o.maxRetries, "max-retries", upload.DefaultMaxRetries, + "maximum retry attempts per chunk on transient failures") + cmd.Flags().StringVar(&o.node, "node", "", + "override the upload node name (defaults to the first node from /api/nodes/)") + return cmd +} + +func runUpload( + ctx context.Context, + f *cmdutil.Factory, + out io.Writer, + localPath, remotePath string, + o *uploadOptions, +) error { + if ctx == nil { + ctx = context.Background() + } + if o.parallel < 1 { + o.parallel = 1 + } + + // Parse remote path with the same parser `files ls` uses, then + // enforce the upload-only constraint (drive/Home only). The chunk + // pipeline simply has no path that points at drive/Data on the + // wire — see apps/packages/app/src/api/files/v2/drive/utils.ts + // (driveCommonUrl always emits /drive/Home). + fp, err := ParseFrontendPath(remotePath) + if err != nil { + return err + } + if fp.FileType != "drive" || fp.Extend != "Home" { + return fmt.Errorf("upload destination must be under drive/Home (got %q)", fp.String()) + } + // SubPath always starts with '/' from the parser; strip the leading + // slash so BuildPlan sees a relative form like "Documents/Backups/". 
+	remoteSub := strings.TrimPrefix(fp.SubPath, "/")
+
+	rp, err := f.ResolveProfile(ctx)
+	if err != nil {
+		return err
+	}
+
+	// Build a dedicated *http.Client for the upload session: same
+	// X-Authorization injection convention as the rest of the CLI, but
+	// without the factory's 30s overall timeout — an 8 MiB chunk on a
+	// slow link can easily exceed that, and we'd rather fail via
+	// context cancellation than via http.Client.Timeout (the latter
+	// truncates the request body mid-flight, which leaves the server
+	// in an inconsistent state).
+	httpClient := newUploadHTTPClient(rp.InsecureSkipVerify)
+	client := &upload.Client{
+		HTTPClient:  httpClient,
+		BaseURL:     rp.FilesURL,
+		AccessToken: rp.AccessToken,
+	}
+
+	node := o.node
+	if node == "" {
+		nodes, err := client.FetchNodes(ctx)
+		if err != nil {
+			return fmt.Errorf("fetch upload nodes: %w", err)
+		}
+		// Mirrors the web app's getUploadNode() — first node wins. The
+		// length guard matters: an empty list from /api/nodes/ would
+		// otherwise panic on the nodes[0] index before any error fired.
+		if len(nodes) == 0 || nodes[0].Name == "" {
+			return fmt.Errorf("no usable upload node returned by /api/nodes/")
+		}
+		node = nodes[0].Name
+	}
+
+	plan, err := upload.BuildPlan(localPath, remoteSub)
+	if err != nil {
+		return err
+	}
+
+	// Why we DON'T pre-mkdir the destination root or any source-tree
+	// directory here:
+	//
+	// - POST /api/resources/...// does NOT return 409 on
+	//   a name collision; the files-backend silently auto-renames to
+	//   " (1)" and creates an empty new directory next
+	//   to the original. That's surprising for an idempotent "ensure
+	//   this dir exists" operation, and the original report from
+	//   hitting this exact bug was a stray "Documents (1)" appearing
+	//   on the server even though the file landed in the real
+	//   "Documents" via the chunk POST.
+ // - The chunk POST routes by parent_dir + relative_path and the + // server transparently creates intermediate directories on the + // way (it's how folder upload from the web app works). So the + // destination root MUST already exist (matching the web app, + // which can only upload to a directory the user already + // navigated into via the file picker), and source-tree dirs + // are auto-created by the file uploads. + // + // Empty subdirectories of a folder upload still surface in + // plan.EmptyDirs for diagnostic / future-flag use, but we + // deliberately don't mkdir them by default — same behavior as the + // browser folder picker (which can't deliver empty directories + // either). Surface a one-line note so the user isn't surprised + // that empty dirs disappeared. + if len(plan.EmptyDirs) > 0 { + fmt.Fprintf(out, "note: skipping %d empty subdirector%s (matches web app behavior; pass files instead if needed)\n", + len(plan.EmptyDirs), + pluralYies(len(plan.EmptyDirs)), + ) + } + + if len(plan.Files) == 0 { + fmt.Fprintf(out, "no files to upload (empty source or directories only)\n") + return nil + } + + // Plan summary first — gives the user something to look at while + // the first probe is in flight (which can take a second on a cold + // connection). The summary is one line so it doesn't crowd the + // per-file progress lines that follow. + totalBytes := int64(0) + for _, ft := range plan.Files { + totalBytes += ft.Size + } + fmt.Fprintf(out, "uploading %d file(s), %s, into %s (parallel=%d, chunk=%s)\n", + len(plan.Files), humanBytes(totalBytes), plan.ParentDir, + o.parallel, humanBytes(o.chunkSize)) + + return runUploads(ctx, client, plan, node, o, out) +} + +// runUploads schedules per-file UploadFile calls through an errgroup +// of `o.parallel` workers. 
Per-file failures cancel the group's +// context, so an unrecoverable error in one file aborts the rest of +// the batch quickly (otherwise the user would have to wait for every +// remaining file to also fail before getting their shell back). +func runUploads( + ctx context.Context, + client *upload.Client, + plan *upload.Plan, + node string, + o *uploadOptions, + out io.Writer, +) error { + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(o.parallel) + + var ( + mu sync.Mutex + completed int + bytesDone int64 + ) + totalFiles := len(plan.Files) + + for _, task := range plan.Files { + task := task + g.Go(func() error { + opts := plan.ToUploadOpts(task, node, o.chunkSize, o.maxRetries) + + start := time.Now() + fmt.Fprintf(out, " → %s (%s)\n", task.RelativePath, humanBytes(task.Size)) + + var lastReported int64 + progress := func(uploaded, total int64) { + // Throttle progress lines so a 50-chunk file doesn't + // emit 50 log lines: only print when crossing 25% + // boundaries. The final 100% line is always emitted + // because uploaded==total exactly. 
+ if total <= 0 { + return + } + step := total / 4 + if step <= 0 { + step = 1 + } + if uploaded == total || uploaded-lastReported >= step { + lastReported = uploaded + fmt.Fprintf(out, " %s: %d/%d (%s/%s)\n", + task.RelativePath, uploaded, total, + humanBytes(uploaded), humanBytes(total)) + } + } + if err := client.UploadFile(gctx, opts, progress); err != nil { + return fmt.Errorf("%s: %w", task.RelativePath, err) + } + + mu.Lock() + completed++ + atomic.AddInt64(&bytesDone, task.Size) + done := completed + mu.Unlock() + fmt.Fprintf(out, " ✓ %s (%s, %s) [%d/%d]\n", + task.RelativePath, humanBytes(task.Size), + time.Since(start).Truncate(time.Millisecond), + done, totalFiles) + return nil + }) + } + if err := g.Wait(); err != nil { + return err + } + fmt.Fprintf(out, "done: %d file(s), %s\n", completed, humanBytes(bytesDone)) + return nil +} + +// newUploadHTTPClient builds a *http.Client suitable for streaming +// chunks: TLS verification follows the active profile, NO overall +// Timeout (we rely on context cancellation), and explicit keep-alive + +// HTTP/2 from http.DefaultTransport. +// +// The X-Authorization header is injected per-request inside upload.Client +// (rather than via a transport wrapper) so the same Client can talk to +// httptest servers in tests without dragging the access token into the +// fixture surface. +func newUploadHTTPClient(insecureSkipVerify bool) *http.Client { + base := http.DefaultTransport.(*http.Transport).Clone() + if insecureSkipVerify { + base.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} // #nosec G402 -- explicit profile opt-in + } + return &http.Client{ + // Timeout: 0 — no overall request timeout. Big chunks on slow + // links would otherwise truncate mid-POST. Cancellation flows + // through context (Ctrl-C / parent ctx). 
+		Transport: base,
+	}
+}
+
+// pluralYies turns 1 → "y" and any other number → "ies", so the user-
+// facing "1 empty subdirectory" / "2 empty subdirectories" message
+// reads naturally without a separate string for the singular case.
+func pluralYies(n int) string {
+	if n == 1 {
+		return "y"
+	}
+	return "ies"
+}
+
+// humanBytes is the local copy of the same helper from ls.go's
+// formatSize, kept inline so upload.go has no test-time dependency on
+// the listing-only render code.
+func humanBytes(n int64) string {
+	const unit = 1024
+	// A single n < unit guard also covers negatives — unit is 1024, so
+	// every negative value is trivially below it and prints as plain
+	// bytes. (A separate n < 0 branch here would be dead code.)
+	if n < unit {
+		return fmt.Sprintf("%dB", n)
+	}
+	div, exp := int64(unit), 0
+	for n2 := n / unit; n2 >= unit; n2 /= unit {
+		div *= unit
+		exp++
+	}
+	return fmt.Sprintf("%.1f%cB", float64(n)/float64(div), "KMGTPE"[exp])
+}
diff --git a/cli/pkg/files/download/client.go b/cli/pkg/files/download/client.go
new file mode 100644
index 000000000..c21e899dc
--- /dev/null
+++ b/cli/pkg/files/download/client.go
@@ -0,0 +1,134 @@
+// Package download implements the per-file and recursive directory
+// download path for `olares-cli files download` and `files cat`. It
+// talks to the same per-user files-backend endpoints the LarePass web
+// app uses:
+//
+//   - GET /api/resources// → metadata / listing
+//     (`Stat` returns the envelope shape used by the web app's
+//     `getFileInfo` / `formatRequestUrl` helpers; see
+//     [cli/cmd/ctl/files/ls.go] for the precedent).
+//   - GET /api/raw// → raw bytes
+//     The file-server supports `Range: bytes=N-` (raw_service.go's
+//     parseRangeHeader), so single-file resume is server-driven and we
+//     don't need a sidecar progress file.
+// +// Same X-Authorization injection convention as the upload package and +// the rest of the CLI (see pkg/cmdutil/factory.go's authTransport for +// the rationale): Olares' edge stack only forwards X-Authorization to +// per-user services, so the standard `Authorization: Bearer ...` would +// silently drop the credential on the way through. +package download + +import ( + "context" + "fmt" + "io" + "net/http" + "strings" + + "github.com/beclab/Olares/cli/pkg/files/upload" +) + +// Client is the per-FilesURL handle used by Stat / List / DownloadFile +// and by the cobra command. It is cheap to construct; reuse one per +// `files download` / `files cat` invocation. +// +// AccessToken is sent as `X-Authorization` (not `Authorization: Bearer`), +// because Olares' edge stack only forwards the X-Authorization header to +// per-user services. See pkg/cmdutil/factory.go for the full rationale. +type Client struct { + HTTPClient *http.Client + BaseURL string // FilesURL, e.g. https://files.alice.olares.com + AccessToken string +} + +// HTTPError carries the status + truncated body of a non-2xx response so +// callers can branch on the status code without stringly-typed error +// parsing (the same shape as upload.HTTPError; we keep an in-package +// type so download has no leaky abstractions in its public surface). +type HTTPError struct { + Status int + Body string + URL string + Method string +} + +func (e *HTTPError) Error() string { + body := e.Body + if len(body) > 500 { + body = body[:500] + "...(truncated)" + } + return fmt.Sprintf("%s %s: HTTP %d: %s", e.Method, e.URL, e.Status, body) +} + +// resourcesURL returns `/api/resources/`. The +// caller's trailing `/` (if any) is preserved — the backend uses it as +// a "this is a directory" hint, see files/pkg/models/file_param.go's +// FileParam.convert (it splits on '/' and rejects len < 3 for resource +// listings). plainPath looks like `drive/Home/Documents` or +// `drive/Home/Documents/`. 
+func (c *Client) resourcesURL(plainPath string) string { + return c.BaseURL + "/api/resources/" + upload.EncodeURL(plainPath) +} + +// rawURL returns `/api/raw/`. Mirrors the web +// app's `driveCommonUrl('raw', filePath)` (data.ts in v2/drive). The +// raw endpoint refuses non-file paths with a 400, so callers should +// Stat first when the user-supplied path could be either. +func (c *Client) rawURL(plainPath string) string { + return c.BaseURL + "/api/raw/" + upload.EncodeURL(plainPath) +} + +// do performs a single HTTP request with the configured access token +// injected as `X-Authorization`, and returns the response body on 2xx. +// Non-2xx responses surface as *HTTPError so callers can branch on +// status (e.g. 404 from Stat is meaningful: "not found" vs. "auth +// problem"). +// +// `body` may be nil. Extra headers (Range, Accept) ride on `headers`. +// We deliberately do NOT stream the body here — Stat / List responses +// are small JSON envelopes and the caller wants them whole. The +// downloader path bypasses do() and streams resp.Body directly so we +// never buffer a multi-GB file in memory. 
+func (c *Client) do( + ctx context.Context, + method, endpoint string, + body io.Reader, + headers http.Header, +) ([]byte, error) { + req, err := http.NewRequestWithContext(ctx, method, endpoint, body) + if err != nil { + return nil, fmt.Errorf("build request: %w", err) + } + if c.AccessToken != "" { + req.Header.Set("X-Authorization", c.AccessToken) + } + for k, vs := range headers { + for _, v := range vs { + req.Header.Add(k, v) + } + } + resp, err := c.HTTPClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + respBody, _ := io.ReadAll(resp.Body) + if resp.StatusCode/100 != 2 { + return nil, &HTTPError{ + Status: resp.StatusCode, + Body: string(respBody), + URL: endpoint, + Method: method, + } + } + return respBody, nil +} + +// trimResourcesPlainPath drops a trailing '/' from a resource path so +// Stat probes hit the "metadata for X" form rather than the "listing +// of directory X/" form. Callers pre-validate that the path is +// non-empty (the cobra cmd does this via ParseFrontendPath). +func trimResourcesPlainPath(p string) string { + return strings.TrimRight(p, "/") +} diff --git a/cli/pkg/files/download/download.go b/cli/pkg/files/download/download.go new file mode 100644 index 000000000..b31d35d3b --- /dev/null +++ b/cli/pkg/files/download/download.go @@ -0,0 +1,460 @@ +// download.go: single-file download with optional resume + retry. +// +// Wire-level behavior: +// +// - GET /api/raw/; the file-server's raw_service.go sets +// `Accept-Ranges: bytes` and parseRangeHeader implements +// `Range: bytes=N-` / `bytes=N-M` / `bytes=-N`. So when the user +// passes --resume, we send `Range: bytes=-`, expect a +// 206 Partial Content, and append to the existing local file. +// - 200 OK in response to a Range request means the server ignored +// the header (most often because the resource isn't a real file — +// a redirect / cloud-backed handler — or the file changed under +// us). 
We fall back to a full overwrite via the same tmp+rename +// dance --overwrite uses, so the local file is left consistent. +// - 416 Requested Range Not Satisfiable typically means localSize == +// remoteSize (we already have the whole file). We treat that as +// success. +// +// Failure handling: +// +// - 4xx (other than 416 above) is a permanent error — no retries. +// - 5xx and transport errors retry with exponential backoff up to +// opts.MaxRetries times. Same retry classification spirit as the +// upload package's chunk POST loop. +// +// Atomicity: +// +// - Full and overwrite paths write to `dst.tmp` and rename on +// success. So a crash mid-download leaves the previous version of +// dst intact (or no file at all if it was a fresh download). +// - Resume writes directly to dst with O_APPEND. A crash mid-resume +// leaves a partial file that the next --resume run will pick up +// from — that's the whole point of the flag. +package download + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "time" +) + +// DefaultMaxRetries is the per-file retry budget on transient errors. +// Matches the upload pipeline's default chunk retries. +const DefaultMaxRetries = 3 + +// DefaultRetryBackoff: base wait between retries; the loop doubles up +// to a cap of 4s so a transient blip doesn't burn the whole budget on +// the first failure. +const DefaultRetryBackoff = 250 * time.Millisecond + +// Options controls a single DownloadFile call. Zero-valued fields fall +// back to package defaults (see normalize()). +type Options struct { + // Overwrite: if dst exists, replace it. Without this AND without + // Resume, DownloadFile errors out so the user has to opt in + // explicitly to clobber local data. + Overwrite bool + // Resume: if dst exists, ask the server to start at the local + // file's current size via `Range: bytes=N-`. 
Implies "this is the + // continuation of a previous attempt": the local tail bytes are + // trusted as-is. Falls back to a full overwrite if the server + // returns 200 (Range ignored) — see the file header for why. + Resume bool + // MaxRetries: transient error retries (0 means use the default; + // negative disables retries entirely). + MaxRetries int + // RetryBackoff: base backoff; the loop doubles each attempt up to + // 4s. 0 means use the default. + RetryBackoff time.Duration +} + +// ProgressFunc is the per-write progress callback. `written` is the +// total bytes flushed to disk so far for this file (cumulative, +// including any resumed prefix); `total` is the file's size as +// reported by the server's Content-Length / Content-Range, or -1 if +// the server didn't tell us. Called periodically (not per-byte) — the +// downloader throttles to ~64 KiB granularity so progress updates +// don't dominate CPU on fast loopback transfers. +type ProgressFunc func(written, total int64) + +// DownloadFile fetches `plainPath` (a `//` +// triple, un-encoded — the client encodes with EncodeURL internally) +// into the local file at `dst`. +// +// Returns the number of bytes WRITTEN to dst by this call (so a +// resumed download reports just the appended bytes — that matches +// the per-call "did work" semantics callers want for status lines). +// +// `progress` may be nil. When non-nil it's invoked with the cumulative +// bytes-written-this-call AND the total file size if known. +func (c *Client) DownloadFile( + ctx context.Context, + plainPath, dst string, + opts Options, + progress ProgressFunc, +) (int64, error) { + opts.normalize() + + // Decide the strategy first — it dictates which path we open and + // what Range header (if any) we send. 
+ mode, localSize, err := planLocalWrite(dst, opts) + if err != nil { + return 0, err + } + + maxAttempts := opts.MaxRetries + 1 + if maxAttempts < 1 { + maxAttempts = 1 + } + + var lastErr error + backoff := opts.RetryBackoff + for attempt := 1; attempt <= maxAttempts; attempt++ { + written, err := c.attemptDownload(ctx, plainPath, dst, mode, localSize, progress) + if err == nil { + return written, nil + } + + // Cancellation is always final. + if ctxErr := ctx.Err(); ctxErr != nil { + return written, ctxErr + } + + // 4xx (other than 416, handled inside attemptDownload as + // "already complete") is permanent: no point retrying. + var hErr *HTTPError + if errors.As(err, &hErr) && hErr.Status >= 400 && hErr.Status < 500 { + return written, err + } + + lastErr = err + if attempt < maxAttempts { + select { + case <-time.After(backoff): + case <-ctx.Done(): + return written, ctx.Err() + } + // Exponential backoff capped at 4s. + backoff *= 2 + if backoff > 4*time.Second { + backoff = 4 * time.Second + } + } + } + return 0, fmt.Errorf("download %s: exhausted %d attempts: %w", plainPath, maxAttempts, lastErr) +} + +// writeMode encodes how attemptDownload should open the destination +// file. Pulled out as a typed value rather than a bag of bools so the +// branching inside attemptDownload reads naturally. +type writeMode int + +const ( + // writeFresh: dst doesn't exist (or we don't care about its + // previous contents). Write to dst.tmp + rename. + writeFresh writeMode = iota + // writeOverwrite: dst exists and Overwrite is set. Write to + // dst.tmp + rename so the previous version stays intact until the + // new one is fully on disk. + writeOverwrite + // writeResume: dst exists and Resume is set. Open dst with + // O_APPEND and ask the server for `Range: bytes=-`. + writeResume +) + +// planLocalWrite picks the writeMode + initial local size based on +// what's currently at `dst` and which flags the caller passed. 
It's +// the only thing in this file that touches `os.Stat`, so the test +// matrix lives in one place. +func planLocalWrite(dst string, opts Options) (writeMode, int64, error) { + st, err := os.Stat(dst) + switch { + case err == nil && st.IsDir(): + return 0, 0, fmt.Errorf("local destination %q is an existing directory", dst) + case err == nil: + switch { + case opts.Resume: + return writeResume, st.Size(), nil + case opts.Overwrite: + return writeOverwrite, 0, nil + default: + return 0, 0, fmt.Errorf( + "local file %q already exists; pass --overwrite to replace it or --resume to continue a previous download", + dst, + ) + } + case errors.Is(err, os.ErrNotExist): + return writeFresh, 0, nil + default: + return 0, 0, fmt.Errorf("stat %s: %w", dst, err) + } +} + +// attemptDownload runs one HTTP request + body copy. It is called +// from DownloadFile inside a retry loop, so it must: +// - leave dst in a valid state on failure (tmp file is cleaned up; +// resume mode never closes the real file in a half-flushed state); +// - return enough information for the retry classifier (status code +// embedded in *HTTPError for 4xx/5xx, raw transport errors for the +// network layer). +func (c *Client) attemptDownload( + ctx context.Context, + plainPath, dst string, + mode writeMode, + localSize int64, + progress ProgressFunc, +) (int64, error) { + endpoint := c.rawURL(plainPath) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil) + if err != nil { + return 0, fmt.Errorf("build request: %w", err) + } + if c.AccessToken != "" { + req.Header.Set("X-Authorization", c.AccessToken) + } + if mode == writeResume && localSize > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", localSize)) + } + + resp, err := c.HTTPClient.Do(req) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // Two cases collapse here: + // 1. fresh / overwrite: expected. + // 2. resume: the server ignored Range. 
Fall back to full + // overwrite — re-download the whole file via tmp+rename + // so we never leave dst in a torn state. + return writeFullToDst(dst, resp, progress) + case http.StatusPartialContent: + if mode != writeResume { + // We didn't ask for a range but the server sent one + // anyway. Don't second-guess — append the partial body + // to dst.tmp, but treat as fresh write so the on-disk + // state stays well-defined. + return writeFullToDst(dst, resp, progress) + } + return appendToDst(dst, localSize, resp, progress) + case http.StatusRequestedRangeNotSatisfiable: + // Almost always means localSize == remoteSize: the file is + // already complete. Surface that as success; the user gets a + // "0 new bytes" line in the cobra cmd's progress output. + if mode == writeResume { + return 0, nil + } + fallthrough + default: + body, _ := io.ReadAll(resp.Body) + return 0, &HTTPError{ + Status: resp.StatusCode, + Body: string(body), + URL: endpoint, + Method: http.MethodGet, + } + } +} + +// writeFullToDst streams resp.Body into `dst.tmp` and renames it onto +// `dst` on success. Used by the fresh / overwrite paths AND by the +// resume-fell-back-to-200 case, so an in-flight failure can never +// corrupt a previously-good local file. +func writeFullToDst(dst string, resp *http.Response, progress ProgressFunc) (int64, error) { + if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil { + return 0, fmt.Errorf("mkdir parent of %s: %w", dst, err) + } + tmp := dst + ".tmp" + // O_TRUNC so a stale tmp from a previous failed attempt doesn't + // concatenate with the new body. 0o644 matches `cp` defaults; the + // user's umask still applies via the OS layer. 
+ f, err := os.OpenFile(tmp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644) + if err != nil { + return 0, fmt.Errorf("open %s: %w", tmp, err) + } + total := contentLengthOrTotal(resp) + written, copyErr := copyWithProgress(f, resp.Body, total, progress) + closeErr := f.Close() + if copyErr != nil { + _ = os.Remove(tmp) + return written, copyErr + } + if closeErr != nil { + _ = os.Remove(tmp) + return written, fmt.Errorf("close %s: %w", tmp, closeErr) + } + if err := os.Rename(tmp, dst); err != nil { + _ = os.Remove(tmp) + return written, fmt.Errorf("rename %s -> %s: %w", tmp, dst, err) + } + return written, nil +} + +// appendToDst is the resume path: open dst with O_APPEND and stream +// the partial-content body straight onto the end. We don't use a tmp +// file here because the server has already promised us "exactly the +// bytes from offset N onwards" and we want a crash mid-resume to +// leave a longer (still-resumable) prefix, not a truncated one. +func appendToDst(dst string, localSize int64, resp *http.Response, progress ProgressFunc) (int64, error) { + f, err := os.OpenFile(dst, os.O_WRONLY|os.O_APPEND, 0) + if err != nil { + return 0, fmt.Errorf("open %s for append: %w", dst, err) + } + defer f.Close() + + // total = current local size + remaining. Prefer the parsed + // Content-Range total when present; fall back to local + + // Content-Length when not. + total := totalFromContentRange(resp) + if total < 0 { + if cl := resp.ContentLength; cl >= 0 { + total = localSize + cl + } + } + + // Wrap the progress fn so the caller sees cumulative bytes for the + // file (local + new), not just the new tail — matches what `wget + // -c` / `curl -C -` show. 
+ var wrapped ProgressFunc + if progress != nil { + wrapped = func(written, t int64) { + progress(localSize+written, t) + } + } + written, copyErr := copyWithProgress(f, resp.Body, total, wrapped) + if copyErr != nil { + return written, copyErr + } + return written, nil +} + +// copyWithProgress is io.Copy with a 64 KiB buffer + a per-buffer +// progress callback. Throttled to one callback per full 64 KiB read +// (or the final short read), which keeps the CLI's terminal output +// reasonable on fast networks without losing fidelity on slow ones. +func copyWithProgress(dst io.Writer, src io.Reader, total int64, progress ProgressFunc) (int64, error) { + const bufSize = 64 * 1024 + buf := make([]byte, bufSize) + var written int64 + for { + n, rerr := src.Read(buf) + if n > 0 { + nw, werr := dst.Write(buf[:n]) + written += int64(nw) + if werr != nil { + return written, werr + } + if nw < n { + return written, io.ErrShortWrite + } + if progress != nil { + progress(written, total) + } + } + if rerr != nil { + if rerr == io.EOF { + return written, nil + } + return written, rerr + } + } +} + +// contentLengthOrTotal returns the file's expected total size, derived +// from the response. For a 200 it's just Content-Length; for a 206 +// it's parsed from `Content-Range: bytes -/`. Returns -1 +// when neither is informative (chunked transfer with no length). +func contentLengthOrTotal(resp *http.Response) int64 { + if resp.StatusCode == http.StatusPartialContent { + if t := totalFromContentRange(resp); t >= 0 { + return t + } + } + if resp.ContentLength >= 0 { + return resp.ContentLength + } + return -1 +} + +// totalFromContentRange parses the `/` suffix of a Content-Range +// header. Returns -1 when the header is missing / malformed / `*`. 
+func totalFromContentRange(resp *http.Response) int64 {
+	header := resp.Header.Get("Content-Range")
+	if header == "" {
+		return -1
+	}
+	slash := strings.LastIndex(header, "/")
+	if slash < 0 || slash == len(header)-1 {
+		return -1
+	}
+	suffix := header[slash+1:]
+	if suffix == "*" {
+		return -1
+	}
+	total, err := strconv.ParseInt(suffix, 10, 64)
+	if err != nil {
+		return -1
+	}
+	return total
+}
+
+// StreamRaw issues GET /api/raw/?inline=true and copies
+// the response body to `w`. Used by `files cat` so the body lands on
+// stdout without ever being fully buffered (a 4 GiB file is a valid
+// `cat` target on the wire even if it's a poor choice on the human
+// side).
+//
+// `inline=true` mirrors what the LarePass web app's
+// formatFileContent / preview pipelines do (data.ts in v2/drive). It
+// only changes Content-Disposition on the response, but we keep it
+// because some files-backend code paths key off it for cache headers.
+//
+// Errors:
+//   - non-2xx surfaces as *HTTPError (same shape as DownloadFile).
+//     400 is the "not a file" code from raw_service.go; the caller
+//     should Stat first to give the user a friendlier error than the
+//     server's "not a file, path: ..." message.
+func (c *Client) StreamRaw(ctx context.Context, plainPath string, w io.Writer) (int64, error) { + endpoint := c.rawURL(plainPath) + "?inline=true" + req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil) + if err != nil { + return 0, fmt.Errorf("build request: %w", err) + } + if c.AccessToken != "" { + req.Header.Set("X-Authorization", c.AccessToken) + } + resp, err := c.HTTPClient.Do(req) + if err != nil { + return 0, err + } + defer resp.Body.Close() + if resp.StatusCode/100 != 2 { + body, _ := io.ReadAll(resp.Body) + return 0, &HTTPError{ + Status: resp.StatusCode, + Body: string(body), + URL: endpoint, + Method: http.MethodGet, + } + } + return io.Copy(w, resp.Body) +} + +func (o *Options) normalize() { + if o.MaxRetries == 0 { + o.MaxRetries = DefaultMaxRetries + } + if o.RetryBackoff == 0 { + o.RetryBackoff = DefaultRetryBackoff + } +} diff --git a/cli/pkg/files/download/download_test.go b/cli/pkg/files/download/download_test.go new file mode 100644 index 000000000..91ea0c24d --- /dev/null +++ b/cli/pkg/files/download/download_test.go @@ -0,0 +1,466 @@ +package download + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strconv" + "strings" + "sync/atomic" + "testing" + "time" +) + +// newTestClient wires a *Client up to an httptest.Server. We always +// inject a recognisable token so each test can assert that +// X-Authorization made it onto the wire (the most common refactor +// regression in this CLI is dropping the header). +func newTestClient(t *testing.T, h http.Handler) (*Client, *httptest.Server) { + t.Helper() + srv := httptest.NewServer(h) + t.Cleanup(srv.Close) + return &Client{ + HTTPClient: srv.Client(), + BaseURL: srv.URL, + AccessToken: "test-token-XYZ", + }, srv +} + +// TestStat_LeafLookupInParent: Stat must list the *parent* and find +// the leaf there, NOT probe /api/resources/ directly. 
+// The backend's single-file GET path returns HTTP 500 for many real
+// files (it tries to read content into the response — see stat.go's
+// statByParentListing comment for why), so the parent-listing
+// strategy is the only one the wire actually supports.
+func TestStat_LeafLookupInParent(t *testing.T) {
+	for _, tc := range []struct {
+		name        string
+		path        string
+		wantParent  string // expected URL path on the wire, including trailing slash
+		wantLeaf    string
+		wantIsDir   bool
+		wantSize    int64
+		entriesJSON string
+	}{
+		{
+			name:       "file under nested parent",
+			path:       "drive/Home/Documents/foo.pdf",
+			wantParent: "/api/resources/drive/Home/Documents/",
+			wantLeaf:   "foo.pdf",
+			wantSize:   1234,
+			entriesJSON: `[
+				{"name":"other.pdf","isDir":false,"size":1},
+				{"name":"foo.pdf","isDir":false,"size":1234}
+			]`,
+		},
+		{
+			name:       "directory under volume root",
+			path:       "drive/Home/Backups",
+			wantParent: "/api/resources/drive/Home/",
+			wantLeaf:   "Backups",
+			wantIsDir:  true,
+			entriesJSON: `[
+				{"name":"Backups","isDir":true,"size":0}
+			]`,
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				if r.Header.Get("X-Authorization") != "test-token-XYZ" {
+					t.Fatalf("missing/wrong X-Authorization")
+				}
+				if r.Method != http.MethodGet {
+					t.Fatalf("want GET, got %s", r.Method)
+				}
+				if r.URL.Path != tc.wantParent {
+					t.Fatalf("Stat should hit the parent listing URL %q, got %q",
+						tc.wantParent, r.URL.Path)
+				}
+				_, _ = io.WriteString(w, `{"items":`+tc.entriesJSON+`}`)
+			}))
+			info, err := client.Stat(context.Background(), tc.path)
+			if err != nil {
+				t.Fatalf("Stat: %v", err)
+			}
+			if info.Name != tc.wantLeaf {
+				t.Errorf("Name: want %q, got %q", tc.wantLeaf, info.Name)
+			}
+			if info.IsDir != tc.wantIsDir {
+				t.Errorf("IsDir: want %v, got %v", tc.wantIsDir, info.IsDir)
+			}
+			if !info.IsDir && info.Size != tc.wantSize {
+				t.Errorf("Size: want %d, got %d", tc.wantSize, info.Size)
+			}
+
+		})
+	}
+}
+
+// TestStat_VolumeRoot: paths that are themselves the root of a
+// `<fileType>/<extend>` tuple (e.g. "drive/Home") have no parent to
+// list. Stat returns a synthetic dir record without touching the
+// network.
+func TestStat_VolumeRoot(t *testing.T) {
+	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		t.Errorf("Stat at volume root should not hit the network, got %s %s", r.Method, r.URL.Path)
+	}))
+	info, err := client.Stat(context.Background(), "drive/Home")
+	if err != nil {
+		t.Fatalf("Stat: %v", err)
+	}
+	if !info.IsDir || info.Name != "Home" {
+		t.Errorf("volume-root Stat: want {Name:Home, IsDir:true}, got %+v", info)
+	}
+}
+
+// TestStat_NotFound covers two paths that should both surface as
+// IsNotFound: the parent listing returning 404, and the parent
+// existing but the leaf not being in it (synthetic 404).
+func TestStat_NotFound(t *testing.T) {
+	t.Run("parent 404", func(t *testing.T) {
+		client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(http.StatusNotFound)
+			_, _ = io.WriteString(w, `{"error":"not found"}`)
+		}))
+		_, err := client.Stat(context.Background(), "drive/Home/missing-dir/foo")
+		if !IsNotFound(err) {
+			t.Errorf("IsNotFound should be true for parent 404, got: %v", err)
+		}
+	})
+	t.Run("leaf not in parent listing", func(t *testing.T) {
+		client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			_, _ = io.WriteString(w, `{"items":[{"name":"other.txt","isDir":false,"size":1}]}`)
+		}))
+		_, err := client.Stat(context.Background(), "drive/Home/Documents/missing.txt")
+		if !IsNotFound(err) {
+			t.Errorf("IsNotFound should be true for missing leaf, got: %v", err)
+		}
+	})
+}
+
+// TestList_HappyPath: List must always hit a trailing-slash URL and
+// decode the envelope's `items` array into []Entry.
+func TestList_HappyPath(t *testing.T) {
+	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if !strings.HasSuffix(r.URL.Path, "/") {
+			t.Errorf("List should hit a trailing-slash URL, got %q", r.URL.Path)
+		}
+		_, _ = io.WriteString(w, `{
+			"items":[
+				{"name":"a.txt","isDir":false,"size":10},
+				{"name":"sub","isDir":true,"size":0}
+			]
+		}`)
+	}))
+	entries, err := client.List(context.Background(), "drive/Home/Documents")
+	if err != nil {
+		t.Fatalf("List: %v", err)
+	}
+	if len(entries) != 2 {
+		t.Fatalf("want 2 entries, got %d", len(entries))
+	}
+	if entries[0].Name != "a.txt" || entries[0].IsDir || entries[0].Size != 10 {
+		t.Errorf("entries[0] mismatch: %+v", entries[0])
+	}
+	if entries[1].Name != "sub" || !entries[1].IsDir {
+		t.Errorf("entries[1] mismatch: %+v", entries[1])
+	}
+}
+
+// TestDownloadFile_Fresh exercises the "no local file yet" path: full
+// 200, written via tmp+rename. Asserts the local file ends up with the
+// exact body bytes and the .tmp file does NOT linger.
+func TestDownloadFile_Fresh(t *testing.T) {
+	body := []byte("hello world payload")
+	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.Header.Get("Range") != "" {
+			t.Fatalf("fresh download should not send Range, got %q", r.Header.Get("Range"))
+		}
+		w.Header().Set("Content-Length", strconv.Itoa(len(body)))
+		_, _ = w.Write(body)
+	}))
+	dst := filepath.Join(t.TempDir(), "fresh.bin")
+
+	written, err := client.DownloadFile(context.Background(), "drive/Home/foo", dst, Options{}, nil)
+	if err != nil {
+		t.Fatalf("DownloadFile: %v", err)
+	}
+	if written != int64(len(body)) {
+		t.Errorf("written: want %d, got %d", len(body), written)
+	}
+	got, err := os.ReadFile(dst)
+	if err != nil {
+		t.Fatalf("read dst: %v", err)
+	}
+	if !bytes.Equal(got, body) {
+		t.Errorf("body mismatch: got %q want %q", got, body)
+	}
+	if _, err := os.Stat(dst + ".tmp"); !errors.Is(err, os.ErrNotExist) {
+		t.Errorf(".tmp file should be cleaned up after rename, stat err = %v", err)
+	}
+}
+
+// TestDownloadFile_RefuseExisting confirms the safety policy: without
+// --overwrite or --resume, an existing local file blocks the download.
+func TestDownloadFile_RefuseExisting(t *testing.T) {
+	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		t.Errorf("should not have hit the server")
+	}))
+	dir := t.TempDir()
+	dst := filepath.Join(dir, "exists.bin")
+	if err := os.WriteFile(dst, []byte("existing"), 0o644); err != nil {
+		t.Fatalf("seed dst: %v", err)
+	}
+	_, err := client.DownloadFile(context.Background(), "drive/Home/foo", dst, Options{}, nil)
+	if err == nil {
+		t.Fatal("expected refusal error")
+	}
+	// The refusal message must advertise both escape hatches.
+	if !strings.Contains(err.Error(), "--overwrite") || !strings.Contains(err.Error(), "--resume") {
+		t.Errorf("error should mention both flags, got: %v", err)
+	}
+}
+
+// TestDownloadFile_Resume sends a Range header and an appended body;
+// the file should grow from local size to local+payload.
+func TestDownloadFile_Resume(t *testing.T) {
+	prefix := []byte("first half-")
+	tail := []byte("second half!")
+
+	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		rng := r.Header.Get("Range")
+		wantRng := fmt.Sprintf("bytes=%d-", len(prefix))
+		if rng != wantRng {
+			t.Fatalf("Range header: want %q, got %q", wantRng, rng)
+		}
+		w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d",
+			len(prefix), len(prefix)+len(tail)-1, len(prefix)+len(tail)))
+		w.Header().Set("Content-Length", strconv.Itoa(len(tail)))
+		w.WriteHeader(http.StatusPartialContent)
+		_, _ = w.Write(tail)
+	}))
+	dir := t.TempDir()
+	dst := filepath.Join(dir, "resume.bin")
+	if err := os.WriteFile(dst, prefix, 0o644); err != nil {
+		t.Fatalf("seed: %v", err)
+	}
+
+	written, err := client.DownloadFile(context.Background(), "drive/Home/foo", dst, Options{Resume: true}, nil)
+	if err != nil {
+		t.Fatalf("DownloadFile: %v", err)
+	}
+	if written != int64(len(tail)) {
+		t.Errorf("written should be the tail only: want %d, got %d", len(tail), written)
+	}
+	got, err := os.ReadFile(dst)
+	if err != nil {
+		t.Fatalf("read dst: %v", err)
+	}
+	want := append(append([]byte(nil), prefix...), tail...)
+	if !bytes.Equal(got, want) {
+		t.Errorf("body mismatch: got %q want %q", got, want)
+	}
+}
+
+// TestDownloadFile_RangeIgnored covers the "we asked for Range but
+// the server replied 200" case — falls back to a clean overwrite.
+func TestDownloadFile_RangeIgnored(t *testing.T) {
+	full := []byte("ABCDEFGHIJKLMNOP")
+	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.Header.Get("Range") == "" {
+			t.Fatal("client should have sent Range with --resume")
+		}
+		w.Header().Set("Content-Length", strconv.Itoa(len(full)))
+		_, _ = w.Write(full) // 200, ignoring Range
+	}))
+	dst := filepath.Join(t.TempDir(), "fallback.bin")
+	if err := os.WriteFile(dst, []byte("OLD-PARTIAL"), 0o644); err != nil {
+		t.Fatalf("seed: %v", err)
+	}
+	written, err := client.DownloadFile(context.Background(), "drive/Home/foo", dst, Options{Resume: true}, nil)
+	if err != nil {
+		t.Fatalf("DownloadFile: %v", err)
+	}
+	if written != int64(len(full)) {
+		t.Errorf("written: want %d, got %d", len(full), written)
+	}
+	got, err := os.ReadFile(dst)
+	if err != nil {
+		t.Fatalf("read dst: %v", err)
+	}
+	if !bytes.Equal(got, full) {
+		t.Errorf("dst should hold full body now, got %q", got)
+	}
+}
+
+// TestDownloadFile_416Complete covers the "we asked for Range, server
+// says 416 because we already have everything" case — return success
+// with 0 bytes written.
+func TestDownloadFile_416Complete(t *testing.T) {
+	full := []byte("complete")
+	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", len(full)))
+		w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
+	}))
+	dst := filepath.Join(t.TempDir(), "done.bin")
+	if err := os.WriteFile(dst, full, 0o644); err != nil {
+		t.Fatalf("seed: %v", err)
+	}
+	written, err := client.DownloadFile(context.Background(), "drive/Home/foo", dst, Options{Resume: true}, nil)
+	if err != nil {
+		t.Fatalf("416 with --resume should succeed: %v", err)
+	}
+	if written != 0 {
+		t.Errorf("written should be 0 for already-complete, got %d", written)
+	}
+}
+
+// TestDownloadFile_OverwriteUsesTmpRename confirms --overwrite goes
+// through the .tmp + rename safe-write path (never truncate-in-place).
+func TestDownloadFile_OverwriteUsesTmpRename(t *testing.T) {
+	body := []byte("new contents")
+	dst := filepath.Join(t.TempDir(), "overwrite.bin")
+	if err := os.WriteFile(dst, []byte("old"), 0o644); err != nil {
+		t.Fatalf("seed: %v", err)
+	}
+	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.Header.Get("Range") != "" {
+			t.Fatalf("--overwrite should not send Range")
+		}
+		_, _ = w.Write(body)
+	}))
+	_, err := client.DownloadFile(context.Background(), "drive/Home/foo", dst, Options{Overwrite: true}, nil)
+	if err != nil {
+		t.Fatalf("DownloadFile: %v", err)
+	}
+	got, err := os.ReadFile(dst)
+	if err != nil {
+		t.Fatalf("read dst: %v", err)
+	}
+	if !bytes.Equal(got, body) {
+		t.Errorf("dst should be the new body: got %q", got)
+	}
+	if _, err := os.Stat(dst + ".tmp"); !errors.Is(err, os.ErrNotExist) {
+		t.Errorf(".tmp file should be cleaned up after rename")
+	}
+}
+
+// TestDownloadFile_PermanentError: 4xx (non-416) should fail
+// immediately, no retries.
+func TestDownloadFile_PermanentError(t *testing.T) {
+	var hits int32
+	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		atomic.AddInt32(&hits, 1)
+		w.WriteHeader(http.StatusForbidden)
+	}))
+	_, err := client.DownloadFile(context.Background(), "drive/Home/foo",
+		filepath.Join(t.TempDir(), "x"), Options{MaxRetries: 5}, nil)
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	if got := atomic.LoadInt32(&hits); got != 1 {
+		t.Errorf("4xx should not retry; hit %d times", got)
+	}
+}
+
+// TestDownloadFile_TransientRetry: 503 once, then 200. The retry loop
+// should swallow the transient and return success.
+func TestDownloadFile_TransientRetry(t *testing.T) {
+	body := []byte("ok")
+	var hits int32
+	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		n := atomic.AddInt32(&hits, 1)
+		if n == 1 {
+			w.WriteHeader(http.StatusServiceUnavailable)
+			return
+		}
+		_, _ = w.Write(body)
+	}))
+	dst := filepath.Join(t.TempDir(), "retry.bin")
+	written, err := client.DownloadFile(context.Background(), "drive/Home/foo", dst, Options{
+		MaxRetries:   3,
+		RetryBackoff: time.Millisecond,
+	}, nil)
+	if err != nil {
+		t.Fatalf("DownloadFile: %v", err)
+	}
+	if written != int64(len(body)) {
+		t.Errorf("written: want %d, got %d", len(body), written)
+	}
+	if got := atomic.LoadInt32(&hits); got != 2 {
+		t.Errorf("expected 2 hits (1 transient + 1 success), got %d", got)
+	}
+}
+
+// TestStreamRaw_HappyPath covers cat's wire path: GET /api/raw with
+// inline=true, no Range, body streamed to stdout.
+func TestStreamRaw_HappyPath(t *testing.T) {
+	body := []byte("file body for cat\n")
+	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if !strings.HasPrefix(r.URL.Path, "/api/raw/") {
+			t.Errorf("want /api/raw/ prefix, got %s", r.URL.Path)
+		}
+		if r.URL.Query().Get("inline") != "true" {
+			t.Errorf("want inline=true, got %q", r.URL.Query().Get("inline"))
+		}
+		if r.Header.Get("X-Authorization") != "test-token-XYZ" {
+			t.Errorf("missing X-Authorization")
+		}
+		_, _ = w.Write(body)
+	}))
+	var buf bytes.Buffer
+	n, err := client.StreamRaw(context.Background(), "drive/Home/foo.txt", &buf)
+	if err != nil {
+		t.Fatalf("StreamRaw: %v", err)
+	}
+	if n != int64(len(body)) {
+		t.Errorf("returned bytes: want %d, got %d", len(body), n)
+	}
+	if !bytes.Equal(buf.Bytes(), body) {
+		t.Errorf("body mismatch: got %q", buf.Bytes())
+	}
+}
+
+// TestStreamRaw_NonFile: server returns 400 (the raw_service.go
+// "not a file" path). Surfaced as a typed *HTTPError so cat.go can
+// turn it into a friendly message.
+func TestStreamRaw_NonFile(t *testing.T) {
+	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusBadRequest)
+		_, _ = io.WriteString(w, `{"error":"not a file"}`)
+	}))
+	_, err := client.StreamRaw(context.Background(), "drive/Home/Documents", io.Discard)
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	var hErr *HTTPError
+	if !errors.As(err, &hErr) {
+		t.Fatalf("want *HTTPError, got %T: %v", err, err)
+	}
+	if hErr.Status != http.StatusBadRequest {
+		t.Errorf("status: want 400, got %d", hErr.Status)
+	}
+}
+
+// TestDownloadFile_ContextCanceled confirms cancellation aborts
+// without burning the retry budget.
+func TestDownloadFile_ContextCanceled(t *testing.T) {
+	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Block briefly so the cancellation has time to land.
+		time.Sleep(50 * time.Millisecond)
+		_, _ = w.Write([]byte("x"))
+	}))
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel()
+
+	_, err := client.DownloadFile(ctx, "drive/Home/foo",
+		filepath.Join(t.TempDir(), "cancel.bin"), Options{}, nil)
+	if err == nil {
+		t.Fatal("expected cancellation error")
+	}
+	if !errors.Is(err, context.Canceled) {
+		t.Errorf("want context.Canceled, got %v", err)
+	}
+}
diff --git a/cli/pkg/files/download/list.go b/cli/pkg/files/download/list.go
new file mode 100644
index 000000000..4bd803d55
--- /dev/null
+++ b/cli/pkg/files/download/list.go
@@ -0,0 +1,71 @@
+// list.go: list a remote directory via GET /api/resources/<path>/.
+// The walker calls this once per directory to drive the recursive
+// download. Same envelope shape as `files ls` consumes (see
+// cli/cmd/ctl/files/ls.go's listingResponse), but we project even
+// further down: name + isDir + size is everything the walker needs.
+package download
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+// Entry is one item in a directory listing.
+type Entry struct {
+	Name  string
+	IsDir bool
+	Size  int64
+}
+
+// itemEnvelope: the per-item shape inside the parent envelope's
+// `items` array. We keep the json-vs-Go field mapping in one place
+// rather than tagging Entry directly, so the public type stays free
+// of wire-format concerns. The full backend envelope also carries
+// parent-level Name / Modified / NumDirs / NumFiles fields (see
+// cli/cmd/ctl/files/ls.go's listingResponse for the shape `files ls`
+// renders) — the walker doesn't need them, so we don't decode them.
+type itemEnvelope struct {
+	Name  string `json:"name"`
+	IsDir bool   `json:"isDir"`
+	Size  int64  `json:"size"`
+}
+
+// List does GET /api/resources/<path>/ and returns the entries
+// inside that directory. The trailing slash is enforced internally —
+// the backend's FileParam.convert rejects requests with fewer than 3
+// '/'-split segments, and the trailing slash is what guarantees that
+// invariant for shallow paths like `drive/Home/`.
+//
+// The envelope includes a `parent` block, but we only consume `items`
+// here; callers that need parent metadata should Stat the path
+// separately.
+func (c *Client) List(ctx context.Context, plainPath string) ([]Entry, error) {
+	if !strings.HasSuffix(plainPath, "/") {
+		plainPath += "/"
+	}
+	endpoint := c.resourcesURL(plainPath)
+	body, err := c.do(ctx, http.MethodGet, endpoint, nil, http.Header{
+		"Accept": []string{"application/json"},
+	})
+	if err != nil {
+		return nil, err
+	}
+	// Decode into the local struct first so we can convert each item
+	// into the public Entry without leaking json tags into our public
+	// surface.
+	var env struct {
+		Items []itemEnvelope `json:"items"`
+	}
+	if err := json.Unmarshal(body, &env); err != nil {
+		return nil, fmt.Errorf("decode listing response: %w", err)
+	}
+	out := make([]Entry, 0, len(env.Items))
+	for _, it := range env.Items {
+		out = append(out, Entry{Name: it.Name, IsDir: it.IsDir, Size: it.Size})
+	}
+	return out, nil
+}
+
diff --git a/cli/pkg/files/download/stat.go b/cli/pkg/files/download/stat.go
new file mode 100644
index 000000000..7b39dc453
--- /dev/null
+++ b/cli/pkg/files/download/stat.go
@@ -0,0 +1,140 @@
+// stat.go: figure out whether a remote path is a file or a directory
+// and how big it is. Used by:
+//
+//   - the download cobra cmd, to decide single-file vs. recursive
+//     directory mode and to print a remote-size line up-front;
+//   - the cat cobra cmd, to refuse early when the user points it at a
+//     directory (the `/api/raw` endpoint would 400, but the error
+//     message is much friendlier to surface here);
+//   - the recursive walker, to seed the traversal at the user-supplied
+//     root.
+//
+// Implementation strategy: list the parent directory and look up the
+// basename in its items array, exactly like the LarePass web app does
+// (every navigation in the UI uses the parent's listing for per-entry
+// metadata, never a single-resource probe). Why not GET
+// /api/resources/<path> directly? — see the comment on
+// statByParentListing for the gory details, but the short version is
+// that the backend's "List" handler is hard-coded to set
+// `Content: true` (files/pkg/drivers/posix/posix/posix.go's getFiles)
+// and tries to slurp the file's contents, returning HTTP 500 for
+// json / binary / large files. We only need (Name, IsDir, Size), so
+// the parent-listing path is both more reliable and strictly cheaper
+// than fetching content we don't want.
+package download
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+// StatInfo is the projection of files-backend's per-resource envelope
+// that the download flow needs. The backend's full FileInfo carries
+// many more fields (mode / modified / type / numFiles / numDirs / ...);
+// we only decode what download / cat / walker actually branch on.
+type StatInfo struct {
+	// Name is the basename the backend reports for the resource.
+	Name string
+	// IsDir is true when the resource is a directory.
+	IsDir bool
+	// Size is the file size in bytes; meaningless for directories
+	// (the backend may report 0 or the sum of immediate children
+	// depending on driver — we don't rely on this for dirs).
+	Size int64
+}
+
+// Stat resolves `plainPath` (an un-encoded `<fileType>/<extend>/<path>`
+// triple) to a (Name, IsDir, Size) record, by listing the parent
+// directory and looking up the basename. The parent-listing strategy
+// is what the LarePass web app uses; a direct
+// GET /api/resources/<path> is unreliable on the current
+// backend (returns HTTP 500 for many real files because the underlying
+// List handler tries to read file content into the response).
+//
+// Special cases:
+//   - When `plainPath` resolves to the root of `<fileType>/<extend>`
+//     (e.g. "drive/Home", "sync/<extend>") there is no parent to list
+//     — Stat returns a synthetic IsDir=true record. This matches what
+//     `files ls drive/Home/` does logically (the volume root is
+//     always a directory).
+//   - When the parent directory itself doesn't exist OR auth fails,
+//     the underlying List error is returned verbatim; callers use
+//     IsNotFound to distinguish "this path doesn't exist on the
+//     server" from "your token is bad / network is down".
+//   - When the parent exists but the basename isn't in its items
+//     array, Stat returns an *HTTPError with Status=404 so callers
+//     can branch on IsNotFound uniformly.
+func (c *Client) Stat(ctx context.Context, plainPath string) (*StatInfo, error) {
+	clean := strings.Trim(plainPath, "/")
+	if clean == "" {
+		return nil, errors.New("Stat: empty path")
+	}
+	segs := strings.Split(clean, "/")
+	// Need at least 3 segments (fileType / extend / leaf) to have a
+	// parent under <fileType>/<extend>. With 2 or fewer segments
+	// we're already at the volume root — synthesise a dir record.
+	if len(segs) <= 2 {
+		return &StatInfo{Name: segs[len(segs)-1], IsDir: true}, nil
+	}
+	return c.statByParentListing(ctx, segs)
+}
+
+// statByParentListing implements the parent-list-and-lookup strategy.
+//
+// Why we don't probe GET /api/resources/<path>:
+//
+// files/pkg/hertz/biz/handler/api/resources/resources_service.go's
+// GetResourcesMethod always invokes Storage.List, and
+// files/pkg/drivers/posix/posix/posix.go's List in turn calls
+// getFiles(..., Expand, Content) — Content=true means the backend
+// tries to read the entire file into the response on a single-file
+// GET. That blows up (HTTP 500) for json / binary / large files,
+// even though the metadata it returns first would have been
+// perfectly fine. Listing the parent + finding the entry there
+// side-steps this entirely and matches what the LarePass web app
+// already does.
+func (c *Client) statByParentListing(ctx context.Context, segs []string) (*StatInfo, error) {
+	leaf := segs[len(segs)-1]
+	parentSegs := segs[:len(segs)-1]
+	parentPath := strings.Join(parentSegs, "/") + "/"
+
+	entries, err := c.List(ctx, parentPath)
+	if err != nil {
+		return nil, err
+	}
+	for _, e := range entries {
+		if e.Name == leaf {
+			return &StatInfo{Name: e.Name, IsDir: e.IsDir, Size: e.Size}, nil
+		}
+	}
+	// Parent listing succeeded but our leaf isn't there — synthesise
+	// a 404 so the caller's IsNotFound predicate fires.
+	return nil, &HTTPError{
+		Status: http.StatusNotFound,
+		Body:   fmt.Sprintf("entry %q not found in parent listing", leaf),
+		URL:    c.resourcesURL(parentPath),
+		Method: http.MethodGet,
+	}
+}
+
+// IsNotFound reports whether `err` represents "this remote path
+// doesn't exist". Two cases collapse here:
+//
+//   - the parent directory listing returned 404 (HTTPError);
+//   - the parent listing succeeded but the leaf basename wasn't in
+//     its items (synthetic 404 from statByParentListing).
+//
+// Callers (the download cobra cmd) use this to decide whether to
+// emit a "did you mean ...?" hint vs. a generic auth/network error.
+// errors.As keeps the predicate robust if the error gets wrapped by
+// a higher layer.
+func IsNotFound(err error) bool {
+	var hErr *HTTPError
+	if errors.As(err, &hErr) {
+		return hErr.Status == http.StatusNotFound
+	}
+	return false
+}
diff --git a/cli/pkg/files/download/walker.go b/cli/pkg/files/download/walker.go
new file mode 100644
index 000000000..40d4302eb
--- /dev/null
+++ b/cli/pkg/files/download/walker.go
@@ -0,0 +1,195 @@
+// walker.go: turn a remote directory path into a flat list of files to
+// download plus the empty subdirectories that need to be created
+// locally. Symmetric to upload's walker.go (which goes the other way),
+// but drives a remote `List` instead of a local `filepath.WalkDir`.
+//
+// Path semantics:
+//
+//   - The remote root's own basename becomes the top-level under the
+//     local destination, matching the LarePass folder-download UX
+//     (download `drive/Home/Documents/` into `./out/` produces
+//     `./out/Documents/...`). To pull contents only, the user can
+//     pass an explicit local target path that already includes the
+//     leaf name.
+//   - All wire paths use POSIX '/'; they're converted to host paths
+//     via filepath.Join only at the local-write boundary.
+package download
+
+import (
+	"context"
+	"fmt"
+	"path"
+	"path/filepath"
+	"sort"
+	"strings"
+)
+
+// FileTask is one file scheduled for download. The download cobra
+// command turns each task into a DownloadFile call, optionally in
+// parallel via errgroup.
+type FileTask struct {
+	// RemotePlainPath is the un-encoded `<fileType>/<extend>/<path>`
+	// triple Client.DownloadFile expects. Always points at a regular
+	// file (the walker filters out directories before adding tasks).
+	RemotePlainPath string
+	// LocalPath is the full local destination path (including the
+	// recreated directory tree). Parent directories are created on
+	// the fly by DownloadFile via os.MkdirAll.
+	LocalPath string
+	// RelativePath is the path RELATIVE to the user-supplied local
+	// destination root, in POSIX form. Useful for progress display
+	// (`./out/Documents/foo.txt` reads more naturally as
+	// `Documents/foo.txt`).
+	RelativePath string
+	// Size is the file size in bytes from the remote listing. May be
+	// 0 for genuinely empty files; relied on by the downloader's
+	// resume probe and by the cobra command's progress totals.
+	Size int64
+}
+
+// Plan is the structured result of resolving a remote root + local
+// destination pair. The cobra command consumes Files in parallel and
+// (optionally) creates EmptyDirs locally before the file downloads
+// start so the on-disk tree matches the remote one even if a
+// directory happens to be empty.
+type Plan struct {
+	// Files is the flat list of file download tasks. Ordering is
+	// deterministic (sorted by RelativePath) so retries / dry-runs
+	// are stable across runs.
+	Files []FileTask
+	// EmptyDirs lists subdirectories under LocalRoot that contain no
+	// files. Sorted shallow-to-deep so iterative MkdirAll works
+	// without surprises. May be empty for a fully-populated tree.
+	EmptyDirs []string
+	// LocalRoot is the local directory the plan is anchored to —
+	// either the user's --dst (in directory mode) or that path with
+	// the remote basename appended (the LarePass-folder-picker
+	// behavior). Useful for the cobra command's summary line.
+	LocalRoot string
+}
+
+// BuildPlan walks the remote tree rooted at `remoteRoot` and lays
+// out the corresponding local file paths under `localBase`.
+//
+// `remoteRoot` is the un-encoded plain path like `drive/Home/Documents`
+// (the trailing slash is added internally — Stat/List handle either
+// form). `localBase` is the user-supplied local destination directory
+// (must exist OR be createable; the cobra cmd validates this before
+// calling).
+//
+// On success Plan.Files is non-empty for any non-empty remote tree;
+// callers should still defensively handle the all-empty case (a
+// directory with only empty subdirectories) by checking len(Files).
+func BuildPlan(
+	ctx context.Context,
+	c *Client,
+	remoteRoot, localBase string,
+) (*Plan, error) {
+	// Pull the leaf name off the remote root so we can recreate it as
+	// the top-level directory under localBase, matching the upload
+	// walker's "preserve the source folder name" behavior.
+	cleanRoot := strings.Trim(remoteRoot, "/")
+	if cleanRoot == "" {
+		return nil, fmt.Errorf("remote root is empty")
+	}
+	leaf := path.Base(cleanRoot)
+	plan := &Plan{
+		LocalRoot: filepath.Join(localBase, leaf),
+	}
+
+	// The walker is depth-first iterative (BFS would be fine too;
+	// pick depth-first so progress output shows files within a
+	// directory together, which reads more naturally to humans).
+	type frame struct {
+		// remotePath is the parent directory's plain path on the
+		// server, e.g. "drive/Home/Documents/photos".
+		remotePath string
+		// relPath is the same path under the local LocalRoot, in
+		// POSIX form (e.g. "photos" or "photos/2026"). Empty for the
+		// root itself.
+		relPath string
+	}
+	stack := []frame{{remotePath: cleanRoot, relPath: ""}}
+
+	for len(stack) > 0 {
+		// Pop from the end (depth-first).
+		top := stack[len(stack)-1]
+		stack = stack[:len(stack)-1]
+
+		entries, err := c.List(ctx, top.remotePath+"/")
+		if err != nil {
+			return nil, fmt.Errorf("list %s: %w", top.remotePath, err)
+		}
+
+		if len(entries) == 0 && top.relPath != "" {
+			// Genuine empty subdirectory: the local tree should mirror
+			// it. The root itself is excluded because we always create
+			// it at the cobra layer.
+			plan.EmptyDirs = append(plan.EmptyDirs, top.relPath)
+			continue
+		}
+
+		// Push children — directories onto the stack for further
+		// traversal, files into Plan.Files. Sort entries inside
+		// the loop so the depth-first traversal yields a stable
+		// per-directory ordering even if the server hands us an
+		// unsorted list (which it generally doesn't, but we don't
+		// want to bake that assumption in).
+		sort.SliceStable(entries, func(i, j int) bool {
+			if entries[i].IsDir != entries[j].IsDir {
+				// Files first inside this directory so their progress
+				// line lands before we descend into the next subdir's
+				// listing.
+				return !entries[i].IsDir
+			}
+			return entries[i].Name < entries[j].Name
+		})
+
+		for _, e := range entries {
+			childRel := joinPosix(top.relPath, e.Name)
+			childRemote := top.remotePath + "/" + e.Name
+
+			if e.IsDir {
+				// Push subdirectory; it'll be popped after this loop
+				// and processed (which may produce further pushes).
+				stack = append(stack, frame{
+					remotePath: childRemote,
+					relPath:    childRel,
+				})
+				continue
+			}
+			plan.Files = append(plan.Files, FileTask{
+				RemotePlainPath: childRemote,
+				LocalPath:       filepath.Join(plan.LocalRoot, filepath.FromSlash(childRel)),
+				RelativePath:    childRel,
+				Size:            e.Size,
+			})
+		}
+	}
+
+	// Stable, predictable orderings for retries / dry-runs. Files are
+	// sorted by RelativePath; empty dirs shallow-first so iterative
+	// MkdirAll matches expectation.
+	sort.SliceStable(plan.Files, func(i, j int) bool {
+		return plan.Files[i].RelativePath < plan.Files[j].RelativePath
+	})
+	sort.SliceStable(plan.EmptyDirs, func(i, j int) bool {
+		di := strings.Count(plan.EmptyDirs[i], "/")
+		dj := strings.Count(plan.EmptyDirs[j], "/")
+		if di != dj {
+			return di < dj
+		}
+		return plan.EmptyDirs[i] < plan.EmptyDirs[j]
+	})
+
+	return plan, nil
+}
+
+// joinPosix joins two POSIX path fragments with a single '/'. Empty
+// `parent` means `child` is the top-level entry under LocalRoot.
+func joinPosix(parent, child string) string {
+	if parent == "" {
+		return child
+	}
+	return parent + "/" + child
+}
diff --git a/cli/pkg/files/download/walker_test.go b/cli/pkg/files/download/walker_test.go
new file mode 100644
index 000000000..7d41fc2d4
--- /dev/null
+++ b/cli/pkg/files/download/walker_test.go
@@ -0,0 +1,200 @@
+package download
+
+import (
+	"context"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"strings"
+	"testing"
+)
+
+// fakeTree is a trivial in-memory remote filesystem used to drive the
+// walker via httptest. Keys are full plain paths (e.g.
+// "drive/Home/Documents") to a `dirContents` value (nil for files).
+type fakeTree map[string]*dirContents + +type dirContents struct { + entries []fakeEntry +} + +type fakeEntry struct { + Name string `json:"name"` + IsDir bool `json:"isDir"` + Size int64 `json:"size"` +} + +// handler exposes the tree as the subset of /api/resources the walker +// touches: listing requests (path ends in '/') return the dirContents +// for the matching key. Stat requests aren't used by the walker +// (BuildPlan only calls List), so we don't need to fake those. +func (t fakeTree) handler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // /api/resources/ — strip the prefix and percent-decode + // each segment to get the lookup key. + raw := strings.TrimPrefix(r.URL.Path, "/api/resources/") + hadTrailing := strings.HasSuffix(raw, "/") + raw = strings.Trim(raw, "/") + segs := strings.Split(raw, "/") + for i, s := range segs { + d, err := url.PathUnescape(s) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + segs[i] = d + } + key := strings.Join(segs, "/") + _ = hadTrailing + dc, ok := t[key] + if !ok || dc == nil { + http.Error(w, "not a directory", http.StatusNotFound) + return + } + _ = json.NewEncoder(w).Encode(map[string]interface{}{ + "items": dc.entries, + }) + }) +} + +func TestBuildPlan_FlatDirectory(t *testing.T) { + tree := fakeTree{ + "drive/Home/Documents": { + entries: []fakeEntry{ + {Name: "a.txt", Size: 10}, + {Name: "b.txt", Size: 20}, + }, + }, + } + client, _ := newTestClient(t, tree.handler()) + + plan, err := BuildPlan(context.Background(), client, "drive/Home/Documents", "/tmp/dest") + if err != nil { + t.Fatalf("BuildPlan: %v", err) + } + if len(plan.Files) != 2 { + t.Fatalf("want 2 files, got %d", len(plan.Files)) + } + if !strings.HasSuffix(plan.LocalRoot, "Documents") { + t.Errorf("LocalRoot should preserve remote leaf name, got %q", plan.LocalRoot) + } + if plan.Files[0].RelativePath != "a.txt" || plan.Files[1].RelativePath != "b.txt" { + 
t.Errorf("RelativePaths: %v", []string{plan.Files[0].RelativePath, plan.Files[1].RelativePath}) + } + for _, f := range plan.Files { + if !strings.HasPrefix(f.RemotePlainPath, "drive/Home/Documents/") { + t.Errorf("RemotePlainPath should be drive/Home/Documents/, got %q", f.RemotePlainPath) + } + } +} + +func TestBuildPlan_NestedWithEmptyDir(t *testing.T) { + tree := fakeTree{ + "drive/Home/Backups": { + entries: []fakeEntry{ + {Name: "top.txt", Size: 5}, + {Name: "photos", IsDir: true}, + {Name: "empty", IsDir: true}, + }, + }, + "drive/Home/Backups/photos": { + entries: []fakeEntry{ + {Name: "img.jpg", Size: 100}, + }, + }, + "drive/Home/Backups/empty": { + entries: nil, // genuinely empty subdir + }, + } + client, _ := newTestClient(t, tree.handler()) + + plan, err := BuildPlan(context.Background(), client, "drive/Home/Backups", "/tmp/out") + if err != nil { + t.Fatalf("BuildPlan: %v", err) + } + // Files: 2 ("top.txt" and "photos/img.jpg"). + if len(plan.Files) != 2 { + t.Fatalf("want 2 files, got %d (%+v)", len(plan.Files), plan.Files) + } + relPaths := []string{plan.Files[0].RelativePath, plan.Files[1].RelativePath} + want := []string{"photos/img.jpg", "top.txt"} + for i, w := range want { + if relPaths[i] != w { + t.Errorf("RelativePath[%d]: want %q, got %q", i, w, relPaths[i]) + } + } + // Empty subdir is captured. + if len(plan.EmptyDirs) != 1 || plan.EmptyDirs[0] != "empty" { + t.Errorf("EmptyDirs: want [empty], got %v", plan.EmptyDirs) + } +} + +func TestBuildPlan_DepthFirstOrdering(t *testing.T) { + // 3-deep tree — verify shallow-first sort on EmptyDirs and + // deterministic Files ordering. The walker is depth-first + // internally but Plan sorts before returning. 
// equal reports whether a and b contain the same strings in the same
// order. nil and the empty slice compare as equal.
func equal(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, s := range a {
		if s != b[i] {
			return false
		}
	}
	return true
}
// Client is the per-FilesURL handle used by DeleteBatch.
//
// AccessToken is sent as `X-Authorization` (not `Authorization: Bearer`),
// because Olares' edge stack only forwards the X-Authorization header to
// per-user services. See pkg/cmdutil/factory.go for the full rationale.
type Client struct {
	// HTTPClient performs the actual requests; tests inject an
	// httptest server's client here.
	HTTPClient *http.Client
	// BaseURL is the FilesURL, e.g. https://files.alice.olares.com.
	BaseURL string // FilesURL, e.g. https://files.alice.olares.com
	// AccessToken, when non-empty, is injected on every request as
	// the X-Authorization header; an empty value omits the header.
	AccessToken string
}
// Target is one user-supplied remote path to delete, normalized so the
// planner has a single canonical shape to group on. The cobra cmd
// constructs these from FrontendPath values; see ToTarget in cmd
// layer for the conversion (kept out of this package so the planner
// stays free of the FrontendPath type).
type Target struct {
	// FileType + Extend together identify the storage class+volume
	// (drive/Home, sync/<repo>, ...). Two targets with the same
	// (FileType, Extend, ParentSubPath) tuple share a parent and can
	// be batched into a single DELETE.
	FileType string
	Extend   string
	// ParentSubPath is the parent directory's path relative to
	// `<FileType>/<Extend>` — always starts with '/' and ends with
	// '/' (or is just "/" for items directly under Extend). This is
	// the value that the `/api/resources/<...>/` URL is built from.
	// Keeping the trailing slash explicit avoids "/Home" vs "/Home/"
	// ambiguity.
	ParentSubPath string
	// Name is the basename of the entry to remove (no slashes).
	Name string
	// IsDirIntent: did the user signal this is a directory (e.g. by
	// passing a trailing slash on the path)? Directory removals are
	// rejected unless --recursive is set (Unix-style).
	IsDirIntent bool
}

// Group is one batch DELETE: a parent path and the list of dirents to
// remove from it. The wire shape comes straight from the LarePass web
// app's batchDelete helper.
type Group struct {
	// FileType / Extend / ParentSubPath: same meaning as on Target,
	// shared by every dirent in the group.
	FileType      string
	Extend        string
	ParentSubPath string
	// Dirents is the list of `/<name>` (file) or `/<name>/` (dir)
	// strings to send in the request body. Sorted alphabetically so
	// the wire request is deterministic for tests / replay.
	Dirents []string
}

// Plan validates `--recursive` against each Target's IsDirIntent flag
// and groups the targets by parent directory. The returned []*Group
// is sorted by (FileType, Extend, ParentSubPath) so callers iterate
// in a stable order; within each group dirents are also sorted.
//
// Errors:
//   - any IsDirIntent=true target with recursive=false → `is a
//     directory: pass -r/-R to remove it` (matches Unix `rm`'s
//     refusal).
//   - any Target with empty Name → "refusing to delete the root of
//     <FileType>/<Extend>". This guards against `rm drive/Home/`
//     accidentally meaning "wipe my Drive" — that operation would
//     have to be expressed differently.
//
// Fix vs. the original: the per-parent dedup set was keyed by a
// delimiter-joined string (parent+"|"+fileType+"|"+extend), which can
// collide when any field itself contains '|' — a colliding parent
// interleaved in the input would reset/pollute another group's seen
// set, letting duplicate dirents through (or dropping legitimate
// ones). The set is now keyed by the same comparable struct key used
// for grouping, which cannot collide.
func Plan(targets []Target, recursive bool) ([]*Group, error) {
	if len(targets) == 0 {
		return nil, errors.New("rm: no targets supplied")
	}

	// key identifies one parent directory. Using the struct itself as
	// the map key (for both grouping and dedup) is collision-free by
	// construction.
	type key struct {
		fileType, extend, parent string
	}
	groupIdx := map[key]int{}
	// seen tracks per-group dirents so duplicates in the user's input
	// collapse to one wire entry. Duplicate names across different
	// parents still land in their own groups.
	seen := map[key]map[string]struct{}{}
	var groups []*Group

	for _, t := range targets {
		if t.Name == "" {
			return nil, fmt.Errorf("refusing to delete the root of %s/%s",
				t.FileType, t.Extend)
		}
		if t.IsDirIntent && !recursive {
			return nil, fmt.Errorf(
				"%s/%s%s%s is a directory: pass -r/-R to remove it recursively",
				t.FileType, t.Extend, t.ParentSubPath, t.Name)
		}
		dirent := "/" + t.Name
		if t.IsDirIntent {
			dirent += "/"
		}

		k := key{t.FileType, t.Extend, t.ParentSubPath}
		idx, ok := groupIdx[k]
		if !ok {
			idx = len(groups)
			groupIdx[k] = idx
			groups = append(groups, &Group{
				FileType:      t.FileType,
				Extend:        t.Extend,
				ParentSubPath: t.ParentSubPath,
			})
			seen[k] = map[string]struct{}{}
		}
		if _, dup := seen[k][dirent]; dup {
			continue
		}
		seen[k][dirent] = struct{}{}
		groups[idx].Dirents = append(groups[idx].Dirents, dirent)
	}

	// Stable orderings: groups by (fileType, extend, parent), dirents
	// by name within each group.
	sort.SliceStable(groups, func(i, j int) bool {
		if groups[i].FileType != groups[j].FileType {
			return groups[i].FileType < groups[j].FileType
		}
		if groups[i].Extend != groups[j].Extend {
			return groups[i].Extend < groups[j].Extend
		}
		return groups[i].ParentSubPath < groups[j].ParentSubPath
	})
	for _, g := range groups {
		sort.Strings(g.Dirents)
	}

	return groups, nil
}
+// +// `parent` always ends with '/' on the wire; if the caller's +// ParentSubPath is missing the trailing slash we add it here so the +// FileParam.convert split-on-/ check on the server side passes. +func (c *Client) DeleteBatch(ctx context.Context, g *Group) error { + if len(g.Dirents) == 0 { + return nil + } + parent := g.ParentSubPath + if !strings.HasSuffix(parent, "/") { + parent += "/" + } + plain := g.FileType + "/" + g.Extend + parent + endpoint := c.BaseURL + "/api/resources/" + upload.EncodeURL(plain) + + bodyBytes, err := json.Marshal(deleteRequestBody{Dirents: g.Dirents}) + if err != nil { + return fmt.Errorf("marshal delete body: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, endpoint, bytes.NewReader(bodyBytes)) + if err != nil { + return fmt.Errorf("build request: %w", err) + } + if c.AccessToken != "" { + req.Header.Set("X-Authorization", c.AccessToken) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + + resp, err := c.HTTPClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + respBody, _ := io.ReadAll(resp.Body) + if resp.StatusCode/100 != 2 { + return &HTTPError{ + Status: resp.StatusCode, + Body: string(respBody), + URL: endpoint, + Method: http.MethodDelete, + } + } + return nil +} diff --git a/cli/pkg/files/rm/rm_test.go b/cli/pkg/files/rm/rm_test.go new file mode 100644 index 000000000..9a30e9f01 --- /dev/null +++ b/cli/pkg/files/rm/rm_test.go @@ -0,0 +1,228 @@ +package rm + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func newTestClient(t *testing.T, h http.Handler) (*Client, *httptest.Server) { + t.Helper() + srv := httptest.NewServer(h) + t.Cleanup(srv.Close) + return &Client{ + HTTPClient: srv.Client(), + BaseURL: srv.URL, + AccessToken: "test-token-XYZ", + }, srv +} + +func TestPlan_GroupsByParent(t *testing.T) { + targets := []Target{ + 
{FileType: "drive", Extend: "Home", ParentSubPath: "/Documents/", Name: "a.pdf"}, + {FileType: "drive", Extend: "Home", ParentSubPath: "/Documents/", Name: "b.pdf"}, + {FileType: "drive", Extend: "Home", ParentSubPath: "/Logs/", Name: "today.log"}, + } + groups, err := Plan(targets, false) + if err != nil { + t.Fatalf("Plan: %v", err) + } + if len(groups) != 2 { + t.Fatalf("want 2 groups (one per parent), got %d", len(groups)) + } + // Sorted alphabetically: Documents/ < Logs/. + if groups[0].ParentSubPath != "/Documents/" { + t.Errorf("groups[0].ParentSubPath = %q", groups[0].ParentSubPath) + } + if !equal(groups[0].Dirents, []string{"/a.pdf", "/b.pdf"}) { + t.Errorf("groups[0].Dirents = %v", groups[0].Dirents) + } + if !equal(groups[1].Dirents, []string{"/today.log"}) { + t.Errorf("groups[1].Dirents = %v", groups[1].Dirents) + } +} + +// TestPlan_DirIntentRequiresRecursive replicates Unix `rm`'s refusal: +// a trailing-slash target without -r must error, and the message must +// name the offending path. 
+func TestPlan_DirIntentRequiresRecursive(t *testing.T) { + targets := []Target{ + {FileType: "drive", Extend: "Home", ParentSubPath: "/", Name: "Backups", IsDirIntent: true}, + } + _, err := Plan(targets, false) + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), "directory") || !strings.Contains(err.Error(), "Backups") { + t.Errorf("error should mention the directory and the -r flag, got: %v", err) + } + + groups, err := Plan(targets, true) + if err != nil { + t.Fatalf("with -r the same plan should succeed: %v", err) + } + if len(groups) != 1 || len(groups[0].Dirents) != 1 || groups[0].Dirents[0] != "/Backups/" { + t.Errorf("dirent for dir target should have trailing slash, got %+v", groups) + } +} + +func TestPlan_RefusesEmptyName(t *testing.T) { + _, err := Plan([]Target{ + {FileType: "drive", Extend: "Home", ParentSubPath: "/"}, // Name missing + }, true) + if err == nil { + t.Fatal("expected error for empty Name (root deletion)") + } + if !strings.Contains(err.Error(), "root") { + t.Errorf("error should mention 'root', got: %v", err) + } +} + +func TestPlan_DeduplicatesDirents(t *testing.T) { + // Same path twice on the command line — should land in one + // dirent, not two. + targets := []Target{ + {FileType: "drive", Extend: "Home", ParentSubPath: "/Logs/", Name: "x.log"}, + {FileType: "drive", Extend: "Home", ParentSubPath: "/Logs/", Name: "x.log"}, + } + groups, err := Plan(targets, false) + if err != nil { + t.Fatalf("Plan: %v", err) + } + if len(groups) != 1 || len(groups[0].Dirents) != 1 { + t.Errorf("duplicates should collapse, got groups=%+v", groups) + } +} + +func TestPlan_NoTargets(t *testing.T) { + _, err := Plan(nil, false) + if err == nil { + t.Fatal("expected error for no targets") + } +} + +// TestDeleteBatch_WireShape exercises the actual HTTP DELETE: URL +// path encoding, JSON body shape, X-Authorization injection, and +// trailing-slash on the parent. 
+func TestDeleteBatch_WireShape(t *testing.T) { + var ( + gotMethod string + gotPath string + gotAuth string + gotCType string + gotBody []byte + ) + client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + gotMethod = r.Method + gotPath = r.URL.Path + gotAuth = r.Header.Get("X-Authorization") + gotCType = r.Header.Get("Content-Type") + gotBody, _ = io.ReadAll(r.Body) + w.WriteHeader(http.StatusOK) + })) + g := &Group{ + FileType: "drive", + Extend: "Home", + ParentSubPath: "/Documents/", + Dirents: []string{"/a.pdf", "/sub/"}, + } + if err := client.DeleteBatch(context.Background(), g); err != nil { + t.Fatalf("DeleteBatch: %v", err) + } + if gotMethod != http.MethodDelete { + t.Errorf("Method: want DELETE, got %s", gotMethod) + } + if gotPath != "/api/resources/drive/Home/Documents/" { + t.Errorf("Path: got %q", gotPath) + } + if gotAuth != "test-token-XYZ" { + t.Errorf("X-Authorization: got %q", gotAuth) + } + if !strings.HasPrefix(gotCType, "application/json") { + t.Errorf("Content-Type: got %q", gotCType) + } + var body deleteRequestBody + if err := json.Unmarshal(gotBody, &body); err != nil { + t.Fatalf("body unmarshal: %v (raw=%s)", err, gotBody) + } + if !equal(body.Dirents, []string{"/a.pdf", "/sub/"}) { + t.Errorf("body.Dirents: got %v", body.Dirents) + } +} + +// TestDeleteBatch_ParentSlashEnforced confirms that a missing trailing +// slash on ParentSubPath is repaired before the wire call (the server +// requires it for the FileParam.convert split-on-/ check). 
+func TestDeleteBatch_ParentSlashEnforced(t *testing.T) { + var gotPath string + client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + gotPath = r.URL.Path + w.WriteHeader(http.StatusOK) + })) + g := &Group{ + FileType: "drive", + Extend: "Home", + ParentSubPath: "/Logs", // missing trailing slash + Dirents: []string{"/today.log"}, + } + if err := client.DeleteBatch(context.Background(), g); err != nil { + t.Fatalf("DeleteBatch: %v", err) + } + if !strings.HasSuffix(gotPath, "/") { + t.Errorf("DeleteBatch should ensure trailing slash, got %q", gotPath) + } +} + +func TestDeleteBatch_NoOpOnEmpty(t *testing.T) { + client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Errorf("should not have hit the server for empty group") + })) + if err := client.DeleteBatch(context.Background(), &Group{}); err != nil { + t.Errorf("empty group should be a no-op, got: %v", err) + } +} + +func TestDeleteBatch_HTTPError(t *testing.T) { + client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusForbidden) + _, _ = io.WriteString(w, `{"error":"nope"}`) + })) + g := &Group{ + FileType: "drive", Extend: "Home", ParentSubPath: "/", Dirents: []string{"/x"}, + } + err := client.DeleteBatch(context.Background(), g) + if err == nil { + t.Fatal("expected error") + } + var hErr *HTTPError + if !errors.As(err, &hErr) { + t.Fatalf("want *HTTPError, got %T", err) + } + if hErr.Status != http.StatusForbidden { + t.Errorf("status: want 403, got %d", hErr.Status) + } +} + +// equal is the bytes.Equal counterpart for string slices, kept local +// so the test file has no external test deps. +func equal(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +// Compile-time check that the unused bytes import isn't a problem; +// tests above pass body bytes around. 
// equal is the bytes.Equal counterpart for string slices, kept local
// so the test file has no external test deps.
func equal(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := len(a) - 1; i >= 0; i-- {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

// Keep the bytes import referenced; tests above pass body bytes
// around, and this anchor stops the import from going stale.
var _ = bytes.Equal
+type Node struct { + Name string `json:"name"` + Master bool `json:"master"` +} + +// nodesEnvelope mirrors the {data: {nodes: [...]}} response shape that +// fetchNodeList in apps/packages/app/src/api/files/v2/common/utils.ts +// L320-L329 unpacks. We keep the envelope local to this package since +// it's not useful elsewhere. +type nodesEnvelope struct { + Data struct { + Nodes []Node `json:"nodes"` + } `json:"data"` +} + +// FetchNodes calls GET {filesURL}/api/nodes/ and returns the configured +// Drive nodes. The CLI uses nodes[0].Name (or a user-supplied --node +// override) as the per-request `{node}` path segment for the upload +// endpoints — same convention as the web app's getUploadNode(). +// +// Errors: +// - any non-2xx status surfaces as fmt.Errorf with status + body +// - empty `data.nodes` is reported with a clear message; the upload +// flow can't proceed without a node identifier. +func (c *Client) FetchNodes(ctx context.Context) ([]Node, error) { + endpoint := c.BaseURL + "/api/nodes/" + body, err := c.do(ctx, http.MethodGet, endpoint, nil, nil, "") + if err != nil { + return nil, fmt.Errorf("GET %s: %w", endpoint, err) + } + var env nodesEnvelope + if err := json.Unmarshal(body, &env); err != nil { + return nil, fmt.Errorf("decode /api/nodes/ response: %w (body=%s)", err, truncateBody(body)) + } + if len(env.Data.Nodes) == 0 { + return nil, errors.New("files-backend returned no Drive nodes; cannot upload") + } + return env.Data.Nodes, nil +} + +// GetUploadLink calls GET {filesURL}/upload/upload-link/{node}/?file_path=&from=web. +// The server replies with a plaintext path (e.g. +// `/seafhttp/upload-aj//?...`) that the browser then POSTs chunks +// to. The web app appends `?ret-json=1` to that path so the per-chunk +// response is JSON instead of a redirect — we do the same to match. +// +// `parentDir` is the parent directory path WITH the `/drive/Home/...` +// prefix and a TRAILING slash (e.g. `/drive/Home/Documents/`). 
That's +// what the web app passes through `files.formatPathtoUrl` → +// `path.pathname` before plumbing it into this call (see +// apps/packages/app/src/utils/resumejs.ts L412-L416). +// +// Returned string is a relative path (no scheme/host); the chunk POST +// uses `c.BaseURL + uploadLink` as the target. +func (c *Client) GetUploadLink(ctx context.Context, node, parentDir string) (string, error) { + endpoint := c.BaseURL + + "/upload/upload-link/" + url.PathEscape(node) + + "/?file_path=" + encodeURIComponent(parentDir) + + "&from=web" + body, err := c.do(ctx, http.MethodGet, endpoint, nil, nil, "") + if err != nil { + return "", fmt.Errorf("GET %s: %w", endpoint, err) + } + link := strings.TrimSpace(string(body)) + if link == "" { + return "", fmt.Errorf("upload-link response is empty (parent_dir=%q)", parentDir) + } + // Append ?ret-json=1 the same way the web app does (resumejs.ts / + // data.ts), so per-chunk responses come back as JSON instead of + // the default redirect that the browser-style upload would follow. + if strings.Contains(link, "?") { + link += "&ret-json=1" + } else { + link += "?ret-json=1" + } + return link, nil +} + +// uploadedBytesEnvelope is the JSON shape returned by +// /upload/file-uploaded-bytes/. The web app reads `uploadedBytes` and +// floors `uploadedBytes / chunkSize` to find the next chunk to send. +type uploadedBytesEnvelope struct { + UploadedBytes int64 `json:"uploadedBytes"` +} + +// GetUploadedBytes asks the server how many bytes of `/` +// have already been received. New / never-seen files return 0 (or an error +// the web app silently swallows — see resumejs.ts: any non-2xx is treated +// as "start from scratch"). We adopt the same lenient policy so a fresh +// upload doesn't fail just because the server doesn't know about the file +// yet. +// +// `parentDir` follows the same convention as GetUploadLink: full +// `/drive/Home/...` path with a trailing slash. 
`filename` is the bare +// basename (no directory components). +func (c *Client) GetUploadedBytes(ctx context.Context, node, parentDir, filename string) (int64, error) { + q := url.Values{} + q.Set("parent_dir", parentDir) + q.Set("file_name", filename) + endpoint := c.BaseURL + + "/upload/file-uploaded-bytes/" + url.PathEscape(node) + + "/?" + q.Encode() + body, err := c.do(ctx, http.MethodGet, endpoint, nil, nil, "") + if err != nil { + // Match the web app's silent fallback: probe failures (file + // doesn't exist, transient 404, ...) all collapse to "we + // haven't uploaded anything yet". + return 0, nil //nolint:nilerr // intentional: see docstring + } + var env uploadedBytesEnvelope + if err := json.Unmarshal(body, &env); err != nil { + return 0, nil //nolint:nilerr // best-effort probe; restart from 0 + } + if env.UploadedBytes < 0 { + return 0, nil + } + return env.UploadedBytes, nil +} + +// Mkdir POSTs an empty body to /api/resources/drive/Home// +// to create a directory under Drive/Home. The trailing slash is what the +// backend uses to discriminate "create directory" from "create empty file" +// (postCreateFile in v2/common/utils.ts does the same thing — `isDir +// ? '/' : ''`). +// +// `relSubPath` is the directory path RELATIVE to /Home (e.g. "Documents" +// or "Documents/photos"); it should NOT include leading or trailing +// slashes — Mkdir handles slash placement and percent-encoding so the +// caller can hand in plain UTF-8 segments. +// +// IMPORTANT: This call is NOT idempotent on the server side. The +// files-backend auto-renames colliding directories ("Documents" exists +// → POST creates "Documents (1)") instead of returning 409. We treat +// the 409 path as "already exists" for completeness but most servers +// won't take that branch — callers should reserve Mkdir for paths +// they're confident don't exist yet (e.g. brand-new subdirectories +// they computed from a local walk). 
The 409 fast-path stays in case +// some deployments do return 409 for collisions. +func (c *Client) Mkdir(ctx context.Context, relSubPath string) error { + clean := strings.Trim(relSubPath, "/") + if clean == "" { + // Drive/Home root always exists; nothing to do. + return nil + } + encoded := EncodeURL(clean) + endpoint := c.BaseURL + "/api/resources/drive/Home/" + encoded + "/" + _, err := c.do(ctx, http.MethodPost, endpoint, nil, nil, "") + if err != nil { + var hErr *HTTPError + if errors.As(err, &hErr) && hErr.Status == http.StatusConflict { + // Directory already exists — exactly what we wanted. + return nil + } + return fmt.Errorf("mkdir %q: %w", relSubPath, err) + } + return nil +} + +// CreateEmptyFile POSTs an empty body to +// /api/resources/drive/Home/ (no trailing slash) to +// materialize a zero-length file. The web app routes empty files through +// uploadEmptyFile() instead of the chunk pipeline (resumable.js cannot +// represent a 0-byte chunk), and we mirror that here. +// +// Unlike Mkdir, a 409 here is reported back to the caller — we don't +// silently overwrite or pretend success when the user explicitly asked +// to upload a file and a name collision happened. +func (c *Client) CreateEmptyFile(ctx context.Context, relPath string) error { + clean := strings.Trim(relPath, "/") + if clean == "" { + return fmt.Errorf("CreateEmptyFile: empty path") + } + encoded := EncodeURL(clean) + endpoint := c.BaseURL + "/api/resources/drive/Home/" + encoded + _, err := c.do(ctx, http.MethodPost, endpoint, nil, nil, "") + if err != nil { + return fmt.Errorf("create empty file %q: %w", relPath, err) + } + return nil +} + +// HTTPError carries the status + truncated body of a non-2xx response so +// callers that care (Mkdir's 409 fast-path, the chunk uploader's permanent +// vs. retryable classification) can branch on the status code without +// stringly-typed error parsing. 
+type HTTPError struct { + Status int + Body string + URL string + Method string +} + +func (e *HTTPError) Error() string { + body := e.Body + if len(body) > 500 { + body = body[:500] + "...(truncated)" + } + return fmt.Sprintf("%s %s: HTTP %d: %s", e.Method, e.URL, e.Status, body) +} + +// do performs a single HTTP request with the configured access token +// injected as `X-Authorization`, and returns the response body on 2xx. +// Non-2xx responses surface as *HTTPError so callers can branch on +// status (409 idempotent for Mkdir, the permanent/retryable split for +// the chunk uploader). +// +// `body` may be nil. `contentType` is honored only when non-empty. The +// extra `headers` map lets callers add chunk-POST headers +// (Content-Range / Content-Disposition) without bypassing the +// X-Authorization injection. +func (c *Client) do( + ctx context.Context, + method, endpoint string, + body io.Reader, + headers http.Header, + contentType string, +) ([]byte, error) { + req, err := http.NewRequestWithContext(ctx, method, endpoint, body) + if err != nil { + return nil, fmt.Errorf("build request: %w", err) + } + if c.AccessToken != "" { + req.Header.Set("X-Authorization", c.AccessToken) + } + if contentType != "" { + req.Header.Set("Content-Type", contentType) + } + for k, vs := range headers { + for _, v := range vs { + req.Header.Add(k, v) + } + } + resp, err := c.HTTPClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + respBody, _ := io.ReadAll(resp.Body) + if resp.StatusCode/100 != 2 { + return nil, &HTTPError{ + Status: resp.StatusCode, + Body: string(respBody), + URL: endpoint, + Method: method, + } + } + return respBody, nil +} + +func truncateBody(b []byte) string { + if len(b) <= 200 { + return string(b) + } + return string(b[:200]) + "...(truncated)" +} diff --git a/cli/pkg/files/upload/api_test.go b/cli/pkg/files/upload/api_test.go new file mode 100644 index 000000000..e7df6286c --- /dev/null +++ 
package upload

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
)

// newTestClient wires a Client to an httptest.Server. The fake token
// is here primarily so api_test exercises the X-Authorization header
// injection path that production traffic relies on.
func newTestClient(t *testing.T, h http.Handler) (*Client, *httptest.Server) {
	t.Helper()
	srv := httptest.NewServer(h)
	t.Cleanup(srv.Close)
	return &Client{
		HTTPClient:  srv.Client(),
		BaseURL:     srv.URL,
		AccessToken: "test-token",
	}, srv
}

// TestFetchNodes covers the happy path: the node list is parsed out of
// the {"data":{"nodes":[...]}} envelope and the auth header is sent.
func TestFetchNodes(t *testing.T) {
	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/api/nodes/" {
			t.Errorf("unexpected path: %q", r.URL.Path)
		}
		if got := r.Header.Get("X-Authorization"); got != "test-token" {
			t.Errorf("X-Authorization = %q, want test-token", got)
		}
		fmt.Fprintln(w, `{"data":{"nodes":[{"name":"node-a","master":true},{"name":"node-b"}]}}`)
	}))
	nodes, err := client.FetchNodes(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	if len(nodes) != 2 {
		t.Fatalf("got %d nodes, want 2", len(nodes))
	}
	if nodes[0].Name != "node-a" || !nodes[0].Master {
		t.Errorf("nodes[0] = %+v", nodes[0])
	}
	if nodes[1].Name != "node-b" || nodes[1].Master {
		t.Errorf("nodes[1] = %+v", nodes[1])
	}
}

// TestFetchNodes_EmptyList: a syntactically-valid but empty node list
// must be surfaced as an error, not as "zero nodes, carry on".
func TestFetchNodes_EmptyList(t *testing.T) {
	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, `{"data":{"nodes":[]}}`)
	}))
	_, err := client.FetchNodes(context.Background())
	if err == nil {
		t.Fatal("expected error for empty nodes list, got nil")
	}
}

// TestGetUploadLink pins the upload-link request shape (path, encoded
// file_path query, from=web) and the ret-json=1 suffix on the result.
func TestGetUploadLink(t *testing.T) {
	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Path: /upload/upload-link//
		wantPath := "/upload/upload-link/node-a/"
		if r.URL.Path != wantPath {
			t.Errorf("path = %q, want %q", r.URL.Path, wantPath)
		}
		// Query: file_path uses encodeURIComponent semantics, so
		// '/' should be encoded as %2F (NOT '/').
		raw := r.URL.RawQuery
		if !strings.Contains(raw, "file_path=%2Fdrive%2FHome%2FDocuments%2F") {
			t.Errorf("raw query missing properly-encoded file_path: %q", raw)
		}
		if !strings.Contains(raw, "from=web") {
			t.Errorf("raw query missing from=web: %q", raw)
		}
		w.Header().Set("Content-Type", "text/plain")
		fmt.Fprint(w, "/seafhttp/upload-aj/repo-1/")
	}))
	link, err := client.GetUploadLink(context.Background(), "node-a", "/drive/Home/Documents/")
	if err != nil {
		t.Fatal(err)
	}
	if want := "/seafhttp/upload-aj/repo-1/?ret-json=1"; link != want {
		t.Errorf("link = %q, want %q", link, want)
	}
}

// When the server's link already carries a query string, ret-json=1
// must be appended with '&', not a second '?'.
func TestGetUploadLink_AppendsRetJSONOnExistingQuery(t *testing.T) {
	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "/seafhttp/upload-aj/repo-1/?token=abc")
	}))
	link, err := client.GetUploadLink(context.Background(), "n", "/drive/Home/")
	if err != nil {
		t.Fatal(err)
	}
	if want := "/seafhttp/upload-aj/repo-1/?token=abc&ret-json=1"; link != want {
		t.Errorf("link = %q, want %q", link, want)
	}
}

// TestGetUploadedBytes pins the probe's query parameters and the
// uploadedBytes JSON field parsing.
func TestGetUploadedBytes(t *testing.T) {
	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("parent_dir") != "/drive/Home/Documents/" {
			t.Errorf("parent_dir = %q", r.URL.Query().Get("parent_dir"))
		}
		if r.URL.Query().Get("file_name") != "report.pdf" {
			t.Errorf("file_name = %q", r.URL.Query().Get("file_name"))
		}
		fmt.Fprint(w, `{"uploadedBytes":16777216}`)
	}))
	got, err := client.GetUploadedBytes(context.Background(), "n", "/drive/Home/Documents/", "report.pdf")
	if err != nil {
		t.Fatal(err)
	}
	if got != 16777216 {
		t.Errorf("uploadedBytes = %d, want 16777216", got)
	}
}

// GetUploadedBytes is intentionally lenient: any error means "we
// haven't started yet, restart from 0" so a missing-file 404 doesn't
// abort a fresh upload. This is the same policy as the web app's
// silent .catch in resumejs.ts resumableUpload().
func TestGetUploadedBytes_404Treats0(t *testing.T) {
	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "not found", http.StatusNotFound)
	}))
	got, err := client.GetUploadedBytes(context.Background(), "n", "/p/", "f")
	if err != nil {
		t.Fatalf("expected nil error on 404, got %v", err)
	}
	if got != 0 {
		t.Errorf("got %d, want 0", got)
	}
}

// TestMkdir pins the POST method and the /api/resources/drive/Home/
// path construction (trailing slash included).
func TestMkdir(t *testing.T) {
	var gotPath string
	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			t.Errorf("method = %s", r.Method)
		}
		gotPath = r.URL.Path
		w.WriteHeader(http.StatusOK)
	}))
	if err := client.Mkdir(context.Background(), "Documents/Backups"); err != nil {
		t.Fatal(err)
	}
	if want := "/api/resources/drive/Home/Documents/Backups/"; gotPath != want {
		t.Errorf("path = %q, want %q", gotPath, want)
	}
}

// A 409 ("directory already exists") is success for Mkdir — creating
// a directory that exists must be idempotent.
func TestMkdir_409Idempotent(t *testing.T) {
	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "exists", http.StatusConflict)
	}))
	if err := client.Mkdir(context.Background(), "Existing"); err != nil {
		t.Errorf("Mkdir on 409 should be nil, got %v", err)
	}
}

// Any other non-2xx status must surface as a typed *HTTPError so
// callers can branch on Status.
func TestMkdir_OtherErrorReturns(t *testing.T) {
	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "boom", http.StatusInternalServerError)
	}))
	err := client.Mkdir(context.Background(), "Bad")
	if err == nil {
		t.Fatal("want error, got nil")
	}
	var hErr *HTTPError
	if !errors.As(err, &hErr) {
		t.Fatalf("expected *HTTPError, got %T (%v)", err, err)
	}
	if hErr.Status != http.StatusInternalServerError {
		t.Errorf("status = %d", hErr.Status)
	}
}

// Mkdir against root (or empty path) is a no-op — drive/Home always
// exists, and we don't want a stray POST to surprise the server.
func TestMkdir_RootIsNoop(t *testing.T) {
	hit := false
	client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		hit = true
		w.WriteHeader(http.StatusOK)
	}))
	if err := client.Mkdir(context.Background(), ""); err != nil {
		t.Fatal(err)
	}
	if hit {
		t.Errorf("Mkdir(\"\") should not hit the server")
	}
	if err := client.Mkdir(context.Background(), "/"); err != nil {
		t.Fatal(err)
	}
	if hit {
		t.Errorf("Mkdir(\"/\") should not hit the server")
	}
}

// Package upload implements the chunked / resumable upload client that the
// `olares-cli files upload` command drives. It speaks the same wire protocol
// as the LarePass web app (Resumable.js + the Drive v2 endpoints under
// /upload/upload-link, /upload/file-uploaded-bytes, /api/nodes,
// /api/resources/drive/Home/...). See docs/notes/auth-2fa-semantics.md for
// the auth header convention shared with the rest of the CLI, and the plan
// at .cursor/plans/cli_files_upload_resumable_*.plan.md for the design
// rationale.
//
// encode.go: percent-encoding helpers that mirror the web app's
// apps/packages/app/src/utils/encode.ts (encodeUrl). The standard library
// alone is NOT a 1:1 substitute:
//
//   - url.QueryEscape encodes a space as '+' (form encoding) — JS
//     encodeURIComponent uses '%20'.
//   - url.QueryEscape escapes '!' '*' '(' ')' '\'' — JS encodeURIComponent
//     does not.
+// +// Both differences would round-trip to the server differently and break +// resume / probe path-matching for filenames containing those bytes, so we +// implement encodeURIComponent ourselves and use it everywhere we touch a +// path / filename / parent_dir value. +package upload + +import ( + "strings" +) + +// encodeURIComponent mirrors JavaScript's encodeURIComponent: it leaves +// the unreserved set (RFC 3986) plus !*'() alone and percent-encodes the +// rest as UTF-8 bytes. This is the building block for both EncodeURL +// (path-segment encoding, joined with '/') and the query-value encoding +// the upload protocol uses for parent_dir / file_name. +// +// Why we don't reuse net/url: +// - url.QueryEscape encodes ' ' as '+' (form encoding). The Drive +// backend was written against a JS client that emits '%20', so the +// two representations are not interchangeable for filename parity +// (probe and chunk POST must see byte-identical names for resume to +// line up). +// - url.PathEscape leaves '?', '#', '&', '=' alone (they're valid +// within a path component) but those characters DO need escaping when +// the value is destined for a query parameter, which is the bulk of +// our use case. +func encodeURIComponent(s string) string { + var b strings.Builder + b.Grow(len(s)) + for i := 0; i < len(s); i++ { + c := s[i] + if shouldNotEncode(c) { + b.WriteByte(c) + continue + } + b.WriteByte('%') + b.WriteByte(upperHex[c>>4]) + b.WriteByte(upperHex[c&0x0f]) + } + return b.String() +} + +// EncodeURL is the Go counterpart of apps/packages/app/src/utils/encode.ts +// `encodeUrl`: split on '/', encodeURIComponent each segment, rejoin with +// '/'. Used wherever a path is interpolated into a URL path component +// (e.g. the file_path query value the server uses to derive the upload +// link, or the /api/resources/drive/Home/... mkdir endpoint). 
+// +// The leading and trailing '/' are preserved verbatim so callers can keep +// the "directory hint" semantics the backend relies on (a trailing '/' +// signals "this is a directory" in several files-backend code paths; see +// files/pkg/models/file_param.go). +func EncodeURL(p string) string { + if p == "" { + return "" + } + // split keeps empty leading/trailing parts so the leading/trailing + // slashes survive the rejoin (e.g. "/a/b/" → ["", "a", "b", ""] → + // "/a/b/" again after encoding the non-empty pieces). + parts := strings.Split(p, "/") + for i, seg := range parts { + if seg == "" { + continue + } + parts[i] = encodeURIComponent(seg) + } + return strings.Join(parts, "/") +} + +const upperHex = "0123456789ABCDEF" + +// shouldNotEncode is the membership test for JS encodeURIComponent's +// "leave alone" set: A-Z a-z 0-9 plus the marks `- _ . ~ ! * ' ( )`. +func shouldNotEncode(c byte) bool { + switch { + case c >= 'A' && c <= 'Z': + return true + case c >= 'a' && c <= 'z': + return true + case c >= '0' && c <= '9': + return true + } + switch c { + case '-', '_', '.', '~', '!', '*', '\'', '(', ')': + return true + } + return false +} diff --git a/cli/pkg/files/upload/encode_test.go b/cli/pkg/files/upload/encode_test.go new file mode 100644 index 000000000..9513ae7d3 --- /dev/null +++ b/cli/pkg/files/upload/encode_test.go @@ -0,0 +1,70 @@ +package upload + +import "testing" + +// Test cases derived from JavaScript's encodeURIComponent reference output. +// If you ever change encode.go, re-run these examples in a Node REPL to +// re-confirm parity: +// +// > encodeURIComponent("hello world") // 'hello%20world' +// > encodeURIComponent("a&b=c") // 'a%26b%3Dc' +// > encodeURIComponent("中文.txt") // '%E4%B8%AD%E6%96%87.txt' +// +// We deliberately test the boundary characters where Go's url.QueryEscape +// would diverge from JS (' ' as '+', and the !*'() set escaping) so any +// regression to "just use url.QueryEscape" gets caught here. 
+func TestEncodeURIComponent(t *testing.T) { + cases := []struct { + in, want string + }{ + {"", ""}, + {"abc", "abc"}, + {"hello world", "hello%20world"}, // space → %20 (NOT '+') + {"a&b=c", "a%26b%3Dc"}, // query metacharacters + {"a+b", "a%2Bb"}, // '+' percent-encoded + {"a/b", "a%2Fb"}, // '/' is reserved + {"!*'()", "!*'()"}, // unreserved-extras (NOT escaped) + {"-_.~", "-_.~"}, // RFC 3986 unreserved + {"中文.txt", "%E4%B8%AD%E6%96%87.txt"}, // UTF-8 multibyte + {"~user@host", "~user%40host"}, // '@' encoded + {"file (1).txt", "file%20(1).txt"}, // spaces + parens together + {"100%", "100%25"}, // '%' itself + {"\x00\n\r\t", "%00%0A%0D%09"}, // control chars + {" / ", "%20%20%2F%20%20"}, // multiple spaces around '/' + {"a?b#c", "a%3Fb%23c"}, // '?' and '#' + } + for _, c := range cases { + got := encodeURIComponent(c.in) + if got != c.want { + t.Errorf("encodeURIComponent(%q) = %q, want %q", c.in, got, c.want) + } + } +} + +// EncodeURL is the path-segment-aware variant: '/' separators are kept, +// each segment is encodeURIComponent'd. The interesting cases are +// preserving leading/trailing slashes (the backend uses them as directory +// hints) and the empty-segment edge case from "//". 
+func TestEncodeURL(t *testing.T) { + cases := []struct { + in, want string + }{ + {"", ""}, + {"/", "/"}, + {"a/b/c", "a/b/c"}, + {"/a/b/", "/a/b/"}, + {"a b/c d/", "a%20b/c%20d/"}, + {"中文/files/x.txt", "%E4%B8%AD%E6%96%87/files/x.txt"}, + {"//x", "//x"}, // empty leading segment kept + {"x//", "x//"}, // empty trailing segment kept + {"/Home/Photos/IMG_001.jpg", "/Home/Photos/IMG_001.jpg"}, + {"/dir/file (1).txt", "/dir/file%20(1).txt"}, + {"/sub/a&b/c=d", "/sub/a%26b/c%3Dd"}, + } + for _, c := range cases { + got := EncodeURL(c.in) + if got != c.want { + t.Errorf("EncodeURL(%q) = %q, want %q", c.in, got, c.want) + } + } +} diff --git a/cli/pkg/files/upload/uploader.go b/cli/pkg/files/upload/uploader.go new file mode 100644 index 000000000..6366b4dbf --- /dev/null +++ b/cli/pkg/files/upload/uploader.go @@ -0,0 +1,535 @@ +// uploader.go: single-file chunked uploader. Drives the resumable upload +// protocol the LarePass web app uses (Resumable.js + Drive v2 endpoints): +// +// 1. probe the server for already-uploaded bytes via +// /upload/file-uploaded-bytes// (GetUploadedBytes) +// 2. align to a chunk boundary by flooring (matches the web app's +// `Math.floor(uploadedBytes / chunkSize)` — re-uploading the +// "overflow" within that chunk is harmless and identical-byte) +// 3. ask the server for an upload link via +// /upload/upload-link// (GetUploadLink, once per file) +// 4. POST each remaining chunk as multipart/form-data with the +// Resumable.js parameter shape (resumableChunkNumber, ..., file=chunk) +// plus the Drive-specific extras (parent_dir, driveType, ...) +// 5. classify each chunk response: +// - 200 / 201 → chunk accepted, advance +// - permanent codes → fail fast (see permanentStatuses) +// - everything else → retry up to opts.MaxRetries with backoff +// +// Empty files are routed through CreateEmptyFile (the web app does the +// same — Resumable.js can't represent a 0-byte chunk). 
package upload

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"net/textproto"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"
)

// DefaultChunkSize is 8 MiB — the same value the web app uses
// (apps/packages/app/src/api/files/v2/drive/data.ts L55: SIZE = 8MB).
// Stick to it unless you have a very good reason; the server's
// already-uploaded-bytes accounting is keyed on the chunk size the
// previous run used, so changing this mid-stream means the resume
// boundary computation diverges (the floor() trick still gets you to a
// safe re-upload offset, but you waste bandwidth re-sending bytes the
// server already had).
const DefaultChunkSize = 8 * 1024 * 1024

// DefaultMaxRetries: per-chunk retry budget. Matches the web app's
// `maxChunkRetries: 3` (resumejs.ts init() L166).
const DefaultMaxRetries = 3

// DefaultRetryBackoff: between failed-chunk retries. Matches the web
// app's chunkRetryInterval default of 5s.
const DefaultRetryBackoff = 5 * time.Second

// UploadOpts is everything UploadFile needs to push one local file into
// Drive/Home. It's a value type so callers (the cobra command, the
// directory walker) can build one per file and tweak fields per call
// without sharing mutable state. Zero-valued optional fields are filled
// in by normalize() at the top of UploadFile.
type UploadOpts struct {
	// LocalPath is the absolute or working-directory-relative path to
	// the file on disk that we're uploading.
	LocalPath string

	// Node is the {node} path segment for /upload/upload-link// 
	// and /upload/file-uploaded-bytes//. Resolved by the cobra
	// command up-front via Client.FetchNodes.
	Node string

	// ParentDir is the destination directory on the server WITH the
	// `/drive/Home/...` prefix and a TRAILING `/`, e.g.
	// `/drive/Home/Documents/`. This is the value passed as the
	// `file_path` query for upload-link, the `parent_dir` query for
	// file-uploaded-bytes, AND the `parent_dir` form field on each
	// chunk POST. They MUST agree byte-for-byte for resume to find the
	// existing partial upload — that's why we plumb a single value
	// rather than recomputing it at each call site.
	ParentDir string

	// RemoteName is the bare filename on the server (no directory
	// components). For directory uploads, this is the leaf file name —
	// the directory components live in RelativePath.
	RemoteName string

	// RelativePath is the file's path relative to the upload root, in
	// POSIX form (forward slashes). For a single-file upload this is
	// just RemoteName; for a directory upload it includes the in-tree
	// directory components, e.g. `mydir/photos/IMG_001.jpg`. The web
	// app uses this for resumableRelativePath + the per-chunk
	// `relative_path` form field; the server uses both for sub-directory
	// auto-creation under parent_dir.
	RelativePath string

	// ChunkSize: bytes per chunk. Defaults to DefaultChunkSize when
	// zero.
	ChunkSize int64
	// MaxRetries: retries per chunk on transient failures. Defaults to
	// DefaultMaxRetries when zero. Negative disables retries.
	MaxRetries int
	// RetryBackoff: wait between retries. Defaults to
	// DefaultRetryBackoff when zero.
	RetryBackoff time.Duration
}

// ProgressFunc is the per-chunk callback the cobra command uses to
// surface a one-line text progress indicator. `uploaded` is the total
// bytes pushed (cumulative) and `total` is the file size; either may be
// reported as `(0, 0)` to indicate "an empty file just completed".
type ProgressFunc func(uploaded, total int64)

// permanentStatuses lists HTTP status codes that the web app's
// Resumable.js treats as non-retryable (resumable.js: see the
// `permanentErrors` option, which the web app passes as the array
// below).
// 5xx is partial: 500/501 are fatal, 502/503/504 are not.
var permanentStatuses = map[int]struct{}{
	400: {}, 401: {}, 403: {}, 404: {}, 409: {}, 415: {},
	440: {}, 441: {}, 442: {}, 443: {},
	500: {}, 501: {},
}

// UploadFile uploads `opts.LocalPath` to `opts.ParentDir` + `opts.RemoteName`,
// resuming from whatever the server already has. Empty files are routed
// to CreateEmptyFile (the chunk pipeline can't express 0-byte chunks).
//
// `progress`, if non-nil, is invoked once after the resume probe (with
// uploaded=, total=) and once per accepted
// chunk thereafter. It is NOT invoked per retry attempt.
func (c *Client) UploadFile(ctx context.Context, opts UploadOpts, progress ProgressFunc) error {
	if err := opts.normalize(); err != nil {
		return err
	}

	st, err := os.Stat(opts.LocalPath)
	if err != nil {
		return fmt.Errorf("stat %s: %w", opts.LocalPath, err)
	}
	if st.IsDir() {
		return fmt.Errorf("UploadFile: %s is a directory; use the walker", opts.LocalPath)
	}
	fileSize := st.Size()

	// Empty file: web app sends a separate POST to /api/resources/...
	// (uploadEmptyFile) instead of routing through Resumable.js. Mirror
	// that here so 0-byte files actually materialize on the server
	// (Resumable.js cannot generate a chunk of length 0).
	if fileSize == 0 {
		// RemotePath inside Drive/Home for the create call: strip the
		// `/drive/Home/` prefix the API client expects.
		rel := joinRemote(opts.ParentDir, opts.RemoteName)
		if err := c.CreateEmptyFile(ctx, rel); err != nil {
			return err
		}
		if progress != nil {
			progress(0, 0)
		}
		return nil
	}

	// Resume probe: how many bytes does the server already have for
	// this exact (parent_dir, file_name) pair? Web app does
	// `Math.floor(uploadedBytes / chunkSize)` to find the next chunk
	// index — anything smaller than that is "behind us", anything
	// equal-or-larger means "the chunk this byte falls into still needs
	// to be (re)sent in full". We follow the same floor convention so
	// the wire-level resume boundary matches.
	uploadedBytes, err := c.GetUploadedBytes(ctx, opts.Node, opts.ParentDir, opts.RemoteName)
	if err != nil {
		// GetUploadedBytes already swallows errors as "0", but defend
		// in depth in case that policy changes.
		uploadedBytes = 0
	}
	if uploadedBytes > fileSize {
		// Server claims more bytes than the local file has. Could be a
		// truncation on the source, or a mismatched name. Restart from
		// 0 — sending fewer bytes than promised would surface a
		// confusing "file size mismatch" error from the chunk endpoint.
		uploadedBytes = 0
	}
	startChunk := uploadedBytes / opts.ChunkSize
	totalChunks := (fileSize + opts.ChunkSize - 1) / opts.ChunkSize
	if startChunk > totalChunks {
		startChunk = totalChunks
	}

	if progress != nil {
		progress(startChunk*opts.ChunkSize, fileSize)
	}

	// File already complete on the server? Floor-aligned offset equal
	// to (or past) the total chunk count means there's nothing left to
	// push, so skip the upload entirely — Resumable.js does the same
	// when its file-uploaded-bytes probe returns the full file size,
	// and we match that here.
	if startChunk >= totalChunks {
		if progress != nil {
			progress(fileSize, fileSize)
		}
		return nil
	}

	// Open the file once and seek per chunk. Saves opening N file
	// handles for big files; on POSIX, ReadAt is cheaper than Read+Seek
	// because it doesn't mutate the file cursor.
	f, err := os.Open(opts.LocalPath)
	if err != nil {
		return fmt.Errorf("open %s: %w", opts.LocalPath, err)
	}
	defer f.Close()

	uploadLink, err := c.GetUploadLink(ctx, opts.Node, opts.ParentDir)
	if err != nil {
		return err
	}
	chunkURL := c.BaseURL + uploadLink

	identifier := uploadIdentifier(opts.ParentDir, opts.RelativePath)
	mimeType := guessMIME(opts.LocalPath)

	// One reusable buffer for all chunks — only the final chunk reads
	// fewer than ChunkSize bytes into it.
	buf := make([]byte, opts.ChunkSize)
	for chunkIdx := startChunk; chunkIdx < totalChunks; chunkIdx++ {
		startByte := chunkIdx * opts.ChunkSize
		chunkLen := opts.ChunkSize
		if remaining := fileSize - startByte; remaining < chunkLen {
			chunkLen = remaining
		}
		// ReadAt may return io.EOF on the final short read along with
		// the actual byte count — that's fine, treat (n>0, EOF) as a
		// successful read of n bytes. Anything else (real errors, or
		// short reads that aren't at EOF) is a hard failure.
		n, rerr := f.ReadAt(buf[:chunkLen], startByte)
		if rerr != nil && !(rerr == io.EOF && int64(n) == chunkLen) {
			return fmt.Errorf("read %s @ %d: %w", opts.LocalPath, startByte, rerr)
		}
		if int64(n) != chunkLen {
			return fmt.Errorf("short read at chunk %d: got %d, want %d", chunkIdx+1, n, chunkLen)
		}
		chunkData := buf[:chunkLen]

		if err := c.uploadChunk(ctx, chunkURL, opts, chunkUploadCtx{
			ChunkIndex:    chunkIdx, // 0-based; we send +1 on the wire
			TotalChunks:   totalChunks,
			ChunkLen:      chunkLen,
			StartByte:     startByte,
			FileSize:      fileSize,
			Identifier:    identifier,
			MimeType:      mimeType,
			ChunkContents: chunkData,
		}); err != nil {
			return fmt.Errorf("upload chunk %d/%d of %s: %w",
				chunkIdx+1, totalChunks, opts.LocalPath, err)
		}
		if progress != nil {
			progress(startByte+chunkLen, fileSize)
		}
	}
	return nil
}

// chunkUploadCtx bundles the per-chunk state. Pulled out into a struct
// just so uploadChunk's signature doesn't grow to 10+ parameters.
type chunkUploadCtx struct {
	ChunkIndex    int64  // 0-based chunk index; the wire value is ChunkIndex+1
	TotalChunks   int64  // total number of chunks for the whole file
	ChunkLen      int64  // byte length of THIS chunk (the final chunk may be short)
	StartByte     int64  // absolute file offset where this chunk begins
	FileSize      int64  // total file size in bytes
	Identifier    string // stable resumableIdentifier (see uploadIdentifier)
	MimeType      string // extension-derived MIME type (see guessMIME)
	ChunkContents []byte // the chunk's bytes (view into the caller's buffer)
}

// uploadChunk POSTs a single chunk and applies the
// permanent / retryable / success classification. It returns nil only
// on a 2xx response (the web app accepts both 200 and 201; we follow
// suit). Permanent errors short-circuit the retry loop.
func (c *Client) uploadChunk(
	ctx context.Context,
	chunkURL string,
	opts UploadOpts,
	cu chunkUploadCtx,
) error {
	// MaxRetries is the retry budget ON TOP of the first attempt;
	// zero-or-negative budgets clamp to exactly one attempt.
	maxAttempts := opts.MaxRetries + 1
	if maxAttempts < 1 {
		maxAttempts = 1
	}

	var lastErr error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		// Rebuild the multipart body on every attempt: the previous
		// POST consumed the io.Reader, so it cannot be replayed.
		body, contentType, berr := buildChunkBody(opts, cu)
		if berr != nil {
			return berr
		}
		headers := http.Header{
			"Accept": []string{"application/json; text/javascript, */*; q=0.01"},
			"Content-Disposition": []string{
				`attachment; filename="` + encodeURIComponent(opts.RemoteName) + `"`,
			},
			// Inclusive end-byte, per RFC 7233 / resumejs.ts setHeaders().
			"Content-Range": []string{fmt.Sprintf(
				"bytes %d-%d/%d",
				cu.StartByte,
				cu.StartByte+cu.ChunkLen-1,
				cu.FileSize,
			)},
		}

		_, err := c.do(ctx, http.MethodPost, chunkURL, body, headers, contentType)
		if err == nil {
			return nil
		}

		// Don't burn retries on cancellation: ctx.Err is final.
		if ctxErr := ctx.Err(); ctxErr != nil {
			return ctxErr
		}

		var hErr *HTTPError
		if errors.As(err, &hErr) {
			if _, isPermanent := permanentStatuses[hErr.Status]; isPermanent {
				return err
			}
		}
		lastErr = err
		if attempt < maxAttempts {
			// Sleep but stay cancelable.
			select {
			case <-time.After(opts.RetryBackoff):
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	}
	return fmt.Errorf("exhausted %d attempts: %w", maxAttempts, lastErr)
}

// buildChunkBody assembles the multipart/form-data body for one chunk.
+// Field set + ordering matches what Resumable.js + LarePass's +// resumejs.ts setQuery() emits, so the server can't tell whether this +// came from a browser or olares-cli. +// +// Three groups of fields: +// +// - Resumable.js core (resumableChunkNumber, ..., resumableTotalChunks): +// match the parameter names from resumable.js's `chunkNumberParameterName` +// etc. defaults. resumableChunkNumber is 1-indexed (offset+1). +// - Drive customQuery (parent_dir, driveType, ..., resumableType): +// the Drive override-set from setQuery() in resumejs.ts. +// - file: the actual chunk bytes, sent under the multipart filename +// of the basename (matches Resumable.js `fileParameterName: 'file'`). +// +// `relative_path` is included when RelativePath has a directory +// component (i.e. this is a folder-walk upload), matching +// resumejs.ts onChunkingComplete's `relative_path: +// relativePath.slice(0, lastIndexOf('/')+1)` semantics. +func buildChunkBody(opts UploadOpts, cu chunkUploadCtx) (io.Reader, string, error) { + var buf bytes.Buffer + mw := multipart.NewWriter(&buf) + + add := func(name, value string) error { + return mw.WriteField(name, value) + } + + // --- Resumable.js core fields (defaults from resumable.js). + for _, kv := range []struct{ k, v string }{ + {"resumableChunkNumber", strconv.FormatInt(cu.ChunkIndex+1, 10)}, + {"resumableChunkSize", strconv.FormatInt(opts.ChunkSize, 10)}, + {"resumableCurrentChunkSize", strconv.FormatInt(cu.ChunkLen, 10)}, + {"resumableTotalSize", strconv.FormatInt(cu.FileSize, 10)}, + {"resumableType", cu.MimeType}, + {"resumableIdentifier", cu.Identifier}, + {"resumableFilename", opts.RemoteName}, + {"resumableRelativePath", opts.RelativePath}, + {"resumableTotalChunks", strconv.FormatInt(cu.TotalChunks, 10)}, + } { + if err := add(kv.k, kv.v); err != nil { + return nil, "", err + } + } + + // --- Drive customQuery (resumejs.ts setQuery + onChunkingComplete). 
+ if err := add("parent_dir", opts.ParentDir); err != nil { + return nil, "", err + } + if err := add("driveType", "Drive"); err != nil { + return nil, "", err + } + if dir := relativeDir(opts.RelativePath); dir != "" { + if err := add("relative_path", dir); err != nil { + return nil, "", err + } + } + + // --- file part. Use CreatePart instead of CreateFormFile so we can + // set the chunk's MIME type explicitly (CreateFormFile hardcodes + // "application/octet-stream"). This matches the web app's + // `setChunkTypeFromFile` path, where the chunk's blob carries the + // file's real MIME (resumable.js calls `file[func](start, end, + // fileType)`). + hdr := textproto.MIMEHeader{} + hdr.Set( + "Content-Disposition", + fmt.Sprintf(`form-data; name="file"; filename=%q`, opts.RemoteName), + ) + hdr.Set("Content-Type", cu.MimeType) + part, err := mw.CreatePart(hdr) + if err != nil { + return nil, "", err + } + if _, err := part.Write(cu.ChunkContents); err != nil { + return nil, "", err + } + if err := mw.Close(); err != nil { + return nil, "", err + } + return &buf, mw.FormDataContentType(), nil +} + +func (o *UploadOpts) normalize() error { + if o.LocalPath == "" { + return errors.New("UploadOpts.LocalPath is required") + } + if o.Node == "" { + return errors.New("UploadOpts.Node is required") + } + if o.ParentDir == "" { + return errors.New("UploadOpts.ParentDir is required") + } + if !strings.HasSuffix(o.ParentDir, "/") { + // The server-side resume probe + chunk endpoint both expect + // parent_dir to end in '/'. Force it rather than failing — the + // caller almost always means "this directory". 
+ o.ParentDir += "/" + } + if o.RemoteName == "" { + o.RemoteName = filepath.Base(o.LocalPath) + } + if o.RelativePath == "" { + o.RelativePath = o.RemoteName + } + if o.ChunkSize <= 0 { + o.ChunkSize = DefaultChunkSize + } + if o.MaxRetries == 0 { + o.MaxRetries = DefaultMaxRetries + } + if o.RetryBackoff == 0 { + o.RetryBackoff = DefaultRetryBackoff + } + return nil +} + +// joinRemote builds the Drive/Home-relative path used by CreateEmptyFile. +// `parentDir` is the full `/drive/Home/...` form; we strip the prefix + +// trailing slash so CreateEmptyFile can rebuild the URL with the right +// percent-encoding. +func joinRemote(parentDir, name string) string { + pd := strings.TrimSuffix(parentDir, "/") + const prefix = "/drive/Home" + if strings.HasPrefix(pd, prefix) { + pd = strings.TrimPrefix(pd, prefix) + } + pd = strings.Trim(pd, "/") + if pd == "" { + return name + } + return pd + "/" + name +} + +// relativeDir returns the directory portion of a POSIX-style relative +// path with a trailing slash, or "" if the path has no directory +// component (single-file upload). Mirrors the web app's +// `relativePath.slice(0, lastIndexOf('/') + 1)` from +// resumejs.ts onChunkingComplete. +func relativeDir(relPath string) string { + idx := strings.LastIndex(relPath, "/") + if idx < 0 { + return "" + } + return relPath[:idx+1] +} + +// uploadIdentifier picks a stable per-(parent_dir, relativePath) +// identifier so retries / re-runs of the same file produce the same +// `resumableIdentifier` form value. The server-side resume key is +// (parent_dir, file_name), so this is effectively cosmetic — but +// keeping it stable across runs makes server logs easier to follow, +// and makes it impossible for two concurrent uploads of different +// files to collide on the identifier. 
func uploadIdentifier(parentDir, relativePath string) string {
	digest := md5.Sum([]byte(parentDir + relativePath)) // #nosec G401 -- not security
	return hex.EncodeToString(digest[:])
}

// extToMIME is the extension → MIME table guessMIME consults; it keeps
// exactly the set of types the web app's `mime.getType` lookup covered
// for the formats we care about.
var extToMIME = map[string]string{
	".txt":  "text/plain",
	".json": "application/json",
	".html": "text/html",
	".htm":  "text/html",
	".css":  "text/css",
	".js":   "application/javascript",
	".png":  "image/png",
	".jpg":  "image/jpeg",
	".jpeg": "image/jpeg",
	".gif":  "image/gif",
	".pdf":  "application/pdf",
	".mp4":  "video/mp4",
	".mov":  "video/quicktime",
	".mp3":  "audio/mpeg",
	".zip":  "application/zip",
	".tar":  "application/x-tar",
	".gz":   "application/gzip",
}

// guessMIME returns a best-effort MIME type for the file, mirroring
// the web app's `mime.getType(fileName) || 'application/octet-stream'`
// fallback. Only the (case-folded) extension is consulted — no magic
// bytes — same as the web app, which only sees the filename plus the
// browser-detected blob type.
func guessMIME(localPath string) string {
	if mt, ok := extToMIME[strings.ToLower(filepath.Ext(localPath))]; ok {
		return mt
	}
	return "application/octet-stream"
}
+func fixtureFile(t *testing.T, size int64) string { + t.Helper() + dir := t.TempDir() + p := filepath.Join(dir, "fixture.bin") + f, err := os.Create(p) + if err != nil { + t.Fatal(err) + } + defer f.Close() + buf := make([]byte, 4096) + written := int64(0) + for written < size { + toWrite := int64(len(buf)) + if size-written < toWrite { + toWrite = size - written + } + for i := int64(0); i < toWrite; i++ { + buf[i] = byte((written + i) & 0xff) + } + n, err := f.Write(buf[:toWrite]) + if err != nil { + t.Fatal(err) + } + written += int64(n) + } + return p +} + +// recordedChunk captures everything we want to assert about a single +// chunk POST. Tests inspect a slice of these to verify the wire shape. +type recordedChunk struct { + contentRange string + contentDisp string + chunkBytes []byte + form map[string]string + xAuthHeader string +} + +// chunkRecorder is the upload-link target handler used by tests. It +// parses each multipart POST, records the relevant headers + form +// fields + chunk bytes, and (by default) returns 200 to accept the +// chunk. +// +// Tests that want to simulate retryable / permanent failures wrap +// chunkRecorder with extra logic — see TestUploadFile_Retries / +// TestUploadFile_PermanentError below. 
+type chunkRecorder struct {
+	mu     sync.Mutex
+	chunks []recordedChunk
+}
+
+func (cr *chunkRecorder) record(r *http.Request) (*recordedChunk, error) {
+	if err := r.ParseMultipartForm(64 << 20); err != nil {
+		return nil, err
+	}
+	form := map[string]string{}
+	for k, vs := range r.MultipartForm.Value {
+		form[k] = vs[0]
+	}
+	var chunkBytes []byte
+	if files, ok := r.MultipartForm.File["file"]; ok && len(files) > 0 {
+		f, err := files[0].Open()
+		if err != nil {
+			return nil, err
+		}
+		defer f.Close()
+		chunkBytes, err = io.ReadAll(f)
+		if err != nil {
+			return nil, err
+		}
+	}
+	rc := recordedChunk{
+		contentRange: r.Header.Get("Content-Range"),
+		contentDisp:  r.Header.Get("Content-Disposition"),
+		xAuthHeader:  r.Header.Get("X-Authorization"),
+		form:         form,
+		chunkBytes:   chunkBytes,
+	}
+	cr.mu.Lock()
+	cr.chunks = append(cr.chunks, rc)
+	cr.mu.Unlock()
+	return &rc, nil
+}
+
+// uploadServerOpts plumbs per-test customization into uploadServer
+// without proliferating constructor variants.
+type uploadServerOpts struct {
+	uploadedBytes  int64                                                     // probe response
+	uploadHandler  func(*chunkRecorder, http.ResponseWriter, *http.Request)  // override chunk POST
+	uploadLinkPath string                                                    // override default link path
+}
+
+// uploadServer wires up an httptest.Server that knows how to answer
+// the endpoints UploadFile depends on:
+//
+//   - GET /api/nodes/ → returns one node ("n")
+//   - GET /upload/upload-link/<node>/<path> → returns a path the chunk
+//     POSTs target
+//   - GET /upload/file-uploaded-bytes/<node>/<path> → returns opts.uploadedBytes
+//   - POST <uploadLink> → routed through opts.uploadHandler
+//
+// The recorded chunks are returned via the *chunkRecorder so tests
+// can assert on them post-hoc.
+func uploadServer(t *testing.T, opts uploadServerOpts) (*httptest.Server, *chunkRecorder) { + t.Helper() + cr := &chunkRecorder{} + uploadLink := opts.uploadLinkPath + if uploadLink == "" { + uploadLink = "/seafhttp/upload-aj/repo-1/" + } + + mux := http.NewServeMux() + mux.HandleFunc("/api/nodes/", func(w http.ResponseWriter, _ *http.Request) { + fmt.Fprint(w, `{"data":{"nodes":[{"name":"n"}]}}`) + }) + mux.HandleFunc("/upload/upload-link/n/", func(w http.ResponseWriter, _ *http.Request) { + fmt.Fprint(w, uploadLink) + }) + mux.HandleFunc("/upload/file-uploaded-bytes/n/", func(w http.ResponseWriter, _ *http.Request) { + fmt.Fprintf(w, `{"uploadedBytes":%d}`, opts.uploadedBytes) + }) + mux.HandleFunc(uploadLink, func(w http.ResponseWriter, r *http.Request) { + if opts.uploadHandler != nil { + opts.uploadHandler(cr, w, r) + return + } + if _, err := cr.record(r); err != nil { + http.Error(w, err.Error(), 500) + return + } + w.WriteHeader(http.StatusOK) + }) + + srv := httptest.NewServer(mux) + t.Cleanup(srv.Close) + return srv, cr +} + +// TestUploadFile_Multichunk: the happy path with a multi-chunk file. +// Asserts that: +// +// - chunk count + chunk bytes are right +// - resumableChunkNumber is 1-indexed +// - resumableTotalChunks / resumableTotalSize / resumableCurrentChunkSize +// all line up with the file size + chunk size +// - Content-Range uses INCLUSIVE end-byte semantics (matches +// resumejs.ts setHeaders()) +// - X-Authorization is plumbed through on every chunk +// - the file's exact bytes round-trip +func TestUploadFile_Multichunk(t *testing.T) { + const chunkSize = 1024 + // 2.5 chunks: covers full + full + partial. 
+ fileSize := int64(2*chunkSize + chunkSize/2) + local := fixtureFile(t, fileSize) + srv, recorder := uploadServer(t, uploadServerOpts{}) + + c := &Client{HTTPClient: srv.Client(), BaseURL: srv.URL, AccessToken: "tk"} + if err := c.UploadFile(context.Background(), UploadOpts{ + LocalPath: local, + Node: "n", + ParentDir: "/drive/Home/Docs/", + RemoteName: "f.bin", + RelativePath: "f.bin", + ChunkSize: chunkSize, + }, nil); err != nil { + t.Fatal(err) + } + + if got, want := len(recorder.chunks), 3; got != want { + t.Fatalf("got %d chunks, want %d", got, want) + } + expectedRanges := []string{ + fmt.Sprintf("bytes 0-%d/%d", chunkSize-1, fileSize), + fmt.Sprintf("bytes %d-%d/%d", chunkSize, 2*chunkSize-1, fileSize), + fmt.Sprintf("bytes %d-%d/%d", 2*chunkSize, fileSize-1, fileSize), + } + wantSizes := []int64{chunkSize, chunkSize, chunkSize / 2} + wholeFile := readFile(t, local) + gotFile := []byte{} + for i, ck := range recorder.chunks { + if ck.xAuthHeader != "tk" { + t.Errorf("chunk %d: X-Authorization = %q", i, ck.xAuthHeader) + } + if ck.contentRange != expectedRanges[i] { + t.Errorf("chunk %d: Content-Range = %q, want %q", + i, ck.contentRange, expectedRanges[i]) + } + if int64(len(ck.chunkBytes)) != wantSizes[i] { + t.Errorf("chunk %d: byte len = %d, want %d", + i, len(ck.chunkBytes), wantSizes[i]) + } + // resumableChunkNumber is 1-indexed. 
+ if got := ck.form["resumableChunkNumber"]; got != strconv.Itoa(i+1) { + t.Errorf("chunk %d: resumableChunkNumber = %q", i, got) + } + if got := ck.form["resumableCurrentChunkSize"]; got != strconv.FormatInt(wantSizes[i], 10) { + t.Errorf("chunk %d: resumableCurrentChunkSize = %q", i, got) + } + if got := ck.form["resumableTotalSize"]; got != strconv.FormatInt(fileSize, 10) { + t.Errorf("chunk %d: resumableTotalSize = %q", i, got) + } + if got := ck.form["resumableTotalChunks"]; got != "3" { + t.Errorf("chunk %d: resumableTotalChunks = %q", i, got) + } + if got := ck.form["resumableFilename"]; got != "f.bin" { + t.Errorf("chunk %d: resumableFilename = %q", i, got) + } + if got := ck.form["resumableRelativePath"]; got != "f.bin" { + t.Errorf("chunk %d: resumableRelativePath = %q", i, got) + } + if got := ck.form["parent_dir"]; got != "/drive/Home/Docs/" { + t.Errorf("chunk %d: parent_dir = %q", i, got) + } + if got := ck.form["driveType"]; got != "Drive" { + t.Errorf("chunk %d: driveType = %q", i, got) + } + gotFile = append(gotFile, ck.chunkBytes...) + } + if !bytes.Equal(gotFile, wholeFile) { + t.Errorf("reassembled bytes don't match source file") + } +} + +// TestUploadFile_ResumesFromServerOffset: when /file-uploaded-bytes/ +// reports a non-zero count, UploadFile must skip the +// already-uploaded-chunks and start from the next one. The bytes +// reported by the server are floored to a chunk boundary (matches the +// web app's Math.floor(uploadedBytes / chunkSize) trick: it's safe to +// re-upload the unaligned tail because chunks are deterministic). +func TestUploadFile_ResumesFromServerOffset(t *testing.T) { + const chunkSize = 1024 + fileSize := int64(3 * chunkSize) + local := fixtureFile(t, fileSize) + srv, recorder := uploadServer(t, uploadServerOpts{ + // Server reports "I have 1.5 chunks worth of bytes." The + // floor-to-chunk-boundary logic should still send chunks 2 + 3 + // (i.e. resumableChunkNumber 2 + 3, 0-based offsets 1 + 2). 
+ uploadedBytes: int64(chunkSize + chunkSize/2), + }) + c := &Client{HTTPClient: srv.Client(), BaseURL: srv.URL, AccessToken: "tk"} + if err := c.UploadFile(context.Background(), UploadOpts{ + LocalPath: local, Node: "n", + ParentDir: "/drive/Home/", RemoteName: "f.bin", RelativePath: "f.bin", + ChunkSize: chunkSize, + }, nil); err != nil { + t.Fatal(err) + } + if got := len(recorder.chunks); got != 2 { + t.Fatalf("got %d chunks, want 2 (resume should skip chunk 1)", got) + } + want := []string{ + fmt.Sprintf("bytes %d-%d/%d", chunkSize, 2*chunkSize-1, fileSize), + fmt.Sprintf("bytes %d-%d/%d", 2*chunkSize, 3*chunkSize-1, fileSize), + } + for i, ck := range recorder.chunks { + if ck.contentRange != want[i] { + t.Errorf("chunk %d Content-Range = %q, want %q", i, ck.contentRange, want[i]) + } + if got := ck.form["resumableChunkNumber"]; got != strconv.Itoa(int(int64(i)+2)) { + t.Errorf("chunk %d resumableChunkNumber = %q, want %d", i, got, i+2) + } + } +} + +// TestUploadFile_ServerHasFullFile: when uploadedBytes >= fileSize, +// nothing needs to be sent. UploadFile should return nil without any +// chunk POST. +func TestUploadFile_ServerHasFullFile(t *testing.T) { + const chunkSize = 1024 + fileSize := int64(2 * chunkSize) + local := fixtureFile(t, fileSize) + srv, recorder := uploadServer(t, uploadServerOpts{uploadedBytes: fileSize}) + c := &Client{HTTPClient: srv.Client(), BaseURL: srv.URL, AccessToken: "tk"} + if err := c.UploadFile(context.Background(), UploadOpts{ + LocalPath: local, Node: "n", + ParentDir: "/drive/Home/", RemoteName: "f.bin", RelativePath: "f.bin", + ChunkSize: chunkSize, + }, nil); err != nil { + t.Fatal(err) + } + if got := len(recorder.chunks); got != 0 { + t.Errorf("expected 0 chunks, got %d", got) + } +} + +// TestUploadFile_Retries: a transient 502 should retry, eventually +// succeed, and not abort the upload. 
+func TestUploadFile_Retries(t *testing.T) { + const chunkSize = 512 + fileSize := int64(chunkSize) + local := fixtureFile(t, fileSize) + var attempts int32 + srv, _ := uploadServer(t, uploadServerOpts{ + uploadHandler: func(cr *chunkRecorder, w http.ResponseWriter, r *http.Request) { + n := atomic.AddInt32(&attempts, 1) + if n < 3 { + http.Error(w, "transient", http.StatusBadGateway) + return + } + if _, err := cr.record(r); err != nil { + http.Error(w, err.Error(), 500) + return + } + w.WriteHeader(http.StatusOK) + }, + }) + c := &Client{HTTPClient: srv.Client(), BaseURL: srv.URL, AccessToken: "tk"} + if err := c.UploadFile(context.Background(), UploadOpts{ + LocalPath: local, Node: "n", + ParentDir: "/drive/Home/", RemoteName: "f.bin", RelativePath: "f.bin", + ChunkSize: chunkSize, + MaxRetries: 3, + RetryBackoff: time.Millisecond, // keep the test fast + }, nil); err != nil { + t.Fatal(err) + } + if got := atomic.LoadInt32(&attempts); got != 3 { + t.Errorf("attempts = %d, want 3", got) + } +} + +// TestUploadFile_PermanentError: a 400 should NOT trigger a retry, so +// the chunk handler should be hit exactly once before UploadFile +// surfaces the permanent error. 
+func TestUploadFile_PermanentError(t *testing.T) { + const chunkSize = 512 + fileSize := int64(chunkSize) + local := fixtureFile(t, fileSize) + var attempts int32 + srv, _ := uploadServer(t, uploadServerOpts{ + uploadHandler: func(_ *chunkRecorder, w http.ResponseWriter, _ *http.Request) { + atomic.AddInt32(&attempts, 1) + http.Error(w, "bad", http.StatusBadRequest) + }, + }) + c := &Client{HTTPClient: srv.Client(), BaseURL: srv.URL, AccessToken: "tk"} + err := c.UploadFile(context.Background(), UploadOpts{ + LocalPath: local, Node: "n", + ParentDir: "/drive/Home/", RemoteName: "f.bin", RelativePath: "f.bin", + ChunkSize: chunkSize, + MaxRetries: 3, + RetryBackoff: time.Millisecond, + }, nil) + if err == nil { + t.Fatal("expected error, got nil") + } + var hErr *HTTPError + if !errors.As(err, &hErr) { + t.Fatalf("expected *HTTPError, got %T (%v)", err, err) + } + if hErr.Status != 400 { + t.Errorf("status = %d, want 400", hErr.Status) + } + if got := atomic.LoadInt32(&attempts); got != 1 { + t.Errorf("attempts = %d, want 1 (permanent error must not retry)", got) + } +} + +// TestUploadFile_EmptyFile: 0-byte files go through the +// /api/resources POST (CreateEmptyFile), NOT through the chunk +// pipeline. Resumable.js can't represent a 0-byte chunk so we have to +// take the same detour as the web app. 
+func TestUploadFile_EmptyFile(t *testing.T) { + local := fixtureFile(t, 0) + chunkHits := int32(0) + emptyHit := int32(0) + mux := http.NewServeMux() + mux.HandleFunc("/api/nodes/", func(w http.ResponseWriter, _ *http.Request) { + fmt.Fprint(w, `{"data":{"nodes":[{"name":"n"}]}}`) + }) + mux.HandleFunc("/upload/upload-link/n/", func(w http.ResponseWriter, _ *http.Request) { + fmt.Fprint(w, "/upload-target/") + }) + mux.HandleFunc("/upload/file-uploaded-bytes/n/", func(w http.ResponseWriter, _ *http.Request) { + fmt.Fprint(w, `{"uploadedBytes":0}`) + }) + mux.HandleFunc("/upload-target/", func(w http.ResponseWriter, _ *http.Request) { + atomic.AddInt32(&chunkHits, 1) + w.WriteHeader(200) + }) + // /api/resources/drive/Home/.../empty.bin (no trailing slash → empty file create). + mux.HandleFunc("/api/resources/drive/Home/Docs/empty.bin", func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + t.Errorf("empty file create: method = %s", r.Method) + } + atomic.AddInt32(&emptyHit, 1) + w.WriteHeader(200) + }) + srv := httptest.NewServer(mux) + defer srv.Close() + c := &Client{HTTPClient: srv.Client(), BaseURL: srv.URL, AccessToken: "tk"} + if err := c.UploadFile(context.Background(), UploadOpts{ + LocalPath: local, Node: "n", + ParentDir: "/drive/Home/Docs/", RemoteName: "empty.bin", RelativePath: "empty.bin", + ChunkSize: 1024, + }, nil); err != nil { + t.Fatal(err) + } + if got := atomic.LoadInt32(&emptyHit); got != 1 { + t.Errorf("empty-file create hit = %d, want 1", got) + } + if got := atomic.LoadInt32(&chunkHits); got != 0 { + t.Errorf("chunk handler hit = %d, want 0 (empty file should bypass)", got) + } +} + +// TestUploadFile_FolderRelativePath: when a file lives in a subdir +// of the upload root (RelativePath has '/'), the per-chunk +// `relative_path` form field carries the directory prefix WITH a +// trailing slash — same shape resumejs.ts onChunkingComplete emits. 
+func TestUploadFile_FolderRelativePath(t *testing.T) { + const chunkSize = 256 + local := fixtureFile(t, chunkSize) + srv, recorder := uploadServer(t, uploadServerOpts{}) + c := &Client{HTTPClient: srv.Client(), BaseURL: srv.URL, AccessToken: "tk"} + if err := c.UploadFile(context.Background(), UploadOpts{ + LocalPath: local, + Node: "n", + ParentDir: "/drive/Home/X/", + RemoteName: "foo.txt", + RelativePath: "mydir/sub/foo.txt", + ChunkSize: chunkSize, + }, nil); err != nil { + t.Fatal(err) + } + if len(recorder.chunks) != 1 { + t.Fatalf("got %d chunks, want 1", len(recorder.chunks)) + } + ck := recorder.chunks[0] + if got := ck.form["relative_path"]; got != "mydir/sub/" { + t.Errorf("relative_path = %q, want %q", got, "mydir/sub/") + } + if got := ck.form["resumableRelativePath"]; got != "mydir/sub/foo.txt" { + t.Errorf("resumableRelativePath = %q, want %q", got, "mydir/sub/foo.txt") + } +} + +// TestUploadFile_ContextCancel: cancelling ctx mid-retry should bail +// out promptly with ctx.Err(), NOT keep grinding through the retry +// budget. 
+func TestUploadFile_ContextCancel(t *testing.T) { + const chunkSize = 256 + local := fixtureFile(t, chunkSize) + srv, _ := uploadServer(t, uploadServerOpts{ + uploadHandler: func(_ *chunkRecorder, w http.ResponseWriter, _ *http.Request) { + http.Error(w, "transient", http.StatusBadGateway) + }, + }) + c := &Client{HTTPClient: srv.Client(), BaseURL: srv.URL, AccessToken: "tk"} + ctx, cancel := context.WithCancel(context.Background()) + go func() { + time.Sleep(50 * time.Millisecond) + cancel() + }() + err := c.UploadFile(ctx, UploadOpts{ + LocalPath: local, Node: "n", + ParentDir: "/drive/Home/", RemoteName: "f.bin", RelativePath: "f.bin", + ChunkSize: chunkSize, + MaxRetries: 1000, + RetryBackoff: 50 * time.Millisecond, + }, nil) + if err == nil { + t.Fatal("expected error after cancel") + } + if !errors.Is(err, context.Canceled) { + t.Errorf("err = %v; want context.Canceled", err) + } +} + +// readFile reads a fixture back so tests can compare the round-trip. +func readFile(t *testing.T, p string) []byte { + t.Helper() + b, err := os.ReadFile(p) + if err != nil { + t.Fatal(err) + } + return b +} + +// Sanity check that ParseMultipartForm sees the chunk under field +// name "file" — guards against accidental rename of fileParameterName. 
+func TestBuildChunkBody_FileFieldNameIsFile(t *testing.T) { + rdr, ct, err := buildChunkBody(UploadOpts{ + ChunkSize: 1024, RemoteName: "x.bin", RelativePath: "x.bin", + ParentDir: "/drive/Home/", + }, chunkUploadCtx{ + ChunkIndex: 0, TotalChunks: 1, ChunkLen: 4, StartByte: 0, FileSize: 4, + Identifier: "id", MimeType: "application/octet-stream", + ChunkContents: []byte{1, 2, 3, 4}, + }) + if err != nil { + t.Fatal(err) + } + _, params, err := mediaType(ct) + if err != nil { + t.Fatal(err) + } + mr := multipart.NewReader(rdr, params["boundary"]) + sawFile := false + for { + p, err := mr.NextPart() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + if p.FormName() == "file" { + sawFile = true + b, _ := io.ReadAll(p) + if !bytes.Equal(b, []byte{1, 2, 3, 4}) { + t.Errorf("file part = %v", b) + } + } + } + if !sawFile { + t.Error("did not find a part named 'file' in the multipart body") + } +} + +// mediaType is a tiny stand-in for mime.ParseMediaType to avoid the +// extra import in the test (we only need the boundary). +func mediaType(s string) (string, map[string]string, error) { + idx := strings.Index(s, ";") + if idx < 0 { + return s, map[string]string{}, nil + } + out := map[string]string{} + for _, kv := range strings.Split(s[idx+1:], ";") { + kv = strings.TrimSpace(kv) + eq := strings.Index(kv, "=") + if eq < 0 { + continue + } + k := kv[:eq] + v := strings.Trim(kv[eq+1:], "\"") + v, _ = url.QueryUnescape(v) + out[k] = v + } + return s[:idx], out, nil +} diff --git a/cli/pkg/files/upload/walker.go b/cli/pkg/files/upload/walker.go new file mode 100644 index 000000000..589b07d42 --- /dev/null +++ b/cli/pkg/files/upload/walker.go @@ -0,0 +1,312 @@ +// walker.go: turn a local-path + remote-path pair into a flat list of +// per-file upload tasks (UploadOpts) plus the empty-directory mkdirs +// the chunk-only protocol can't express on its own. 
+//
+// Path semantics (deliberately rsync-LIKE-but-not-rsync):
+//
+//   - <local> is a regular file:
+//   - <remote> ends with '/' → upload to <remote>/<basename of local>
+//   - else → upload to <remote> (treat <remote> as full target
+//     path, i.e. user is renaming on the way in)
+//   - <local> is a directory:
+//   - <remote> MUST end with '/' (the destination is a directory).
+//   - The walker recursively emits every regular file under <local>;
+//     each file's RelativePath includes <basename of local> as the
+//     top-level component so the source folder's name appears under
+//     <remote> on the server (i.e. `upload mydir drive/Home/X/` →
+//     drive/Home/X/mydir/...). This matches the LarePass folder-upload
+//     UI, which always preserves the picked folder's name.
+//   - Empty subdirectories are recorded as EmptyDirs so the cobra
+//     command can pre-mkdir them before the chunk uploads start —
+//     Resumable.js's chunk pipeline can't represent a 0-byte
+//     directory entry on its own.
+//
+// All wire-level paths use POSIX-style '/' separators regardless of the
+// host OS, because the server expects forward-slash paths.
+package upload
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+)
+
+// FileTask is one regular file to upload. The cobra command turns each
+// FileTask into an UploadOpts and pushes it through Client.UploadFile;
+// see Plan.ToUploadOpts for the conversion (kept on Plan so a future
+// `--dry-run` can render tasks without actually uploading).
+type FileTask struct {
+	// LocalPath is the on-disk path (absolute or working-directory-
+	// relative — same form the user passed in).
+	LocalPath string
+	// RelativePath is the file's path relative to the destination
+	// parent_dir, in POSIX form. For a single-file upload this is just
+	// the basename / target name; for a folder upload this includes
+	// the source-folder prefix (e.g. "mydir/sub/foo.txt").
+ RelativePath string + // RemoteName is the bare basename for the upload (typically the + // last segment of RelativePath; for the "rename on upload" single- + // file case it differs from filepath.Base(LocalPath)). + RemoteName string + // Size is the file size in bytes at plan time. Useful for sorting + // (largest-first scheduling) and for the progress display. + Size int64 +} + +// Plan is the structured result of resolving a (, ) pair +// against the local filesystem. The cobra command consumes it directly: +// run EmptyDirs through Client.Mkdir, then schedule Files through an +// errgroup of Client.UploadFile. +type Plan struct { + // ParentDir is the constant `/drive/Home/...` parent directory + // (with trailing '/') that the upload session is anchored to. Same + // value goes into UploadOpts.ParentDir for every file. + ParentDir string + // RelativeRoot is the path RELATIVE to /Home that ParentDir maps + // to (no leading or trailing slash, e.g. "Documents/Backups"). The + // cobra command passes this to Client.Mkdir to ensure the + // destination dir itself exists before any file upload runs. May + // be empty when uploading directly to /Home. + RelativeRoot string + // EmptyDirs lists the additional sub-directories (POSIX-relative + // to RelativeRoot, no leading slash, no trailing slash) that need + // to be pre-created because the source contains them but no files + // were emitted underneath. Sorted shallow-to-deep so naive + // sequential mkdir works. + EmptyDirs []string + // Files is the flat list of per-file upload tasks. Order is + // deterministic (sorted by RelativePath) so retries / dry-runs are + // stable. + Files []FileTask +} + +// BuildPlan validates inputs against the local filesystem and returns +// a Plan ready to be executed. +// +// `remoteSubPath` is the path RELATIVE to drive/Home as parsed from a +// FrontendPath (so "Documents/Backups" or "Documents/Backups/" — with +// or without leading slash; both are accepted). 
The trailing slash IS +// significant: it tells BuildPlan to interpret the remote as a +// directory rather than a file rename target. +// +// Errors: +// - doesn't exist +// - is a directory but doesn't end with '/' +func BuildPlan(localPath, remoteSubPath string) (*Plan, error) { + st, err := os.Stat(localPath) + if err != nil { + return nil, fmt.Errorf("stat %s: %w", localPath, err) + } + + remoteIsDir := strings.HasSuffix(remoteSubPath, "/") + cleanRemote := strings.Trim(remoteSubPath, "/") + + if st.Mode().IsRegular() { + return planForFile(localPath, st.Size(), cleanRemote, remoteIsDir), nil + } + if st.IsDir() { + if !remoteIsDir { + return nil, fmt.Errorf("local %q is a directory; remote %q must end with '/'", + localPath, remoteSubPath) + } + return planForDir(localPath, cleanRemote) + } + return nil, fmt.Errorf("%s is not a regular file or directory", localPath) +} + +// planForFile handles the single-regular-file branch. +// +// - remoteIsDir (caller-supplied trailing '/'): upload as +// /. cleanRemote is the parent_dir. +// - !remoteIsDir: cleanRemote is the full target path; we split it +// into parent + basename so parent_dir is well-formed (the chunk +// POST always wants a directory parent_dir, never a full path). +func planForFile(localPath string, size int64, cleanRemote string, remoteIsDir bool) *Plan { + base := filepath.Base(localPath) + + var ( + relativeRoot string // dir under /Home (no slashes) + remoteName string // file basename on server + ) + if remoteIsDir { + relativeRoot = cleanRemote + remoteName = base + } else if cleanRemote == "" { + // "/" with no trailing slash and no body — upload to /Home/. + relativeRoot = "" + remoteName = base + } else { + // Treat cleanRemote as the full destination path. 
+ idx := strings.LastIndex(cleanRemote, "/") + if idx < 0 { + relativeRoot = "" + remoteName = cleanRemote + } else { + relativeRoot = cleanRemote[:idx] + remoteName = cleanRemote[idx+1:] + } + } + + return &Plan{ + ParentDir: parentDirFor(relativeRoot), + RelativeRoot: relativeRoot, + Files: []FileTask{{ + LocalPath: localPath, + RelativePath: remoteName, + RemoteName: remoteName, + Size: size, + }}, + } +} + +// planForDir handles the recursive directory branch. The source +// directory's basename becomes the top-level under the destination +// (so `upload mydir drive/Home/X/` produces drive/Home/X/mydir/...). +// To upload contents-only without the wrapper folder, the user can do +// `upload mydir/sub drive/Home/X/mydir/sub/` — i.e. specify the +// destination explicitly. We deliberately do NOT support the rsync +// `local/` "contents only" trailing-slash convention in this first +// cut because it conflicts with the file-vs-directory disambiguation +// trailing slashes carry on the remote side; the few users who need +// it can chain mv on the server. +func planForDir(localDir, cleanRemote string) (*Plan, error) { + srcBase := filepath.Base(localDir) + relativeRoot := strings.Trim(cleanRemote, "/") + + plan := &Plan{ + ParentDir: parentDirFor(relativeRoot), + RelativeRoot: relativeRoot, + } + + // Pre-collect: walk first, then sort + decide which dirs are + // "empty" (no files emitted under them). This is O(N) extra memory + // over a streaming walk but lets us emit a deterministic plan + a + // clean EmptyDirs list, which makes the resulting upload trivially + // resumable across runs (no order-of-operations subtleties). + var allDirs []string + dirHasFile := map[string]bool{} + + err := filepath.WalkDir(localDir, func(path string, d os.DirEntry, walkErr error) error { + if walkErr != nil { + return walkErr + } + // Compute the path relative to localDir, then prefix with + // srcBase so the source folder name appears in the upload tree. 
+ rel, relErr := filepath.Rel(localDir, path) + if relErr != nil { + return relErr + } + // On Windows, filepath.Rel would return backslash-separated. + // Normalize to POSIX for the wire. + rel = filepath.ToSlash(rel) + if rel == "." { + rel = "" + } + + // posixRel is the relative path under ParentDir on the server + // — always includes the source basename as the first segment. + posixRel := srcBase + if rel != "" { + posixRel = srcBase + "/" + rel + } + + if d.IsDir() { + // Skip the source root itself; "" gets implicitly + // created when its first file lands. Track sub-directories + // so we can mkdir the empty ones explicitly later. + if rel == "" { + return nil + } + allDirs = append(allDirs, posixRel) + return nil + } + if !d.Type().IsRegular() { + // Skip symlinks / devices / sockets — we don't want to + // silently follow links and explode the upload, and we + // can't meaningfully upload a device. + return nil + } + info, infoErr := d.Info() + if infoErr != nil { + return infoErr + } + plan.Files = append(plan.Files, FileTask{ + LocalPath: path, + RelativePath: posixRel, + RemoteName: filepath.Base(path), + Size: info.Size(), + }) + // Mark every ancestor directory of this file as "has-file" so + // it doesn't show up in EmptyDirs. + for d := dirOfPosix(posixRel); d != ""; d = dirOfPosix(d) { + if dirHasFile[d] { + break + } + dirHasFile[d] = true + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("walk %s: %w", localDir, err) + } + + // Determine the empty dirs: subtree dirs the walk saw that don't + // contain any file. Sorted shallow-first so iterative mkdir works. 
+ for _, d := range allDirs { + if !dirHasFile[d] { + plan.EmptyDirs = append(plan.EmptyDirs, d) + } + } + sort.SliceStable(plan.EmptyDirs, func(i, j int) bool { + return depth(plan.EmptyDirs[i]) < depth(plan.EmptyDirs[j]) + }) + sort.SliceStable(plan.Files, func(i, j int) bool { + return plan.Files[i].RelativePath < plan.Files[j].RelativePath + }) + + return plan, nil +} + +// ToUploadOpts converts one FileTask into an UploadOpts ready for +// Client.UploadFile, threading per-call settings (node, chunk size, +// retries) from the cobra command. Side-effect free. +func (p *Plan) ToUploadOpts(t FileTask, node string, chunkSize int64, maxRetries int) UploadOpts { + return UploadOpts{ + LocalPath: t.LocalPath, + Node: node, + ParentDir: p.ParentDir, + RemoteName: t.RemoteName, + RelativePath: t.RelativePath, + ChunkSize: chunkSize, + MaxRetries: maxRetries, + } +} + +// parentDirFor returns the `/drive/Home/...` parent_dir form for a +// /Home-relative directory. Always ends with '/'. +func parentDirFor(relativeRoot string) string { + rr := strings.Trim(relativeRoot, "/") + if rr == "" { + return "/drive/Home/" + } + return "/drive/Home/" + rr + "/" +} + +// dirOfPosix returns the POSIX-style parent directory of `p`, or "" +// when p has no '/'. Used to walk up the ancestor chain when marking +// dirs that contain files. +func dirOfPosix(p string) string { + idx := strings.LastIndex(p, "/") + if idx < 0 { + return "" + } + return p[:idx] +} + +// depth counts '/' separators; used to sort EmptyDirs shallow-first +// (ancestor dirs before descendants) so mkdir order is correct. 
+func depth(p string) int { + return strings.Count(p, "/") +} diff --git a/cli/pkg/files/upload/walker_test.go b/cli/pkg/files/upload/walker_test.go new file mode 100644 index 000000000..435b7bb61 --- /dev/null +++ b/cli/pkg/files/upload/walker_test.go @@ -0,0 +1,121 @@ +package upload + +import ( + "os" + "path/filepath" + "reflect" + "sort" + "testing" +) + +// writeFile is a tiny helper for the walker tests so each test reads +// linearly without inline `os.Create` / `defer Close` clutter. +func writeFile(t *testing.T, p string, content string) { + t.Helper() + if err := os.MkdirAll(filepath.Dir(p), 0o755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(p, []byte(content), 0o644); err != nil { + t.Fatal(err) + } +} + +// TestBuildPlan_SingleFile_RemoteIsDir: trailing slash on remote means +// "upload into this directory"; the server-side filename is the local +// basename. +func TestBuildPlan_SingleFile_RemoteIsDir(t *testing.T) { + dir := t.TempDir() + src := filepath.Join(dir, "report.pdf") + writeFile(t, src, "hello") + plan, err := BuildPlan(src, "Documents/") + if err != nil { + t.Fatal(err) + } + if plan.ParentDir != "/drive/Home/Documents/" { + t.Errorf("ParentDir = %q", plan.ParentDir) + } + if plan.RelativeRoot != "Documents" { + t.Errorf("RelativeRoot = %q", plan.RelativeRoot) + } + if len(plan.Files) != 1 { + t.Fatalf("Files = %d", len(plan.Files)) + } + f := plan.Files[0] + if f.RelativePath != "report.pdf" || f.RemoteName != "report.pdf" { + t.Errorf("file = %+v", f) + } +} + +// TestBuildPlan_SingleFile_RenameOnUpload: no trailing slash means +// the remote path IS the destination; we split it into parent + base. 
+func TestBuildPlan_SingleFile_RenameOnUpload(t *testing.T) { + dir := t.TempDir() + src := filepath.Join(dir, "report.pdf") + writeFile(t, src, "x") + plan, err := BuildPlan(src, "Documents/2026.pdf") + if err != nil { + t.Fatal(err) + } + if plan.ParentDir != "/drive/Home/Documents/" { + t.Errorf("ParentDir = %q", plan.ParentDir) + } + if plan.RelativeRoot != "Documents" { + t.Errorf("RelativeRoot = %q", plan.RelativeRoot) + } + f := plan.Files[0] + if f.RemoteName != "2026.pdf" || f.RelativePath != "2026.pdf" { + t.Errorf("file = %+v", f) + } +} + +// TestBuildPlan_DirectoryRequiresTrailingSlash: lowest-friction error +// when user forgets the trailing slash on a directory destination. +func TestBuildPlan_DirectoryRequiresTrailingSlash(t *testing.T) { + dir := t.TempDir() + if _, err := BuildPlan(dir, "Backups"); err == nil { + t.Fatal("expected error when local is dir but remote has no trailing slash") + } +} + +// TestBuildPlan_Directory_Recursion: walks a small tree and checks the +// emitted file list + the empty-dir mkdir list. Source basename +// becomes the top-level component under remote. 
+func TestBuildPlan_Directory_Recursion(t *testing.T) { + root := t.TempDir() + src := filepath.Join(root, "mydir") + writeFile(t, filepath.Join(src, "a.txt"), "a") + writeFile(t, filepath.Join(src, "sub", "b.txt"), "bb") + writeFile(t, filepath.Join(src, "sub", "deep", "c.txt"), "ccc") + if err := os.MkdirAll(filepath.Join(src, "empty", "nested"), 0o755); err != nil { + t.Fatal(err) + } + + plan, err := BuildPlan(src, "Backups/") + if err != nil { + t.Fatal(err) + } + if plan.ParentDir != "/drive/Home/Backups/" { + t.Errorf("ParentDir = %q", plan.ParentDir) + } + + gotFiles := make([]string, 0, len(plan.Files)) + for _, f := range plan.Files { + gotFiles = append(gotFiles, f.RelativePath) + } + wantFiles := []string{ + "mydir/a.txt", + "mydir/sub/b.txt", + "mydir/sub/deep/c.txt", + } + if !reflect.DeepEqual(gotFiles, wantFiles) { + t.Errorf("files:\n got %v\n want %v", gotFiles, wantFiles) + } + + // Empty dir + its (also empty) child should be in EmptyDirs. + gotEmpty := append([]string(nil), plan.EmptyDirs...) + sort.Strings(gotEmpty) + wantEmpty := []string{"mydir/empty", "mydir/empty/nested"} + if !reflect.DeepEqual(gotEmpty, wantEmpty) { + t.Errorf("empty dirs:\n got %v\n want %v", gotEmpty, wantEmpty) + } +} From 68fa9424c4259e415ecaba49b5b95e08b1440dc8 Mon Sep 17 00:00:00 2001 From: Peng Peng Date: Sat, 25 Apr 2026 22:39:39 +0800 Subject: [PATCH 06/12] docs(cli): add olares-shared and olares-files agent skills MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two new SKILL.md files under cli/skills/, modeled after larksuite-cli's lark-shared / lark-drive style, so AI coding assistants get reliable guidance on: - olares-shared: profile model, password+TOTP login (mode A), refresh-token import (mode B), profile use/list/remove + global --profile, OS keychain storage layout, the re-authentication state machine, and a recovery table for HTTP 401/403 / already-authenticated / 2FA-required errors. 
- olares-files: the 3-segment frontend path schema, trailing-slash conventions, two server-side quirks the agent MUST respect (POST mkdir auto-renames existing dirs to "Foo (1)"; GET single-file resource returns HTTP 500 — Stat must list parents instead), X-Authorization transport, and per-verb cheatsheets for ls / upload / download / cat / rm with wire shapes and key flags. The frontmatter follows the lark-cli schema (name / version / description with trigger keywords / metadata.requires.bins / metadata.cliHelp), so Cursor's skill loader can pick them up from cli/skills/<skill>/SKILL.md. Made-with: Cursor --- cli/skills/olares-files/SKILL.md | 264 ++++++++++++++++++++++++++++++ cli/skills/olares-shared/SKILL.md | 203 +++++++++++++++++++++++ 2 files changed, 467 insertions(+) create mode 100644 cli/skills/olares-files/SKILL.md create mode 100644 cli/skills/olares-shared/SKILL.md diff --git a/cli/skills/olares-files/SKILL.md b/cli/skills/olares-files/SKILL.md new file mode 100644 index 000000000..7abbfd5d3 --- /dev/null +++ b/cli/skills/olares-files/SKILL.md @@ -0,0 +1,264 @@ +--- +name: olares-files +version: 1.0.0 +description: "olares-cli files command tree: list (ls), upload, download, cat, and rm against the per-user files-backend (drive/Home, drive/Data, sync, cache, external, awss3, dropbox, google, tencent, share). Covers the 3-segment frontend path schema (<fileType>/<extend>/<subPath>), resumable chunked upload (Drive v2 protocol), Range-based resumable download, recursive directory transfer with errgroup parallelism, batch DELETE wire shape, and two server-side quirks the user MUST know about (POST mkdir auto-renames existing dirs to 'Foo (1)'; GET single-file resource returns HTTP 500). Use whenever the user mentions files / drive / Home / Data / sync / cache, uploading or downloading files, listing a remote directory, deleting remote files, cat-ting a remote file, resumable transfers, /api/resources, /api/raw, frontend path, or sees errors like 'Documents (1)' appearing on the server."
+metadata: + requires: + bins: ["olares-cli"] + cliHelp: "olares-cli files --help" +--- + +# files (Drive v2 + per-user files-backend) + +**CRITICAL — before doing anything, MUST use the Read tool to read [`../olares-shared/SKILL.md`](../olares-shared/SKILL.md) for the profile selection, login, and HTTP 401/403 recovery rules that every command here depends on.** + +## Core concept: the 3-segment frontend path + +Every resource on the per-user files-backend is addressed by a 3-segment "frontend path" (see [`cli/cmd/ctl/files/path.go`](cli/cmd/ctl/files/path.go)): + +``` +<fileType>/<extend>[/<subPath>] +``` + +| Segment | Meaning | +|---------|---------| +| `fileType` | Storage class (lowercase, case-sensitive). One of: `drive`, `cache`, `sync`, `external`, `awss3`, `dropbox`, `google`, `tencent`, `share`, `internal` | +| `extend` | Volume / repo / account inside that class. **Case-sensitive.** Drive: only `Home` or `Data`. Cache / external: node name. Sync: seafile repo id. Cloud (`awss3`/`dropbox`/`google`/`tencent`): account key | +| `subPath` | Path inside `extend` (root if omitted). The leading `/` is implicit | + +Examples: + +```bash +drive/Home/ # Home volume root +drive/Home/Documents/report.pdf # a file under Home/Documents +drive/Data/Backups/ # Data volume, Backups subfolder +sync/<repo-id>/notes/ # seafile sync repo +cache/<node>/ # node-local cache +awss3/<account>/<path>/key.txt # S3-compatible cloud drive +``` + +> The first segment is normalized to lowercase by the backend; the CLI accepts only the canonical lowercase form on input. Drive's `extend` MUST be `Home` or `Data` exactly — `home` will be rejected with `invalid drive type`.
+ +## Trailing-slash convention (critical) + +Whether a path ends with `/` is meaningful and changes command behavior: + +| Path form | Meaning | +|-----------|---------| +| `drive/Home/Foo/` | Directory intent | +| `drive/Home/Foo` | File intent | + +This shows up in three places: + +- `files rm drive/Home/Foo/` requires `-r` (the trailing `/` declares "this is a directory") +- `files upload <local> drive/Home/Documents/` means "upload INTO Documents/"; `files upload <local> drive/Home/Documents/2026-Q1.pdf` means "upload AS that exact path (rename on the way in)" +- `files ls drive/Home/` lists the volume root; the parser tolerates both `drive/Home` and `drive/Home/` for ls but the trailing slash is recommended for clarity + +## Server-side quirks (critical, do not work around) + +These are real backend behaviors that have already cost us debugging time. Teach yourself and the user to respect them; **do not** suggest "workarounds" that bypass the CLI's existing handling. + +### 1. POST `/api/resources/<path>/` auto-renames existing directories + +Hitting the directory-create endpoint against an existing directory does **not** return 409. The server creates a sibling named `<name> (1)` instead. See the docstring on [`cli/pkg/files/upload/api.go`](cli/pkg/files/upload/api.go)'s `Mkdir` for the precise wording. + +Consequence baked into the CLI: `files upload` does **not** pre-create the destination directory. It relies on the chunk POST to implicitly materialize parents. **The destination directory MUST already exist on the server** — if you need a fresh directory, create it through the LarePass web app first (a future `files mkdir` verb may cover this). + +User-visible symptom of getting this wrong (older CLI versions): an extra `Documents (1)` directory appears on the server even though the upload "succeeded". + +### 2.
GET `/api/resources/<file>` (no trailing slash) returns HTTP 500 + +The backend's single-file `List` handler hard-codes `Content: true` (`files/pkg/drivers/posix/posix/posix.go` `getFiles`) and tries to slurp the file's bytes into the response envelope. For json / binary / large files, this just 500s. + +Consequence baked into the CLI: `Stat` always lists the **parent** directory and looks up the leaf in its items array (see [`cli/pkg/files/download/stat.go`](cli/pkg/files/download/stat.go)). This matches what the LarePass web app does — it never probes a single-file resource directly. Both `download` and `cat` use this code path. + +If the user reports `HTTP 500` against `/api/resources/.../<file>` with no trailing slash, do NOT suggest "just retry". The right answer is: use the CLI command (`files cat` / `files download`), or list the parent and look at items. + +## Authentication transport + +Every files API call carries `X-Authorization: <access_token>` as a header (NOT the standard `Authorization: Bearer ...`). The Factory injects this automatically; see [`cli/pkg/cmdutil/factory.go`](cli/pkg/cmdutil/factory.go). Do not try to call the backend via `curl` with a Bearer token — that header shape is not what the per-user files-backend expects and the request will fail. + +## Command cheatsheet (5 verbs) + +### `files ls <remote> [--json]` + +List a remote directory. See [`cli/cmd/ctl/files/ls.go`](cli/cmd/ctl/files/ls.go). + +```bash +olares-cli files ls drive/Home/ +olares-cli files ls drive/Home/Documents +olares-cli files ls sync/<repo-id>/ +olares-cli files ls drive/Home/Documents --json # raw envelope, pretty-printed +``` + +Default output: a one-line header (`<path> (N dirs, M files, modified ...)`) followed by a 5-column table `MODE SIZE TYPE MODIFIED NAME`. Directories sort before files; directory names get a trailing `/`. Empty directories print `(empty)`. + +`--json` prints the raw JSON envelope from the backend, useful for scripting.
+ +### `files upload <local> <remote>` + +Resumable chunked upload to drive/Home/<...>. See [`cli/cmd/ctl/files/upload.go`](cli/cmd/ctl/files/upload.go) and [`cli/pkg/files/upload/`](cli/pkg/files/upload/). + +```bash +# Upload one file into an existing directory. +olares-cli files upload report.pdf drive/Home/Documents/ + +# Upload AND rename on the server. +olares-cli files upload report.pdf drive/Home/Documents/2026-Q1.pdf + +# Upload a whole directory tree. +olares-cli files upload ./photos drive/Home/Backups/ + +# Two files in flight at a time, chunks remain sequential per file. +olares-cli files upload ./photos drive/Home/Backups/ --parallel 2 +``` + +Wire protocol (Drive v2 / Resumable.js-compatible): + +1. `GET /upload/upload-link/<node>/...` → upload session +2. `GET /upload/file-uploaded-bytes/<node>/...` → server-driven resume offset (no local progress file) +3. `POST` chunks (8 MiB default) with `Content-Range: bytes <start>-<end>/<total>` until done + +Constraints / flags: + +- **Destination MUST be under `drive/Home`** (`drive/Data` is read-only on the wire); the CLI rejects anything else with `upload destination must be under drive/Home`. +- **Destination directory MUST already exist** — see "POST auto-renames" above. +- A trailing `/` on `<remote>` means "into this directory"; without one, `<remote>` is treated as the full target path (rename on the way in). +- `--parallel N` (default 2): per-file concurrency. **Per-file chunks remain sequential** by design — the resume probe assumes one in-flight chunk per file. +- `--chunk-size <bytes>` (default 8 MiB): align with the server's expected size; rarely needs tuning. +- `--max-retries N`: per-chunk retry budget on transient failures. +- `--node <name>`: override the upload node; default is the first node from `/api/nodes/`. + +Resume is automatic and server-driven: re-running the same command after a Ctrl-C / network drop just re-asks the server how many bytes it already has, floors to a chunk boundary, and continues.
+ +### `files download <remote> [<local>]` + +Download a single file or a whole directory tree. See [`cli/cmd/ctl/files/download.go`](cli/cmd/ctl/files/download.go) and [`cli/pkg/files/download/`](cli/pkg/files/download/). + +```bash +# Single file into the current directory (./). +olares-cli files download drive/Home/Documents/report.pdf + +# Same, but pick a different local name. +olares-cli files download drive/Home/Documents/report.pdf ./Q1.pdf + +# Resume an interrupted download via Range:. +olares-cli files download drive/Home/Backups/big.tar ./big.tar --resume + +# Recursively pull a directory; 4 files at a time. +olares-cli files download drive/Home/Documents/ ./out/ --parallel 4 +``` + +Local destination resolution (single-file mode): + +| `<local>` | Result | +|----------------|--------| +| omitted | `./<basename>` | +| existing directory | `<local>/<basename>` (mirrors `cp`) | +| any other path (incl. trailing `/` if not yet existing) | treated as the full target file path | + +Flags: + +- `--resume`: send `Range: bytes=<offset>-` and append (server-native, no sidecar progress file). +- `--overwrite`: replace an existing local file via `.tmp` + atomic rename. The previous version stays intact until the new one lands. +- `--resume` and `--overwrite` are **mutually exclusive** — pick one. +- `--parallel N` (default 4): only meaningful in directory mode (errgroup-bounded concurrency). +- `--max-retries N`: per-file transient-failure budget (5xx triggers retry; 4xx fails fast). + +Directory mode (trailing `/` on `<remote>`): + +- The remote tree is walked recursively via `/api/resources/.../`. +- The remote root's basename becomes the top-level directory under `<local>` (matches the LarePass folder-download UX). Empty subdirectories are mirrored locally. +- Single `Stat` lookup at the start to confirm the path is actually a directory; then `BuildPlan` materializes the file list before any byte is written. + +### `files cat <remote>` + +Stream a single file's bytes to stdout. See [`cli/cmd/ctl/files/cat.go`](cli/cmd/ctl/files/cat.go).
+ +```bash +olares-cli files cat drive/Home/Documents/notes.md +olares-cli files cat drive/Home/Logs/today.log | tail -n 50 +olares-cli files cat drive/Home/Photos/banner.png > banner.png # binary-safe +``` + +Wire shape: `GET /api/raw/<path>?inline=true` (the same endpoint LarePass uses for text previews; `inline=true` only affects `Content-Disposition`, the body is identical). + +- Binary-safe: bytes are copied verbatim, no sniffing or transformation. Pipe into `less` / `head -c` / `hexdump` as needed. +- Pre-flight `Stat` (parent listing) refuses directories early with a clear error, instead of letting the server return its terser 400. Use `files download` for directories. + +### `files rm [-r] [-f] <remote>...` + +Delete one or more remote files / directories. See [`cli/cmd/ctl/files/rm.go`](cli/cmd/ctl/files/rm.go) and [`cli/pkg/files/rm/`](cli/pkg/files/rm/). + +```bash +# Delete one file. +olares-cli files rm drive/Home/Documents/old.pdf + +# Recursively remove a directory. +olares-cli files rm -r drive/Home/Backups/2024/ + +# Multiple targets, no prompt (scripts). +olares-cli files rm -rf drive/Home/junk drive/Home/scratch/ +``` + +Wire shape (one batch DELETE per parent dir): + +``` +DELETE /api/resources/<parent-dir>/ body: {"dirents": ["<name1>", "<name2>", ...]} +``` + +Targets sharing a parent collapse into a single request (matches the LarePass web app's `batchDeleteFileItems`). Targets across different parents send one request each, sorted by `fileType + extend + parent` for stable output. + +Flags / rules: + +- `-r` / `-R` / `--recursive`: required for directories. A trailing `/` on a target IS a directory-intent signal and triggers the same check (so `files rm drive/Home/Foo/` errors without `-r` even if `Foo` is technically empty). +- `-f` / `--force`: skip the y/N prompt. **In a non-TTY context (CI, piped stdin) the command refuses without `--force`** rather than guessing. +- Without `-f`: prints "will delete N entries in M batches" with the full list, then prompts `[y/N]`.
+- Removing the root of a volume (`drive/Home/`, `sync/<repo-id>/`, ...) is rejected by the planner: `refusing to delete the root of <fileType>/<extend>/`. + +Aliases: `olares-cli files remove ...`, `olares-cli files delete ...` are the same command. + +## Common errors → fixes + +| Error message (excerpt) | Likely cause | Fix | +|-------------------------|--------------|-----| +| `server rejected the access token (HTTP 401)` / `(HTTP 403)` | Token expired / revoked | Follow olares-shared's recovery: `olares-cli profile login --olares-id <id>` | +| `HTTP 404 ... not found on the server` | Path typo or wrong case (`Home` vs `home`, `Data` vs `data`) | `files ls` the parent directory to confirm spelling | +| `invalid drive type: <extend>` | Drive's `extend` isn't `Home` or `Data` | Use exactly `Home` or `Data` | +| `upload destination must be under drive/Home` | Tried to upload to `drive/Data/...` or another fileType | Move the target under `drive/Home/...` | +| `Documents (1)` (or similar) appearing on the server after upload | Older CLI version triggered the POST-mkdir auto-rename quirk | Upgrade to a CLI version that has the pre-mkdir removal fix | +| `--overwrite and --resume are mutually exclusive` | Passed both download flags | Pick one | +| `refusing to delete without --force in a non-interactive context (no TTY)` | `files rm` from a script with no TTY | Add `-f` after **explicitly listing the targets to the user first** | +| `refusing to delete the root of <fileType>/<extend>/` | Tried `files rm -r drive/Home/` (or another volume root) | The CLI does not support volume-root deletion; remove children individually | +| `cat ... is a directory` | `files cat` on a path that resolves to a directory | Use `files download <path>/` instead | +| `HTTP 500` against `/api/resources/.../<file>` (no trailing slash) | Hit the backend's single-file List quirk directly (e.g. via curl) | Don't bypass the CLI; the CLI uses parent-listing Stat for a reason | + +## Typical workflow + +```bash +# 1.
Explore +olares-cli files ls drive/Home/ +olares-cli files ls drive/Home/Documents/ + +# 2. Push a local tree up (target dir must already exist). +olares-cli files upload ~/local-dir drive/Home/Documents/ + +# 3. Pull a tree down with parallelism + resume. +olares-cli files download drive/Home/Documents/ ./out --parallel 4 --resume + +# 4. Quick peek at a file. +olares-cli files cat drive/Home/Notes/today.md | tail -n 50 + +# 5. Clean up after confirming with `ls` first. +olares-cli files ls drive/Home/Old/ +olares-cli files rm -r drive/Home/Old/ +``` + +When operating across multiple Olares instances, prefix each command with `--profile <name>` (see `olares-shared` for the global flag) instead of flipping the persistent current pointer. + +## Security rules + +- **Always preview destructive operations.** Before passing `-f` to `rm` in a script, list the exact paths to the user and get explicit confirmation. The interactive `[y/N]` prompt is a safety net, not a substitute for thoughtful intent. + +- **Local files are never overwritten implicitly.** `files download` refuses to clobber unless `--overwrite` (atomic via `.tmp`+rename) or `--resume` (append) is passed. Never recommend `--overwrite` without checking with the user. +- **Do not echo `<access_token>` to the terminal.** The token lives in the OS keychain for a reason; pulling it out into a shell variable for `curl` defeats that. Use the CLI commands. +- **`files upload` does NOT delete the local source** — it's a copy, not a move. If a user wants delete-after-upload semantics, they have to do it explicitly and after verifying the upload succeeded.
diff --git a/cli/skills/olares-shared/SKILL.md b/cli/skills/olares-shared/SKILL.md new file mode 100644 index 000000000..84b5318af --- /dev/null +++ b/cli/skills/olares-shared/SKILL.md @@ -0,0 +1,203 @@ +--- +name: olares-shared +version: 1.0.0 +description: "Shared olares-cli foundation: profile model, first-time login (profile login with password + TOTP), bootstrapping a profile from an existing refresh token (profile import), switching/listing/removing profiles, the global --profile flag, where access/refresh tokens live in the OS keychain, and how to recover from auth errors (HTTP 401/403, token expired, token invalidated, two-factor authentication required). Use whenever the user is configuring olares-cli for the first time, logging in or importing credentials, switching/listing/removing profiles, or seeing errors like 'server rejected the access token', 'already authenticated', or 'two-factor authentication required'; also use when the user asks about refresh tokens, the keychain, olaresId, or profile management." +metadata: + requires: + bins: ["olares-cli"] + cliHelp: "olares-cli profile --help" +--- + +# olares-cli shared rules + +This skill explains: what a profile is, how to obtain access credentials for it, where those credentials live, and how to recover when the server rejects a token. **Every `olares-cli files ...` (and other business) command depends on the profile selection + auth flow described here.** + +## Profile model + +One profile = one Olares instance + one user identity. The identity is uniquely keyed by an **olaresId** (e.g. `alice@olares.com`). Each profile owns its own access_token / refresh_token pair, stored in the OS keychain. 
+ +The `profile` command tree exposes 5 verbs (see [`cmd/ctl/profile/root.go`](cli/cmd/ctl/profile/root.go)): + +| Command | Purpose | +|---------|---------| +| `olares-cli profile login` | Authenticate with a password (+ TOTP if 2FA is on); auto-creates the profile on first run (mode A) | +| `olares-cli profile import` | Bootstrap an access_token from an existing refresh_token (mode B) | +| `olares-cli profile list` | List every profile, mark the current one, show login status per profile | +| `olares-cli profile use <name>` | Switch the current profile; `-` reverts to the previous one (analogous to `cd -`) | +| `olares-cli profile remove <name>` | Delete a profile and its stored token in one shot | + +> **There is no `olares-cli auth login` / `auth logout` namespace.** Every auth-related action lives under `profile`. "Logout" is `profile remove`. + +## Global `--profile` flag + +The root command registers a persistent `--profile <name>` flag (see [`cmd/ctl/root.go`](cli/cmd/ctl/root.go) L57). It overrides the currently-selected profile for one invocation without flipping the persisted current pointer: + +```bash +# Doesn't change current; this single ls runs against alice's credentials. +olares-cli files ls drive/Home/ --profile alice@olares.com +``` + +Use this for: scripting parallel operations against multiple profiles, sanity-checking a specific profile's status, and avoiding pollution of the interactive terminal's current pointer.
+ +## First-time login (mode A: password + optional TOTP) + +```bash +olares-cli profile login --olares-id <olaresId> +``` + +Behavior (see [`cmd/ctl/profile/login.go`](cli/cmd/ctl/profile/login.go) and [`cmd/ctl/profile/credentials.go`](cli/cmd/ctl/profile/credentials.go)): + +- Profile does not exist → auto-created +- Profile exists, token expired or invalidated → reuse the profile entry, write a fresh token +- Profile exists, token still valid → **rejected** with a hint to run `olares-cli profile remove <id>` first + +### Password + +- Default: read from the controlling TTY with echo disabled +- Scripts: `--password-stdin`, e.g. `printf '%s' "$PW" | olares-cli profile login --olares-id <id> --password-stdin` +- **There is no `--password <plaintext>` flag.** The CLI deliberately omits it so passwords never leak into shell history or `ps` output. + +### 2FA / TOTP + +When the server's `/api/firstfactor` returns `fa2=true`, a second factor is required: + +- TTY: the CLI prompts `two-factor code for <id>:` automatically; the user types the 6-digit code +- Non-TTY (`--password-stdin`, CI, etc.): you MUST pass `--totp <code>` up front, otherwise the command fails with `two-factor authentication required: re-run with --totp <code>` + +> **The CLI does not try to guess whether 2FA is enabled.** It probes every login with a targetURL that triggers Authelia's 2FA policy. Accounts without 2FA pass through transparently; accounts with 2FA get prompted for TOTP and then proceed. + +### Agent-driven login (recommended pattern) + +When you (an AI agent) drive the login on the user's behalf, **do not** pass the password or TOTP as plaintext command-line arguments. Recommended flow: + +1. Spawn `olares-cli profile login --olares-id <id>` as a background process so it parks at the password prompt. +2. Forward the prompt verbatim to the user and wait for them to type the password / TOTP into their own terminal. +3. After the command exits, read its output to confirm whether the login succeeded.
+ +Alternatively, instruct the user to run the login in their terminal themselves; the agent then takes over for follow-up command orchestration. + +## Bootstrap from an existing refresh_token (mode B) + +If the user already has a refresh_token (from LarePass, the wizard activation flow, or any other source), there is no need to run through password + 2FA again: + +```bash +olares-cli profile import --olares-id <olaresId> --refresh-token <tok> +``` + +The CLI exchanges the refresh_token for an access_token via a single `/api/refresh` call (see [`cmd/ctl/profile/import.go`](cli/cmd/ctl/profile/import.go)) and writes both into the keychain. The "reject if a valid token already exists" rule from `login` applies here too. + +> **Never write `--refresh-token <tok>` as plaintext in scripts.** Read it from an environment variable or a secret manager: +> ```bash +> olares-cli profile import --olares-id <id> --refresh-token "$OLARES_REFRESH_TOKEN" +> ``` + +## Switching and inspecting profiles + +### `profile list` + +Output (see [`cmd/ctl/profile/list.go`](cli/cmd/ctl/profile/list.go)): + +``` + NAME OLARES-ID STATUS +* alice alice@olares.com logged-in (23h59m) + bob bob@olares.com expired + eve eve@olares.com invalidated + frank frank@olares.com never +``` + +| STATUS | Meaning | Recovery | +|--------|---------|----------| +| `logged-in (Xh Ym)` | Token is valid; column shows time-to-expiry | — | +| `logged-in` | Token is present but its JWT has no exp claim, so we can't verify locally | Trust until the server says no | +| `expired` | Token JWT exp is in the past | `profile login` to re-authenticate | +| `invalidated` | The server explicitly rejected the refresh leg (`/api/refresh` returned 401/403) | `profile login` directly — no need to `profile remove` first | +| `never` | No token has ever been stored for this profile | `profile login` or `profile import` | + +The leading `*` marks the current profile. 
+ +### `profile use <name|->` + +```bash +olares-cli profile use alice # by NAME alias +olares-cli profile use alice@olares.com # by olaresId (also accepted) +olares-cli profile use - # back to the previous current (errors when none) +``` + +Updates `currentProfile` and `previousProfile` inside `~/.config/olares-cli/config.json`. + +### `profile remove <name>` + +```bash +olares-cli profile remove alice +``` + +Performs four actions atomically: + +1. Removes the profile entry from `config.json`. +2. Deletes the stored token for that olaresId from the keychain. +3. If the removed profile was current, current falls back to the previous (when valid) or to the first remaining profile. +4. If the removed profile was the last one, the keychain namespace itself is purged so no orphan entries remain in Keychain Access.app / regedit / etc. + +## Token storage + +| OS | Backend | Location | +|------|---------|----------| +| darwin | macOS Keychain | service `olares-cli`, account = olaresId | +| linux | AES-256-GCM file | under `~/.local/share/olares-cli/` | +| windows | DPAPI | `HKCU\Software\OlaresCli\keychain` | + +**The plaintext `~/.olares-cli/tokens.json` from older builds is deprecated** — tokens written there by previous versions are no longer read. If the user upgraded and suddenly appears "logged out", the correct fix is `profile login` to repopulate the new storage. + +After `login` / `import` succeeds, the CLI prints a line like `token stored via <backend> (service "olares-cli", account "<id>")`. That message is the source of truth for "where did my token actually land". If the backend resolves to `file-fallback` (sandboxed / CI environments without access to a system keychain), be aware: that token is now sitting in a file with **different security properties than the system keychain**. 
+ +## Re-authentication rules (critical) + +`profile login` and `profile import` both apply the same logic per olaresId: + +``` + ┌───────────────────────────────────────┐ +profile not exist ──▶│ Auto-create and write the new token │ + └───────────────────────────────────────┘ + ┌──────────────────────────────────────────────┐ +token expired ──▶│ Reuse the profile entry, write a new token │ + └──────────────────────────────────────────────┘ + ┌──────────────────────────────────────────────┐ +token invalidated ──▶│ Reuse the profile entry, write a new token │ + └──────────────────────────────────────────────┘ + ┌──────────────────────────────────────────────────────────┐ +token still valid ──▶│ Reject; tell the user to run profile remove <id> first │ + └──────────────────────────────────────────────────────────┘ +``` + +Logic lives in [`cmd/ctl/profile/credentials.go`](cli/cmd/ctl/profile/credentials.go) `ensureProfileWritable`. If a script needs unconditional overwrite, it MUST `profile remove` first and then `profile login` / `profile import`. + +## Auth error recovery table + +| Error message (excerpt) | Meaning | Fix | +|-------------------------|---------|-----| +| `server rejected the access token (HTTP 401)` / `(HTTP 403)` | The current profile's access_token was rejected (expired, revoked, password rotated, ...) 
| `olares-cli profile login --olares-id <id>` | +| `--olares-id is required` | login / import was invoked without olaresId | Add `--olares-id <id>` | +| `already authenticated for <id> (expires in ...)` | A still-valid token exists for this olaresId | `olares-cli profile remove <id>` and re-run login / import | +| `a token is already stored for <id> but its expiry can't be determined client-side` | Token present but JWT carries no exp claim, so we conservatively reject | Same: `profile remove <id>` and re-run | +| `two-factor authentication required: re-run with --totp <code>` | 2FA is on and we're in a non-TTY context (no way to prompt) | Re-run with `--totp <code>`, or run interactively in a TTY | +| `password is empty` / `TOTP code is empty` | stdin / TTY returned an empty string | Check for premature EOF or an empty pipe | +| `profile <name> not found` | `profile use` / `profile remove` referenced an unknown profile | `profile list` to see the actual names | + +> **Do not silently retry auth errors.** 401/403 / `already authenticated` are deterministic — follow the table; blind retries make the situation worse. + +## dev / internal flags + +For internal debugging or self-hosted dev environments only — **never include these in user-facing examples or scripts**: + +| Flag | Use | +|------|-----| +| `--auth-url-override <url>` | Hard-pin the Authelia URL instead of deriving it from olaresId | +| `--local-url-prefix <label>` | Inject an extra label between the auth subdomain and the terminus name | +| `--insecure-skip-verify` | Disable TLS verification (only for self-signed local environments) | + +## Security rules + +- **Never** invent a `--password <plaintext>` argument (it does not exist). Passwords go through the TTY or `--password-stdin` fed by a secret pipe. +- **Never** echo `access_token` / `refresh_token` to the terminal. 
When passing a refresh_token to `profile import`, source it from an environment variable or external secret store: `--refresh-token "$OLARES_REFRESH_TOKEN"`. +- **Confirm intent before write/delete actions** (`profile remove`, `files rm`, `files upload --overwrite`, ...). Do not act unilaterally on the user's behalf. +- **TOTP is not a password** — it is single-use and short-lived, so the CLI deliberately echoes it to make manual entry less error-prone (matching `gh auth login`, `aws sso login`, kubectl OIDC plugins). That said, never persist a TOTP in a shared script. From f8cdfdc04541e22cc454870154e4ff592a79c31e Mon Sep 17 00:00:00 2001 From: Peng Peng <billpengpeng@gmail.com> Date: Sun, 26 Apr 2026 13:38:46 +0800 Subject: [PATCH 07/12] fix(cli): resume download retries + dedupe byte format helpers - Re-stat the local file before each download attempt in --resume mode so Range: bytes=N- tracks partial progress after a failed append; accumulate per-attempt bytes for the success return and align error returns with that accounting. - Add TestDownloadFile_ResumeRetryRefreshesRange (partial 206 + cut, then second GET with bytes=6-). - Drop redundant atomic adds under mutex in files upload/download commands; use formatBytes from ls.go and remove duplicate humanBytes. 
Made-with: Cursor --- cli/cmd/ctl/files/download.go | 17 +++--- cli/cmd/ctl/files/ls.go | 9 ++++ cli/cmd/ctl/files/upload.go | 34 +++--------- cli/pkg/files/download/download.go | 47 ++++++++++++++++- cli/pkg/files/download/download_test.go | 70 +++++++++++++++++++++++++ 5 files changed, 139 insertions(+), 38 deletions(-) diff --git a/cli/cmd/ctl/files/download.go b/cli/cmd/ctl/files/download.go index 33e081383..714a11bd2 100644 --- a/cli/cmd/ctl/files/download.go +++ b/cli/cmd/ctl/files/download.go @@ -9,7 +9,6 @@ import ( "path/filepath" "strings" "sync" - "sync/atomic" "time" "github.com/spf13/cobra" @@ -182,7 +181,7 @@ func runDownloadFile( return err } - fmt.Fprintf(out, "downloading %s (%s) → %s\n", fp.String(), humanBytes(remoteSize), dst) + fmt.Fprintf(out, "downloading %s (%s) → %s\n", fp.String(), formatBytes(remoteSize), dst) start := time.Now() written, err := client.DownloadFile(ctx, plain, dst, download.Options{ @@ -194,9 +193,9 @@ func runDownloadFile( return reformatHTTPErr(err, "", "download", plain) } fmt.Fprintf(out, "done: wrote %s in %s (file size %s)\n", - humanBytes(written), + formatBytes(written), time.Since(start).Truncate(time.Millisecond), - humanBytes(remoteSize), + formatBytes(remoteSize), ) return nil } @@ -249,7 +248,7 @@ func runDownloadDir( totalBytes += t.Size } fmt.Fprintf(out, "downloading %d file(s), %s, into %s (parallel=%d)\n", - len(plan.Files), humanBytes(totalBytes), plan.LocalRoot, o.parallel) + len(plan.Files), formatBytes(totalBytes), plan.LocalRoot, o.parallel) g, gctx := errgroup.WithContext(ctx) g.SetLimit(o.parallel) @@ -264,7 +263,7 @@ func runDownloadDir( task := task g.Go(func() error { start := time.Now() - fmt.Fprintf(out, " → %s (%s)\n", task.RelativePath, humanBytes(task.Size)) + fmt.Fprintf(out, " → %s (%s)\n", task.RelativePath, formatBytes(task.Size)) written, err := client.DownloadFile(gctx, task.RemotePlainPath, task.LocalPath, download.Options{ Overwrite: o.overwrite, Resume: o.resume, @@ -275,11 
+274,11 @@ func runDownloadDir( } mu.Lock() completed++ - atomic.AddInt64(&bytesDone, written) + bytesDone += written done := completed mu.Unlock() fmt.Fprintf(out, " ✓ %s (%s, %s) [%d/%d]\n", - task.RelativePath, humanBytes(written), + task.RelativePath, formatBytes(written), time.Since(start).Truncate(time.Millisecond), done, totalFiles) return nil @@ -288,7 +287,7 @@ func runDownloadDir( if err := g.Wait(); err != nil { return err } - fmt.Fprintf(out, "done: %d file(s), %s\n", completed, humanBytes(bytesDone)) + fmt.Fprintf(out, "done: %d file(s), %s\n", completed, formatBytes(bytesDone)) return nil } diff --git a/cli/cmd/ctl/files/ls.go b/cli/cmd/ctl/files/ls.go index 4df936583..89ed54f25 100644 --- a/cli/cmd/ctl/files/ls.go +++ b/cli/cmd/ctl/files/ls.go @@ -284,7 +284,16 @@ func formatSize(n int64, isDir bool) string { if isDir { return "-" } + return formatBytes(n) +} + +// formatBytes renders a byte count for CLI progress lines (ls rows use +// formatSize; upload/download share this helper). 
+func formatBytes(n int64) string { const unit = 1024 + if n < 0 { + return fmt.Sprintf("%dB", n) + } if n < unit { return fmt.Sprintf("%dB", n) } diff --git a/cli/cmd/ctl/files/upload.go b/cli/cmd/ctl/files/upload.go index b013d1ba1..2d1737347 100644 --- a/cli/cmd/ctl/files/upload.go +++ b/cli/cmd/ctl/files/upload.go @@ -8,7 +8,6 @@ import ( "net/http" "strings" "sync" - "sync/atomic" "time" "github.com/spf13/cobra" @@ -225,8 +224,8 @@ func runUpload( totalBytes += ft.Size } fmt.Fprintf(out, "uploading %d file(s), %s, into %s (parallel=%d, chunk=%s)\n", - len(plan.Files), humanBytes(totalBytes), plan.ParentDir, - o.parallel, humanBytes(o.chunkSize)) + len(plan.Files), formatBytes(totalBytes), plan.ParentDir, + o.parallel, formatBytes(o.chunkSize)) return runUploads(ctx, client, plan, node, o, out) } @@ -260,7 +259,7 @@ func runUploads( opts := plan.ToUploadOpts(task, node, o.chunkSize, o.maxRetries) start := time.Now() - fmt.Fprintf(out, " → %s (%s)\n", task.RelativePath, humanBytes(task.Size)) + fmt.Fprintf(out, " → %s (%s)\n", task.RelativePath, formatBytes(task.Size)) var lastReported int64 progress := func(uploaded, total int64) { @@ -279,7 +278,7 @@ func runUploads( lastReported = uploaded fmt.Fprintf(out, " %s: %d/%d (%s/%s)\n", task.RelativePath, uploaded, total, - humanBytes(uploaded), humanBytes(total)) + formatBytes(uploaded), formatBytes(total)) } } if err := client.UploadFile(gctx, opts, progress); err != nil { @@ -288,11 +287,11 @@ func runUploads( mu.Lock() completed++ - atomic.AddInt64(&bytesDone, task.Size) + bytesDone += task.Size done := completed mu.Unlock() fmt.Fprintf(out, " ✓ %s (%s, %s) [%d/%d]\n", - task.RelativePath, humanBytes(task.Size), + task.RelativePath, formatBytes(task.Size), time.Since(start).Truncate(time.Millisecond), done, totalFiles) return nil @@ -301,7 +300,7 @@ func runUploads( if err := g.Wait(); err != nil { return err } - fmt.Fprintf(out, "done: %d file(s), %s\n", completed, humanBytes(bytesDone)) + fmt.Fprintf(out, 
"done: %d file(s), %s\n", completed, formatBytes(bytesDone)) return nil } @@ -336,22 +335,3 @@ func pluralYies(n int) string { } return "ies" } - -// humanBytes is the local copy of the same helper from ls.go's -// formatSize, kept inline so upload.go has no test-time dependency on -// the listing-only render code. -func humanBytes(n int64) string { - const unit = 1024 - if n < 0 { - return fmt.Sprintf("%dB", n) - } - if n < unit { - return fmt.Sprintf("%dB", n) - } - div, exp := int64(unit), 0 - for n2 := n / unit; n2 >= unit; n2 /= unit { - div *= unit - exp++ - } - return fmt.Sprintf("%.1f%cB", float64(n)/float64(div), "KMGTPE"[exp]) -} diff --git a/cli/pkg/files/download/download.go b/cli/pkg/files/download/download.go index b31d35d3b..9d2cfeb5f 100644 --- a/cli/pkg/files/download/download.go +++ b/cli/pkg/files/download/download.go @@ -21,7 +21,9 @@ // - 4xx (other than 416 above) is a permanent error — no retries. // - 5xx and transport errors retry with exponential backoff up to // opts.MaxRetries times. Same retry classification spirit as the -// upload package's chunk POST loop. +// upload package's chunk POST loop. In --resume mode the local +// offset is re-read from disk before each attempt so a partial +// append from a failed try never re-sends a stale Range: header. // // Atomicity: // @@ -105,7 +107,7 @@ func (c *Client) DownloadFile( // Decide the strategy first — it dictates which path we open and // what Range header (if any) we send. 
- mode, localSize, err := planLocalWrite(dst, opts) + mode, _, err := planLocalWrite(dst, opts) if err != nil { return 0, err } @@ -115,16 +117,30 @@ func (c *Client) DownloadFile( maxAttempts = 1 } + var resumeWritten int64 // only for writeResume: sum of per-attempt appends var lastErr error backoff := opts.RetryBackoff for attempt := 1; attempt <= maxAttempts; attempt++ { + localSize, err := localSizeForAttempt(dst, mode) + if err != nil { + return 0, err + } written, err := c.attemptDownload(ctx, plainPath, dst, mode, localSize, progress) + if mode == writeResume { + resumeWritten += written + } if err == nil { + if mode == writeResume { + return resumeWritten, nil + } return written, nil } // Cancellation is always final. if ctxErr := ctx.Err(); ctxErr != nil { + if mode == writeResume { + return resumeWritten, ctxErr + } return written, ctxErr } @@ -132,6 +148,9 @@ func (c *Client) DownloadFile( // "already complete") is permanent: no point retrying. var hErr *HTTPError if errors.As(err, &hErr) && hErr.Status >= 400 && hErr.Status < 500 { + if mode == writeResume { + return resumeWritten, err + } return written, err } @@ -140,6 +159,9 @@ func (c *Client) DownloadFile( select { case <-time.After(backoff): case <-ctx.Done(): + if mode == writeResume { + return resumeWritten, ctx.Err() + } return written, ctx.Err() } // Exponential backoff capped at 4s. @@ -149,6 +171,9 @@ func (c *Client) DownloadFile( } } } + if mode == writeResume { + return resumeWritten, fmt.Errorf("download %s: exhausted %d attempts: %w", plainPath, maxAttempts, lastErr) + } return 0, fmt.Errorf("download %s: exhausted %d attempts: %w", plainPath, maxAttempts, lastErr) } @@ -198,6 +223,24 @@ func planLocalWrite(dst string, opts Options) (writeMode, int64, error) { } } +// localSizeForAttempt returns the byte offset the next HTTP GET should +// use for Range (resume) or 0 for fresh/overwrite paths. 
For resume it +// always reflects the current on-disk file size so retries after a +// partial append never send a stale `bytes=N-` header. +func localSizeForAttempt(dst string, mode writeMode) (int64, error) { + if mode != writeResume { + return 0, nil + } + st, err := os.Stat(dst) + if err != nil { + return 0, fmt.Errorf("stat resume destination %s: %w", dst, err) + } + if st.IsDir() { + return 0, fmt.Errorf("local destination %q is an existing directory", dst) + } + return st.Size(), nil +} + // attemptDownload runs one HTTP request + body copy. It is called // from DownloadFile inside a retry loop, so it must: // - leave dst in a valid state on failure (tmp file is cleaned up; diff --git a/cli/pkg/files/download/download_test.go b/cli/pkg/files/download/download_test.go index 91ea0c24d..602638bf0 100644 --- a/cli/pkg/files/download/download_test.go +++ b/cli/pkg/files/download/download_test.go @@ -264,6 +264,76 @@ func TestDownloadFile_Resume(t *testing.T) { } } +// errCut is an io.Reader that fails immediately — used after a prefix +// in io.MultiReader so the server sends a short 206 body then stops. +type errCut struct{} + +func (errCut) Read([]byte) (int, error) { + return 0, errors.New("simulated transport cut") +} + +// TestDownloadFile_ResumeRetryRefreshesRange: a partial 206 append +// followed by a read error must not duplicate bytes on retry; the +// second GET must send Range: bytes=<current file size>-. 
+func TestDownloadFile_ResumeRetryRefreshesRange(t *testing.T) { + prefix := []byte("AAAA") // 4 bytes on disk; full object is 8 bytes + wantFull := []byte("AAAABBBB") + var hits int32 + client, _ := newTestClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + n := atomic.AddInt32(&hits, 1) + rng := r.Header.Get("Range") + switch n { + case 1: + if rng != "bytes=4-" { + t.Fatalf("first Range: want bytes=4-, got %q", rng) + } + w.Header().Set("Content-Range", "bytes 4-7/8") + w.Header().Set("Content-Length", "4") + w.WriteHeader(http.StatusPartialContent) + _, _ = io.Copy(w, io.MultiReader(bytes.NewReader([]byte("BB")), errCut{})) + return + case 2: + if rng != "bytes=6-" { + t.Fatalf("second Range after partial append: want bytes=6-, got %q", rng) + } + w.Header().Set("Content-Range", "bytes 6-7/8") + w.Header().Set("Content-Length", "2") + w.WriteHeader(http.StatusPartialContent) + _, _ = w.Write([]byte("BB")) + return + default: + t.Fatalf("unexpected request #%d", n) + } + })) + + dst := filepath.Join(t.TempDir(), "partial.bin") + if err := os.WriteFile(dst, prefix, 0o644); err != nil { + t.Fatalf("seed: %v", err) + } + + written, err := client.DownloadFile(context.Background(), "drive/Home/foo", dst, Options{ + Resume: true, + MaxRetries: 3, + RetryBackoff: time.Millisecond, + }, nil) + if err != nil { + t.Fatalf("DownloadFile: %v", err) + } + if written != 4 { + t.Errorf("written: want 4 (2+2 appended bytes this call), got %d", written) + } + got, err := os.ReadFile(dst) + if err != nil { + t.Fatalf("read dst: %v", err) + } + if !bytes.Equal(got, wantFull) { + t.Errorf("final file: got %q want %q", got, wantFull) + } + if hits != 2 { + t.Errorf("hits: want 2, got %d", hits) + } +} + // TestDownloadFile_RangeIgnored covers the "we asked for Range but // the server replied 200" case — falls back to a clean overwrite. 
func TestDownloadFile_RangeIgnored(t *testing.T) { From 1c070d2e74495946ebfddcc320fc4a9c4769d310 Mon Sep 17 00:00:00 2001 From: Peng Peng <billpengpeng@gmail.com> Date: Sun, 26 Apr 2026 13:49:43 +0800 Subject: [PATCH 08/12] fix(cli): align files ls URLPath with EncodeURL (JS encodeUrl) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bugbot: FrontendPath.URLPath used url.PathEscape per segment while download/cat/rm/upload use upload.EncodeURL, diverging for '+', '!*()', etc. Delegate URLPath to EncodeURL(p.String()) and extend path tests (report (1).txt, x+y → x%2By). Update ls command comment. Made-with: Cursor --- cli/cmd/ctl/files/ls.go | 4 +-- cli/cmd/ctl/files/path.go | 46 ++++++++-------------------------- cli/cmd/ctl/files/path_test.go | 7 +++++- 3 files changed, 18 insertions(+), 39 deletions(-) diff --git a/cli/cmd/ctl/files/ls.go b/cli/cmd/ctl/files/ls.go index 89ed54f25..ce752461a 100644 --- a/cli/cmd/ctl/files/ls.go +++ b/cli/cmd/ctl/files/ls.go @@ -125,8 +125,8 @@ func runLs(ctx context.Context, f *cmdutil.Factory, out io.Writer, rawPath strin return err } - // URLPath percent-encodes each path segment (mirrors the web app's - // encodeUrl helper) so filenames with '#', '?', '+', spaces, etc. survive + // URLPath uses upload.EncodeURL (same as download/cat/rm/upload) so + // filenames with '#', '?', '+', spaces, '!*'()', etc. survive // the trip to the backend. ParseFrontendPath already guarantees that // listing the extend root ("drive/Home" or "drive/Home/") yields a // SubPath of "/", so URLPath() naturally ends with '/' there — which is diff --git a/cli/cmd/ctl/files/path.go b/cli/cmd/ctl/files/path.go index fb0518f59..409b1500c 100644 --- a/cli/cmd/ctl/files/path.go +++ b/cli/cmd/ctl/files/path.go @@ -19,10 +19,11 @@ package files import ( "fmt" - "net/url" "path" "sort" "strings" + + "github.com/beclab/Olares/cli/pkg/files/upload" ) // Known fileType values understood by the files-backend. 
@@ -171,42 +172,15 @@ func (p FrontendPath) String() string { return p.FileType + "/" + p.Extend + p.SubPath } -// URLPath returns the same path as String() but with every segment -// percent-encoded via url.PathEscape, while preserving '/' separators and -// any trailing '/'. This matches the web app's handling -// (apps/packages/app/src/utils/encode.ts: encodeUrl) and is what the -// files-backend's /api/resources endpoint expects for filenames containing -// '#', '?', '+', spaces, '%', non-ASCII, etc. -// -// FileType is always one of the known lowercase tokens (no escaping needed -// in practice), but we run it through the same escape so callers get a -// single, predictable encoder. +// URLPath returns the same logical path as String() but percent-encoded +// with pkg/files/upload.EncodeURL — the Go counterpart of the web app's +// apps/packages/app/src/utils/encode.ts `encodeUrl` (encodeURIComponent per +// '/' segment). This MUST stay aligned with download/cat/rm/upload, which +// already use EncodeURL; url.PathEscape is not equivalent (e.g. '+' and +// '!*'()' differ) and would make `ls` hit different wire paths than the +// other verbs for the same user-typed path. func (p FrontendPath) URLPath() string { - subEscaped := escapeSubPath(p.SubPath) - return url.PathEscape(p.FileType) + "/" + url.PathEscape(p.Extend) + subEscaped -} - -// escapeSubPath percent-encodes each segment of SubPath while keeping the -// leading '/' and any trailing '/' (the trailing slash is the backend's -// "this is a directory" hint — see FileParam.convert in files-backend). 
-func escapeSubPath(sub string) string { - if sub == "" || sub == "/" { - return sub - } - trailing := strings.HasSuffix(sub, "/") - trimmed := strings.Trim(sub, "/") - if trimmed == "" { - return sub - } - parts := strings.Split(trimmed, "/") - for i, p := range parts { - parts[i] = url.PathEscape(p) - } - out := "/" + strings.Join(parts, "/") - if trailing { - out += "/" - } - return out + return upload.EncodeURL(p.String()) } // HasTrailingSlash reports whether the original input ended with '/'. Useful diff --git a/cli/cmd/ctl/files/path_test.go b/cli/cmd/ctl/files/path_test.go index fbff4ab65..abcfe84b1 100644 --- a/cli/cmd/ctl/files/path_test.go +++ b/cli/cmd/ctl/files/path_test.go @@ -189,7 +189,12 @@ func TestFrontendPathURLPath(t *testing.T) { { name: "filename with plus and percent", input: "drive/Home/100%/x+y.txt", - want: "drive/Home/100%25/x+y.txt", + want: "drive/Home/100%25/x%2By.txt", + }, + { + name: "parens and space like duplicate filename", + input: "drive/Home/Documents/report (1).txt", + want: "drive/Home/Documents/report%20(1).txt", }, { name: "non-ASCII filename", From 4ad2cad6c1c2bc682738cee325eaf7fc0a2d5ca4 Mon Sep 17 00:00:00 2001 From: Peng Peng <billpengpeng@gmail.com> Date: Sun, 26 Apr 2026 13:58:33 +0800 Subject: [PATCH 09/12] refactor(cli): extract JS-shaped path encoding to pkg/files/encodepath Move EncodeURL / EncodeURIComponent out of package upload into a small shared package so download, rm, path, and upload all depend on the same wire encoder without cmd/pkg layers importing upload only for URLs. - Add cli/pkg/files/encodepath with tests (former upload/encode*.go). - Update upload api/uploader, download client, rm, FrontendPath.URLPath. - Clarify download/list.go is for the download walker, not `files ls`. 
Made-with: Cursor --- cli/cmd/ctl/files/ls.go | 2 +- cli/cmd/ctl/files/path.go | 8 +- cli/pkg/files/download/client.go | 6 +- cli/pkg/files/download/download.go | 2 +- cli/pkg/files/download/list.go | 7 +- cli/pkg/files/download/walker_test.go | 2 +- cli/pkg/files/encodepath/encodepath.go | 73 ++++++++++++++ cli/pkg/files/encodepath/encodepath_test.go | 55 ++++++++++ cli/pkg/files/rm/rm.go | 4 +- cli/pkg/files/upload/api.go | 10 +- cli/pkg/files/upload/encode.go | 106 -------------------- cli/pkg/files/upload/encode_test.go | 70 ------------- cli/pkg/files/upload/uploader.go | 4 +- 13 files changed, 153 insertions(+), 196 deletions(-) create mode 100644 cli/pkg/files/encodepath/encodepath.go create mode 100644 cli/pkg/files/encodepath/encodepath_test.go delete mode 100644 cli/pkg/files/upload/encode.go delete mode 100644 cli/pkg/files/upload/encode_test.go diff --git a/cli/cmd/ctl/files/ls.go b/cli/cmd/ctl/files/ls.go index ce752461a..9c6437f42 100644 --- a/cli/cmd/ctl/files/ls.go +++ b/cli/cmd/ctl/files/ls.go @@ -125,7 +125,7 @@ func runLs(ctx context.Context, f *cmdutil.Factory, out io.Writer, rawPath strin return err } - // URLPath uses upload.EncodeURL (same as download/cat/rm/upload) so + // URLPath uses encodepath.EncodeURL (same as download/cat/rm/upload) so // filenames with '#', '?', '+', spaces, '!*'()', etc. survive // the trip to the backend. ParseFrontendPath already guarantees that // listing the extend root ("drive/Home" or "drive/Home/") yields a diff --git a/cli/cmd/ctl/files/path.go b/cli/cmd/ctl/files/path.go index 409b1500c..02d76e681 100644 --- a/cli/cmd/ctl/files/path.go +++ b/cli/cmd/ctl/files/path.go @@ -23,7 +23,7 @@ import ( "sort" "strings" - "github.com/beclab/Olares/cli/pkg/files/upload" + "github.com/beclab/Olares/cli/pkg/files/encodepath" ) // Known fileType values understood by the files-backend. 
@@ -173,14 +173,14 @@ func (p FrontendPath) String() string { } // URLPath returns the same logical path as String() but percent-encoded -// with pkg/files/upload.EncodeURL — the Go counterpart of the web app's +// with pkg/files/encodepath.EncodeURL — the Go counterpart of the web app's // apps/packages/app/src/utils/encode.ts `encodeUrl` (encodeURIComponent per // '/' segment). This MUST stay aligned with download/cat/rm/upload, which -// already use EncodeURL; url.PathEscape is not equivalent (e.g. '+' and +// all use pkg/files/encodepath; url.PathEscape is not equivalent (e.g. '+' and // '!*'()' differ) and would make `ls` hit different wire paths than the // other verbs for the same user-typed path. func (p FrontendPath) URLPath() string { - return upload.EncodeURL(p.String()) + return encodepath.EncodeURL(p.String()) } // HasTrailingSlash reports whether the original input ended with '/'. Useful diff --git a/cli/pkg/files/download/client.go b/cli/pkg/files/download/client.go index c21e899dc..1d07883b2 100644 --- a/cli/pkg/files/download/client.go +++ b/cli/pkg/files/download/client.go @@ -26,7 +26,7 @@ import ( "net/http" "strings" - "github.com/beclab/Olares/cli/pkg/files/upload" + "github.com/beclab/Olares/cli/pkg/files/encodepath" ) // Client is the per-FilesURL handle used by Stat / List / DownloadFile @@ -68,7 +68,7 @@ func (e *HTTPError) Error() string { // listings). plainPath looks like `drive/Home/Documents` or // `drive/Home/Documents/`. func (c *Client) resourcesURL(plainPath string) string { - return c.BaseURL + "/api/resources/" + upload.EncodeURL(plainPath) + return c.BaseURL + "/api/resources/" + encodepath.EncodeURL(plainPath) } // rawURL returns `<BaseURL>/api/raw/<encPlainPath>`. Mirrors the web @@ -76,7 +76,7 @@ func (c *Client) resourcesURL(plainPath string) string { // raw endpoint refuses non-file paths with a 400, so callers should // Stat first when the user-supplied path could be either. 
func (c *Client) rawURL(plainPath string) string { - return c.BaseURL + "/api/raw/" + upload.EncodeURL(plainPath) + return c.BaseURL + "/api/raw/" + encodepath.EncodeURL(plainPath) } // do performs a single HTTP request with the configured access token diff --git a/cli/pkg/files/download/download.go b/cli/pkg/files/download/download.go index 9d2cfeb5f..c2b1bbe2c 100644 --- a/cli/pkg/files/download/download.go +++ b/cli/pkg/files/download/download.go @@ -88,7 +88,7 @@ type Options struct { type ProgressFunc func(written, total int64) // DownloadFile fetches `plainPath` (a `<fileType>/<extend>/<subPath>` -// triple, un-encoded — the client encodes with EncodeURL internally) +// triple, un-encoded — the client encodes with encodepath.EncodeURL internally) // into the local file at `dst`. // // Returns the number of bytes WRITTEN to dst by this call (so a diff --git a/cli/pkg/files/download/list.go b/cli/pkg/files/download/list.go index 4bd803d55..511c294a3 100644 --- a/cli/pkg/files/download/list.go +++ b/cli/pkg/files/download/list.go @@ -1,8 +1,9 @@ // list.go: list a remote directory via GET /api/resources/<encPath>/. // The walker calls this once per directory to drive the recursive -// download. Same envelope shape as `files ls` consumes (see -// cli/cmd/ctl/files/ls.go's listingResponse), but we project even -// further down: name + isDir + size is everything the walker needs. +// download. This is not the `olares-cli files ls` implementation (that +// lives in cli/cmd/ctl/files/ls.go); we only share the same JSON envelope +// shape (see ls.go's listingResponse) but project down to name + isDir + +// size — everything the download walker needs. 
 package download
 
 import (
diff --git a/cli/pkg/files/download/walker_test.go b/cli/pkg/files/download/walker_test.go
index 7d41fc2d4..12345249f 100644
--- a/cli/pkg/files/download/walker_test.go
+++ b/cli/pkg/files/download/walker_test.go
@@ -170,7 +170,7 @@ func equal(a, b []string) bool {
 
 // Sanity check: the tree handler should at least pretend to URL-encode,
 // so this no-op test confirms no panics on funky names. We don't go
-// deeper because the actual encoding lives in upload.EncodeURL, which
+// deeper because the actual encoding lives in encodepath.EncodeURL, which
 // has its own test suite.
 func TestList_PathEncoding_Roundtrip(t *testing.T) {
 	want := "Special !'()*"
diff --git a/cli/pkg/files/encodepath/encodepath.go b/cli/pkg/files/encodepath/encodepath.go
new file mode 100644
index 000000000..d9105b950
--- /dev/null
+++ b/cli/pkg/files/encodepath/encodepath.go
@@ -0,0 +1,73 @@
+// Package encodepath implements percent-encoding for Olares files-backend
+// wire paths so the CLI matches the LarePass web client
+// (apps/packages/app/src/utils/encode.ts: encodeUrl / encodeURIComponent).
+//
+// This is shared by upload, download, rm, and the files Cobra commands — it
+// does not belong in package upload alone: net/url helpers are not
+// byte-identical to JS (spaces as '+', !*'() escaping, etc.), and resume /
+// probes require the same encoding everywhere.
+package encodepath
+
+import "strings"
+
+// EncodeURIComponent mirrors JavaScript's encodeURIComponent: it leaves the
+// unreserved set (RFC 3986) plus !*'() alone and percent-encodes the rest as
+// UTF-8 bytes. Use for query values (e.g. file_path=) and header fragments
+// where the server expects JS-shaped bytes.
+//
+// Why we don't reuse net/url:
+// - url.QueryEscape encodes a space as '+' (form encoding) — JS uses '%20'.
+// - url.QueryEscape escapes '!' '*' '(' ')' '\'' — JS does not.
+func EncodeURIComponent(s string) string { + var b strings.Builder + b.Grow(len(s)) + for i := 0; i < len(s); i++ { + c := s[i] + if shouldNotEncode(c) { + b.WriteByte(c) + continue + } + b.WriteByte('%') + b.WriteByte(upperHex[c>>4]) + b.WriteByte(upperHex[c&0x0f]) + } + return b.String() +} + +// EncodeURL is the Go counterpart of encode.ts `encodeUrl`: split on '/', +// EncodeURIComponent each segment, rejoin with '/'. Use for path segments in +// /api/resources/..., /api/raw/..., and upload link paths. +// +// Leading and trailing '/' are preserved so directory-hint semantics survive. +func EncodeURL(p string) string { + if p == "" { + return "" + } + parts := strings.Split(p, "/") + for i, seg := range parts { + if seg == "" { + continue + } + parts[i] = EncodeURIComponent(seg) + } + return strings.Join(parts, "/") +} + +const upperHex = "0123456789ABCDEF" + +// shouldNotEncode matches JS encodeURIComponent's leave-alone set. +func shouldNotEncode(c byte) bool { + switch { + case c >= 'A' && c <= 'Z': + return true + case c >= 'a' && c <= 'z': + return true + case c >= '0' && c <= '9': + return true + } + switch c { + case '-', '_', '.', '~', '!', '*', '\'', '(', ')': + return true + } + return false +} diff --git a/cli/pkg/files/encodepath/encodepath_test.go b/cli/pkg/files/encodepath/encodepath_test.go new file mode 100644 index 000000000..934dc81d2 --- /dev/null +++ b/cli/pkg/files/encodepath/encodepath_test.go @@ -0,0 +1,55 @@ +package encodepath + +import "testing" + +func TestEncodeURIComponent(t *testing.T) { + cases := []struct { + in, want string + }{ + {"", ""}, + {"abc", "abc"}, + {"hello world", "hello%20world"}, + {"a&b=c", "a%26b%3Dc"}, + {"a+b", "a%2Bb"}, + {"a/b", "a%2Fb"}, + {"!*'()", "!*'()"}, + {"-_.~", "-_.~"}, + {"中文.txt", "%E4%B8%AD%E6%96%87.txt"}, + {"~user@host", "~user%40host"}, + {"file (1).txt", "file%20(1).txt"}, + {"100%", "100%25"}, + {"\x00\n\r\t", "%00%0A%0D%09"}, + {" / ", "%20%20%2F%20%20"}, + {"a?b#c", "a%3Fb%23c"}, + } + 
for _, c := range cases { + got := EncodeURIComponent(c.in) + if got != c.want { + t.Errorf("EncodeURIComponent(%q) = %q, want %q", c.in, got, c.want) + } + } +} + +func TestEncodeURL(t *testing.T) { + cases := []struct { + in, want string + }{ + {"", ""}, + {"/", "/"}, + {"a/b/c", "a/b/c"}, + {"/a/b/", "/a/b/"}, + {"a b/c d/", "a%20b/c%20d/"}, + {"中文/files/x.txt", "%E4%B8%AD%E6%96%87/files/x.txt"}, + {"//x", "//x"}, + {"x//", "x//"}, + {"/Home/Photos/IMG_001.jpg", "/Home/Photos/IMG_001.jpg"}, + {"/dir/file (1).txt", "/dir/file%20(1).txt"}, + {"/sub/a&b/c=d", "/sub/a%26b/c%3Dd"}, + } + for _, c := range cases { + got := EncodeURL(c.in) + if got != c.want { + t.Errorf("EncodeURL(%q) = %q, want %q", c.in, got, c.want) + } + } +} diff --git a/cli/pkg/files/rm/rm.go b/cli/pkg/files/rm/rm.go index 3c663175e..7f1cef57b 100644 --- a/cli/pkg/files/rm/rm.go +++ b/cli/pkg/files/rm/rm.go @@ -27,7 +27,7 @@ import ( "sort" "strings" - "github.com/beclab/Olares/cli/pkg/files/upload" + "github.com/beclab/Olares/cli/pkg/files/encodepath" ) // Client is the per-FilesURL handle used by DeleteBatch. 
@@ -210,7 +210,7 @@ func (c *Client) DeleteBatch(ctx context.Context, g *Group) error {
 		parent += "/"
 	}
 	plain := g.FileType + "/" + g.Extend + parent
-	endpoint := c.BaseURL + "/api/resources/" + upload.EncodeURL(plain)
+	endpoint := c.BaseURL + "/api/resources/" + encodepath.EncodeURL(plain)
 
 	bodyBytes, err := json.Marshal(deleteRequestBody{Dirents: g.Dirents})
 	if err != nil {
diff --git a/cli/pkg/files/upload/api.go b/cli/pkg/files/upload/api.go
index e4bfac5c8..3ecf0e64c 100644
--- a/cli/pkg/files/upload/api.go
+++ b/cli/pkg/files/upload/api.go
@@ -20,6 +20,8 @@ import (
 	"net/http"
 	"net/url"
 	"strings"
+
+	"github.com/beclab/Olares/cli/pkg/files/encodepath"
 )
 
 // Client is the per-FilesURL handle used by uploader.go and the cobra
@@ -97,7 +99,7 @@ func (c *Client) GetUploadLink(ctx context.Context, node, parentDir string) (string, error) {
 	endpoint := c.BaseURL +
 		"/upload/upload-link/" + url.PathEscape(node) +
-		"/?file_path=" + encodeURIComponent(parentDir) +
+		"/?file_path=" + encodepath.EncodeURIComponent(parentDir) +
 		"&from=web"
 	body, err := c.do(ctx, http.MethodGet, endpoint, nil, nil, "")
 	if err != nil {
@@ -163,7 +165,7 @@ func (c *Client) GetUploadedBytes(ctx context.Context, node, parentDir, filename
 // to create a directory under Drive/Home. The trailing slash is what the
 // backend uses to discriminate "create directory" from "create empty file"
 // (postCreateFile in v2/common/utils.ts does the same thing — `isDir
-// ? '/' : ''`).
+// ? '/' : ''`).
 //
 // `relSubPath` is the directory path RELATIVE to /Home (e.g. "Documents"
 // or "Documents/photos"); it should NOT include leading or trailing
@@ -184,7 +186,7 @@ func (c *Client) Mkdir(ctx context.Context, relSubPath string) error {
 		// Drive/Home root always exists; nothing to do.
return nil } - encoded := EncodeURL(clean) + encoded := encodepath.EncodeURL(clean) endpoint := c.BaseURL + "/api/resources/drive/Home/" + encoded + "/" _, err := c.do(ctx, http.MethodPost, endpoint, nil, nil, "") if err != nil { @@ -212,7 +214,7 @@ func (c *Client) CreateEmptyFile(ctx context.Context, relPath string) error { if clean == "" { return fmt.Errorf("CreateEmptyFile: empty path") } - encoded := EncodeURL(clean) + encoded := encodepath.EncodeURL(clean) endpoint := c.BaseURL + "/api/resources/drive/Home/" + encoded _, err := c.do(ctx, http.MethodPost, endpoint, nil, nil, "") if err != nil { diff --git a/cli/pkg/files/upload/encode.go b/cli/pkg/files/upload/encode.go deleted file mode 100644 index aed8321c3..000000000 --- a/cli/pkg/files/upload/encode.go +++ /dev/null @@ -1,106 +0,0 @@ -// Package upload implements the chunked / resumable upload client that the -// `olares-cli files upload` command drives. It speaks the same wire protocol -// as the LarePass web app (Resumable.js + the Drive v2 endpoints under -// /upload/upload-link, /upload/file-uploaded-bytes, /api/nodes, -// /api/resources/drive/Home/...). See docs/notes/auth-2fa-semantics.md for -// the auth header convention shared with the rest of the CLI, and the plan -// at .cursor/plans/cli_files_upload_resumable_*.plan.md for the design -// rationale. -// -// encode.go: percent-encoding helpers that mirror the web app's -// apps/packages/app/src/utils/encode.ts (encodeUrl). The standard library -// alone is NOT a 1:1 substitute: -// -// - url.QueryEscape encodes a space as '+' (form encoding) — JS -// encodeURIComponent uses '%20'. -// - url.QueryEscape escapes '!' '*' '(' ')' '\'' — JS encodeURIComponent -// does not. -// -// Both differences would round-trip to the server differently and break -// resume / probe path-matching for filenames containing those bytes, so we -// implement encodeURIComponent ourselves and use it everywhere we touch a -// path / filename / parent_dir value. 
-package upload - -import ( - "strings" -) - -// encodeURIComponent mirrors JavaScript's encodeURIComponent: it leaves -// the unreserved set (RFC 3986) plus !*'() alone and percent-encodes the -// rest as UTF-8 bytes. This is the building block for both EncodeURL -// (path-segment encoding, joined with '/') and the query-value encoding -// the upload protocol uses for parent_dir / file_name. -// -// Why we don't reuse net/url: -// - url.QueryEscape encodes ' ' as '+' (form encoding). The Drive -// backend was written against a JS client that emits '%20', so the -// two representations are not interchangeable for filename parity -// (probe and chunk POST must see byte-identical names for resume to -// line up). -// - url.PathEscape leaves '?', '#', '&', '=' alone (they're valid -// within a path component) but those characters DO need escaping when -// the value is destined for a query parameter, which is the bulk of -// our use case. -func encodeURIComponent(s string) string { - var b strings.Builder - b.Grow(len(s)) - for i := 0; i < len(s); i++ { - c := s[i] - if shouldNotEncode(c) { - b.WriteByte(c) - continue - } - b.WriteByte('%') - b.WriteByte(upperHex[c>>4]) - b.WriteByte(upperHex[c&0x0f]) - } - return b.String() -} - -// EncodeURL is the Go counterpart of apps/packages/app/src/utils/encode.ts -// `encodeUrl`: split on '/', encodeURIComponent each segment, rejoin with -// '/'. Used wherever a path is interpolated into a URL path component -// (e.g. the file_path query value the server uses to derive the upload -// link, or the /api/resources/drive/Home/... mkdir endpoint). -// -// The leading and trailing '/' are preserved verbatim so callers can keep -// the "directory hint" semantics the backend relies on (a trailing '/' -// signals "this is a directory" in several files-backend code paths; see -// files/pkg/models/file_param.go). 
-func EncodeURL(p string) string { - if p == "" { - return "" - } - // split keeps empty leading/trailing parts so the leading/trailing - // slashes survive the rejoin (e.g. "/a/b/" → ["", "a", "b", ""] → - // "/a/b/" again after encoding the non-empty pieces). - parts := strings.Split(p, "/") - for i, seg := range parts { - if seg == "" { - continue - } - parts[i] = encodeURIComponent(seg) - } - return strings.Join(parts, "/") -} - -const upperHex = "0123456789ABCDEF" - -// shouldNotEncode is the membership test for JS encodeURIComponent's -// "leave alone" set: A-Z a-z 0-9 plus the marks `- _ . ~ ! * ' ( )`. -func shouldNotEncode(c byte) bool { - switch { - case c >= 'A' && c <= 'Z': - return true - case c >= 'a' && c <= 'z': - return true - case c >= '0' && c <= '9': - return true - } - switch c { - case '-', '_', '.', '~', '!', '*', '\'', '(', ')': - return true - } - return false -} diff --git a/cli/pkg/files/upload/encode_test.go b/cli/pkg/files/upload/encode_test.go deleted file mode 100644 index 9513ae7d3..000000000 --- a/cli/pkg/files/upload/encode_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package upload - -import "testing" - -// Test cases derived from JavaScript's encodeURIComponent reference output. -// If you ever change encode.go, re-run these examples in a Node REPL to -// re-confirm parity: -// -// > encodeURIComponent("hello world") // 'hello%20world' -// > encodeURIComponent("a&b=c") // 'a%26b%3Dc' -// > encodeURIComponent("中文.txt") // '%E4%B8%AD%E6%96%87.txt' -// -// We deliberately test the boundary characters where Go's url.QueryEscape -// would diverge from JS (' ' as '+', and the !*'() set escaping) so any -// regression to "just use url.QueryEscape" gets caught here. 
-func TestEncodeURIComponent(t *testing.T) { - cases := []struct { - in, want string - }{ - {"", ""}, - {"abc", "abc"}, - {"hello world", "hello%20world"}, // space → %20 (NOT '+') - {"a&b=c", "a%26b%3Dc"}, // query metacharacters - {"a+b", "a%2Bb"}, // '+' percent-encoded - {"a/b", "a%2Fb"}, // '/' is reserved - {"!*'()", "!*'()"}, // unreserved-extras (NOT escaped) - {"-_.~", "-_.~"}, // RFC 3986 unreserved - {"中文.txt", "%E4%B8%AD%E6%96%87.txt"}, // UTF-8 multibyte - {"~user@host", "~user%40host"}, // '@' encoded - {"file (1).txt", "file%20(1).txt"}, // spaces + parens together - {"100%", "100%25"}, // '%' itself - {"\x00\n\r\t", "%00%0A%0D%09"}, // control chars - {" / ", "%20%20%2F%20%20"}, // multiple spaces around '/' - {"a?b#c", "a%3Fb%23c"}, // '?' and '#' - } - for _, c := range cases { - got := encodeURIComponent(c.in) - if got != c.want { - t.Errorf("encodeURIComponent(%q) = %q, want %q", c.in, got, c.want) - } - } -} - -// EncodeURL is the path-segment-aware variant: '/' separators are kept, -// each segment is encodeURIComponent'd. The interesting cases are -// preserving leading/trailing slashes (the backend uses them as directory -// hints) and the empty-segment edge case from "//". 
-func TestEncodeURL(t *testing.T) { - cases := []struct { - in, want string - }{ - {"", ""}, - {"/", "/"}, - {"a/b/c", "a/b/c"}, - {"/a/b/", "/a/b/"}, - {"a b/c d/", "a%20b/c%20d/"}, - {"中文/files/x.txt", "%E4%B8%AD%E6%96%87/files/x.txt"}, - {"//x", "//x"}, // empty leading segment kept - {"x//", "x//"}, // empty trailing segment kept - {"/Home/Photos/IMG_001.jpg", "/Home/Photos/IMG_001.jpg"}, - {"/dir/file (1).txt", "/dir/file%20(1).txt"}, - {"/sub/a&b/c=d", "/sub/a%26b/c%3Dd"}, - } - for _, c := range cases { - got := EncodeURL(c.in) - if got != c.want { - t.Errorf("EncodeURL(%q) = %q, want %q", c.in, got, c.want) - } - } -} diff --git a/cli/pkg/files/upload/uploader.go b/cli/pkg/files/upload/uploader.go index 6366b4dbf..396c7a6fe 100644 --- a/cli/pkg/files/upload/uploader.go +++ b/cli/pkg/files/upload/uploader.go @@ -36,6 +36,8 @@ import ( "strconv" "strings" "time" + + "github.com/beclab/Olares/cli/pkg/files/encodepath" ) // DefaultChunkSize is 8 MiB — the same value the web app uses @@ -297,7 +299,7 @@ func (c *Client) uploadChunk( headers := http.Header{ "Accept": []string{"application/json; text/javascript, */*; q=0.01"}, "Content-Disposition": []string{ - `attachment; filename="` + encodeURIComponent(opts.RemoteName) + `"`, + `attachment; filename="` + encodepath.EncodeURIComponent(opts.RemoteName) + `"`, }, "Content-Range": []string{fmt.Sprintf( "bytes %d-%d/%d", From 542005f81127a8593a49377348247e851de5b9a0 Mon Sep 17 00:00:00 2001 From: Peng Peng <billpengpeng@gmail.com> Date: Sun, 26 Apr 2026 14:05:33 +0800 Subject: [PATCH 10/12] refactor(cli): move Drive files-backend clients under internal/files Relocate encodepath, download, rm, and upload from cli/pkg/files to cli/internal/files so they are clearly olares-cli implementation details (not a stable public library surface). The legacy root package cli/pkg/files (installer URLs, rate limiter, etc.) stays in pkg for existing importers (storage, bootstrap, terminus, ...). 
Update `files` Cobra imports and olares-files SKILL links. Made-with: Cursor --- cli/cmd/ctl/files/cat.go | 2 +- cli/cmd/ctl/files/download.go | 2 +- cli/cmd/ctl/files/path.go | 6 +++--- cli/cmd/ctl/files/rm.go | 2 +- cli/cmd/ctl/files/upload.go | 4 ++-- cli/{pkg => internal}/files/download/client.go | 2 +- cli/{pkg => internal}/files/download/download.go | 0 cli/{pkg => internal}/files/download/download_test.go | 0 cli/{pkg => internal}/files/download/list.go | 1 - cli/{pkg => internal}/files/download/stat.go | 0 cli/{pkg => internal}/files/download/walker.go | 0 cli/{pkg => internal}/files/download/walker_test.go | 0 cli/{pkg => internal}/files/encodepath/encodepath.go | 0 .../files/encodepath/encodepath_test.go | 0 cli/{pkg => internal}/files/rm/rm.go | 2 +- cli/{pkg => internal}/files/rm/rm_test.go | 0 cli/{pkg => internal}/files/upload/api.go | 2 +- cli/{pkg => internal}/files/upload/api_test.go | 0 cli/{pkg => internal}/files/upload/uploader.go | 2 +- cli/{pkg => internal}/files/upload/uploader_test.go | 4 ++-- cli/{pkg => internal}/files/upload/walker.go | 0 cli/{pkg => internal}/files/upload/walker_test.go | 0 cli/skills/olares-files/SKILL.md | 10 +++++----- 23 files changed, 19 insertions(+), 20 deletions(-) rename cli/{pkg => internal}/files/download/client.go (98%) rename cli/{pkg => internal}/files/download/download.go (100%) rename cli/{pkg => internal}/files/download/download_test.go (100%) rename cli/{pkg => internal}/files/download/list.go (99%) rename cli/{pkg => internal}/files/download/stat.go (100%) rename cli/{pkg => internal}/files/download/walker.go (100%) rename cli/{pkg => internal}/files/download/walker_test.go (100%) rename cli/{pkg => internal}/files/encodepath/encodepath.go (100%) rename cli/{pkg => internal}/files/encodepath/encodepath_test.go (100%) rename cli/{pkg => internal}/files/rm/rm.go (99%) rename cli/{pkg => internal}/files/rm/rm_test.go (100%) rename cli/{pkg => internal}/files/upload/api.go (99%) rename cli/{pkg => 
internal}/files/upload/api_test.go (100%) rename cli/{pkg => internal}/files/upload/uploader.go (99%) rename cli/{pkg => internal}/files/upload/uploader_test.go (98%) rename cli/{pkg => internal}/files/upload/walker.go (100%) rename cli/{pkg => internal}/files/upload/walker_test.go (100%) diff --git a/cli/cmd/ctl/files/cat.go b/cli/cmd/ctl/files/cat.go index 02b9f674b..db870a862 100644 --- a/cli/cmd/ctl/files/cat.go +++ b/cli/cmd/ctl/files/cat.go @@ -9,7 +9,7 @@ import ( "github.com/spf13/cobra" "github.com/beclab/Olares/cli/pkg/cmdutil" - "github.com/beclab/Olares/cli/pkg/files/download" + "github.com/beclab/Olares/cli/internal/files/download" ) // NewCatCommand: `olares-cli files cat <remote-path>` diff --git a/cli/cmd/ctl/files/download.go b/cli/cmd/ctl/files/download.go index 714a11bd2..6e54873bd 100644 --- a/cli/cmd/ctl/files/download.go +++ b/cli/cmd/ctl/files/download.go @@ -15,7 +15,7 @@ import ( "golang.org/x/sync/errgroup" "github.com/beclab/Olares/cli/pkg/cmdutil" - "github.com/beclab/Olares/cli/pkg/files/download" + "github.com/beclab/Olares/cli/internal/files/download" ) type downloadOptions struct { diff --git a/cli/cmd/ctl/files/path.go b/cli/cmd/ctl/files/path.go index 02d76e681..218c4437a 100644 --- a/cli/cmd/ctl/files/path.go +++ b/cli/cmd/ctl/files/path.go @@ -23,7 +23,7 @@ import ( "sort" "strings" - "github.com/beclab/Olares/cli/pkg/files/encodepath" + "github.com/beclab/Olares/cli/internal/files/encodepath" ) // Known fileType values understood by the files-backend. @@ -173,10 +173,10 @@ func (p FrontendPath) String() string { } // URLPath returns the same logical path as String() but percent-encoded -// with pkg/files/encodepath.EncodeURL — the Go counterpart of the web app's +// with internal/files/encodepath.EncodeURL — the Go counterpart of the web app's // apps/packages/app/src/utils/encode.ts `encodeUrl` (encodeURIComponent per // '/' segment). 
This MUST stay aligned with download/cat/rm/upload, which -// all use pkg/files/encodepath; url.PathEscape is not equivalent (e.g. '+' and +// all use internal/files/encodepath; url.PathEscape is not equivalent (e.g. '+' and // '!*'()' differ) and would make `ls` hit different wire paths than the // other verbs for the same user-typed path. func (p FrontendPath) URLPath() string { diff --git a/cli/cmd/ctl/files/rm.go b/cli/cmd/ctl/files/rm.go index 670d1d23b..d967d927a 100644 --- a/cli/cmd/ctl/files/rm.go +++ b/cli/cmd/ctl/files/rm.go @@ -14,7 +14,7 @@ import ( "golang.org/x/term" "github.com/beclab/Olares/cli/pkg/cmdutil" - "github.com/beclab/Olares/cli/pkg/files/rm" + "github.com/beclab/Olares/cli/internal/files/rm" ) type rmOptions struct { diff --git a/cli/cmd/ctl/files/upload.go b/cli/cmd/ctl/files/upload.go index 2d1737347..c0334c081 100644 --- a/cli/cmd/ctl/files/upload.go +++ b/cli/cmd/ctl/files/upload.go @@ -13,8 +13,8 @@ import ( "github.com/spf13/cobra" "golang.org/x/sync/errgroup" + "github.com/beclab/Olares/cli/internal/files/upload" "github.com/beclab/Olares/cli/pkg/cmdutil" - "github.com/beclab/Olares/cli/pkg/files/upload" ) type uploadOptions struct { @@ -31,7 +31,7 @@ type uploadOptions struct { // same chunked-resumable protocol the LarePass web app speaks // (Resumable.js + the Drive v2 endpoints under /upload/upload-link, // /upload/file-uploaded-bytes, /api/resources/drive/Home/...). See -// pkg/files/upload/uploader.go for the wire-level details. +// internal/files/upload/uploader.go for the wire-level details. 
// // Resume is enabled by default and is server-driven: before each file // the CLI calls /upload/file-uploaded-bytes/<node>/ to ask "how much do diff --git a/cli/pkg/files/download/client.go b/cli/internal/files/download/client.go similarity index 98% rename from cli/pkg/files/download/client.go rename to cli/internal/files/download/client.go index 1d07883b2..9b9affa72 100644 --- a/cli/pkg/files/download/client.go +++ b/cli/internal/files/download/client.go @@ -26,7 +26,7 @@ import ( "net/http" "strings" - "github.com/beclab/Olares/cli/pkg/files/encodepath" + "github.com/beclab/Olares/cli/internal/files/encodepath" ) // Client is the per-FilesURL handle used by Stat / List / DownloadFile diff --git a/cli/pkg/files/download/download.go b/cli/internal/files/download/download.go similarity index 100% rename from cli/pkg/files/download/download.go rename to cli/internal/files/download/download.go diff --git a/cli/pkg/files/download/download_test.go b/cli/internal/files/download/download_test.go similarity index 100% rename from cli/pkg/files/download/download_test.go rename to cli/internal/files/download/download_test.go diff --git a/cli/pkg/files/download/list.go b/cli/internal/files/download/list.go similarity index 99% rename from cli/pkg/files/download/list.go rename to cli/internal/files/download/list.go index 511c294a3..aae4309ee 100644 --- a/cli/pkg/files/download/list.go +++ b/cli/internal/files/download/list.go @@ -69,4 +69,3 @@ func (c *Client) List(ctx context.Context, plainPath string) ([]Entry, error) { } return out, nil } - diff --git a/cli/pkg/files/download/stat.go b/cli/internal/files/download/stat.go similarity index 100% rename from cli/pkg/files/download/stat.go rename to cli/internal/files/download/stat.go diff --git a/cli/pkg/files/download/walker.go b/cli/internal/files/download/walker.go similarity index 100% rename from cli/pkg/files/download/walker.go rename to cli/internal/files/download/walker.go diff --git 
a/cli/pkg/files/download/walker_test.go b/cli/internal/files/download/walker_test.go similarity index 100% rename from cli/pkg/files/download/walker_test.go rename to cli/internal/files/download/walker_test.go diff --git a/cli/pkg/files/encodepath/encodepath.go b/cli/internal/files/encodepath/encodepath.go similarity index 100% rename from cli/pkg/files/encodepath/encodepath.go rename to cli/internal/files/encodepath/encodepath.go diff --git a/cli/pkg/files/encodepath/encodepath_test.go b/cli/internal/files/encodepath/encodepath_test.go similarity index 100% rename from cli/pkg/files/encodepath/encodepath_test.go rename to cli/internal/files/encodepath/encodepath_test.go diff --git a/cli/pkg/files/rm/rm.go b/cli/internal/files/rm/rm.go similarity index 99% rename from cli/pkg/files/rm/rm.go rename to cli/internal/files/rm/rm.go index 7f1cef57b..94701e11a 100644 --- a/cli/pkg/files/rm/rm.go +++ b/cli/internal/files/rm/rm.go @@ -27,7 +27,7 @@ import ( "sort" "strings" - "github.com/beclab/Olares/cli/pkg/files/encodepath" + "github.com/beclab/Olares/cli/internal/files/encodepath" ) // Client is the per-FilesURL handle used by DeleteBatch. 
diff --git a/cli/pkg/files/rm/rm_test.go b/cli/internal/files/rm/rm_test.go similarity index 100% rename from cli/pkg/files/rm/rm_test.go rename to cli/internal/files/rm/rm_test.go diff --git a/cli/pkg/files/upload/api.go b/cli/internal/files/upload/api.go similarity index 99% rename from cli/pkg/files/upload/api.go rename to cli/internal/files/upload/api.go index 3ecf0e64c..1ee4d3937 100644 --- a/cli/pkg/files/upload/api.go +++ b/cli/internal/files/upload/api.go @@ -21,7 +21,7 @@ import ( "net/url" "strings" - "github.com/beclab/Olares/cli/pkg/files/encodepath" + "github.com/beclab/Olares/cli/internal/files/encodepath" ) // Client is the per-FilesURL handle used by uploader.go and the cobra diff --git a/cli/pkg/files/upload/api_test.go b/cli/internal/files/upload/api_test.go similarity index 100% rename from cli/pkg/files/upload/api_test.go rename to cli/internal/files/upload/api_test.go diff --git a/cli/pkg/files/upload/uploader.go b/cli/internal/files/upload/uploader.go similarity index 99% rename from cli/pkg/files/upload/uploader.go rename to cli/internal/files/upload/uploader.go index 396c7a6fe..1d20dcbe4 100644 --- a/cli/pkg/files/upload/uploader.go +++ b/cli/internal/files/upload/uploader.go @@ -37,7 +37,7 @@ import ( "strings" "time" - "github.com/beclab/Olares/cli/pkg/files/encodepath" + "github.com/beclab/Olares/cli/internal/files/encodepath" ) // DefaultChunkSize is 8 MiB — the same value the web app uses diff --git a/cli/pkg/files/upload/uploader_test.go b/cli/internal/files/upload/uploader_test.go similarity index 98% rename from cli/pkg/files/upload/uploader_test.go rename to cli/internal/files/upload/uploader_test.go index 224364587..65e5a5a25 100644 --- a/cli/pkg/files/upload/uploader_test.go +++ b/cli/internal/files/upload/uploader_test.go @@ -110,9 +110,9 @@ func (cr *chunkRecorder) record(r *http.Request) (*recordedChunk, error) { // uploadServerOpts plumbs per-test customization into uploadServer // without proliferating constructor variants. 
type uploadServerOpts struct { - uploadedBytes int64 // probe response + uploadedBytes int64 // probe response uploadHandler func(*chunkRecorder, http.ResponseWriter, *http.Request) // override chunk POST - uploadLinkPath string // override default link path + uploadLinkPath string // override default link path } // uploadServer wires up an httptest.Server that knows how to answer diff --git a/cli/pkg/files/upload/walker.go b/cli/internal/files/upload/walker.go similarity index 100% rename from cli/pkg/files/upload/walker.go rename to cli/internal/files/upload/walker.go diff --git a/cli/pkg/files/upload/walker_test.go b/cli/internal/files/upload/walker_test.go similarity index 100% rename from cli/pkg/files/upload/walker_test.go rename to cli/internal/files/upload/walker_test.go diff --git a/cli/skills/olares-files/SKILL.md b/cli/skills/olares-files/SKILL.md index 7abbfd5d3..6cfab7698 100644 --- a/cli/skills/olares-files/SKILL.md +++ b/cli/skills/olares-files/SKILL.md @@ -60,7 +60,7 @@ These are real backend behaviors that have already cost us debugging time. Teach ### 1. POST `/api/resources/<dir>/` auto-renames existing directories -Hitting the directory-create endpoint against an existing directory does **not** return 409. The server creates a sibling named `<dir> (1)` instead. See the docstring on [`cli/pkg/files/upload/api.go`](cli/pkg/files/upload/api.go)'s `Mkdir` for the precise wording. +Hitting the directory-create endpoint against an existing directory does **not** return 409. The server creates a sibling named `<dir> (1)` instead. See the docstring on [`cli/internal/files/upload/api.go`](cli/internal/files/upload/api.go)'s `Mkdir` for the precise wording. Consequence baked into the CLI: `files upload` does **not** pre-create the destination directory. It relies on the chunk POST to implicitly materialize parents. 
**The destination directory MUST already exist on the server** — if you need a fresh directory, create it through the LarePass web app first (a future `files mkdir` verb may cover this). @@ -70,7 +70,7 @@ User-visible symptom of getting this wrong (older CLI versions): an extra `Docum The backend's single-file `List` handler hard-codes `Content: true` (`files/pkg/drivers/posix/posix/posix.go` `getFiles`) and tries to slurp the file's bytes into the response envelope. For json / binary / large files, this just 500s. -Consequence baked into the CLI: `Stat` always lists the **parent** directory and looks up the leaf in its items array (see [`cli/pkg/files/download/stat.go`](cli/pkg/files/download/stat.go)). This matches what the LarePass web app does — it never probes a single-file resource directly. Both `download` and `cat` use this code path. +Consequence baked into the CLI: `Stat` always lists the **parent** directory and looks up the leaf in its items array (see [`cli/internal/files/download/stat.go`](cli/internal/files/download/stat.go)). This matches what the LarePass web app does — it never probes a single-file resource directly. Both `download` and `cat` use this code path. If the user reports `HTTP 500` against `/api/resources/.../<filename>` with no trailing slash, do NOT suggest "just retry". The right answer is: use the CLI command (`files cat` / `files download`), or list the parent and look at items. @@ -97,7 +97,7 @@ Default output: a one-line header (`<path> (N dirs, M files, modified ...)`) fo ### `files upload <local-path> <remote-path>` -Resumable chunked upload to drive/Home/<...>. See [`cli/cmd/ctl/files/upload.go`](cli/cmd/ctl/files/upload.go) and [`cli/pkg/files/upload/`](cli/pkg/files/upload/). +Resumable chunked upload to drive/Home/<...>. See [`cli/cmd/ctl/files/upload.go`](cli/cmd/ctl/files/upload.go) and [`cli/internal/files/upload/`](cli/internal/files/upload/). ```bash # Upload one file into an existing directory. 
@@ -133,7 +133,7 @@ Resume is automatic and server-driven: re-running the same command after a Ctrl- ### `files download <remote-path> [<local-path>]` -Download a single file or a whole directory tree. See [`cli/cmd/ctl/files/download.go`](cli/cmd/ctl/files/download.go) and [`cli/pkg/files/download/`](cli/pkg/files/download/). +Download a single file or a whole directory tree. See [`cli/cmd/ctl/files/download.go`](cli/cmd/ctl/files/download.go) and [`cli/internal/files/download/`](cli/internal/files/download/). ```bash # Single file into the current directory (./<basename>). @@ -188,7 +188,7 @@ Wire shape: `GET /api/raw/<encPath>?inline=true` (the same endpoint LarePass use ### `files rm [-r] [-f] <remote-path>...` -Delete one or more remote files / directories. See [`cli/cmd/ctl/files/rm.go`](cli/cmd/ctl/files/rm.go) and [`cli/pkg/files/rm/`](cli/pkg/files/rm/). +Delete one or more remote files / directories. See [`cli/cmd/ctl/files/rm.go`](cli/cmd/ctl/files/rm.go) and [`cli/internal/files/rm/`](cli/internal/files/rm/). ```bash # Delete one file. From b3ba00470c4a9614f0c86503143ac2fa9bf8ee87 Mon Sep 17 00:00:00 2001 From: Peng Peng <billpengpeng@gmail.com> Date: Sun, 26 Apr 2026 16:46:59 +0800 Subject: [PATCH 11/12] feat(cli): add olares-cli market commands with --watch + skill MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a profile-authenticated `market` command tree that talks to the per-user Market app-store v2 API (`<MarketURL>/app-store/api/v2`) using the same `cmdutil.Factory` / `X-Authorization` transport as `olares-cli files`. The existing `cli/cmd/ctl/app/` tree is kept intact so reviewers can diff the two side by side. Verbs: - Catalog (read-only): `list`, `categories`, `get`. - Runtime: `status` with the "not installed" UX fix, the source-fallback hint, and `--watch` for op-agnostic recovery (e.g. confirming a previously fire-and-forget install reaches `running`). 
- Lifecycle (mutating, all support `--watch`): `install`, `upgrade`, `uninstall`, `clone`, `stop`, `resume`, `cancel`. - Local sources: `upload` / `delete`, restricted to `-s upload|studio|cli`. The `--watch` machinery (`cli/cmd/ctl/market/watch.go`) blocks until the backend reaches a terminal state. Per-op success/failure sets are derived from the backend's `ApplicationManagerState` enum; `matchOpType` gates "success" against the in-flight `OpType` so an `upgrade` issued on an already-`running` app cannot return success on tick zero. `cancel` and `status --watch` are deliberately op-agnostic. Output: TTY emits one info line per state transition; `-o json` emits a single final `OperationResult` with the new `finalState` / `finalOpType` fields (`omitempty`, so non-watch JSON output is byte-identical). Ctrl-C exits cleanly via `signal.NotifyContext` with `watch canceled by user`; the underlying mutation is not stopped — re-attach via `status --watch`. Plumbing: - `cli/pkg/olares.ID.MarketURL(localPrefix)` derives the market origin in the same shape as the existing `FilesURL` / `DesktopURL` / `VaultURL`. - `credential.ResolvedProfile.MarketURL` and `buildResolved` propagate it through the factory, so commands never touch kubeconfig. - `cmd/ctl/root.go` registers `market.NewMarketCommand(factory)` next to the existing `app` / `profile` / `files` commands. Skill: - `cli/skills/olares-market/SKILL.md` mirrors the `olares-files` / `olares-shared` shape (frontmatter, top callout, core concepts → auth transport → command cheatsheet → `--watch` section → errors → workflows → security rules). Defers profile / login / 401-403 recovery to `olares-shared` instead of duplicating it. Tests: `cli/cmd/ctl/market/watch_test.go` covers the classifier (per-op terminal sets, OpType gating, `cancel` op-agnostic path, op-agnostic `status --watch`) and end-to-end `waitForTerminal` against an `httptest.Server` (state transitions, timeout surfacing last-seen state, JSON output stability). 
Made-with: Cursor --- cli/cmd/ctl/market/cancel.go | 49 +++ cli/cmd/ctl/market/client.go | 309 +++++++++++++++ cli/cmd/ctl/market/clone.go | 349 +++++++++++++++++ cli/cmd/ctl/market/common.go | 271 +++++++++++++ cli/cmd/ctl/market/delete.go | 86 +++++ cli/cmd/ctl/market/env.go | 242 ++++++++++++ cli/cmd/ctl/market/get.go | 179 +++++++++ cli/cmd/ctl/market/install.go | 83 ++++ cli/cmd/ctl/market/list.go | 247 ++++++++++++ cli/cmd/ctl/market/options.go | 230 +++++++++++ cli/cmd/ctl/market/resume.go | 47 +++ cli/cmd/ctl/market/root.go | 55 +++ cli/cmd/ctl/market/status.go | 407 +++++++++++++++++++ cli/cmd/ctl/market/stop.go | 50 +++ cli/cmd/ctl/market/types.go | 188 +++++++++ cli/cmd/ctl/market/uninstall.go | 65 ++++ cli/cmd/ctl/market/upgrade.go | 82 ++++ cli/cmd/ctl/market/upload.go | 166 ++++++++ cli/cmd/ctl/market/watch.go | 431 +++++++++++++++++++++ cli/cmd/ctl/market/watch_test.go | 516 +++++++++++++++++++++++++ cli/cmd/ctl/root.go | 2 + cli/pkg/credential/default_provider.go | 1 + cli/pkg/credential/types.go | 1 + cli/pkg/olares/id.go | 10 + cli/skills/olares-market/SKILL.md | 319 +++++++++++++++ 25 files changed, 4385 insertions(+) create mode 100644 cli/cmd/ctl/market/cancel.go create mode 100644 cli/cmd/ctl/market/client.go create mode 100644 cli/cmd/ctl/market/clone.go create mode 100644 cli/cmd/ctl/market/common.go create mode 100644 cli/cmd/ctl/market/delete.go create mode 100644 cli/cmd/ctl/market/env.go create mode 100644 cli/cmd/ctl/market/get.go create mode 100644 cli/cmd/ctl/market/install.go create mode 100644 cli/cmd/ctl/market/list.go create mode 100644 cli/cmd/ctl/market/options.go create mode 100644 cli/cmd/ctl/market/resume.go create mode 100644 cli/cmd/ctl/market/root.go create mode 100644 cli/cmd/ctl/market/status.go create mode 100644 cli/cmd/ctl/market/stop.go create mode 100644 cli/cmd/ctl/market/types.go create mode 100644 cli/cmd/ctl/market/uninstall.go create mode 100644 cli/cmd/ctl/market/upgrade.go create mode 100644 
cli/cmd/ctl/market/upload.go create mode 100644 cli/cmd/ctl/market/watch.go create mode 100644 cli/cmd/ctl/market/watch_test.go create mode 100644 cli/skills/olares-market/SKILL.md diff --git a/cli/cmd/ctl/market/cancel.go b/cli/cmd/ctl/market/cancel.go new file mode 100644 index 000000000..07d8382d2 --- /dev/null +++ b/cli/cmd/ctl/market/cancel.go @@ -0,0 +1,49 @@ +package market + +import ( + "context" + + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/cmdutil" +) + +func NewCmdMarketCancel(f *cmdutil.Factory) *cobra.Command { + opts := newMarketOptions(f) + cmd := &cobra.Command{ + Use: "cancel {app-name}", + Short: "Cancel the current in-progress app operation", + Long: `Cancel the current in-progress operation for an app. + +Examples: + olares-cli market cancel myapp`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runCancel(opts, args[0]) + }, + } + opts.addOutputFlags(cmd) + opts.addWatchFlags(cmd) + return cmd +} + +func runCancel(opts *MarketOptions, appName string) error { + mc, err := opts.prepare() + if err != nil { + return opts.failOp("cancel", appName, err) + } + + opts.info("Canceling in-progress operation for '%s' (user '%s')...", appName, mc.olaresID) + + ctx := context.Background() + resp, err := mc.CancelOperation(ctx, appName) + if err != nil { + return opts.failOp("cancel", appName, err) + } + + result := newOperationResult(mc, "cancel", appName, "", "", "cancel requested", resp) + // Cancel's terminal row carries the *underlying* OpType (install / + // upgrade / ...), not "cancel", so the watch target opts out of + // strict OpType matching via matchOpType=false in newWatchTarget. 
+ return runWithWatch(opts, mc, result, newWatchTarget(watchCancel, appName, opts.Source)) +} diff --git a/cli/cmd/ctl/market/client.go b/cli/cmd/ctl/market/client.go new file mode 100644 index 000000000..d7306d7f2 --- /dev/null +++ b/cli/cmd/ctl/market/client.go @@ -0,0 +1,309 @@ +package market + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "mime/multipart" + "net/http" + "os" + "strings" + + "github.com/beclab/Olares/cli/pkg/credential" +) + +// apiPrefix is the app-store v2 path the Market SPA also uses; see +// apps/packages/app/src/stores/market/center.ts (`appUrl`). +const apiPrefix = "/app-store/api/v2" + +// APIResponse is the canonical envelope the app-store v2 backend wraps every +// response in (success/message/data). We keep it identical to the shape the +// SPA's axios layer parses so the CLI's diagnostics can use the same fields. +type APIResponse struct { + Success bool `json:"success"` + Message string `json:"message"` + Data json.RawMessage `json:"data,omitempty"` +} + +// MarketClient talks to the per-user app-store v2 API at +// `<MarketURL>/app-store/api/v2`. It is the moral counterpart of `files`'s +// `download.Client`: a thin HTTP wrapper that delegates auth to the caller's +// http.Client (Factory's authTransport injects `X-Authorization`) and +// otherwise just maps Go method calls to JSON requests. +// +// Two HTTP clients are stored: +// - httpClient comes from cmdutil.Factory and inherits the 30s overall +// timeout + authTransport injection. Used for short JSON requests. +// - For multipart chart uploads we build a per-call client without an +// overall timeout (see newMarketUploadHTTPClient) and inject the access +// token manually, mirroring files/upload.go's `newUploadHTTPClient`. 
+type MarketClient struct { + httpClient *http.Client + baseURL string + source string + + // Identity bits captured from the resolved profile, used by: + // - OperationResult.User (for diagnostics / scripting), + // - upload helper to build the long-timeout client + X-Authorization. + olaresID string + accessToken string + insecureSkipVerify bool +} + +// NewMarketClient builds a MarketClient from a factory-provided http.Client +// (already wired with X-Authorization injection) and a resolved profile. +// The base URL is `<rp.MarketURL>/app-store/api/v2`. +func NewMarketClient(hc *http.Client, rp *credential.ResolvedProfile, source string) *MarketClient { + base := strings.TrimRight(rp.MarketURL, "/") + apiPrefix + return &MarketClient{ + httpClient: hc, + baseURL: base, + source: source, + olaresID: rp.OlaresID, + accessToken: rp.AccessToken, + insecureSkipVerify: rp.InsecureSkipVerify, + } +} + +func (c *MarketClient) doRequest(ctx context.Context, method, path string, body interface{}) (*APIResponse, error) { + var reqBody io.Reader + if body != nil { + data, err := json.Marshal(body) + if err != nil { + return nil, fmt.Errorf("failed to marshal request body: %w", err) + } + reqBody = bytes.NewReader(data) + } + + url := c.baseURL + path + req, err := http.NewRequestWithContext(ctx, method, url, reqBody) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + + return c.executeRequest(c.httpClient, req) +} + +func (c *MarketClient) doMultipart(ctx context.Context, path, filename string, data io.Reader, source string) (*APIResponse, error) { + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("chart", filename) + if err != nil { + return nil, fmt.Errorf("failed to create form file: %w", err) + } + if _, err := io.Copy(part, data); err != nil { + return nil, fmt.Errorf("failed to copy 
chart data: %w", err) + } + if err := writer.WriteField("source", source); err != nil { + return nil, fmt.Errorf("failed to write source field: %w", err) + } + if err := writer.Close(); err != nil { + return nil, fmt.Errorf("failed to close multipart writer: %w", err) + } + + url := c.baseURL + path + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, &buf) + if err != nil { + return nil, fmt.Errorf("failed to create upload request: %w", err) + } + req.Header.Set("Content-Type", writer.FormDataContentType()) + req.Header.Set("Accept", "application/json") + // authTransport on c.httpClient also injects X-Authorization, but we use + // the dedicated upload client (no overall timeout) and add the header + // here ourselves — same pattern as files/upload.go. + if c.accessToken != "" { + req.Header.Set("X-Authorization", c.accessToken) + } + + return c.executeRequest(newMarketUploadHTTPClient(c.insecureSkipVerify), req) +} + +// newMarketUploadHTTPClient is the sibling of files/upload.go's +// newUploadHTTPClient: same TLS knob, no overall Timeout (we rely on context +// cancellation), no authTransport (the caller sets X-Authorization). +func newMarketUploadHTTPClient(insecureSkipVerify bool) *http.Client { + base := http.DefaultTransport.(*http.Transport).Clone() + if insecureSkipVerify { + base.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} // #nosec G402 -- explicit profile opt-in + } + return &http.Client{Transport: base} +} + +func (c *MarketClient) executeRequest(hc *http.Client, req *http.Request) (*APIResponse, error) { + resp, err := hc.Do(req) + if err != nil { + return nil, fmt.Errorf("request failed: %w", err) + } + defer resp.Body.Close() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response: %w", err) + } + + // 401/403 are reformatted with the standard `profile login` CTA so users + // hit the same wording they get from `files ls`/`files cat` (see + // reformatMarketAuthErr). 
The body may not be JSON (the edge proxy can + // short-circuit to a plaintext page), so the JSON parse below is best- + // effort. + if resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden { + return nil, reformatMarketAuthErr(resp.StatusCode, respBody, c.olaresID) + } + + var apiResp APIResponse + if err := json.Unmarshal(respBody, &apiResp); err != nil { + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(respBody)) + } + return nil, fmt.Errorf("failed to parse response: %w", err) + } + + if resp.StatusCode != http.StatusOK || !apiResp.Success { + message := strings.TrimSpace(apiResp.Message) + if message == "" { + message = strings.TrimSpace(string(respBody)) + } + if message == "" { + message = fmt.Sprintf("HTTP %d", resp.StatusCode) + } + return &apiResp, fmt.Errorf("API error (HTTP %d): %s", resp.StatusCode, message) + } + + return &apiResp, nil +} + +// reformatMarketAuthErr mirrors reformatHTTPErr in cmd/ctl/files/download.go: +// turn 401/403 into the same `olares-cli profile login --olares-id <id>` CTA +// users see from the files verbs, so the troubleshooting story is consistent. 
func reformatMarketAuthErr(status int, respBody []byte, olaresID string) error {
	body := strings.TrimSpace(string(respBody))
	if len(body) > 200 {
		// Cap the echoed server snippet.
		// NOTE(review): this is a byte-wise cut and may split a multi-byte
		// rune — confirm whether rune-aware truncation is wanted here.
		body = body[:200]
	}
	if olaresID != "" {
		if body != "" {
			return fmt.Errorf("server rejected the access token (HTTP %d: %s); please run: olares-cli profile login --olares-id %s",
				status, body, olaresID)
		}
		return fmt.Errorf("server rejected the access token (HTTP %d); please run: olares-cli profile login --olares-id %s",
			status, olaresID)
	}
	// Without an olares id on the client we can only suggest a generic login.
	return fmt.Errorf("server rejected the access token (HTTP %d); please re-run `olares-cli profile login`", status)
}

// GetMarketData GETs /market/data.
func (c *MarketClient) GetMarketData(ctx context.Context) (*APIResponse, error) {
	return c.doRequest(ctx, http.MethodGet, "/market/data", nil)
}

// GetMarketState GETs /market/state.
func (c *MarketClient) GetMarketState(ctx context.Context) (*APIResponse, error) {
	return c.doRequest(ctx, http.MethodGet, "/market/state", nil)
}

// GetAppsInfo POSTs a batch app query to /apps.
func (c *MarketClient) GetAppsInfo(ctx context.Context, apps []AppQueryInfo) (*APIResponse, error) {
	return c.doRequest(ctx, http.MethodPost, "/apps", map[string]interface{}{
		"apps": apps,
	})
}

// UploadChart streams the chart file at filePath to /apps/upload. An empty
// source falls back to the client's configured default source.
func (c *MarketClient) UploadChart(ctx context.Context, filePath, source string) (*APIResponse, error) {
	if source == "" {
		source = c.source
	}
	file, err := os.Open(filePath)
	if err != nil {
		return nil, fmt.Errorf("failed to open file: %w", err)
	}
	defer file.Close()
	return c.doMultipart(ctx, "/apps/upload", file.Name(), file, source)
}

// UploadChartFromReader uploads chart bytes from an arbitrary reader under
// the given filename. An empty source falls back to the client's default.
func (c *MarketClient) UploadChartFromReader(ctx context.Context, filename string, data io.Reader, source string) (*APIResponse, error) {
	if source == "" {
		source = c.source
	}
	return c.doMultipart(ctx, "/apps/upload", filename, data, source)
}

// DeleteLocalApp removes an uploaded chart version from a local source via
// DELETE /local-apps/delete.
func (c *MarketClient) DeleteLocalApp(ctx context.Context, appName, appVersion, sourceID string) (*APIResponse, error) {
	if sourceID == "" {
		sourceID = c.source
	}
	return c.doRequest(ctx, http.MethodDelete, "/local-apps/delete",
		map[string]string{
			"app_name":    appName,
			"app_version": appVersion,
			"source":      sourceID,
		})
}

// InstallApp POSTs a synchronous install request for appName@version.
func (c *MarketClient) InstallApp(ctx context.Context, appName, version, source string, envs []AppEnvVar) (*APIResponse, error) {
	if source == "" {
		source = c.source
	}
	return c.doRequest(ctx, http.MethodPost, "/apps/"+appName+"/install", InstallRequest{
		Source:  source,
		AppName: appName,
		Version: version,
		Sync:    true,
		Envs:    envs,
	})
}

// CloneApp POSTs a synchronous clone request: a new instance named title,
// with optional env overrides and per-entrance titles.
func (c *MarketClient) CloneApp(ctx context.Context, appName, source, title string, envs []AppEnvVar, entrances []AppEntrance) (*APIResponse, error) {
	if source == "" {
		source = c.source
	}
	return c.doRequest(ctx, http.MethodPost, "/apps/"+appName+"/clone", CloneRequest{
		Source:    source,
		AppName:   appName,
		Title:     title,
		Sync:      true,
		Envs:      envs,
		Entrances: entrances,
	})
}

// UninstallApp DELETEs the app; all/deleteData map to the backend's
// uninstall options.
func (c *MarketClient) UninstallApp(ctx context.Context, appName string, all, deleteData bool) (*APIResponse, error) {
	return c.doRequest(ctx, http.MethodDelete, "/apps/"+appName, UninstallRequest{
		Sync:       true,
		All:        all,
		DeleteData: deleteData,
	})
}

// UpgradeApp PUTs an upgrade request; it reuses InstallRequest as payload.
func (c *MarketClient) UpgradeApp(ctx context.Context, appName, version, source string, envs []AppEnvVar) (*APIResponse, error) {
	if source == "" {
		source = c.source
	}
	return c.doRequest(ctx, http.MethodPut, "/apps/"+appName+"/upgrade", InstallRequest{
		Source:  source,
		AppName: appName,
		Version: version,
		Sync:    true,
		Envs:    envs,
	})
}

// CancelOperation aborts an in-flight install for appName.
func (c *MarketClient) CancelOperation(ctx context.Context, appName string) (*APIResponse, error) {
	return c.doRequest(ctx, http.MethodDelete, "/apps/"+appName+"/install", map[string]interface{}{
		"sync": true,
	})
}

// ResumeApp restarts a stopped app.
func (c *MarketClient) ResumeApp(ctx context.Context, appName string) (*APIResponse, error) {
	return c.doRequest(ctx, http.MethodPost, "/apps/resume", map[string]string{
		"appName": appName,
	})
}

// StopApp stops one app (or all instances when all is true).
func (c *MarketClient) StopApp(ctx context.Context, appName string, all bool)
(*APIResponse, error) {
	return c.doRequest(ctx, http.MethodPost, "/apps/stop", map[string]interface{}{
		"appName": appName,
		"all":     all,
	})
}
diff --git a/cli/cmd/ctl/market/clone.go b/cli/cmd/ctl/market/clone.go
new file mode 100644
index 000000000..80de28fee
--- /dev/null
+++ b/cli/cmd/ctl/market/clone.go
@@ -0,0 +1,349 @@
package market

import (
	"context"
	"encoding/json"
	"fmt"
	"sort"
	"strings"

	appserviceapi "github.com/beclab/Olares/framework/app-service/pkg/apiserver/api"
	"github.com/spf13/cobra"

	"github.com/beclab/Olares/cli/pkg/cmdutil"
)

// NewCmdMarketClone wires `market clone {app-name}`: flags, help text, and
// the RunE dispatch into runClone.
func NewCmdMarketClone(f *cmdutil.Factory) *cobra.Command {
	opts := newMarketOptions(f)
	cmd := &cobra.Command{
		Use:   "clone {app-name}",
		Short: "Clone an app as a new instance",
		Long: `Clone an installed application to create a new instance with a different title.
Only apps that support multiple instances can be cloned.

Use --entrance-title NAME=TITLE to override cloned desktop shortcut titles.
For apps with a single visible entrance, the entrance title defaults to --title.

The --title flag is required.

Examples:
  olares-cli market clone firefox --title "Firefox Cloned"
  olares-cli market clone myapp --title "MyApp Cloned" --env API_URL=http://dev.example.com
  olares-cli market clone myapp --title "MyApp Cloned" --entrance-title ui="New UI" --entrance-title api="New API"`,
		Args: cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runClone(opts, args[0])
		},
	}
	opts.addCommonFlags(cmd)
	opts.addOutputFlags(cmd)
	opts.addTitleFlag(cmd)
	opts.addEnvFlag(cmd)
	opts.addEntranceTitleFlag(cmd)
	opts.addWatchFlags(cmd)
	return cmd
}

// runClone validates --title (required, <=30 chars), checks the app supports
// multiple instances, maps --entrance-title/--env flags, then submits the
// clone and hands the result to the watcher.
func runClone(opts *MarketOptions, appName string) error {
	mc, err := opts.prepare()
	if err != nil {
		return opts.failOp("clone", appName, err)
	}

	source := resolveCatalogSource(opts)
	if strings.TrimSpace(opts.Source) == "" {
		opts.info("Using source: %s", source)
	}

	title := strings.TrimSpace(opts.Title)
	if title == "" {
		return opts.failOp("clone", appName, fmt.Errorf("--title is required for cloning"))
	}
	if len(title) > 30 {
		return opts.failOp("clone", appName, fmt.Errorf("--title cannot exceed 30 characters"))
	}

	ctx := context.Background()
	appInfo, err := fetchAppInfo(ctx, mc, appName, source)
	if err != nil {
		return opts.failOp("clone", appName, err)
	}
	if !appSupportsClone(appInfo) {
		return opts.failOp("clone", appName, fmt.Errorf("app '%s' from source '%s' does not support clone", appName, source))
	}

	entrances, err := buildCloneEntrances(appInfo, title, opts.EntranceTitles)
	if err != nil {
		return opts.failOp("clone", appName, err)
	}

	envs, err := parseEnvFlags(opts.Envs)
	if err != nil {
		return opts.failOp("clone", appName, err)
	}

	opts.info("Cloning '%s' as '%s' from '%s' for user '%s'...", appName, title, source, mc.olaresID)

	resp, err := mc.CloneApp(ctx, appName, source, title, envs, entrances)
	if err != nil {
		// Prefer the structured env/clone validation payloads over the raw
		// transport error when the backend supplied them.
		if envErr := parseServerEnvError(resp, appName); envErr != nil {
			return opts.failOp("clone",
appName, envErr)
		}
		if cloneErr := parseServerCloneError(resp); cloneErr != nil {
			return opts.failOp("clone", appName, cloneErr)
		}
		return opts.failOp("clone", appName, err)
	}

	result := newOperationResult(mc, "clone", appName, source, "", fmt.Sprintf("clone requested with title %q", title), resp)
	// Clone runs through the install lifecycle backend-side, but the row
	// that matters is the *new* instance (TargetApp) — the source app is
	// already running. Fall back to the source appName when the backend
	// hasn't surfaced the cloned uid yet, so the watcher still finds a
	// row to track.
	target := result.TargetApp
	if strings.TrimSpace(target) == "" {
		target = appName
	}
	return runWithWatch(opts, mc, result, newWatchTarget(watchInstall, target, source))
}

// cloneEntranceSpec is one visible entrance of the source app: its name and
// its current (possibly localized) title.
type cloneEntranceSpec struct {
	Name  string
	Title string
}

// cloneValidationError carries backend-reported title-validation failures
// for a clone request.
type cloneValidationError struct {
	TitleMessage     string
	MissingEntrances []appserviceapi.EntranceClone
	InvalidEntrances []appserviceapi.EntranceClone
}

// Error joins the app-title problem and the missing/invalid entrance lists
// into a single "; "-separated line.
func (e *cloneValidationError) Error() string {
	var parts []string

	if msg := strings.TrimSpace(e.TitleMessage); msg != "" {
		parts = append(parts, "app title: "+msg)
	}
	if len(e.MissingEntrances) > 0 {
		parts = append(parts, "missing entrance titles: "+formatBackendCloneEntrances(e.MissingEntrances, false))
	}
	if len(e.InvalidEntrances) > 0 {
		parts = append(parts, "invalid entrance titles: "+formatBackendCloneEntrances(e.InvalidEntrances, true))
	}

	if len(parts) == 0 {
		return "invalid clone titles"
	}
	return strings.Join(parts, "; ")
}

// buildCloneEntrances maps --entrance-title flags onto the app's visible
// entrances:
//   - no visible entrances: any flag is rejected, nil entrances returned;
//   - exactly one visible entrance and no flags: its title defaults to the
//     app title (documented behavior of `market clone`);
//   - otherwise every visible entrance must be covered by a flag, and flags
//     naming unknown entrances are rejected.
func buildCloneEntrances(appInfo map[string]interface{}, appTitle string, rawTitles []string) ([]AppEntrance, error) {
	cloneEntrances := requiredCloneEntrances(appInfo)

	provided, err := parseCloneEntranceTitles(rawTitles)
	if err != nil {
		return nil, err
	}

	if len(cloneEntrances) == 0 {
		if len(provided) > 0 {
			return nil, fmt.Errorf("--entrance-title is not needed because this app has no visible entrances")
		}
		return nil, nil
	}

	if len(cloneEntrances) == 1 && len(provided) == 0 {
		return []AppEntrance{{
			Name:  cloneEntrances[0].Name,
			Title: appTitle,
		}}, nil
	}

	validNames := make(map[string]struct{}, len(cloneEntrances))
	for _, entrance := range cloneEntrances {
		validNames[entrance.Name] = struct{}{}
	}

	var unknown []string
	for name := range provided {
		if _, ok := validNames[name]; !ok {
			unknown = append(unknown, name)
		}
	}
	if len(unknown) > 0 {
		// Sorted for deterministic error output (map iteration is random).
		sort.Strings(unknown)
		return nil, fmt.Errorf("unknown --entrance-title target(s): %s; available entrances: %s", strings.Join(unknown, ", "), formatRequiredCloneEntrances(cloneEntrances))
	}

	var missing []string
	entrances := make([]AppEntrance, 0, len(cloneEntrances))
	for _, entrance := range cloneEntrances {
		title, ok := provided[entrance.Name]
		if !ok {
			missing = append(missing, describeCloneEntrance(entrance))
			continue
		}
		entrances = append(entrances, AppEntrance{
			Name:  entrance.Name,
			Title: title,
		})
	}

	if len(missing) > 0 {
		return nil, fmt.Errorf("missing --entrance-title for entrances: %s; repeat --entrance-title NAME=TITLE for each visible entrance", strings.Join(missing, ", "))
	}
	return entrances, nil
}

// parseCloneEntranceTitles parses repeated NAME=TITLE flags into a map,
// enforcing non-empty unique names, non-empty titles, and the 30-character
// title cap (same limit as --title).
func parseCloneEntranceTitles(rawTitles []string) (map[string]string, error) {
	titles := make(map[string]string, len(rawTitles))
	for _, raw := range rawTitles {
		parts := strings.SplitN(raw, "=", 2)
		name := strings.TrimSpace(parts[0])
		if len(parts) != 2 || name == "" {
			return nil, fmt.Errorf("invalid --entrance-title value %q: expected NAME=TITLE", raw)
		}

		title := strings.TrimSpace(parts[1])
		if title == "" {
			return nil, fmt.Errorf("invalid --entrance-title for entrance %q: title cannot be empty", name)
		}
		if len(title) > 30 {
			return nil, fmt.Errorf("invalid --entrance-title for entrance %q: title cannot exceed 30 characters", name)
		}
		if _, exists := titles[name]; exists {
			return
nil, fmt.Errorf("duplicate --entrance-title for entrance %q", name)
		}
		titles[name] = title
	}
	return titles, nil
}

// requiredCloneEntrances lists the app's visible entrances (name + current
// localized title); invisible entrances have no desktop shortcut and are
// skipped.
func requiredCloneEntrances(appInfo map[string]interface{}) []cloneEntranceSpec {
	rawEntrances, _ := getNestedValue(appInfo, "app_info", "app_entry", "entrances").([]interface{})
	entrances := make([]cloneEntranceSpec, 0, len(rawEntrances))
	for _, raw := range rawEntrances {
		entry, ok := raw.(map[string]interface{})
		if !ok {
			continue
		}

		name := strings.TrimSpace(getStringValue(entry, "name"))
		if name == "" {
			continue
		}

		if invisible, _ := entry["invisible"].(bool); invisible {
			continue
		}

		entrances = append(entrances, cloneEntranceSpec{
			Name:  name,
			Title: strings.TrimSpace(extractLocalizedString(entry["title"])),
		})
	}
	return entrances
}

// describeCloneEntrance formats one entrance for error/help text: the name,
// plus its current title when that adds information.
func describeCloneEntrance(entrance cloneEntranceSpec) string {
	if entrance.Title == "" || entrance.Title == entrance.Name {
		return entrance.Name
	}
	return fmt.Sprintf("%s (current: %s)", entrance.Name, entrance.Title)
}

// formatRequiredCloneEntrances joins entrance descriptions for messages.
func formatRequiredCloneEntrances(entrances []cloneEntranceSpec) string {
	names := make([]string, 0, len(entrances))
	for _, entrance := range entrances {
		names = append(names, describeCloneEntrance(entrance))
	}
	return strings.Join(names, ", ")
}

// parseServerCloneError extracts clone-title validation details from a
// failed API response; nil when the response carries no such payload.
func parseServerCloneError(resp *APIResponse) *cloneValidationError {
	if resp == nil || len(resp.Data) == 0 {
		return nil
	}

	data := parseResponseData(resp)
	checkResult := extractServerCloneCheckResult(data)
	if checkResult == nil {
		return nil
	}

	result := &cloneValidationError{
		MissingEntrances: checkResult.MissingValues,
		InvalidEntrances: checkResult.InvalidValues,
	}
	if !checkResult.TitleValidation.IsValid {
		result.TitleMessage = strings.TrimSpace(checkResult.TitleValidation.Message)
	}

	// Payload was clone-shaped but empty: treat as "no clone error".
	if result.TitleMessage == "" && len(result.MissingEntrances) == 0 && len(result.InvalidEntrances) == 0 {
		return nil
	}
	return result
}

// extractServerCloneCheckResult digs an AppEntranceCheckResult out of the
// (possibly wrapped) backend response payload; nil when absent or malformed.
func extractServerCloneCheckResult(data map[string]interface{}) *appserviceapi.AppEntranceCheckResult {
	if data == nil {
		return nil
	}

	checkPayload := data
	if backendResp, ok := data["backend_response"].(map[string]interface{}); ok {
		backendData, ok := backendResp["data"].(map[string]interface{})
		if !ok {
			return nil
		}
		checkPayload = backendData
	}

	checkType, _ := checkPayload["type"].(string)
	if checkType != appserviceapi.CheckTypeAppEntrance {
		return nil
	}

	// Some responses nest the result one level deeper under "Data".
	if nested, ok := checkPayload["Data"].(map[string]interface{}); ok {
		checkPayload = nested
	}

	// Round-trip through JSON to coerce the generic map into the typed
	// AppEntranceCheckResult.
	payload, err := json.Marshal(checkPayload)
	if err != nil {
		return nil
	}

	var result appserviceapi.AppEntranceCheckResult
	if err := json.Unmarshal(payload, &result); err != nil {
		return nil
	}
	return &result
}

// formatBackendCloneEntrances renders backend-reported entrance problems as
// a sorted, comma-separated list; includeMessage appends the per-entrance
// backend message when present.
func formatBackendCloneEntrances(entrances []appserviceapi.EntranceClone, includeMessage bool) string {
	parts := make([]string, 0, len(entrances))
	for _, entrance := range entrances {
		label := strings.TrimSpace(entrance.Name)
		if label == "" {
			label = strings.TrimSpace(entrance.Title)
		}
		if label == "" {
			continue
		}

		message := strings.TrimSpace(entrance.Message)
		if includeMessage && message != "" {
			parts = append(parts, fmt.Sprintf("%s (%s)", label, message))
			continue
		}
		if title := strings.TrimSpace(entrance.Title); title != "" && title != label {
			parts = append(parts, fmt.Sprintf("%s (current: %s)", label, title))
			continue
		}
		parts = append(parts, label)
	}

	sort.Strings(parts)
	return strings.Join(parts, ", ")
}
diff --git a/cli/cmd/ctl/market/common.go b/cli/cmd/ctl/market/common.go
new file mode 100644
index 000000000..1e0f566f3
--- /dev/null
+++ b/cli/cmd/ctl/market/common.go
@@ -0,0 +1,271 @@
package market

import (
	"context"
	"encoding/json"
	"fmt"
	"regexp"
	"strings"
	"time"

	"github.com/Masterminds/semver/v3"
)

// Default sources: catalog verbs hit the public market, local-chart verbs
// default to the cli source.
const (
	defaultCatalogSource = "market.olares"
	defaultLocalSource   = "cli"
)

// localSources is the allowlist of source ids that hold locally uploaded
// charts.
var localSources = map[string]bool{
	"upload": true,
	"studio": true,
	"cli":    true,
}

// resolveCatalogSource returns --source when set, else the public catalog.
func resolveCatalogSource(opts *MarketOptions) string {
	if s := strings.TrimSpace(opts.Source); s != "" {
		return s
	}
	return defaultCatalogSource
}

// resolveLocalSource returns --source when set, else the default local
// source.
func resolveLocalSource(opts *MarketOptions) string {
	if s := strings.TrimSpace(opts.Source); s != "" {
		return s
	}
	return defaultLocalSource
}

// validateLocalSource rejects source ids outside the localSources allowlist.
func validateLocalSource(source string) error {
	if !localSources[source] {
		return fmt.Errorf("invalid local source '%s': must be one of upload, studio, cli", source)
	}
	return nil
}

// validateVersion requires a strict semver; an optional leading "v" is
// stripped before parsing.
func validateVersion(version string) error {
	if _, err := semver.StrictNewVersion(strings.TrimPrefix(version, "v")); err != nil {
		return fmt.Errorf("invalid version '%s': must be a valid semver (e.g. 1.0.0, 1.2.3)", version)
	}
	return nil
}

// envNamePattern is the accepted shape of an env-var name.
var envNamePattern = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_]*$`)

// validateEnvName enforces envNamePattern.
func validateEnvName(name string) error {
	if !envNamePattern.MatchString(name) {
		return fmt.Errorf("invalid env name '%s': must start with a letter and contain only letters, digits, and underscores", name)
	}
	return nil
}

// resolveVersionInSource looks up the app's current version in the given
// source (15s budget); used when the caller omitted --version.
func resolveVersionInSource(mc *MarketClient, appName, source string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	appInfo, err := fetchAppInfo(ctx, mc, appName, source)
	if err != nil {
		return "", err
	}

	version, _ := appInfo["version"].(string)
	if version == "" {
		return "", fmt.Errorf("app '%s' version not found in source '%s'", appName, source)
	}
	return version, nil
}

// fetchAppInfo queries /apps for a single app and returns its raw record.
func fetchAppInfo(ctx context.Context, mc *MarketClient, appName, source string) (map[string]interface{}, error) {
	resp, err := mc.GetAppsInfo(ctx, []AppQueryInfo{{AppID: appName, SourceDataName: source}})
	if err != nil {
		return nil, fmt.Errorf("failed to query app info: %w", err)
	}

	var result map[string]interface{}
	if err := json.Unmarshal(resp.Data, &result); err != nil {
		return nil, fmt.Errorf("failed to parse app info response: %w", err)
	}

	apps, _ := result["apps"].([]interface{})
	if len(apps) == 0 {
		return nil, fmt.Errorf("app '%s' not found in source '%s'", appName, source)
	}

	appInfo, ok := apps[0].(map[string]interface{})
	if !ok {
		return nil, fmt.Errorf("failed to parse app '%s' info", appName)
	}
	return appInfo, nil
}

// appSupportsClone reports whether the record declares allowMultipleInstall
// anywhere in its (deeply nested) payload.
func appSupportsClone(appInfo map[string]interface{}) bool {
	supported, ok := deepFindBoolValue(appInfo, "allowMultipleInstall")
	return ok && supported
}

// getNestedValue walks keys through nested maps; nil when any hop is missing
// or not a map.
func getNestedValue(m map[string]interface{}, keys ...string) interface{} {
	var current interface{} = m
	for _, key := range keys {
		cm, ok := current.(map[string]interface{})
		if !ok {
			return nil
		}
		current = cm[key]
	}
	return current
}

// getNestedString is getNestedValue narrowed to string ("" otherwise).
func getNestedString(m map[string]interface{}, keys ...string) string {
	v := getNestedValue(m, keys...)
	if s, ok := v.(string); ok {
		return s
	}
	return ""
}

// getStringValue reads a direct string field ("" when absent or non-string).
func getStringValue(m map[string]interface{}, key string) string {
	if s, ok := m[key].(string); ok {
		return s
	}
	return ""
}

// newOperationResult assembles the CLI-facing result row for an accepted
// operation, pulling the backend-assigned target app id out of the response
// (cleared again when it merely echoes the requested app).
func newOperationResult(mc *MarketClient, op, app, source, version, message string, resp *APIResponse) OperationResult {
	result := OperationResult{
		App:       app,
		Operation: op,
		Status:    "accepted",
		Message:   message,
		Source:    source,
		Version:   version,
	}
	if mc != nil {
		result.User = mc.olaresID
	}

	data := parseResponseData(resp)
	result.TargetApp = deepFindStringValue(data, "app_name", "appName", "uid")
	if result.TargetApp == result.App {
		result.TargetApp = ""
	}

	return result
}

// finishOperation prints the result row unless --quiet suppressed output.
func finishOperation(opts *MarketOptions, _ *MarketClient, result OperationResult) error {
	if opts.Quiet {
		return nil
	}
	opts.printResult(result)
	return nil
}

// parseEnvFlags turns repeated --env KEY=VALUE flags into AppEnvVar entries,
// validating each key's shape with validateEnvName.
func parseEnvFlags(rawEnvs []string) ([]AppEnvVar, error) {
	if len(rawEnvs) == 0 {
		return nil, nil
	}

	var envs []AppEnvVar
	for _, raw := range rawEnvs {
		parts :=
strings.SplitN(raw, "=", 2)
		key := strings.TrimSpace(parts[0])
		if len(parts) != 2 || key == "" {
			return nil, fmt.Errorf("invalid env format '%s': expected KEY=VALUE", raw)
		}
		if err := validateEnvName(key); err != nil {
			return nil, err
		}
		envs = append(envs, AppEnvVar{
			EnvName: key,
			Value:   parts[1],
		})
	}
	return envs, nil
}

// parseResponseData decodes resp.Data into a generic map. Non-JSON payloads
// are preserved under "raw", non-object JSON under "value", so callers never
// lose the body.
func parseResponseData(resp *APIResponse) map[string]interface{} {
	if resp == nil || len(resp.Data) == 0 {
		return nil
	}

	var generic interface{}
	if err := json.Unmarshal(resp.Data, &generic); err != nil {
		return map[string]interface{}{"raw": string(resp.Data)}
	}

	data, ok := generic.(map[string]interface{})
	if !ok {
		return map[string]interface{}{"value": generic}
	}

	// Some backends double-encode sub-payloads as JSON strings; inline them.
	normalizeEmbeddedJSON(data, "response")
	normalizeEmbeddedJSON(data, "result")
	return data
}

// normalizeEmbeddedJSON replaces data[key] with its parsed form when it
// holds a JSON-encoded string; otherwise it is left untouched.
func normalizeEmbeddedJSON(data map[string]interface{}, key string) {
	raw, ok := data[key].(string)
	if !ok || raw == "" {
		return
	}
	var parsed interface{}
	if err := json.Unmarshal([]byte(raw), &parsed); err == nil {
		data[key] = parsed
	}
}

// deepFindStringValue searches the tree for the first non-empty string under
// any of keys; direct hits on the current map win, then children are
// searched recursively.
// NOTE(review): map iteration order is random, so with multiple nested
// matches the winner may vary between runs — confirm that is acceptable for
// TargetApp detection.
func deepFindStringValue(data interface{}, keys ...string) string {
	switch value := data.(type) {
	case map[string]interface{}:
		for _, key := range keys {
			if s, ok := value[key].(string); ok && strings.TrimSpace(s) != "" {
				return strings.TrimSpace(s)
			}
		}
		for _, child := range value {
			if found := deepFindStringValue(child, keys...); found != "" {
				return found
			}
		}
	case []interface{}:
		for _, item := range value {
			if found := deepFindStringValue(item, keys...); found != "" {
				return found
			}
		}
	}
	return ""
}

// deepFindBoolValue is the bool analogue of deepFindStringValue; it also
// coerces "true"/"yes"/"1" and "false"/"no"/"0" strings. The second return
// reports whether any value was found.
func deepFindBoolValue(data interface{}, keys ...string) (bool, bool) {
	switch value := data.(type) {
	case map[string]interface{}:
		for _, key := range keys {
			if raw, ok := value[key]; ok {
				switch v := raw.(type) {
				case bool:
					return v, true
				case string:
					switch strings.ToLower(strings.TrimSpace(v)) {
					case "true", "yes", "1":
						return true, true
					case "false", "no", "0":
						return false, true
					}
				}
			}
		}
		for _, child := range value {
			if found, ok := deepFindBoolValue(child, keys...); ok {
				return found, true
			}
		}
	case []interface{}:
		for _, item := range value {
			if found, ok := deepFindBoolValue(item, keys...); ok {
				return found, true
			}
		}
	}
	return false, false
}
diff --git a/cli/cmd/ctl/market/delete.go b/cli/cmd/ctl/market/delete.go
new file mode 100644
index 000000000..698bd9899
--- /dev/null
+++ b/cli/cmd/ctl/market/delete.go
@@ -0,0 +1,86 @@
package market

import (
	"context"
	"fmt"
	"strings"

	"github.com/spf13/cobra"

	"github.com/beclab/Olares/cli/pkg/cmdutil"
)

// NewCmdMarketDelete wires `market delete {app-name}` (alias: del).
func NewCmdMarketDelete(f *cmdutil.Factory) *cobra.Command {
	opts := newMarketOptions(f)
	cmd := &cobra.Command{
		Use:     "delete {app-name}",
		Aliases: []string{"del"},
		Short:   "Delete a local app chart from the market source",
		Long: `Remove an app chart that was uploaded to a local source.
This does not uninstall the app if it is running.

Examples:
  olares-cli market delete myapp
  olares-cli market delete myapp --version 1.0.0`,
		Args: cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runDelete(opts, args[0])
		},
	}
	opts.addSourceFlag(cmd, "local source id to delete the chart from (auto-detected when omitted)")
	opts.addOutputFlags(cmd)
	opts.addVersionFlag(cmd)
	return cmd
}

// runDelete removes a chart version from a local source. An explicit
// --source must pass the local allowlist; when --version is omitted, the
// version currently present in the source is looked up.
func runDelete(opts *MarketOptions, appName string) error {
	mc, err := opts.prepare()
	if err != nil {
		return opts.failOp("delete", appName, err)
	}

	if s := strings.TrimSpace(opts.Source); s != "" {
		if err := validateLocalSource(s); err != nil {
			return opts.failOp("delete", appName, err)
		}
	}

	source := resolveLocalSource(opts)
	if strings.TrimSpace(opts.Source) == "" {
		opts.info("Using source: %s", source)
	}

	version := strings.TrimSpace(opts.Version)
	if version != "" {
		if err := validateVersion(version); err != nil {
			return opts.failOp("delete", appName, err)
		}
	} else {
		v, err := resolveVersionInSource(mc, appName, source)
		if err != nil {
			return opts.failOp("delete", appName, fmt.Errorf("cannot determine version in source '%s': %w (use --version to specify)", source, err))
		}
		version = v
		opts.info("Using version: %s", version)
	}

	opts.info("Deleting chart '%s' version '%s' from source '%s'...", appName, version, source)

	ctx := context.Background()
	if _, err := mc.DeleteLocalApp(ctx, appName, version, source); err != nil {
		return opts.failOp("delete", appName, err)
	}

	result := OperationResult{
		App:       appName,
		Operation: "delete",
		Status:    "success",
		Message:   fmt.Sprintf("version %s deleted from source '%s'", version, source),
		Source:    source,
		Version:   version,
	}
	if !opts.Quiet {
		opts.printResult(result)
	}
	return nil
}
diff --git a/cli/cmd/ctl/market/env.go b/cli/cmd/ctl/market/env.go
new file mode 100644
index 000000000..c0c700b1e
--- /dev/null
+++ b/cli/cmd/ctl/market/env.go
@@ -0,0 +1,242
@@
package market

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"
	"time"

	sysv1alpha1 "github.com/beclab/Olares/framework/app-service/api/sys.bytetrade.io/v1alpha1"
	appserviceapi "github.com/beclab/Olares/framework/app-service/pkg/apiserver/api"
)

// envValidationError aggregates the env problems reported for an app.
type envValidationError struct {
	AppName       string
	MissingValues []string
	MissingRefs   []string
	InvalidValues []string
}

func (e *envValidationError) Error() string {
	return formatEnvValidationError(e)
}

// decodeAppEnvSpecs converts a raw envs payload into typed specs via a JSON
// round-trip; nil on any decode problem (the payload is optional).
func decodeAppEnvSpecs(raw interface{}) []sysv1alpha1.AppEnvVar {
	if raw == nil {
		return nil
	}

	payload, err := json.Marshal(raw)
	if err != nil {
		return nil
	}

	var specs []sysv1alpha1.AppEnvVar
	if err := json.Unmarshal(payload, &specs); err != nil {
		return nil
	}
	return specs
}

// formatEnvValidationError renders the multi-section human-readable message
// plus a `market get` CTA for inspecting the declared envs.
func formatEnvValidationError(e *envValidationError) string {
	var b strings.Builder
	b.WriteString("environment variable requirements not met\n")

	if len(e.MissingValues) > 0 {
		b.WriteString(fmt.Sprintf("\n Missing required values: %s\n", strings.Join(e.MissingValues, ", ")))
	}

	if len(e.MissingRefs) > 0 {
		b.WriteString(fmt.Sprintf("\n Missing referenced values: %s\n", strings.Join(e.MissingRefs, ", ")))
	}

	if len(e.InvalidValues) > 0 {
		b.WriteString(fmt.Sprintf("\n Invalid values: %s\n", strings.Join(e.InvalidValues, ", ")))
	}

	b.WriteString("\nRun 'olares-cli market get ")
	b.WriteString(e.AppName)
	b.WriteString("' to inspect the declared envs, then use --env KEY=VALUE to provide or correct values.")
	return b.String()
}

// formatAppEnvDetails renders the "Envs:" section of `market get` output;
// "" when the app declares none.
func formatAppEnvDetails(specs []sysv1alpha1.AppEnvVar) string {
	if len(specs) == 0 {
		return ""
	}

	var b strings.Builder
	b.WriteString("Envs:\n")
	for _, spec := range specs {
		writeAppEnvDetail(&b, spec)
	}
	return strings.TrimRight(b.String(), "\n")
}

// writeAppEnvDetail appends one env spec's name, required/optional status,
// docs, default/reference, and constraints to b.
func writeAppEnvDetail(b *strings.Builder, spec sysv1alpha1.AppEnvVar) {
	status := "optional"
	if spec.Required {
		status = "required"
	}

	b.WriteString(fmt.Sprintf(" - %s", spec.EnvName))
	if spec.Type != "" {
		b.WriteString(fmt.Sprintf(" (%s, type: %s)", status, spec.Type))
	} else {
		b.WriteString(fmt.Sprintf(" (%s)", status))
	}
	b.WriteString("\n")

	if spec.Title != "" && spec.Title != spec.EnvName {
		b.WriteString(fmt.Sprintf(" title: %s\n", spec.Title))
	}
	if spec.Description != "" {
		b.WriteString(fmt.Sprintf(" description: %s\n", spec.Description))
	}
	// A ValueFrom reference takes precedence over a literal default.
	if spec.ValueFrom != nil && strings.TrimSpace(spec.ValueFrom.EnvName) != "" {
		b.WriteString(fmt.Sprintf(" referenced from: %s\n", spec.ValueFrom.EnvName))
	} else if spec.Default != "" {
		b.WriteString(fmt.Sprintf(" default: %s\n", spec.Default))
	}
	writeEnvConstraints(b, spec)
}

// writeEnvConstraints appends option lists (inline or remote) and the regex
// pattern, when declared.
func writeEnvConstraints(b *strings.Builder, spec sysv1alpha1.AppEnvVar) {
	if len(spec.Options) > 0 {
		b.WriteString(fmt.Sprintf(" options: %s\n", formatOptionsInline(spec.Options)))
	}
	if spec.RemoteOptions != "" {
		// Best-effort: show the fetched options when the endpoint answers
		// quickly; otherwise just print where they come from.
		remoteOpts, err := tryFetchRemoteOptions(spec.RemoteOptions)
		if err == nil && len(remoteOpts) > 0 {
			b.WriteString(fmt.Sprintf(" options (remote): %s\n", formatOptionsInline(remoteOpts)))
		} else {
			b.WriteString(fmt.Sprintf(" options: (fetch from) %s\n", spec.RemoteOptions))
		}
	}
	if spec.Regex != "" {
		b.WriteString(fmt.Sprintf(" pattern: %s\n", spec.Regex))
	}
}

// formatOptionsInline joins up to maxShow option values (with titles when
// they add information), then summarizes the remainder.
func formatOptionsInline(options []sysv1alpha1.EnvValueOptionItem) string {
	const maxShow = 10
	items := make([]string, 0, len(options))
	for i, opt := range options {
		if i >= maxShow {
			items = append(items, fmt.Sprintf("...
 and %d more", len(options)-maxShow))
			break
		}
		if opt.Title != "" && opt.Title != opt.Value {
			items = append(items, fmt.Sprintf("%s (%s)", opt.Value, opt.Title))
		} else {
			items = append(items, opt.Value)
		}
	}
	return strings.Join(items, ", ")
}

// tryFetchRemoteOptions GETs a declared options endpoint (http/https only,
// 5s timeout) and decodes the option list.
func tryFetchRemoteOptions(endpoint string) ([]sysv1alpha1.EnvValueOptionItem, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return nil, err
	}
	if u.Scheme != "http" && u.Scheme != "https" {
		return nil, fmt.Errorf("unsupported scheme: %s", u.Scheme)
	}
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get(endpoint)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return nil, fmt.Errorf("HTTP %d", resp.StatusCode)
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	var items []sysv1alpha1.EnvValueOptionItem
	if err := json.Unmarshal(body, &items); err != nil {
		return nil, err
	}
	return items, nil
}

// parseServerEnvError extracts env validation details from a failed API response.
// This handles the case where the market service wraps app-service's 422 response.
func parseServerEnvError(resp *APIResponse, appName string) *envValidationError {
	if resp == nil || len(resp.Data) == 0 {
		return nil
	}

	data := parseResponseData(resp)
	checkResult := extractServerEnvCheckResult(data)
	if checkResult == nil {
		return nil
	}

	result := &envValidationError{
		AppName:       appName,
		MissingValues: envNames(checkResult.MissingValues),
		MissingRefs:   envNames(checkResult.MissingRefs),
		InvalidValues: envNames(checkResult.InvalidValues),
	}

	// Payload was env-shaped but carried nothing actionable: no env error.
	if len(result.MissingValues) == 0 && len(result.MissingRefs) == 0 && len(result.InvalidValues) == 0 {
		return nil
	}
	return result
}

// extractServerEnvCheckResult digs an AppEnvCheckResult out of the (possibly
// wrapped) backend response payload; nil when absent or malformed.
func extractServerEnvCheckResult(data map[string]interface{}) *appserviceapi.AppEnvCheckResult {
	if data == nil {
		return nil
	}

	checkPayload := data
	if backendResp, ok := data["backend_response"].(map[string]interface{}); ok {
		backendData, ok := backendResp["data"].(map[string]interface{})
		if !ok {
			return nil
		}
		checkPayload = backendData
	}

	checkType, _ := checkPayload["type"].(string)
	if checkType != appserviceapi.CheckTypeAppEnv {
		return nil
	}

	// Some responses nest the result one level deeper under "Data".
	if nested, ok := checkPayload["Data"].(map[string]interface{}); ok {
		checkPayload = nested
	}

	// JSON round-trip coerces the generic map into the typed result.
	payload, err := json.Marshal(checkPayload)
	if err != nil {
		return nil
	}

	var result appserviceapi.AppEnvCheckResult
	if err := json.Unmarshal(payload, &result); err != nil {
		return nil
	}
	return &result
}

// envNames extracts the non-blank EnvName fields.
func envNames(envs []sysv1alpha1.AppEnvVar) []string {
	names := make([]string, 0, len(envs))
	for _, env := range envs {
		if strings.TrimSpace(env.EnvName) == "" {
			continue
		}
		names = append(names, env.EnvName)
	}
	return names
}
diff --git a/cli/cmd/ctl/market/get.go b/cli/cmd/ctl/market/get.go
new file mode 100644
index 000000000..764b785f1
--- /dev/null
+++ b/cli/cmd/ctl/market/get.go
@@ -0,0 +1,179 @@
package market

import (
	"context"
	"fmt"
	"os"
	"strings"

	"github.com/spf13/cobra"

"github.com/beclab/Olares/cli/pkg/cmdutil" +) + +func NewCmdMarketGet(f *cmdutil.Factory) *cobra.Command { + opts := newMarketOptions(f) + cmd := &cobra.Command{ + Use: "get {app-name}", + Aliases: []string{"info", "show"}, + Short: "Get detailed information about an app", + Long: `Get detailed information about an app from the market. + +Table output shows a curated summary. JSON output includes the full API response. + +Examples: + olares-cli market get firefox + olares-cli market get firefox -o json`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runGet(opts, args[0]) + }, + } + opts.addCommonFlags(cmd) + opts.addOutputFlags(cmd) + return cmd +} + +func runGet(opts *MarketOptions, appName string) error { + mc, err := opts.prepare() + if err != nil { + return opts.failOp("get", appName, err) + } + + source := resolveCatalogSource(opts) + if strings.TrimSpace(opts.Source) == "" { + opts.info("Using source: %s", source) + } + + ctx := context.Background() + appInfo, err := fetchAppInfo(ctx, mc, appName, source) + if err != nil { + return opts.failOp("get", appName, err) + } + + if opts.Quiet { + return nil + } + + if opts.isJSON() { + return opts.printJSON(appInfo) + } + + printAppDetail(appInfo, source) + return nil +} + +func printAppDetail(raw interface{}, source string) { + m, ok := raw.(map[string]interface{}) + if !ok { + fmt.Fprintf(os.Stdout, "%v\n", raw) + return + } + + name := getNestedString(m, "app_info", "app_entry", "name") + if name == "" { + name = getNestedString(m, "app_simple_info", "app_name") + } + + title := resolveI18nField(m, "app_info", "app_entry", "i18n", "title") + if title == "" { + title = extractLocalizedString(getNestedValue(m, "app_simple_info", "app_title")) + } + + version := getNestedString(m, "version") + cloneable := appSupportsClone(m) + + description := resolveI18nField(m, "app_info", "app_entry", "i18n", "description") + developer := getNestedString(m, "app_info", "app_entry", 
"developer") + cfgType := getNestedString(m, "app_info", "app_entry", "cfgType") + + var categories []string + if cats, ok := getNestedValue(m, "app_info", "app_entry", "categories").([]interface{}); ok { + for _, c := range cats { + if s, ok := c.(string); ok { + categories = append(categories, s) + } + } + } + + var entrances []string + if ents, ok := getNestedValue(m, "app_info", "app_entry", "entrances").([]interface{}); ok { + for _, e := range ents { + if em, ok := e.(map[string]interface{}); ok { + eName, _ := em["name"].(string) + eTitle, _ := em["title"].(string) + eHost, _ := em["host"].(string) + ePort, _ := em["port"].(float64) + label := eName + if eTitle != "" && eTitle != eName { + label = fmt.Sprintf("%s (%s)", eTitle, eName) + } + entrances = append(entrances, fmt.Sprintf("%s -> %s:%d", label, eHost, int(ePort))) + } + } + } + envSpecs := decodeAppEnvSpecs(getNestedValue(m, "raw_data", "envs")) + + fmt.Printf("Name: %s\n", name) + if title != "" { + fmt.Printf("Title: %s\n", title) + } + if version != "" { + fmt.Printf("Version: %s\n", version) + } + fmt.Printf("Source: %s\n", source) + fmt.Printf("Cloneable: %t\n", cloneable) + if cfgType != "" { + fmt.Printf("Type: %s\n", cfgType) + } + if developer != "" { + fmt.Printf("Developer: %s\n", developer) + } + if len(categories) > 0 { + fmt.Printf("Categories: %s\n", strings.Join(categories, ", ")) + } + if len(entrances) > 0 { + fmt.Println("Entrances:") + for _, e := range entrances { + fmt.Printf(" - %s\n", e) + } + } + if details := formatAppEnvDetails(envSpecs); details != "" { + fmt.Println(details) + } + if description != "" { + if len(description) > 200 { + description = description[:200] + "..." 
+ } + fmt.Printf("Description: %s\n", strings.TrimSpace(description)) + } +} + +func resolveI18nField(m map[string]interface{}, path ...string) string { + if len(path) < 2 { + return "" + } + fieldName := path[len(path)-1] + i18nPath := append(path[:len(path)-1], "i18n") + + i18n := getNestedValue(m, i18nPath...) + i18nMap, ok := i18n.(map[string]interface{}) + if !ok { + return "" + } + + for _, locale := range []string{"en-US", "en", "zh-CN"} { + localeData, ok := i18nMap[locale].(map[string]interface{}) + if !ok { + continue + } + meta, ok := localeData["metadata"].(map[string]interface{}) + if !ok { + continue + } + if s, ok := meta[fieldName].(string); ok && s != "" { + return s + } + } + return "" +} diff --git a/cli/cmd/ctl/market/install.go b/cli/cmd/ctl/market/install.go new file mode 100644 index 000000000..5f9b494a0 --- /dev/null +++ b/cli/cmd/ctl/market/install.go @@ -0,0 +1,83 @@ +package market + +import ( + "context" + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/cmdutil" +) + +func NewCmdMarketInstall(f *cmdutil.Factory) *cobra.Command { + opts := newMarketOptions(f) + cmd := &cobra.Command{ + Use: "install {app-name}", + Short: "Install an app", + Long: `Install an application from a market source. + +If --version is not specified, the latest available version is used. +For apps that declare environment variables, use --env to provide values. 
+ +Examples: + olares-cli market install firefox + olares-cli market install myapp --version 1.0.0 -s market.olares + olares-cli market install myapp --env API_KEY=abc123 --env REGION=us-east`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runInstall(opts, args[0]) + }, + } + opts.addCommonFlags(cmd) + opts.addOutputFlags(cmd) + opts.addVersionFlag(cmd) + opts.addEnvFlag(cmd) + opts.addWatchFlags(cmd) + return cmd +} + +func runInstall(opts *MarketOptions, appName string) error { + mc, err := opts.prepare() + if err != nil { + return opts.failOp("install", appName, err) + } + + source := resolveCatalogSource(opts) + if strings.TrimSpace(opts.Source) == "" { + opts.info("Using source: %s", source) + } + + version := strings.TrimSpace(opts.Version) + if version != "" { + if err := validateVersion(version); err != nil { + return opts.failOp("install", appName, err) + } + } else { + v, err := resolveVersionInSource(mc, appName, source) + if err != nil { + return opts.failOp("install", appName, fmt.Errorf("cannot determine version in source '%s': %w (use --version to specify)", source, err)) + } + version = v + opts.info("Using latest version: %s", version) + } + + envs, err := parseEnvFlags(opts.Envs) + if err != nil { + return opts.failOp("install", appName, err) + } + + opts.info("Installing '%s' version '%s' from '%s' for user '%s'...", appName, version, source, mc.olaresID) + + ctx := context.Background() + resp, err := mc.InstallApp(ctx, appName, version, source, envs) + if err != nil { + if envErr := parseServerEnvError(resp, appName); envErr != nil { + return opts.failOp("install", appName, envErr) + } + return opts.failOp("install", appName, err) + } + + result := newOperationResult(mc, "install", appName, source, version, fmt.Sprintf("install requested for version %s", version), resp) + return runWithWatch(opts, mc, result, newWatchTarget(watchInstall, appName, source)) +} diff --git a/cli/cmd/ctl/market/list.go 
b/cli/cmd/ctl/market/list.go
new file mode 100644
index 000000000..d7b8f1263
--- /dev/null
+++ b/cli/cmd/ctl/market/list.go
@@ -0,0 +1,247 @@
+package market
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"sort"
+	"strings"
+	"text/tabwriter"
+
+	"github.com/spf13/cobra"
+
+	"github.com/beclab/Olares/cli/pkg/cmdutil"
+)
+
+// NewCmdMarketList wires the `market list` verb: a read-only catalog view
+// scoped to one source by default, all sources with -a.
+func NewCmdMarketList(f *cmdutil.Factory) *cobra.Command {
+	opts := newMarketOptions(f)
+	cmd := &cobra.Command{
+		Use:     "list",
+		Aliases: []string{"ls", "l"},
+		Short:   "List apps from market sources",
+		Long: `List available apps from market sources.
+
+By default the CLI auto-selects a source from market settings. Use -s to choose
+one explicitly, or -a to include all sources.
+
+Examples:
+  olares-cli market list
+  olares-cli market list -s market.olares
+  olares-cli market list -a
+  olares-cli market list -c AI
+  olares-cli market list -o json`,
+		Args: cobra.NoArgs,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return runList(opts)
+		},
+	}
+	opts.addCommonFlags(cmd)
+	opts.addOutputFlags(cmd)
+	opts.addAllSourcesFlag(cmd)
+	cmd.Flags().StringVarP(&opts.Category, "category", "c", "", "filter by category")
+	return cmd
+}
+
+// NewCmdMarketCategories wires the `market categories` verb: category names
+// with per-category app counts, derived from the same catalog data as list.
+func NewCmdMarketCategories(f *cmdutil.Factory) *cobra.Command {
+	opts := newMarketOptions(f)
+	cmd := &cobra.Command{
+		Use:     "categories",
+		Aliases: []string{"cats"},
+		Short:   "List available app categories",
+		Long: `List app categories with counts from market sources.
+
+Examples:
+  olares-cli market categories
+  olares-cli market categories -a
+  olares-cli market categories -o json`,
+		Args: cobra.NoArgs,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return runListCategories(opts)
+		},
+	}
+	opts.addCommonFlags(cmd)
+	opts.addOutputFlags(cmd)
+	opts.addAllSourcesFlag(cmd)
+	return cmd
+}
+
+// fetchApps pulls the full market snapshot once and flattens it into a
+// deterministically sorted slice of display rows. When showAll is false,
+// only rows from the given source are kept.
+func fetchApps(mc *MarketClient, source string, showAll bool) ([]AppDisplayInfo, error) {
+	ctx := context.Background()
+	resp, err := mc.GetMarketData(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get market data: %w", err)
+	}
+
+	var data MarketDataResponse
+	if err := json.Unmarshal(resp.Data, &data); err != nil {
+		return nil, fmt.Errorf("failed to parse market data: %w", err)
+	}
+
+	if data.UserData == nil {
+		return nil, nil
+	}
+
+	var apps []AppDisplayInfo
+	for sourceName, sourceData := range data.UserData.Sources {
+		if !showAll && sourceName != source {
+			continue
+		}
+		if sourceData == nil {
+			continue
+		}
+		for _, item := range sourceData.AppInfoLatest {
+			info := extractAppDisplayInfo(item, sourceName)
+			if info != nil {
+				apps = append(apps, *info)
+			}
+		}
+	}
+
+	// Map iteration order is random; sort by source, then name, then
+	// version so repeated runs produce identical output.
+	sort.Slice(apps, func(i, j int) bool {
+		if apps[i].Source == apps[j].Source {
+			if apps[i].Name == apps[j].Name {
+				return apps[i].Version < apps[j].Version
+			}
+			return apps[i].Name < apps[j].Name
+		}
+		return apps[i].Source < apps[j].Source
+	})
+
+	return apps, nil
+}
+
+// listSourceFilter resolves the effective source filter shared by the
+// list-style verbs: empty string (no filter) when --all-sources is set,
+// otherwise the explicit or auto-detected source, announcing the
+// auto-detection on stderr.
+func listSourceFilter(opts *MarketOptions) string {
+	if opts.AllSources {
+		return ""
+	}
+	source := resolveCatalogSource(opts)
+	if strings.TrimSpace(opts.Source) == "" {
+		opts.info("Using source: %s", source)
+	}
+	return source
+}
+
+// runList fetches, filters, and renders the app catalog per the output flags.
+func runList(opts *MarketOptions) error {
+	mc, err := opts.prepare()
+	if err != nil {
+		return opts.failOp("list", "", err)
+	}
+
+	source := listSourceFilter(opts)
+
+	apps, err := fetchApps(mc, source, opts.AllSources)
+	if err != nil {
+		return opts.failOp("list", "", err)
+	}
+
+	category := strings.TrimSpace(opts.Category)
+	if category != "" {
+		apps = filterByCategory(apps, category)
+	}
+
+	if opts.Quiet {
+		return nil
+	}
+
+	if opts.isJSON() {
+		return opts.printJSON(apps)
+	}
+
+	if len(apps) == 0 {
+		if category != "" {
+			fmt.Fprintf(os.Stderr, "No apps found in category '%s'\n", category)
+		} else if source != "" {
+			fmt.Fprintf(os.Stderr, "No apps found in source '%s'\n", source)
+		} else {
+			fmt.Fprintln(os.Stderr, "No apps found")
+		}
+		return nil
+	}
+
+	w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0)
+	if !opts.NoHeaders {
+		fmt.Fprintln(w, "NAME\tTITLE\tVERSION\tSOURCE\tCATEGORIES")
+	}
+	for _, a := range apps {
+		fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
+			a.Name, a.Title, a.Version, a.Source, strings.Join(a.Categories, ", "))
+	}
+	w.Flush()
+
+	// The summary goes to stderr so piped stdout stays machine-parseable.
+	if !opts.NoHeaders {
+		fmt.Fprintf(os.Stderr, "\nTotal: %d app(s)\n", len(apps))
+	}
+	return nil
+}
+
+// runListCategories aggregates per-category app counts from the same
+// catalog data runList uses and renders them per the output flags.
+func runListCategories(opts *MarketOptions) error {
+	mc, err := opts.prepare()
+	if err != nil {
+		return opts.failOp("categories", "", err)
+	}
+
+	source := listSourceFilter(opts)
+
+	apps, err := fetchApps(mc, source, opts.AllSources)
+	if err != nil {
+		return opts.failOp("categories", "", err)
+	}
+
+	counts := map[string]int{}
+	for _, a := range apps {
+		for _, c := range a.Categories {
+			counts[c]++
+		}
+	}
+
+	if opts.Quiet {
+		return nil
+	}
+
+	if opts.isJSON() {
+		return opts.printJSON(counts)
+	}
+
+	if len(counts) == 0 {
+		fmt.Fprintln(os.Stderr, "No categories found")
+		return nil
+	}
+
+	type catRow struct {
+		Name  string
+		Count int
+	}
+	var rows []catRow
+	for name, count := range counts {
+		rows = append(rows, catRow{name, count})
+	}
+	sort.Slice(rows, func(i, j int) bool {
+		return rows[i].Name < rows[j].Name
+	})
+
+	w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0)
+	if !opts.NoHeaders {
+		fmt.Fprintln(w, "CATEGORY\tAPPS")
+	}
+	for _, r := range rows {
+		fmt.Fprintf(w, "%s\t%d\n", r.Name, r.Count)
+	}
+	w.Flush()
+	return nil
+}
+
+// filterByCategory returns the apps whose category list contains category,
+// compared case-insensitively.
+func filterByCategory(apps []AppDisplayInfo, category string) []AppDisplayInfo {
+	var result []AppDisplayInfo
+	for _, a := range apps {
+		for _, c := range a.Categories {
+			if strings.EqualFold(c, category) {
+				result = append(result, a)
+				break
+			}
+		}
+	}
+	return result
+}
diff --git a/cli/cmd/ctl/market/options.go b/cli/cmd/ctl/market/options.go
new file mode 100644
index 000000000..1b06913c7
--- /dev/null
+++ b/cli/cmd/ctl/market/options.go
@@ -0,0 +1,230 @@
+package market
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/spf13/cobra"
+
+	"github.com/beclab/Olares/cli/pkg/cmdutil"
+)
+
+// errReported signals "the failure was already printed; just exit non-zero".
+var errReported = errors.New("(already reported)")
+
+// MarketOptions is the per-command shared option bag. Identity (--user) and
+// transport (--host, --kubeconfig) flags from the legacy `app` tree are gone:
+// they are replaced by the global `--profile` flag wired through
+// cmdutil.Factory, exactly the way `olares-cli files` resolves identity.
+type MarketOptions struct {
+	factory *cmdutil.Factory
+
+	// Source still varies per command (some accept --source / -s; some don't),
+	// so it stays a per-command flag.
+	Source string
+
+	Output    string
+	Quiet     bool
+	NoHeaders bool
+
+	Version        string
+	AllSources     bool
+	Cascade        bool
+	Category       string
+	Envs           []string
+	EntranceTitles []string
+	DeleteData     bool
+	Title          string
+
+	// Watch-mode flags. Off by default so today's "fire and forget"
+	// scripts keep their current exit semantics; opt in per invocation.
+	// Defaults (15m / 2s) match the SPA's effective polling cadence and
+	// give enough headroom for the slowest install paths (image pulls)
+	// without inviting infinite hangs in CI.
+	Watch         bool
+	WatchTimeout  time.Duration
+	WatchInterval time.Duration
+}
+
+// newMarketOptions seeds MarketOptions with the factory the parent command
+// was constructed with. Default Output stays "table" to preserve current
+// behavior across all subcommands.
+func newMarketOptions(f *cmdutil.Factory) *MarketOptions { + return &MarketOptions{factory: f, Output: "table"} +} + +func (o *MarketOptions) isJSON() bool { + return strings.EqualFold(strings.TrimSpace(o.Output), "json") +} + +// info prints an informational message to stderr. +// Suppressed in JSON and quiet modes. +func (o *MarketOptions) info(format string, args ...interface{}) { + if o.Quiet || o.isJSON() { + return + } + fmt.Fprintf(os.Stderr, format+"\n", args...) +} + +func (o *MarketOptions) addSourceFlag(cmd *cobra.Command, desc string) { + if desc == "" { + desc = "market source id (auto-detected when omitted)" + } + cmd.Flags().StringVarP(&o.Source, "source", "s", "", desc) +} + +// addCommonFlags wires the flags shared by source-aware commands. After +// dropping --user/--host/--kubeconfig (replaced by the global --profile), +// "common" effectively means just the source selector. +func (o *MarketOptions) addCommonFlags(cmd *cobra.Command) { + o.addSourceFlag(cmd, "") +} + +func (o *MarketOptions) addOutputFlags(cmd *cobra.Command) { + cmd.Flags().StringVarP(&o.Output, "output", "o", "table", "output format: table, json") + cmd.Flags().BoolVarP(&o.Quiet, "quiet", "q", false, "suppress output; exit code indicates success/failure") + cmd.Flags().BoolVar(&o.NoHeaders, "no-headers", false, "omit table headers (useful for scripting)") +} + +func (o *MarketOptions) addVersionFlag(cmd *cobra.Command) { + cmd.Flags().StringVar(&o.Version, "version", "", "app version (default: latest available)") +} + +func (o *MarketOptions) addAllSourcesFlag(cmd *cobra.Command) { + cmd.Flags().BoolVarP(&o.AllSources, "all-sources", "a", false, "include apps from all sources") +} + +func (o *MarketOptions) addCascadeFlag(cmd *cobra.Command) { + cmd.Flags().BoolVar(&o.Cascade, "cascade", false, "apply to all sub-charts (for v2 multi-chart apps)") +} + +func (o *MarketOptions) addEnvFlag(cmd *cobra.Command) { + cmd.Flags().StringSliceVar(&o.Envs, "env", nil, "set env var in 
KEY=VALUE format (repeatable)") +} + +func (o *MarketOptions) addEntranceTitleFlag(cmd *cobra.Command) { + cmd.Flags().StringSliceVar(&o.EntranceTitles, "entrance-title", nil, "set cloned entrance title in NAME=TITLE format (repeatable)") +} + +func (o *MarketOptions) addDeleteDataFlag(cmd *cobra.Command) { + cmd.Flags().BoolVar(&o.DeleteData, "delete-data", false, "delete persistent data when uninstalling") +} + +func (o *MarketOptions) addTitleFlag(cmd *cobra.Command) { + cmd.Flags().StringVar(&o.Title, "title", "", "display title for the cloned app instance") +} + +// addWatchFlags exposes --watch / --watch-timeout / --watch-interval on +// every lifecycle-mutating verb. They are deliberately attached one-by-one +// (rather than baked into addCommonFlags) because read-only verbs like +// `list` / `get` / `status` have no use for them and we don't want them +// showing up in those help blurbs. +func (o *MarketOptions) addWatchFlags(cmd *cobra.Command) { + cmd.Flags().BoolVarP(&o.Watch, "watch", "w", false, + "wait until the app reaches a terminal state (success or failure) before exiting") + cmd.Flags().DurationVar(&o.WatchTimeout, "watch-timeout", 15*time.Minute, + "maximum total time to wait when --watch is set (e.g. 15m, 1h)") + cmd.Flags().DurationVar(&o.WatchInterval, "watch-interval", 2*time.Second, + "polling interval when --watch is set (e.g. 2s, 5s)") +} + +// prepare resolves the active profile and returns a ready-to-use MarketClient +// pointed at <MarketURL>/app-store/api/v2. Auth is handled transparently by +// the Factory's authTransport (X-Authorization header injection). +// +// Background context is fine here: ResolveProfile reads from the local +// credential store and HTTPClient builds the http.Client lazily; neither is a +// long-running call in practice. Per-call I/O context is set by the run* +// callers when invoking client methods. 
+func (o *MarketOptions) prepare() (*MarketClient, error) { + if o.factory == nil { + return nil, fmt.Errorf("internal error: market options not wired with cmdutil.Factory") + } + + ctx := context.Background() + rp, err := o.factory.ResolveProfile(ctx) + if err != nil { + return nil, err + } + hc, err := o.factory.HTTPClient(ctx) + if err != nil { + return nil, err + } + return NewMarketClient(hc, rp, strings.TrimSpace(o.Source)), nil +} + +func (o *MarketOptions) failOp(op, app string, err error) error { + if o.Quiet { + return errReported + } + result := OperationResult{ + App: app, + Operation: op, + Status: "failed", + Message: err.Error(), + } + o.printResult(result) + return errReported +} + +func (o *MarketOptions) printJSON(v interface{}) error { + encoder := json.NewEncoder(os.Stdout) + encoder.SetIndent("", " ") + return encoder.Encode(v) +} + +func (o *MarketOptions) printResult(result OperationResult) { + if o.Quiet { + return + } + if o.isJSON() { + if err := o.printJSON(result); err != nil { + fmt.Fprintf(os.Stderr, "failed to encode JSON output: %v\n", err) + } + return + } + + writer := os.Stdout + appLabel := result.App + if result.TargetApp != "" && result.TargetApp != result.App { + appLabel = fmt.Sprintf("%s -> %s", result.App, result.TargetApp) + } + + message := strings.TrimSpace(result.Message) + if message == "" { + switch result.Status { + case "accepted": + message = "request accepted" + case "success": + message = "completed successfully" + case "failed": + message = "request failed" + default: + message = "completed" + } + } + + if result.Status == "failed" { + writer = os.Stderr + fmt.Fprintf(writer, "%s '%s' failed: %s\n", result.Operation, appLabel, message) + } else { + fmt.Fprintf(writer, "%s '%s': %s\n", result.Operation, appLabel, message) + } + + if result.Source != "" { + fmt.Fprintf(writer, " source: %s\n", result.Source) + } + if result.Version != "" { + fmt.Fprintf(writer, " version: %s\n", result.Version) + } + if 
result.State != "" { + fmt.Fprintf(writer, " state: %s\n", result.State) + } + if result.Progress != "" && result.Progress != "0.00" { + fmt.Fprintf(writer, " progress: %s\n", result.Progress) + } +} diff --git a/cli/cmd/ctl/market/resume.go b/cli/cmd/ctl/market/resume.go new file mode 100644 index 000000000..3092df1d8 --- /dev/null +++ b/cli/cmd/ctl/market/resume.go @@ -0,0 +1,47 @@ +package market + +import ( + "context" + + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/cmdutil" +) + +func NewCmdMarketResume(f *cmdutil.Factory) *cobra.Command { + opts := newMarketOptions(f) + cmd := &cobra.Command{ + Use: "resume {app-name}", + Aliases: []string{"start"}, + Short: "Resume a stopped app", + Long: `Resume a stopped (suspended) application. + +Examples: + olares-cli market resume myapp`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runResume(opts, args[0]) + }, + } + opts.addOutputFlags(cmd) + opts.addWatchFlags(cmd) + return cmd +} + +func runResume(opts *MarketOptions, appName string) error { + mc, err := opts.prepare() + if err != nil { + return opts.failOp("resume", appName, err) + } + + opts.info("Resuming '%s' for user '%s'...", appName, mc.olaresID) + + ctx := context.Background() + resp, err := mc.ResumeApp(ctx, appName) + if err != nil { + return opts.failOp("resume", appName, err) + } + + result := newOperationResult(mc, "resume", appName, "", "", "resume requested", resp) + return runWithWatch(opts, mc, result, newWatchTarget(watchResume, appName, opts.Source)) +} diff --git a/cli/cmd/ctl/market/root.go b/cli/cmd/ctl/market/root.go new file mode 100644 index 000000000..d91bb5836 --- /dev/null +++ b/cli/cmd/ctl/market/root.go @@ -0,0 +1,55 @@ +package market + +import ( + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/cmdutil" +) + +// NewMarketCommand assembles the `olares-cli market` subtree. 
Identity (which +// Olares user) and transport (which cluster) are resolved from the global +// `--profile` flag via cmdutil.Factory rather than per-command flags, so this +// tree intentionally diverges from `cli/cmd/ctl/app` (which still discovers +// the cluster via kubeconfig + X-Bfl-User). The two trees can therefore be +// reviewed side-by-side; once `market` is GA the `app` tree should retire. +// +// Note: the legacy `sync` verb is intentionally not exposed here. It depends +// on chart-repo-service which is only reachable from inside the cluster +// network, not via the user-subdomain edge route the rest of the market API +// goes through. +func NewMarketCommand(f *cmdutil.Factory) *cobra.Command { + cmd := &cobra.Command{ + Use: "market", + Short: "Manage Olares applications via the per-user Market API", + Long: `Manage applications through the Olares Market app-store API. + +This command tree is the profile-based parallel of "olares-cli app": same +verbs (install / upgrade / uninstall / list / status / clone / upload / ...), +but identity and the API endpoint are resolved from the active profile +(--profile) instead of from kubeconfig + --user. 
+ +Authentication uses the access token from "olares-cli profile login" and the +same edge auth chain the Olares web app uses (Authelia + l4-bfl-proxy).`, + } + cmd.SilenceErrors = true + cmd.SilenceUsage = true + cmd.PersistentPreRun = func(c *cobra.Command, args []string) { + c.SilenceErrors = true + c.SilenceUsage = true + } + + cmd.AddCommand(NewCmdMarketList(f)) + cmd.AddCommand(NewCmdMarketCategories(f)) + cmd.AddCommand(NewCmdMarketGet(f)) + cmd.AddCommand(NewCmdMarketInstall(f)) + cmd.AddCommand(NewCmdMarketUninstall(f)) + cmd.AddCommand(NewCmdMarketUpgrade(f)) + cmd.AddCommand(NewCmdMarketClone(f)) + cmd.AddCommand(NewCmdMarketCancel(f)) + cmd.AddCommand(NewCmdMarketStop(f)) + cmd.AddCommand(NewCmdMarketResume(f)) + cmd.AddCommand(NewCmdMarketUpload(f)) + cmd.AddCommand(NewCmdMarketDelete(f)) + cmd.AddCommand(NewCmdMarketStatus(f)) + return cmd +} diff --git a/cli/cmd/ctl/market/status.go b/cli/cmd/ctl/market/status.go new file mode 100644 index 000000000..df13e3903 --- /dev/null +++ b/cli/cmd/ctl/market/status.go @@ -0,0 +1,407 @@ +package market + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "sort" + "strings" + "text/tabwriter" + + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/cmdutil" +) + +func NewCmdMarketStatus(f *cmdutil.Factory) *cobra.Command { + opts := newMarketOptions(f) + cmd := &cobra.Command{ + Use: "status [app-name]", + Aliases: []string{"stat", "st"}, + Short: "Show runtime status of installed apps", + Long: `Show runtime status of installed apps. + +If an app name is provided, shows detailed status for that app only. +Without an app name, lists status of all installed apps. 
+ +Examples: + olares-cli market status + olares-cli market status myapp + olares-cli market status -a + olares-cli market status -o json`, + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 1 { + return runStatusSingle(opts, args[0]) + } + return runStatusAll(opts) + }, + } + opts.addCommonFlags(cmd) + opts.addOutputFlags(cmd) + opts.addAllSourcesFlag(cmd) + // `status <app> --watch` is the "I forgot to pass --watch on install" + // recovery path: poll until the app reaches a terminal state without + // having to re-run status by hand. The flags are also accepted on + // the all-apps form (no app name), but we explicitly reject that + // combination in runStatusAll so the error message is actionable. + opts.addWatchFlags(cmd) + return cmd +} + +type statusRow struct { + Name string `json:"name"` + State string `json:"state"` + OpType string `json:"opType,omitempty"` + Progress string `json:"progress,omitempty"` + CfgType string `json:"cfgType,omitempty"` + Message string `json:"message,omitempty"` + Source string `json:"source"` +} + +func parseStatusRows(resp *APIResponse, source string, showAll bool) ([]statusRow, error) { + var data MarketStateResponse + if err := json.Unmarshal(resp.Data, &data); err != nil { + return nil, fmt.Errorf("failed to parse state data: %w", err) + } + + if data.UserData == nil { + return nil, nil + } + + var rows []statusRow + for sourceName, sourceData := range data.UserData.Sources { + sourceName = strings.TrimSpace(sourceName) + if sourceName == "" { + continue + } + if sourceData == nil { + continue + } + for _, appState := range sourceData.AppStateLatest { + name := appState.Status.Name + if name == "" { + name = appState.Status.RawName + } + if name == "" { + continue + } + if !showAll && sourceName != source { + continue + } + progress := appState.Status.Progress + if progress == "" || progress == "0.00" { + progress = "-" + } + rows = append(rows, statusRow{ + Name: name, 
+ State: appState.Status.State, + OpType: appState.Status.OpType, + Progress: progress, + CfgType: appState.Status.CfgType, + Message: appState.Status.Message, + Source: sourceName, + }) + } + } + + sort.Slice(rows, func(i, j int) bool { + if rows[i].Source == rows[j].Source { + return rows[i].Name < rows[j].Name + } + return rows[i].Source < rows[j].Source + }) + + return rows, nil +} + +// describeOtherSources renders a short summary of where the user does have +// installed apps, used when the active source filter has hidden everything. +// We list distinct source names verbatim when there are at most three of +// them (typical home cluster) and fall back to a count otherwise. +func describeOtherSources(rows []statusRow) string { + seen := make(map[string]struct{}, len(rows)) + var sources []string + for _, r := range rows { + s := strings.TrimSpace(r.Source) + if s == "" { + continue + } + if _, ok := seen[s]; ok { + continue + } + seen[s] = struct{}{} + sources = append(sources, s) + } + sort.Strings(sources) + + switch { + case len(rows) == 1: + return fmt.Sprintf("1 installed in %q", firstNonEmpty(sources, "another source")) + case len(sources) == 0: + return fmt.Sprintf("%d installed in other sources", len(rows)) + case len(sources) <= 3: + quoted := make([]string, len(sources)) + for i, s := range sources { + quoted[i] = fmt.Sprintf("%q", s) + } + return fmt.Sprintf("%d installed in %s", len(rows), strings.Join(quoted, ", ")) + default: + return fmt.Sprintf("%d installed across %d other sources", len(rows), len(sources)) + } +} + +func firstNonEmpty(values []string, fallback string) string { + for _, v := range values { + if strings.TrimSpace(v) != "" { + return v + } + } + return fallback +} + +func runStatusAll(opts *MarketOptions) error { + if opts.Watch { + // All-apps watch has no obvious terminal: every app may be in a + // different lifecycle. We require the user to pin a specific + // app so the wait condition is well-defined. 
+ return opts.failOp("status", "", + fmt.Errorf("--watch requires an app name (e.g. 'olares-cli market status <app-name> --watch')")) + } + mc, err := opts.prepare() + if err != nil { + return opts.failOp("status", "", err) + } + + ctx := context.Background() + resp, err := mc.GetMarketState(ctx) + if err != nil { + return opts.failOp("status", "", fmt.Errorf("failed to get app status: %w", err)) + } + + source := "" + if !opts.AllSources { + source = resolveCatalogSource(opts) + if strings.TrimSpace(opts.Source) == "" { + opts.info("Filtering installed apps by source '%s' (use -a for all sources)", source) + } + } + + rows, err := parseStatusRows(resp, source, opts.AllSources) + if err != nil { + return opts.failOp("status", "", err) + } + + if opts.Quiet { + return nil + } + + if len(rows) == 0 { + if opts.isJSON() { + return opts.printJSON([]statusRow{}) + } + // If the source filter hid everything, peek at the unfiltered set so + // we can tell the user "you have N installs, just not in this + // source" rather than implying nothing is installed at all. 
+ if source != "" { + if allRows, parseErr := parseStatusRows(resp, "", true); parseErr == nil && len(allRows) > 0 { + fmt.Fprintf(os.Stderr, "No installed apps in source '%s' (%s; run with -a to include them)\n", + source, describeOtherSources(allRows)) + return nil + } + fmt.Fprintf(os.Stderr, "No installed apps found in source '%s'\n", source) + } else { + fmt.Fprintln(os.Stderr, "No installed apps found") + } + return nil + } + + if opts.isJSON() { + return opts.printJSON(rows) + } + + w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0) + if !opts.NoHeaders { + fmt.Fprintln(w, "NAME\tSTATE\tOPERATION\tPROGRESS\tSOURCE") + } + for _, r := range rows { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", r.Name, r.State, r.OpType, r.Progress, r.Source) + } + w.Flush() + return nil +} + +func runStatusSingle(opts *MarketOptions, appName string) error { + mc, err := opts.prepare() + if err != nil { + return opts.failOp("status", appName, err) + } + + ctx := context.Background() + resp, err := mc.GetMarketState(ctx) + if err != nil { + return opts.failOp("status", appName, fmt.Errorf("failed to get app status: %w", err)) + } + + source := "" + if !opts.AllSources { + source = resolveCatalogSource(opts) + if strings.TrimSpace(opts.Source) == "" { + opts.info("Filtering installed apps by source '%s' (use -a for all sources)", source) + } + } + + rows, err := parseStatusRows(resp, source, opts.AllSources) + if err != nil { + return opts.failOp("status", appName, err) + } + + var matches []statusRow + for _, row := range rows { + if row.Name == appName { + matches = append(matches, row) + } + } + + // When the scoped scan misses, fall back to a global scan so an app + // installed under a non-default source (e.g. cli/upload/studio) still + // surfaces. We track whether this fallback fired so the renderer can + // nudge the user about why the row's SOURCE column differs from the + // filter they passed. 
+ fallbackHit := false + if len(matches) == 0 { + allRows, parseErr := parseStatusRows(resp, "", true) + if parseErr == nil { + for _, row := range allRows { + if row.Name == appName { + matches = append(matches, row) + } + } + fallbackHit = len(matches) > 0 + } + } + + if len(matches) == 0 { + // Both scans came up empty — the app simply isn't installed. + // The previous "not found in source 'X'" wording read like a + // catalog/source-filter problem; this CTA points users at the + // actual fix instead. + if source != "" { + return opts.failOp("status", appName, + fmt.Errorf("app '%s' is not installed (run 'olares-cli market install %s' to install it)", appName, appName)) + } + return opts.failOp("status", appName, fmt.Errorf("app '%s' is not installed", appName)) + } + + if fallbackHit && source != "" && matches[0].Source != source { + opts.info("App is installed under source '%s' (not '%s'); showing that record.", matches[0].Source, source) + } + + if opts.Watch { + // Hand off to the watch loop. Watch always pins the first match + // (status doesn't really make sense across multiple sources for + // the same app name) and tracks its source so a row that lives + // outside the default catalog still resolves correctly. + return runStatusWatch(opts, mc, appName, matches[0]) + } + + if opts.Quiet { + return nil + } + + return renderStatusMatches(opts, matches) +} + +// renderStatusMatches is the shared output renderer used by both the +// one-shot status path and the post-watch path. Behavior matches the +// pre-refactor code: JSON for `-o json` (single object unless `-a` and +// multiple matches), human-readable detail block otherwise. 
+func renderStatusMatches(opts *MarketOptions, matches []statusRow) error { + if len(matches) == 0 { + return nil + } + + if opts.isJSON() { + if opts.AllSources && len(matches) > 1 { + return opts.printJSON(matches) + } + return opts.printJSON(matches[0]) + } + + for idx, match := range matches { + if idx > 0 { + fmt.Println() + } + fmt.Printf("App: %s\n", match.Name) + fmt.Printf("Source: %s\n", match.Source) + fmt.Printf("State: %s\n", match.State) + if match.OpType != "" { + fmt.Printf("Operation: %s\n", match.OpType) + } + // parseStatusRows maps empty/0.00 to "-"; the watch path may + // also synthesize rows with Progress unset, so suppress both. + if match.Progress != "-" && match.Progress != "" { + fmt.Printf("Progress: %s\n", match.Progress) + } + if match.Message != "" { + fmt.Printf("Message: %s\n", match.Message) + } + if !opts.AllSources { + break + } + } + return nil +} + +// runStatusWatch polls the per-user market state until the row reaches a +// terminal classification (per watchStatus) or the deadline / Ctrl-C +// fires, then renders the latest known row through the same path the +// one-shot status command uses. Failure / timeout still render the row so +// JSON consumers see the structured state, but the process exits non-zero +// via errReported. 
+func runStatusWatch(opts *MarketOptions, mc *MarketClient, appName string, initial statusRow) error { + if !opts.Quiet && !opts.isJSON() { + opts.info("Watching '%s' (source '%s', current state '%s') until terminal state (timeout: %s)...", + appName, initial.Source, initial.State, opts.WatchTimeout) + } + + target := newWatchTarget(watchStatus, appName, initial.Source) + finalRow, werr := waitForTerminal(context.Background(), mc, opts, target) + + rowToRender := &initial + var fail *watchFailureError + var to *watchTimeoutError + switch { + case werr == nil: + rowToRender = &finalRow + case errors.As(werr, &fail): + rowToRender = &fail.row + case errors.As(werr, &to): + if to.last != nil { + rowToRender = to.last + } + default: + // Ctrl-C / context cancel: short-circuit through failOp so + // users get the standard "operation failed" framing. + return opts.failOp("status", appName, werr) + } + + if !opts.Quiet { + if err := renderStatusMatches(opts, []statusRow{*rowToRender}); err != nil { + return err + } + } + if werr != nil { + // failOp would re-render an OperationResult on top of the row + // we just printed; emit the watcher's message directly to + // stderr instead so JSON callers still get a clean stdout + // payload (the row) and humans see the failure detail. + if !opts.Quiet { + fmt.Fprintln(os.Stderr, werr.Error()) + } + return errReported + } + return nil +} diff --git a/cli/cmd/ctl/market/stop.go b/cli/cmd/ctl/market/stop.go new file mode 100644 index 000000000..446cc2e28 --- /dev/null +++ b/cli/cmd/ctl/market/stop.go @@ -0,0 +1,50 @@ +package market + +import ( + "context" + + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/cmdutil" +) + +func NewCmdMarketStop(f *cmdutil.Factory) *cobra.Command { + opts := newMarketOptions(f) + cmd := &cobra.Command{ + Use: "stop {app-name}", + Short: "Stop a running app", + Long: `Stop a running application (suspend it). + +For C/S architecture apps, use --cascade to stop all sub-charts. 
+ +Examples: + olares-cli market stop myapp + olares-cli market stop myapp --cascade`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runStop(opts, args[0]) + }, + } + opts.addOutputFlags(cmd) + opts.addCascadeFlag(cmd) + opts.addWatchFlags(cmd) + return cmd +} + +func runStop(opts *MarketOptions, appName string) error { + mc, err := opts.prepare() + if err != nil { + return opts.failOp("stop", appName, err) + } + + opts.info("Stopping '%s' for user '%s'...", appName, mc.olaresID) + + ctx := context.Background() + resp, err := mc.StopApp(ctx, appName, opts.Cascade) + if err != nil { + return opts.failOp("stop", appName, err) + } + + result := newOperationResult(mc, "stop", appName, "", "", "stop requested", resp) + return runWithWatch(opts, mc, result, newWatchTarget(watchStop, appName, opts.Source)) +} diff --git a/cli/cmd/ctl/market/types.go b/cli/cmd/ctl/market/types.go new file mode 100644 index 000000000..1c28a838a --- /dev/null +++ b/cli/cmd/ctl/market/types.go @@ -0,0 +1,188 @@ +package market + +type AppQueryInfo struct { + AppID string `json:"appid"` + SourceDataName string `json:"sourceDataName"` +} + +type AppEnvVar struct { + EnvName string `json:"envName" yaml:"envName"` + Value string `json:"value,omitempty" yaml:"value,omitempty"` +} + +type AppEntrance struct { + Name string `json:"name"` + Title string `json:"title"` +} + +type InstallRequest struct { + Source string `json:"source"` + AppName string `json:"app_name"` + Version string `json:"version"` + Sync bool `json:"sync"` + Envs []AppEnvVar `json:"envs,omitempty"` +} + +type CloneRequest struct { + Source string `json:"source"` + AppName string `json:"app_name"` + Title string `json:"title"` + Sync bool `json:"sync"` + Envs []AppEnvVar `json:"envs,omitempty"` + Entrances []AppEntrance `json:"entrances,omitempty"` +} + +type UninstallRequest struct { + Sync bool `json:"sync"` + All bool `json:"all"` + DeleteData bool `json:"deleteData"` +} + +// 
OperationResult is the structured output for mutating commands. +// +// FinalState / FinalOpType are populated only by --watch flows once a +// terminal classification has been reached; both use omitempty so JSON +// emitted by non-watch invocations stays byte-identical to the previous +// release. They duplicate State so scripts can distinguish "the latest +// state we observed" from "the state the watcher classified as terminal" +// (e.g. when failures surface a Reason that already moved the row on). +type OperationResult struct { + App string `json:"app"` + TargetApp string `json:"targetApp,omitempty"` + Operation string `json:"operation"` + Status string `json:"status"` + Message string `json:"message,omitempty"` + Source string `json:"source,omitempty"` + Version string `json:"version,omitempty"` + User string `json:"user,omitempty"` + State string `json:"state,omitempty"` + Progress string `json:"progress,omitempty"` + FinalState string `json:"finalState,omitempty"` + FinalOpType string `json:"finalOpType,omitempty"` +} + +type SourceStateData struct { + Type string `json:"type"` + AppStateLatest []AppStateLatest `json:"app_state_latest"` +} + +type AppStateLatest struct { + Status AppStatus `json:"status"` +} + +type AppStatus struct { + Name string `json:"name"` + RawName string `json:"rawAppName"` + Title string `json:"title,omitempty"` + State string `json:"state"` + OpType string `json:"opType,omitempty"` + CfgType string `json:"cfgType,omitempty"` + Progress string `json:"progress,omitempty"` + Message string `json:"message,omitempty"` + Reason string `json:"reason,omitempty"` +} + +type SourceInfoData struct { + Type string `json:"type"` + AppInfoLatest []AppInfoLatestItem `json:"app_info_latest"` +} + +type AppInfoLatestItem struct { + Type string `json:"type"` + Timestamp int64 `json:"timestamp"` + Version string `json:"version,omitempty"` + AppSimpleInfo map[string]interface{} `json:"app_simple_info"` +} + +type MarketDataResponse struct { + 
// extractLocalizedString resolves a value that may be a plain string
// or an i18n map (e.g. {"en-US": "Firefox", "zh-CN": "火狐"}).
// Preferred locales are tried in order; failing those, any non-empty
// string value in the map wins. Any other shape yields "".
func extractLocalizedString(v interface{}) string {
	if s, ok := v.(string); ok {
		return s
	}
	m, ok := v.(map[string]interface{})
	if !ok {
		return ""
	}
	for _, locale := range []string{"en-US", "en", "zh-CN"} {
		if s, ok := m[locale].(string); ok && s != "" {
			return s
		}
	}
	// Last resort: the first non-empty string value encountered
	// (map iteration order, i.e. effectively arbitrary).
	for _, raw := range m {
		if s, ok := raw.(string); ok && s != "" {
			return s
		}
	}
	return ""
}
title, + Version: version, + Source: sourceName, + Categories: categories, + } +} diff --git a/cli/cmd/ctl/market/uninstall.go b/cli/cmd/ctl/market/uninstall.go new file mode 100644 index 000000000..4121c2e19 --- /dev/null +++ b/cli/cmd/ctl/market/uninstall.go @@ -0,0 +1,65 @@ +package market + +import ( + "context" + + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/cmdutil" +) + +func NewCmdMarketUninstall(f *cmdutil.Factory) *cobra.Command { + opts := newMarketOptions(f) + cmd := &cobra.Command{ + Use: "uninstall {app-name}", + Aliases: []string{"remove", "rm"}, + Short: "Uninstall an app", + Long: `Uninstall an application. + +For C/S (client/server) architecture apps with multiple sub-charts, +use --cascade to uninstall both server and client parts. + +Use --delete-data to also remove the app's persistent data. + +Examples: + olares-cli market uninstall myapp + olares-cli market uninstall myapp --cascade + olares-cli market uninstall myapp --cascade --delete-data`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUninstall(opts, args[0]) + }, + } + opts.addOutputFlags(cmd) + opts.addCascadeFlag(cmd) + opts.addDeleteDataFlag(cmd) + opts.addWatchFlags(cmd) + return cmd +} + +func runUninstall(opts *MarketOptions, appName string) error { + mc, err := opts.prepare() + if err != nil { + return opts.failOp("uninstall", appName, err) + } + + opts.info("Uninstalling '%s' for user '%s'...", appName, mc.olaresID) + if opts.Cascade { + opts.info(" --cascade: will uninstall all sub-charts") + } + if opts.DeleteData { + opts.info(" --delete-data: will delete persistent data") + } + + ctx := context.Background() + resp, err := mc.UninstallApp(ctx, appName, opts.Cascade, opts.DeleteData) + if err != nil { + return opts.failOp("uninstall", appName, err) + } + + result := newOperationResult(mc, "uninstall", appName, "", "", "uninstall requested", resp) + // Uninstall is unique: the row may simply disappear from 
/market/state + // once the backend cleans it up, so the watch target opts in to the + // "absent means success (provided we saw it earlier)" shortcut. + return runWithWatch(opts, mc, result, newWatchTarget(watchUninstall, appName, opts.Source)) +} diff --git a/cli/cmd/ctl/market/upgrade.go b/cli/cmd/ctl/market/upgrade.go new file mode 100644 index 000000000..7d7367f95 --- /dev/null +++ b/cli/cmd/ctl/market/upgrade.go @@ -0,0 +1,82 @@ +package market + +import ( + "context" + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/cmdutil" +) + +func NewCmdMarketUpgrade(f *cmdutil.Factory) *cobra.Command { + opts := newMarketOptions(f) + cmd := &cobra.Command{ + Use: "upgrade {app-name}", + Short: "Upgrade an installed app", + Long: `Upgrade an installed application to a new version. + +If --version is not specified, the latest available version is used. + +Examples: + olares-cli market upgrade myapp + olares-cli market upgrade myapp --version 2.0.0 + olares-cli market upgrade myapp --env API_KEY=new-key`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpgrade(opts, args[0]) + }, + } + opts.addCommonFlags(cmd) + opts.addOutputFlags(cmd) + opts.addVersionFlag(cmd) + opts.addEnvFlag(cmd) + opts.addWatchFlags(cmd) + return cmd +} + +func runUpgrade(opts *MarketOptions, appName string) error { + mc, err := opts.prepare() + if err != nil { + return opts.failOp("upgrade", appName, err) + } + + source := resolveCatalogSource(opts) + if strings.TrimSpace(opts.Source) == "" { + opts.info("Using source: %s", source) + } + + version := strings.TrimSpace(opts.Version) + if version != "" { + if err := validateVersion(version); err != nil { + return opts.failOp("upgrade", appName, err) + } + } else { + v, err := resolveVersionInSource(mc, appName, source) + if err != nil { + return opts.failOp("upgrade", appName, fmt.Errorf("cannot determine version in source '%s': %w (use --version to specify)", 
source, err)) + } + version = v + opts.info("Using latest version: %s", version) + } + + envs, err := parseEnvFlags(opts.Envs) + if err != nil { + return opts.failOp("upgrade", appName, err) + } + + opts.info("Upgrading '%s' to version '%s' from '%s' for user '%s'...", appName, version, source, mc.olaresID) + + ctx := context.Background() + resp, err := mc.UpgradeApp(ctx, appName, version, source, envs) + if err != nil { + if envErr := parseServerEnvError(resp, appName); envErr != nil { + return opts.failOp("upgrade", appName, envErr) + } + return opts.failOp("upgrade", appName, err) + } + + result := newOperationResult(mc, "upgrade", appName, source, version, fmt.Sprintf("upgrade requested for version %s", version), resp) + return runWithWatch(opts, mc, result, newWatchTarget(watchUpgrade, appName, source)) +} diff --git a/cli/cmd/ctl/market/upload.go b/cli/cmd/ctl/market/upload.go new file mode 100644 index 000000000..e447126e5 --- /dev/null +++ b/cli/cmd/ctl/market/upload.go @@ -0,0 +1,166 @@ +package market + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/spf13/cobra" + + "github.com/beclab/Olares/cli/pkg/cmdutil" +) + +func NewCmdMarketUpload(f *cmdutil.Factory) *cobra.Command { + opts := newMarketOptions(f) + cmd := &cobra.Command{ + Use: "upload {chart-file-or-dir}", + Short: "Upload app chart package(s) to the market", + Long: `Upload Helm-style chart package(s) (.tgz or .tar.gz) to the market. +If the path is a directory, all chart files in the directory are uploaded. 
+ +Examples: + olares-cli market upload myapp-1.0.0.tgz + olares-cli market upload ./charts/`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpload(opts, args[0]) + }, + } + opts.addSourceFlag(cmd, "local source id to upload charts into (auto-detected when omitted)") + opts.addOutputFlags(cmd) + return cmd +} + +type uploadItemResult struct { + File string `json:"file"` + Status string `json:"status"` + Message string `json:"message,omitempty"` +} + +func isChartFile(name string) bool { + lower := strings.ToLower(name) + return strings.HasSuffix(lower, ".tgz") || strings.HasSuffix(lower, ".tar.gz") +} + +func runUpload(opts *MarketOptions, path string) error { + info, err := os.Stat(path) + if err != nil { + return opts.failOp("upload", path, fmt.Errorf("cannot access '%s': %w", path, err)) + } + + mc, err := opts.prepare() + if err != nil { + return opts.failOp("upload", path, err) + } + + if s := strings.TrimSpace(opts.Source); s != "" { + if err := validateLocalSource(s); err != nil { + return opts.failOp("upload", path, err) + } + } + + source := resolveLocalSource(opts) + if strings.TrimSpace(opts.Source) == "" { + opts.info("Using source: %s", source) + } + + if info.IsDir() { + return uploadDir(opts, mc, path, source) + } + + if !isChartFile(info.Name()) { + return opts.failOp("upload", path, fmt.Errorf("unsupported file format: expected .tgz or .tar.gz")) + } + return uploadFile(opts, mc, path, source) +} + +func uploadDir(opts *MarketOptions, mc *MarketClient, dir, source string) error { + entries, err := os.ReadDir(dir) + if err != nil { + return opts.failOp("upload", dir, fmt.Errorf("failed to read directory: %w", err)) + } + + var charts []string + for _, e := range entries { + if e.IsDir() { + continue + } + if isChartFile(e.Name()) { + charts = append(charts, filepath.Join(dir, e.Name())) + } + } + + if len(charts) == 0 { + return opts.failOp("upload", dir, fmt.Errorf("no chart files (.tgz / .tar.gz) 
found in '%s'", dir)) + } + + opts.info("Found %d chart(s) in '%s'", len(charts), dir) + + var failed int + results := make([]uploadItemResult, 0, len(charts)) + for i, f := range charts { + opts.info("[%d/%d] Uploading %s ...", i+1, len(charts), filepath.Base(f)) + if err := doUploadFile(opts, mc, f, source); err != nil { + results = append(results, uploadItemResult{ + File: filepath.Base(f), + Status: "failed", + Message: err.Error(), + }) + opts.info(" ERROR: %v", err) + failed++ + continue + } + results = append(results, uploadItemResult{ + File: filepath.Base(f), + Status: "success", + }) + } + + if opts.Quiet { + if failed > 0 { + return errReported + } + return nil + } + + if opts.isJSON() { + return opts.printJSON(results) + } + + if failed > 0 { + fmt.Fprintf(os.Stderr, "upload '%s': %d of %d uploads failed\n", dir, failed, len(charts)) + return errReported + } + fmt.Fprintf(os.Stdout, "upload '%s': all %d chart(s) uploaded\n", dir, len(charts)) + return nil +} + +func uploadFile(opts *MarketOptions, mc *MarketClient, filePath, source string) error { + if err := doUploadFile(opts, mc, filePath, source); err != nil { + return opts.failOp("upload", filepath.Base(filePath), err) + } + result := OperationResult{ + App: filepath.Base(filePath), + Operation: "upload", + Status: "success", + Message: "chart uploaded", + Source: source, + } + if !opts.Quiet { + opts.printResult(result) + } + return nil +} + +func doUploadFile(opts *MarketOptions, mc *MarketClient, filePath, source string) error { + absPath, _ := filepath.Abs(filePath) + opts.info("Uploading '%s' to source '%s'...", filepath.Base(absPath), source) + ctx := context.Background() + _, err := mc.UploadChart(ctx, absPath, source) + if err != nil { + return fmt.Errorf("upload failed: %w", err) + } + return nil +} diff --git a/cli/cmd/ctl/market/watch.go b/cli/cmd/ctl/market/watch.go new file mode 100644 index 000000000..1d20cf7fe --- /dev/null +++ b/cli/cmd/ctl/market/watch.go @@ -0,0 +1,431 @@ +package 
market + +import ( + "context" + "errors" + "fmt" + "os" + "os/signal" + "strings" + "syscall" + "time" +) + +// watchOp is the lifecycle the user-facing CLI command kicked off and that we +// expect the per-app status row to converge on. Values match the backend +// `OpType` constants in framework/app-service/api/.../appmanager_states.go so +// we can compare directly to `statusRow.OpType` without translation. +type watchOp string + +const ( + watchInstall watchOp = "install" + watchUpgrade watchOp = "upgrade" + watchUninstall watchOp = "uninstall" + watchStop watchOp = "stop" + watchResume watchOp = "resume" + watchCancel watchOp = "cancel" + // watchStatus is the op-agnostic variant used by the `status --watch` + // command: the user didn't kick off any lifecycle in this CLI + // invocation, they just want to block until whatever lifecycle is + // already in flight finishes. Terminal sets are therefore the union + // of every per-op success/failure set. + watchStatus watchOp = "status" +) + +// State classification mirrors apps/packages/app/src/constant/config.ts. +// progressingStates are merely informational — we never declare them +// terminal; their main use is so a future caller could ask "is this row +// still in motion?". The terminal classification is done per-op via +// successSet / failureSet on watchTarget. 
+var ( + canceledStates = map[string]bool{ + "pendingCanceled": true, + "downloadingCanceled": true, + "installingCanceled": true, + "initializingCanceled": true, + "upgradingCanceled": true, + "applyingEnvCanceled": true, + "resumingCanceled": true, + } + + cancelFailedStates = map[string]bool{ + "pendingCancelFailed": true, + "downloadingCancelFailed": true, + "installingCancelFailed": true, + "upgradingCancelFailed": true, + "applyingEnvCancelFailed": true, + "resumingCancelFailed": true, + } + + operationFailedStates = map[string]bool{ + "downloadFailed": true, + "installFailed": true, + "uninstallFailed": true, + "upgradeFailed": true, + "stopFailed": true, + "resumeFailed": true, + "applyEnvFailed": true, + "failed": true, + } +) + +// watchTarget captures everything waitForTerminal needs to decide whether a +// given statusRow means "we're done": the op we issued, who to look for, and +// the per-op success/failure state sets. Built via newWatchTarget so callers +// don't have to reproduce the per-op set lookup table. +type watchTarget struct { + op watchOp + appName string + source string + + // matchOpType requires `row.OpType == string(op)` before we accept a + // state as terminal. This guards against tick-zero false positives: + // an `upgrade` issued on an already-`running` app would otherwise + // short-circuit to success before the backend even started the + // upgrade lifecycle. Cancel sets this to false because the canceled + // row's OpType remains the underlying op being canceled (install / + // upgrade / ...), not the literal string "cancel". + matchOpType bool + + successSet map[string]bool + failureSet map[string]bool + + // absentMeansSuccess flips the "row vanished from /market/state" + // signal from "still pending" to "we're done". Only true for + // uninstall, where the backend may stop reporting the app once it's + // fully uninstalled. 
We additionally require that we have seen the + // row at least once during this watch so a totally-unknown app name + // doesn't get reported as "successfully uninstalled". + absentMeansSuccess bool +} + +func newWatchTarget(op watchOp, appName, source string) watchTarget { + t := watchTarget{ + op: op, + appName: appName, + source: strings.TrimSpace(source), + matchOpType: true, + } + switch op { + case watchInstall: + t.successSet = map[string]bool{"running": true} + // During install, any *Failed terminates as failure; *Canceled + // likewise — a concurrent cancel means the install we asked for + // did not happen, so the user-facing exit code should be + // non-zero even though the backend reached a "clean" state. + t.failureSet = unionStateSets(operationFailedStates, canceledStates) + case watchUpgrade: + t.successSet = map[string]bool{"running": true} + t.failureSet = unionStateSets(operationFailedStates, canceledStates) + case watchUninstall: + t.successSet = map[string]bool{"uninstalled": true} + t.failureSet = map[string]bool{"uninstallFailed": true} + t.absentMeansSuccess = true + case watchStop: + t.successSet = map[string]bool{"stopped": true} + t.failureSet = map[string]bool{"stopFailed": true} + case watchResume: + t.successSet = map[string]bool{"running": true} + t.failureSet = map[string]bool{ + "resumeFailed": true, + "resumingCanceled": true, + "resumingCancelFailed": true, + } + case watchCancel: + t.successSet = canceledStates + t.failureSet = cancelFailedStates + t.matchOpType = false + case watchStatus: + // Op-agnostic: any stable resting state counts as success + // (running for install/upgrade/resume; stopped for stop; + // uninstalled for uninstall; *Canceled for cancel). Any + // *Failed or *CancelFailed state still maps to failure so + // scripts get a non-zero exit when something actually broke. 
+ t.successSet = unionStateSets( + map[string]bool{ + "running": true, + "stopped": true, + "uninstalled": true, + }, + canceledStates, + ) + t.failureSet = unionStateSets(operationFailedStates, cancelFailedStates) + t.matchOpType = false + // If the row disappears mid-watch (uninstall finishing, app + // pruned, ...) treat that as terminal — same shortcut the + // dedicated uninstall watcher uses, but the status flow only + // enters waitForTerminal after confirming the row was present + // initially, so this can't fire on a never-installed app. + t.absentMeansSuccess = true + } + return t +} + +// watchTimeoutError is the error returned when --watch-timeout elapses before +// the row reaches a terminal state. We surface the last seen state so the +// user can decide whether to extend the timeout or investigate. +type watchTimeoutError struct { + target watchTarget + last *statusRow +} + +func (e *watchTimeoutError) Error() string { + if e.last != nil { + return fmt.Sprintf("%s '%s' watch timed out (last state: %s, op: %s)", + e.target.op, e.target.appName, + valueOrUnknown(e.last.State), valueOrUnknown(e.last.OpType)) + } + return fmt.Sprintf("%s '%s' watch timed out (no status reported by the backend)", + e.target.op, e.target.appName) +} + +// watchFailureError represents a terminal-failure classification. It exposes +// the row so callers can render a structured OperationResult. 
+type watchFailureError struct { + target watchTarget + row statusRow +} + +func (e *watchFailureError) Error() string { + parts := []string{fmt.Sprintf("state=%s", e.row.State)} + if e.row.OpType != "" { + parts = append(parts, "op="+e.row.OpType) + } + if detail := strings.TrimSpace(e.row.Message); detail != "" { + parts = append(parts, "reason: "+detail) + } + return fmt.Sprintf("%s '%s' failed: %s", + e.target.op, e.target.appName, strings.Join(parts, " ")) +} + +// waitForTerminal polls /market/state until the row classifies as terminal +// (success or failure) per `t`, or until the deadline / signal.NotifyContext +// fires. The first poll happens immediately so a state that was already +// terminal at issue time (e.g. a `stop` on an already-`stopped` app once the +// backend has switched OpType) returns without a wasted sleep cycle. +func waitForTerminal(parentCtx context.Context, mc *MarketClient, opts *MarketOptions, t watchTarget) (statusRow, error) { + interval := opts.WatchInterval + if interval <= 0 { + interval = 2 * time.Second + } + timeoutDur := opts.WatchTimeout + if timeoutDur <= 0 { + timeoutDur = 15 * time.Minute + } + deadline := time.Now().Add(timeoutDur) + + // signal.NotifyContext lets us distinguish "user pressed Ctrl-C" from + // "parent context canceled for some other reason" by checking whether + // parentCtx is still alive when the derived ctx is done. 
+ ctx, stop := signal.NotifyContext(parentCtx, os.Interrupt, syscall.SIGTERM) + defer stop() + + var ( + last *statusRow + seen bool + consecErrors int + ) + + for { + if err := ctx.Err(); err != nil { + if parentCtx.Err() == nil { + return statusRow{}, fmt.Errorf("%s '%s' watch canceled by user", t.op, t.appName) + } + return statusRow{}, err + } + if time.Now().After(deadline) { + return statusRow{}, &watchTimeoutError{target: t, last: last} + } + + resp, err := mc.GetMarketState(ctx) + if err != nil { + // Transient errors (auth refresh, ephemeral 5xx, network + // blips) shouldn't kill the watch outright. We surface them + // to stderr at most once per occurrence and bail out only + // after several consecutive failures so the user isn't + // trapped in a pathological retry loop. + consecErrors++ + opts.info("watch: failed to fetch state: %v (retry %d)", err, consecErrors) + if consecErrors >= 5 { + return statusRow{}, fmt.Errorf("%s '%s' watch aborted after %d consecutive errors: %w", t.op, t.appName, consecErrors, err) + } + if waitErr := sleepOrCancel(ctx, interval); waitErr != nil { + continue + } + continue + } + consecErrors = 0 + + row, present := lookupWatchRow(resp, t.appName, t.source) + switch { + case !present: + if t.absentMeansSuccess && seen { + // We previously saw the row in a progressing state and + // the backend has now stopped reporting it: treat as + // a successful uninstall. Synthesize a row so the + // caller still has source/state context for output. + return statusRow{ + Name: t.appName, + State: "uninstalled", + OpType: string(t.op), + Source: t.source, + }, nil + } + // Not yet present; keep waiting (e.g. install just submitted). + default: + seen = true + + // Only emit an info line when something actually changed, + // to keep watch output proportional to real progress + // instead of one line per poll. 
+ if last == nil || last.State != row.State || last.OpType != row.OpType || last.Progress != row.Progress { + opts.info("[%s] state=%s op=%s progress=%s source=%s", + row.Name, + valueOrUnknown(row.State), + valueOrUnknown(row.OpType), + valueOrUnknown(row.Progress), + valueOrUnknown(row.Source)) + } + rowCopy := row + last = &rowCopy + + if t.matchesOpType(row) && t.successSet[row.State] { + return row, nil + } + if t.matchesOpType(row) && t.failureSet[row.State] { + return row, &watchFailureError{target: t, row: row} + } + } + + if err := sleepOrCancel(ctx, interval); err != nil { + // Loop top will reclassify the ctx error. + continue + } + } +} + +func (t watchTarget) matchesOpType(row statusRow) bool { + if !t.matchOpType { + return true + } + return row.OpType == string(t.op) +} + +func unionStateSets(sets ...map[string]bool) map[string]bool { + total := 0 + for _, s := range sets { + total += len(s) + } + out := make(map[string]bool, total) + for _, s := range sets { + for k := range s { + out[k] = true + } + } + return out +} + +// lookupWatchRow finds the app's current row in a market-state response. It +// mirrors the source-then-fallback logic in runStatusSingle so an app +// installed under a non-default source still surfaces during watch. 
+func lookupWatchRow(resp *APIResponse, appName, source string) (statusRow, bool) { + if source != "" { + if rows, err := parseStatusRows(resp, source, false); err == nil { + for _, r := range rows { + if r.Name == appName { + return r, true + } + } + } + } + rows, err := parseStatusRows(resp, "", true) + if err != nil { + return statusRow{}, false + } + for _, r := range rows { + if r.Name == appName { + return r, true + } + } + return statusRow{}, false +} + +func sleepOrCancel(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + defer t.Stop() + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.C: + return nil + } +} + +func valueOrUnknown(s string) string { + if strings.TrimSpace(s) == "" { + return "-" + } + return s +} + +// runWithWatch is the shared post-mutation flow used by every command that +// adds --watch. When opts.Watch is false it simply prints the existing +// "accepted" result; when true it polls until terminal and folds the final +// state into the OperationResult so JSON callers get a single, fully-resolved +// record on stdout. +func runWithWatch(opts *MarketOptions, mc *MarketClient, accepted OperationResult, target watchTarget) error { + if !opts.Watch { + return finishOperation(opts, mc, accepted) + } + + if !opts.Quiet && !opts.isJSON() { + // Mirror the "request accepted" line users see without --watch + // so the watch transitions that follow have context. + opts.info("%s '%s' requested; watching until terminal state (timeout: %s)...", + accepted.Operation, accepted.App, opts.WatchTimeout) + } + + row, err := waitForTerminal(context.Background(), mc, opts, target) + if err != nil { + // On terminal-failure / timeout / interrupt, wrap the existing + // accepted result with whatever we learned about the final + // state so JSON consumers see structured data instead of a + // bare error string. 
+ failed := accepted + failed.Status = "failed" + failed.Message = err.Error() + var fail *watchFailureError + if errors.As(err, &fail) { + failed.State = fail.row.State + failed.Progress = fail.row.Progress + failed.FinalState = fail.row.State + failed.FinalOpType = fail.row.OpType + } + var to *watchTimeoutError + if errors.As(err, &to) && to.last != nil { + failed.State = to.last.State + failed.Progress = to.last.Progress + failed.FinalState = to.last.State + failed.FinalOpType = to.last.OpType + } + // Render the structured result first, then return a sentinel + // so the cobra layer knows we've already reported. + opts.printResult(failed) + return errReported + } + + final := accepted + final.Status = "success" + final.State = row.State + final.Progress = row.Progress + final.FinalState = row.State + final.FinalOpType = row.OpType + final.Message = fmt.Sprintf("%s completed (state=%s)", accepted.Operation, row.State) + if row.Source != "" { + final.Source = row.Source + } + if !opts.Quiet { + opts.printResult(final) + } + return nil +} diff --git a/cli/cmd/ctl/market/watch_test.go b/cli/cmd/ctl/market/watch_test.go new file mode 100644 index 000000000..e3876748a --- /dev/null +++ b/cli/cmd/ctl/market/watch_test.go @@ -0,0 +1,516 @@ +package market + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/beclab/Olares/cli/pkg/credential" +) + +// classifyForTest mirrors the classification waitForTerminal performs on each +// poll: it returns "success" / "failure" / "progressing" for a hypothetical +// row, without invoking the actual poll loop. Keeping the helper local to the +// test file avoids growing the package's exported surface just for unit +// coverage. 
+func classifyForTest(t watchTarget, row statusRow) string { + if !t.matchesOpType(row) { + return "progressing" + } + switch { + case t.successSet[row.State]: + return "success" + case t.failureSet[row.State]: + return "failure" + default: + return "progressing" + } +} + +func TestClassifierInstallLifecycle(t *testing.T) { + target := newWatchTarget(watchInstall, "myapp", "market.olares") + + cases := []struct { + name string + row statusRow + expect string + }{ + {"pending", statusRow{State: "pending", OpType: "install"}, "progressing"}, + {"downloading", statusRow{State: "downloading", OpType: "install"}, "progressing"}, + {"installing", statusRow{State: "installing", OpType: "install"}, "progressing"}, + {"initializing", statusRow{State: "initializing", OpType: "install"}, "progressing"}, + {"running with install op", statusRow{State: "running", OpType: "install"}, "success"}, + {"installFailed", statusRow{State: "installFailed", OpType: "install"}, "failure"}, + {"downloadFailed", statusRow{State: "downloadFailed", OpType: "install"}, "failure"}, + // Cancel during install is also terminal-failure for the install + // CTA: from the user's perspective the install they asked for did + // not happen. + {"installingCanceled", statusRow{State: "installingCanceled", OpType: "install"}, "failure"}, + // Stale OpType from a prior lifecycle must not prematurely + // classify any state. 
+ {"running with stale upgrade op", statusRow{State: "running", OpType: "upgrade"}, "progressing"}, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + if got := classifyForTest(target, c.row); got != c.expect { + t.Fatalf("classify(%+v) = %s, want %s", c.row, got, c.expect) + } + }) + } +} + +func TestClassifierUpgradeWaitsForOpTypeFlip(t *testing.T) { + // Issuing `upgrade` on a row currently `running, op=install` must NOT + // short-circuit to success on tick zero; only after the backend + // flips OpType to `upgrade` (plus reaches `running` again) is the + // upgrade complete. + target := newWatchTarget(watchUpgrade, "myapp", "market.olares") + + stale := statusRow{State: "running", OpType: "install"} + if got := classifyForTest(target, stale); got != "progressing" { + t.Fatalf("stale install OpType should keep progressing, got %s", got) + } + + mid := statusRow{State: "upgrading", OpType: "upgrade"} + if got := classifyForTest(target, mid); got != "progressing" { + t.Fatalf("upgrading should be progressing, got %s", got) + } + + done := statusRow{State: "running", OpType: "upgrade"} + if got := classifyForTest(target, done); got != "success" { + t.Fatalf("running with upgrade op should be success, got %s", got) + } + + failed := statusRow{State: "upgradeFailed", OpType: "upgrade"} + if got := classifyForTest(target, failed); got != "failure" { + t.Fatalf("upgradeFailed should be failure, got %s", got) + } +} + +func TestClassifierUninstall(t *testing.T) { + target := newWatchTarget(watchUninstall, "myapp", "market.olares") + if !target.absentMeansSuccess { + t.Fatalf("uninstall target must set absentMeansSuccess") + } + if got := classifyForTest(target, statusRow{State: "uninstalling", OpType: "uninstall"}); got != "progressing" { + t.Fatalf("uninstalling should be progressing, got %s", got) + } + if got := classifyForTest(target, statusRow{State: "uninstalled", OpType: "uninstall"}); got != "success" { + t.Fatalf("uninstalled should be 
success, got %s", got) + } + if got := classifyForTest(target, statusRow{State: "uninstallFailed", OpType: "uninstall"}); got != "failure" { + t.Fatalf("uninstallFailed should be failure, got %s", got) + } +} + +func TestClassifierCancelIgnoresOpType(t *testing.T) { + // cancel's terminal row keeps the *underlying* op (install / + // upgrade / ...), so matchOpType must be false; otherwise we'd never + // classify the canceled state as terminal. + target := newWatchTarget(watchCancel, "myapp", "") + if target.matchOpType { + t.Fatalf("cancel target must NOT require OpType match") + } + + row := statusRow{State: "installingCanceled", OpType: "install"} + if got := classifyForTest(target, row); got != "success" { + t.Fatalf("installingCanceled under cancel target should be success, got %s", got) + } + + failed := statusRow{State: "installingCancelFailed", OpType: "install"} + if got := classifyForTest(target, failed); got != "failure" { + t.Fatalf("installingCancelFailed should be failure, got %s", got) + } +} + +func TestClassifierStatusOpAgnostic(t *testing.T) { + target := newWatchTarget(watchStatus, "myapp", "market.olares") + if target.matchOpType { + t.Fatalf("status target must not require OpType match") + } + if !target.absentMeansSuccess { + t.Fatalf("status target must opt into absentMeansSuccess so a row vanishing mid-watch is terminal") + } + + cases := []struct { + name string + row statusRow + expect string + }{ + // Stable resting states for any lifecycle → success. 
+ {"running after install", statusRow{State: "running", OpType: "install"}, "success"}, + {"running after upgrade", statusRow{State: "running", OpType: "upgrade"}, "success"}, + {"running after resume", statusRow{State: "running", OpType: "resume"}, "success"}, + {"stopped", statusRow{State: "stopped", OpType: "stop"}, "success"}, + {"uninstalled", statusRow{State: "uninstalled", OpType: "uninstall"}, "success"}, + {"installingCanceled", statusRow{State: "installingCanceled", OpType: "install"}, "success"}, + // Any in-flight state keeps polling. + {"pending", statusRow{State: "pending", OpType: "install"}, "progressing"}, + {"installing", statusRow{State: "installing", OpType: "install"}, "progressing"}, + {"upgrading", statusRow{State: "upgrading", OpType: "upgrade"}, "progressing"}, + {"stopping", statusRow{State: "stopping", OpType: "stop"}, "progressing"}, + // All declared failure states → failure regardless of OpType. + {"installFailed", statusRow{State: "installFailed", OpType: "install"}, "failure"}, + {"upgradeFailed", statusRow{State: "upgradeFailed", OpType: "upgrade"}, "failure"}, + {"stopFailed", statusRow{State: "stopFailed", OpType: "stop"}, "failure"}, + {"installingCancelFailed", statusRow{State: "installingCancelFailed", OpType: "install"}, "failure"}, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + if got := classifyForTest(target, c.row); got != c.expect { + t.Fatalf("classify(%+v) = %s, want %s", c.row, got, c.expect) + } + }) + } +} + +func TestWaitForTerminalStatusReachesRunning(t *testing.T) { + // status --watch is fired against an app that's mid-install; the + // watcher must recognize `running` as terminal even though no + // specific op was specified by the caller. 
+ seq := []statusRow{ + {State: "installing", OpType: "install"}, + {State: "initializing", OpType: "install"}, + {State: "running", OpType: "install"}, + } + srv := newFakeStateServer(t, "myapp", "market.olares", seq) + mc := newTestMarketClient(t, srv.srv.URL) + opts := quietOpts(5*time.Second, 5*time.Millisecond) + + row, err := waitForTerminal(context.Background(), mc, opts, newWatchTarget(watchStatus, "myapp", "market.olares")) + if err != nil { + t.Fatalf("expected status watch success, got %v", err) + } + if row.State != "running" { + t.Fatalf("expected terminal running, got %s", row.State) + } +} + +func TestWaitForTerminalStatusSurfacesFailure(t *testing.T) { + seq := []statusRow{ + {State: "downloading", OpType: "install"}, + {State: "installFailed", OpType: "install", Message: "image pull error"}, + } + srv := newFakeStateServer(t, "myapp", "market.olares", seq) + mc := newTestMarketClient(t, srv.srv.URL) + opts := quietOpts(5*time.Second, 5*time.Millisecond) + + _, err := waitForTerminal(context.Background(), mc, opts, newWatchTarget(watchStatus, "myapp", "market.olares")) + if err == nil { + t.Fatalf("expected installFailed surfaced as failure") + } + var fail *watchFailureError + if !errors.As(err, &fail) { + t.Fatalf("expected watchFailureError, got %T: %v", err, err) + } + if fail.row.State != "installFailed" { + t.Fatalf("expected installFailed, got %s", fail.row.State) + } +} + +func TestClassifierStopResume(t *testing.T) { + stopT := newWatchTarget(watchStop, "myapp", "") + if got := classifyForTest(stopT, statusRow{State: "stopping", OpType: "stop"}); got != "progressing" { + t.Fatalf("stopping should be progressing, got %s", got) + } + if got := classifyForTest(stopT, statusRow{State: "stopped", OpType: "stop"}); got != "success" { + t.Fatalf("stopped should be success, got %s", got) + } + if got := classifyForTest(stopT, statusRow{State: "stopFailed", OpType: "stop"}); got != "failure" { + t.Fatalf("stopFailed should be failure, got %s", got) 
+ } + + resumeT := newWatchTarget(watchResume, "myapp", "") + if got := classifyForTest(resumeT, statusRow{State: "running", OpType: "resume"}); got != "success" { + t.Fatalf("running under resume target should be success, got %s", got) + } + if got := classifyForTest(resumeT, statusRow{State: "resumeFailed", OpType: "resume"}); got != "failure" { + t.Fatalf("resumeFailed should be failure, got %s", got) + } +} + +// fakeStateServer serves /app-store/api/v2/market/state with a configurable +// queue of states so we can drive waitForTerminal end-to-end without a real +// cluster. It models exactly the response shape parseStatusRows expects. +type fakeStateServer struct { + mu sync.Mutex + idx int32 + app string + source string + sequence []statusRow + missing bool + srv *httptest.Server +} + +func newFakeStateServer(t *testing.T, app, source string, seq []statusRow) *fakeStateServer { + t.Helper() + f := &fakeStateServer{app: app, source: source, sequence: seq} + f.srv = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasSuffix(r.URL.Path, "/market/state") { + http.NotFound(w, r) + return + } + row := f.next() + body := f.envelope(row) + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(body) + })) + t.Cleanup(f.srv.Close) + return f +} + +func (f *fakeStateServer) next() (row statusRow) { + i := atomic.AddInt32(&f.idx, 1) - 1 + if i >= int32(len(f.sequence)) { + // Stay on the last state forever once the queue is exhausted — + // makes timeout assertions deterministic. 
+		return f.sequence[len(f.sequence)-1]
+	}
+	return f.sequence[i]
+}
+
+func (f *fakeStateServer) markMissing() {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	f.missing = true
+}
+
+func (f *fakeStateServer) envelope(row statusRow) []byte {
+	f.mu.Lock()
+	missing := f.missing
+	f.mu.Unlock()
+
+	apps := []map[string]interface{}{}
+	if !missing {
+		apps = append(apps, map[string]interface{}{
+			"status": map[string]interface{}{
+				"name":     f.app,
+				"state":    row.State,
+				"opType":   row.OpType,
+				"progress": row.Progress,
+				"message":  row.Message,
+			},
+		})
+	}
+	// Mirror the real /market/state shape parseStatusRows expects:
+	// the v2 envelope unmarshals resp.Data into MarketStateResponse,
+	// whose `user_data.sources[<source>].app_state_latest[].status`
+	// path holds the per-app records.
+	envelope := map[string]interface{}{
+		"success": true,
+		"data": map[string]interface{}{
+			"user_data": map[string]interface{}{
+				"sources": map[string]interface{}{
+					f.source: map[string]interface{}{
+						"type":             "market",
+						"app_state_latest": apps,
+					},
+				},
+			},
+		},
+	}
+	b, _ := json.Marshal(envelope)
+	return b
+}
+
+func newTestMarketClient(t *testing.T, baseURL string) *MarketClient {
+	t.Helper()
+	rp := &credential.ResolvedProfile{
+		Name:        "test",
+		OlaresID:    "tester@olares.test",
+		AccessToken: "test-token",
+		MarketURL:   baseURL,
+	}
+	return NewMarketClient(http.DefaultClient, rp, "market.olares")
+}
+
+// quietOpts builds MarketOptions that suppress any output runWithWatch /
+// waitForTerminal would emit, so `go test` output isn't polluted; we still
+// inspect the OperationResult / error returned by the API. 
+func quietOpts(timeout, interval time.Duration) *MarketOptions { + return &MarketOptions{ + Source: "market.olares", + Output: "json", // suppresses opts.info + Quiet: true, + Watch: true, + WatchTimeout: timeout, + WatchInterval: interval, + } +} + +func TestWaitForTerminalInstallSuccess(t *testing.T) { + seq := []statusRow{ + {State: "pending", OpType: "install"}, + {State: "downloading", OpType: "install"}, + {State: "installing", OpType: "install"}, + {State: "running", OpType: "install"}, + } + srv := newFakeStateServer(t, "myapp", "market.olares", seq) + mc := newTestMarketClient(t, srv.srv.URL) + opts := quietOpts(5*time.Second, 5*time.Millisecond) + + row, err := waitForTerminal(context.Background(), mc, opts, newWatchTarget(watchInstall, "myapp", "market.olares")) + if err != nil { + t.Fatalf("expected success, got error: %v", err) + } + if row.State != "running" { + t.Fatalf("expected terminal state running, got %s", row.State) + } +} + +func TestWaitForTerminalInstallFailure(t *testing.T) { + seq := []statusRow{ + {State: "pending", OpType: "install"}, + {State: "installFailed", OpType: "install", Message: "image pull error"}, + } + srv := newFakeStateServer(t, "myapp", "market.olares", seq) + mc := newTestMarketClient(t, srv.srv.URL) + opts := quietOpts(5*time.Second, 5*time.Millisecond) + + _, err := waitForTerminal(context.Background(), mc, opts, newWatchTarget(watchInstall, "myapp", "market.olares")) + if err == nil { + t.Fatalf("expected failure, got nil") + } + var fail *watchFailureError + if !errors.As(err, &fail) { + t.Fatalf("expected watchFailureError, got %T: %v", err, err) + } + if fail.row.State != "installFailed" { + t.Fatalf("expected installFailed in error row, got %s", fail.row.State) + } + if !strings.Contains(err.Error(), "installFailed") { + t.Fatalf("error message should mention installFailed, got %q", err.Error()) + } +} + +func TestWaitForTerminalUninstallAbsent(t *testing.T) { + seq := []statusRow{ + {State: "uninstalling", 
OpType: "uninstall"}, + } + srv := newFakeStateServer(t, "myapp", "market.olares", seq) + mc := newTestMarketClient(t, srv.srv.URL) + opts := quietOpts(5*time.Second, 5*time.Millisecond) + + // After the first poll, drop the row entirely -> simulates the + // backend having finished cleanup. + go func() { + time.Sleep(20 * time.Millisecond) + srv.markMissing() + }() + + row, err := waitForTerminal(context.Background(), mc, opts, newWatchTarget(watchUninstall, "myapp", "market.olares")) + if err != nil { + t.Fatalf("expected success on row absence, got %v", err) + } + if row.State != "uninstalled" { + t.Fatalf("expected synthesized uninstalled row, got %s", row.State) + } +} + +func TestWaitForTerminalUpgradeWaitsForOpTypeFlip(t *testing.T) { + // Tick 0 sees the legacy `running, op=install` row from the previous + // install; only after the backend flips to op=upgrade and reaches + // running again should we declare success. + seq := []statusRow{ + {State: "running", OpType: "install"}, + {State: "upgrading", OpType: "upgrade"}, + {State: "running", OpType: "upgrade"}, + } + srv := newFakeStateServer(t, "myapp", "market.olares", seq) + mc := newTestMarketClient(t, srv.srv.URL) + opts := quietOpts(5*time.Second, 5*time.Millisecond) + + row, err := waitForTerminal(context.Background(), mc, opts, newWatchTarget(watchUpgrade, "myapp", "market.olares")) + if err != nil { + t.Fatalf("expected success once upgrade lifecycle completes, got %v", err) + } + if row.OpType != "upgrade" || row.State != "running" { + t.Fatalf("expected running/upgrade, got %s/%s", row.State, row.OpType) + } +} + +func TestWaitForTerminalCancelLifecycle(t *testing.T) { + seq := []statusRow{ + {State: "installing", OpType: "install"}, + {State: "installingCanceling", OpType: "install"}, + {State: "installingCanceled", OpType: "install"}, + } + srv := newFakeStateServer(t, "myapp", "market.olares", seq) + mc := newTestMarketClient(t, srv.srv.URL) + opts := quietOpts(5*time.Second, 
5*time.Millisecond) + + row, err := waitForTerminal(context.Background(), mc, opts, newWatchTarget(watchCancel, "myapp", "market.olares")) + if err != nil { + t.Fatalf("expected cancel success, got %v", err) + } + if row.State != "installingCanceled" { + t.Fatalf("expected installingCanceled, got %s", row.State) + } +} + +func TestWaitForTerminalTimeoutSurfacesLastState(t *testing.T) { + // Stuck in `installing` forever: classifier never reaches a terminal + // set, so the deadline must fire and the error must carry the last + // observed state. + seq := []statusRow{{State: "installing", OpType: "install"}} + srv := newFakeStateServer(t, "myapp", "market.olares", seq) + mc := newTestMarketClient(t, srv.srv.URL) + opts := quietOpts(80*time.Millisecond, 5*time.Millisecond) + + _, err := waitForTerminal(context.Background(), mc, opts, newWatchTarget(watchInstall, "myapp", "market.olares")) + if err == nil { + t.Fatalf("expected timeout error, got nil") + } + var to *watchTimeoutError + if !errors.As(err, &to) { + t.Fatalf("expected watchTimeoutError, got %T: %v", err, err) + } + if to.last == nil || to.last.State != "installing" { + t.Fatalf("expected last state installing, got %+v", to.last) + } + if !strings.Contains(err.Error(), "installing") { + t.Fatalf("timeout error must surface last state, got %q", err.Error()) + } +} + +// Sanity-guard that the JSON tags on OperationResult haven't drifted: with +// FinalState/FinalOpType empty, the JSON output must NOT contain those keys +// (so existing scripted consumers keep their byte-identical output). 
+func TestOperationResultJSONOmitsFinalFieldsWhenUnset(t *testing.T) { + r := OperationResult{App: "a", Operation: "install", Status: "accepted"} + b, err := json.Marshal(r) + if err != nil { + t.Fatalf("marshal: %v", err) + } + if strings.Contains(string(b), "finalState") || strings.Contains(string(b), "finalOpType") { + t.Fatalf("non-watch JSON must omit finalState/finalOpType; got %s", b) + } + + r.FinalState = "running" + r.FinalOpType = "install" + b, err = json.Marshal(r) + if err != nil { + t.Fatalf("marshal: %v", err) + } + if !strings.Contains(string(b), `"finalState":"running"`) { + t.Fatalf("watch JSON must include finalState; got %s", b) + } +} + +// helper that builds a printable description of a watchTarget for error +// messages — keeps the test helpers self-contained. +func describeTarget(t watchTarget) string { + return fmt.Sprintf("op=%s app=%s source=%s matchOpType=%v absentMeansSuccess=%v", + t.op, t.appName, t.source, t.matchOpType, t.absentMeansSuccess) +} + +var _ = describeTarget // referenced by future tests; suppress lint diff --git a/cli/cmd/ctl/root.go b/cli/cmd/ctl/root.go index bee53b3b7..6a197d49e 100755 --- a/cli/cmd/ctl/root.go +++ b/cli/cmd/ctl/root.go @@ -9,6 +9,7 @@ import ( "github.com/beclab/Olares/cli/cmd/ctl/disk" "github.com/beclab/Olares/cli/cmd/ctl/files" "github.com/beclab/Olares/cli/cmd/ctl/gpu" + "github.com/beclab/Olares/cli/cmd/ctl/market" "github.com/beclab/Olares/cli/cmd/ctl/node" "github.com/beclab/Olares/cli/cmd/ctl/os" "github.com/beclab/Olares/cli/cmd/ctl/osinfo" @@ -65,6 +66,7 @@ func NewDefaultCommand() *cobra.Command { cmds.AddCommand(user.NewUserCommand()) cmds.AddCommand(wizard.NewWizardCommand()) cmds.AddCommand(disk.NewDiskCommand()) + cmds.AddCommand(market.NewMarketCommand(factory)) cmds.AddCommand(app.NewAppCommand()) cmds.AddCommand(profile.NewProfileCommand()) cmds.AddCommand(files.NewFilesCommand(factory)) diff --git a/cli/pkg/credential/default_provider.go b/cli/pkg/credential/default_provider.go 
index 5a2b28f3c..7b4a1de50 100644 --- a/cli/pkg/credential/default_provider.go +++ b/cli/pkg/credential/default_provider.go @@ -135,6 +135,7 @@ func buildResolved(profile *cliconfig.ProfileConfig, accessToken string, exp tim VaultURL: id.VaultURL(profile.LocalURLPrefix), DesktopURL: id.DesktopURL(profile.LocalURLPrefix), FilesURL: id.FilesURL(profile.LocalURLPrefix), + MarketURL: id.MarketURL(profile.LocalURLPrefix), AccessToken: accessToken, InsecureSkipVerify: profile.InsecureSkipVerify, } diff --git a/cli/pkg/credential/types.go b/cli/pkg/credential/types.go index f88c2427c..86dd734ac 100644 --- a/cli/pkg/credential/types.go +++ b/cli/pkg/credential/types.go @@ -28,6 +28,7 @@ type ResolvedProfile struct { VaultURL string DesktopURL string FilesURL string + MarketURL string AccessToken string // ExpiresAt is the unix-seconds expiry decoded from AccessToken's `exp` diff --git a/cli/pkg/olares/id.go b/cli/pkg/olares/id.go index a8b15c4c5..dd41218b6 100644 --- a/cli/pkg/olares/id.go +++ b/cli/pkg/olares/id.go @@ -94,3 +94,13 @@ func (id ID) DesktopURL(localPrefix string) string { func (id ID) FilesURL(localPrefix string) string { return fmt.Sprintf("https://files.%s%s", localPrefix, id.TerminusName()) } + +// MarketURL returns the per-user market base URL, e.g. +// "https://market.alice.olares.com". The Market app-store v2 API is reachable +// at `<MarketURL>/app-store/api/v2` — the same origin/path the Market SPA +// itself talks to via apps/packages/app/src/stores/market/center.ts. The same +// edge auth chain (Authelia + l4-bfl-proxy) accepts the `X-Authorization` +// header here as it does for files / vault / desktop. 
+func (id ID) MarketURL(localPrefix string) string { + return fmt.Sprintf("https://market.%s%s", localPrefix, id.TerminusName()) +} diff --git a/cli/skills/olares-market/SKILL.md b/cli/skills/olares-market/SKILL.md new file mode 100644 index 000000000..1f90fc7e6 --- /dev/null +++ b/cli/skills/olares-market/SKILL.md @@ -0,0 +1,319 @@ +--- +name: olares-market +version: 1.0.0 +description: "olares-cli market command tree against the per-user Market app-store v2 API: list / get / categories for catalog browsing; install / uninstall / upgrade / clone / cancel / stop / resume for lifecycle; status for runtime state; upload / delete for local chart sources (cli / upload / studio); --watch / --watch-timeout / --watch-interval to block until terminal state. Covers source resolution (market.olares catalog vs local sources), the install/upgrade/uninstall/stop/resume/cancel state machine, OpType gating for race-safe watching, the op-agnostic status --watch recovery path, and JSON shape additions (finalState/finalOpType). Use whenever the user mentions market / app store / install / upgrade / uninstall / clone / stop / resume / cancel / status / upload chart / app state / running / installFailed / --watch, asks 'is firefox installed yet', or sees errors like 'app X is not installed', 'watch timed out', 'watch canceled by user', '--watch requires an app name'." +metadata: + requires: + bins: ["olares-cli"] + cliHelp: "olares-cli market --help" +--- + +# market (App-store v2 + per-user market-backend) + +**CRITICAL — before doing anything, MUST use the Read tool to read [`../olares-shared/SKILL.md`](../olares-shared/SKILL.md) for the profile selection, login, and HTTP 401/403 recovery rules that every command here depends on.** + +## Core concepts + +### Source resolution + +The market backend serves multiple "sources" of charts. 
The CLI resolves which one to talk to from `-s / --source`, falling back to a default that depends on the verb: + +| Source id | What it is | Used by (default) | +|--------------|---------------------------------------------------------|--------------------------------| +| `market.olares` | Public catalog (read-only browse) | `list`, `get`, `categories`, `install`, `upgrade`, `clone`, `status` | +| `cli` | Local source for charts uploaded via this CLI | `upload`, `delete` (default) | +| `upload` | Local source for charts pushed through the SPA's "upload" UI | `upload`, `delete` | +| `studio` | Local source for charts produced by Devbox / Studio | `upload`, `delete` | + +Resolution is centralized in [`cli/cmd/ctl/market/common.go`](cli/cmd/ctl/market/common.go): + +- `resolveCatalogSource(opts)` → `opts.Source` if set, else `defaultCatalogSource = "market.olares"`. +- `resolveLocalSource(opts)` → `opts.Source` if set, else `defaultLocalSource = "cli"`. +- `validateLocalSource(s)` rejects anything outside `localSources = {"upload", "studio", "cli"}`. + +When `-s` is omitted, every command prints `Using source: <id>` to stderr so the agent can confirm which backend it hit. `-a / --all-sources` (where supported) bypasses the single-source resolver and asks the backend across every source the user has access to. + +### App lifecycle / state machine + +The backend tracks two orthogonal axes per app: **`State`** (where the row currently is) and **`OpType`** (which mutation is in flight). The full enum lives in [`framework/app-service/api/app.bytetrade.io/v1alpha1/appmanager_states.go`](framework/app-service/api/app.bytetrade.io/v1alpha1/appmanager_states.go). 
The CLI groups them into four buckets in [`cli/cmd/ctl/market/watch.go`](cli/cmd/ctl/market/watch.go):
+
+| Bucket | Examples | Meaning |
+|----------------------|------------------------------------------------------------------|------------------------------------------|
+| Progressing | `pending`, `installing`, `upgrading`, `uninstalling`, `stopping`, `resuming`, `installingCanceling`, …Canceling | Backend is actively working; keep polling |
+| Terminal success | `running`, `stopped`, `uninstalled` | Mutation finished cleanly |
+| Terminal failure | `installFailed`, `upgradeFailed`, `uninstallFailed`, `stopFailed`, `resumeFailed` | Mutation finished with a hard error |
+| Canceled / cancel-failed | `installingCanceled`, `upgradingCanceled`, `resumingCanceled`, `installingCancelFailed`, `upgradingCancelFailed`, `resumingCancelFailed` | A `cancel` request landed (or failed) |
+
+The CLI maps each verb to the subset of buckets it considers terminal — see the `--watch` section below.
+
+### `OpType` vs `State` (race-safety)
+
+The same `State` can mean different things depending on which mutation is in flight. Concrete example: an `upgrade` issued against an app already in `running` will keep returning `state=running` with the `OpType` left over from the previous mutation (e.g. `opType=install`) for one or two ticks before the backend flips to `state=upgrading, opType=upgrade`. A naive watcher would declare success at tick zero.
+
+The fix lives in [`cli/cmd/ctl/market/watch.go`](cli/cmd/ctl/market/watch.go) (`waitForTerminal` + `watchTarget.matchOpType`): for mutating verbs the watcher refuses to accept any "success" classification until either:
+
+1. the row's `OpType` matches the op the CLI just issued, **or**
+2. the row disappears entirely (only legal for `uninstall` / `status`).
+
+`cancel` and `status` deliberately set `matchOpType=false` because they are op-agnostic by design.
+
+## Authentication transport
+
+Every request goes through the factory-injected `*http.Client` and uses the resolved profile from `cmdutil.Factory`. 
There is no kubeconfig dependency. + +- Base URL: `<rp.MarketURL>/app-store/api/v2` — built in [`cli/cmd/ctl/market/client.go`](cli/cmd/ctl/market/client.go) (`NewMarketClient` / `apiPrefix`). +- Auth header: `X-Authorization: <access_token>` (NOT `Authorization: Bearer …`). Cite [`cli/cmd/ctl/market/client.go`](cli/cmd/ctl/market/client.go) (`do` / `doStream`). +- `MarketURL` is derived from the Olares ID (`https://market.<localPrefix><terminusName>`) and surfaced through [`cli/pkg/credential/types.go`](cli/pkg/credential/types.go) (`ResolvedProfile.MarketURL`). +- For multipart chart uploads the CLI builds a separate, no-timeout client (`newMarketUploadHTTPClient`) so large `.tgz` pushes are not killed by the default request timeout. +- 401 / 403 are reformatted into a single human message via `reformatMarketAuthErr`. **Recovery is not handled here — defer to [`../olares-shared/SKILL.md`](../olares-shared/SKILL.md).** + +## Command cheatsheet + +All verbs live under `olares-cli market <verb>`. Common flags: `-s / --source` (single source), `-a / --all-sources` (where supported), `-o / --output {table,json}` (default `table`), `-q / --quiet` (suppress info on stderr; exit code still propagates). Mutating verbs additionally accept `-w / --watch` plus the timing knobs (see the [`--watch`](#watch-flag) section). + +### Catalog (read-only) + +```bash +olares-cli market list # default source: market.olares +olares-cli market list -c AI # filter by category +olares-cli market list -a # query every source the user has +olares-cli market list -o json +``` + +```bash +olares-cli market categories # category counts +olares-cli market categories -a -o json +``` + +```bash +olares-cli market get firefox # detailed info, table view +olares-cli market get firefox -o json # full upstream payload +``` + +`get` answers questions like "is this app cloneable?" — look at the `cloneable` field in JSON output (see [`cli/cmd/ctl/market/get.go`](cli/cmd/ctl/market/get.go)). 
+ +### Runtime (read-only) + +```bash +olares-cli market status # all installed apps in the resolved source +olares-cli market status firefox # one app, with the source-fallback hint +olares-cli market status firefox -a # search across every source +olares-cli market status firefox --watch # see the --watch section +``` + +`status <app>` UX rules — implemented in [`cli/cmd/ctl/market/status.go`](cli/cmd/ctl/market/status.go) (`runStatusSingle`): + +- If the row is missing in the resolved source **and** in every other source the user has, the CLI prints `app 'X' is not installed (run 'olares-cli market install X' to install it)`. +- If the row exists but under a different source than the one the user passed, the CLI prints an info hint `App is installed under source 'Y' (not 'X')` and continues to render the row, so the agent does not need to retry blindly. +- `runStatusAll` (no app argument) explicitly rejects `--watch`. Use `status <app> --watch` instead. + +### Lifecycle (mutating, support `--watch`) + +```bash +olares-cli market install firefox --watch +olares-cli market install firefox --version 1.0.11 --env DEBUG=1 --watch +``` + +```bash +olares-cli market upgrade firefox --version 1.0.12 --watch +``` + +```bash +olares-cli market uninstall firefox --watch +olares-cli market uninstall firefox --cascade --delete-data --watch # see Security rules +``` + +```bash +olares-cli market clone firefox --title "Firefox (work)" --watch +olares-cli market clone firefox --title "FF" --entrance-title firefox=Work --watch +``` + +`clone` quirks (cite [`cli/cmd/ctl/market/clone.go`](cli/cmd/ctl/market/clone.go)): + +- `--title` is required and capped at 30 characters. +- The clone target name is decided by the backend; the CLI tracks it in `OperationResult.TargetApp` and falls back to the source app name only if the backend never reports one. +- Only multi-instance apps are cloneable — confirm with `market get <app>` (`cloneable: true`) before running. 
+ +```bash +olares-cli market stop firefox --watch +olares-cli market stop firefox --cascade --watch # also stop dependents +olares-cli market resume firefox --watch +``` + +```bash +olares-cli market cancel firefox --watch # cancel the in-flight op +``` + +`cancel` always sets `matchOpType=false` (it is itself op-agnostic) — see [`cli/cmd/ctl/market/cancel.go`](cli/cmd/ctl/market/cancel.go). + +### Local sources (chart push) + +`upload` and `delete` only accept `-s upload | studio | cli` — `validateLocalSource` rejects anything else. + +```bash +olares-cli market upload ./myapp-1.0.0.tgz # default: -s cli +olares-cli market upload ./charts/ # all .tgz / .tar.gz under dir +olares-cli market upload ./myapp-1.0.0.tgz -s studio +``` + +```bash +olares-cli market delete myapp # latest version in the source +olares-cli market delete myapp --version 1.0.0 +``` + +`upload` does not run a chart — `install -s cli <app>` does. + +<a id="watch-flag"></a> +## `--watch` (block until terminal state) + +The synchronous CLI experience: lifecycle verbs return immediately when the backend accepts the request, but the actual mutation is asynchronous. `--watch` polls the same backend the SPA polls and only returns once the row reaches a terminal state (or the watcher gives up). Implementation lives in [`cli/cmd/ctl/market/watch.go`](cli/cmd/ctl/market/watch.go) (`waitForTerminal`, `runWithWatch`, `newWatchTarget`). + +### Flags + +Defined in [`cli/cmd/ctl/market/options.go`](cli/cmd/ctl/market/options.go) (`addWatchFlags`): + +- `-w / --watch` — opt-in. Default off. +- `--watch-timeout` (default `15m`) — total deadline; surfaced as a typed timeout error including last-seen state. +- `--watch-interval` (default `2s`) — polling interval. Lower values just hammer the backend; raise to `5s`–`10s` on slow networks. 
+ +### Per-op terminal sets + +| Op | Success | Failure | `matchOpType` | `absentMeansSuccess` | +|--------------|----------------------------------------------|----------------------------------------------------------|---------------|----------------------| +| `install` | `running` | `*Failed` ∪ `*Canceled` | true | false | +| `clone` | `running` | `*Failed` ∪ `*Canceled` | true | false | +| `upgrade` | `running` | `upgradeFailed` ∪ `*Canceled` | true | false | +| `uninstall` | `uninstalled` (or row absent) | `uninstallFailed` | true | true | +| `stop` | `stopped` | `stopFailed` | true | false | +| `resume` | `running` | `resumeFailed` / `resumingCanceled` / `resumingCancelFailed` | true | false | +| `cancel` | `*Canceled` | `*CancelFailed` | **false** | false | +| `status` | `running` / `stopped` / `uninstalled` / `*Canceled` | `*Failed` / `*CancelFailed` | **false** | true | + +The `*` columns expand to the matching state-set constants (`operationFailedStates`, `cancelFailedStates`, `canceledStates`) defined in [`cli/cmd/ctl/market/watch.go`](cli/cmd/ctl/market/watch.go). + +### `status --watch` (op-agnostic recovery) + +The reason `status` got its own `--watch` even though it is read-only: a user runs `install firefox` *without* `--watch`, then five minutes later wants to know whether it landed. The recovery is: + +```bash +olares-cli market status firefox --watch +``` + +The watcher uses `case watchStatus` in `newWatchTarget` (see [`cli/cmd/ctl/market/watch.go`](cli/cmd/ctl/market/watch.go)): any **stable** terminal state is success (the user did not declare an op, so any quiescent state is fine), and a row that has disappeared between ticks is also treated as success. `matchOpType` is forced to `false` so the CLI does not get stuck waiting for a specific OpType that is not its own. + +`status --watch` requires an app name — see the errors table. 
+
+### OpType gating in practice
+
+Concretely, `upgrade`'s watcher will not classify `state=running, opType=install` (an OpType left over from the previous mutation) as success. It waits until the backend reports `opType=upgrade` at least once, and only then accepts a `state=running` tick as terminal success. The same logic protects `install` after a previous `upgrade`, etc.
+
+### Output semantics
+
+| Mode | What the user sees |
+|---------------------|-------------------------------------------------------------------------------------|
+| TTY (`-o table`) | Per-transition `info` lines on stderr (`installing`, `running`, …) and final OK/Fail line. |
+| `-o json` | Exactly **one** final `OperationResult` JSON document with the new `finalState` / `finalOpType` fields populated (`omitempty` keeps non-watch JSON output unchanged). |
+| `-q / --quiet` | No transition lines, no final summary; exit code still reflects success/failure. |
+
+### Ctrl-C and timeout
+
+- The watch context is wrapped in `signal.NotifyContext` for `SIGINT` / `SIGTERM`. Pressing Ctrl-C exits cleanly with `<op> '<app>' watch canceled by user`. **The underlying mutation is NOT canceled** — re-attach with `status --watch` if needed.
+- Timeout exits with `<op> '<app>' watch timed out (last state: <S>, op: <O>)`. Again, the mutation may still be running on the cluster; `status --watch` is the canonical recovery.
+- After three consecutive transport errors the watcher gives up with `<op> '<app>' watch aborted after N consecutive errors`.
+ +### Watch flow (mermaid) + +```mermaid +flowchart TD + A[verb returns 200] --> B{--watch set?} + B -- no --> Z[print summary / exit] + B -- yes --> C[waitForTerminal loop] + C --> D[GetMarketState every --watch-interval] + D --> E{matchOpType ok?} + E -- waiting --> C + E -- ok --> F{state in successSet?} + F -- yes --> G[exit 0 + finalState] + F -- no --> H{state in failureSet?} + H -- yes --> I[exit 1 + finalState] + H -- no --> J{ctx done?} + J -- timeout --> K[watch timed out] + J -- signal --> L[watch canceled by user] + J -- no --> C +``` + +## Common errors → fixes + +| Error message | Cause | Fix | +|-------------------------------------------------------------------------------------------|---------------------------------------------------------------------|------------------------------------------------------------------------| +| `server rejected the access token (HTTP 401/403)` | Profile token is expired / wrong / missing | Defer to [`../olares-shared/SKILL.md`](../olares-shared/SKILL.md) (login + profile rules) | +| `app 'X' is not installed (run 'olares-cli market install X' to install it)` | Row missing in every source the user has | It really is not installed — install it, or check spelling | +| `App is installed under source 'Y' (not 'X')` (info, not error) | The user passed `-s X` but the row lives in source `Y` | Re-run with `-s Y` for a clean filter, or `-a` to query every source | +| `--watch requires an app name (...)` | `status --watch` (or any lifecycle verb) was invoked without an app | Pass an app name, or drop `--watch` for the listing | +| `<op> '<app>' watch timed out (last state: <S>, op: <O>)` | `--watch-timeout` elapsed before terminal state | Bump `--watch-timeout`, or drill into the stuck state via `market status <app>` | +| `<op> '<app>' watch canceled by user` | User pressed Ctrl-C | The mutation is likely still running on the cluster — re-attach with `market status <app> --watch` | +| `<op> '<app>' watch aborted after N 
consecutive errors` | Network / proxy flake during polling | Check connectivity, then re-attach with `status --watch` | +| `--title is required for cloning` / `--title cannot exceed 30 characters` | `clone` was invoked without a valid title | Pass `--title "<= 30 chars>"` | +| `app '<app>' from source '<src>' does not support clone` | App is single-instance only | Verify with `market get <app>`; only `cloneable: true` apps clone | +| `invalid version '<v>'` | `--version` value is not semver (`MAJOR.MINOR.PATCH[-pre][+meta]`) | Use a valid semver string | +| `invalid local source '<s>': must be one of upload, studio, cli` | `upload` / `delete` got a catalog source | Use `-s upload`, `-s studio`, or `-s cli` | + +## Typical workflows + +Install with watch (the happy path): + +```bash +olares-cli market install firefox --watch --watch-timeout 30m +echo "exit=$?" # 0 only after firefox reports state=running with opType=install +``` + +Forgot `--watch` on install — recover via `status --watch`: + +```bash +olares-cli market install firefox # backgrounded; CLI returns immediately +olares-cli market status firefox --watch # waits for any stable terminal state +``` + +Upgrade in place, scripted: + +```bash +olares-cli market upgrade firefox --version 1.0.12 --watch -o json | jq '.finalState' +# expect "running"; finalOpType = "upgrade" +``` + +Clone with entrance titles: + +```bash +olares-cli market clone firefox \ + --title "Firefox (work)" \ + --entrance-title firefox=Work \ + --watch +``` + +Stop, then resume: + +```bash +olares-cli market stop firefox --watch +olares-cli market resume firefox --watch +``` + +Cancel a stuck install and confirm the cancellation: + +```bash +olares-cli market cancel firefox --watch # waits for *Canceled +olares-cli market status firefox --watch # confirms installingCanceled / uninstalled +``` + +Push a local chart, then install it from `cli` source: + +```bash +olares-cli market upload ./myapp-1.0.0.tgz # defaults to -s cli +olares-cli 
market install myapp -s cli --watch +``` + +## Security rules + +- Confirm intent before `uninstall --delete-data` — this is **irreversible** on the user's volumes. +- Confirm intent before `uninstall --cascade` / `stop --cascade` — they fan out to every dependent chart and can take down adjacent apps. +- Never echo `<access_token>` into the terminal or into a script. The CLI already injects it via `X-Authorization`; if the agent thinks it needs to print the token, it is doing the wrong thing — read [`../olares-shared/SKILL.md`](../olares-shared/SKILL.md) instead. +- Treat `cancel` as a **request**, not a guarantee. The backend may have already finished the mutation by the time the cancel lands. Always re-confirm the actual landed state with `market status <app> --watch` before reporting "canceled" to the user. +- `--watch` on Ctrl-C / timeout exits the CLI but does **not** stop the cluster-side mutation. Communicate this clearly when surfacing a watch error. From 602896ca1e6ae9db6d6478ee4b1df0880fc52b42 Mon Sep 17 00:00:00 2001 From: eball <liuy102@hotmail.com> Date: Sun, 26 Apr 2026 16:49:46 +0800 Subject: [PATCH 12/12] fix: update go module --- daemon/go.mod | 2 +- daemon/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/daemon/go.mod b/daemon/go.mod index 04394f9de..93586f07b 100644 --- a/daemon/go.mod +++ b/daemon/go.mod @@ -24,7 +24,7 @@ require ( github.com/distribution/distribution/v3 v3.0.0 github.com/dustin/go-humanize v1.0.1 github.com/eball/zeroconf v0.2.5 - github.com/godbus/dbus/v5 v5.1.0 + github.com/godbus/dbus/v5 v5.2.2 github.com/gofiber/fiber/v2 v2.52.12 github.com/google/gopacket v1.1.19 github.com/hirochachacha/go-smb2 v1.1.0 diff --git a/daemon/go.sum b/daemon/go.sum index f8de82598..0b1bafdfb 100644 --- a/daemon/go.sum +++ b/daemon/go.sum @@ -128,8 +128,8 @@ github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDq github.com/go-task/slim-sprig/v3 v3.0.0 
h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.2.2 h1:TUR3TgtSVDmjiXOgAAyaZbYmIeP3DPkld3jgKGV8mXQ= +github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c= github.com/gofiber/fiber/v2 v2.52.12 h1:0LdToKclcPOj8PktUdIKo9BUohjjwfnQl42Dhw8/WUw= github.com/gofiber/fiber/v2 v2.52.12/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=